From 1aea663c6de4c08f0b2a2d4b2ca788772dc0b686 Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Sun, 1 May 2022 22:52:40 +0800
Subject: [PATCH] HBASE-26899 Run spotless:apply

---
RELEASENOTES.md | 301 +- bin/considerAsDead.sh | 8 +- bin/hbase-cleanup.sh | 6 +- bin/hbase-config.sh | 4 +- bin/master-backup.sh | 6 +- bin/regionservers.sh | 4 +- bin/stop-hbase.sh | 4 +- bin/test/process_based_cluster.sh | 4 +- bin/zookeepers.sh | 2 +- conf/hbase-env.sh | 10 +- conf/hbase-policy.xml | 16 +- dev-support/HBase Code Template.xml | 2 +- dev-support/HOW_TO_YETUS_LOCAL.md | 2 +- .../hbase_nightly_pseudo-distributed-test.sh | 56 +- dev-support/hbasetests.sh | 106 +- dev-support/jenkinsEnv.sh | 1 - dev-support/make_rc.sh | 2 +- dev-support/rebase_all_git_branches.sh | 22 +- dev-support/smart-apply-patch.sh | 2 +- dev-support/test-util.sh | 6 +- dev-support/zombie-detector.sh | 2 +- hbase-annotations/pom.xml | 4 +- .../hbase/testclassification/ClientTests.java | 2 - .../testclassification/CoprocessorTests.java | 2 - .../hbase/testclassification/FilterTests.java | 2 - .../hbase/testclassification/FlakeyTests.java | 2 - .../hbase/testclassification/IOTests.java | 2 - .../testclassification/IntegrationTests.java | 17 +- .../hbase/testclassification/LargeTests.java | 18 +- .../testclassification/MapReduceTests.java | 2 - .../hbase/testclassification/MasterTests.java | 2 - .../hbase/testclassification/MediumTests.java | 17 +- .../testclassification/MetricsTests.java | 1 - .../hbase/testclassification/MiscTests.java | 2 - .../hbase/testclassification/RPCTests.java | 2 - .../testclassification/RegionServerTests.java | 2 - .../testclassification/ReplicationTests.java | 2 - .../hbase/testclassification/RestTests.java | 2 - .../testclassification/SecurityTests.java | 2 - .../hbase/testclassification/SmallTests.java | 14 +- .../VerySlowMapReduceTests.java | 4 +- .../VerySlowRegionServerTests.java | 2 - .../hbase/testclassification/ZKTests.java | 1 - .../hbase-archetype-builder/pom.xml | 61 +- hbase-archetypes/hbase-client-project/pom.xml | 7 +- .../exemplars/client/HelloHBase.java | 110 +- .../exemplars/client/TestHelloHBase.java | 34 +- .../hbase-shaded-client-project/pom.xml | 27 +- .../exemplars/shaded_client/HelloHBase.java | 109 +- .../shaded_client/TestHelloHBase.java | 34 +- hbase-archetypes/pom.xml | 9 +- hbase-assembly/pom.xml | 335 +- hbase-asyncfs/pom.xml | 69 +- .../hbase/io/asyncfs/AsyncFSOutput.java | 5 +- .../hbase/io/asyncfs/AsyncFSOutputHelper.java | 8 +- .../asyncfs/FanOutOneBlockAsyncDFSOutput.java | 40 +- .../FanOutOneBlockAsyncDFSOutputHelper.java | 129 +- ...anOutOneBlockAsyncDFSOutputSaslHelper.java | 118 +- .../hbase/io/asyncfs/ProtobufDecoder.java | 58 +- .../io/asyncfs/SendBufSizePredictor.java | 2 +- .../io/asyncfs/WrapperAsyncFSOutput.java | 9 +- .../monitor/ExcludeDatanodeManager.java | 31 +- .../io/asyncfs/monitor/StreamSlowMonitor.java | 70 +- .../hbase/util/CancelableProgressable.java | 9 +- .../hbase/util/RecoverLeaseFSUtils.java | 24 +- .../hbase/io/asyncfs/AsyncFSTestBase.java | 2 +- .../asyncfs/TestExcludeDatanodeManager.java | 45 +- .../TestFanOutOneBlockAsyncDFSOutput.java | 9 +- .../TestFanOutOneBlockAsyncDFSOutputHang.java | 7 +- .../io/asyncfs/TestLocalAsyncOutput.java | 2 +- .../TestOverwriteFileUnderConstruction.java | 2 +- .../TestSaslFanOutOneBlockAsyncDFSOutput.java | 4 +- .../io/asyncfs/TestSendBufSizePredictor.java | 4 +- .../hbase/security/HBaseKerberosUtils.java | 6 +- .../hbase/util/TestRecoverLeaseFSUtils.java | 7 +- hbase-build-configuration/pom.xml | 32 +-
hbase-checkstyle/pom.xml | 56 +- hbase-client/pom.xml | 59 +- .../org/apache/hadoop/hbase/Abortable.java | 12 +- .../hadoop/hbase/AsyncMetaTableAccessor.java | 169 +- .../hadoop/hbase/CacheEvictionStats.java | 18 +- .../hbase/CacheEvictionStatsAggregator.java | 5 +- .../hbase/CacheEvictionStatsBuilder.java | 6 +- .../hadoop/hbase/CallDroppedException.java | 7 +- .../hbase/CallQueueTooBigException.java | 7 +- .../hadoop/hbase/ClockOutOfSyncException.java | 7 +- .../org/apache/hadoop/hbase/ClusterId.java | 25 +- .../apache/hadoop/hbase/ClusterMetrics.java | 63 +- .../hadoop/hbase/ClusterMetricsBuilder.java | 247 +- .../apache/hadoop/hbase/ClusterStatus.java | 150 +- .../ConcurrentTableModificationException.java | 3 +- .../org/apache/hadoop/hbase/Coprocessor.java | 39 +- .../hadoop/hbase/CoprocessorEnvironment.java | 18 +- .../hadoop/hbase/DoNotRetryIOException.java | 5 +- .../hbase/DroppedSnapshotException.java | 21 +- ...loseWALAfterInitializedErrorException.java | 22 +- .../hadoop/hbase/HBaseServerException.java | 6 +- .../hadoop/hbase/HColumnDescriptor.java | 255 +- .../org/apache/hadoop/hbase/HRegionInfo.java | 576 ++-- .../apache/hadoop/hbase/HRegionLocation.java | 36 +- .../apache/hadoop/hbase/HTableDescriptor.java | 379 +-- .../InvalidFamilyOperationException.java | 8 +- .../apache/hadoop/hbase/KeepDeletedCells.java | 21 +- .../hbase/MasterNotRunningException.java | 4 +- .../hadoop/hbase/MemoryCompactionPolicy.java | 16 +- .../hadoop/hbase/MetaTableAccessor.java | 733 ++--- .../hbase/MultiActionResultTooLarge.java | 11 +- .../hadoop/hbase/NamespaceExistException.java | 3 +- .../hbase/NamespaceNotFoundException.java | 3 +- .../NotAllMetaRegionsOnlineException.java | 7 +- .../hbase/NotServingRegionException.java | 4 +- .../hadoop/hbase/PleaseHoldException.java | 11 +- .../hbase/PleaseRestartMasterException.java | 2 - .../hadoop/hbase/RSGroupTableAccessor.java | 20 +- .../apache/hadoop/hbase/RegionException.java | 6 +- .../org/apache/hadoop/hbase/RegionLoad.java | 156 +- .../apache/hadoop/hbase/RegionLocations.java | 118 +- .../apache/hadoop/hbase/RegionMetrics.java | 18 +- .../hadoop/hbase/RegionMetricsBuilder.java | 321 +- .../hadoop/hbase/RegionTooBusyException.java | 13 +- .../ReplicationPeerNotFoundException.java | 3 +- .../hbase/RetryImmediatelyException.java | 5 +- .../org/apache/hadoop/hbase/ServerLoad.java | 217 +- .../apache/hadoop/hbase/ServerMetrics.java | 11 +- .../hadoop/hbase/ServerMetricsBuilder.java | 124 +- .../org/apache/hadoop/hbase/ServerTask.java | 2 +- .../hadoop/hbase/ServerTaskBuilder.java | 7 +- .../java/org/apache/hadoop/hbase/Size.java | 16 +- .../hadoop/hbase/TableExistsException.java | 13 +- .../hbase/TableInfoMissingException.java | 4 +- .../hbase/TableNotDisabledException.java | 3 +- .../hbase/TableNotEnabledException.java | 4 +- .../hadoop/hbase/TableNotFoundException.java | 3 +- .../hadoop/hbase/UnknownRegionException.java | 7 +- .../hadoop/hbase/UnknownScannerException.java | 12 +- .../org/apache/hadoop/hbase/UserMetrics.java | 14 +- .../hadoop/hbase/UserMetricsBuilder.java | 76 +- .../hbase/ZooKeeperConnectionException.java | 7 +- .../hbase/client/AbstractClientScanner.java | 4 +- .../hadoop/hbase/client/AbstractResponse.java | 8 +- .../AbstractRpcBasedConnectionRegistry.java | 9 +- .../apache/hadoop/hbase/client/Action.java | 15 +- .../org/apache/hadoop/hbase/client/Admin.java | 1546 +++++---- .../client/AdvancedScanResultConsumer.java | 14 +- .../client/AllowPartialScanResultCache.java | 3 +- .../apache/hadoop/hbase/client/Append.java | 80 +- 
.../hadoop/hbase/client/AsyncAdmin.java | 478 ++- .../hbase/client/AsyncAdminBuilder.java | 15 +- .../hbase/client/AsyncAdminBuilderBase.java | 3 +- .../AsyncAdminRequestRetryingCaller.java | 6 +- .../client/AsyncBatchRpcRetryingCaller.java | 41 +- .../hbase/client/AsyncBufferedMutator.java | 2 +- .../client/AsyncBufferedMutatorBuilder.java | 3 +- .../AsyncBufferedMutatorBuilderImpl.java | 4 +- .../client/AsyncBufferedMutatorImpl.java | 9 +- .../hbase/client/AsyncClientScanner.java | 50 +- .../hadoop/hbase/client/AsyncConnection.java | 10 +- .../client/AsyncConnectionConfiguration.java | 44 +- .../hbase/client/AsyncConnectionImpl.java | 8 +- .../hadoop/hbase/client/AsyncHBaseAdmin.java | 80 +- .../AsyncMasterRequestRpcRetryingCaller.java | 12 +- .../hbase/client/AsyncMetaRegionLocator.java | 8 +- .../client/AsyncNonMetaRegionLocator.java | 69 +- .../hadoop/hbase/client/AsyncProcess.java | 163 +- .../hadoop/hbase/client/AsyncProcessTask.java | 37 +- .../hbase/client/AsyncRegionLocator.java | 79 +- .../client/AsyncRegionLocatorHelper.java | 11 +- .../hbase/client/AsyncRequestFuture.java | 8 +- .../hbase/client/AsyncRequestFutureImpl.java | 370 ++- .../hbase/client/AsyncRpcRetryingCaller.java | 21 +- .../client/AsyncRpcRetryingCallerFactory.java | 28 +- ...syncScanSingleRegionRpcRetryingCaller.java | 56 +- .../AsyncServerRequestRpcRetryingCaller.java | 14 +- .../AsyncSingleRequestRpcRetryingCaller.java | 18 +- .../hadoop/hbase/client/AsyncTable.java | 66 +- .../hbase/client/AsyncTableBuilder.java | 15 +- .../hbase/client/AsyncTableBuilderBase.java | 8 +- .../hadoop/hbase/client/AsyncTableImpl.java | 10 +- .../hbase/client/AsyncTableRegionLocator.java | 12 +- .../client/AsyncTableRegionLocatorImpl.java | 6 +- .../hbase/client/AsyncTableResultScanner.java | 2 +- .../hadoop/hbase/client/Attributes.java | 10 +- .../hadoop/hbase/client/BalanceRequest.java | 37 +- .../hadoop/hbase/client/BalanceResponse.java | 28 +- .../hadoop/hbase/client/BalancerDecision.java | 29 +- .../hbase/client/BalancerRejection.java | 41 +- .../hadoop/hbase/client/BatchErrors.java | 16 +- .../hbase/client/BatchScanResultCache.java | 10 +- .../hadoop/hbase/client/BufferedMutator.java | 114 +- .../hbase/client/BufferedMutatorImpl.java | 140 +- .../hbase/client/BufferedMutatorParams.java | 32 +- .../hadoop/hbase/client/Cancellable.java | 10 +- .../CancellableRegionServerCallable.java | 64 +- .../CatalogReplicaLoadBalanceSelector.java | 7 +- ...alogReplicaLoadBalanceSelectorFactory.java | 8 +- ...talogReplicaLoadBalanceSimpleSelector.java | 110 +- .../hbase/client/CatalogReplicaMode.java | 26 +- .../hadoop/hbase/client/CheckAndMutate.java | 36 +- .../client/ClientAsyncPrefetchScanner.java | 8 +- .../ClientCoprocessorRpcController.java | 3 +- .../hbase/client/ClientIdGenerator.java | 25 +- .../hadoop/hbase/client/ClientScanner.java | 38 +- .../hbase/client/ClientServiceCallable.java | 37 +- .../hbase/client/ClientSimpleScanner.java | 18 +- .../hadoop/hbase/client/ClientUtil.java | 25 +- .../hbase/client/ClusterConnection.java | 154 +- .../hbase/client/ClusterStatusListener.java | 57 +- .../hbase/client/ColumnFamilyDescriptor.java | 145 +- .../client/ColumnFamilyDescriptorBuilder.java | 396 ++- .../hadoop/hbase/client/CompactType.java | 15 +- .../hadoop/hbase/client/CompactionState.java | 7 +- .../hbase/client/CompleteScanResultCache.java | 5 +- .../hadoop/hbase/client/Connection.java | 149 +- .../hbase/client/ConnectionConfiguration.java | 100 +- .../hbase/client/ConnectionFactory.java | 24 +- 
.../client/ConnectionImplementation.java | 434 ++- .../hbase/client/ConnectionRegistry.java | 4 +- .../client/ConnectionRegistryFactory.java | 1 + .../hadoop/hbase/client/ConnectionUtils.java | 34 +- .../hadoop/hbase/client/Consistency.java | 31 +- .../hbase/client/CoprocessorDescriptor.java | 11 +- .../client/CoprocessorDescriptorBuilder.java | 7 +- .../apache/hadoop/hbase/client/Cursor.java | 9 +- .../hadoop/hbase/client/DelayingRunner.java | 31 +- .../apache/hadoop/hbase/client/Delete.java | 199 +- .../client/DoNotRetryRegionException.java | 3 +- .../hadoop/hbase/client/Durability.java | 23 +- .../hadoop/hbase/client/FailureInfo.java | 19 +- .../client/FastFailInterceptorContext.java | 5 +- .../hbase/client/FlushRegionCallable.java | 25 +- .../org/apache/hadoop/hbase/client/Get.java | 149 +- .../hadoop/hbase/client/HBaseAdmin.java | 1869 ++++++----- .../apache/hadoop/hbase/client/HBaseHbck.java | 85 +- .../hadoop/hbase/client/HRegionLocator.java | 75 +- .../apache/hadoop/hbase/client/HTable.java | 572 ++-- .../hbase/client/HTableMultiplexer.java | 135 +- .../org/apache/hadoop/hbase/client/Hbck.java | 76 +- .../client/ImmutableHColumnDescriptor.java | 12 +- .../hbase/client/ImmutableHRegionInfo.java | 7 +- .../client/ImmutableHTableDescriptor.java | 12 +- .../hadoop/hbase/client/ImmutableScan.java | 3 - .../apache/hadoop/hbase/client/Increment.java | 123 +- .../hadoop/hbase/client/IsolationLevel.java | 22 +- .../hbase/client/LockTimeoutException.java | 3 +- .../apache/hadoop/hbase/client/LogEntry.java | 6 +- .../hadoop/hbase/client/LogQueryFilter.java | 43 +- .../hadoop/hbase/client/MasterCallable.java | 45 +- .../MasterCoprocessorRpcChannelImpl.java | 15 +- .../client/MasterKeepAliveConnection.java | 25 +- .../hadoop/hbase/client/MasterRegistry.java | 2 +- .../hadoop/hbase/client/MasterSwitchType.java | 4 +- .../apache/hadoop/hbase/client/MetaCache.java | 80 +- .../hbase/client/MetricsConnection.java | 183 +- .../client/MobCompactPartitionPolicy.java | 1 - .../hadoop/hbase/client/MultiAction.java | 28 +- .../hadoop/hbase/client/MultiResponse.java | 37 +- .../hbase/client/MultiServerCallable.java | 27 +- .../hbase/client/MutableRegionInfo.java | 95 +- .../apache/hadoop/hbase/client/Mutation.java | 361 +-- .../NoOpRetryableCallerInterceptor.java | 23 +- .../NoOpRetryingInterceptorContext.java | 2 +- .../client/NoServerForRegionException.java | 3 +- .../hadoop/hbase/client/NonceGenerator.java | 10 +- .../client/NoncedRegionServerCallable.java | 36 +- .../client/NormalizeTableFilterParams.java | 29 +- .../hadoop/hbase/client/OnlineLogRecord.java | 87 +- .../apache/hadoop/hbase/client/Operation.java | 45 +- .../hbase/client/OperationWithAttributes.java | 28 +- .../client/PackagePrivateFieldAccessor.java | 2 +- .../client/PerClientRandomNonceGenerator.java | 7 +- .../client/PreemptiveFastFailInterceptor.java | 177 +- .../org/apache/hadoop/hbase/client/Put.java | 153 +- .../org/apache/hadoop/hbase/client/Query.java | 96 +- .../hbase/client/RawAsyncHBaseAdmin.java | 1284 ++++---- .../hbase/client/RawAsyncTableImpl.java | 292 +- .../client/RegionAdminServiceCallable.java | 57 +- .../client/RegionCoprocessorRpcChannel.java | 66 +- .../RegionCoprocessorRpcChannelImpl.java | 36 +- .../client/RegionCoprocessorServiceExec.java | 39 +- .../hadoop/hbase/client/RegionInfo.java | 387 ++- .../hbase/client/RegionInfoBuilder.java | 14 +- .../hbase/client/RegionInfoDisplay.java | 45 +- .../hadoop/hbase/client/RegionLoadStats.java | 6 +- .../hadoop/hbase/client/RegionLocateType.java | 6 +- 
.../hadoop/hbase/client/RegionLocator.java | 23 +- .../hbase/client/RegionOfflineException.java | 4 +- .../hbase/client/RegionReplicaUtil.java | 67 +- .../hbase/client/RegionServerCallable.java | 100 +- ...RegionServerCoprocessorRpcChannelImpl.java | 15 +- .../hbase/client/RegionStatesCount.java | 4 +- .../client/RegistryEndpointsRefresher.java | 4 +- .../hbase/client/RequestController.java | 45 +- .../client/RequestControllerFactory.java | 10 +- .../apache/hadoop/hbase/client/Result.java | 564 ++-- .../ResultBoundedCompletionService.java | 79 +- .../hadoop/hbase/client/ResultStatsUtil.java | 11 +- .../client/RetriesExhaustedException.java | 37 +- .../RetriesExhaustedWithDetailsException.java | 44 +- .../hadoop/hbase/client/RetryingCallable.java | 24 +- .../client/RetryingCallerInterceptor.java | 66 +- .../RetryingCallerInterceptorContext.java | 40 +- .../RetryingCallerInterceptorFactory.java | 45 +- .../hbase/client/RetryingTimeTracker.java | 21 +- .../hbase/client/ReversedClientScanner.java | 40 +- .../hbase/client/ReversedScannerCallable.java | 93 +- .../org/apache/hadoop/hbase/client/Row.java | 8 +- .../apache/hadoop/hbase/client/RowAccess.java | 7 +- .../hadoop/hbase/client/RowMutations.java | 57 +- .../hbase/client/RowTooBigException.java | 9 +- .../hbase/client/RpcConnectionRegistry.java | 4 +- .../hbase/client/RpcRetryingCallable.java | 3 +- .../hbase/client/RpcRetryingCaller.java | 22 +- .../client/RpcRetryingCallerFactory.java | 38 +- .../hbase/client/RpcRetryingCallerImpl.java | 77 +- .../RpcRetryingCallerWithReadReplicas.java | 119 +- .../org/apache/hadoop/hbase/client/Scan.java | 431 ++- .../hadoop/hbase/client/ScanResultCache.java | 5 +- .../hbase/client/ScanResultConsumer.java | 2 +- .../hbase/client/ScanResultConsumerBase.java | 2 +- .../hadoop/hbase/client/ScannerCallable.java | 74 +- .../client/ScannerCallableWithReplicas.java | 106 +- .../hbase/client/SecureBulkLoadClient.java | 109 +- .../hbase/client/ServerStatisticTracker.java | 8 +- .../hadoop/hbase/client/ServerType.java | 8 +- .../hadoop/hbase/client/ServiceCaller.java | 7 +- .../client/ShortCircuitMasterConnection.java | 197 +- .../hbase/client/SimpleRequestController.java | 145 +- .../hadoop/hbase/client/SingleResponse.java | 3 +- .../hadoop/hbase/client/SlowLogParams.java | 19 +- .../hbase/client/SnapshotDescription.java | 54 +- .../hadoop/hbase/client/SnapshotType.java | 6 +- .../hbase/client/StatisticTrackable.java | 19 +- .../client/SyncCoprocessorRpcChannel.java | 33 +- .../org/apache/hadoop/hbase/client/Table.java | 627 ++-- .../hadoop/hbase/client/TableBuilder.java | 5 +- .../hadoop/hbase/client/TableBuilderBase.java | 7 +- .../hadoop/hbase/client/TableDescriptor.java | 137 +- .../hbase/client/TableDescriptorBuilder.java | 553 ++-- .../hbase/client/TableDescriptorUtils.java | 11 +- .../hadoop/hbase/client/TableState.java | 96 +- .../hbase/client/UnmodifyableHRegionInfo.java | 8 +- .../hbase/client/ZKConnectionRegistry.java | 18 +- .../client/backoff/ClientBackoffPolicy.java | 9 +- .../backoff/ClientBackoffPolicyFactory.java | 13 +- .../ExponentialClientBackoffPolicy.java | 22 +- .../client/backoff/ServerStatistics.java | 23 +- .../hbase/client/coprocessor/Batch.java | 37 +- .../BigDecimalColumnInterpreter.java | 28 +- .../coprocessor/DoubleColumnInterpreter.java | 39 +- .../coprocessor/LongColumnInterpreter.java | 35 +- .../hbase/client/metrics/ScanMetrics.java | 28 +- .../client/metrics/ServerSideScanMetrics.java | 31 +- .../client/replication/ReplicationAdmin.java | 168 +- 
.../ReplicationPeerConfigUtil.java | 152 +- .../hbase/client/replication/TableCFs.java | 8 +- .../client/security/SecurityCapability.java | 18 +- .../client/trace/ConnectionSpanBuilder.java | 43 +- .../client/trace/IpcClientSpanBuilder.java | 19 +- .../trace/TableOperationSpanBuilder.java | 37 +- .../hbase/client/trace/TableSpanBuilder.java | 13 +- .../hbase/coprocessor/ColumnInterpreter.java | 127 +- .../coprocessor/CoprocessorException.java | 9 +- .../exceptions/ClientExceptionsUtil.java | 30 +- .../exceptions/ConnectionClosedException.java | 5 +- .../ConnectionClosingException.java | 16 +- .../FailedSanityCheckException.java | 7 +- .../MasterRegistryFetchException.java | 2 +- .../exceptions/MasterStoppedException.java | 2 +- .../exceptions/MergeRegionException.java | 6 +- .../OutOfOrderScannerNextException.java | 2 +- .../PreemptiveFastFailException.java | 64 +- .../exceptions/RegionMovedException.java | 24 +- .../exceptions/RegionOpeningException.java | 5 +- .../exceptions/RequestTooBigException.java | 8 +- .../exceptions/ScannerResetException.java | 3 +- .../exceptions/UnknownProtocolException.java | 6 +- .../hbase/filter/BigDecimalComparator.java | 20 +- .../hadoop/hbase/filter/BinaryComparator.java | 31 +- .../filter/BinaryComponentComparator.java | 33 +- .../hbase/filter/BinaryPrefixComparator.java | 35 +- .../hadoop/hbase/filter/BitComparator.java | 49 +- .../hbase/filter/ColumnCountGetFilter.java | 33 +- .../hbase/filter/ColumnPaginationFilter.java | 105 +- .../hbase/filter/ColumnPrefixFilter.java | 48 +- .../hbase/filter/ColumnRangeFilter.java | 99 +- .../hbase/filter/ColumnValueFilter.java | 65 +- .../hadoop/hbase/filter/CompareFilter.java | 137 +- .../hbase/filter/DependentColumnFilter.java | 177 +- .../hadoop/hbase/filter/FamilyFilter.java | 74 +- .../apache/hadoop/hbase/filter/Filter.java | 180 +- .../hadoop/hbase/filter/FilterBase.java | 77 +- .../hadoop/hbase/filter/FilterList.java | 31 +- .../hadoop/hbase/filter/FilterListBase.java | 8 +- .../hbase/filter/FilterListWithAND.java | 28 +- .../hadoop/hbase/filter/FilterListWithOR.java | 230 +- .../hbase/filter/FirstKeyOnlyFilter.java | 30 +- ...FirstKeyValueMatchingQualifiersFilter.java | 47 +- .../hadoop/hbase/filter/FuzzyRowFilter.java | 97 +- .../hbase/filter/InclusiveStopFilter.java | 57 +- .../filter/IncompatibleFilterException.java | 5 +- .../filter/InvalidRowFilterException.java | 4 +- .../hadoop/hbase/filter/KeyOnlyFilter.java | 47 +- .../hadoop/hbase/filter/LongComparator.java | 77 +- .../hbase/filter/MultiRowRangeFilter.java | 252 +- .../filter/MultipleColumnPrefixFilter.java | 95 +- .../hadoop/hbase/filter/NullComparator.java | 34 +- .../hadoop/hbase/filter/PageFilter.java | 44 +- .../hadoop/hbase/filter/ParseConstants.java | 52 +- .../hadoop/hbase/filter/ParseFilter.java | 738 +++-- .../hadoop/hbase/filter/PrefixFilter.java | 45 +- .../hadoop/hbase/filter/QualifierFilter.java | 61 +- .../hadoop/hbase/filter/RandomRowFilter.java | 38 +- .../hbase/filter/RegexStringComparator.java | 116 +- .../apache/hadoop/hbase/filter/RowFilter.java | 67 +- .../SingleColumnValueExcludeFilter.java | 175 +- .../hbase/filter/SingleColumnValueFilter.java | 281 +- .../hadoop/hbase/filter/SkipFilter.java | 49 +- .../hbase/filter/SubstringComparator.java | 41 +- .../hadoop/hbase/filter/TimestampsFilter.java | 70 +- .../hadoop/hbase/filter/ValueFilter.java | 65 +- .../hadoop/hbase/filter/WhileMatchFilter.java | 32 +- .../hadoop/hbase/ipc/AbstractRpcClient.java | 94 +- .../hadoop/hbase/ipc/BadAuthException.java | 2 +- 
.../hadoop/hbase/ipc/BlockingRpcCallback.java | 20 +- .../hadoop/hbase/ipc/BlockingRpcClient.java | 8 +- .../hbase/ipc/BlockingRpcConnection.java | 101 +- .../ipc/BufferCallBeforeInitHandler.java | 20 +- .../org/apache/hadoop/hbase/ipc/Call.java | 25 +- .../hbase/ipc/CallCancelledException.java | 2 +- .../apache/hadoop/hbase/ipc/CallEvent.java | 5 +- .../hbase/ipc/CallTimeoutException.java | 5 +- .../ipc/CallerDisconnectedException.java | 9 +- .../hadoop/hbase/ipc/CellBlockBuilder.java | 76 +- .../ipc/CellScannerButNoCodecException.java | 2 +- .../apache/hadoop/hbase/ipc/ConnectionId.java | 20 +- .../hbase/ipc/CoprocessorRpcChannel.java | 12 +- .../hadoop/hbase/ipc/CoprocessorRpcUtils.java | 155 +- .../ipc/DelegatingHBaseRpcController.java | 9 +- .../hbase/ipc/FailedServerException.java | 9 +- .../hadoop/hbase/ipc/FailedServers.java | 17 +- .../ipc/FallbackDisallowedException.java | 4 +- .../hbase/ipc/FatalConnectionException.java | 7 +- .../hadoop/hbase/ipc/HBaseRpcController.java | 15 +- .../hbase/ipc/HBaseRpcControllerImpl.java | 14 +- .../org/apache/hadoop/hbase/ipc/IPCUtil.java | 78 +- .../hadoop/hbase/ipc/NettyRpcClient.java | 9 +- .../hbase/ipc/NettyRpcClientConfigHelper.java | 13 +- .../hadoop/hbase/ipc/NettyRpcConnection.java | 2 +- .../hbase/ipc/NettyRpcDuplexHandler.java | 16 +- .../hbase/ipc/RemoteWithExtrasException.java | 22 +- .../apache/hadoop/hbase/ipc/RpcClient.java | 41 +- .../hadoop/hbase/ipc/RpcClientFactory.java | 23 +- .../hadoop/hbase/ipc/RpcConnection.java | 20 +- .../hbase/ipc/RpcControllerFactory.java | 11 +- .../ipc/ServerNotRunningYetException.java | 3 - .../hadoop/hbase/ipc/ServerRpcController.java | 25 +- .../hbase/ipc/ServerTooBusyException.java | 2 - .../hbase/ipc/StoppedRpcClientException.java | 2 +- .../ipc/UnsupportedCellCodecException.java | 2 +- .../UnsupportedCompressionCodecException.java | 2 +- .../hbase/ipc/UnsupportedCryptoException.java | 3 +- .../hbase/ipc/WrongVersionException.java | 2 +- .../hadoop/hbase/master/RegionState.java | 79 +- .../hadoop/hbase/protobuf/ProtobufMagic.java | 30 +- .../protobuf/ProtobufMessageConverter.java | 33 +- .../hadoop/hbase/protobuf/ProtobufUtil.java | 778 ++--- .../hbase/quotas/QuotaExceededException.java | 3 +- .../hadoop/hbase/quotas/QuotaFilter.java | 3 +- .../hadoop/hbase/quotas/QuotaRetriever.java | 26 +- .../hadoop/hbase/quotas/QuotaScope.java | 15 +- .../hadoop/hbase/quotas/QuotaSettings.java | 79 +- .../hbase/quotas/QuotaSettingsFactory.java | 226 +- .../hadoop/hbase/quotas/QuotaTableUtil.java | 351 ++- .../apache/hadoop/hbase/quotas/QuotaType.java | 2 +- .../hbase/quotas/RpcThrottlingException.java | 70 +- .../hbase/quotas/SpaceLimitSettings.java | 80 +- .../hbase/quotas/SpaceQuotaSnapshot.java | 43 +- .../hbase/quotas/SpaceQuotaSnapshotView.java | 2 +- .../hbase/quotas/SpaceViolationPolicy.java | 22 +- .../hadoop/hbase/quotas/ThrottleSettings.java | 31 +- .../hadoop/hbase/quotas/ThrottleType.java | 2 +- .../hbase/quotas/ThrottlingException.java | 62 +- .../hadoop/hbase/regionserver/BloomType.java | 4 +- .../hbase/regionserver/LeaseException.java | 5 +- .../NoSuchColumnFamilyException.java | 3 +- .../RegionServerAbortedException.java | 3 +- .../RegionServerRunningException.java | 8 +- .../RegionServerStoppedException.java | 4 +- .../regionserver/WrongRegionException.java | 4 +- .../wal/FailedLogCloseException.java | 10 +- .../FailedSyncBeforeLogCloseException.java | 9 +- .../replication/ReplicationException.java | 4 +- .../replication/ReplicationLoadSink.java | 30 +- 
.../replication/ReplicationLoadSource.java | 45 +- .../replication/ReplicationPeerConfig.java | 48 +- .../ReplicationPeerConfigBuilder.java | 31 +- .../ReplicationPeerDescription.java | 2 +- .../security/AbstractHBaseSaslRpcClient.java | 49 +- .../hbase/security/AccessDeniedException.java | 5 +- .../hadoop/hbase/security/AuthMethod.java | 9 +- .../security/CryptoAESUnwrapHandler.java | 8 +- .../hbase/security/CryptoAESWrapHandler.java | 11 +- .../hadoop/hbase/security/EncryptionUtil.java | 126 +- .../hbase/security/HBaseSaslRpcClient.java | 47 +- .../NettyHBaseRpcConnectionHeaderHandler.java | 19 +- .../security/NettyHBaseSaslRpcClient.java | 14 +- .../NettyHBaseSaslRpcClientHandler.java | 41 +- .../hbase/security/SaslChallengeDecoder.java | 23 +- .../hadoop/hbase/security/SaslStatus.java | 9 +- .../hbase/security/SaslUnwrapHandler.java | 9 +- .../hadoop/hbase/security/SaslUtil.java | 17 +- .../hbase/security/SaslWrapHandler.java | 6 +- .../hadoop/hbase/security/SecurityInfo.java | 20 +- .../security/access/AccessControlClient.java | 235 +- .../access/AccessControlConstants.java | 9 +- .../security/access/AccessControlUtil.java | 536 ++-- .../access/GetUserPermissionsRequest.java | 5 +- .../security/access/GlobalPermission.java | 3 +- .../security/access/NamespacePermission.java | 8 +- .../hbase/security/access/Permission.java | 64 +- .../access/ShadedAccessControlUtil.java | 67 +- .../security/access/TablePermission.java | 50 +- .../hbase/security/access/UserPermission.java | 13 +- ...tractSaslClientAuthenticationProvider.java | 5 +- .../AuthenticationProviderSelector.java | 11 +- .../provider/BuiltInProviderSelector.java | 49 +- .../BuiltInSaslAuthenticationProvider.java | 5 +- .../DigestSaslAuthenticationProvider.java | 4 +- ...igestSaslClientAuthenticationProvider.java | 12 +- .../GssSaslAuthenticationProvider.java | 4 +- .../GssSaslClientAuthenticationProvider.java | 29 +- .../security/provider/SaslAuthMethod.java | 17 +- .../provider/SaslAuthenticationProvider.java | 12 +- .../SaslClientAuthenticationProvider.java | 41 +- .../SaslClientAuthenticationProviders.java | 77 +- .../SimpleSaslAuthenticationProvider.java | 4 +- ...impleSaslClientAuthenticationProvider.java | 10 +- .../token/AuthenticationTokenIdentifier.java | 45 +- .../token/AuthenticationTokenSelector.java | 23 +- .../hbase/security/token/ClientTokenUtil.java | 49 +- .../security/visibility/Authorizations.java | 8 +- .../security/visibility/CellVisibility.java | 21 +- .../visibility/InvalidLabelException.java | 1 - .../security/visibility/VisibilityClient.java | 297 +- .../visibility/VisibilityConstants.java | 20 +- ...VisibilityControllerNotReadyException.java | 3 +- .../visibility/VisibilityLabelsValidator.java | 3 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 1823 +++++------ .../shaded/protobuf/RequestConverter.java | 928 +++--- .../shaded/protobuf/ResponseConverter.java | 172 +- .../hbase/slowlog/SlowLogTableAccessor.java | 33 +- .../ClientSnapshotDescriptionUtils.java | 25 +- .../snapshot/CorruptedSnapshotException.java | 11 +- .../snapshot/ExportSnapshotException.java | 4 +- .../snapshot/HBaseSnapshotException.java | 16 +- .../snapshot/RestoreSnapshotException.java | 12 +- .../snapshot/SnapshotCreationException.java | 16 +- .../SnapshotDoesNotExistException.java | 4 +- .../snapshot/SnapshotExistsException.java | 6 +- .../snapshot/TablePartiallyOpenException.java | 3 +- .../snapshot/UnknownSnapshotException.java | 4 +- .../util/FileSystemVersionException.java | 5 +- .../apache/hadoop/hbase/util/JsonMapper.java | 2 
+- .../org/apache/hadoop/hbase/util/PoolMap.java | 123 +- .../apache/hadoop/hbase/util/Writables.java | 75 +- .../hbase/zookeeper/ReadOnlyZKClient.java | 17 +- .../hadoop/hbase/zookeeper/ZKMetadata.java | 3 +- .../hadoop/hbase/zookeeper/ZNodePaths.java | 70 +- .../hbase/zookeeper/ZooKeeperHelper.java | 19 +- .../hadoop/hbase/TestHColumnDescriptor.java | 42 +- .../hadoop/hbase/TestHTableDescriptor.java | 46 +- .../hadoop/hbase/TestRegionLocations.java | 14 +- .../client/RpcRetryingCallerImplTest.java | 36 +- .../client/TestAsyncAdminRpcPriority.java | 2 +- .../TestAsyncConnectionConfiguration.java | 3 +- .../client/TestAsyncConnectionTracing.java | 13 +- .../hadoop/hbase/client/TestAsyncProcess.java | 485 ++- .../TestAsyncProcessWithRegionException.java | 53 +- .../client/TestAsyncRegionLocatorTracing.java | 58 +- .../client/TestAsyncTableRpcPriority.java | 41 +- .../hbase/client/TestAsyncTableTracing.java | 138 +- .../hadoop/hbase/client/TestAttributes.java | 35 +- .../hbase/client/TestBufferedMutator.java | 16 +- .../client/TestBufferedMutatorParams.java | 46 +- .../client/TestClientExponentialBackoff.java | 44 +- .../hbase/client/TestClientNoCluster.java | 313 +- .../hbase/client/TestClientScanner.java | 192 +- .../TestColumnFamilyDescriptorBuilder.java | 51 +- ...stColumnFamilyDescriptorLowerCaseEnum.java | 4 +- .../client/TestConnectionConfiguration.java | 4 +- .../client/TestConnectionFactoryTracing.java | 2 + .../TestConnectionImplementationTracing.java | 1 + .../client/TestConnectionRegistryLeak.java | 4 +- .../client/TestCoprocessorDescriptor.java | 17 +- .../hbase/client/TestDelayingRunner.java | 13 +- .../hbase/client/TestDeleteTimeStamp.java | 9 +- .../apache/hadoop/hbase/client/TestGet.java | 84 +- .../client/TestHTableMultiplexerViaMocks.java | 10 +- .../hbase/client/TestHTableTracing.java | 130 +- .../TestImmutableHColumnDescriptor.java | 49 +- .../client/TestImmutableHRegionInfo.java | 14 +- .../client/TestImmutableHTableDescriptor.java | 58 +- .../hbase/client/TestImmutableScan.java | 64 +- .../hadoop/hbase/client/TestIncrement.java | 16 +- .../hbase/client/TestInterfaceAlign.java | 2 +- .../hbase/client/TestMetricsConnection.java | 103 +- .../hadoop/hbase/client/TestMutation.java | 102 +- .../hadoop/hbase/client/TestOperation.java | 300 +- .../hbase/client/TestProcedureFuture.java | 50 +- .../hadoop/hbase/client/TestPutDotHas.java | 9 +- .../hbase/client/TestPutWriteToWal.java | 13 +- .../hbase/client/TestRegionInfoBuilder.java | 4 +- .../hbase/client/TestRegionInfoDisplay.java | 67 +- .../client/TestRegionLocatorTracing.java | 51 +- .../hbase/client/TestResultStatsUtil.java | 23 +- ...tRetriesExhaustedWithDetailsException.java | 11 +- .../client/TestReversedScannerCallable.java | 20 +- .../hbase/client/TestRowComparator.java | 13 +- .../TestRpcBasedRegistryHedgedReads.java | 5 +- .../client/TestRpcRetryingCallerImpl.java | 8 +- .../apache/hadoop/hbase/client/TestScan.java | 97 +- .../hbase/client/TestScannerCallable.java | 12 +- .../client/TestSimpleRequestController.java | 53 +- .../hbase/client/TestSnapshotFromAdmin.java | 58 +- .../client/TestTableDescriptorBuilder.java | 183 +- .../client/TestTableDescriptorUtils.java | 33 +- .../hbase/client/TestTableRpcPriority.java | 31 +- .../hadoop/hbase/client/TestTracingBase.java | 30 +- .../client/trace/StringTraceRenderer.java | 31 +- .../trace/hamcrest/AttributesMatchers.java | 41 +- .../client/trace/hamcrest/EventMatchers.java | 14 +- .../trace/hamcrest/SpanDataMatchers.java | 20 +- 
.../client/trace/hamcrest/TraceTestUtil.java | 22 +- .../exceptions/TestClientExceptionsUtil.java | 4 +- .../hadoop/hbase/filter/TestComparators.java | 58 +- .../hbase/filter/TestKeyOnlyFilter.java | 35 +- .../hbase/filter/TestLongComparator.java | 10 +- .../hbase/ipc/TestCellBlockBuilder.java | 30 +- .../hadoop/hbase/ipc/TestConnectionId.java | 25 +- .../hbase/ipc/TestFailedServersLog.java | 6 +- .../hbase/ipc/TestHBaseRpcControllerImpl.java | 4 +- .../apache/hadoop/hbase/ipc/TestIPCUtil.java | 23 +- .../hbase/ipc/TestNettyRpcConnection.java | 2 +- .../ipc/TestRemoteWithExtrasException.java | 14 +- .../TestRpcClientDeprecatedNameMapping.java | 4 +- .../hadoop/hbase/quotas/TestQuotaFilter.java | 21 +- .../TestQuotaGlobalsSettingsBypass.java | 37 +- .../quotas/TestQuotaSettingsFactory.java | 52 +- .../hbase/quotas/TestSpaceLimitSettings.java | 24 +- .../hbase/quotas/TestThrottleSettings.java | 38 +- .../TestReplicationPeerConfig.java | 140 +- .../hbase/security/TestEncryptionUtil.java | 23 +- .../security/TestHBaseSaslRpcClient.java | 124 +- .../hadoop/hbase/security/TestSaslUtil.java | 6 +- .../provider/TestDefaultProviderSelector.java | 12 +- ...TestSaslClientAuthenticationProviders.java | 61 +- .../security/token/TestClientTokenUtil.java | 6 +- .../shaded/protobuf/TestProtobufUtil.java | 173 +- .../hadoop/hbase/util/BuilderStyleTest.java | 39 +- .../hadoop/hbase/util/PoolMapTestBase.java | 10 +- .../hbase/util/TestRoundRobinPoolMap.java | 4 +- .../hbase/util/TestThreadLocalPoolMap.java | 4 +- .../hbase/zookeeper/TestZNodePaths.java | 4 +- hbase-common/pom.xml | 233 +- .../apache/hadoop/hbase/ArrayBackedTag.java | 30 +- .../org/apache/hadoop/hbase/AuthUtil.java | 92 +- .../apache/hadoop/hbase/BaseConfigurable.java | 9 +- .../hadoop/hbase/ByteBufferExtendedCell.java | 67 +- .../hbase/ByteBufferKeyOnlyKeyValue.java | 24 +- .../hadoop/hbase/ByteBufferKeyValue.java | 24 +- .../apache/hadoop/hbase/ByteBufferTag.java | 12 +- .../java/org/apache/hadoop/hbase/Cell.java | 60 +- .../org/apache/hadoop/hbase/CellBuilder.java | 8 +- .../hadoop/hbase/CellBuilderFactory.java | 14 +- .../apache/hadoop/hbase/CellBuilderType.java | 9 +- .../apache/hadoop/hbase/CellComparator.java | 52 +- .../hadoop/hbase/CellComparatorImpl.java | 185 +- .../apache/hadoop/hbase/CellScannable.java | 11 +- .../org/apache/hadoop/hbase/CellScanner.java | 10 +- .../org/apache/hadoop/hbase/CellUtil.java | 503 ++- .../org/apache/hadoop/hbase/ChoreService.java | 45 +- .../hadoop/hbase/CompoundConfiguration.java | 125 +- .../org/apache/hadoop/hbase/ExtendedCell.java | 22 +- .../hadoop/hbase/ExtendedCellBuilder.java | 15 +- .../hbase/ExtendedCellBuilderFactory.java | 4 +- .../hadoop/hbase/ExtendedCellBuilderImpl.java | 1 - .../hadoop/hbase/HBaseConfiguration.java | 161 +- .../apache/hadoop/hbase/HBaseIOException.java | 7 +- .../hadoop/hbase/HBaseInterfaceAudience.java | 9 +- .../org/apache/hadoop/hbase/HConstants.java | 593 ++-- .../hbase/IndividualBytesFieldCell.java | 112 +- .../IndividualBytesFieldCellBuilder.java | 9 +- ...JitterScheduledThreadPoolExecutorImpl.java | 34 +- .../org/apache/hadoop/hbase/KeyValue.java | 1215 ++++---- .../apache/hadoop/hbase/KeyValueBuilder.java | 10 +- .../apache/hadoop/hbase/KeyValueTestUtil.java | 73 +- .../org/apache/hadoop/hbase/KeyValueUtil.java | 387 +-- .../hadoop/hbase/MetaCellComparator.java | 20 +- .../hadoop/hbase/MetaMutationAnnotation.java | 11 +- .../hadoop/hbase/NamespaceDescriptor.java | 50 +- .../hbase/NoTagsByteBufferKeyValue.java | 3 +- .../apache/hadoop/hbase/NoTagsKeyValue.java | 
5 +- .../apache/hadoop/hbase/PrivateCellUtil.java | 641 ++-- .../java/org/apache/hadoop/hbase/RawCell.java | 3 +- .../apache/hadoop/hbase/RawCellBuilder.java | 11 +- .../hadoop/hbase/RawCellBuilderFactory.java | 8 +- .../apache/hadoop/hbase/ScheduledChore.java | 56 +- .../org/apache/hadoop/hbase/ServerName.java | 145 +- .../hbase/ServiceNotRunningException.java | 18 + .../hbase/SizeCachedByteBufferKeyValue.java | 11 +- .../hadoop/hbase/SizeCachedKeyValue.java | 18 +- .../SizeCachedNoTagsByteBufferKeyValue.java | 13 +- .../hbase/SizeCachedNoTagsKeyValue.java | 13 +- .../org/apache/hadoop/hbase/Stoppable.java | 6 +- .../org/apache/hadoop/hbase/TableName.java | 261 +- .../java/org/apache/hadoop/hbase/Tag.java | 26 +- .../org/apache/hadoop/hbase/TagBuilder.java | 9 +- .../hadoop/hbase/TagBuilderFactory.java | 7 +- .../java/org/apache/hadoop/hbase/TagType.java | 6 +- .../java/org/apache/hadoop/hbase/TagUtil.java | 19 +- .../hadoop/hbase/backup/BackupType.java | 7 +- .../hadoop/hbase/codec/BaseDecoder.java | 16 +- .../hadoop/hbase/codec/BaseEncoder.java | 6 +- .../apache/hadoop/hbase/codec/CellCodec.java | 49 +- .../hadoop/hbase/codec/CellCodecWithTags.java | 34 +- .../org/apache/hadoop/hbase/codec/Codec.java | 29 +- .../hadoop/hbase/codec/CodecException.java | 3 +- .../hadoop/hbase/codec/KeyValueCodec.java | 17 +- .../hbase/codec/KeyValueCodecWithTags.java | 18 +- .../hbase/conf/ConfigurationManager.java | 72 +- .../hbase/conf/ConfigurationObserver.java | 9 +- .../PropagatingConfigurationObserver.java | 9 +- .../exceptions/DeserializationException.java | 2 +- .../hbase/exceptions/HBaseException.java | 2 +- .../IllegalArgumentIOException.java | 4 +- .../hbase/exceptions/TimeoutIOException.java | 4 +- .../exceptions/UnexpectedStateException.java | 2 +- .../hbase/filter/ByteArrayComparable.java | 42 +- .../io/BoundedDelegatingInputStream.java | 19 +- .../hbase/io/ByteArrayOutputStream.java | 5 +- .../hadoop/hbase/io/ByteBuffAllocator.java | 36 +- .../hadoop/hbase/io/ByteBuffInputStream.java | 13 +- .../hbase/io/ByteBufferInputStream.java | 9 +- .../hbase/io/ByteBufferListOutputStream.java | 8 +- .../hbase/io/ByteBufferOutputStream.java | 45 +- .../hadoop/hbase/io/ByteBufferWriter.java | 6 +- .../io/ByteBufferWriterDataOutputStream.java | 6 +- .../io/ByteBufferWriterOutputStream.java | 35 +- .../hadoop/hbase/io/CellOutputStream.java | 27 +- .../DeallocateRewriteByteBuffAllocator.java | 10 +- .../hbase/io/DelegatingInputStream.java | 22 +- .../org/apache/hadoop/hbase/io/HeapSize.java | 21 +- .../hbase/io/ImmutableBytesWritable.java | 102 +- .../hadoop/hbase/io/SizedCellScanner.java | 11 +- .../hbase/io/TagCompressionContext.java | 50 +- .../org/apache/hadoop/hbase/io/TimeRange.java | 42 +- .../hadoop/hbase/io/compress/CanReinit.java | 27 +- .../hadoop/hbase/io/compress/Compression.java | 172 +- .../hbase/io/compress/CompressionUtil.java | 40 +- .../hbase/io/compress/DictionaryCache.java | 68 +- .../io/compress/ReusableStreamGzipCodec.java | 55 +- .../apache/hadoop/hbase/io/crypto/Cipher.java | 54 +- .../hbase/io/crypto/CipherProvider.java | 26 +- .../hadoop/hbase/io/crypto/Context.java | 32 +- .../hbase/io/crypto/CryptoCipherProvider.java | 30 +- .../hadoop/hbase/io/crypto/Decryptor.java | 12 +- .../io/crypto/DefaultCipherProvider.java | 30 +- .../hadoop/hbase/io/crypto/Encryption.java | 311 +- .../hadoop/hbase/io/crypto/Encryptor.java | 12 +- .../hadoop/hbase/io/crypto/KeyProvider.java | 42 +- .../hbase/io/crypto/KeyStoreKeyProvider.java | 68 +- .../hadoop/hbase/io/crypto/aes/AES.java | 10 +- 
.../hbase/io/crypto/aes/AESDecryptor.java | 3 +- .../hbase/io/crypto/aes/AESEncryptor.java | 3 +- .../hbase/io/crypto/aes/CommonsCryptoAES.java | 20 +- .../crypto/aes/CommonsCryptoAESDecryptor.java | 6 +- .../crypto/aes/CommonsCryptoAESEncryptor.java | 6 +- .../hadoop/hbase/io/crypto/aes/CryptoAES.java | 46 +- .../io/encoding/AbstractDataBlockEncoder.java | 35 +- .../io/encoding/BufferedDataBlockEncoder.java | 256 +- .../hbase/io/encoding/CompressionState.java | 65 +- .../io/encoding/CopyKeyDataBlockEncoder.java | 49 +- .../hbase/io/encoding/DataBlockEncoder.java | 103 +- .../hbase/io/encoding/DataBlockEncoding.java | 69 +- .../io/encoding/DiffKeyDeltaEncoder.java | 143 +- .../hbase/io/encoding/EncodedDataBlock.java | 104 +- .../EncoderBufferTooSmallException.java | 23 +- .../hbase/io/encoding/EncodingState.java | 2 +- .../io/encoding/FastDiffDeltaEncoder.java | 229 +- .../encoding/HFileBlockDecodingContext.java | 55 +- .../HFileBlockDefaultDecodingContext.java | 39 +- .../HFileBlockDefaultEncodingContext.java | 65 +- .../encoding/HFileBlockEncodingContext.java | 44 +- .../hadoop/hbase/io/encoding/NoneEncoder.java | 5 +- .../io/encoding/PrefixKeyDeltaEncoder.java | 74 +- .../hbase/io/encoding/RowIndexCodecV1.java | 70 +- .../hbase/io/encoding/RowIndexEncoderV1.java | 32 +- .../hbase/io/encoding/RowIndexSeekerV1.java | 72 +- .../hadoopbackport/ThrottledInputStream.java | 45 +- .../hadoop/hbase/io/hfile/BlockType.java | 56 +- .../hadoop/hbase/io/hfile/HFileContext.java | 73 +- .../hbase/io/hfile/HFileContextBuilder.java | 15 +- .../hadoop/hbase/io/util/BlockIOUtils.java | 76 +- .../hadoop/hbase/io/util/Dictionary.java | 50 +- .../hadoop/hbase/io/util/LRUDictionary.java | 41 +- .../hadoop/hbase/io/util/StreamUtils.java | 16 +- .../apache/hadoop/hbase/log/HBaseMarkers.java | 3 +- .../org/apache/hadoop/hbase/net/Address.java | 27 +- .../org/apache/hadoop/hbase/nio/ByteBuff.java | 241 +- .../hbase/nio/HBaseReferenceCounted.java | 2 +- .../hadoop/hbase/nio/MultiByteBuff.java | 193 +- .../org/apache/hadoop/hbase/nio/RefCnt.java | 2 +- .../hadoop/hbase/nio/SingleByteBuff.java | 4 +- .../hadoop/hbase/rsgroup/RSGroupInfo.java | 13 +- .../hbase/security/SecurityConstants.java | 15 +- .../hadoop/hbase/security/Superusers.java | 30 +- .../apache/hadoop/hbase/security/User.java | 155 +- .../hadoop/hbase/security/UserProvider.java | 108 +- .../hbase/trace/HBaseSemanticAttributes.java | 18 +- .../apache/hadoop/hbase/trace/TraceUtil.java | 33 +- .../hbase/types/CopyOnWriteArrayMap.java | 88 +- .../apache/hadoop/hbase/types/DataType.java | 68 +- .../hbase/types/FixedLengthWrapper.java | 27 +- .../hadoop/hbase/types/OrderedBlob.java | 8 +- .../hadoop/hbase/types/OrderedBlobVar.java | 6 +- .../hadoop/hbase/types/OrderedBytesBase.java | 5 +- .../hadoop/hbase/types/OrderedFloat32.java | 2 +- .../hadoop/hbase/types/OrderedFloat64.java | 2 +- .../hadoop/hbase/types/OrderedInt16.java | 2 +- .../hadoop/hbase/types/OrderedInt32.java | 2 +- .../hadoop/hbase/types/OrderedInt64.java | 2 +- .../hadoop/hbase/types/OrderedInt8.java | 2 +- .../hadoop/hbase/types/OrderedNumeric.java | 14 +- .../hadoop/hbase/types/OrderedString.java | 2 +- .../org/apache/hadoop/hbase/types/PBType.java | 21 +- .../apache/hadoop/hbase/types/RawByte.java | 6 +- .../apache/hadoop/hbase/types/RawBytes.java | 9 +- .../hbase/types/RawBytesFixedLength.java | 14 +- .../hbase/types/RawBytesTerminated.java | 17 +- .../apache/hadoop/hbase/types/RawDouble.java | 6 +- .../apache/hadoop/hbase/types/RawFloat.java | 6 +- 
.../apache/hadoop/hbase/types/RawInteger.java | 6 +- .../apache/hadoop/hbase/types/RawLong.java | 6 +- .../apache/hadoop/hbase/types/RawShort.java | 6 +- .../apache/hadoop/hbase/types/RawString.java | 9 +- .../hbase/types/RawStringFixedLength.java | 10 +- .../hbase/types/RawStringTerminated.java | 15 +- .../org/apache/hadoop/hbase/types/Struct.java | 54 +- .../hadoop/hbase/types/StructBuilder.java | 6 +- .../hadoop/hbase/types/StructIterator.java | 38 +- .../hadoop/hbase/types/TerminatedWrapper.java | 20 +- .../org/apache/hadoop/hbase/types/Union2.java | 10 +- .../org/apache/hadoop/hbase/types/Union3.java | 10 +- .../org/apache/hadoop/hbase/types/Union4.java | 10 +- .../hadoop/hbase/util/AbstractByteRange.java | 48 +- .../hadoop/hbase/util/AbstractHBaseTool.java | 58 +- .../util/AbstractPositionedByteRange.java | 40 +- .../apache/hadoop/hbase/util/Addressing.java | 40 +- .../apache/hadoop/hbase/util/AtomicUtils.java | 3 +- .../org/apache/hadoop/hbase/util/AvlUtil.java | 168 +- .../hbase/util/BoundedCompletionService.java | 19 +- .../hadoop/hbase/util/ByteArrayHashKey.java | 2 +- .../hbase/util/ByteBufferAllocator.java | 3 +- .../hadoop/hbase/util/ByteBufferArray.java | 46 +- .../hadoop/hbase/util/ByteBufferUtils.java | 329 +- .../apache/hadoop/hbase/util/ByteRange.java | 158 +- .../hadoop/hbase/util/ByteRangeUtils.java | 3 +- .../org/apache/hadoop/hbase/util/Bytes.java | 1067 +++---- .../apache/hadoop/hbase/util/CellHashKey.java | 2 +- .../hadoop/hbase/util/ChecksumType.java | 35 +- .../hadoop/hbase/util/ClassLoaderBase.java | 10 +- .../apache/hadoop/hbase/util/ClassSize.java | 173 +- .../org/apache/hadoop/hbase/util/Classes.java | 19 +- .../hadoop/hbase/util/CollectionUtils.java | 5 +- .../hadoop/hbase/util/CommonFSUtils.java | 274 +- .../hadoop/hbase/util/ConcatenatedLists.java | 21 +- .../hadoop/hbase/util/ConcurrentMapUtils.java | 8 +- .../hbase/util/CoprocessorClassLoader.java | 156 +- .../org/apache/hadoop/hbase/util/Counter.java | 48 +- .../org/apache/hadoop/hbase/util/DNS.java | 49 +- .../hbase/util/DefaultEnvironmentEdge.java | 1 - .../hadoop/hbase/util/DynamicClassLoader.java | 74 +- .../hadoop/hbase/util/EnvironmentEdge.java | 7 +- .../hbase/util/EnvironmentEdgeManager.java | 86 +- .../hadoop/hbase/util/ExceptionUtil.java | 15 +- .../hbase/util/ExponentialMovingAverage.java | 1 - .../apache/hadoop/hbase/util/FutureUtils.java | 10 +- .../apache/hadoop/hbase/util/GsonUtil.java | 2 +- .../org/apache/hadoop/hbase/util/Hash.java | 17 +- .../org/apache/hadoop/hbase/util/HashKey.java | 6 +- .../org/apache/hadoop/hbase/util/IdLock.java | 39 +- .../hadoop/hbase/util/ImmutableByteArray.java | 2 +- .../util/IncrementingEnvironmentEdge.java | 5 +- .../apache/hadoop/hbase/util/JRubyFormat.java | 29 +- .../org/apache/hadoop/hbase/util/JVM.java | 88 +- .../apache/hadoop/hbase/util/JenkinsHash.java | 291 +- .../apache/hadoop/hbase/util/KeyLocker.java | 43 +- .../org/apache/hadoop/hbase/util/MD5Hash.java | 27 +- .../org/apache/hadoop/hbase/util/Methods.java | 26 +- .../hadoop/hbase/util/MovingAverage.java | 4 +- .../apache/hadoop/hbase/util/MurmurHash.java | 15 +- .../apache/hadoop/hbase/util/MurmurHash3.java | 48 +- .../apache/hadoop/hbase/util/NonceKey.java | 13 +- .../hadoop/hbase/util/ObjectIntPair.java | 8 +- .../apache/hadoop/hbase/util/ObjectPool.java | 62 +- .../org/apache/hadoop/hbase/util/Order.java | 13 +- .../hadoop/hbase/util/OrderedBytes.java | 921 +++--- .../org/apache/hadoop/hbase/util/Pair.java | 67 +- .../hadoop/hbase/util/PairOfSameType.java | 27 +- 
.../hbase/util/PositionedByteRange.java | 46 +- .../hadoop/hbase/util/PrettyPrinter.java | 109 +- .../apache/hadoop/hbase/util/Random64.java | 44 +- .../hadoop/hbase/util/ReflectionUtils.java | 82 +- .../hadoop/hbase/util/ReservoirSample.java | 2 +- .../hadoop/hbase/util/RetryCounter.java | 28 +- .../hbase/util/RetryCounterFactory.java | 10 +- .../hadoop/hbase/util/RowBloomHashKey.java | 2 +- .../hadoop/hbase/util/RowColBloomHashKey.java | 4 +- .../hadoop/hbase/util/SimpleByteRange.java | 12 +- .../hbase/util/SimpleMovingAverage.java | 1 - .../hbase/util/SimpleMutableByteRange.java | 70 +- .../hbase/util/SimplePositionedByteRange.java | 34 +- .../SimplePositionedMutableByteRange.java | 111 +- .../org/apache/hadoop/hbase/util/Sleeper.java | 35 +- .../hadoop/hbase/util/SoftObjectPool.java | 16 +- .../org/apache/hadoop/hbase/util/Strings.java | 44 +- .../org/apache/hadoop/hbase/util/Threads.java | 62 +- .../hadoop/hbase/util/TimeMeasurable.java | 8 +- .../org/apache/hadoop/hbase/util/Triple.java | 4 +- .../hadoop/hbase/util/UnsafeAccess.java | 67 +- .../apache/hadoop/hbase/util/VersionInfo.java | 27 +- .../hadoop/hbase/util/WeakObjectPool.java | 21 +- .../hbase/util/WeightedMovingAverage.java | 6 +- .../hbase/util/WindowMovingAverage.java | 14 +- .../hadoop/hbase/zookeeper/ZKConfig.java | 119 +- .../src/main/resources/hbase-default.xml | 4 +- hbase-common/src/saveVersion.sh | 1 - .../org/apache/hadoop/hbase/ClassFinder.java | 63 +- .../apache/hadoop/hbase/ClassTestFinder.java | 23 +- .../hadoop/hbase/HBaseClassTestRule.java | 28 +- .../hbase/HBaseClassTestRuleChecker.java | 16 +- .../hbase/HBaseCommonTestingUtility.java | 53 +- .../apache/hadoop/hbase/MatcherPredicate.java | 13 +- .../apache/hadoop/hbase/ResourceChecker.java | 35 +- .../hbase/ResourceCheckerJUnitListener.java | 16 +- .../apache/hadoop/hbase/SystemExitRule.java | 7 +- .../hadoop/hbase/TableNameTestRule.java | 4 +- .../hadoop/hbase/TestByteBufferKeyValue.java | 155 +- .../apache/hadoop/hbase/TestCellBuilder.java | 84 +- .../hadoop/hbase/TestCellComparator.java | 127 +- .../org/apache/hadoop/hbase/TestCellUtil.java | 103 +- .../apache/hadoop/hbase/TestChoreService.java | 4 +- .../apache/hadoop/hbase/TestClassFinder.java | 74 +- .../hbase/TestCompoundConfiguration.java | 36 +- .../hadoop/hbase/TestHBaseClassTestRule.java | 28 +- .../hadoop/hbase/TestHBaseConfiguration.java | 150 +- .../hbase/TestIndividualBytesFieldCell.java | 171 +- .../org/apache/hadoop/hbase/TestKeyValue.java | 239 +- .../apache/hadoop/hbase/TestServerName.java | 16 +- .../hadoop/hbase/TestSystemExitInTest.java | 7 +- .../apache/hadoop/hbase/TestTableName.java | 55 +- .../apache/hadoop/hbase/TestTagBuilder.java | 15 +- .../org/apache/hadoop/hbase/TestTagUtil.java | 4 +- .../org/apache/hadoop/hbase/TestTimeout.java | 16 +- .../hadoop/hbase/TimedOutTestsListener.java | 38 +- .../java/org/apache/hadoop/hbase/Waiter.java | 44 +- .../hadoop/hbase/codec/TestCellCodec.java | 15 +- .../hbase/codec/TestCellCodecWithTags.java | 27 +- .../hadoop/hbase/codec/TestKeyValueCodec.java | 23 +- .../codec/TestKeyValueCodecWithTags.java | 27 +- .../hbase/conf/TestConfigurationManager.java | 9 +- .../hbase/io/TestByteBuffAllocator.java | 6 +- .../io/TestByteBufferListOutputStream.java | 4 +- .../io/TestMultiByteBuffInputStream.java | 4 +- .../hbase/io/TestTagCompressionContext.java | 46 +- .../io/compress/CompressionTestBase.java | 86 +- .../io/crypto/KeyProviderForTesting.java | 26 +- .../hbase/io/crypto/TestCipherProvider.java | 18 +- .../hbase/io/crypto/TestEncryption.java | 40 
+- .../hbase/io/crypto/TestKeyProvider.java | 6 +- .../io/crypto/TestKeyStoreKeyProvider.java | 13 +- .../hadoop/hbase/io/crypto/aes/TestAES.java | 8 +- .../hbase/io/crypto/aes/TestCommonsAES.java | 7 +- .../TestThrottledInputStream.java | 6 +- .../hbase/io/util/TestLRUDictionary.java | 45 +- .../hadoop/hbase/logging/TestJul2Slf4j.java | 4 +- .../hadoop/hbase/logging/TestLog4jUtils.java | 2 +- .../hadoop/hbase/net/BoundSocketMaker.java | 13 +- .../apache/hadoop/hbase/net/TestAddress.java | 14 +- .../hadoop/hbase/nio/TestMultiByteBuff.java | 23 +- .../hadoop/hbase/nio/TestSingleByteBuff.java | 4 +- .../hbase/types/TestCopyOnWriteMaps.java | 42 +- .../hbase/types/TestFixedLengthWrapper.java | 18 +- .../hadoop/hbase/types/TestOrderedBlob.java | 21 +- .../hbase/types/TestOrderedBlobVar.java | 22 +- .../hadoop/hbase/types/TestOrderedString.java | 19 +- .../hadoop/hbase/types/TestRawString.java | 14 +- .../apache/hadoop/hbase/types/TestStruct.java | 122 +- .../hbase/types/TestStructNullExtension.java | 27 +- .../hbase/types/TestTerminatedWrapper.java | 21 +- .../apache/hadoop/hbase/types/TestUnion2.java | 19 +- .../hbase/util/AbstractHBaseToolTest.java | 36 +- .../hbase/util/ClassLoaderTestHelper.java | 79 +- .../EnvironmentEdgeManagerTestHelper.java | 7 +- .../hbase/util/LoadTestKVGenerator.java | 74 +- .../util/NonRepeatedEnvironmentEdge.java | 2 +- .../hadoop/hbase/util/RandomDistribution.java | 105 +- .../hbase/util/RedundantKVGenerator.java | 133 +- .../hbase/util/SimpleKdcServerUtil.java | 25 +- .../apache/hadoop/hbase/util/TestAvlUtil.java | 14 +- .../hbase/util/TestByteBufferArray.java | 6 +- .../hbase/util/TestByteBufferUtils.java | 123 +- .../hadoop/hbase/util/TestByteRangeUtils.java | 46 +- .../TestByteRangeWithKVSerialization.java | 19 +- .../apache/hadoop/hbase/util/TestBytes.java | 134 +- .../apache/hadoop/hbase/util/TestClasses.java | 8 +- .../hadoop/hbase/util/TestCommonFSUtils.java | 13 +- .../hbase/util/TestConcatenatedLists.java | 10 +- .../util/TestCoprocessorClassLoader.java | 36 +- .../apache/hadoop/hbase/util/TestCounter.java | 26 +- .../hbase/util/TestDynamicClassLoader.java | 18 +- .../util/TestEnvironmentEdgeManager.java | 9 +- .../hadoop/hbase/util/TestFutureUtils.java | 2 +- .../hadoop/hbase/util/TestGsonUtil.java | 5 +- .../hadoop/hbase/util/TestJRubyFormat.java | 7 +- .../hadoop/hbase/util/TestKeyLocker.java | 8 +- .../hbase/util/TestLoadTestKVGenerator.java | 8 +- .../hadoop/hbase/util/TestMovingAverage.java | 7 +- .../apache/hadoop/hbase/util/TestOrder.java | 7 +- .../hadoop/hbase/util/TestOrderedBytes.java | 327 +- .../hbase/util/TestReservoirSample.java | 2 +- .../hadoop/hbase/util/TestRetryCounter.java | 14 +- .../hadoop/hbase/util/TestShowProperties.java | 10 +- .../hbase/util/TestSimpleKdcServerUtil.java | 9 +- .../util/TestSimpleMutableByteRange.java | 30 +- .../TestSimplePositionedMutableByteRange.java | 6 +- .../apache/hadoop/hbase/util/TestThreads.java | 17 +- .../hadoop/hbase/util/TestVersionInfo.java | 4 +- .../hadoop/hbase/util/TestWeakObjectPool.java | 23 +- .../hbase/util/TimeOffsetEnvironmentEdge.java | 7 +- .../hadoop/hbase/zookeeper/TestZKConfig.java | 27 +- .../hbase-compression-aircompressor/pom.xml | 72 +- .../aircompressor/HadoopCompressor.java | 31 +- .../aircompressor/HadoopDecompressor.java | 31 +- .../io/compress/aircompressor/Lz4Codec.java | 33 +- .../io/compress/aircompressor/LzoCodec.java | 33 +- .../compress/aircompressor/SnappyCodec.java | 33 +- .../io/compress/aircompressor/ZstdCodec.java | 48 +- .../TestHFileCompressionLz4.java | 6 
+- .../TestHFileCompressionLzo.java | 6 +- .../TestHFileCompressionSnappy.java | 6 +- .../TestHFileCompressionZstd.java | 6 +- .../compress/aircompressor/TestLz4Codec.java | 27 +- .../compress/aircompressor/TestLzoCodec.java | 27 +- .../aircompressor/TestSnappyCodec.java | 27 +- .../aircompressor/TestWALCompressionLz4.java | 4 +- .../aircompressor/TestWALCompressionLzo.java | 4 +- .../TestWALCompressionSnappy.java | 4 +- .../aircompressor/TestWALCompressionZstd.java | 4 +- .../compress/aircompressor/TestZstdCodec.java | 27 +- .../hbase-compression-brotli/pom.xml | 72 +- .../hbase/io/compress/brotli/BrotliCodec.java | 27 +- .../io/compress/brotli/BrotliCompressor.java | 24 +- .../compress/brotli/BrotliDecompressor.java | 24 +- .../io/compress/brotli/TestBrotliCodec.java | 27 +- .../brotli/TestHFileCompressionBrotli.java | 6 +- .../brotli/TestWALCompressionBrotli.java | 4 +- .../hbase-compression-lz4/pom.xml | 50 +- .../hbase/io/compress/lz4/Lz4Codec.java | 28 +- .../hbase/io/compress/lz4/Lz4Compressor.java | 29 +- .../io/compress/lz4/Lz4Decompressor.java | 29 +- .../compress/lz4/TestHFileCompressionLz4.java | 6 +- .../hbase/io/compress/lz4/TestLz4Codec.java | 29 +- .../compress/lz4/TestWALCompressionLz4.java | 4 +- .../hbase-compression-snappy/pom.xml | 50 +- .../hbase/io/compress/xerial/SnappyCodec.java | 28 +- .../io/compress/xerial/SnappyCompressor.java | 25 +- .../compress/xerial/SnappyDecompressor.java | 25 +- .../xerial/TestHFileCompressionSnappy.java | 6 +- .../io/compress/xerial/TestSnappyCodec.java | 27 +- .../xerial/TestWALCompressionSnappy.java | 4 +- .../hbase-compression-xz/pom.xml | 50 +- .../hbase/io/compress/xz/LzmaCodec.java | 28 +- .../hbase/io/compress/xz/LzmaCompressor.java | 28 +- .../io/compress/xz/LzmaDecompressor.java | 24 +- .../compress/xz/TestHFileCompressionLzma.java | 6 +- .../hbase/io/compress/xz/TestLzmaCodec.java | 29 +- .../compress/xz/TestWALCompressionLzma.java | 4 +- .../hbase-compression-zstd/pom.xml | 50 +- .../hbase/io/compress/zstd/ZstdCodec.java | 38 +- .../io/compress/zstd/ZstdCompressor.java | 29 +- .../io/compress/zstd/ZstdDecompressor.java | 27 +- .../zstd/TestHFileCompressionZstd.java | 6 +- .../compress/zstd/TestWALCompressionZstd.java | 4 +- .../hbase/io/compress/zstd/TestZstdCodec.java | 29 +- .../io/compress/zstd/TestZstdDictionary.java | 27 +- .../zstd/TestZstdDictionarySplitMerge.java | 16 +- hbase-compression/pom.xml | 8 +- hbase-endpoint/pom.xml | 94 +- .../client/coprocessor/AggregationClient.java | 679 ++-- .../client/coprocessor/AggregationHelper.java | 25 +- .../coprocessor/AsyncAggregationClient.java | 112 +- .../coprocessor/AggregateImplementation.java | 176 +- .../hadoop/hbase/coprocessor/Export.java | 172 +- .../access/SecureBulkLoadEndpoint.java | 68 +- .../client/TestAsyncAggregationClient.java | 41 +- .../client/TestRpcControllerFactory.java | 8 +- .../ColumnAggregationEndpoint.java | 10 +- ...ColumnAggregationEndpointNullResponse.java | 23 +- .../ColumnAggregationEndpointWithErrors.java | 18 +- .../ProtobufCoprocessorService.java | 21 +- .../TestAsyncCoprocessorEndpoint.java | 59 +- .../TestBatchCoprocessorEndpoint.java | 116 +- .../hbase/coprocessor/TestClassLoading.java | 195 +- .../coprocessor/TestCoprocessorEndpoint.java | 111 +- .../TestCoprocessorEndpointTracing.java | 253 +- ...processorServiceBackwardCompatibility.java | 36 +- .../TestCoprocessorTableEndpoint.java | 25 +- .../hbase/coprocessor/TestImportExport.java | 6 +- .../TestRegionServerCoprocessorEndpoint.java | 41 +- 
.../coprocessor/TestRowProcessorEndpoint.java | 134 +- .../hbase/coprocessor/TestSecureExport.java | 175 +- .../hbase/ipc/TestCoprocessorRpcUtils.java | 8 +- .../SecureBulkLoadEndpointClient.java | 62 +- ...onServerBulkLoadWithOldSecureEndpoint.java | 48 +- .../TestServerCustomProtocol.java | 216 +- ...plicationSyncUpToolWithBulkLoadedData.java | 16 +- hbase-examples/README.txt | 2 +- hbase-examples/pom.xml | 112 +- .../client/example/AsyncClientExample.java | 17 +- .../example/BufferedMutatorExample.java | 8 +- .../client/example/ExportEndpointExample.java | 16 +- .../client/example/HttpProxyExample.java | 11 +- .../example/MultiThreadedClientExample.java | 79 +- .../client/example/RefreshHFilesClient.java | 24 +- .../example/BulkDeleteEndpoint.java | 27 +- .../example/DelegatingInternalScanner.java | 2 +- .../ExampleMasterObserverWithMetrics.java | 16 +- .../ExampleRegionObserverWithMetrics.java | 38 +- .../example/RefreshHFilesEndpoint.java | 10 +- .../coprocessor/example/RowCountEndpoint.java | 28 +- .../example/ScanModifyingObserver.java | 28 +- .../example/ValueRewritingObserver.java | 34 +- .../example/WriteHeavyIncrementObserver.java | 52 +- .../example/ZooKeeperScanPolicyObserver.java | 39 +- .../hadoop/hbase/mapreduce/IndexBuilder.java | 47 +- .../hbase/mapreduce/SampleUploader.java | 46 +- .../provider/example/SaslPlainServer.java | 32 +- .../example/ShadeClientTokenUtil.java | 12 +- .../example/ShadeProviderSelector.java | 20 +- .../ShadeSaslAuthenticationProvider.java | 10 +- ...ShadeSaslClientAuthenticationProvider.java | 17 +- ...ShadeSaslServerAuthenticationProvider.java | 39 +- .../example/ShadeTokenIdentifier.java | 1 - .../hadoop/hbase/thrift/DemoClient.java | 115 +- .../hadoop/hbase/thrift/HttpDoAsClient.java | 54 +- .../hadoop/hbase/thrift2/DemoClient.java | 40 +- .../org/apache/hadoop/hbase/types/PBCell.java | 2 +- .../apache/hadoop/hbase/util/ClientUtils.java | 19 +- .../src/main/python/thrift1/DemoClient.py | 1 - .../main/python/thrift1/gen-py/hbase/Hbase.py | 1 - .../thrift2/gen-py/hbase/THBaseService.py | 1 - .../src/main/sh/healthcheck/healthcheck.sh | 12 +- .../example/TestAsyncClientExample.java | 4 +- .../client/example/TestHttpProxyExample.java | 10 +- .../example/TestRefreshHFilesClient.java | 4 +- .../example/TestRefreshHFilesBase.java | 8 +- .../example/TestRefreshHFilesEndpoint.java | 15 +- .../example/TestScanModifyingObserver.java | 18 +- .../example/TestValueReplacingCompaction.java | 18 +- .../TestWriteHeavyIncrementObserver.java | 10 +- ...crementObserverWithMemStoreCompaction.java | 16 +- .../TestZooKeeperScanPolicyObserver.java | 20 +- .../WriteHeavyIncrementObserverTestBase.java | 5 +- .../mapreduce/TestMapReduceExamples.java | 20 +- .../TestShadeSaslAuthenticationProvider.java | 72 +- .../apache/hadoop/hbase/types/TestPBCell.java | 11 +- hbase-external-blockcache/pom.xml | 79 +- .../hbase/io/hfile/MemcachedBlockCache.java | 75 +- hbase-hadoop-compat/pom.xml | 65 +- .../hadoop/hbase/CompatibilityFactory.java | 14 +- .../hbase/CompatibilitySingletonFactory.java | 19 +- .../hadoop/hbase/io/MetricsIOSource.java | 19 +- .../hadoop/hbase/io/MetricsIOWrapper.java | 3 +- .../hbase/ipc/MetricsHBaseServerSource.java | 51 +- .../ipc/MetricsHBaseServerSourceFactory.java | 14 +- .../hbase/ipc/MetricsHBaseServerWrapper.java | 4 +- .../MetricsAssignmentManagerSource.java | 24 +- .../master/MetricsMasterFileSystemSource.java | 4 +- .../hbase/master/MetricsMasterProcSource.java | 3 +- .../MetricsMasterProcSourceFactory.java | 3 +- 
.../master/MetricsMasterQuotaSource.java | 46 +- .../MetricsMasterQuotaSourceFactory.java | 15 +- .../hbase/master/MetricsMasterSource.java | 7 +- .../master/MetricsMasterSourceFactory.java | 3 +- .../hbase/master/MetricsMasterWrapper.java | 20 +- .../hbase/master/MetricsSnapshotSource.java | 3 +- .../balancer/MetricsBalancerSource.java | 5 +- .../MetricsStochasticBalancerSource.java | 12 +- .../hadoop/hbase/metrics/BaseSource.java | 30 +- .../metrics/ExceptionTrackingSource.java | 40 +- .../hbase/metrics/JvmPauseMonitorSource.java | 9 +- .../hadoop/hbase/metrics/MBeanSource.java | 12 +- .../hbase/metrics/OperationMetrics.java | 7 +- .../MetricsHeapMemoryManagerSource.java | 37 +- .../MetricsRegionAggregateSource.java | 9 +- .../MetricsRegionServerQuotaSource.java | 32 +- .../MetricsRegionServerSource.java | 198 +- .../MetricsRegionServerSourceFactory.java | 10 +- .../MetricsRegionServerWrapper.java | 26 +- .../regionserver/MetricsRegionSource.java | 12 +- .../regionserver/MetricsRegionWrapper.java | 44 +- .../MetricsTableAggregateSource.java | 8 +- .../regionserver/MetricsTableLatencies.java | 48 +- .../regionserver/MetricsTableQueryMeter.java | 19 +- .../regionserver/MetricsTableSource.java | 13 +- .../MetricsTableWrapperAggregate.java | 15 +- .../MetricsUserAggregateSource.java | 10 +- .../hbase/regionserver/MetricsUserSource.java | 11 +- .../regionserver/wal/MetricsWALSource.java | 13 +- .../MetricsReplicationSinkSource.java | 10 +- .../MetricsReplicationSource.java | 3 +- .../MetricsReplicationSourceFactory.java | 6 +- .../MetricsReplicationSourceSource.java | 39 +- .../MetricsReplicationTableSource.java | 7 +- .../hadoop/hbase/rest/MetricsRESTSource.java | 19 +- .../thrift/MetricsThriftServerSource.java | 6 +- .../MetricsThriftServerSourceFactory.java | 7 +- .../zookeeper/MetricsZooKeeperSource.java | 11 +- .../hadoop/metrics2/MetricHistogram.java | 12 +- .../hadoop/metrics2/MetricsExecutor.java | 3 +- .../org/apache/hadoop/hbase/HadoopShims.java | 12 +- .../hadoop/hbase/RandomStringGenerator.java | 3 +- .../hbase/RandomStringGeneratorImpl.java | 8 +- .../TestCompatibilitySingletonFactory.java | 13 +- .../TestMetricsMasterSourceFactory.java | 12 +- .../TestMetricsRegionServerSourceFactory.java | 12 +- .../wal/TestMetricsWALSource.java | 10 +- .../TestMetricsReplicationSourceFactory.java | 12 +- .../hbase/rest/TestMetricsRESTSource.java | 12 +- .../hbase/test/MetricsAssertHelper.java | 81 +- .../TestMetricsThriftServerSourceFactory.java | 11 +- .../zookeeper/TestMetricsZooKeeperSource.java | 10 +- hbase-hadoop2-compat/pom.xml | 134 +- .../hadoop/hbase/io/MetricsIOSourceImpl.java | 22 +- .../MetricsHBaseServerSourceFactoryImpl.java | 19 +- .../ipc/MetricsHBaseServerSourceImpl.java | 127 +- .../hadoop/hbase/mapreduce/JobUtil.java | 13 +- .../MetricsAssignmentManagerSourceImpl.java | 33 +- .../MetricsMasterFilesystemSourceImpl.java | 18 +- .../MetricsMasterProcSourceFactoryImpl.java | 3 +- .../master/MetricsMasterProcSourceImpl.java | 31 +- .../MetricsMasterQuotaSourceFactoryImpl.java | 15 +- .../master/MetricsMasterQuotaSourceImpl.java | 65 +- .../MetricsMasterSourceFactoryImpl.java | 6 +- .../hbase/master/MetricsMasterSourceImpl.java | 95 +- .../master/MetricsSnapshotSourceImpl.java | 19 +- .../balancer/MetricsBalancerSourceImpl.java | 10 +- .../MetricsStochasticBalancerSourceImpl.java | 24 +- .../hadoop/hbase/metrics/BaseSourceImpl.java | 50 +- .../metrics/ExceptionTrackingSourceImpl.java | 65 +- .../apache/hadoop/hbase/metrics/Interns.java | 34 +- 
.../hadoop/hbase/metrics/MBeanSourceImpl.java | 6 +- .../hadoop/hbase/metrics/MetricsInfoImpl.java | 26 +- .../impl/GlobalMetricRegistriesAdapter.java | 22 +- .../HBaseMetrics2HadoopMetricsAdapter.java | 59 +- .../MetricsHeapMemoryManagerSourceImpl.java | 78 +- .../MetricsRegionAggregateSourceImpl.java | 25 +- .../MetricsRegionServerQuotaSourceImpl.java | 22 +- .../MetricsRegionServerSourceFactoryImpl.java | 11 +- .../MetricsRegionServerSourceImpl.java | 552 ++-- .../regionserver/MetricsRegionSourceImpl.java | 176 +- .../MetricsTableAggregateSourceImpl.java | 19 +- .../MetricsTableLatenciesImpl.java | 28 +- .../MetricsTableQueryMeterImpl.java | 24 +- .../regionserver/MetricsTableSourceImpl.java | 119 +- .../MetricsUserAggregateSourceImpl.java | 12 +- .../regionserver/MetricsUserSourceImpl.java | 38 +- .../wal/MetricsWALSourceImpl.java | 48 +- ...ricsReplicationGlobalSourceSourceImpl.java | 71 +- .../MetricsReplicationSinkSourceImpl.java | 17 +- .../MetricsReplicationSourceFactoryImpl.java | 13 +- .../MetricsReplicationSourceImpl.java | 18 +- .../MetricsReplicationSourceSourceImpl.java | 60 +- .../MetricsReplicationTableSourceImpl.java | 5 +- .../hbase/rest/MetricsRESTSourceImpl.java | 22 +- .../MetricsThriftServerSourceFactoryImpl.java | 12 +- .../thrift/MetricsThriftServerSourceImpl.java | 26 +- .../zookeeper/MetricsZooKeeperSourceImpl.java | 66 +- .../hadoop/metrics2/impl/JmxCacheBuster.java | 17 +- .../lib/DefaultMetricsSystemHelper.java | 11 +- .../metrics2/lib/DynamicMetricsRegistry.java | 240 +- .../metrics2/lib/MetricsExecutorImpl.java | 14 +- .../metrics2/lib/MutableFastCounter.java | 4 +- .../hadoop/metrics2/lib/MutableHistogram.java | 30 +- .../metrics2/lib/MutableRangeHistogram.java | 22 +- .../metrics2/lib/MutableSizeHistogram.java | 6 +- .../metrics2/lib/MutableTimeHistogram.java | 3 +- .../hadoop/metrics2/util/MetricQuantile.java | 8 +- .../metrics2/util/MetricSampleQuantiles.java | 78 +- .../apache/hadoop/hbase/HadoopShimsImpl.java | 9 +- .../TestMetricsMasterProcSourceImpl.java | 14 +- .../master/TestMetricsMasterSourceImpl.java | 14 +- .../hbase/metrics/TestBaseSourceImpl.java | 8 +- .../regionserver/MetricsTableWrapperStub.java | 1 - .../TestMetricsRegionServerSourceImpl.java | 12 +- .../TestMetricsRegionSourceImpl.java | 9 +- .../TestMetricsTableSourceImpl.java | 23 +- .../TestMetricsUserSourceImpl.java | 18 +- .../wal/TestMetricsWALSourceImpl.java | 12 +- ...stMetricsReplicationSourceFactoryImpl.java | 8 +- .../TestMetricsReplicationSourceImpl.java | 8 +- .../hbase/rest/TestMetricsRESTSourceImpl.java | 8 +- .../hbase/test/MetricsAssertHelperImpl.java | 14 +- ...tMetricsThriftServerSourceFactoryImpl.java | 20 +- .../TestMetricsZooKeeperSourceImpl.java | 6 +- .../lib/TestMutableRangeHistogram.java | 8 +- hbase-hbtop/pom.xml | 24 +- .../org/apache/hadoop/hbase/hbtop/HBTop.java | 12 +- .../org/apache/hadoop/hbase/hbtop/Record.java | 18 +- .../hadoop/hbase/hbtop/RecordFilter.java | 18 +- .../hadoop/hbase/hbtop/field/Field.java | 9 +- .../hadoop/hbase/hbtop/field/FieldInfo.java | 13 +- .../hadoop/hbase/hbtop/field/FieldValue.java | 39 +- .../hbase/hbtop/field/FieldValueType.java | 10 +- .../hbase/hbtop/mode/ClientModeStrategy.java | 105 +- .../hbase/hbtop/mode/DrillDownInfo.java | 9 +- .../apache/hadoop/hbase/hbtop/mode/Mode.java | 7 +- .../hadoop/hbase/hbtop/mode/ModeStrategy.java | 9 +- .../hbase/hbtop/mode/ModeStrategyUtils.java | 19 +- .../hbtop/mode/NamespaceModeStrategy.java | 25 +- .../hbase/hbtop/mode/RegionModeStrategy.java | 61 +- 
.../hbtop/mode/RegionServerModeStrategy.java | 52 +- .../hbtop/mode/RequestCountPerSecond.java | 3 +- .../hbase/hbtop/mode/TableModeStrategy.java | 37 +- .../hbase/hbtop/mode/UserModeStrategy.java | 41 +- .../hbtop/screen/AbstractScreenView.java | 3 +- .../hadoop/hbase/hbtop/screen/Screen.java | 11 +- .../hadoop/hbase/hbtop/screen/ScreenView.java | 11 +- .../screen/field/FieldScreenPresenter.java | 12 +- .../hbtop/screen/field/FieldScreenView.java | 17 +- .../hbtop/screen/help/CommandDescription.java | 4 +- .../screen/help/HelpScreenPresenter.java | 39 +- .../hbtop/screen/help/HelpScreenView.java | 7 +- .../screen/mode/ModeScreenPresenter.java | 3 +- .../hbtop/screen/mode/ModeScreenView.java | 19 +- .../top/FilterDisplayModeScreenPresenter.java | 3 +- .../top/FilterDisplayModeScreenView.java | 7 +- .../hadoop/hbase/hbtop/screen/top/Header.java | 5 +- .../screen/top/InputModeScreenPresenter.java | 3 +- .../hbtop/screen/top/InputModeScreenView.java | 7 +- .../top/MessageModeScreenPresenter.java | 7 +- .../screen/top/MessageModeScreenView.java | 6 +- .../hadoop/hbase/hbtop/screen/top/Paging.java | 3 +- .../hbase/hbtop/screen/top/Summary.java | 7 +- .../hbtop/screen/top/TopScreenModel.java | 20 +- .../hbtop/screen/top/TopScreenPresenter.java | 25 +- .../hbase/hbtop/screen/top/TopScreenView.java | 51 +- .../hbase/hbtop/terminal/Attributes.java | 3 +- .../hadoop/hbase/hbtop/terminal/Color.java | 12 +- .../hbase/hbtop/terminal/CursorPosition.java | 3 +- .../hadoop/hbase/hbtop/terminal/KeyPress.java | 12 +- .../hadoop/hbase/hbtop/terminal/Terminal.java | 20 +- .../hbase/hbtop/terminal/TerminalPrinter.java | 3 +- .../hbase/hbtop/terminal/TerminalSize.java | 3 +- .../hbase/hbtop/terminal/impl/Cell.java | 3 +- .../hbtop/terminal/impl/EscapeSequences.java | 3 +- .../terminal/impl/KeyPressGenerator.java | 24 +- .../hbtop/terminal/impl/ScreenBuffer.java | 9 +- .../hbtop/terminal/impl/TerminalImpl.java | 23 +- .../terminal/impl/TerminalPrinterImpl.java | 2 +- .../terminal/impl/batch/BatchTerminal.java | 10 +- .../impl/batch/BatchTerminalPrinter.java | 2 +- .../apache/hadoop/hbase/hbtop/TestRecord.java | 34 +- .../hadoop/hbase/hbtop/TestRecordFilter.java | 33 +- .../apache/hadoop/hbase/hbtop/TestUtils.java | 201 +- .../hbase/hbtop/field/TestFieldValue.java | 33 +- .../hbase/hbtop/mode/TestClientMode.java | 16 +- .../hadoop/hbase/hbtop/mode/TestModeBase.java | 10 +- .../hbase/hbtop/mode/TestNamespaceMode.java | 6 +- .../hbase/hbtop/mode/TestRegionMode.java | 3 +- .../hbtop/mode/TestRegionServerMode.java | 3 +- .../hbtop/mode/TestRequestCountPerSecond.java | 3 +- .../hbase/hbtop/mode/TestTableMode.java | 6 +- .../hadoop/hbase/hbtop/mode/TestUserMode.java | 2 +- .../field/TestFieldScreenPresenter.java | 23 +- .../screen/help/TestHelpScreenPresenter.java | 7 +- .../screen/mode/TestModeScreenPresenter.java | 5 +- .../TestFilterDisplayModeScreenPresenter.java | 20 +- .../top/TestInputModeScreenPresenter.java | 7 +- .../top/TestMessageModeScreenPresenter.java | 7 +- .../hbase/hbtop/screen/top/TestPaging.java | 6 +- .../hbtop/screen/top/TestTopScreenModel.java | 14 +- .../screen/top/TestTopScreenPresenter.java | 51 +- .../hbase/hbtop/terminal/impl/TestCursor.java | 4 +- .../hbtop/terminal/impl/TestKeyPress.java | 4 +- .../terminal/impl/TestTerminalPrinter.java | 8 +- hbase-http/pom.xml | 225 +- .../hbase/http/AdminAuthorizedFilter.java | 19 +- .../hbase/http/AdminAuthorizedServlet.java | 5 +- .../http/ClickjackingPreventionFilter.java | 11 +- .../hadoop/hbase/http/FilterContainer.java | 15 +- 
.../hadoop/hbase/http/FilterInitializer.java | 4 +- .../apache/hadoop/hbase/http/HtmlQuoting.java | 42 +- .../apache/hadoop/hbase/http/HttpConfig.java | 7 +- .../hadoop/hbase/http/HttpRequestLog.java | 2 +- .../apache/hadoop/hbase/http/HttpServer.java | 577 ++-- .../hadoop/hbase/http/HttpServerUtil.java | 9 +- .../apache/hadoop/hbase/http/InfoServer.java | 90 +- .../hadoop/hbase/http/NoCacheFilter.java | 7 +- .../hbase/http/ProfileOutputServlet.java | 6 +- .../hadoop/hbase/http/ProfileServlet.java | 117 +- .../http/ProxyUserAuthenticationFilter.java | 87 +- .../hbase/http/SecurityHeadersFilter.java | 17 +- .../hbase/http/ServerConfigurationKeys.java | 11 +- .../hadoop/hbase/http/conf/ConfServlet.java | 20 +- .../hbase/http/gson/ByteArraySerializer.java | 1 + .../http/gson/GsonMessageBodyWriter.java | 22 +- .../http/jersey/ResponseEntityMapper.java | 7 +- .../http/jersey/SupplierFactoryAdapter.java | 8 +- .../hadoop/hbase/http/jmx/JMXJsonServlet.java | 107 +- .../hbase/http/lib/StaticUserWebFilter.java | 33 +- .../hadoop/hbase/http/log/LogLevel.java | 146 +- .../apache/hadoop/hbase/util/JSONBean.java | 32 +- .../hadoop/hbase/util/JSONMetricUtil.java | 20 +- .../hadoop/hbase/util/LogMonitoring.java | 3 +- .../hadoop/hbase/util/ProcessUtils.java | 5 +- .../hbase/http/HttpServerFunctionalTest.java | 122 +- .../hadoop/hbase/http/TestGlobalFilter.java | 31 +- .../hadoop/hbase/http/TestHtmlQuoting.java | 36 +- .../hadoop/hbase/http/TestHttpCookieFlag.java | 102 +- .../hadoop/hbase/http/TestHttpRequestLog.java | 2 +- .../hadoop/hbase/http/TestHttpServer.java | 202 +- .../hbase/http/TestHttpServerLifecycle.java | 41 +- .../hbase/http/TestHttpServerWebapps.java | 10 +- .../hadoop/hbase/http/TestPathFilter.java | 27 +- .../hbase/http/TestProfileOutputServlet.java | 11 +- .../http/TestProxyUserSpnegoHttpServer.java | 87 +- .../hadoop/hbase/http/TestSSLHttpServer.java | 32 +- .../hbase/http/TestSecurityHeadersFilter.java | 43 +- .../hadoop/hbase/http/TestServletFilter.java | 56 +- .../hbase/http/TestSpnegoHttpServer.java | 60 +- .../hbase/http/conf/TestConfServlet.java | 26 +- .../hbase/http/jmx/TestJMXJsonServlet.java | 65 +- .../http/lib/TestStaticUserWebFilter.java | 15 +- .../hadoop/hbase/http/log/TestLogLevel.java | 34 +- .../hbase/http/resource/JerseyResource.java | 15 +- .../hbase/http/ssl/KeyStoreTestUtil.java | 273 +- .../hadoop/hbase/util/TestJSONBean.java | 27 +- hbase-it/pom.xml | 273 +- .../apache/hadoop/hbase/chaos/ChaosAgent.java | 216 +- .../hadoop/hbase/chaos/ChaosConstants.java | 33 +- .../hadoop/hbase/chaos/ChaosService.java | 27 +- .../apache/hadoop/hbase/chaos/ChaosUtils.java | 8 +- .../apache/hadoop/hbase/ChaosZKClient.java | 92 +- .../apache/hadoop/hbase/ClusterManager.java | 29 +- .../hadoop/hbase/CoprocClusterManager.java | 41 +- .../hadoop/hbase/DistributedHBaseCluster.java | 134 +- .../hadoop/hbase/HBaseClusterManager.java | 133 +- .../hbase/IntegrationTestAcidGuarantees.java | 9 +- .../hadoop/hbase/IntegrationTestBase.java | 44 +- .../IntegrationTestDDLMasterFailover.java | 376 ++- .../hadoop/hbase/IntegrationTestIngest.java | 70 +- ...ntegrationTestIngestStripeCompactions.java | 8 +- .../hbase/IntegrationTestIngestWithACL.java | 18 +- .../IntegrationTestIngestWithEncryption.java | 11 +- .../hbase/IntegrationTestIngestWithMOB.java | 25 +- .../hbase/IntegrationTestIngestWithTags.java | 5 +- ...grationTestIngestWithVisibilityLabels.java | 17 +- .../hbase/IntegrationTestLazyCfLoading.java | 44 +- .../hbase/IntegrationTestManyRegions.java | 51 +- 
.../hbase/IntegrationTestMetaReplicas.java | 25 +- .../IntegrationTestRegionReplicaPerf.java | 89 +- ...tegrationTestRegionReplicaReplication.java | 61 +- .../hbase/IntegrationTestingUtility.java | 69 +- .../hadoop/hbase/IntegrationTestsDriver.java | 31 +- .../apache/hadoop/hbase/MockHttpApiRule.java | 31 +- .../hadoop/hbase/RESTApiClusterManager.java | 261 +- .../hbase/ShellExecEndpointCoprocessor.java | 57 +- ...tripeCompactionsPerformanceEvaluation.java | 68 +- .../hadoop/hbase/TestIntegrationTestBase.java | 4 +- .../hbase/TestRESTApiClusterManager.java | 51 +- .../TestShellExecEndpointCoprocessor.java | 44 +- .../hadoop/hbase/ZNodeClusterManager.java | 28 +- .../hadoop/hbase/chaos/actions/Action.java | 89 +- .../hbase/chaos/actions/AddCPULoadAction.java | 19 +- .../hbase/chaos/actions/AddColumnAction.java | 17 +- .../chaos/actions/BatchRestartRsAction.java | 23 +- .../actions/ChangeBloomFilterAction.java | 11 +- .../actions/ChangeCompressionAction.java | 8 +- .../chaos/actions/ChangeEncodingAction.java | 6 +- .../actions/ChangeSplitPolicyAction.java | 12 +- .../chaos/actions/ChangeVersionsAction.java | 11 +- .../hbase/chaos/actions/CompactMobAction.java | 4 +- .../CompactRandomRegionOfTableAction.java | 11 +- .../chaos/actions/CompactTableAction.java | 7 +- .../chaos/actions/CorruptDataFilesAction.java | 12 +- .../actions/CorruptPacketsCommandAction.java | 17 +- .../actions/DecreaseMaxHFileSizeAction.java | 14 +- .../actions/DelayPacketsCommandAction.java | 18 +- .../chaos/actions/DeleteDataFilesAction.java | 12 +- .../actions/DumpClusterStatusAction.java | 26 +- .../DuplicatePacketsCommandAction.java | 17 +- .../chaos/actions/FillDiskCommandAction.java | 29 +- .../FlushRandomRegionOfTableAction.java | 16 +- .../hbase/chaos/actions/FlushTableAction.java | 8 +- .../chaos/actions/ForceBalancerAction.java | 8 +- .../GracefulRollingRestartRsAction.java | 14 +- .../actions/LosePacketsCommandAction.java | 17 +- ...rgeRandomAdjacentRegionsOfTableAction.java | 6 +- .../MoveRandomRegionOfTableAction.java | 15 +- .../actions/MoveRegionsOfTableAction.java | 13 +- .../chaos/actions/RemoveColumnAction.java | 17 +- .../actions/ReorderPacketsCommandAction.java | 23 +- .../actions/RestartActionBaseAction.java | 7 +- .../actions/RestartActiveMasterAction.java | 8 +- .../actions/RestartActiveNameNodeAction.java | 10 +- .../actions/RestartRandomDataNodeAction.java | 16 +- .../chaos/actions/RestartRandomRsAction.java | 4 +- .../RestartRandomRsExceptMetaAction.java | 4 +- .../actions/RestartRandomZKNodeAction.java | 14 +- .../actions/RestartRsHoldingMetaAction.java | 11 +- .../actions/RestartRsHoldingTableAction.java | 15 +- .../actions/RollingBatchRestartRsAction.java | 22 +- .../RollingBatchSuspendResumeRsAction.java | 18 +- .../chaos/actions/SnapshotTableAction.java | 8 +- .../actions/SplitAllRegionOfTableAction.java | 9 +- .../SplitRandomRegionOfTableAction.java | 14 +- .../chaos/actions/SudoCommandAction.java | 9 +- .../hbase/chaos/actions/TCCommandAction.java | 3 +- .../actions/TestChangeSplitPolicyAction.java | 6 +- .../chaos/actions/TruncateTableAction.java | 4 +- .../UnbalanceKillAndRebalanceAction.java | 34 +- .../chaos/actions/UnbalanceRegionsAction.java | 14 +- .../chaos/factories/CalmMonkeyFactory.java | 4 +- ...gurableSlowDeterministicMonkeyFactory.java | 45 +- .../factories/DataIssuesMonkeyFactory.java | 35 +- .../DistributedIssuesMonkeyFactory.java | 109 +- .../factories/MasterKillingMonkeyFactory.java | 32 +- .../factories/MobNoKillMonkeyFactory.java | 31 +- 
.../MobSlowDeterministicMonkeyFactory.java | 154 +- .../chaos/factories/MonkeyConstants.java | 8 +- .../hbase/chaos/factories/MonkeyFactory.java | 18 +- .../chaos/factories/NoKillMonkeyFactory.java | 59 +- ...erAndDependenciesKillingMonkeyFactory.java | 28 +- .../factories/ServerKillingMonkeyFactory.java | 30 +- .../SlowDeterministicMonkeyFactory.java | 140 +- .../StressAssignmentManagerMonkeyFactory.java | 77 +- .../factories/UnbalanceMonkeyFactory.java | 45 +- .../hbase/chaos/monkies/CalmChaosMonkey.java | 3 +- .../hbase/chaos/monkies/ChaosMonkey.java | 29 +- .../chaos/monkies/PolicyBasedChaosMonkey.java | 23 +- .../policies/CompositeSequentialPolicy.java | 2 +- .../chaos/policies/DoActionsOncePolicy.java | 8 +- .../hbase/chaos/policies/PeriodicPolicy.java | 5 +- .../policies/PeriodicRandomActionPolicy.java | 8 +- .../hadoop/hbase/chaos/policies/Policy.java | 4 +- .../policies/TwoConcurrentActionPolicy.java | 21 +- .../hbase/chaos/util/ChaosMonkeyRunner.java | 44 +- .../hadoop/hbase/chaos/util/Monkeys.java | 8 +- .../hbase/ipc/IntegrationTestRpcClient.java | 40 +- .../mapreduce/IntegrationTestBulkLoad.java | 225 +- .../IntegrationTestFileBasedSFTBulkLoad.java | 47 +- .../mapreduce/IntegrationTestImportTsv.java | 75 +- .../IntegrationTestTableMapReduceUtil.java | 9 +- ...tegrationTestTableSnapshotInputFormat.java | 79 +- .../hbase/mttr/IntegrationTestMTTR.java | 155 +- .../test/IntegrationTestBigLinkedList.java | 595 ++-- ...rationTestBigLinkedListWithVisibility.java | 145 +- .../test/IntegrationTestLoadAndVerify.java | 232 +- .../test/IntegrationTestLoadCommonCrawl.java | 213 +- .../hbase/test/IntegrationTestMonkeys.java | 7 +- .../test/IntegrationTestReplication.java | 213 +- ...dedMultiGetRequestsWithRegionReplicas.java | 32 +- ...TimeBoundedRequestsWithRegionReplicas.java | 117 +- ...onTestWithCellVisibilityLoadAndVerify.java | 100 +- .../IntegrationTestZKAndFSPermissions.java | 42 +- .../apache/hadoop/hbase/test/util/CRC64.java | 5 +- .../hbase/test/util/warc/WARCFileReader.java | 50 +- .../hbase/test/util/warc/WARCFileWriter.java | 128 +- .../hbase/test/util/warc/WARCInputFormat.java | 49 +- .../test/util/warc/WARCOutputFormat.java | 48 +- .../hbase/test/util/warc/WARCRecord.java | 269 +- .../hbase/test/util/warc/WARCWritable.java | 36 +- .../IntegrationTestSendTraceRequests.java | 15 +- hbase-logging/pom.xml | 68 +- .../hbase/logging/InternalLog4jUtils.java | 2 +- .../hbase/logging/JulToSlf4jInitializer.java | 2 +- .../hadoop/hbase/logging/Log4jUtils.java | 2 +- .../hbase/logging/HBaseTestAppender.java | 10 +- .../apache/hadoop/hbase/logging/Target.java | 2 +- .../java/org/apache/log4j/FileAppender.java | 3 +- hbase-mapreduce/pom.xml | 89 +- .../apache/hadoop/hbase/mapred/Driver.java | 13 +- .../hadoop/hbase/mapred/GroupingTableMap.java | 92 +- .../hbase/mapred/HRegionPartitioner.java | 29 +- .../hadoop/hbase/mapred/IdentityTableMap.java | 40 +- .../hbase/mapred/IdentityTableReduce.java | 31 +- .../mapred/MultiTableSnapshotInputFormat.java | 72 +- .../hadoop/hbase/mapred/RowCounter.java | 41 +- .../hadoop/hbase/mapred/TableInputFormat.java | 20 +- .../hbase/mapred/TableInputFormatBase.java | 126 +- .../apache/hadoop/hbase/mapred/TableMap.java | 12 +- .../hbase/mapred/TableMapReduceUtil.java | 286 +- .../hbase/mapred/TableOutputFormat.java | 34 +- .../hbase/mapred/TableRecordReader.java | 40 +- .../hbase/mapred/TableRecordReaderImpl.java | 58 +- .../hadoop/hbase/mapred/TableReduce.java | 8 +- .../mapred/TableSnapshotInputFormat.java | 57 +- .../hadoop/hbase/mapred/TableSplit.java | 
66 +- .../hadoop/hbase/mapreduce/CellCounter.java | 95 +- .../hadoop/hbase/mapreduce/CellCreator.java | 109 +- .../hbase/mapreduce/CellSerialization.java | 9 +- .../hbase/mapreduce/CellSortReducer.java | 18 +- .../hadoop/hbase/mapreduce/CopyTable.java | 81 +- .../DefaultVisibilityExpressionResolver.java | 13 +- .../apache/hadoop/hbase/mapreduce/Driver.java | 38 +- .../apache/hadoop/hbase/mapreduce/Export.java | 56 +- .../hadoop/hbase/mapreduce/ExportUtils.java | 48 +- .../hbase/mapreduce/GroupingTableMapper.java | 91 +- .../hbase/mapreduce/HFileInputFormat.java | 20 +- .../hbase/mapreduce/HFileOutputFormat2.java | 421 ++- .../hbase/mapreduce/HRegionPartitioner.java | 79 +- .../hadoop/hbase/mapreduce/HashTable.java | 89 +- .../hbase/mapreduce/IdentityTableMapper.java | 38 +- .../hbase/mapreduce/IdentityTableReducer.java | 61 +- .../apache/hadoop/hbase/mapreduce/Import.java | 330 +- .../hadoop/hbase/mapreduce/ImportTsv.java | 271 +- .../hadoop/hbase/mapreduce/JarFinder.java | 59 +- .../mapreduce/KeyValueSerialization.java | 14 +- .../hbase/mapreduce/KeyValueSortReducer.java | 25 +- .../MultiTableHFileOutputFormat.java | 74 +- .../mapreduce/MultiTableInputFormat.java | 28 +- .../mapreduce/MultiTableInputFormatBase.java | 135 +- .../mapreduce/MultiTableOutputFormat.java | 90 +- .../MultiTableSnapshotInputFormat.java | 67 +- .../MultiTableSnapshotInputFormatImpl.java | 66 +- .../mapreduce/MultithreadedTableMapper.java | 131 +- .../mapreduce/MutationSerialization.java | 13 +- .../hadoop/hbase/mapreduce/PutCombiner.java | 24 +- .../hbase/mapreduce/PutSortReducer.java | 47 +- .../hbase/mapreduce/RegionSizeCalculator.java | 28 +- .../hbase/mapreduce/ResultSerialization.java | 14 +- .../mapreduce/RoundRobinTableInputFormat.java | 44 +- .../hadoop/hbase/mapreduce/RowCounter.java | 176 +- .../SimpleTotalOrderPartitioner.java | 54 +- .../hadoop/hbase/mapreduce/SyncTable.java | 230 +- .../hbase/mapreduce/TableInputFormat.java | 94 +- .../hbase/mapreduce/TableInputFormatBase.java | 288 +- .../hbase/mapreduce/TableMapReduceUtil.java | 848 +++-- .../hadoop/hbase/mapreduce/TableMapper.java | 15 +- .../hbase/mapreduce/TableOutputCommitter.java | 10 +- .../hbase/mapreduce/TableOutputFormat.java | 93 +- .../hbase/mapreduce/TableRecordReader.java | 49 +- .../mapreduce/TableRecordReaderImpl.java | 104 +- .../hadoop/hbase/mapreduce/TableReducer.java | 29 +- .../mapreduce/TableSnapshotInputFormat.java | 119 +- .../TableSnapshotInputFormatImpl.java | 148 +- .../hadoop/hbase/mapreduce/TableSplit.java | 157 +- .../hbase/mapreduce/TextSortReducer.java | 63 +- .../hbase/mapreduce/TsvImporterMapper.java | 74 +- .../mapreduce/TsvImporterTextMapper.java | 36 +- .../VisibilityExpressionResolver.java | 5 +- .../hbase/mapreduce/WALInputFormat.java | 64 +- .../hadoop/hbase/mapreduce/WALPlayer.java | 119 +- .../replication/VerifyReplication.java | 273 +- .../hbase/mob/mapreduce/MobRefReporter.java | 126 +- .../hbase/regionserver/CompactionTool.java | 107 +- .../hadoop/hbase/snapshot/ExportSnapshot.java | 355 +-- .../MapreduceDependencyClasspathTool.java | 16 +- .../hadoop/hbase/PerformanceEvaluation.java | 639 ++-- .../hbase/ScanPerformanceEvaluation.java | 158 +- .../hbase/TestPerformanceEvaluation.java | 24 +- .../hadoop/hbase/mapred/TestDriver.java | 9 +- .../hbase/mapred/TestGroupingTableMap.java | 72 +- .../hbase/mapred/TestIdentityTableMap.java | 17 +- .../TestMultiTableSnapshotInputFormat.java | 40 +- .../hadoop/hbase/mapred/TestRowCounter.java | 45 +- .../hadoop/hbase/mapred/TestSplitTable.java | 35 +- 
.../hbase/mapred/TestTableInputFormat.java | 116 +- .../hbase/mapred/TestTableMapReduce.java | 32 +- .../hbase/mapred/TestTableMapReduceUtil.java | 90 +- ...estTableOutputFormatConnectionExhaust.java | 21 +- .../mapred/TestTableSnapshotInputFormat.java | 90 +- .../MultiTableInputFormatTestBase.java | 84 +- .../hbase/mapreduce/NMapInputFormat.java | 20 +- .../TableSnapshotInputFormatTestBase.java | 36 +- .../TestCellBasedHFileOutputFormat2.java | 562 ++-- .../mapreduce/TestCellBasedImportExport2.java | 345 +- .../mapreduce/TestCellBasedWALPlayer2.java | 31 +- .../hbase/mapreduce/TestCellCounter.java | 31 +- .../hadoop/hbase/mapreduce/TestCopyTable.java | 84 +- .../mapreduce/TestGroupingTableMapper.java | 16 +- .../mapreduce/TestHBaseMRTestingUtility.java | 35 +- .../mapreduce/TestHFileOutputFormat2.java | 585 ++-- .../mapreduce/TestHRegionPartitioner.java | 10 +- .../hadoop/hbase/mapreduce/TestHashTable.java | 68 +- .../hbase/mapreduce/TestImportExport.java | 400 +-- .../TestImportTSVWithOperationAttributes.java | 65 +- .../mapreduce/TestImportTSVWithTTLs.java | 29 +- .../TestImportTSVWithVisibilityLabels.java | 167 +- .../hadoop/hbase/mapreduce/TestImportTsv.java | 199 +- .../hbase/mapreduce/TestImportTsvParser.java | 22 +- .../hadoop/hbase/mapreduce/TestJarFinder.java | 26 +- .../mapreduce/TestMultiTableInputFormat.java | 19 +- .../TestMultiTableInputFormatBase.java | 89 +- .../TestMultiTableSnapshotInputFormat.java | 12 +- ...TestMultiTableSnapshotInputFormatImpl.java | 34 +- .../TestMultithreadedTableMapper.java | 111 +- .../mapreduce/TestRegionSizeCalculator.java | 52 +- .../TestRoundRobinTableInputFormat.java | 55 +- .../hbase/mapreduce/TestRowCounter.java | 286 +- .../TestSimpleTotalOrderPartitioner.java | 15 +- .../hadoop/hbase/mapreduce/TestSyncTable.java | 149 +- .../hbase/mapreduce/TestTableInputFormat.java | 170 +- .../mapreduce/TestTableInputFormatBase.java | 96 +- .../mapreduce/TestTableInputFormatScan.java | 6 +- .../TestTableInputFormatScanBase.java | 63 +- .../TestTableInputFormatScanEmptyToAPP.java | 4 +- .../TestTableInputFormatScanEmptyToBBA.java | 4 +- .../TestTableInputFormatScanEmptyToBBB.java | 4 +- .../TestTableInputFormatScanEmptyToEmpty.java | 4 +- .../TestTableInputFormatScanEmptyToOPP.java | 4 +- .../TestTableInputFormatScanOBBToOPP.java | 2 +- .../TestTableInputFormatScanOBBToQPP.java | 2 +- .../TestTableInputFormatScanOPPToEmpty.java | 4 +- .../TestTableInputFormatScanYYXToEmpty.java | 4 +- .../TestTableInputFormatScanYYYToEmpty.java | 4 +- .../TestTableInputFormatScanYZYToEmpty.java | 4 +- .../hbase/mapreduce/TestTableMapReduce.java | 57 +- .../mapreduce/TestTableMapReduceBase.java | 61 +- .../mapreduce/TestTableMapReduceUtil.java | 45 +- .../mapreduce/TestTableRecordReader.java | 15 +- .../TestTableSnapshotInputFormat.java | 146 +- .../hbase/mapreduce/TestTableSplit.java | 62 +- .../hbase/mapreduce/TestTimeRangeMapRed.java | 51 +- .../hbase/mapreduce/TestWALInputFormat.java | 3 +- .../hadoop/hbase/mapreduce/TestWALPlayer.java | 35 +- .../hbase/mapreduce/TestWALRecordReader.java | 61 +- .../TsvImporterCustomTestMapper.java | 29 +- ...TsvImporterCustomTestMapperForOprAttr.java | 14 +- .../regionserver/TestCompactionTool.java | 8 +- .../replication/TestVerifyReplication.java | 43 +- .../TestVerifyReplicationAdjunct.java | 24 +- .../TestVerifyReplicationCrossDiffHdfs.java | 19 +- ...fyReplicationSecureClusterCredentials.java | 35 +- .../hbase/snapshot/TestExportSnapshot.java | 60 +- .../snapshot/TestExportSnapshotAdjunct.java | 36 +- 
.../snapshot/TestExportSnapshotHelpers.java | 45 +- .../TestExportSnapshotV1NoCluster.java | 39 +- .../TestExportSnapshotV2NoCluster.java | 10 +- ...tExportSnapshotWithTemporaryDirectory.java | 14 +- .../hbase/snapshot/TestMobExportSnapshot.java | 6 +- .../snapshot/TestMobSecureExportSnapshot.java | 6 +- .../snapshot/TestSecureExportSnapshot.java | 6 +- .../hadoop/hbase/util/LoadTestTool.java | 284 +- .../src/test/resources/mapred-site.xml | 1 - hbase-metrics-api/README.txt | 2 +- hbase-metrics-api/pom.xml | 92 +- .../apache/hadoop/hbase/metrics/Counter.java | 4 +- .../apache/hadoop/hbase/metrics/Gauge.java | 3 +- .../hadoop/hbase/metrics/Histogram.java | 4 +- .../apache/hadoop/hbase/metrics/Meter.java | 14 +- .../apache/hadoop/hbase/metrics/Metric.java | 2 +- .../hbase/metrics/MetricRegistries.java | 11 +- .../hbase/metrics/MetricRegistriesLoader.java | 15 +- .../hadoop/hbase/metrics/MetricRegistry.java | 12 +- .../hbase/metrics/MetricRegistryFactory.java | 7 +- .../hbase/metrics/MetricRegistryInfo.java | 35 +- .../hadoop/hbase/metrics/MetricSet.java | 6 +- .../hadoop/hbase/metrics/PackageMarker.java | 19 +- .../apache/hadoop/hbase/metrics/Snapshot.java | 19 +- .../apache/hadoop/hbase/metrics/Timer.java | 5 +- .../metrics/TestMetricRegistriesLoader.java | 8 +- hbase-metrics/README.txt | 2 +- hbase-metrics/pom.xml | 92 +- .../hbase/metrics/impl/CounterImpl.java | 3 +- .../hbase/metrics/impl/DropwizardMeter.java | 10 +- .../hbase/metrics/impl/FastLongHistogram.java | 16 +- .../hbase/metrics/impl/HistogramImpl.java | 10 +- .../metrics/impl/MetricRegistriesImpl.java | 6 +- .../impl/MetricRegistryFactoryImpl.java | 5 +- .../metrics/impl/MetricRegistryImpl.java | 5 +- .../hbase/metrics/impl/RefCountingMap.java | 18 +- .../hadoop/hbase/metrics/impl/TimerImpl.java | 3 +- .../hbase/metrics/impl/TestCounterImpl.java | 10 +- .../metrics/impl/TestDropwizardMeter.java | 10 +- .../metrics/impl/TestFastLongHistogram.java | 9 +- .../hadoop/hbase/metrics/impl/TestGauge.java | 12 +- .../hbase/metrics/impl/TestHistogramImpl.java | 4 +- .../metrics/impl/TestMetricRegistryImpl.java | 15 +- .../metrics/impl/TestRefCountingMap.java | 9 +- .../hbase/metrics/impl/TestTimerImpl.java | 4 +- hbase-procedure/pom.xml | 73 +- .../AbstractProcedureScheduler.java | 37 +- .../procedure2/CompletedProcedureCleaner.java | 8 +- .../CompletedProcedureRetainer.java | 8 +- .../hbase/procedure2/DelayedProcedure.java | 4 +- .../hbase/procedure2/FailedProcedure.java | 6 +- .../FailedRemoteDispatchException.java | 3 +- .../hadoop/hbase/procedure2/InlineChore.java | 2 +- .../hadoop/hbase/procedure2/LockAndQueue.java | 13 +- .../hadoop/hbase/procedure2/LockStatus.java | 3 +- .../hadoop/hbase/procedure2/LockType.java | 4 +- .../hbase/procedure2/LockedResource.java | 7 +- .../hbase/procedure2/LockedResourceType.java | 10 +- .../procedure2/NoNodeDispatchException.java | 5 +- .../procedure2/NoServerDispatchException.java | 5 +- .../NullTargetServerDispatchException.java | 6 +- .../hbase/procedure2/OnePhaseProcedure.java | 3 +- .../hadoop/hbase/procedure2/Procedure.java | 227 +- .../procedure2/ProcedureAbortedException.java | 3 +- .../hbase/procedure2/ProcedureDeque.java | 11 +- .../hbase/procedure2/ProcedureEvent.java | 40 +- .../hbase/procedure2/ProcedureException.java | 2 +- .../hbase/procedure2/ProcedureExecutor.java | 392 ++- .../procedure2/ProcedureInMemoryChore.java | 20 +- .../hbase/procedure2/ProcedureMetrics.java | 12 +- .../hbase/procedure2/ProcedureScheduler.java | 25 +- .../procedure2/ProcedureStateSerializer.java | 1 - 
.../ProcedureSuspendedException.java | 2 +- .../hbase/procedure2/ProcedureUtil.java | 68 +- .../procedure2/ProcedureYieldException.java | 3 +- .../procedure2/RemoteProcedureDispatcher.java | 77 +- .../procedure2/RemoteProcedureException.java | 27 +- .../hbase/procedure2/RootProcedureState.java | 37 +- .../hbase/procedure2/SequentialProcedure.java | 26 +- .../procedure2/SimpleProcedureScheduler.java | 6 +- .../procedure2/StateMachineProcedure.java | 60 +- .../hbase/procedure2/StoppableThread.java | 2 +- .../procedure2/TimeoutExecutorThread.java | 5 +- .../hbase/procedure2/TwoPhaseProcedure.java | 3 +- .../store/InMemoryProcedureIterator.java | 4 +- .../hbase/procedure2/store/LeaseRecovery.java | 4 +- .../procedure2/store/NoopProcedureStore.java | 3 +- .../procedure2/store/ProcedureStore.java | 55 +- .../procedure2/store/ProcedureStoreBase.java | 9 +- .../hbase/procedure2/store/ProcedureTree.java | 6 +- .../procedure2/store/ProtoAndProcedure.java | 4 +- .../procedure2/store/wal/BitSetNode.java | 4 +- .../CorruptedWALProcedureStoreException.java | 2 +- .../store/wal/ProcedureStoreTracker.java | 64 +- .../store/wal/ProcedureWALFile.java | 10 +- .../store/wal/ProcedureWALFormat.java | 73 +- .../store/wal/ProcedureWALFormatReader.java | 13 +- .../store/wal/ProcedureWALPrettyPrinter.java | 31 +- .../procedure2/store/wal/WALProcedureMap.java | 4 +- .../store/wal/WALProcedureStore.java | 130 +- .../hbase/procedure2/util/ByteSlot.java | 13 +- .../hbase/procedure2/util/DelayedUtil.java | 12 +- .../hbase/procedure2/util/StringUtils.java | 17 +- .../procedure2/ProcedureTestingUtility.java | 7 +- .../hbase/procedure2/TestChildProcedures.java | 18 +- .../procedure2/TestForceUpdateProcedure.java | 8 +- .../hbase/procedure2/TestLockAndQueue.java | 2 +- .../hbase/procedure2/TestProcedureBypass.java | 64 +- .../procedure2/TestProcedureCleanup.java | 9 +- .../hbase/procedure2/TestProcedureEvents.java | 53 +- .../procedure2/TestProcedureExecution.java | 9 +- .../procedure2/TestProcedureExecutor.java | 13 +- .../TestProcedureInMemoryChore.java | 6 +- .../procedure2/TestProcedureMetrics.java | 12 +- .../hbase/procedure2/TestProcedureNonce.java | 19 +- .../procedure2/TestProcedureRecovery.java | 48 +- .../procedure2/TestProcedureReplayOrder.java | 26 +- .../TestProcedureRollbackAIOOB.java | 8 +- .../TestProcedureSchedulerConcurrency.java | 14 +- .../TestProcedureSkipPersistence.java | 4 +- .../procedure2/TestProcedureSuspended.java | 18 +- .../procedure2/TestProcedureToString.java | 31 +- .../hbase/procedure2/TestProcedureUtil.java | 7 +- ...ureDispatcherUncaughtExceptionHandler.java | 2 +- .../procedure2/TestStateMachineProcedure.java | 26 +- .../hbase/procedure2/TestYieldProcedures.java | 30 +- .../ProcedureStorePerformanceEvaluation.java | 37 +- .../procedure2/store/TestProcedureTree.java | 4 +- ...ocedureWALLoaderPerformanceEvaluation.java | 61 +- .../ProcedureWALPerformanceEvaluation.java | 6 +- .../procedure2/store/wal/TestBitSetNode.java | 2 +- .../store/wal/TestProcedureStoreTracker.java | 7 +- .../wal/TestStressWALProcedureStore.java | 13 +- .../store/wal/TestWALProcedureStore.java | 126 +- .../procedure2/util/TestDelayedUtil.java | 17 +- hbase-protocol-shaded/pom.xml | 98 +- .../hbase/util/ForeignExceptionUtil.java | 26 +- hbase-protocol/pom.xml | 72 +- .../protobuf/HBaseZeroCopyByteString.java | 22 +- .../hadoop/hbase/util/ByteStringer.java | 18 +- hbase-replication/pom.xml | 57 +- .../hbase/replication/ReplicationFactory.java | 2 +- .../replication/ReplicationListener.java | 1 - 
.../hbase/replication/ReplicationPeer.java | 7 +- .../ReplicationPeerConfigListener.java | 6 +- .../replication/ReplicationPeerImpl.java | 6 +- .../replication/ReplicationPeerStorage.java | 7 +- .../hbase/replication/ReplicationPeers.java | 7 +- .../replication/ReplicationQueueInfo.java | 59 +- .../replication/ReplicationQueueStorage.java | 51 +- .../ReplicationStorageFactory.java | 4 +- .../hbase/replication/ReplicationUtils.java | 24 +- .../replication/ZKReplicationPeerStorage.java | 19 +- .../ZKReplicationQueueStorage.java | 133 +- .../replication/ZKReplicationStorageBase.java | 5 +- .../TestReplicationStateBasic.java | 24 +- .../TestReplicationStateZKImpl.java | 10 +- .../TestZKReplicationPeerStorage.java | 73 +- .../TestZKReplicationQueueStorage.java | 6 +- hbase-resource-bundle/pom.xml | 16 +- hbase-rest/pom.xml | 250 +- .../apache/hadoop/hbase/rest/Constants.java | 13 +- .../hadoop/hbase/rest/ExistsResource.java | 20 +- .../apache/hadoop/hbase/rest/MetricsREST.java | 18 +- .../hadoop/hbase/rest/MultiRowResource.java | 27 +- .../rest/NamespacesInstanceResource.java | 108 +- .../hadoop/hbase/rest/NamespacesResource.java | 11 +- .../hbase/rest/ProtobufMessageHandler.java | 14 +- .../hbase/rest/ProtobufStreamingOutput.java | 18 +- .../apache/hadoop/hbase/rest/RESTServer.java | 117 +- .../apache/hadoop/hbase/rest/RESTServlet.java | 40 +- .../hbase/rest/RESTServletContainer.java | 24 +- .../hadoop/hbase/rest/RegionsResource.java | 20 +- .../hadoop/hbase/rest/ResourceBase.java | 51 +- .../hadoop/hbase/rest/ResultGenerator.java | 8 +- .../hadoop/hbase/rest/RootResource.java | 17 +- .../apache/hadoop/hbase/rest/RowResource.java | 363 +-- .../hadoop/hbase/rest/RowResultGenerator.java | 10 +- .../org/apache/hadoop/hbase/rest/RowSpec.java | 109 +- .../hbase/rest/ScannerInstanceResource.java | 56 +- .../hadoop/hbase/rest/ScannerResource.java | 66 +- .../hbase/rest/ScannerResultGenerator.java | 29 +- .../hadoop/hbase/rest/SchemaResource.java | 75 +- .../rest/StorageClusterStatusResource.java | 40 +- .../rest/StorageClusterVersionResource.java | 18 +- .../hadoop/hbase/rest/TableResource.java | 71 +- .../hadoop/hbase/rest/TableScanResource.java | 17 +- .../hadoop/hbase/rest/VersionResource.java | 15 +- .../hadoop/hbase/rest/client/Client.java | 342 +- .../hadoop/hbase/rest/client/Cluster.java | 21 +- .../hadoop/hbase/rest/client/Response.java | 24 +- .../hadoop/hbase/rest/filter/AuthFilter.java | 29 +- .../hbase/rest/filter/GZIPRequestStream.java | 3 - .../hbase/rest/filter/GZIPRequestWrapper.java | 4 - .../hbase/rest/filter/GZIPResponseStream.java | 3 - .../rest/filter/GZIPResponseWrapper.java | 12 +- .../hadoop/hbase/rest/filter/GzipFilter.java | 24 +- .../rest/filter/RestCsrfPreventionFilter.java | 139 +- .../hadoop/hbase/rest/model/CellModel.java | 77 +- .../hadoop/hbase/rest/model/CellSetModel.java | 18 +- .../hbase/rest/model/ColumnSchemaModel.java | 48 +- .../rest/model/NamespacesInstanceModel.java | 51 +- .../hbase/rest/model/NamespacesModel.java | 23 +- .../hadoop/hbase/rest/model/RowModel.java | 56 +- .../hadoop/hbase/rest/model/ScannerModel.java | 432 +-- .../rest/model/StorageClusterStatusModel.java | 110 +- .../model/StorageClusterVersionModel.java | 18 +- .../hbase/rest/model/TableInfoModel.java | 37 +- .../hbase/rest/model/TableListModel.java | 25 +- .../hadoop/hbase/rest/model/TableModel.java | 21 +- .../hbase/rest/model/TableRegionModel.java | 55 +- .../hbase/rest/model/TableSchemaModel.java | 54 +- .../hadoop/hbase/rest/model/VersionModel.java | 39 +- 
.../rest/provider/JAXBContextResolver.java | 31 +- .../consumer/ProtobufMessageBodyConsumer.java | 28 +- .../PlainTextMessageBodyProducer.java | 24 +- .../producer/ProtobufMessageBodyProducer.java | 25 +- .../apache/hadoop/hbase/rest/DummyFilter.java | 10 +- .../hbase/rest/HBaseRESTTestingUtility.java | 7 +- .../hbase/rest/PerformanceEvaluation.java | 328 +- .../hadoop/hbase/rest/RowResourceBase.java | 284 +- .../hadoop/hbase/rest/TestDeleteRow.java | 10 +- .../hbase/rest/TestGZIPResponseWrapper.java | 6 +- .../hbase/rest/TestGetAndPutResource.java | 191 +- .../hadoop/hbase/rest/TestGzipFilter.java | 13 +- .../hbase/rest/TestMultiRowResource.java | 15 +- .../rest/TestNamespacesInstanceResource.java | 71 +- .../hbase/rest/TestNamespacesResource.java | 9 +- .../hadoop/hbase/rest/TestRESTServerSSL.java | 15 +- .../hadoop/hbase/rest/TestResourceFilter.java | 12 +- .../hbase/rest/TestScannerResource.java | 66 +- .../hbase/rest/TestScannersWithFilters.java | 273 +- .../hbase/rest/TestScannersWithLabels.java | 25 +- .../hadoop/hbase/rest/TestSchemaResource.java | 39 +- .../hbase/rest/TestSecureRESTServer.java | 121 +- .../hbase/rest/TestSecurityHeadersFilter.java | 50 +- .../hadoop/hbase/rest/TestStatusResource.java | 18 +- .../hadoop/hbase/rest/TestTableResource.java | 43 +- .../hadoop/hbase/rest/TestTableScan.java | 154 +- .../hbase/rest/TestVersionResource.java | 46 +- .../hadoop/hbase/rest/client/RemoteAdmin.java | 273 +- .../hbase/rest/client/RemoteHTable.java | 389 ++- .../rest/client/TestRemoteAdminRetries.java | 10 +- .../rest/client/TestRemoteHTableRetries.java | 25 +- .../hbase/rest/client/TestRemoteTable.java | 53 +- .../hbase/rest/client/TestXmlParsing.java | 19 +- .../hbase/rest/model/TestCellModel.java | 15 +- .../hbase/rest/model/TestCellSetModel.java | 60 +- .../rest/model/TestColumnSchemaModel.java | 21 +- .../hbase/rest/model/TestModelBase.java | 28 +- .../model/TestNamespacesInstanceModel.java | 39 +- .../hbase/rest/model/TestNamespacesModel.java | 15 +- .../hadoop/hbase/rest/model/TestRowModel.java | 14 +- .../hbase/rest/model/TestScannerModel.java | 27 +- .../model/TestStorageClusterStatusModel.java | 103 +- .../model/TestStorageClusterVersionModel.java | 14 +- .../hbase/rest/model/TestTableInfoModel.java | 32 +- .../hbase/rest/model/TestTableListModel.java | 15 +- .../rest/model/TestTableRegionModel.java | 35 +- .../rest/model/TestTableSchemaModel.java | 36 +- .../hbase/rest/model/TestVersionModel.java | 32 +- hbase-rest/src/test/resources/mapred-site.xml | 1 - hbase-rsgroup/pom.xml | 87 +- .../hadoop/hbase/rsgroup/RSGroupAdmin.java | 36 +- .../hbase/rsgroup/RSGroupAdminClient.java | 94 +- .../hbase/rsgroup/RSGroupAdminEndpoint.java | 187 +- .../hbase/rsgroup/RSGroupAdminServer.java | 222 +- .../rsgroup/RSGroupBasedLoadBalancer.java | 103 +- .../hbase/rsgroup/RSGroupInfoManager.java | 34 +- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 88 +- .../rsgroup/RSGroupMajorCompactionTTL.java | 29 +- .../hbase/rsgroup/RSGroupProtobufUtil.java | 58 +- .../hadoop/hbase/rsgroup/RSGroupUtil.java | 3 +- .../hbase/rsgroup/RSGroupableBalancer.java | 7 +- .../balancer/RSGroupableBalancerTestBase.java | 153 +- .../TestRSGroupBasedLoadBalancer.java | 58 +- ...rWithStochasticLoadBalancerAsInternal.java | 22 +- .../TestSCPWithReplicasWithRSGroup.java | 6 +- .../TestDetermineRSGroupInfoForTable.java | 26 +- .../hbase/rsgroup/TestEnableRSGroups.java | 10 +- .../hbase/rsgroup/TestRSGroupConfig.java | 7 +- .../TestRSGroupMajorCompactionTTL.java | 22 +- .../rsgroup/TestRSGroupMappingScript.java | 
30 +- .../hadoop/hbase/rsgroup/TestRSGroupUtil.java | 19 +- .../hbase/rsgroup/TestRSGroupsAdmin1.java | 48 +- .../hbase/rsgroup/TestRSGroupsAdmin2.java | 76 +- .../hbase/rsgroup/TestRSGroupsBalance.java | 9 +- .../hbase/rsgroup/TestRSGroupsBase.java | 120 +- .../hbase/rsgroup/TestRSGroupsBasics.java | 13 +- .../hbase/rsgroup/TestRSGroupsFallback.java | 19 +- .../hbase/rsgroup/TestRSGroupsKillRS.java | 51 +- .../rsgroup/TestRSGroupsOfflineMode.java | 27 +- .../hbase/rsgroup/TestRSGroupsWithACL.java | 111 +- .../TestTableDescriptorWithRSGroup.java | 35 +- .../TestUpdateRSGroupConfiguration.java | 23 +- .../rsgroup/VerifyingRSGroupAdminClient.java | 35 +- hbase-server/pom.xml | 486 +-- .../hadoop/hbase/CoordinatedStateManager.java | 15 +- .../hadoop/hbase/DaemonThreadFactory.java | 2 +- .../hadoop/hbase/HDFSBlocksDistribution.java | 58 +- .../apache/hadoop/hbase/HealthCheckChore.java | 11 +- .../apache/hadoop/hbase/HealthChecker.java | 49 +- .../org/apache/hadoop/hbase/HealthReport.java | 10 +- .../org/apache/hadoop/hbase/JMXListener.java | 60 +- .../hadoop/hbase/LocalHBaseCluster.java | 240 +- .../hadoop/hbase/MetaRegionLocationCache.java | 34 +- .../hadoop/hbase/RegionStateListener.java | 15 +- .../java/org/apache/hadoop/hbase/Server.java | 20 +- .../apache/hadoop/hbase/SplitLogCounters.java | 30 +- .../org/apache/hadoop/hbase/SplitLogTask.java | 34 +- .../SslRMIClientSocketFactorySecure.java | 28 +- .../SslRMIServerSocketFactorySecure.java | 30 +- .../apache/hadoop/hbase/TableDescriptors.java | 11 +- .../hadoop/hbase/YouAreDeadException.java | 4 +- .../hadoop/hbase/ZKNamespaceManager.java | 52 +- .../org/apache/hadoop/hbase/ZNodeClearer.java | 69 +- .../hbase/backup/FailedArchiveException.java | 11 +- .../hadoop/hbase/backup/HFileArchiver.java | 272 +- .../backup/example/HFileArchiveManager.java | 17 +- .../example/HFileArchiveTableMonitor.java | 8 +- .../LongTermArchivingHFileCleaner.java | 15 +- .../example/TableHFileArchiveTracker.java | 20 +- .../backup/example/ZKTableArchiveClient.java | 26 +- .../hbase/client/ClientSideRegionScanner.java | 16 +- .../hbase/client/RegionServerRegistry.java | 2 +- .../hbase/client/ServerConnectionUtils.java | 49 +- .../hbase/client/TableSnapshotScanner.java | 68 +- .../hadoop/hbase/client/VersionInfoUtil.java | 42 +- .../coprocessor/RowProcessorClient.java | 21 +- .../hbase/client/locking/EntityLock.java | 102 +- .../client/locking/LockServiceClient.java | 51 +- .../hadoop/hbase/codec/MessageCodec.java | 32 +- .../hbase/constraint/BaseConstraint.java | 8 +- .../hadoop/hbase/constraint/Constraint.java | 60 +- .../hbase/constraint/ConstraintException.java | 19 +- .../hbase/constraint/ConstraintProcessor.java | 18 +- .../hadoop/hbase/constraint/Constraints.java | 382 +-- .../SplitLogManagerCoordination.java | 16 +- .../SplitLogWorkerCoordination.java | 63 +- .../ZKSplitLogManagerCoordination.java | 104 +- .../ZkCoordinatedStateManager.java | 9 +- .../ZkSplitLogWorkerCoordination.java | 98 +- .../hbase/coprocessor/BaseEnvironment.java | 25 +- .../coprocessor/BaseRowProcessorEndpoint.java | 81 +- .../hbase/coprocessor/BulkLoadObserver.java | 73 +- .../hbase/coprocessor/CoprocessorHost.java | 278 +- .../hbase/coprocessor/CoprocessorService.java | 9 +- ...oprocessorServiceBackwardCompatiblity.java | 26 +- .../hbase/coprocessor/CoreCoprocessor.java | 16 +- .../hbase/coprocessor/EndpointObserver.java | 83 +- .../hbase/coprocessor/HasMasterServices.java | 6 +- .../coprocessor/HasRegionServerServices.java | 6 +- .../hbase/coprocessor/MasterCoprocessor.java | 14 +- 
.../MasterCoprocessorEnvironment.java | 66 +- .../hbase/coprocessor/MasterObserver.java | 1410 +++++---- .../hbase/coprocessor/MetaTableMetrics.java | 37 +- .../hbase/coprocessor/MetricsCoprocessor.java | 78 +- .../coprocessor/MultiRowMutationEndpoint.java | 46 +- .../hbase/coprocessor/ObserverContext.java | 57 +- .../coprocessor/ObserverContextImpl.java | 2 +- .../coprocessor/ReadOnlyConfiguration.java | 8 +- .../hbase/coprocessor/RegionCoprocessor.java | 14 +- .../RegionCoprocessorEnvironment.java | 21 +- .../hbase/coprocessor/RegionObserver.java | 1165 +++---- .../coprocessor/RegionServerCoprocessor.java | 14 +- .../RegionServerCoprocessorEnvironment.java | 65 +- .../coprocessor/RegionServerObserver.java | 84 +- .../SingletonCoprocessorService.java | 5 +- .../hbase/coprocessor/WALCoprocessor.java | 14 +- .../WALCoprocessorEnvironment.java | 15 +- .../hadoop/hbase/coprocessor/WALObserver.java | 92 +- .../hbase/errorhandling/ForeignException.java | 68 +- .../ForeignExceptionDispatcher.java | 31 +- .../ForeignExceptionListener.java | 2 +- .../errorhandling/ForeignExceptionSnare.java | 36 +- .../hbase/errorhandling/TimeoutException.java | 14 +- .../TimeoutExceptionInjector.java | 19 +- .../hadoop/hbase/executor/EventHandler.java | 71 +- .../hadoop/hbase/executor/EventType.java | 119 +- .../hbase/executor/ExecutorService.java | 122 +- .../hadoop/hbase/executor/ExecutorType.java | 46 +- .../favored/FavoredNodeAssignmentHelper.java | 226 +- .../favored/FavoredNodeLoadBalancer.java | 167 +- .../hbase/favored/FavoredNodesManager.java | 74 +- .../hbase/favored/FavoredNodesPlan.java | 35 +- .../hbase/favored/FavoredNodesPromoter.java | 13 +- .../favored/StartcodeAgnosticServerName.java | 24 +- .../hadoop/hbase/filter/FilterWrapper.java | 50 +- .../apache/hadoop/hbase/fs/HFileSystem.java | 188 +- .../hbase/io/FSDataInputStreamWrapper.java | 101 +- .../org/apache/hadoop/hbase/io/FileLink.java | 137 +- .../org/apache/hadoop/hbase/io/HFileLink.java | 296 +- .../hadoop/hbase/io/HalfStoreFileReader.java | 67 +- .../org/apache/hadoop/hbase/io/MetricsIO.java | 5 +- .../hadoop/hbase/io/MetricsIOWrapperImpl.java | 3 +- .../org/apache/hadoop/hbase/io/Reference.java | 113 +- .../org/apache/hadoop/hbase/io/WALLink.java | 28 +- .../hadoop/hbase/io/WritableWithSize.java | 9 +- .../hadoop/hbase/io/hfile/AgeSnapshot.java | 9 +- .../hadoop/hbase/io/hfile/BlockCache.java | 36 +- .../hbase/io/hfile/BlockCacheFactory.java | 67 +- .../hadoop/hbase/io/hfile/BlockCacheKey.java | 18 +- .../hadoop/hbase/io/hfile/BlockCacheUtil.java | 102 +- .../hbase/io/hfile/BlockCachesIterator.java | 9 +- .../hbase/io/hfile/BlockWithScanInfo.java | 8 +- .../hadoop/hbase/io/hfile/CacheConfig.java | 141 +- .../hadoop/hbase/io/hfile/CacheStats.java | 55 +- .../hadoop/hbase/io/hfile/Cacheable.java | 24 +- .../hbase/io/hfile/CacheableDeserializer.java | 8 +- .../hfile/CacheableDeserializerIdManager.java | 56 +- .../hadoop/hbase/io/hfile/CachedBlock.java | 7 +- .../hadoop/hbase/io/hfile/ChecksumUtil.java | 100 +- .../hbase/io/hfile/CombinedBlockCache.java | 119 +- .../hbase/io/hfile/CompoundBloomFilter.java | 50 +- .../io/hfile/CompoundBloomFilterBase.java | 15 +- .../io/hfile/CompoundBloomFilterWriter.java | 97 +- .../hbase/io/hfile/CorruptHFileException.java | 4 +- .../io/hfile/ExclusiveMemHFileBlock.java | 10 +- .../hbase/io/hfile/FirstLevelBlockCache.java | 4 +- .../hbase/io/hfile/FixedFileTrailer.java | 189 +- .../apache/hadoop/hbase/io/hfile/HFile.java | 339 +- .../hadoop/hbase/io/hfile/HFileBlock.java | 928 +++--- 
.../hbase/io/hfile/HFileBlockBuilder.java | 10 +- .../hbase/io/hfile/HFileBlockIndex.java | 797 ++--- .../hbase/io/hfile/HFileDataBlockEncoder.java | 81 +- .../io/hfile/HFileDataBlockEncoderImpl.java | 45 +- .../hadoop/hbase/io/hfile/HFileInfo.java | 121 +- .../hbase/io/hfile/HFilePreadReader.java | 11 +- .../hbase/io/hfile/HFilePrettyPrinter.java | 194 +- .../hbase/io/hfile/HFileReaderImpl.java | 447 ++- .../hadoop/hbase/io/hfile/HFileScanner.java | 119 +- .../hbase/io/hfile/HFileStreamReader.java | 8 +- .../hadoop/hbase/io/hfile/HFileUtil.java | 15 +- .../hbase/io/hfile/HFileWriterImpl.java | 239 +- .../io/hfile/InclusiveCombinedBlockCache.java | 37 +- .../io/hfile/IndexOnlyLruBlockCache.java | 8 +- .../hbase/io/hfile/InlineBlockWriter.java | 36 +- .../hbase/io/hfile/InvalidHFileException.java | 3 +- .../hbase/io/hfile/LruAdaptiveBlockCache.java | 457 ++- .../hadoop/hbase/io/hfile/LruBlockCache.java | 347 +-- .../hadoop/hbase/io/hfile/LruCachedBlock.java | 37 +- .../hbase/io/hfile/LruCachedBlockQueue.java | 45 +- .../hbase/io/hfile/NoOpDataBlockEncoder.java | 51 +- .../hbase/io/hfile/PrefetchExecutor.java | 46 +- .../hadoop/hbase/io/hfile/ReaderContext.java | 6 +- .../hbase/io/hfile/ReaderContextBuilder.java | 15 +- .../hbase/io/hfile/ResizableBlockCache.java | 3 +- .../hbase/io/hfile/SharedMemHFileBlock.java | 10 +- .../hbase/io/hfile/TinyLfuBlockCache.java | 134 +- .../io/hfile/bucket/BucketAllocator.java | 135 +- .../bucket/BucketAllocatorException.java | 28 +- .../hbase/io/hfile/bucket/BucketCache.java | 384 ++- .../io/hfile/bucket/BucketCacheStats.java | 37 +- .../hbase/io/hfile/bucket/BucketEntry.java | 14 +- .../io/hfile/bucket/BucketProtoUtils.java | 64 +- .../io/hfile/bucket/ByteBufferIOEngine.java | 47 +- .../io/hfile/bucket/CacheFullException.java | 31 +- .../io/hfile/bucket/CachedEntryQueue.java | 65 +- .../bucket/ExclusiveMemoryMmapIOEngine.java | 26 +- .../hbase/io/hfile/bucket/FileIOEngine.java | 87 +- .../io/hfile/bucket/FileMmapIOEngine.java | 17 +- .../hbase/io/hfile/bucket/IOEngine.java | 45 +- .../io/hfile/bucket/PersistentIOEngine.java | 20 +- .../bucket/SharedMemoryMmapIOEngine.java | 3 +- .../hadoop/hbase/io/util/MemorySizeUtil.java | 108 +- .../hbase/ipc/AdaptiveLifoCoDelCallQueue.java | 134 +- .../hbase/ipc/BalancedQueueRpcExecutor.java | 10 +- .../apache/hadoop/hbase/ipc/BufferChain.java | 21 +- .../hadoop/hbase/ipc/CallQueueInfo.java | 9 +- .../apache/hadoop/hbase/ipc/CallRunner.java | 36 +- .../hbase/ipc/EmptyServiceNameException.java | 5 +- .../ipc/FastPathBalancedQueueRpcExecutor.java | 24 +- .../hbase/ipc/FastPathRWQueueRpcExecutor.java | 17 +- .../hadoop/hbase/ipc/FastPathRpcHandler.java | 7 +- .../hadoop/hbase/ipc/FifoRpcScheduler.java | 14 +- .../hbase/ipc/HBaseRPCErrorHandler.java | 6 +- .../hbase/ipc/MasterFifoRpcScheduler.java | 7 +- .../hbase/ipc/MetaRWQueueRpcExecutor.java | 9 +- .../hadoop/hbase/ipc/MetricsHBaseServer.java | 33 +- .../ipc/MetricsHBaseServerWrapperImpl.java | 4 +- .../hbase/ipc/NettyRpcFrameDecoder.java | 34 +- .../hadoop/hbase/ipc/NettyRpcServer.java | 59 +- .../ipc/NettyRpcServerPreambleHandler.java | 9 +- .../ipc/NettyRpcServerRequestDecoder.java | 12 +- .../ipc/NettyRpcServerResponseEncoder.java | 8 +- .../hadoop/hbase/ipc/NettyServerCall.java | 13 +- .../hbase/ipc/NettyServerRpcConnection.java | 21 +- .../hbase/ipc/PluggableBlockingQueue.java | 26 +- .../hbase/ipc/PluggableRpcQueueNotFound.java | 6 +- .../hadoop/hbase/ipc/PriorityFunction.java | 27 +- .../apache/hadoop/hbase/ipc/QosPriority.java | 2 +- 
.../hadoop/hbase/ipc/QueueBalancer.java | 3 +- .../hadoop/hbase/ipc/RPCTInfoGetter.java | 14 +- .../hadoop/hbase/ipc/RWQueueRpcExecutor.java | 57 +- .../hadoop/hbase/ipc/RandomQueueBalancer.java | 7 +- .../org/apache/hadoop/hbase/ipc/RpcCall.java | 33 +- .../hadoop/hbase/ipc/RpcCallContext.java | 42 +- .../apache/hadoop/hbase/ipc/RpcCallback.java | 4 +- .../apache/hadoop/hbase/ipc/RpcExecutor.java | 114 +- .../apache/hadoop/hbase/ipc/RpcHandler.java | 14 +- .../apache/hadoop/hbase/ipc/RpcResponse.java | 2 +- .../apache/hadoop/hbase/ipc/RpcScheduler.java | 22 +- .../hadoop/hbase/ipc/RpcSchedulerContext.java | 5 +- .../apache/hadoop/hbase/ipc/RpcServer.java | 253 +- .../hadoop/hbase/ipc/RpcServerFactory.java | 29 +- .../hadoop/hbase/ipc/RpcServerInterface.java | 28 +- .../apache/hadoop/hbase/ipc/ServerCall.java | 100 +- .../hadoop/hbase/ipc/ServerRpcConnection.java | 216 +- .../hadoop/hbase/ipc/SimpleRpcScheduler.java | 115 +- .../hadoop/hbase/ipc/SimpleRpcServer.java | 210 +- .../hbase/ipc/SimpleRpcServerResponder.java | 26 +- .../hadoop/hbase/ipc/SimpleServerCall.java | 16 +- .../hbase/ipc/SimpleServerRpcConnection.java | 36 +- .../hbase/ipc/UnknownServiceException.java | 2 +- .../mapreduce/LoadIncrementalHFiles.java | 8 +- .../hbase/master/ActiveMasterManager.java | 125 +- .../master/AssignmentVerificationReport.java | 227 +- .../hadoop/hbase/master/CachedClusterId.java | 24 +- .../hadoop/hbase/master/ClusterSchema.java | 45 +- .../hbase/master/ClusterSchemaService.java | 6 +- .../master/ClusterSchemaServiceImpl.java | 31 +- .../hbase/master/ClusterStatusPublisher.java | 74 +- .../hadoop/hbase/master/DeadServer.java | 66 +- .../hbase/master/DrainingServerTracker.java | 57 +- .../master/ExpiredMobFileCleanerChore.java | 32 +- .../apache/hadoop/hbase/master/HMaster.java | 1289 ++++---- .../hbase/master/HMasterCommandLine.java | 83 +- .../apache/hadoop/hbase/master/HbckChore.java | 101 +- .../hadoop/hbase/master/LoadBalancer.java | 64 +- ...sterAnnotationReadingPriorityFunction.java | 29 +- .../hbase/master/MasterCoprocessorHost.java | 384 +-- .../hadoop/hbase/master/MasterFileSystem.java | 123 +- .../master/MasterInitializationMonitor.java | 6 +- .../master/MasterMobCompactionThread.java | 42 +- .../hbase/master/MasterRegionServerList.java | 2 +- .../hbase/master/MasterRpcServices.java | 1021 +++--- .../hadoop/hbase/master/MasterServices.java | 250 +- .../hadoop/hbase/master/MasterWalManager.java | 73 +- .../master/MetricsAssignmentManager.java | 21 +- .../hadoop/hbase/master/MetricsMaster.java | 42 +- .../hbase/master/MetricsMasterFileSystem.java | 3 +- .../master/MetricsMasterWrapperImpl.java | 35 +- .../hadoop/hbase/master/MetricsSnapshot.java | 3 +- .../master/MirroringTableStateManager.java | 3 +- .../hbase/master/MobCompactionChore.java | 23 +- .../master/NoSuchProcedureException.java | 2 +- .../hadoop/hbase/master/RackManager.java | 20 +- .../master/RegionPlacementMaintainer.java | 408 ++- .../hadoop/hbase/master/RegionPlan.java | 34 +- .../hadoop/hbase/master/RegionServerList.java | 2 +- .../hbase/master/RegionServerTracker.java | 34 +- .../hbase/master/RegionsRecoveryChore.java | 44 +- .../master/RegionsRecoveryConfigManager.java | 15 +- .../hadoop/hbase/master/ServerListener.java | 16 +- .../hadoop/hbase/master/ServerManager.java | 363 +-- .../SnapshotOfRegionAssignmentFromMeta.java | 23 +- .../hadoop/hbase/master/SnapshotSentinel.java | 12 +- .../hadoop/hbase/master/SplitLogManager.java | 166 +- .../hbase/master/SplitOrMergeTracker.java | 29 +- 
.../hadoop/hbase/master/SplitWALManager.java | 69 +- .../hbase/master/TableNamespaceManager.java | 108 +- .../hbase/master/TableStateManager.java | 28 +- .../master/assignment/AssignProcedure.java | 18 +- .../master/assignment/AssignmentManager.java | 606 ++-- .../assignment/AssignmentManagerUtil.java | 77 +- .../assignment/CloseRegionProcedure.java | 14 +- .../assignment/GCMergedRegionsProcedure.java | 75 +- .../GCMultipleMergedRegionsProcedure.java | 73 +- .../master/assignment/GCRegionProcedure.java | 24 +- .../MergeTableRegionsProcedure.java | 193 +- .../assignment/MoveRegionProcedure.java | 20 +- .../assignment/OpenRegionProcedure.java | 25 +- .../assignment/RegionRemoteProcedureBase.java | 33 +- .../master/assignment/RegionStateNode.java | 10 +- .../master/assignment/RegionStateStore.java | 56 +- .../hbase/master/assignment/RegionStates.java | 121 +- .../assignment/RegionTransitionProcedure.java | 25 +- .../hbase/master/assignment/ServerState.java | 13 +- .../master/assignment/ServerStateNode.java | 8 +- .../assignment/SplitTableRegionProcedure.java | 215 +- .../TransitRegionStateProcedure.java | 65 +- .../master/assignment/UnassignProcedure.java | 17 +- .../master/balancer/AssignRegionAction.java | 2 +- .../hbase/master/balancer/BalanceAction.java | 7 +- .../hbase/master/balancer/BalancerChore.java | 10 +- .../master/balancer/BalancerClusterState.java | 75 +- .../master/balancer/BalancerRegionLoad.java | 7 +- .../master/balancer/BaseLoadBalancer.java | 142 +- .../master/balancer/CandidateGenerator.java | 30 +- .../master/balancer/ClusterLoadState.java | 2 +- .../master/balancer/ClusterStatusChore.java | 12 +- .../balancer/CostFromRegionLoadFunction.java | 2 +- .../hbase/master/balancer/CostFunction.java | 10 +- .../master/balancer/DoubleArrayCost.java | 14 +- .../balancer/FavoredStochasticBalancer.java | 156 +- .../HeterogeneousRegionCountCostFunction.java | 58 +- .../master/balancer/LoadBalancerFactory.java | 9 +- .../balancer/LoadCandidateGenerator.java | 19 +- .../LocalityBasedCandidateGenerator.java | 21 +- .../balancer/MaintenanceLoadBalancer.java | 4 +- .../balancer/MemStoreSizeCostFunction.java | 2 +- .../master/balancer/MetricsBalancer.java | 7 +- .../balancer/MetricsStochasticBalancer.java | 11 +- .../master/balancer/MoveRegionAction.java | 3 +- .../PrimaryRegionCountSkewCostFunction.java | 2 +- .../balancer/RackLocalityCostFunction.java | 2 +- .../balancer/RandomCandidateGenerator.java | 2 +- .../balancer/ReadRequestCostFunction.java | 2 +- .../master/balancer/RegionInfoComparator.java | 11 +- .../master/balancer/RegionLocationFinder.java | 87 +- .../RegionReplicaCandidateGenerator.java | 20 +- .../RegionReplicaGroupingCostFunction.java | 10 +- .../RegionReplicaHostCostFunction.java | 3 +- .../hbase/master/balancer/ServerAndLoad.java | 5 +- .../balancer/ServerLocalityCostFunction.java | 2 +- .../master/balancer/SimpleLoadBalancer.java | 261 +- .../balancer/StochasticLoadBalancer.java | 291 +- .../balancer/StoreFileCostFunction.java | 2 +- .../master/balancer/SwapRegionsAction.java | 2 +- .../balancer/TableSkewCostFunction.java | 1 + .../balancer/WriteRequestCostFunction.java | 2 +- .../cleaner/BaseFileCleanerDelegate.java | 8 +- .../cleaner/BaseHFileCleanerDelegate.java | 11 +- .../cleaner/BaseLogCleanerDelegate.java | 24 +- .../cleaner/BaseTimeToLiveFileCleaner.java | 4 +- .../hbase/master/cleaner/CleanerChore.java | 70 +- .../hbase/master/cleaner/DirScanPool.java | 2 +- .../master/cleaner/FileCleanerDelegate.java | 13 +- .../hbase/master/cleaner/HFileCleaner.java | 98 +-
.../master/cleaner/HFileLinkCleaner.java | 30 +- .../hbase/master/cleaner/LogCleaner.java | 41 +- .../cleaner/ReplicationBarrierCleaner.java | 17 +- .../master/cleaner/SnapshotCleanerChore.java | 35 +- .../cleaner/TimeToLiveHFileCleaner.java | 6 +- .../master/cleaner/TimeToLiveLogCleaner.java | 2 +- ...imeToLiveMasterLocalStoreHFileCleaner.java | 2 +- .../TimeToLiveMasterLocalStoreWALCleaner.java | 2 +- .../TimeToLiveProcedureWALCleaner.java | 2 +- .../hbase/master/http/MasterDumpServlet.java | 13 +- .../master/http/MasterRedirectServlet.java | 25 +- .../master/http/MasterStatusServlet.java | 36 +- .../hadoop/hbase/master/http/MetaBrowser.java | 85 +- .../hbase/master/http/RegionReplicaInfo.java | 47 +- .../hbase/master/http/RegionVisualizer.java | 60 +- .../http/api_v1/ResourceConfigFactory.java | 28 +- .../cluster_metrics/model/ClusterMetrics.java | 15 +- .../resource/ClusterMetricsResource.java | 18 +- .../hbase/master/http/gson/GsonFactory.java | 10 +- .../http/gson/GsonSerializationFeature.java | 9 +- .../http/gson/SizeAsBytesSerializer.java | 1 + .../master/http/jersey/MasterFeature.java | 9 +- .../hbase/master/janitor/CatalogJanitor.java | 46 +- .../hbase/master/janitor/MetaFixer.java | 132 +- .../hadoop/hbase/master/janitor/Report.java | 6 +- .../master/janitor/ReportMakingVisitor.java | 6 +- .../hbase/master/locking/LockManager.java | 107 +- .../hbase/master/locking/LockProcedure.java | 76 +- .../master/migrate/RollingUpgradeChore.java | 19 +- .../normalizer/MergeNormalizationPlan.java | 21 +- .../master/normalizer/NormalizationPlan.java | 6 +- .../normalizer/NormalizationTarget.java | 18 +- .../master/normalizer/RegionNormalizer.java | 19 +- .../normalizer/RegionNormalizerFactory.java | 6 +- .../normalizer/RegionNormalizerManager.java | 24 +- .../normalizer/RegionNormalizerWorkQueue.java | 42 +- .../normalizer/RegionNormalizerWorker.java | 44 +- .../normalizer/SimpleRegionNormalizer.java | 198 +- .../normalizer/SplitNormalizationPlan.java | 15 +- ...bstractStateMachineNamespaceProcedure.java | 12 +- .../AbstractStateMachineRegionProcedure.java | 18 +- .../AbstractStateMachineTableProcedure.java | 31 +- .../procedure/CloneSnapshotProcedure.java | 189 +- .../procedure/CreateNamespaceProcedure.java | 135 +- .../procedure/CreateTableProcedure.java | 116 +- .../procedure/DeleteNamespaceProcedure.java | 152 +- .../procedure/DeleteTableProcedure.java | 54 +- .../procedure/DisableTableProcedure.java | 65 +- .../procedure/EnableTableProcedure.java | 95 +- .../hbase/master/procedure/FairQueue.java | 2 +- .../procedure/HBCKServerCrashProcedure.java | 84 +- .../master/procedure/InitMetaProcedure.java | 9 +- .../procedure/MasterDDLOperationHelper.java | 16 +- .../procedure/MasterProcedureConstants.java | 16 +- .../master/procedure/MasterProcedureEnv.java | 5 +- .../procedure/MasterProcedureScheduler.java | 201 +- .../master/procedure/MasterProcedureUtil.java | 42 +- .../procedure/MetaProcedureInterface.java | 2 +- .../hbase/master/procedure/MetaQueue.java | 2 +- .../procedure/ModifyNamespaceProcedure.java | 89 +- .../ModifyTableDescriptorProcedure.java | 4 +- .../procedure/ModifyTableProcedure.java | 139 +- .../procedure/PeerProcedureInterface.java | 9 +- .../hbase/master/procedure/PeerQueue.java | 4 +- .../master/procedure/ProcedureDescriber.java | 30 +- .../procedure/ProcedurePrepareLatch.java | 26 +- .../master/procedure/ProcedureSyncWait.java | 61 +- .../hadoop/hbase/master/procedure/Queue.java | 7 +- .../procedure/RSProcedureDispatcher.java | 85 +- .../procedure/RecoverMetaProcedure.java | 14 +-
.../ReopenTableRegionsProcedure.java | 28 +- .../procedure/RestoreSnapshotProcedure.java | 166 +- .../hbase/master/procedure/SchemaLocking.java | 18 +- .../procedure/ServerCrashException.java | 8 +- .../procedure/ServerCrashProcedure.java | 107 +- .../procedure/ServerProcedureInterface.java | 12 +- .../hbase/master/procedure/ServerQueue.java | 2 +- .../procedure/ServerRemoteProcedure.java | 92 +- .../master/procedure/SplitWALProcedure.java | 42 +- .../procedure/SplitWALRemoteProcedure.java | 33 +- .../procedure/SwitchRpcThrottleProcedure.java | 20 +- .../SwitchRpcThrottleRemoteProcedure.java | 19 +- .../procedure/TableProcedureInterface.java | 28 +- .../hbase/master/procedure/TableQueue.java | 4 +- .../procedure/TruncateTableProcedure.java | 53 +- .../hbase/master/region/MasterRegion.java | 27 +- .../master/region/MasterRegionFactory.java | 9 +- .../MasterRegionFlusherAndCompactor.java | 2 +- .../master/region/MasterRegionParams.java | 2 +- .../master/region/MasterRegionUtils.java | 2 +- .../master/region/MasterRegionWALRoller.java | 6 +- .../region/RegionScannerAsResultScanner.java | 4 +- .../master/region/UpdateMasterRegion.java | 4 +- .../replication/AbstractPeerProcedure.java | 4 +- .../master/replication/AddPeerProcedure.java | 11 +- .../ClaimReplicationQueueRemoteProcedure.java | 2 +- .../ClaimReplicationQueuesProcedure.java | 2 +- .../replication/DisablePeerProcedure.java | 2 +- .../replication/EnablePeerProcedure.java | 2 +- .../replication/ModifyPeerProcedure.java | 41 +- .../replication/RefreshPeerProcedure.java | 11 +- .../replication/RemovePeerProcedure.java | 4 +- .../replication/ReplicationPeerManager.java | 79 +- .../UpdatePeerConfigProcedure.java | 8 +- .../master/slowlog/SlowLogMasterService.java | 21 +- .../DisabledTableSnapshotHandler.java | 22 +- .../snapshot/EnabledTableSnapshotHandler.java | 32 +- .../snapshot/MasterSnapshotVerifier.java | 72 +- .../master/snapshot/SnapshotFileCache.java | 59 +- .../master/snapshot/SnapshotHFileCleaner.java | 22 +- .../master/snapshot/SnapshotManager.java | 403 ++- .../master/snapshot/TakeSnapshotHandler.java | 88 +- .../hbase/master/zksyncer/ClientZKSyncer.java | 5 +- .../master/zksyncer/MasterAddressSyncer.java | 3 +- .../master/zksyncer/MetaLocationSyncer.java | 3 +- .../hadoop/hbase/mob/CachedMobFile.java | 26 +- .../hbase/mob/DefaultMobStoreCompactor.java | 115 +- .../hbase/mob/DefaultMobStoreFlusher.java | 103 +- .../hbase/mob/ExpiredMobFileCleaner.java | 39 +- .../hbase/mob/ManualMobMaintHFileCleaner.java | 16 +- .../org/apache/hadoop/hbase/mob/MobCell.java | 5 +- .../apache/hadoop/hbase/mob/MobConstants.java | 41 +- .../org/apache/hadoop/hbase/mob/MobFile.java | 42 +- .../apache/hadoop/hbase/mob/MobFileCache.java | 47 +- .../apache/hadoop/hbase/mob/MobFileName.java | 59 +- .../hadoop/hbase/mob/MobStoreEngine.java | 3 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 512 ++- .../mob/compactions/MobCompactionRequest.java | 3 +- .../hbase/mob/compactions/MobCompactor.java | 19 +- .../PartitionedMobCompactionRequest.java | 48 +- .../compactions/PartitionedMobCompactor.java | 226 +- .../MemoryBoundedLogMessageBuffer.java | 49 +- .../hbase/monitoring/MonitoredRPCHandler.java | 18 +- .../monitoring/MonitoredRPCHandlerImpl.java | 90 +- .../hbase/monitoring/MonitoredTask.java | 26 +- .../hbase/monitoring/MonitoredTaskImpl.java | 19 +- .../hbase/monitoring/StateDumpServlet.java | 16 +- .../hadoop/hbase/monitoring/TaskMonitor.java | 130 +- .../hbase/monitoring/ThreadMonitoring.java | 31 +-
.../namequeues/BalancerDecisionDetails.java | 6 +- .../namequeues/BalancerRejectionDetails.java | 6 +- .../namequeues/DisruptorExceptionHandler.java | 2 - .../hbase/namequeues/LogEventHandler.java | 35 +- .../hbase/namequeues/LogHandlerUtils.java | 17 +- .../hbase/namequeues/NamedQueuePayload.java | 9 +- .../hbase/namequeues/NamedQueueRecorder.java | 34 +- .../hbase/namequeues/NamedQueueService.java | 19 +- .../hbase/namequeues/RingBufferEnvelope.java | 16 +- .../hbase/namequeues/RpcLogDetails.java | 20 +- .../namequeues/SlowLogPersistentService.java | 20 +- .../namequeues/SlowLogTableOpsChore.java | 12 +- .../impl/BalancerDecisionQueueService.java | 28 +- .../impl/BalancerRejectionQueueService.java | 43 +- .../namequeues/impl/SlowLogQueueService.java | 64 +- .../request/NamedQueueGetRequest.java | 33 +- .../response/NamedQueueGetResponse.java | 13 +- .../hbase/namespace/NamespaceAuditor.java | 46 +- .../namespace/NamespaceStateManager.java | 87 +- .../NamespaceTableAndRegionInfo.java | 23 +- .../procedure/MasterProcedureManager.java | 84 +- .../procedure/MasterProcedureManagerHost.java | 13 +- .../hadoop/hbase/procedure/Procedure.java | 171 +- .../hbase/procedure/ProcedureCoordinator.java | 124 +- .../procedure/ProcedureCoordinatorRpcs.java | 38 +- .../hbase/procedure/ProcedureManager.java | 10 +- .../hbase/procedure/ProcedureManagerHost.java | 33 +- .../hbase/procedure/ProcedureMember.java | 98 +- .../hbase/procedure/ProcedureMemberRpcs.java | 24 +- .../RegionServerProcedureManager.java | 16 +- .../RegionServerProcedureManagerHost.java | 19 +- .../hadoop/hbase/procedure/Subprocedure.java | 144 +- .../hbase/procedure/SubprocedureFactory.java | 8 +- .../procedure/ZKProcedureCoordinator.java | 62 +- .../procedure/ZKProcedureMemberRpcs.java | 104 +- .../hbase/procedure/ZKProcedureUtil.java | 85 +- .../flush/FlushTableSubprocedure.java | 31 +- .../MasterFlushTableProcedureManager.java | 30 +- ...egionServerFlushTableProcedureManager.java | 112 +- .../procedure2/BaseRSProcedureCallable.java | 2 +- .../hbase/procedure2/RSProcedureCallable.java | 5 +- .../region/HFileProcedurePrettyPrinter.java | 10 +- .../store/region/RegionProcedureStore.java | 30 +- .../region/WALProcedurePrettyPrinter.java | 12 +- .../protobuf/ReplicationProtbufUtil.java | 79 +- .../hbase/quotas/ActivePolicyEnforcement.java | 67 +- .../quotas/AverageIntervalRateLimiter.java | 35 +- .../hbase/quotas/DefaultOperationQuota.java | 22 +- .../hbase/quotas/ExceedOperationQuota.java | 8 +- .../hbase/quotas/FileArchiverNotifier.java | 29 +- .../quotas/FileArchiverNotifierFactory.java | 17 +- .../FileArchiverNotifierFactoryImpl.java | 29 +- .../quotas/FileArchiverNotifierImpl.java | 144 +- .../quotas/FileSystemUtilizationChore.java | 51 +- .../quotas/FixedIntervalRateLimiter.java | 27 +- .../hbase/quotas/GlobalQuotaSettings.java | 22 +- .../hbase/quotas/GlobalQuotaSettingsImpl.java | 68 +- .../hbase/quotas/MasterQuotaManager.java | 181 +- .../hbase/quotas/MasterQuotasObserver.java | 30 +- .../quotas/NamespaceQuotaSnapshotStore.java | 33 +- .../hbase/quotas/NoOpRegionSizeStore.java | 30 +- .../hbase/quotas/NoopOperationQuota.java | 11 +- .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 9 +- .../hadoop/hbase/quotas/OperationQuota.java | 43 +- .../hadoop/hbase/quotas/QuotaCache.java | 77 +- .../hadoop/hbase/quotas/QuotaLimiter.java | 52 +- .../hbase/quotas/QuotaLimiterFactory.java | 6 +- .../hbase/quotas/QuotaObserverChore.java | 261 +- .../hbase/quotas/QuotaSnapshotStore.java | 39 +- .../hadoop/hbase/quotas/QuotaState.java | 21 +- 
.../apache/hadoop/hbase/quotas/QuotaUtil.java | 163 +- .../hadoop/hbase/quotas/RateLimiter.java | 89 +- .../quotas/RegionServerRpcQuotaManager.java | 88 +- .../quotas/RegionServerSpaceQuotaManager.java | 97 +- .../hadoop/hbase/quotas/RegionSize.java | 20 +- .../hadoop/hbase/quotas/RegionSizeImpl.java | 26 +- .../quotas/RegionSizeReportingChore.java | 52 +- .../hadoop/hbase/quotas/RegionSizeStore.java | 30 +- .../hbase/quotas/RegionSizeStoreFactory.java | 18 +- .../hbase/quotas/RegionSizeStoreImpl.java | 31 +- .../hbase/quotas/RpcThrottleStorage.java | 1 - .../quotas/SnapshotQuotaObserverChore.java | 110 +- .../hbase/quotas/SpaceLimitingException.java | 17 +- .../quotas/SpaceQuotaRefresherChore.java | 80 +- .../quotas/SpaceQuotaSnapshotNotifier.java | 26 +- .../SpaceQuotaSnapshotNotifierFactory.java | 31 +- .../SpaceViolationPolicyEnforcement.java | 32 +- ...paceViolationPolicyEnforcementFactory.java | 43 +- .../hbase/quotas/TableQuotaSnapshotStore.java | 34 +- .../TableSpaceQuotaSnapshotNotifier.java | 18 +- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 29 +- .../hadoop/hbase/quotas/UserQuotaState.java | 51 +- .../AbstractViolationPolicyEnforcement.java | 29 +- .../DefaultViolationPolicyEnforcement.java | 33 +- ...isableTableViolationPolicyEnforcement.java | 22 +- ...ingSnapshotViolationPolicyEnforcement.java | 29 +- .../NoInsertsViolationPolicyEnforcement.java | 27 +- ...CompactionsViolationPolicyEnforcement.java | 26 +- .../NoWritesViolationPolicyEnforcement.java | 29 +- .../hbase/regionserver/AbstractMemStore.java | 116 +- .../regionserver/AbstractMultiFileWriter.java | 14 +- .../AdaptiveMemStoreCompactionStrategy.java | 53 +- .../AnnotationReadingPriorityFunction.java | 120 +- .../hbase/regionserver/BaseRowProcessor.java | 12 +- .../BasicMemStoreCompactionStrategy.java | 10 +- .../regionserver/BrokenStoreFileCleaner.java | 67 +- .../regionserver/BusyRegionSplitPolicy.java | 46 +- .../regionserver/ByteBufferChunkKeyValue.java | 7 +- .../regionserver/CSLMImmutableSegment.java | 18 +- .../CellArrayImmutableSegment.java | 73 +- .../hbase/regionserver/CellArrayMap.java | 26 +- .../CellChunkImmutableSegment.java | 117 +- .../hbase/regionserver/CellChunkMap.java | 94 +- .../hbase/regionserver/CellFlatMap.java | 108 +- .../hadoop/hbase/regionserver/CellSet.java | 26 +- .../hadoop/hbase/regionserver/CellSink.java | 11 +- .../regionserver/ChangedReadersObserver.java | 9 +- .../hadoop/hbase/regionserver/Chunk.java | 23 +- .../hbase/regionserver/ChunkCreator.java | 146 +- .../hbase/regionserver/CompactSplit.java | 207 +- .../CompactedHFilesDischargeHandler.java | 6 +- .../CompactedHFilesDischarger.java | 40 +- .../regionserver/CompactingMemStore.java | 196 +- .../regionserver/CompactionPipeline.java | 132 +- .../CompositeImmutableSegment.java | 32 +- .../ConstantSizeRegionSplitPolicy.java | 30 +- .../CreateStoreFileWriterParams.java | 2 +- .../CustomizedScanInfoBuilder.java | 8 +- .../DateTieredMultiFileWriter.java | 9 +- .../regionserver/DateTieredStoreEngine.java | 27 +- .../regionserver/DefaultHeapMemoryTuner.java | 266 +- .../hbase/regionserver/DefaultMemStore.java | 92 +- .../regionserver/DefaultStoreEngine.java | 69 +- .../regionserver/DefaultStoreFileManager.java | 33 +- .../regionserver/DefaultStoreFlusher.java | 11 +- .../DelimitedKeyPrefixRegionSplitPolicy.java | 46 +- ...imitedKeyPrefixRegionSplitRestriction.java | 19 +- .../DisabledRegionSplitPolicy.java | 5 +- .../regionserver/DumpRegionServerMetrics.java | 21 +- .../EagerMemStoreCompactionStrategy.java | 5 +- 
.../regionserver/FavoredNodesForRegion.java | 22 +- .../regionserver/FifoRpcSchedulerFactory.java | 6 +- .../FlushAllLargeStoresPolicy.java | 3 +- .../regionserver/FlushAllStoresPolicy.java | 1 - .../regionserver/FlushLargeStoresPolicy.java | 62 +- .../regionserver/FlushLifeCycleTracker.java | 2 +- .../FlushNonSloppyStoresFirstPolicy.java | 8 +- .../hbase/regionserver/FlushPolicy.java | 3 +- .../regionserver/FlushPolicyFactory.java | 18 +- .../regionserver/FlushRequestListener.java | 7 +- .../hbase/regionserver/FlushRequester.java | 29 +- .../hadoop/hbase/regionserver/FlushType.java | 19 +- .../hadoop/hbase/regionserver/HMobStore.java | 208 +- .../hadoop/hbase/regionserver/HRegion.java | 2761 ++++++++--------- .../hbase/regionserver/HRegionFileSystem.java | 398 ++- .../hbase/regionserver/HRegionServer.java | 1065 +++---- .../HRegionServerCommandLine.java | 19 +- .../regionserver/HRegionWALFileSystem.java | 2 +- .../hadoop/hbase/regionserver/HStore.java | 706 ++--- .../hadoop/hbase/regionserver/HStoreFile.java | 132 +- .../hbase/regionserver/HeapMemoryManager.java | 168 +- .../hbase/regionserver/HeapMemoryTuner.java | 13 +- .../regionserver/ImmutableMemStoreLAB.java | 36 +- .../hbase/regionserver/ImmutableSegment.java | 35 +- ...creasingToUpperBoundRegionSplitPolicy.java | 44 +- .../InputStreamBlockDistribution.java | 47 +- .../hbase/regionserver/InternalScan.java | 32 +- .../hbase/regionserver/InternalScanner.java | 33 +- .../KeyPrefixRegionSplitPolicy.java | 39 +- .../KeyPrefixRegionSplitRestriction.java | 18 +- .../hbase/regionserver/KeyValueHeap.java | 156 +- .../hbase/regionserver/KeyValueScanner.java | 105 +- .../hbase/regionserver/LastSequenceId.java | 4 +- .../hbase/regionserver/LeaseListener.java | 14 +- .../hbase/regionserver/LeaseManager.java | 78 +- .../hadoop/hbase/regionserver/LogRoller.java | 12 +- .../MasterFifoRpcSchedulerFactory.java | 4 +- .../hadoop/hbase/regionserver/MemStore.java | 64 +- .../MemStoreCompactionStrategy.java | 54 +- .../hbase/regionserver/MemStoreCompactor.java | 137 +- .../MemStoreCompactorSegmentsIterator.java | 22 +- .../hbase/regionserver/MemStoreFlusher.java | 300 +- .../hbase/regionserver/MemStoreLAB.java | 43 +- .../hbase/regionserver/MemStoreLABImpl.java | 62 +- .../MemStoreMergerSegmentsIterator.java | 23 +- .../MemStoreSegmentsIterator.java | 16 +- .../hbase/regionserver/MemStoreSize.java | 27 +- .../hbase/regionserver/MemStoreSizing.java | 44 +- .../hbase/regionserver/MemStoreSnapshot.java | 3 +- .../MetricsHeapMemoryManager.java | 28 +- .../hbase/regionserver/MetricsRegion.java | 37 +- .../regionserver/MetricsRegionServer.java | 53 +- .../MetricsRegionServerWrapperImpl.java | 99 +- .../MetricsRegionWrapperImpl.java | 27 +- .../hbase/regionserver/MetricsTable.java | 46 +- .../MetricsTableWrapperAggregateImpl.java | 39 +- .../regionserver/MetricsUserAggregate.java | 3 +- .../MetricsUserAggregateFactory.java | 35 +- .../MetricsUserAggregateImpl.java | 17 +- .../MiniBatchOperationInProgress.java | 48 +- .../regionserver/MobReferenceOnlyFilter.java | 5 +- .../hbase/regionserver/MobStoreScanner.java | 12 +- .../MultiVersionConcurrencyControl.java | 70 +- .../regionserver/MutableOnlineRegions.java | 12 +- .../hbase/regionserver/MutableSegment.java | 27 +- .../regionserver/NoLimitScannerContext.java | 2 +- .../NoTagByteBufferChunkKeyValue.java | 6 +- .../regionserver/NonLazyKeyValueScanner.java | 19 +- .../NonReversedNonLazyKeyValueScanner.java | 50 +- .../NonThreadSafeMemStoreSizing.java | 10 +- .../hbase/regionserver/OffheapChunk.java | 3 +- 
.../hbase/regionserver/OnheapChunk.java | 3 +- .../hbase/regionserver/OnlineRegions.java | 36 +- .../hbase/regionserver/OperationStatus.java | 22 +- .../hbase/regionserver/RSRpcServices.java | 973 +++--- .../hadoop/hbase/regionserver/Region.java | 370 +-- .../regionserver/RegionCoprocessorHost.java | 838 +++-- .../hbase/regionserver/RegionScanner.java | 29 +- .../hbase/regionserver/RegionScannerImpl.java | 44 +- .../regionserver/RegionServerAccounting.java | 22 +- .../RegionServerCoprocessorHost.java | 97 +- .../regionserver/RegionServerServices.java | 35 +- .../RegionServerTableMetrics.java | 19 +- .../regionserver/RegionServicesForStores.java | 22 +- .../hbase/regionserver/RegionSplitPolicy.java | 93 +- .../regionserver/RegionSplitRestriction.java | 21 +- .../RemoteProcedureResultReporter.java | 8 +- .../regionserver/ReplicationSinkService.java | 26 +- .../ReplicationSourceService.java | 7 +- .../regionserver/ReversedKeyValueHeap.java | 92 +- .../regionserver/ReversedMobStoreScanner.java | 12 +- .../ReversedRegionScannerImpl.java | 37 +- .../regionserver/ReversedStoreScanner.java | 70 +- .../hbase/regionserver/RowProcessor.java | 88 +- .../regionserver/RowTooBigException.java | 13 +- .../regionserver/RpcSchedulerFactory.java | 8 +- .../hadoop/hbase/regionserver/ScanInfo.java | 90 +- .../hbase/regionserver/ScanOptions.java | 11 +- .../hadoop/hbase/regionserver/ScanType.java | 2 +- .../hbase/regionserver/ScannerContext.java | 71 +- .../regionserver/ScannerIdGenerator.java | 21 +- .../regionserver/SecureBulkLoadManager.java | 145 +- .../hadoop/hbase/regionserver/Segment.java | 70 +- .../hbase/regionserver/SegmentFactory.java | 96 +- .../hbase/regionserver/SegmentScanner.java | 110 +- .../hadoop/hbase/regionserver/SequenceId.java | 4 +- .../regionserver/ServerNonceManager.java | 54 +- .../hadoop/hbase/regionserver/Shipper.java | 3 +- .../hbase/regionserver/ShipperListener.java | 6 +- .../hbase/regionserver/ShutdownHook.java | 133 +- .../SimpleRpcSchedulerFactory.java | 29 +- .../regionserver/SnapshotSegmentScanner.java | 18 +- .../hbase/regionserver/SplitLogWorker.java | 51 +- .../hbase/regionserver/SplitRequest.java | 25 +- .../hbase/regionserver/SplitWALCallable.java | 21 +- .../regionserver/SteppingSplitPolicy.java | 11 +- .../hadoop/hbase/regionserver/Store.java | 25 +- .../regionserver/StoreConfigInformation.java | 20 +- .../hbase/regionserver/StoreContext.java | 4 +- .../hbase/regionserver/StoreEngine.java | 37 +- .../hadoop/hbase/regionserver/StoreFile.java | 3 +- .../regionserver/StoreFileComparators.java | 2 +- .../hbase/regionserver/StoreFileInfo.java | 191 +- .../hbase/regionserver/StoreFileManager.java | 56 +- .../hbase/regionserver/StoreFileReader.java | 215 +- .../hbase/regionserver/StoreFileScanner.java | 125 +- .../hbase/regionserver/StoreFileWriter.java | 144 +- .../hbase/regionserver/StoreFlushContext.java | 42 +- .../hbase/regionserver/StoreFlusher.java | 40 +- .../hbase/regionserver/StoreScanner.java | 291 +- .../hadoop/hbase/regionserver/StoreUtils.java | 30 +- .../regionserver/StorefileRefresherChore.java | 55 +- .../regionserver/StripeMultiFileWriter.java | 111 +- .../hbase/regionserver/StripeStoreConfig.java | 72 +- .../hbase/regionserver/StripeStoreEngine.java | 25 +- .../regionserver/StripeStoreFileManager.java | 249 +- .../regionserver/StripeStoreFlusher.java | 25 +- .../ThreadSafeMemStoreSizing.java | 7 +- .../hbase/regionserver/TimeRangeTracker.java | 63 +- .../regionserver/VersionedSegmentsList.java | 20 +- .../AbstractMultiOutputCompactor.java | 12 +- 
.../compactions/CloseChecker.java | 8 +- .../compactions/CompactionConfiguration.java | 124 +- .../compactions/CompactionContext.java | 26 +- .../CompactionLifeCycleTracker.java | 2 +- .../compactions/CompactionPolicy.java | 15 +- .../compactions/CompactionProgress.java | 32 +- .../compactions/CompactionRequest.java | 6 +- .../compactions/CompactionRequestImpl.java | 36 +- .../compactions/CompactionRequester.java | 12 +- .../compactions/CompactionWindow.java | 2 +- .../compactions/CompactionWindowFactory.java | 2 +- .../regionserver/compactions/Compactor.java | 161 +- .../compactions/CurrentHourProvider.java | 2 +- .../DateTieredCompactionPolicy.java | 102 +- .../DateTieredCompactionRequest.java | 10 +- .../compactions/DateTieredCompactor.java | 24 +- .../compactions/DefaultCompactor.java | 10 +- .../ExploringCompactionPolicy.java | 70 +- .../ExponentialCompactionWindowFactory.java | 16 +- .../compactions/FIFOCompactionPolicy.java | 42 +- .../compactions/OffPeakHours.java | 24 +- .../RatioBasedCompactionPolicy.java | 100 +- .../compactions/SortedCompactionPolicy.java | 92 +- .../compactions/StripeCompactionPolicy.java | 177 +- .../compactions/StripeCompactor.java | 50 +- .../handler/AssignRegionHandler.java | 28 +- .../handler/CloseMetaHandler.java | 9 +- .../handler/CloseRegionHandler.java | 49 +- .../regionserver/handler/HandlerUtil.java | 2 +- .../regionserver/handler/OpenMetaHandler.java | 8 +- .../handler/OpenPriorityRegionHandler.java | 7 +- .../handler/OpenRegionHandler.java | 126 +- .../handler/ParallelSeekHandler.java | 14 +- .../handler/RegionReplicaFlushHandler.java | 78 +- .../handler/UnassignRegionHandler.java | 38 +- .../handler/WALSplitterHandler.java | 71 +- .../regionserver/http/RSDumpServlet.java | 31 +- .../regionserver/http/RSStatusServlet.java | 24 +- .../querymatcher/ColumnCount.java | 7 +- .../querymatcher/ColumnTracker.java | 92 +- .../CompactionScanQueryMatcher.java | 35 +- .../querymatcher/DeleteTracker.java | 8 +- ...DropDeletesCompactionScanQueryMatcher.java | 14 +- .../querymatcher/ExplicitColumnTracker.java | 15 +- .../IncludeAllCompactionQueryMatcher.java | 9 +- .../MajorCompactionScanQueryMatcher.java | 9 +- .../MinorCompactionScanQueryMatcher.java | 7 +- .../NewVersionBehaviorTracker.java | 100 +- .../NormalUserScanQueryMatcher.java | 25 +- .../querymatcher/RawScanQueryMatcher.java | 17 +- .../querymatcher/ScanDeleteTracker.java | 21 +- .../querymatcher/ScanQueryMatcher.java | 43 +- .../ScanWildcardColumnTracker.java | 27 +- .../StripeCompactionScanQueryMatcher.java | 20 +- .../querymatcher/UserScanQueryMatcher.java | 40 +- .../snapshot/FlushSnapshotSubprocedure.java | 61 +- .../snapshot/RegionServerSnapshotManager.java | 176 +- .../DefaultStoreFileTracker.java | 3 +- .../FileBasedStoreFileTracker.java | 8 +- .../InitializeStoreFileTrackerProcedure.java | 5 +- .../MigrationStoreFileTracker.java | 2 +- ...ColumnFamilyStoreFileTrackerProcedure.java | 2 +- .../ModifyStoreFileTrackerProcedure.java | 12 +- .../ModifyTableStoreFileTrackerProcedure.java | 2 +- .../storefiletracker/StoreFileListFile.java | 6 +- .../storefiletracker/StoreFileTracker.java | 2 +- .../StoreFileTrackerBase.java | 30 +- .../StoreFileTrackerFactory.java | 20 +- .../StoreFileTrackerValidationUtils.java | 74 +- ...CompactionThroughputControllerFactory.java | 30 +- .../FlushThroughputControllerFactory.java | 32 +- .../throttle/NoLimitThroughputController.java | 4 +- ...reAwareCompactionThroughputController.java | 69 +- ...ressureAwareFlushThroughputController.java | 59 +- 
.../PressureAwareThroughputController.java | 35 +- .../throttle/StoreHotnessProtector.java | 74 +- .../throttle/ThroughputControlUtil.java | 11 +- .../throttle/ThroughputController.java | 8 +- .../hbase/regionserver/wal/AbstractFSWAL.java | 152 +- .../wal/AbstractProtobufLogWriter.java | 52 +- .../hbase/regionserver/wal/AsyncFSWAL.java | 92 +- .../wal/AsyncProtobufLogWriter.java | 48 +- .../regionserver/wal/CompressionContext.java | 53 +- .../hbase/regionserver/wal/Compressor.java | 63 +- .../regionserver/wal/DamagedWALException.java | 6 +- .../hadoop/hbase/regionserver/wal/FSHLog.java | 200 +- .../hbase/regionserver/wal/FSWALEntry.java | 24 +- .../hbase/regionserver/wal/MetricsWAL.java | 23 +- .../regionserver/wal/ProtobufLogReader.java | 154 +- .../regionserver/wal/ProtobufLogWriter.java | 17 +- .../hbase/regionserver/wal/ReaderBase.java | 33 +- .../regionserver/wal/RingBufferTruck.java | 7 +- .../wal/SecureAsyncProtobufLogWriter.java | 11 +- .../wal/SecureProtobufLogReader.java | 20 +- .../wal/SecureProtobufLogWriter.java | 7 +- .../regionserver/wal/SecureWALCellCodec.java | 12 +- .../wal/SequenceIdAccounting.java | 121 +- .../hbase/regionserver/wal/SyncFuture.java | 21 +- .../regionserver/wal/SyncFutureCache.java | 26 +- .../regionserver/wal/WALActionsListener.java | 61 +- .../hbase/regionserver/wal/WALCellCodec.java | 92 +- .../regionserver/wal/WALCoprocessorHost.java | 46 +- .../hbase/regionserver/wal/WALUtil.java | 56 +- .../replication/BaseReplicationEndpoint.java | 37 +- .../hbase/replication/BulkLoadCellFilter.java | 26 +- .../replication/ChainWALEntryFilter.java | 11 +- .../ClusterMarkingEntryFilter.java | 31 +- .../replication/HBaseReplicationEndpoint.java | 34 +- .../NamespaceTableCfWALEntryFilter.java | 4 +- .../replication/ReplicationEndpoint.java | 123 +- .../ReplicationSinkServiceImpl.java | 10 +- .../replication/ScopeWALEntryFilter.java | 3 +- .../SystemTableWALEntryFilter.java | 4 +- .../VerifyWALEntriesReplicationEndpoint.java | 2 +- .../hbase/replication/WALCellFilter.java | 14 +- .../hbase/replication/WALEntryFilter.java | 18 +- .../master/ReplicationHFileCleaner.java | 18 +- .../master/ReplicationLogCleaner.java | 10 +- .../master/ReplicationPeerConfigUpgrader.java | 21 +- .../CatalogReplicationSourcePeer.java | 24 +- .../ClaimReplicationQueueCallable.java | 2 +- .../DefaultSourceFSConfigurationProvider.java | 35 +- .../regionserver/DumpReplicationQueues.java | 57 +- .../HBaseInterClusterReplicationEndpoint.java | 144 +- .../regionserver/HFileReplicator.java | 91 +- .../replication/regionserver/MetricsSink.java | 31 +- .../regionserver/MetricsSource.java | 85 +- .../NoopReplicationQueueStorage.java | 42 +- .../regionserver/PeerProcedureHandler.java | 3 +- .../PeerProcedureHandlerImpl.java | 10 +- .../RecoveredReplicationSource.java | 19 +- .../RecoveredReplicationSourceShipper.java | 10 +- .../regionserver/RefreshPeerCallable.java | 2 +- .../RegionReplicaReplicationEndpoint.java | 178 +- .../replication/regionserver/Replication.java | 63 +- .../regionserver/ReplicationLoad.java | 50 +- .../regionserver/ReplicationObserver.java | 23 +- .../ReplicationRuntimeException.java | 8 +- .../regionserver/ReplicationSink.java | 156 +- .../regionserver/ReplicationSinkManager.java | 56 +- .../regionserver/ReplicationSource.java | 190 +- .../ReplicationSourceFactory.java | 17 +- .../ReplicationSourceInterface.java | 42 +- .../ReplicationSourceLogQueue.java | 25 +- .../ReplicationSourceManager.java | 162 +- .../ReplicationSourceShipper.java | 87 +- 
.../ReplicationSourceWALActionListener.java | 9 +- .../ReplicationSourceWALReader.java | 80 +- .../regionserver/ReplicationStatus.java | 2 +- .../regionserver/ReplicationSyncUp.java | 8 +- .../regionserver/ReplicationThrottler.java | 40 +- .../SerialReplicationChecker.java | 16 +- .../SerialReplicationSourceWALReader.java | 8 +- .../SourceFSConfigurationProvider.java | 28 +- .../SwitchRpcThrottleRemoteCallable.java | 2 +- .../regionserver/WALEntryBatch.java | 11 +- .../WALEntryFilterRetryableException.java | 9 +- .../regionserver/WALEntrySinkFilter.java | 24 +- .../regionserver/WALEntryStream.java | 44 +- .../regionserver/WALFileLengthProvider.java | 11 +- .../hbase/security/HBasePolicyProvider.java | 24 +- .../hbase/security/HBaseSaslRpcServer.java | 10 +- .../hadoop/hbase/security/SecurityUtil.java | 3 +- .../hbase/security/access/AccessChecker.java | 251 +- .../security/access/AccessControlFilter.java | 69 +- .../security/access/AccessController.java | 981 +++--- .../hbase/security/access/AuthManager.java | 140 +- .../hbase/security/access/AuthResult.java | 89 +- .../CoprocessorWhitelistMasterObserver.java | 73 +- .../security/access/NoopAccessChecker.java | 21 +- .../security/access/PermissionStorage.java | 237 +- .../access/SnapshotScannerHDFSAclCleaner.java | 9 +- .../SnapshotScannerHDFSAclController.java | 193 +- .../access/SnapshotScannerHDFSAclHelper.java | 180 +- .../security/access/ZKPermissionWatcher.java | 41 +- .../AttemptingUserProvidingSaslServer.java | 10 +- ...igestSaslServerAuthenticationProvider.java | 31 +- .../GssSaslServerAuthenticationProvider.java | 27 +- .../SaslServerAuthenticationProvider.java | 12 +- .../SaslServerAuthenticationProviders.java | 47 +- ...impleSaslServerAuthenticationProvider.java | 11 +- .../security/token/AuthenticationKey.java | 28 +- .../AuthenticationTokenSecretManager.java | 100 +- .../security/token/FsDelegationToken.java | 24 +- .../hbase/security/token/TokenProvider.java | 60 +- .../hbase/security/token/TokenUtil.java | 117 +- .../hbase/security/token/ZKSecretWatcher.java | 68 +- .../DefaultVisibilityLabelServiceImpl.java | 173 +- .../DefinedSetFilterScanLabelGenerator.java | 18 +- .../EnforcingScanLabelGenerator.java | 14 +- .../visibility/ExpressionExpander.java | 33 +- .../security/visibility/ExpressionParser.java | 75 +- .../FeedUserAuthScanLabelGenerator.java | 26 +- .../security/visibility/ParseException.java | 2 +- .../visibility/ScanLabelGenerator.java | 15 +- .../visibility/SimpleScanLabelGenerator.java | 5 +- .../visibility/VisibilityController.java | 220 +- .../visibility/VisibilityExpEvaluator.java | 6 +- .../visibility/VisibilityLabelFilter.java | 30 +- .../VisibilityLabelOrdinalProvider.java | 2 +- .../visibility/VisibilityLabelService.java | 121 +- .../VisibilityLabelServiceManager.java | 23 +- .../visibility/VisibilityLabelsCache.java | 25 +- .../VisibilityNewVersionBehaivorTracker.java | 67 +- .../visibility/VisibilityReplication.java | 20 +- .../VisibilityReplicationEndpoint.java | 27 +- .../VisibilityScanDeleteTracker.java | 70 +- .../security/visibility/VisibilityUtils.java | 102 +- .../visibility/ZKVisibilityLabelWatcher.java | 27 +- .../visibility/expression/ExpressionNode.java | 2 +- .../expression/LeafExpressionNode.java | 2 +- .../expression/NonLeafExpressionNode.java | 3 +- .../visibility/expression/Operator.java | 6 +- .../server/trace/IpcServerSpanBuilder.java | 25 +- .../hadoop/hbase/snapshot/CreateSnapshot.java | 69 +- .../hbase/snapshot/RestoreSnapshotHelper.java | 418 ++- 
.../snapshot/SnapshotDescriptionUtils.java | 181 +- .../hadoop/hbase/snapshot/SnapshotInfo.java | 325 +- .../hbase/snapshot/SnapshotManifest.java | 157 +- .../hbase/snapshot/SnapshotManifestV1.java | 57 +- .../hbase/snapshot/SnapshotManifestV2.java | 51 +- .../hbase/snapshot/SnapshotReferenceUtil.java | 161 +- .../hadoop/hbase/tool/BulkLoadHFiles.java | 14 +- .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 10 +- .../org/apache/hadoop/hbase/tool/Canary.java | 8 +- .../hbase/tool/CanaryStatusServlet.java | 7 +- .../apache/hadoop/hbase/tool/CanaryTool.java | 450 ++- .../tool/DataBlockEncodingValidator.java | 17 +- .../hbase/tool/HFileContentValidator.java | 8 +- .../hbase/tool/LoadIncrementalHFiles.java | 402 +-- .../hbase/tool/PreUpgradeValidator.java | 14 +- .../hbase/tool/WriteSinkCoprocessor.java | 32 +- .../Branch1CoprocessorMethods.java | 1590 ++++------ .../tool/coprocessor/CoprocessorMethod.java | 14 +- .../tool/coprocessor/CoprocessorMethods.java | 20 +- .../coprocessor/CoprocessorValidator.java | 67 +- .../coprocessor/CoprocessorViolation.java | 18 +- .../CurrentCoprocessorMethods.java | 4 +- .../hbase/util/AbstractFileStatusFilter.java | 28 +- .../hadoop/hbase/util/BloomContext.java | 18 +- .../apache/hadoop/hbase/util/BloomFilter.java | 82 +- .../hadoop/hbase/util/BloomFilterBase.java | 4 +- .../hadoop/hbase/util/BloomFilterChunk.java | 80 +- .../hadoop/hbase/util/BloomFilterFactory.java | 124 +- .../hadoop/hbase/util/BloomFilterUtil.java | 168 +- .../hadoop/hbase/util/BloomFilterWriter.java | 15 +- .../util/BoundedPriorityBlockingQueue.java | 61 +- .../hbase/util/CollectionBackedScanner.java | 22 +- .../hadoop/hbase/util/CompressionTest.java | 59 +- .../hadoop/hbase/util/ConfigurationUtil.java | 38 +- .../hadoop/hbase/util/ConnectionCache.java | 45 +- .../hadoop/hbase/util/DirectMemoryUtils.java | 44 +- .../hadoop/hbase/util/EncryptionTest.java | 69 +- .../hadoop/hbase/util/FSRegionScanner.java | 43 +- .../hadoop/hbase/util/FSTableDescriptors.java | 172 +- .../org/apache/hadoop/hbase/util/FSUtils.java | 730 ++--- .../apache/hadoop/hbase/util/FSVisitor.java | 31 +- .../hadoop/hbase/util/FileStatusFilter.java | 13 +- .../hadoop/hbase/util/GetJavaProperty.java | 12 +- .../hadoop/hbase/util/HBaseConfTool.java | 7 +- .../apache/hadoop/hbase/util/HBaseFsck.java | 1232 ++++---- .../hadoop/hbase/util/HBaseFsckRepair.java | 92 +- .../hadoop/hbase/util/HFileArchiveUtil.java | 32 +- .../apache/hadoop/hbase/util/HashedBytes.java | 12 +- .../hadoop/hbase/util/HbckErrorReporter.java | 51 +- .../hadoop/hbase/util/HbckRegionInfo.java | 37 +- .../hadoop/hbase/util/HbckTableInfo.java | 317 +- .../hadoop/hbase/util/IdReadWriteLock.java | 11 +- .../hadoop/hbase/util/JVMClusterUtil.java | 149 +- .../hadoop/hbase/util/JvmPauseMonitor.java | 54 +- .../apache/hadoop/hbase/util/JvmVersion.java | 10 +- .../apache/hadoop/hbase/util/KeyRange.java | 3 +- .../util/LeaseNotRecoveredException.java | 10 +- .../hadoop/hbase/util/LossyCounting.java | 41 +- .../hbase/util/ManualEnvironmentEdge.java | 5 +- .../hadoop/hbase/util/ModifyRegionUtils.java | 95 +- .../apache/hadoop/hbase/util/MoveWithAck.java | 29 +- .../hadoop/hbase/util/MoveWithoutAck.java | 11 +- .../hadoop/hbase/util/MultiHConnection.java | 40 +- .../hadoop/hbase/util/MunkresAssignment.java | 69 +- .../hbase/util/NettyEventLoopGroupConfig.java | 11 +- .../apache/hadoop/hbase/util/RegionMover.java | 161 +- .../hbase/util/RegionSplitCalculator.java | 90 +- .../hadoop/hbase/util/RegionSplitter.java | 505 ++- .../hbase/util/RollingStatCalculator.java | 60 +-
.../hadoop/hbase/util/RowBloomContext.java | 3 +- .../hadoop/hbase/util/RowColBloomContext.java | 7 +- .../RowPrefixFixedLengthBloomContext.java | 13 +- .../hadoop/hbase/util/ServerCommandLine.java | 52 +- .../hbase/util/ServerRegionReplicaUtil.java | 88 +- .../hbase/util/ShutdownHookManager.java | 21 +- .../apache/hadoop/hbase/util/SortedList.java | 48 +- .../hadoop/hbase/util/StealJobQueue.java | 29 +- .../hbase/util/TableDescriptorChecker.java | 83 +- .../hbase/util/YammerHistogramUtils.java | 68 +- .../hadoop/hbase/util/ZKDataMigrator.java | 74 +- .../compaction/ClusterCompactionQueues.java | 8 +- .../compaction/MajorCompactionRequest.java | 50 +- .../compaction/MajorCompactionTTLRequest.java | 14 +- .../hbase/util/compaction/MajorCompactor.java | 209 +- .../util/compaction/MajorCompactorTTL.java | 33 +- .../util/hbck/HFileCorruptionChecker.java | 144 +- .../hbase/util/hbck/OfflineMetaRepair.java | 9 +- .../hbase/util/hbck/ReplicationChecker.java | 20 +- .../util/hbck/TableIntegrityErrorHandler.java | 52 +- .../hbck/TableIntegrityErrorHandlerImpl.java | 19 +- .../hbase/wal/AbstractFSWALProvider.java | 36 +- .../wal/AbstractRecoveredEditsOutputSink.java | 44 +- .../hadoop/hbase/wal/AbstractWALRoller.java | 29 +- .../hadoop/hbase/wal/AsyncFSWALProvider.java | 43 +- .../hadoop/hbase/wal/BoundedEntryBuffers.java | 11 +- .../hbase/wal/BoundedGroupingStrategy.java | 8 +- .../wal/BoundedRecoveredEditsOutputSink.java | 20 +- .../wal/BoundedRecoveredHFilesOutputSink.java | 36 +- .../hadoop/hbase/wal/DisabledWALProvider.java | 31 +- .../apache/hadoop/hbase/wal/EntryBuffers.java | 10 +- .../hadoop/hbase/wal/FSHLogProvider.java | 37 +- .../hbase/wal/NamespaceGroupingStrategy.java | 11 +- .../wal/NettyAsyncFSWALConfigHelper.java | 11 +- .../apache/hadoop/hbase/wal/OutputSink.java | 18 +- .../hbase/wal/RecoveredEditsOutputSink.java | 23 +- .../hbase/wal/RegionGroupingProvider.java | 48 +- .../java/org/apache/hadoop/hbase/wal/WAL.java | 161 +- .../org/apache/hadoop/hbase/wal/WALEdit.java | 194 +- .../apache/hadoop/hbase/wal/WALFactory.java | 131 +- .../org/apache/hadoop/hbase/wal/WALKey.java | 49 +- .../apache/hadoop/hbase/wal/WALKeyImpl.java | 345 +- .../hadoop/hbase/wal/WALPrettyPrinter.java | 177 +- .../apache/hadoop/hbase/wal/WALProvider.java | 33 +- .../apache/hadoop/hbase/wal/WALSplitUtil.java | 102 +- .../apache/hadoop/hbase/wal/WALSplitter.java | 113 +- .../src/main/python/hbase/merge_conf.py | 1 - .../hadoop/hbase/AcidGuaranteesTestBase.java | 10 +- .../hadoop/hbase/AcidGuaranteesTestTool.java | 19 +- .../ClearUserNamespacesAndTablesRule.java | 91 +- .../apache/hadoop/hbase/ConnectionRule.java | 33 +- .../apache/hadoop/hbase/GenericTestUtils.java | 87 +- .../org/apache/hadoop/hbase/HBaseCluster.java | 159 +- .../apache/hadoop/hbase/HBaseTestCase.java | 224 +- .../hadoop/hbase/HBaseTestingUtility.java | 2095 ++++++------- .../hbase/HFilePerformanceEvaluation.java | 193 +- .../org/apache/hadoop/hbase/HTestConst.java | 25 +- .../apache/hadoop/hbase/MetaMockingUtil.java | 67 +- .../apache/hadoop/hbase/MiniClusterRule.java | 22 +- .../apache/hadoop/hbase/MiniHBaseCluster.java | 280 +- .../hbase/MockRegionServerServices.java | 14 +- .../hadoop/hbase/MultithreadedTestUtil.java | 32 +- .../hbase/PerformanceEvaluationCommons.java | 34 +- .../hadoop/hbase/ProcedureTestUtil.java | 8 +- .../apache/hadoop/hbase/QosTestHelper.java | 16 +- .../hadoop/hbase/StartMiniClusterOption.java | 20 +- .../TestAcidGuaranteesWithAdaptivePolicy.java | 4 +- .../TestAcidGuaranteesWithBasicPolicy.java | 4 +-
.../TestAcidGuaranteesWithEagerPolicy.java | 2 +- ...stAcidGuaranteesWithNoInMemCompaction.java | 2 +- .../hadoop/hbase/TestCachedClusterId.java | 8 +- .../hadoop/hbase/TestCheckTestClasses.java | 11 +- .../hbase/TestClientClusterMetrics.java | 162 +- .../hadoop/hbase/TestClientClusterStatus.java | 40 +- .../hbase/TestClientOperationTimeout.java | 14 +- .../hadoop/hbase/TestClusterBootOrder.java | 22 +- .../hbase/TestClusterPortAssignment.java | 14 +- .../TestFSTableDescriptorForceCreation.java | 22 +- .../hbase/TestFullLogReconstruction.java | 4 +- .../hadoop/hbase/TestGlobalMemStoreSize.java | 66 +- .../hadoop/hbase/TestHBaseMetaEdit.java | 43 +- .../hbase/TestHBaseOnOtherDfsCluster.java | 8 +- .../hadoop/hbase/TestHBaseTestingUtility.java | 97 +- .../TestHColumnDescriptorDefaultVersions.java | 14 +- .../hbase/TestHDFSBlocksDistribution.java | 42 +- .../hadoop/hbase/TestHRegionLocation.java | 24 +- .../apache/hadoop/hbase/TestIOFencing.java | 109 +- .../hbase/TestIPv6NIOServerSocketChannel.java | 56 +- .../apache/hadoop/hbase/TestInfoServers.java | 31 +- .../hadoop/hbase/TestJMXConnectorServer.java | 8 +- .../apache/hadoop/hbase/TestJMXListener.java | 26 +- .../hadoop/hbase/TestLocalHBaseCluster.java | 27 +- .../hadoop/hbase/TestMetaTableAccessor.java | 373 +-- .../hbase/TestMetaTableAccessorNoCluster.java | 111 +- .../hadoop/hbase/TestMetaTableLocator.java | 4 +- .../hadoop/hbase/TestMovedRegionCache.java | 15 +- .../hadoop/hbase/TestMultiVersions.java | 79 +- .../apache/hadoop/hbase/TestNamespace.java | 74 +- .../hbase/TestNodeHealthCheckChore.java | 19 +- .../TestPartialResultsFromClientSide.java | 174 +- .../apache/hadoop/hbase/TestRegionLoad.java | 71 +- .../hadoop/hbase/TestRegionMetrics.java | 42 +- .../hadoop/hbase/TestRegionRebalancing.java | 99 +- ...TestSequenceIdMonotonicallyIncreasing.java | 6 +- .../hadoop/hbase/TestSerialization.java | 347 +-- .../apache/hadoop/hbase/TestServerLoad.java | 9 +- .../hadoop/hbase/TestServerMetrics.java | 71 +- ...stServerSideScanMetricsFromClientSide.java | 38 +- .../org/apache/hadoop/hbase/TestSize.java | 22 +- .../apache/hadoop/hbase/TestSplitMerge.java | 18 +- .../hadoop/hbase/TestTagRewriteCell.java | 15 +- .../apache/hadoop/hbase/TestZooKeeper.java | 46 +- .../hadoop/hbase/TimestampTestBase.java | 142 +- .../hbase/backup/TestHFileArchiving.java | 123 +- .../TestZooKeeperTableArchiveClient.java | 50 +- ...tractTestAsyncTableRegionReplicasRead.java | 8 +- .../client/AbstractTestAsyncTableScan.java | 53 +- .../AbstractTestCIOperationTimeout.java | 8 +- .../client/AbstractTestCIRpcTimeout.java | 12 +- .../hbase/client/AbstractTestCITimeout.java | 18 +- .../client/AbstractTestRegionLocator.java | 10 +- .../AbstractTestResultScannerCursor.java | 11 +- .../hbase/client/AbstractTestScanCursor.java | 17 +- .../AbstractTestUpdateConfiguration.java | 33 +- .../client/BufferingScanResultConsumer.java | 7 +- .../hbase/client/ClientPushbackTestBase.java | 8 +- ...romClientAfterSplittingRegionTestBase.java | 4 +- ...omClientCloneLinksAfterDeleteTestBase.java | 4 +- .../CloneSnapshotFromClientErrorTestBase.java | 2 +- ...CloneSnapshotFromClientNormalTestBase.java | 4 +- .../CloneSnapshotFromClientTestBase.java | 4 +- .../hbase/client/ColumnCountOnRowFilter.java | 5 +- .../hbase/client/FromClientSideBase.java | 599 ++-- .../client/HConnectionTestingUtility.java | 143 +- .../client/MetaWithReplicasTestBase.java | 2 +- .../hbase/client/RegionReplicaTestHelper.java | 17 +- ...omClientAfterSplittingRegionsTestBase.java | 4 +- 
...apshotFromClientAfterTruncateTestBase.java | 4 +- ...estoreSnapshotFromClientCloneTestBase.java | 2 +- ...tFromClientGetCompactionStateTestBase.java | 6 +- ...napshotFromClientSchemaChangeTestBase.java | 4 +- ...storeSnapshotFromClientSimpleTestBase.java | 2 +- .../RestoreSnapshotFromClientTestBase.java | 4 +- .../hbase/client/SnapshotWithAclTestBase.java | 15 +- .../apache/hadoop/hbase/client/TestAdmin.java | 15 +- .../hadoop/hbase/client/TestAdmin1.java | 31 +- .../hadoop/hbase/client/TestAdmin2.java | 187 +- .../hadoop/hbase/client/TestAdmin3.java | 21 +- .../hadoop/hbase/client/TestAdmin4.java | 14 +- .../hadoop/hbase/client/TestAdminBase.java | 2 +- .../TestAllowPartialScanResultCache.java | 6 +- .../hbase/client/TestAlwaysSetScannerId.java | 4 +- .../client/TestAppendFromClientSide.java | 10 +- .../TestAsyncAccessControlAdminApi.java | 53 +- .../hbase/client/TestAsyncAdminBase.java | 19 +- .../hbase/client/TestAsyncAdminBuilder.java | 28 +- .../client/TestAsyncAdminMasterSwitch.java | 2 +- .../TestAsyncAdminModifyStoreFileTracker.java | 2 +- .../TestAsyncAdminWithRegionReplicas.java | 10 +- .../hbase/client/TestAsyncBufferMutator.java | 9 +- ...stAsyncClientPauseForServerOverloaded.java | 13 +- .../hbase/client/TestAsyncClientPushback.java | 2 +- .../client/TestAsyncClusterAdminApi.java | 58 +- .../client/TestAsyncClusterAdminApi2.java | 4 +- .../client/TestAsyncDecommissionAdminApi.java | 15 +- .../client/TestAsyncMetaRegionLocator.java | 4 +- .../client/TestAsyncNamespaceAdminApi.java | 11 +- .../client/TestAsyncNonMetaRegionLocator.java | 20 +- ...ncNonMetaRegionLocatorConcurrenyLimit.java | 22 +- .../client/TestAsyncProcedureAdminApi.java | 5 +- .../hbase/client/TestAsyncQuotaAdminApi.java | 40 +- .../hbase/client/TestAsyncRegionAdminApi.java | 113 +- .../client/TestAsyncRegionAdminApi2.java | 51 +- .../hbase/client/TestAsyncRegionLocator.java | 6 +- .../client/TestAsyncReplicationAdminApi.java | 6 +- ...tAsyncReplicationAdminApiWithClusters.java | 20 +- .../client/TestAsyncResultScannerCursor.java | 2 +- ...stAsyncSingleRequestRpcRetryingCaller.java | 6 +- .../client/TestAsyncSnapshotAdminApi.java | 19 +- .../hadoop/hbase/client/TestAsyncTable.java | 553 ++-- .../hbase/client/TestAsyncTableAdminApi.java | 52 +- .../hbase/client/TestAsyncTableAdminApi2.java | 21 +- .../hbase/client/TestAsyncTableAdminApi3.java | 31 +- .../hbase/client/TestAsyncTableBatch.java | 146 +- .../TestAsyncTableBatchRetryImmediately.java | 2 +- .../TestAsyncTableGetMultiThreaded.java | 26 +- ...leGetMultiThreadedWithBasicCompaction.java | 8 +- ...leGetMultiThreadedWithEagerCompaction.java | 8 +- .../client/TestAsyncTableLocatePrefetch.java | 2 +- ...AsyncTableLocateRegionForDeletedTable.java | 2 +- .../client/TestAsyncTableNoncedRetry.java | 119 +- .../client/TestAsyncTableRSCrashPublish.java | 34 +- .../client/TestAsyncTableRegionLocator.java | 6 +- .../TestAsyncTableRegionReplicasGet.java | 2 +- .../TestAsyncTableRegionReplicasScan.java | 2 +- .../hbase/client/TestAsyncTableScan.java | 46 +- .../hbase/client/TestAsyncTableScanAll.java | 22 +- .../client/TestAsyncTableScanException.java | 4 +- .../client/TestAsyncTableScanMetrics.java | 14 +- .../client/TestAsyncTableScanRenewLease.java | 6 +- .../hbase/client/TestAsyncTableScanner.java | 20 +- ...AsyncTableScannerCloseWhileSuspending.java | 9 +- .../client/TestAsyncTableUseMetaReplicas.java | 14 +- .../hbase/client/TestAsyncToolAdminApi.java | 4 +- ...tAvoidCellReferencesIntoShippedBlocks.java | 16 +- .../client/TestBatchScanResultCache.java | 26 +- 
.../client/TestBlockEvictionFromClient.java | 132 +- .../client/TestCIDeleteOperationTimeout.java | 4 +- .../hbase/client/TestCIDeleteRpcTimeout.java | 4 +- .../client/TestCIGetOperationTimeout.java | 4 +- .../hbase/client/TestCIGetRpcTimeout.java | 4 +- .../client/TestCIIncrementRpcTimeout.java | 4 +- .../client/TestCIPutOperationTimeout.java | 4 +- .../hbase/client/TestCIPutRpcTimeout.java | 4 +- .../hadoop/hbase/client/TestCISleep.java | 15 +- ...talogReplicaLoadBalanceSimpleSelector.java | 60 +- .../hbase/client/TestCheckAndMutate.java | 438 ++- .../TestCheckAndMutateWithByteBuff.java | 27 +- .../hbase/client/TestCleanupMetaReplica.java | 2 +- .../TestCleanupMetaReplicaThroughConfig.java | 2 +- .../client/TestClientOperationInterrupt.java | 21 +- .../hbase/client/TestClientPushback.java | 2 +- .../client/TestClientScannerRPCTimeout.java | 26 +- .../client/TestClientSideRegionScanner.java | 5 +- .../hbase/client/TestClientTimeouts.java | 50 +- ...napshotFromClientAfterSplittingRegion.java | 4 +- ...apshotFromClientCloneLinksAfterDelete.java | 4 +- .../TestCloneSnapshotFromClientCustomSFT.java | 8 +- .../TestCloneSnapshotFromClientError.java | 2 +- .../TestCloneSnapshotFromClientNormal.java | 2 +- .../TestCompleteResultScanResultCache.java | 6 +- .../client/TestConnectionImplementation.java | 310 +- .../hbase/client/TestConnectionUtils.java | 12 +- .../hbase/client/TestDropTimeoutRequest.java | 17 +- .../hadoop/hbase/client/TestEnableTable.java | 41 +- .../TestFailedMetaReplicaAssigment.java | 2 +- .../hadoop/hbase/client/TestFastFail.java | 123 +- .../hbase/client/TestFlushFromClient.java | 37 +- .../hbase/client/TestFromClientSide.java | 216 +- .../hbase/client/TestFromClientSide3.java | 245 +- .../hbase/client/TestFromClientSide4.java | 185 +- .../hbase/client/TestFromClientSide5.java | 766 +++-- .../client/TestFromClientSideNoCodec.java | 21 +- .../TestFromClientSideScanExcpetion.java | 31 +- ...lientSideScanExcpetionWithCoprocessor.java | 6 +- .../TestFromClientSideWithCoprocessor.java | 13 +- .../TestFromClientSideWithCoprocessor4.java | 12 +- .../TestFromClientSideWithCoprocessor5.java | 8 +- .../hbase/client/TestGetProcedureResult.java | 14 +- ...tGetScanColumnsWithNewVersionBehavior.java | 27 +- .../client/TestGetScanPartialResult.java | 2 +- .../hbase/client/TestHBaseAdminNoCluster.java | 115 +- .../hbase/client/TestHTableMultiplexer.java | 34 +- .../TestHTableMultiplexerFlushCache.java | 26 +- .../hbase/client/TestHTableNoncedRetry.java | 78 +- .../apache/hadoop/hbase/client/TestHbck.java | 35 +- .../client/TestIllegalTableDescriptor.java | 17 +- .../TestIncreaseMetaReplicaThroughConfig.java | 2 +- ...ncrementFromClientSideWithCoprocessor.java | 14 +- .../client/TestIncrementsFromClientSide.java | 123 +- .../hbase/client/TestIntraRowPagination.java | 26 +- .../hadoop/hbase/client/TestLeaseRenewal.java | 14 +- .../client/TestLimitedScanWithFilter.java | 26 +- .../client/TestMalformedCellFromClient.java | 37 +- .../hadoop/hbase/client/TestMetaCache.java | 110 +- .../client/TestMetaRegionLocationCache.java | 34 +- .../client/TestMetaReplicasAddressChange.java | 2 +- .../client/TestMetaWithReplicasBasic.java | 2 +- .../TestMetaWithReplicasShutdownHandling.java | 2 +- ...napshotFromClientAfterSplittingRegion.java | 4 +- ...apshotFromClientCloneLinksAfterDelete.java | 12 +- .../TestMobCloneSnapshotFromClientError.java | 2 +- .../TestMobCloneSnapshotFromClientNormal.java | 4 +- ...apshotFromClientAfterSplittingRegions.java | 6 +- ...estoreSnapshotFromClientAfterTruncate.java | 6 +- 
...TestMobRestoreSnapshotFromClientClone.java | 4 +- ...eSnapshotFromClientGetCompactionState.java | 6 +- ...RestoreSnapshotFromClientSchemaChange.java | 6 +- ...estMobRestoreSnapshotFromClientSimple.java | 6 +- .../TestMobSnapshotCloneIndependence.java | 4 +- .../client/TestMobSnapshotFromClient.java | 6 +- .../TestMultiActionMetricsFromClient.java | 7 +- .../hbase/client/TestMultiParallel.java | 91 +- .../hbase/client/TestMultiRespectsLimits.java | 43 +- .../hbase/client/TestMultipleTimestamps.java | 227 +- .../client/TestMvccConsistentScanner.java | 13 +- .../client/TestPutDeleteEtcCellIteration.java | 42 +- .../hbase/client/TestPutWithDelete.java | 27 +- .../hbase/client/TestRawAsyncScanCursor.java | 10 +- ...estRawAsyncTableLimitedScanWithFilter.java | 18 +- .../client/TestRawAsyncTablePartialScan.java | 17 +- .../hbase/client/TestRawAsyncTableScan.java | 40 +- .../client/TestRegionLocationCaching.java | 16 +- .../hbase/client/TestRegionLocator.java | 6 +- .../hbase/client/TestReplicaWithCluster.java | 148 +- .../hbase/client/TestReplicasClient.java | 49 +- .../client/TestRequestTooBigException.java | 3 +- ...apshotFromClientAfterSplittingRegions.java | 4 +- ...estoreSnapshotFromClientAfterTruncate.java | 4 +- .../TestRestoreSnapshotFromClientClone.java | 2 +- ...eSnapshotFromClientGetCompactionState.java | 4 +- ...RestoreSnapshotFromClientSchemaChange.java | 4 +- .../TestRestoreSnapshotFromClientSimple.java | 2 +- .../hadoop/hbase/client/TestResult.java | 96 +- .../client/TestResultFromCoprocessor.java | 18 +- .../hbase/client/TestResultScannerCursor.java | 4 +- .../client/TestResultScannerTracing.java | 60 +- .../client/TestResultSizeEstimation.java | 33 +- .../client/TestRpcConnectionRegistry.java | 2 +- .../client/TestScanWithoutFetchingData.java | 6 +- .../hbase/client/TestScannerTimeout.java | 41 +- .../client/TestScannersFromClientSide.java | 215 +- .../client/TestScannersFromClientSide2.java | 18 +- .../client/TestSeparateClientZKCluster.java | 3 +- .../hbase/client/TestServerBusyException.java | 77 +- .../client/TestServerLoadDurability.java | 13 +- .../client/TestShortCircuitConnection.java | 7 +- .../TestShutdownOfMetaReplicaHolder.java | 2 +- .../hadoop/hbase/client/TestSizeFailures.java | 25 +- .../client/TestSmallReversedScanner.java | 18 +- .../client/TestSnapshotCloneIndependence.java | 16 +- .../TestSnapshotDFSTemporaryDirectory.java | 20 +- .../hbase/client/TestSnapshotFromClient.java | 49 +- ...tSnapshotFromClientWithRegionReplicas.java | 6 +- .../hbase/client/TestSnapshotMetadata.java | 46 +- .../TestSnapshotTemporaryDirectory.java | 67 +- ...tTemporaryDirectoryWithRegionReplicas.java | 6 +- .../hbase/client/TestSnapshotWithAcl.java | 4 +- .../client/TestSnapshotWithAclAsyncAdmin.java | 4 +- .../client/TestSplitOrMergeAtTableLevel.java | 33 +- .../hbase/client/TestSplitOrMergeStatus.java | 39 +- .../hbase/client/TestTableFavoredNodes.java | 84 +- .../client/TestTableOperationException.java | 40 +- .../client/TestTableSnapshotScanner.java | 90 +- .../TestTableSnapshotScannerWithSFT.java | 4 +- .../hbase/client/TestTimestampsFilter.java | 133 +- .../hbase/client/TestUpdateConfiguration.java | 12 +- .../client/TestZKConnectionRegistry.java | 31 +- .../hbase/client/hamcrest/BytesMatchers.java | 13 +- .../hbase/client/locking/TestEntityLocks.java | 47 +- .../replication/TestReplicationAdmin.java | 40 +- .../TestReplicationAdminWithClusters.java | 19 +- ...cationAdminWithTwoDifferentZKClusters.java | 4 +- .../hadoop/hbase/codec/CodecPerformance.java | 52 +- 
.../hbase/codec/TestCellMessageCodec.java | 18 +- .../hbase/constraint/AllFailConstraint.java | 2 +- .../hbase/constraint/AllPassConstraint.java | 2 +- .../CheckConfigurationConstraint.java | 16 +- .../constraint/RuntimeFailConstraint.java | 5 +- .../hbase/constraint/TestConstraint.java | 30 +- .../hbase/constraint/TestConstraints.java | 62 +- .../hbase/constraint/WorksConstraint.java | 7 +- .../SampleRegionWALCoprocessor.java | 56 +- .../coprocessor/SimpleRegionObserver.java | 260 +- .../coprocessor/TestAppendTimeRange.java | 33 +- .../TestCoprocessorConfiguration.java | 54 +- .../coprocessor/TestCoprocessorHost.java | 39 +- .../coprocessor/TestCoprocessorInterface.java | 130 +- .../coprocessor/TestCoprocessorMetrics.java | 202 +- .../TestCoprocessorShortCircuitRPC.java | 12 +- .../coprocessor/TestCoprocessorStop.java | 23 +- .../TestCoreMasterCoprocessor.java | 33 +- .../TestCoreRegionCoprocessor.java | 32 +- .../TestCoreRegionServerCoprocessor.java | 29 +- .../TestIncrementAndAppendWithNullResult.java | 41 +- .../coprocessor/TestIncrementTimeRange.java | 42 +- ...stMasterCoprocessorExceptionWithAbort.java | 73 +- ...tMasterCoprocessorExceptionWithRemove.java | 55 +- .../hbase/coprocessor/TestMasterObserver.java | 521 ++-- ...TestMasterObserverToModifyTableSchema.java | 27 +- .../coprocessor/TestMetaTableMetrics.java | 46 +- ...gativeMemStoreSizeWithSlowCoprocessor.java | 8 +- .../TestOpenTableInCoprocessor.java | 32 +- .../TestPassCustomCellViaRegionObserver.java | 34 +- .../TestPostIncrementAndAppendBeforeWAL.java | 97 +- .../TestReadOnlyConfiguration.java | 4 +- .../TestRegionCoprocessorHost.java | 22 +- .../coprocessor/TestRegionObserverBypass.java | 81 +- ...verForAddingMutationsFromCoprocessors.java | 88 +- .../TestRegionObserverInterface.java | 212 +- ...stRegionObserverPreFlushAndPreCompact.java | 23 +- .../TestRegionObserverScannerOpenHook.java | 38 +- .../TestRegionObserverStacking.java | 52 +- ...onServerCoprocessorExceptionWithAbort.java | 31 +- ...nServerCoprocessorExceptionWithRemove.java | 34 +- .../hbase/coprocessor/TestWALObserver.java | 96 +- .../TestForeignExceptionDispatcher.java | 16 +- .../TestForeignExceptionSerialization.java | 18 +- .../TestTimeoutExceptionInjector.java | 10 +- .../hbase/executor/TestExecutorService.java | 48 +- .../TestFavoredNodeAssignmentHelper.java | 170 +- .../TestStartcodeAgnosticServerName.java | 10 +- .../hadoop/hbase/filter/FilterAllFilter.java | 7 +- .../hbase/filter/FilterTestingCluster.java | 11 +- .../filter/TestBigDecimalComparator.java | 4 +- .../hbase/filter/TestBitComparator.java | 31 +- .../filter/TestColumnPaginationFilter.java | 103 +- .../hbase/filter/TestColumnPrefixFilter.java | 40 +- .../hbase/filter/TestColumnRangeFilter.java | 59 +- .../filter/TestComparatorSerialization.java | 10 +- .../filter/TestDependentColumnFilter.java | 109 +- .../hadoop/hbase/filter/TestFilter.java | 1451 ++++----- .../filter/TestFilterFromRegionSide.java | 26 +- .../hadoop/hbase/filter/TestFilterList.java | 301 +- .../hbase/filter/TestFilterListOnMini.java | 37 +- .../TestFilterListOrOperatorWithBlkCnt.java | 21 +- .../hbase/filter/TestFilterSerialization.java | 177 +- .../filter/TestFilterWithScanLimits.java | 14 +- .../hbase/filter/TestFilterWrapper.java | 18 +- ...tFiltersWithBinaryComponentComparator.java | 176 +- ...FirstKeyValueMatchingQualifiersFilter.java | 21 +- .../TestFuzzyRowAndColumnRangeFilter.java | 30 +- .../hbase/filter/TestFuzzyRowFilter.java | 517 ++- .../filter/TestFuzzyRowFilterEndToEnd.java | 55 +- 
.../hbase/filter/TestInclusiveStopFilter.java | 19 +- .../filter/TestInvocationRecordFilter.java | 56 +- .../hbase/filter/TestMultiRowRangeFilter.java | 118 +- .../TestMultipleColumnPrefixFilter.java | 63 +- .../hbase/filter/TestNullComparator.java | 9 +- .../hadoop/hbase/filter/TestPageFilter.java | 17 +- .../hadoop/hbase/filter/TestParseFilter.java | 184 +- .../hadoop/hbase/filter/TestPrefixFilter.java | 22 +- ...TestQualifierFilterWithEmptyQualifier.java | 63 +- .../hbase/filter/TestRandomRowFilter.java | 25 +- .../hbase/filter/TestRegexComparator.java | 161 +- .../hbase/filter/TestScanRowPrefix.java | 70 +- .../hadoop/hbase/filter/TestSeekHints.java | 32 +- .../TestSingleColumnValueExcludeFilter.java | 26 +- .../filter/TestSingleColumnValueFilter.java | 87 +- .../hadoop/hbase/fs/TestBlockReorder.java | 61 +- .../fs/TestBlockReorderBlockLocation.java | 32 +- .../hbase/fs/TestBlockReorderMultiBlocks.java | 69 +- .../hadoop/hbase/http/TestInfoServersACL.java | 177 +- .../hbase/io/TestByteBufferOutputStream.java | 8 +- .../io/TestFSDataInputStreamWrapper.java | 34 +- .../apache/hadoop/hbase/io/TestFileLink.java | 69 +- .../apache/hadoop/hbase/io/TestHFileLink.java | 68 +- .../hbase/io/TestHalfStoreFileReader.java | 23 +- .../apache/hadoop/hbase/io/TestHeapSize.java | 107 +- .../hbase/io/TestImmutableBytesWritable.java | 78 +- .../apache/hadoop/hbase/io/TestMetricsIO.java | 8 +- .../hbase/io/compress/HFileTestBase.java | 19 +- .../TestBufferedDataBlockEncoder.java | 38 +- .../io/encoding/TestChangingEncoding.java | 34 +- .../io/encoding/TestDataBlockEncoders.java | 132 +- .../io/encoding/TestDataBlockEncoding.java | 4 +- .../hbase/io/encoding/TestEncodedSeekers.java | 41 +- .../TestLoadAndSwitchEncodeOnDisk.java | 17 +- .../TestSeekBeforeWithReverseScan.java | 10 +- .../encoding/TestSeekToBlockWithEncoders.java | 137 +- .../hadoop/hbase/io/hfile/CacheTestUtils.java | 61 +- .../hadoop/hbase/io/hfile/KVGenerator.java | 51 +- .../hadoop/hbase/io/hfile/KeySampler.java | 35 +- .../hadoop/hbase/io/hfile/NanoTimer.java | 88 +- .../hbase/io/hfile/RandomKeyValueUtil.java | 43 +- .../hadoop/hbase/io/hfile/TagUsage.java | 11 +- .../io/hfile/TestBlockCacheReporting.java | 33 +- .../hbase/io/hfile/TestBlockIOUtils.java | 13 +- .../hbase/io/hfile/TestCacheConfig.java | 36 +- .../hbase/io/hfile/TestCacheOnWrite.java | 188 +- .../hbase/io/hfile/TestCachedBlockQueue.java | 23 +- .../hadoop/hbase/io/hfile/TestChecksum.java | 185 +- .../io/hfile/TestCombinedBlockCache.java | 16 +- .../hbase/io/hfile/TestFixedFileTrailer.java | 125 +- .../hfile/TestForceCacheImportantBlocks.java | 39 +- .../hadoop/hbase/io/hfile/TestHFile.java | 275 +- .../hadoop/hbase/io/hfile/TestHFileBlock.java | 351 +-- .../hbase/io/hfile/TestHFileBlockIndex.java | 284 +- .../io/hfile/TestHFileDataBlockEncoder.java | 89 +- .../hbase/io/hfile/TestHFileEncryption.java | 87 +- .../TestHFileInlineToRootChunkConversion.java | 29 +- .../io/hfile/TestHFilePrettyPrinter.java | 29 +- .../hbase/io/hfile/TestHFileReaderImpl.java | 25 +- .../TestHFileScannerImplReferenceCount.java | 57 +- .../hadoop/hbase/io/hfile/TestHFileSeek.java | 233 +- .../hbase/io/hfile/TestHFileWriterV3.java | 122 +- .../TestHFileWriterV3WithDataEncoders.java | 119 +- .../hfile/TestLazyDataBlockDecompression.java | 99 +- .../io/hfile/TestLruAdaptiveBlockCache.java | 439 +-- .../hbase/io/hfile/TestLruBlockCache.java | 274 +- .../hbase/io/hfile/TestLruCachedBlock.java | 6 +- .../hadoop/hbase/io/hfile/TestPrefetch.java | 53 +- .../hadoop/hbase/io/hfile/TestReseekTo.java | 35 +- 
.../io/hfile/TestRowIndexV1DataEncoder.java | 5 +- .../io/hfile/TestScannerFromBucketCache.java | 24 +- .../TestScannerSelectionUsingKeyRange.java | 20 +- .../hfile/TestScannerSelectionUsingTTL.java | 27 +- .../hfile/TestSeekBeforeWithInlineBlocks.java | 61 +- .../hadoop/hbase/io/hfile/TestSeekTo.java | 63 +- .../hbase/io/hfile/TestTinyLfuBlockCache.java | 90 +- .../io/hfile/bucket/TestBucketCache.java | 137 +- .../hfile/bucket/TestBucketCacheRefCnt.java | 59 +- .../hfile/bucket/TestBucketWriterThread.java | 57 +- .../hfile/bucket/TestByteBufferIOEngine.java | 7 +- .../bucket/TestExclusiveMemoryMmapEngine.java | 4 +- .../io/hfile/bucket/TestFileIOEngine.java | 13 +- .../hbase/io/hfile/bucket/TestRAMCache.java | 19 +- .../bucket/TestVerifyBucketCacheFile.java | 65 +- .../hadoop/hbase/ipc/AbstractTestIPC.java | 186 +- .../hbase/ipc/DelegatingRpcScheduler.java | 4 +- .../ipc/MetricsHBaseServerWrapperStub.java | 5 +- .../hadoop/hbase/ipc/TestBlockingIPC.java | 23 +- .../hadoop/hbase/ipc/TestBufferChain.java | 31 +- .../hadoop/hbase/ipc/TestCallRunner.java | 53 +- .../hbase/ipc/TestFifoRpcScheduler.java | 30 +- .../hadoop/hbase/ipc/TestHBaseClient.java | 21 +- .../hbase/ipc/TestMasterFifoRpcScheduler.java | 10 +- .../apache/hadoop/hbase/ipc/TestNettyIPC.java | 14 +- .../hadoop/hbase/ipc/TestNettyRpcServer.java | 15 +- .../hbase/ipc/TestPluggableQueueImpl.java | 90 +- .../hadoop/hbase/ipc/TestProtoBufRpc.java | 15 +- .../hbase/ipc/TestProtobufRpcServiceImpl.java | 30 +- .../hbase/ipc/TestRWQueueRpcExecutor.java | 11 +- .../hadoop/hbase/ipc/TestRpcClientLeaks.java | 6 +- .../hbase/ipc/TestRpcHandlerException.java | 10 +- .../hadoop/hbase/ipc/TestRpcMetrics.java | 49 +- .../ipc/TestRpcServerSlowConnectionSetup.java | 12 +- .../hbase/ipc/TestRpcServerTraceLogging.java | 13 +- .../hbase/ipc/TestSimpleRpcScheduler.java | 154 +- .../hbase/mapreduce/MapreduceTestingShim.java | 48 +- .../hadoop/hbase/master/AbstractTestDLS.java | 28 +- .../master/AbstractTestRestartCluster.java | 2 +- .../hbase/master/AlwaysStandByHMaster.java | 19 +- .../hbase/master/DummyRegionProcedure.java | 8 +- .../master/DummyRegionProcedureState.java | 2 +- .../hbase/master/DummyRegionServerList.java | 2 +- .../hbase/master/MockNoopMasterServices.java | 99 +- .../hadoop/hbase/master/MockRegionServer.java | 188 +- .../hbase/master/TestActiveMasterManager.java | 84 +- .../master/TestAlwaysStandByHMaster.java | 18 +- .../master/TestAssignmentManagerMetrics.java | 25 +- .../hadoop/hbase/master/TestBalancer.java | 7 +- .../master/TestClientMetaServiceRPCs.java | 58 +- .../hbase/master/TestClockSkewDetection.java | 19 +- .../master/TestCloseAnOpeningRegion.java | 2 +- .../hbase/master/TestClusterRestart.java | 2 +- .../master/TestClusterRestartFailover.java | 47 +- ...tClusterRestartFailoverSplitWithoutZk.java | 2 +- .../TestClusterRestartSplitWithoutZk.java | 2 +- .../master/TestClusterStatusPublisher.java | 7 +- .../hbase/master/TestDLSAsyncFSWAL.java | 4 +- .../hadoop/hbase/master/TestDLSFSHLog.java | 4 +- .../hadoop/hbase/master/TestDeadServer.java | 27 +- .../hadoop/hbase/master/TestGetInfoPort.java | 6 +- .../master/TestGetLastFlushedSequenceId.java | 22 +- .../hbase/master/TestGetReplicationLoad.java | 92 +- .../hbase/master/TestHMasterCommandLine.java | 9 +- .../hbase/master/TestHMasterRPCException.java | 12 +- .../hbase/master/TestLoadProcedureError.java | 6 +- .../hadoop/hbase/master/TestMaster.java | 65 +- .../master/TestMasterAbortAndRSGotKilled.java | 8 +- .../master/TestMasterBalanceThrottling.java | 15 +- 
.../hbase/master/TestMasterBalancerNPE.java | 40 +- .../master/TestMasterChoreScheduled.java | 21 +- .../master/TestMasterCoprocessorServices.java | 106 +- .../master/TestMasterDryRunBalancer.java | 14 +- .../hbase/master/TestMasterFailover.java | 38 +- ...TestMasterFailoverBalancerPersistence.java | 29 +- .../hbase/master/TestMasterFileSystem.java | 14 +- ...MasterFileSystemWithStoreFileTracking.java | 11 +- .../TestMasterFileSystemWithWALDir.java | 6 +- ...estMasterHandlerFullWhenTransitRegion.java | 18 +- .../hbase/master/TestMasterMetrics.java | 39 +- .../master/TestMasterMetricsWrapper.java | 39 +- .../hbase/master/TestMasterNoCluster.java | 95 +- .../hbase/master/TestMasterNotCarryTable.java | 8 +- ...TestMasterOperationsForRegionReplicas.java | 27 +- .../hbase/master/TestMasterQosFunction.java | 40 +- .../TestMasterRestartAfterDisablingTable.java | 35 +- .../hbase/master/TestMasterShutdown.java | 40 +- .../hbase/master/TestMasterTransitions.java | 593 ++-- .../TestMergeTableRegionsWhileRSCrash.java | 29 +- .../TestMetaAssignmentWithStopMaster.java | 23 +- .../hbase/master/TestMetaShutdownHandler.java | 49 +- .../TestMigrateAndMirrorMetaLocations.java | 2 +- .../TestMirroringTableStateManager.java | 34 +- .../TestNewStartedRegionServerVersion.java | 2 +- .../hbase/master/TestRecreateCluster.java | 42 +- .../hbase/master/TestRegionPlacement.java | 233 +- .../hbase/master/TestRegionPlacement2.java | 65 +- .../hadoop/hbase/master/TestRegionPlan.java | 8 +- .../master/TestRegionPlansWithThrottle.java | 7 +- .../hadoop/hbase/master/TestRegionState.java | 8 +- .../master/TestRegionsRecoveryChore.java | 62 +- .../TestRegionsRecoveryConfigManager.java | 1 - .../TestRestartWithEmptyWALDirectory.java | 4 +- .../master/TestRetainAssignmentOnRestart.java | 8 +- ...tainAssignmentOnRestartSplitWithoutZk.java | 5 +- .../hbase/master/TestRollingRestart.java | 64 +- .../TestRoundRobinAssignmentOnRestart.java | 15 +- ...obinAssignmentOnRestartSplitWithoutZk.java | 6 +- ...ServerCrashProcedureCarryingMetaStuck.java | 2 +- .../master/TestServerCrashProcedureStuck.java | 2 +- .../master/TestShutdownBackupMaster.java | 10 +- .../TestShutdownWithNoRegionServer.java | 2 +- .../hbase/master/TestSplitLogManager.java | 91 +- .../master/TestSplitRegionWhileRSCrash.java | 5 +- .../hbase/master/TestSplitWALManager.java | 71 +- .../hbase/master/TestTableStateManager.java | 11 +- .../hadoop/hbase/master/TestWarmupRegion.java | 58 +- .../assignment/AssignmentTestingUtil.java | 37 +- .../master/assignment/MockMasterServices.java | 69 +- .../assignment/TestAMAssignWithRandExec.java | 2 +- .../assignment/TestAMServerFailedOpen.java | 6 +- ...signRegionToUninitializedRegionServer.java | 2 +- .../assignment/TestAssignmentManager.java | 15 +- .../assignment/TestAssignmentManagerBase.java | 50 +- ...tAssignmentManagerLoadMetaRegionState.java | 2 +- .../assignment/TestAssignmentManagerUtil.java | 2 +- .../assignment/TestAssignmentOnRSCrash.java | 20 +- .../TestCloseRegionWhileRSCrash.java | 6 +- .../TestDeadServerMetricRegionChore.java | 1 + .../TestExceptionInAssignRegion.java | 9 +- .../TestExceptionInUnassignedRegion.java | 2 +- .../master/assignment/TestHbckChore.java | 22 +- .../TestMasterAbortWhileMergingTable.java | 44 +- .../TestMergeTableRegionsProcedure.java | 44 +- .../TestModifyTableWhileMerging.java | 34 +- .../TestOpenRegionProcedureBackoff.java | 2 +- .../TestOpenRegionProcedureHang.java | 20 +- .../assignment/TestRaceBetweenSCPAndDTP.java | 12 +- .../assignment/TestRaceBetweenSCPAndTRSP.java | 2 +- 
...RegionAssignedToMultipleRegionServers.java | 8 +- .../master/assignment/TestRegionBypass.java | 71 +- .../assignment/TestRegionMoveAndAbandon.java | 5 +- .../assignment/TestRegionReplicaSplit.java | 14 +- .../master/assignment/TestRegionSplit.java | 29 +- .../TestRegionSplitAndSeparateChildren.java | 29 +- .../assignment/TestRegionStateStore.java | 24 +- .../master/assignment/TestRegionStates.java | 41 +- .../TestReportOnlineRegionsRace.java | 6 +- ...rtRegionStateTransitionFromDeadServer.java | 14 +- .../TestReportRegionStateTransitionRetry.java | 4 +- .../assignment/TestRogueRSAssignment.java | 28 +- .../assignment/TestSCPGetRegionsRace.java | 12 +- .../TestSplitTableRegionProcedure.java | 100 +- .../TestTransitRegionStateProcedure.java | 2 +- .../TestWakeUpUnexpectedProcedure.java | 16 +- .../master/balancer/BalancerTestBase.java | 274 +- .../master/balancer/BalancerTestBase2.java | 2 +- .../master/balancer/DummyCostFunction.java | 2 +- .../DummyMetricsStochasticBalancer.java | 21 +- .../LoadBalancerPerformanceEvaluation.java | 32 +- .../LoadOnlyFavoredStochasticBalancer.java | 2 +- .../master/balancer/TestBalancerDecision.java | 16 +- .../balancer/TestBalancerRejection.java | 50 +- .../TestBalancerStatusTagInJMXMetrics.java | 12 +- .../master/balancer/TestBaseLoadBalancer.java | 147 +- .../master/balancer/TestDoubleArrayCost.java | 2 +- .../balancer/TestFavoredNodeTableImport.java | 6 +- .../TestFavoredStochasticBalancerPickers.java | 60 +- .../TestFavoredStochasticLoadBalancer.java | 84 +- .../balancer/TestRegionLocationFinder.java | 31 +- .../balancer/TestRegionsOnMasterOptions.java | 111 +- .../master/balancer/TestServerAndLoad.java | 8 +- .../balancer/TestSimpleLoadBalancer.java | 122 +- .../TestStochasticBalancerJmxMetrics.java | 10 +- .../balancer/TestStochasticLoadBalancer.java | 180 +- ...tStochasticLoadBalancerBalanceCluster.java | 8 +- ...ochasticLoadBalancerHeterogeneousCost.java | 73 +- ...ticLoadBalancerHeterogeneousCostRules.java | 38 +- ...estStochasticLoadBalancerLargeCluster.java | 4 +- .../TestStochasticLoadBalancerMidCluster.java | 4 +- ...stStochasticLoadBalancerRegionReplica.java | 12 +- ...LoadBalancerRegionReplicaLargeCluster.java | 2 +- ...icLoadBalancerRegionReplicaMidCluster.java | 2 +- ...ReplicaReplicationGreaterThanNumNodes.java | 6 +- ...ticLoadBalancerRegionReplicaSameHosts.java | 4 +- ...ticLoadBalancerRegionReplicaWithRacks.java | 6 +- ...estStochasticLoadBalancerSmallCluster.java | 4 +- .../master/cleaner/TestCleanerChore.java | 4 +- .../master/cleaner/TestHFileCleaner.java | 60 +- .../master/cleaner/TestHFileLinkCleaner.java | 12 +- .../hbase/master/cleaner/TestLogsCleaner.java | 82 +- .../TestReplicationBarrierCleaner.java | 6 +- .../cleaner/TestReplicationHFileCleaner.java | 29 +- .../cleaner/TestSnapshotCleanerChore.java | 44 +- .../cleaner/TestSnapshotFromMaster.java | 93 +- .../http/TestApiV1ClusterMetricsResource.java | 111 +- .../master/http/TestMasterStatusServlet.java | 50 +- .../hbase/master/http/TestMetaBrowser.java | 159 +- .../master/http/TestMetaBrowserNoCluster.java | 71 +- .../master/http/TestRegionVisualizer.java | 4 +- .../master/http/gson/GsonFactoryTest.java | 16 +- .../master/janitor/TestCatalogJanitor.java | 224 +- .../janitor/TestCatalogJanitorCluster.java | 14 +- .../TestCatalogJanitorInMemoryStates.java | 14 +- .../hbase/master/janitor/TestMetaFixer.java | 101 +- .../hbase/master/locking/TestLockManager.java | 32 +- .../master/locking/TestLockProcedure.java | 83 +- .../TestInitializeStoreFileTracker.java | 7 +- 
...ormalizerManagerConfigurationObserver.java | 16 +- .../TestRegionNormalizerWorkQueue.java | 32 +- .../TestRegionNormalizerWorker.java | 100 +- .../TestSimpleRegionNormalizer.java | 323 +- .../TestSimpleRegionNormalizerOnCluster.java | 183 +- .../MasterFailoverWithProceduresTestBase.java | 2 +- ...ocedureSchedulerPerformanceEvaluation.java | 63 +- .../MasterProcedureTestingUtility.java | 188 +- .../procedure/TestCloneSnapshotProcedure.java | 37 +- ...estCloneSnapshotProcedureFileBasedSFT.java | 7 +- .../TestCreateNamespaceProcedure.java | 53 +- .../TestCreateTableNoRegionServer.java | 37 +- .../procedure/TestCreateTableProcedure.java | 61 +- ...stCreateTableProcedureMuitipleRegions.java | 2 +- .../TestCreateTableWithMasterFailover.java | 2 +- ...DeleteColumnFamilyProcedureFromClient.java | 16 +- .../TestDeleteNamespaceProcedure.java | 42 +- .../procedure/TestDeleteTableProcedure.java | 51 +- .../TestDeleteTableWithMasterFailover.java | 2 +- .../procedure/TestDisableTableProcedure.java | 38 +- .../TestDisableTableWithMasterFailover.java | 2 +- .../procedure/TestEnableTableProcedure.java | 3 +- .../TestEnableTableWithMasterFailover.java | 2 +- .../TestFastFailOnProcedureNotRegistered.java | 10 +- .../hbase/master/procedure/TestHBCKSCP.java | 25 +- .../master/procedure/TestHBCKSCPUnknown.java | 15 +- .../procedure/TestIgnoreUnknownFamily.java | 39 +- .../TestMasterObserverPostCalls.java | 110 +- .../procedure/TestMasterProcedureEvents.java | 20 +- .../TestMasterProcedureScheduler.java | 24 +- ...stMasterProcedureSchedulerConcurrency.java | 40 +- .../TestModifyNamespaceProcedure.java | 57 +- .../procedure/TestModifyTableProcedure.java | 118 +- .../master/procedure/TestProcedureAdmin.java | 47 +- .../procedure/TestProcedurePriority.java | 6 +- ...estReopenTableRegionsProcedureBackoff.java | 2 +- ...openTableRegionsProcedureInfiniteLoop.java | 2 +- .../TestRestoreSnapshotProcedure.java | 31 +- .../hbase/master/procedure/TestSCP.java | 2 +- .../hbase/master/procedure/TestSCPBase.java | 7 +- .../master/procedure/TestSCPWithMeta.java | 2 +- .../TestSCPWithMetaWithReplicas.java | 2 +- ...hMetaWithReplicasWithoutZKCoordinated.java | 2 +- .../TestSCPWithMetaWithoutZKCoordinated.java | 2 +- .../master/procedure/TestSCPWithReplicas.java | 25 +- ...stSCPWithReplicasWithoutZKCoordinated.java | 4 +- .../master/procedure/TestSCPWithoutMeta.java | 2 +- .../TestSCPWithoutMetaWithReplicas.java | 2 +- ...tMetaWithReplicasWithoutZKCoordinated.java | 4 +- ...estSCPWithoutMetaWithoutZKCoordinated.java | 2 +- .../TestSCPWithoutZKCoordinated.java | 6 +- .../TestSafemodeBringsDownMaster.java | 22 +- .../procedure/TestSchedulerQueueDeadLock.java | 14 +- .../procedure/TestServerRemoteProcedure.java | 37 +- .../procedure/TestSplitWALProcedure.java | 21 +- .../procedure/TestTableDDLProcedureBase.java | 9 +- ...TableDescriptorModificationFromClient.java | 30 +- .../procedure/TestTruncateTableProcedure.java | 82 +- .../TestTruncateTableWithMasterFailover.java | 2 +- .../TestWALProcedureStoreOnHDFS.java | 41 +- .../master/region/MasterRegionTestBase.java | 2 +- .../region/TestChangeSFTForMasterRegion.java | 2 +- .../region/TestMasterRegionCompaction.java | 6 +- .../master/region/TestMasterRegionFlush.java | 2 +- .../region/TestMasterRegionInitialize.java | 2 +- .../TestMasterRegionOnTwoFileSystems.java | 6 +- .../region/TestMasterRegionWALCleaner.java | 10 +- .../region/TestMasterRegionWALRecovery.java | 3 +- .../TestModifyPeerProcedureRetryBackoff.java | 12 +- .../snapshot/TestSnapshotFileCache.java | 56 +- 
...pshotFileCacheWithDifferentWorkingDir.java | 11 +- .../snapshot/TestSnapshotHFileCleaner.java | 53 +- .../master/snapshot/TestSnapshotManager.java | 36 +- .../snapshot/TestSnapshotWhileRSCrashes.java | 4 +- .../snapshot/TestTakeSnapshotHandler.java | 26 +- .../apache/hadoop/hbase/mob/MobTestUtil.java | 33 +- .../hadoop/hbase/mob/TestCachedMobFile.java | 20 +- .../hbase/mob/TestDefaultMobStoreFlusher.java | 94 +- .../hbase/mob/TestExpiredMobFileCleaner.java | 24 +- .../hbase/mob/TestMobDataBlockEncoding.java | 18 +- .../apache/hadoop/hbase/mob/TestMobFile.java | 22 +- .../hadoop/hbase/mob/TestMobFileCache.java | 33 +- .../hadoop/hbase/mob/TestMobFileLink.java | 13 +- .../hadoop/hbase/mob/TestMobFileName.java | 13 +- .../mob/TestMobWithByteBuffAllocator.java | 5 +- .../mob/compactions/TestMobCompactor.java | 218 +- .../TestPartitionedMobCompactionRequest.java | 4 +- .../TestPartitionedMobCompactor.java | 371 +-- .../TestMemoryBoundedLogMessageBuffer.java | 30 +- .../hbase/monitoring/TestTaskMonitor.java | 39 +- .../namequeues/TestNamedQueueRecorder.java | 428 ++- .../hbase/namequeues/TestSlowLogAccessor.java | 26 +- .../hbase/namespace/TestNamespaceAuditor.java | 164 +- .../SimpleMasterProcedureManager.java | 22 +- .../procedure/SimpleRSProcedureManager.java | 32 +- .../procedure/TestFailedProcCleanup.java | 28 +- .../hadoop/hbase/procedure/TestProcedure.java | 29 +- .../procedure/TestProcedureCoordinator.java | 89 +- .../procedure/TestProcedureDescriber.java | 25 +- .../hbase/procedure/TestProcedureManager.java | 12 +- .../hbase/procedure/TestProcedureMember.java | 157 +- .../hbase/procedure/TestZKProcedure.java | 145 +- .../procedure/TestZKProcedureControllers.java | 128 +- ...onProcedureStorePerformanceEvaluation.java | 7 +- .../region/RegionProcedureStoreTestBase.java | 2 +- .../RegionProcedureStoreTestHelper.java | 2 +- .../RegionProcedureStoreTestProcedure.java | 4 +- .../TestHFileProcedurePrettyPrinter.java | 5 +- .../region/TestRegionProcedureStore.java | 3 +- .../TestRegionProcedureStoreMigration.java | 2 +- .../region/TestWALProcedurePrettyPrinter.java | 2 +- .../hbase/protobuf/TestProtobufUtil.java | 94 +- .../protobuf/TestReplicationProtobuf.java | 12 +- .../quotas/SpaceQuotaHelperForTests.java | 194 +- .../SpaceQuotaSnapshotNotifierForTest.java | 27 +- .../quotas/TestActivePolicyEnforcement.java | 58 +- .../quotas/TestClusterScopeQuotaThrottle.java | 12 +- .../quotas/TestFileArchiverNotifierImpl.java | 53 +- .../TestFileSystemUtilizationChore.java | 64 +- .../quotas/TestGlobalQuotaSettingsImpl.java | 62 +- .../quotas/TestLowLatencySpaceQuotas.java | 86 +- .../hbase/quotas/TestMasterQuotaManager.java | 4 +- .../quotas/TestMasterQuotasObserver.java | 35 +- .../TestMasterQuotasObserverWithMocks.java | 19 +- .../TestNamespaceQuotaViolationStore.java | 74 +- .../hadoop/hbase/quotas/TestQuotaAdmin.java | 168 +- .../hbase/quotas/TestQuotaObserverChore.java | 24 +- .../TestQuotaObserverChoreRegionReports.java | 25 +- ...TestQuotaObserverChoreWithMiniCluster.java | 98 +- .../hadoop/hbase/quotas/TestQuotaState.java | 17 +- .../hbase/quotas/TestQuotaStatusRPCs.java | 40 +- .../hbase/quotas/TestQuotaTableUtil.java | 106 +- .../hbase/quotas/TestQuotaThrottle.java | 15 +- .../hadoop/hbase/quotas/TestRateLimiter.java | 22 +- .../TestRegionServerSpaceQuotaManager.java | 54 +- .../hbase/quotas/TestRegionSizeImpl.java | 19 +- .../quotas/TestRegionSizeReportingChore.java | 39 +- .../hbase/quotas/TestRegionSizeStoreImpl.java | 28 +- .../hbase/quotas/TestRegionSizeUse.java | 19 +- 
.../TestSnapshotQuotaObserverChore.java | 134 +- .../TestSpaceQuotaBasicFunctioning.java | 40 +- .../hbase/quotas/TestSpaceQuotaDropTable.java | 13 +- .../hbase/quotas/TestSpaceQuotaIncrease.java | 11 +- .../quotas/TestSpaceQuotaOnBulkLoad.java | 28 +- .../TestSpaceQuotaOnNonExistingTables.java | 9 +- .../hbase/quotas/TestSpaceQuotaRemoval.java | 27 +- .../quotas/TestSpaceQuotaSwitchPolicies.java | 24 +- ...aceQuotaViolationPolicyRefresherChore.java | 98 +- .../hadoop/hbase/quotas/TestSpaceQuotas.java | 107 +- .../TestSpaceQuotasWithRegionReplicas.java | 18 +- .../quotas/TestSpaceQuotasWithSnapshots.java | 84 +- .../quotas/TestSuperUserQuotaPermissions.java | 32 +- .../quotas/TestTableQuotaViolationStore.java | 85 +- .../TestTableSpaceQuotaViolationNotifier.java | 22 +- .../hbase/quotas/TestTablesWithQuotas.java | 31 +- .../hbase/quotas/ThrottleQuotaTestUtil.java | 24 +- .../BaseViolationPolicyEnforcement.java | 15 +- ...oadCheckingViolationPolicyEnforcement.java | 16 +- ...isableTableViolationPolicyEnforcement.java | 4 +- ...stNoInsertsViolationPolicyEnforcement.java | 4 +- ...CompactionsViolationPolicyEnforcement.java | 6 +- ...estNoWritesViolationPolicyEnforcement.java | 4 +- ...bstractTestDateTieredCompactionPolicy.java | 18 +- .../regionserver/CreateRandomStoreFile.java | 106 +- .../regionserver/DataBlockEncodingTool.java | 334 +- .../DelegatingInternalScanner.java | 3 +- .../DelegatingKeyValueScanner.java | 4 +- .../EncodedSeekPerformanceTest.java | 65 +- .../regionserver/KeyValueScanFixture.java | 14 +- .../MetricsRegionServerWrapperStub.java | 9 +- .../MetricsRegionWrapperStub.java | 1 - .../hbase/regionserver/MockHStoreFile.java | 41 +- .../regionserver/NoOpScanPolicyObserver.java | 10 +- .../hbase/regionserver/OOMERegionServer.java | 18 +- .../hbase/regionserver/RegionAsTable.java | 104 +- .../regionserver/StatefulStoreMockMaker.java | 12 +- .../regionserver/TestAtomicOperation.java | 147 +- .../hbase/regionserver/TestBlocksRead.java | 88 +- .../hbase/regionserver/TestBlocksScanned.java | 54 +- .../TestBrokenStoreFileCleaner.java | 71 +- .../hbase/regionserver/TestBulkLoad.java | 115 +- .../regionserver/TestBulkLoadReplication.java | 118 +- .../TestBulkLoadReplicationHFileRefs.java | 66 +- .../hbase/regionserver/TestBulkloadBase.java | 20 +- .../TestCacheOnWriteInSchema.java | 67 +- .../hbase/regionserver/TestCellFlatSet.java | 135 +- .../regionserver/TestCellSkipListSet.java | 27 +- ...TestCleanupCompactedFileAfterFailover.java | 17 +- ...TestCleanupCompactedFileOnRegionClose.java | 50 +- .../regionserver/TestCleanupMetaWAL.java | 14 +- .../TestClearRegionBlockCache.java | 30 +- .../hbase/regionserver/TestClusterId.java | 26 +- .../hbase/regionserver/TestColumnSeeking.java | 26 +- .../regionserver/TestCompactSplitThread.java | 12 +- .../regionserver/TestCompactingMemStore.java | 250 +- .../TestCompactingToCellFlatMapMemStore.java | 359 ++- .../hbase/regionserver/TestCompaction.java | 144 +- .../TestCompactionAfterBulkLoad.java | 27 +- .../TestCompactionArchiveConcurrentClose.java | 20 +- .../TestCompactionArchiveIOException.java | 17 +- .../TestCompactionFileNotFound.java | 13 +- .../TestCompactionInDeadRegionServer.java | 12 +- .../TestCompactionLifeCycleTracker.java | 62 +- .../regionserver/TestCompactionPolicy.java | 25 +- .../regionserver/TestCompactionState.java | 64 +- .../TestCompactionWithByteBuff.java | 15 +- .../TestCompactionWithCoprocessor.java | 12 +- .../regionserver/TestCompactorMemLeak.java | 9 +- .../regionserver/TestCompoundBloomFilter.java | 165 +- 
.../TestDataBlockEncodingTool.java | 37 +- .../TestDateTieredCompactionPolicy.java | 36 +- ...dCompactionPolicyHeterogeneousStorage.java | 58 +- ...estDateTieredCompactionPolicyOverflow.java | 7 +- .../TestDefaultCompactSelection.java | 66 +- .../regionserver/TestDefaultMemStore.java | 340 +- .../regionserver/TestDefaultStoreEngine.java | 14 +- .../regionserver/TestDeleteMobTable.java | 25 +- .../TestDirectStoreSplitsMerges.java | 120 +- .../regionserver/TestEncryptionDisabled.java | 15 +- .../TestEncryptionKeyRotation.java | 45 +- .../TestEncryptionRandomKeying.java | 23 +- .../TestEndToEndSplitTransaction.java | 37 +- .../regionserver/TestFSErrorsExposed.java | 87 +- .../regionserver/TestFailedAppendAndSync.java | 51 +- .../TestFlushLifeCycleTracker.java | 46 +- .../regionserver/TestFlushRegionEntry.java | 7 +- .../TestGetClosestAtOrBefore.java | 82 +- .../hbase/regionserver/TestHMobStore.java | 200 +- .../hbase/regionserver/TestHRegion.java | 1438 ++++----- .../regionserver/TestHRegionFileSystem.java | 25 +- .../hbase/regionserver/TestHRegionInfo.java | 210 +- .../regionserver/TestHRegionOnCluster.java | 18 +- .../regionserver/TestHRegionReplayEvents.java | 330 +- .../TestHRegionServerBulkLoad.java | 117 +- ...estHRegionServerBulkLoadWithOldClient.java | 43 +- .../regionserver/TestHRegionTracing.java | 6 +- .../TestHRegionWithInMemoryFlush.java | 46 +- .../hadoop/hbase/regionserver/TestHStore.java | 679 ++-- .../hbase/regionserver/TestHStoreFile.java | 52 +- .../regionserver/TestHdfsSnapshotHRegion.java | 40 +- .../regionserver/TestHeapMemoryManager.java | 121 +- .../TestInputStreamBlockDistribution.java | 26 +- .../regionserver/TestIsDeleteFailure.java | 27 +- .../regionserver/TestJoinedScanners.java | 45 +- .../hbase/regionserver/TestKeepDeletes.java | 283 +- .../hbase/regionserver/TestKeyValueHeap.java | 6 +- .../regionserver/TestKeyValueScanFixture.java | 17 +- .../hbase/regionserver/TestLogRoller.java | 15 +- .../regionserver/TestMajorCompaction.java | 8 +- .../TestMaxResultsPerColumnFamily.java | 20 +- .../regionserver/TestMemStoreChunkPool.java | 43 +- .../regionserver/TestMemStoreFlusher.java | 15 +- .../hbase/regionserver/TestMemStoreLAB.java | 69 +- .../TestMemStoreSegmentsIterator.java | 9 +- .../TestMemstoreLABWithoutPool.java | 27 +- .../TestMergesSplitsAddToTracker.java | 94 +- .../TestMetricsHeapMemoryManager.java | 8 +- .../hbase/regionserver/TestMetricsRegion.java | 41 +- .../regionserver/TestMetricsRegionServer.java | 28 +- .../TestMetricsTableAggregate.java | 3 +- .../TestMetricsTableLatencies.java | 83 +- .../TestMetricsUserAggregate.java | 82 +- .../hbase/regionserver/TestMinVersions.java | 210 +- .../TestMiniBatchOperationInProgress.java | 14 +- .../regionserver/TestMinorCompaction.java | 38 +- .../regionserver/TestMobStoreCompaction.java | 42 +- .../regionserver/TestMobStoreScanner.java | 88 +- .../regionserver/TestMultiColumnScanner.java | 90 +- ...umnScannerWithAlgoGZAndNoDataEncoding.java | 5 +- ...mnScannerWithAlgoGZAndUseDataEncoding.java | 5 +- ...olumnScannerWithNoneAndNoDataEncoding.java | 5 +- ...lumnScannerWithNoneAndUseDataEncoding.java | 5 +- .../regionserver/TestMultiLogThreshold.java | 9 +- .../TestMultiVersionConcurrencyControl.java | 13 +- ...stMultiVersionConcurrencyControlBasic.java | 6 +- .../regionserver/TestMutateRowsRecovery.java | 4 +- .../TestNewVersionBehaviorFromClientSide.java | 6 +- ...tCleanupCompactedFileWhenRegionWarmup.java | 21 +- .../TestObservedExceptionsInBatch.java | 4 +- .../TestOpenRegionFailedMemoryLeak.java | 8 +- 
.../TestOpenSeqNumUnexpectedIncrease.java | 6 +- .../hbase/regionserver/TestParallelPut.java | 45 +- .../TestPerColumnFamilyFlush.java | 66 +- .../hbase/regionserver/TestPriorityRpc.java | 62 +- .../hbase/regionserver/TestQosFunction.java | 14 +- .../regionserver/TestRSChoresScheduled.java | 15 +- .../TestRSKilledWhenInitializing.java | 36 +- .../hbase/regionserver/TestRSRpcServices.java | 10 +- .../TestReadAndWriteRegionInfoFile.java | 2 +- .../regionserver/TestRecoveredEdits.java | 72 +- .../TestRecoveredEditsReplayAndAbort.java | 91 +- .../regionserver/TestRegionFavoredNodes.java | 49 +- .../regionserver/TestRegionIncrement.java | 82 +- .../TestRegionInfoStaticInitialization.java | 13 +- .../regionserver/TestRegionInterrupt.java | 30 +- .../TestRegionMergeTransactionOnCluster.java | 147 +- .../hbase/regionserver/TestRegionMove.java | 20 +- .../hbase/regionserver/TestRegionOpen.java | 17 +- .../TestRegionReplicaFailover.java | 48 +- ...tRegionReplicaWaitForPrimaryFlushConf.java | 12 +- .../regionserver/TestRegionReplicas.java | 67 +- .../TestRegionReplicasAreDistributed.java | 20 +- .../TestRegionReplicasWithModifyTable.java | 2 +- ...estRegionReplicasWithRestartScenarios.java | 30 +- .../regionserver/TestRegionServerAbort.java | 42 +- .../TestRegionServerAbortTimeout.java | 9 +- .../TestRegionServerAccounting.java | 8 +- .../TestRegionServerCrashDisableWAL.java | 2 +- .../TestRegionServerHostname.java | 59 +- .../regionserver/TestRegionServerMetrics.java | 88 +- .../TestRegionServerNoMaster.java | 63 +- .../TestRegionServerOnlineConfigChange.java | 96 +- .../TestRegionServerReadRequestMetrics.java | 60 +- .../TestRegionServerRegionSpaceUseReport.java | 26 +- .../TestRegionServerReportForDuty.java | 36 +- .../regionserver/TestRegionServerScan.java | 14 +- .../regionserver/TestRegionSplitPolicy.java | 2 +- .../TestRegionSplitRestriction.java | 45 +- .../regionserver/TestRemoveRegionMetrics.java | 29 +- .../TestRequestsPerSecondMetric.java | 39 +- .../regionserver/TestResettingCounters.java | 39 +- .../regionserver/TestReversibleScanners.java | 255 +- .../TestRowPrefixBloomFilter.java | 69 +- .../hbase/regionserver/TestRowTooBig.java | 49 +- .../regionserver/TestRpcSchedulerFactory.java | 11 +- .../regionserver/TestSCVFWithMiniCluster.java | 13 +- .../regionserver/TestScanWithBloomError.java | 64 +- .../hbase/regionserver/TestScanner.java | 183 +- .../TestScannerHeartbeatMessages.java | 59 +- .../TestScannerRPCScanMetrics.java | 24 +- .../TestScannerRetriableFailure.java | 15 +- .../regionserver/TestScannerWithBulkload.java | 73 +- .../TestScannerWithCorruptHFile.java | 8 +- .../TestSecureBulkLoadManager.java | 89 +- .../TestSecureBulkloadListener.java | 40 +- .../regionserver/TestSeekOptimizations.java | 177 +- .../regionserver/TestServerNonceManager.java | 20 +- .../TestSettingTimeoutOnBlockingPoint.java | 14 +- .../regionserver/TestShortCircuitGet.java | 25 +- .../TestShutdownWhileWALBroken.java | 6 +- .../TestSimpleTimeRangeTracker.java | 8 +- .../regionserver/TestSplitLogWorker.java | 112 +- .../TestSplitTransactionOnCluster.java | 261 +- .../regionserver/TestSplitWalDataLoss.java | 22 +- .../TestSplitWithBlockingFiles.java | 9 +- .../hbase/regionserver/TestStoreFileInfo.java | 24 +- .../TestStoreFileRefresherChore.java | 50 +- ...estStoreFileScannerWithTagCompression.java | 20 +- .../hbase/regionserver/TestStoreScanner.java | 436 ++- .../regionserver/TestStoreScannerClosure.java | 48 +- .../regionserver/TestStripeStoreEngine.java | 15 +- .../TestStripeStoreFileManager.java | 169 +- 
.../regionserver/TestSwitchToStreamRead.java | 18 +- .../TestSyncTimeRangeTracker.java | 24 +- .../hadoop/hbase/regionserver/TestTags.java | 39 +- .../TestTimestampFilterSeekHint.java | 25 +- .../hbase/regionserver/TestWALLockup.java | 41 +- .../TestWALMonotonicallyIncreasingSeqId.java | 27 +- .../TestWalAndCompactingMemStoreFlush.java | 318 +- .../hbase/regionserver/TestWideScanner.java | 2 +- .../ConstantSizeFileListGenerator.java | 3 +- .../compactions/EverythingPolicy.java | 7 +- .../ExplicitFileListGenerator.java | 36 +- .../GaussianFileListGenerator.java | 5 +- .../compactions/MockStoreFileGenerator.java | 16 +- .../PerfTestCompactionPolicies.java | 82 +- .../SemiConstantSizeFileListGenerator.java | 2 +- .../SinusoidalFileListGenerator.java | 4 +- .../compactions/SpikyFileListGenerator.java | 4 +- .../compactions/StoreFileListGenerator.java | 4 +- .../compactions/TestCloseChecker.java | 5 +- .../TestCompactedHFilesDischarger.java | 22 +- .../compactions/TestCompactor.java | 5 +- .../compactions/TestCurrentHourProvider.java | 2 +- .../compactions/TestDateTieredCompactor.java | 20 +- .../compactions/TestFIFOCompactionPolicy.java | 70 +- .../compactions/TestOffPeakHours.java | 12 +- .../TestStripeCompactionPolicy.java | 259 +- .../compactions/TestStripeCompactor.java | 20 +- .../http/TestRSStatusServlet.java | 19 +- .../AbstractTestScanQueryMatcher.java | 2 +- .../TestCompactionScanQueryMatcher.java | 14 +- .../TestExplicitColumnTracker.java | 12 +- .../TestNewVersionBehaviorTracker.java | 48 +- .../querymatcher/TestScanDeleteTracker.java | 48 +- .../TestScanWildcardColumnTracker.java | 16 +- .../TestUserScanQueryMatcher.java | 73 +- .../StoreFileTrackerForTest.java | 2 +- .../TestChangeStoreFileTracker.java | 2 +- .../TestMigrationStoreFileTracker.java | 8 +- ...stRegionWithFileBasedStoreFileTracker.java | 2 +- .../TestStoreFileListFile.java | 2 +- .../TestStoreFileTrackerFactory.java | 2 +- .../TestStoreFileTrackerValidationUtils.java | 2 +- ...estCompactionWithThroughputController.java | 28 +- .../TestFlushWithThroughputController.java | 32 +- .../throttle/TestStoreHotnessProtector.java | 25 +- .../regionserver/wal/AbstractTestFSWAL.java | 86 +- .../wal/AbstractTestLogRollPeriod.java | 10 +- .../wal/AbstractTestLogRolling.java | 28 +- .../wal/AbstractTestProtobufLog.java | 34 +- .../wal/AbstractTestWALReplay.java | 311 +- .../wal/FaultyProtobufLogReader.java | 13 +- .../wal/InstrumentedLogWriter.java | 22 +- .../regionserver/wal/TestAsyncFSWAL.java | 102 +- .../wal/TestAsyncFSWALDurability.java | 2 +- .../wal/TestAsyncFSWALRollStuck.java | 2 +- .../wal/TestAsyncLogRollPeriod.java | 4 +- .../regionserver/wal/TestAsyncLogRolling.java | 4 +- .../wal/TestAsyncProtobufLog.java | 5 +- .../regionserver/wal/TestAsyncWALReplay.java | 6 +- .../wal/TestAsyncWALReplayCompressed.java | 4 +- .../TestAsyncWALReplayValueCompression.java | 4 +- .../regionserver/wal/TestCompressor.java | 32 +- .../wal/TestCustomWALCellCodec.java | 9 +- .../regionserver/wal/TestDurability.java | 29 +- .../hbase/regionserver/wal/TestFSHLog.java | 81 +- .../wal/TestFSHLogDurability.java | 4 +- .../regionserver/wal/TestFSWALEntry.java | 5 +- .../regionserver/wal/TestHBaseWalOnEC.java | 1 - .../regionserver/wal/TestLogRollAbort.java | 38 +- .../regionserver/wal/TestLogRollPeriod.java | 4 +- .../regionserver/wal/TestLogRolling.java | 47 +- .../wal/TestLogRollingNoCluster.java | 38 +- .../regionserver/wal/TestMetricsWAL.java | 8 +- .../regionserver/wal/TestProtobufLog.java | 4 +- .../wal/TestSecureAsyncWALReplay.java | 4 +- 
.../regionserver/wal/TestSecureWALReplay.java | 4 +- .../wal/TestSequenceIdAccounting.java | 16 +- .../regionserver/wal/TestSyncFuture.java | 4 +- .../regionserver/wal/TestSyncFutureCache.java | 7 +- .../wal/TestWALActionsListener.java | 21 +- .../wal/TestWALCellCodecWithCompression.java | 58 +- .../wal/TestWALConfiguration.java | 29 +- .../regionserver/wal/TestWALOpenError.java | 20 +- .../hbase/regionserver/wal/TestWALReplay.java | 4 +- ...TestWALReplayBoundedLogWriterCreation.java | 5 +- .../wal/TestWALReplayCompressed.java | 4 +- .../wal/TestWALReplayValueCompression.java | 4 +- .../wal/WALDurabilityTestBase.java | 8 +- .../replication/DummyReplicationEndpoint.java | 2 +- .../replication/ReplicationSourceDummy.java | 12 +- .../SerialReplicationTestBase.java | 9 +- .../TestAddToSerialReplicationPeer.java | 6 +- .../TestClaimReplicationQueue.java | 6 +- .../replication/TestMasterReplication.java | 185 +- .../TestMultiSlaveReplication.java | 52 +- .../replication/TestNamespaceReplication.java | 34 +- ...amespaceReplicationWithBulkLoadedData.java | 97 +- .../TestNonHBaseReplicationEndpoint.java | 16 +- .../TestPerTableCFReplication.java | 85 +- .../TestRemoveFromSerialReplicationPeer.java | 2 +- .../replication/TestReplicationBase.java | 28 +- ...tReplicationChangingPeerRegionservers.java | 15 +- .../TestReplicationDisableInactivePeer.java | 17 +- .../TestReplicationDroppedTables.java | 20 +- ...cationEditsDroppedWithDeletedTableCFs.java | 28 +- ...plicationEditsDroppedWithDroppedTable.java | 28 +- .../TestReplicationEmptyWALRecovery.java | 15 +- .../replication/TestReplicationEndpoint.java | 94 +- .../TestReplicationKillMasterRS.java | 2 +- ...TestReplicationKillMasterRSCompressed.java | 8 +- ...cationKillMasterRSWithSeparateOldWALs.java | 2 +- .../replication/TestReplicationKillRS.java | 6 +- .../TestReplicationKillSlaveRS.java | 2 +- ...icationKillSlaveRSWithSeparateOldWALs.java | 3 +- .../TestReplicationMetricsforUI.java | 11 +- .../TestReplicationProcedureRetry.java | 11 +- .../TestReplicationSmallTests.java | 18 +- .../replication/TestReplicationStatus.java | 25 +- .../TestReplicationStatusAfterLagging.java | 2 +- ...ionStatusBothNormalAndRecoveryLagging.java | 2 +- .../TestReplicationStatusSink.java | 13 +- ...StatusSourceStartedTargetStoppedNewOp.java | 2 +- ...StatusSourceStartedTargetStoppedNoOps.java | 2 +- ...ourceStartedTargetStoppedWithRecovery.java | 4 +- ...stReplicationStuckWithDeletedTableCFs.java | 27 +- .../TestReplicationStuckWithDroppedTable.java | 26 +- .../TestReplicationSyncUpTool.java | 12 +- .../TestReplicationSyncUpToolBase.java | 2 +- .../TestReplicationWALEntryFilters.java | 59 +- .../replication/TestReplicationWithTags.java | 29 +- .../replication/TestSerialReplication.java | 14 +- .../TestSerialReplicationFailover.java | 2 +- .../TestVerifyCellsReplicationEndpoint.java | 2 +- .../master/TestTableCFsUpdater.java | 8 +- ...plicationEndpointWithMultipleAsyncWAL.java | 4 +- ...estReplicationEndpointWithMultipleWAL.java | 4 +- ...asterRSCompressedWithMultipleAsyncWAL.java | 10 +- ...KillMasterRSCompressedWithMultipleWAL.java | 10 +- ...icationSyncUpToolWithMultipleAsyncWAL.java | 4 +- ...tReplicationSyncUpToolWithMultipleWAL.java | 2 +- .../regionserver/TestBasicWALEntryStream.java | 12 +- .../TestBasicWALEntryStreamAsyncFSWAL.java | 2 +- .../TestBasicWALEntryStreamFSHLog.java | 2 +- .../TestDumpReplicationQueues.java | 36 +- .../TestGlobalReplicationThrottler.java | 26 +- ...ClusterReplicationEndpointFilterEdits.java | 37 +- 
...tMetaRegionReplicaReplicationEndpoint.java | 60 +- ...TestRaceWhenCreatingReplicationSource.java | 2 +- ...tRefreshPeerWhileRegionServerRestarts.java | 6 +- .../TestRefreshRecoveredReplication.java | 26 +- .../TestRegionReplicaReplicationEndpoint.java | 101 +- ...ionReplicaReplicationEndpointNoMaster.java | 58 +- .../TestReplicationCompressedWAL.java | 5 +- .../regionserver/TestReplicationSink.java | 104 +- .../TestReplicationSinkManager.java | 24 +- .../regionserver/TestReplicationSource.java | 185 +- .../TestReplicationSourceLogQueue.java | 9 +- .../TestReplicationSourceManager.java | 146 +- .../TestReplicationSourceManagerJoin.java | 8 +- .../TestReplicationSourceManagerZkImpl.java | 8 +- .../TestReplicationThrottler.java | 38 +- .../TestReplicationValueCompressedWAL.java | 4 +- .../regionserver/TestReplicator.java | 16 +- .../TestSerialReplicationChecker.java | 4 +- .../TestSerialReplicationEndpoint.java | 27 +- .../TestSourceFSConfigurationProvider.java | 26 +- .../regionserver/TestWALEntrySinkFilter.java | 120 +- .../TestWALEntryStreamDifferentCounts.java | 2 +- ...LEntryStreamDifferentCountsAsyncFSWAL.java | 5 +- ...stWALEntryStreamDifferentCountsFSHLog.java | 2 +- .../regionserver/WALEntryStreamTestBase.java | 2 +- ...SecurityEnabledUserProviderForTesting.java | 4 +- .../hadoop/hbase/security/TestSecureIPC.java | 81 +- .../hadoop/hbase/security/TestUser.java | 74 +- .../TestUsersOperationsWithSecureHadoop.java | 12 +- .../hbase/security/access/SecureTestUtil.java | 318 +- .../access/TestAccessControlFilter.java | 19 +- .../security/access/TestAccessController.java | 702 ++--- .../access/TestAccessController2.java | 110 +- .../access/TestAccessController3.java | 72 +- .../TestCellACLWithMultipleVersions.java | 168 +- .../hbase/security/access/TestCellACLs.java | 83 +- ...estCoprocessorWhitelistMasterObserver.java | 157 +- .../security/access/TestHDFSAclHelper.java | 63 +- .../access/TestNamespaceCommands.java | 125 +- .../access/TestPermissionBuilder.java | 18 +- .../security/access/TestRpcAccessChecks.java | 105 +- .../access/TestScanEarlyTermination.java | 23 +- .../TestSnapshotScannerHDFSAclController.java | 34 +- ...TestSnapshotScannerHDFSAclController2.java | 31 +- .../security/access/TestTablePermissions.java | 159 +- .../access/TestUnloadAccessController.java | 12 +- .../access/TestWithDisabledAuthorization.java | 191 +- .../access/TestZKPermissionWatcher.java | 21 +- ...tomSaslAuthenticationProviderTestBase.java | 12 +- ...lAuthenticationProviderNettyRpcServer.java | 2 +- ...AuthenticationProviderSimpleRpcServer.java | 2 +- ...TestSaslServerAuthenticationProviders.java | 41 +- .../security/token/SecureTestCluster.java | 29 +- .../security/token/TestAuthenticationKey.java | 6 +- .../TestDelegationTokenWithEncryption.java | 8 +- .../security/token/TestFsDelegationToken.java | 37 +- .../token/TestGenerateDelegationToken.java | 11 +- .../token/TestTokenAuthentication.java | 186 +- .../security/token/TestZKSecretWatcher.java | 65 +- .../token/TestZKSecretWatcherRefreshKeys.java | 21 +- ...ExpAsStringVisibilityLabelServiceImpl.java | 85 +- .../LabelFilteringScanLabelGenerator.java | 5 +- ...TestDataGeneratorWithVisibilityLabels.java | 42 +- .../TestDefaultScanLabelGeneratorStack.java | 22 +- .../TestEnforcingScanLabelGenerator.java | 16 +- .../visibility/TestExpressionExpander.java | 83 +- .../visibility/TestExpressionParser.java | 30 +- ...bilityLabelReplicationWithExpAsString.java | 44 +- .../visibility/TestVisibilityLabels.java | 175 +- 
...bilityLabelsOnNewVersionBehaviorTable.java | 15 +- ...bilityLabelsOpWithDifferentUsersNoACL.java | 104 +- .../TestVisibilityLabelsReplication.java | 123 +- .../TestVisibilityLabelsWithACL.java | 123 +- ...sibilityLabelsWithCustomVisLabService.java | 13 +- ...ilityLabelsWithDefaultVisLabelService.java | 199 +- .../TestVisibilityLabelsWithDeletes.java | 90 +- .../TestVisibilityLabelsWithSLGStack.java | 30 +- .../TestVisibilityLablesWithGroups.java | 33 +- .../TestVisibilityWithCheckAuths.java | 79 +- .../TestWithDisabledAuthorization.java | 39 +- .../VisibilityLabelsWithDeletesTestBase.java | 4 +- .../visibility/VisibilityTestUtil.java | 26 +- .../snapshot/MobSnapshotTestingUtils.java | 2 +- .../hbase/snapshot/SnapshotTestingUtils.java | 377 ++- ...TestConcurrentFlushSnapshotFromClient.java | 4 +- .../snapshot/TestFlushSnapshotFromClient.java | 64 +- .../TestMobFlushSnapshotFromClient.java | 15 +- ...TestMobRestoreFlushSnapshotFromClient.java | 15 +- .../TestMobRestoreSnapshotHelper.java | 6 +- .../snapshot/TestRegionSnapshotTask.java | 55 +- .../TestRestoreFlushSnapshotFromClient.java | 43 +- .../snapshot/TestRestoreSnapshotHelper.java | 39 +- .../snapshot/TestSnapshotClientRetries.java | 27 +- .../TestSnapshotDescriptionUtils.java | 113 +- .../hbase/snapshot/TestSnapshotManifest.java | 33 +- .../snapshot/TestSnapshotStoreFileSize.java | 17 +- .../TestSnapshotWhenChoreCleaning.java | 13 +- .../hbase/tool/MapreduceTestingShim.java | 34 +- .../hbase/tool/TestCanaryStatusServlet.java | 21 +- .../hadoop/hbase/tool/TestCanaryTool.java | 6 +- .../hbase/tool/TestLoadIncrementalHFiles.java | 124 +- ...estLoadIncrementalHFilesSplitRecovery.java | 127 +- .../tool/TestSecureLoadIncrementalHFiles.java | 4 +- ...ureLoadIncrementalHFilesSplitRecovery.java | 6 +- .../coprocessor/CoprocessorValidatorTest.java | 52 +- .../hbase/trace/OpenTelemetryClassRule.java | 77 +- .../hbase/trace/OpenTelemetryTestRule.java | 4 +- .../hadoop/hbase/util/BaseTestHBaseFsck.java | 193 +- .../hadoop/hbase/util/ConstantDelayQueue.java | 7 +- .../hadoop/hbase/util/HBaseHomePath.java | 34 +- .../hbase/util/HFileArchiveTestingUtil.java | 44 +- .../hadoop/hbase/util/HFileTestUtil.java | 104 +- .../hbase/util/LauncherSecurityManager.java | 19 +- .../util/LoadTestDataGeneratorWithMOB.java | 28 +- .../util/LoadTestDataGeneratorWithTags.java | 44 +- .../apache/hadoop/hbase/util/MockServer.java | 12 +- .../hbase/util/MultiThreadedAction.java | 218 +- .../hbase/util/MultiThreadedReader.java | 109 +- .../util/MultiThreadedReaderWithACL.java | 41 +- .../hbase/util/MultiThreadedUpdater.java | 111 +- .../util/MultiThreadedUpdaterWithACL.java | 39 +- .../hbase/util/MultiThreadedWriter.java | 22 +- .../hbase/util/MultiThreadedWriterBase.java | 35 +- .../util/MultiThreadedWriterWithACL.java | 17 +- .../util/ProcessBasedLocalHBaseCluster.java | 115 +- .../hadoop/hbase/util/RestartMetaTest.java | 66 +- .../hbase/util/StoppableImplementation.java | 5 +- .../hbase/util/TestBloomFilterChunk.java | 93 +- .../TestBoundedPriorityBlockingQueue.java | 13 +- .../hadoop/hbase/util/TestByteBuffUtils.java | 4 +- .../hbase/util/TestCompressionTest.java | 34 +- .../hbase/util/TestConfigurationUtil.java | 4 +- .../hbase/util/TestConnectionCache.java | 2 +- .../hbase/util/TestCoprocessorScanPolicy.java | 28 +- .../util/TestDefaultEnvironmentEdge.java | 12 +- .../hadoop/hbase/util/TestEncryptionTest.java | 27 +- .../hbase/util/TestFSTableDescriptors.java | 22 +- .../apache/hadoop/hbase/util/TestFSUtils.java | 146 +- .../hadoop/hbase/util/TestFSVisitor.java | 
22 +- .../util/TestFromClientSide3WoUnsafe.java | 6 +- ...TestHBaseFsckCleanReplicationBarriers.java | 53 +- .../hbase/util/TestHBaseFsckComparator.java | 16 +- .../hbase/util/TestHBaseFsckEncryption.java | 22 +- .../hadoop/hbase/util/TestHBaseFsckMOB.java | 16 +- .../hbase/util/TestHBaseFsckReplication.java | 8 +- .../hbase/util/TestHFileArchiveUtil.java | 14 +- .../apache/hadoop/hbase/util/TestIdLock.java | 12 +- .../hbase/util/TestIdReadWriteLock.java | 34 +- .../util/TestIncrementingEnvironmentEdge.java | 9 +- .../hadoop/hbase/util/TestJSONMetricUtil.java | 2 +- .../hadoop/hbase/util/TestLossyCounting.java | 15 +- .../util/TestMiniClusterLoadEncoded.java | 17 +- .../util/TestMiniClusterLoadParallel.java | 16 +- .../util/TestMiniClusterLoadSequential.java | 50 +- .../hadoop/hbase/util/TestRegionMover1.java | 55 +- .../hadoop/hbase/util/TestRegionMover2.java | 23 +- .../hadoop/hbase/util/TestRegionMover3.java | 18 +- .../hbase/util/TestRegionSplitCalculator.java | 56 +- .../hadoop/hbase/util/TestRegionSplitter.java | 580 ++-- .../hadoop/hbase/util/TestRootPath.java | 7 +- .../hadoop/hbase/util/TestSortedList.java | 26 +- .../hadoop/hbase/util/TestStealJobQueue.java | 43 +- .../TestMajorCompactionRequest.java | 44 +- .../TestMajorCompactionTTLRequest.java | 13 +- .../util/compaction/TestMajorCompactor.java | 25 +- .../compaction/TestMajorCompactorTTL.java | 11 +- .../hbase/util/hbck/HbckTestingUtil.java | 30 +- .../util/test/LoadTestDataGenerator.java | 89 +- .../test/LoadTestDataGeneratorWithACL.java | 41 +- .../hbase/wal/CompressedWALTestBase.java | 41 +- .../apache/hadoop/hbase/wal/FaultyFSLog.java | 14 +- .../hadoop/hbase/wal/FileSystemProxy.java | 17 +- .../hadoop/hbase/wal/IOTestProvider.java | 81 +- ...SWALCorruptionDueToDanglingByteBuffer.java | 2 +- .../TestBoundedRegionGroupingStrategy.java | 30 +- .../hadoop/hbase/wal/TestCompressedWAL.java | 7 +- .../TestCompressedWALValueCompression.java | 15 +- .../hadoop/hbase/wal/TestDisabledWAL.java | 6 +- ...HLogCorruptionDueToDanglingByteBuffer.java | 2 +- ...onWithMultiPutDueToDanglingByteBuffer.java | 8 +- .../hadoop/hbase/wal/TestFSHLogProvider.java | 89 +- .../hbase/wal/TestOutputSinkWriter.java | 50 +- .../wal/TestRaceBetweenGetWALAndGetWALs.java | 2 +- .../hbase/wal/TestReadWriteSeqIdFiles.java | 2 +- .../hadoop/hbase/wal/TestSecureWAL.java | 10 +- .../hadoop/hbase/wal/TestWALFactory.java | 218 +- .../hadoop/hbase/wal/TestWALFiltering.java | 37 +- .../hadoop/hbase/wal/TestWALMethods.java | 61 +- .../wal/TestWALOpenAfterDNRollingStart.java | 9 +- .../hadoop/hbase/wal/TestWALProvider.java | 27 +- .../hbase/wal/TestWALReaderOnSecureWAL.java | 22 +- .../hadoop/hbase/wal/TestWALRootDir.java | 47 +- .../apache/hadoop/hbase/wal/TestWALSplit.java | 382 ++- .../TestWALSplitBoundedLogWriterCreation.java | 7 +- .../hbase/wal/TestWALSplitCompressed.java | 6 +- .../hadoop/hbase/wal/TestWALSplitToHFile.java | 66 +- .../wal/TestWALSplitValueCompression.java | 12 +- .../wal/TestWALSplitWithDeletedTableData.java | 17 +- .../hbase/wal/TestWrongMetaWALFileName.java | 2 +- ...uptionDueToDanglingByteBufferTestBase.java | 2 +- ...ltiPutDueToDanglingByteBufferTestBase.java | 11 +- .../hbase/wal/WALPerformanceEvaluation.java | 126 +- .../hbase/zookeeper/TestZooKeeperACL.java | 108 +- .../src/test/resources/mapred-site.xml | 1 - .../hbase-shaded-check-invariants/pom.xml | 30 +- .../hbase-shaded-client-byo-hadoop/pom.xml | 302 +- hbase-shaded/hbase-shaded-client/pom.xml | 26 +- hbase-shaded/hbase-shaded-mapreduce/pom.xml | 722 ++--- 
.../hbase-shaded-testing-util-tester/pom.xml | 7 +- .../shaded/TestShadedHBaseTestingUtility.java | 5 +- .../hbase-shaded-testing-util/pom.xml | 19 +- .../org/eclipse/jetty/webapp/webdefault.xml | 1 - .../pom.xml | 30 +- hbase-shaded/pom.xml | 130 +- hbase-shell/pom.xml | 121 +- .../hbase/client/AbstractTestShell.java | 3 +- .../hadoop/hbase/client/TestAdminShell.java | 5 +- .../hadoop/hbase/client/TestAdminShell2.java | 5 +- .../hbase/client/TestChangeSftShell.java | 2 +- .../hadoop/hbase/client/TestQuotasShell.java | 5 +- .../hadoop/hbase/client/TestRSGroupShell.java | 9 +- .../hbase/client/TestReplicationShell.java | 7 +- .../apache/hadoop/hbase/client/TestShell.java | 9 +- .../hbase/client/TestShellNoCluster.java | 5 +- .../hadoop/hbase/client/TestTableShell.java | 5 +- .../client/procedure/ShellTestProcedure.java | 13 +- hbase-testing-util/pom.xml | 26 +- .../hbase/testing/TestingHBaseCluster.java | 2 +- .../testing/TestingHBaseClusterImpl.java | 2 +- .../testing/TestingHBaseClusterOption.java | 10 +- .../hbase/TestHBaseTestingUtilSpinup.java | 8 +- .../testing/TestTestingHBaseCluster.java | 2 +- .../TestTestingHBaseClusterImplForCPs.java | 2 +- hbase-thrift/pom.xml | 257 +- .../apache/hadoop/hbase/thrift/CallQueue.java | 22 +- .../apache/hadoop/hbase/thrift/Constants.java | 61 +- .../hbase/thrift/HBaseServiceHandler.java | 19 +- .../thrift/HThreadedSelectorServerArgs.java | 71 +- .../thrift/HbaseHandlerMetricsProxy.java | 42 +- .../thrift/HttpAuthenticationException.java | 18 +- .../apache/hadoop/hbase/thrift/ImplType.java | 13 +- .../hbase/thrift/IncrementCoalescer.java | 41 +- .../hbase/thrift/IncrementCoalescerMBean.java | 1 - .../thrift/TBoundedThreadPoolServer.java | 114 +- .../thrift/THBaseThreadPoolExecutor.java | 13 +- .../thrift/ThriftHBaseServiceHandler.java | 469 ++- .../hbase/thrift/ThriftHttpServlet.java | 64 +- .../hadoop/hbase/thrift/ThriftMetrics.java | 54 +- .../hadoop/hbase/thrift/ThriftServer.java | 341 +- .../hadoop/hbase/thrift/ThriftUtilities.java | 121 +- .../thrift2/ThriftHBaseServiceHandler.java | 118 +- .../hadoop/hbase/thrift2/ThriftServer.java | 23 +- .../hadoop/hbase/thrift2/ThriftUtilities.java | 618 ++-- .../hbase/thrift2/client/ThriftAdmin.java | 125 +- .../thrift2/client/ThriftClientBuilder.java | 4 +- .../thrift2/client/ThriftConnection.java | 45 +- .../hbase/thrift2/client/ThriftTable.java | 88 +- .../thrift/ErrorThrowingGetObserver.java | 6 +- .../thrift/HBaseThriftTestingUtility.java | 9 +- .../thrift/TestBindExceptionHandling.java | 12 +- .../hadoop/hbase/thrift/TestCallQueue.java | 24 +- .../hbase/thrift/TestThriftHttpServer.java | 31 +- .../hadoop/hbase/thrift/TestThriftServer.java | 303 +- .../hbase/thrift/TestThriftServerCmdLine.java | 76 +- .../TestThriftSpnegoHttpFallbackServer.java | 58 +- .../thrift/TestThriftSpnegoHttpServer.java | 62 +- .../hbase/thrift/ThriftServerRunner.java | 18 +- .../hbase/thrift2/TestThrift2HttpServer.java | 11 +- .../thrift2/TestThrift2ServerCmdLine.java | 14 +- .../hbase/thrift2/TestThriftConnection.java | 134 +- .../TestThriftHBaseServiceHandler.java | 305 +- ...stThriftHBaseServiceHandlerWithLabels.java | 118 +- ...ThriftHBaseServiceHandlerWithReadOnly.java | 44 +- hbase-zookeeper/pom.xml | 164 +- .../hbase/zookeeper/ClusterStatusTracker.java | 46 +- .../hbase/zookeeper/DeletionListener.java | 18 +- .../hadoop/hbase/zookeeper/EmptyWatcher.java | 7 +- .../hadoop/hbase/zookeeper/HQuorumPeer.java | 56 +- .../hbase/zookeeper/InstancePending.java | 15 +- .../hbase/zookeeper/LoadBalancerTracker.java | 15 +- 
.../hbase/zookeeper/MasterAddressTracker.java | 122 +- .../MasterMaintenanceModeTracker.java | 15 +- .../hbase/zookeeper/MetaTableLocator.java | 110 +- .../hbase/zookeeper/MiniZooKeeperCluster.java | 72 +- .../hbase/zookeeper/PendingWatcher.java | 21 +- .../hbase/zookeeper/RecoverableZooKeeper.java | 208 +- .../zookeeper/RegionNormalizerTracker.java | 15 +- .../zookeeper/SnapshotCleanupTracker.java | 46 +- .../hadoop/hbase/zookeeper/ZKAclReset.java | 25 +- .../hbase/zookeeper/ZKAuthentication.java | 140 +- .../hadoop/hbase/zookeeper/ZKClusterId.java | 21 +- .../apache/hadoop/hbase/zookeeper/ZKDump.java | 46 +- .../hbase/zookeeper/ZKLeaderManager.java | 45 +- .../hadoop/hbase/zookeeper/ZKListener.java | 13 +- .../hadoop/hbase/zookeeper/ZKMainServer.java | 19 +- .../hadoop/hbase/zookeeper/ZKNodeTracker.java | 86 +- .../hadoop/hbase/zookeeper/ZKServerTool.java | 9 +- .../hadoop/hbase/zookeeper/ZKSplitLog.java | 25 +- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 871 +++--- .../hadoop/hbase/zookeeper/ZKWatcher.java | 177 +- .../hadoop/hbase/HBaseZKTestingUtility.java | 7 +- .../hbase/zookeeper/TestHQuorumPeer.java | 25 +- .../hbase/zookeeper/TestInstancePending.java | 4 +- .../zookeeper/TestMasterAddressTracker.java | 4 +- .../hbase/zookeeper/TestReadOnlyZKClient.java | 6 +- .../zookeeper/TestRecoverableZooKeeper.java | 14 +- .../TestRegionServerAddressTracker.java | 2 +- .../hbase/zookeeper/TestZKLeaderManager.java | 43 +- .../hbase/zookeeper/TestZKMainServer.java | 11 +- .../hadoop/hbase/zookeeper/TestZKMulti.java | 66 +- .../hbase/zookeeper/TestZKNodeTracker.java | 38 +- .../hadoop/hbase/zookeeper/TestZKUtil.java | 13 +- .../hbase/zookeeper/TestZKUtilNoServer.java | 8 +- 4584 files changed, 119787 insertions(+), 143318 deletions(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 527e543dec4..1a342a87559 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -32,7 +32,7 @@ These release notes cover new developer and user-facing incompatibilities, impor See the document http://hbase.apache.org/book.html#upgrade2.2 about how to upgrade from 2.0 or 2.1 to 2.2+. -HBase 2.2+ uses a new Procedure form assiging/unassigning/moving Regions. It does not process HBase 2.1 and 2.0's Unassign/Assign Procedure types. Upgrade requires that we first drain the Master Procedure Store of old style Procedures before starting the new 2.2 Master. So you need to make sure that before you kill the old version (2.0 or 2.1) Master, there is no region in transition. And once the new version (2.2+) Master is up, you can rolling upgrade RegionServers one by one. +HBase 2.2+ uses a new Procedure form assiging/unassigning/moving Regions. It does not process HBase 2.1 and 2.0's Unassign/Assign Procedure types. Upgrade requires that we first drain the Master Procedure Store of old style Procedures before starting the new 2.2 Master. So you need to make sure that before you kill the old version (2.0 or 2.1) Master, there is no region in transition. And once the new version (2.2+) Master is up, you can rolling upgrade RegionServers one by one. And there is a more safer way if you are running 2.1.1+ or 2.0.3+ cluster. It need four steps to upgrade Master. @@ -421,15 +421,15 @@ Previously the recovered.edits directory was under the root directory. This JIRA When oldwals (and hfile) cleaner cleans stale wals (and hfiles), it will periodically check and wait the clean results from filesystem, the total wait time will be no more than a max time. 
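As a rough, illustrative sketch (not part of this patch): the check-and-wait behaviour described above is controlled by ordinary HBase configuration properties (named in the next paragraph) which support dynamic configuration, so they could also be set programmatically through the Configuration API; in practice an operator would normally put them in hbase-site.xml. The 30-second timeout below is an assumed example value for a low-latency filesystem, not a recommendation from this note.

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // How often the cleaner re-checks whether a pending delete has completed.
    conf.setLong("hbase.oldwals.cleaner.thread.check.interval.msec", 500L); // default 500 ms
    conf.setLong("hbase.regionserver.hfilecleaner.thread.check.interval.msec", 1000L); // default 1000 ms
    // Maximum time to wait on a single delete before giving up (default 60 seconds).
    // 30 seconds here is purely an illustrative, assumed value.
    conf.setLong("hbase.oldwals.cleaner.thread.timeout.msec", 30000L);
    conf.setLong("hbase.regionserver.hfilecleaner.thread.timeout.msec", 30000L);
  }
}
```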
-The periodically wait and check configurations are hbase.oldwals.cleaner.thread.check.interval.msec (default is 500 ms) and hbase.regionserver.hfilecleaner.thread.check.interval.msec (default is 1000 ms). +The periodically wait and check configurations are hbase.oldwals.cleaner.thread.check.interval.msec (default is 500 ms) and hbase.regionserver.hfilecleaner.thread.check.interval.msec (default is 1000 ms). Meanwhile, The max time configurations are hbase.oldwals.cleaner.thread.timeout.msec and hbase.regionserver.hfilecleaner.thread.timeout.msec, they are set to 60 seconds by default. All support dynamic configuration. -e.g. in the oldwals cleaning scenario, one may consider tuning hbase.oldwals.cleaner.thread.timeout.msec and hbase.oldwals.cleaner.thread.check.interval.msec +e.g. in the oldwals cleaning scenario, one may consider tuning hbase.oldwals.cleaner.thread.timeout.msec and hbase.oldwals.cleaner.thread.check.interval.msec -1. While deleting a oldwal never complete (strange but possible), then delete file task needs to wait for a max of 60 seconds. Here, 60 seconds might be too long, or the opposite way is to increase more than 60 seconds in the use cases of slow file delete. +1. While deleting a oldwal never complete (strange but possible), then delete file task needs to wait for a max of 60 seconds. Here, 60 seconds might be too long, or the opposite way is to increase more than 60 seconds in the use cases of slow file delete. 2. The check and wait of a file delete is set to default in the period of 500 milliseconds, one might want to tune this checking period to a short interval to check more frequently or to a longer interval to avoid checking too often to manage their delete file task checking period (the longer interval may be use to avoid checking too fast while using a high latency storage). @@ -461,12 +461,12 @@ Solution: After this jira, the compaction event tracker will be writed to HFile. * [HBASE-21820](https://issues.apache.org/jira/browse/HBASE-21820) | *Major* | **Implement CLUSTER quota scope** -HBase contains two quota scopes: MACHINE and CLUSTER. Before this patch, set quota operations did not expose scope option to client api and use MACHINE as default, CLUSTER scope can not be set and used. +HBase contains two quota scopes: MACHINE and CLUSTER. Before this patch, set quota operations did not expose scope option to client api and use MACHINE as default, CLUSTER scope can not be set and used. Shell commands are as follows: set\_quota, TYPE =\> THROTTLE, TABLE =\> 't1', LIMIT =\> '10req/sec' This issue implements CLUSTER scope in a simple way: For user, namespace, user over namespace quota, use [ClusterLimit / RSNum] as machine limit. For table and user over table quota, use [ClusterLimit / TotalTableRegionNum \* MachineTableRegionNum] as machine limit. -After this patch, user can set CLUSTER scope quota, but MACHINE is still default if user ignore scope. +After this patch, user can set CLUSTER scope quota, but MACHINE is still default if user ignore scope. Shell commands are as follows: set\_quota, TYPE =\> THROTTLE, TABLE =\> 't1', LIMIT =\> '10req/sec' set\_quota, TYPE =\> THROTTLE, TABLE =\> 't1', LIMIT =\> '10req/sec', SCOPE =\> MACHINE @@ -491,11 +491,11 @@ Remove bloom filter type ROWPREFIX\_DELIMITED. May add it back when find a bette * [HBASE-21783](https://issues.apache.org/jira/browse/HBASE-21783) | *Major* | **Support exceed user/table/ns throttle quota if region server has available quota** -Support enable or disable exceed throttle quota. 
Exceed throttle quota means, user can over consume user/namespace/table quota if region server has additional available quota because other users don't consume at the same time. +Support enable or disable exceed throttle quota. Exceed throttle quota means, user can over consume user/namespace/table quota if region server has additional available quota because other users don't consume at the same time. Use the following shell commands to enable/disable exceed throttle quota: enable\_exceed\_throttle\_quota disable\_exceed\_throttle\_quota -There are two limits when enable exceed throttle quota: -1. Must set at least one read and one write region server throttle quota; +There are two limits when enable exceed throttle quota: +1. Must set at least one read and one write region server throttle quota; 2. All region server throttle quotas must be in seconds time unit. Because once previous requests exceed their quota and consume region server quota, quota in other time units may be refilled in a long time, this may affect later requests. @@ -621,7 +621,7 @@ Add a clearRegionLocationCache method in Connection to clear the region location * [HBASE-21713](https://issues.apache.org/jira/browse/HBASE-21713) | *Major* | **Support set region server throttle quota** -Support set region server rpc throttle quota which represents the read/write ability of region servers and throttles when region server's total requests exceeding the limit. +Support set region server rpc throttle quota which represents the read/write ability of region servers and throttles when region server's total requests exceeding the limit. Use the following shell command to set RS quota: set\_quota TYPE =\> THROTTLE, REGIONSERVER =\> 'all', THROTTLE\_TYPE =\> WRITE, LIMIT =\> '20000req/sec' @@ -650,7 +650,7 @@ Adds shell support for the following: * [HBASE-21734](https://issues.apache.org/jira/browse/HBASE-21734) | *Major* | **Some optimization in FilterListWithOR** -After HBASE-21620, the filterListWithOR has been a bit slow because we need to merge each sub-filter's RC , while before HBASE-21620, we will skip many RC merging, but the logic was wrong. So here we choose another way to optimaze the performance: removing the KeyValueUtil#toNewKeyCell. +After HBASE-21620, the filterListWithOR has been a bit slow because we need to merge each sub-filter's RC , while before HBASE-21620, we will skip many RC merging, but the logic was wrong. So here we choose another way to optimaze the performance: removing the KeyValueUtil#toNewKeyCell. Anoop Sam John suggested that the KeyValueUtil#toNewKeyCell can save some GC before because if we copy key part of cell into a single byte[], then the block the cell refering won't be refered by the filter list any more, the upper layer can GC the data block quickly. while after HBASE-21620, we will update the prevCellList for every encountered cell now, so the lifecycle of cell in prevCellList for FilterList will be quite shorter. so just use the cell ref for saving cpu. BTW, we removed all the arrays streams usage in filter list, because it's also quite time-consuming in our test. @@ -702,15 +702,15 @@ Python3 support was added to dev-support/submit-patch.py. To install newly requi In HBASE-21657, I simplified the path of estimatedSerialiedSize() & estimatedSerialiedSizeOfCell() by moving the general getSerializedSize() and heapSize() from ExtendedCell to Cell interface. The patch also included some other improvments: -1. 
For 99% of case, our cells has no tags, so let the HFileScannerImpl just return the NoTagsByteBufferKeyValue if no tags, which means we can save - lots of cpu time when sending no tags cell to rpc because can just return the length instead of getting the serialize size by caculating offset/length +1. For 99% of case, our cells has no tags, so let the HFileScannerImpl just return the NoTagsByteBufferKeyValue if no tags, which means we can save + lots of cpu time when sending no tags cell to rpc because can just return the length instead of getting the serialize size by caculating offset/length of each fields(row/cf/cq..) 2. Move the subclass's getSerializedSize implementation from ExtendedCell to their own class, which mean we did not need to call ExtendedCell's getSerialiedSize() firstly, then forward to subclass's getSerializedSize(withTags). 3. Give a estimated result arraylist size for avoiding the frequent list extension when in a big scan, now we estimate the array size as min(scan.rows, 512). it's also help a lot. -We gain almost ~40% throughput improvement in 100% scan case for branch-2 (cacheHitRatio~100%)[1], it's a good thing. While it's a incompatible change in +We gain almost ~40% throughput improvement in 100% scan case for branch-2 (cacheHitRatio~100%)[1], it's a good thing. While it's a incompatible change in some case, such as if the upstream user implemented their own Cells, although it's rare but can happen, then their compile will be error. @@ -732,7 +732,7 @@ Before this issue, thrift1 server and thrift2 server are totally different serve * [HBASE-21661](https://issues.apache.org/jira/browse/HBASE-21661) | *Major* | **Provide Thrift2 implementation of Table/Admin** -ThriftAdmin/ThriftTable are implemented based on Thrift2. With ThriftAdmin/ThriftTable, People can use thrift2 protocol just like HTable/HBaseAdmin. +ThriftAdmin/ThriftTable are implemented based on Thrift2. With ThriftAdmin/ThriftTable, People can use thrift2 protocol just like HTable/HBaseAdmin. Example of using ThriftConnection Configuration conf = HBaseConfiguration.create(); conf.set(ClusterConnection.HBASE\_CLIENT\_CONNECTION\_IMPL,ThriftConnection.class.getName()); @@ -766,7 +766,7 @@ Add a new configuration "hbase.skip.load.duplicate.table.coprocessor". The defau * [HBASE-21650](https://issues.apache.org/jira/browse/HBASE-21650) | *Major* | **Add DDL operation and some other miscellaneous to thrift2** -Added DDL operations and some other structure definition to thrift2. Methods added: +Added DDL operations and some other structure definition to thrift2. Methods added: create/modify/addColumnFamily/deleteColumnFamily/modifyColumnFamily/enable/disable/truncate/delete table create/modify/delete namespace get(list)TableDescriptor(s)/get(list)NamespaceDescirptor(s) @@ -845,8 +845,8 @@ hbase(main):003:0> rit hbase(main):004:0> unassign '56f0c38c81ae453d19906ce156a2d6a1' 0 row(s) in 0.0540 seconds -hbase(main):005:0> rit -IntegrationTestBigLinkedList,L\xCC\xCC\xCC\xCC\xCC\xCC\xCB,1539117183224.56f0c38c81ae453d19906ce156a2d6a1. state=PENDING_CLOSE, ts=Tue Oct 09 20:33:34 UTC 2018 (0s ago), server=null +hbase(main):005:0> rit +IntegrationTestBigLinkedList,L\xCC\xCC\xCC\xCC\xCC\xCC\xCB,1539117183224.56f0c38c81ae453d19906ce156a2d6a1. 
state=PENDING_CLOSE, ts=Tue Oct 09 20:33:34 UTC 2018 (0s ago), server=null 1 row(s) in 0.0170 seconds ``` @@ -1329,7 +1329,7 @@ This represents an incompatible change for users who relied on this implementati This enhances the AccessControlClient APIs to retrieve the permissions based on namespace, table name, family and qualifier for specific user. AccessControlClient can also validate a user whether allowed to perform specified operations on a particular table. Following APIs have been added, -1) getUserPermissions(Connection connection, String tableRegex, byte[] columnFamily, byte[] columnQualifier, String userName) +1) getUserPermissions(Connection connection, String tableRegex, byte[] columnFamily, byte[] columnQualifier, String userName) Scope of retrieving permission will be same as existing. 2) hasPermission(onnection connection, String tableName, byte[] columnFamily, byte[] columnQualifier, String userName, Permission.Action... actions) Scope of validating user privilege, @@ -2095,11 +2095,11 @@ ColumnValueFilter provides a way to fetch matched cells only by providing specif A region is flushed if its memory component exceeds the region flush threshold. A flush policy decides which stores to flush by comparing the size of the store to a column-family-flush threshold. -If the overall size of all memstores in the machine exceeds the bounds defined by the administrator (denoted global pressure) a region is selected and flushed. +If the overall size of all memstores in the machine exceeds the bounds defined by the administrator (denoted global pressure) a region is selected and flushed. HBASE-18294 changes flush decisions to be based on heap-occupancy and not data (key-value) size, consistently across levels. This rolls back some of the changes by HBASE-16747. Specifically, (1) RSs, Regions and stores track their overall on-heap and off-heap occupancy, (2) A region is flushed when its on-heap+off-heap size exceeds the region flush threshold specified in hbase.hregion.memstore.flush.size, -(3) The store to be flushed is chosen based on its on-heap+off-heap size +(3) The store to be flushed is chosen based on its on-heap+off-heap size (4) At the RS level, a flush is triggered when the overall on-heap exceeds the on-heap limit, or when the overall off-heap size exceeds the off-heap limit (low/high water marks). Note that when the region flush size is set to XXmb a region flush may be triggered even before writing keys and values of size XX because the total heap occupancy of the region which includes additional metadata exceeded the threshold. @@ -2615,13 +2615,13 @@ And for server side, the default hbase.client.serverside.retries.multiplier was * [HBASE-18090](https://issues.apache.org/jira/browse/HBASE-18090) | *Major* | **Improve TableSnapshotInputFormat to allow more multiple mappers per region** -In this task, we make it possible to run multiple mappers per region in the table snapshot. The following code is primary table snapshot mapper initializatio: +In this task, we make it possible to run multiple mappers per region in the table snapshot. 
The following code is primary table snapshot mapper initializatio: TableMapReduceUtil.initTableSnapshotMapperJob( snapshotName, // The name of the snapshot (of a table) to read from scan, // Scan instance to control CF and attribute selection mapper, // mapper - outputKeyClass, // mapper output key + outputKeyClass, // mapper output key outputValueClass, // mapper output value job, // The current job to adjust true, // upload HBase jars and jars for any of the configured job classes via the distributed cache (tmpjars) @@ -2634,7 +2634,7 @@ TableMapReduceUtil.initTableSnapshotMapperJob( snapshotName, // The name of the snapshot (of a table) to read from scan, // Scan instance to control CF and attribute selection mapper, // mapper - outputKeyClass, // mapper output key + outputKeyClass, // mapper output key outputValueClass, // mapper output value job, // The current job to adjust true, // upload HBase jars and jars for any of the configured job classes via the distributed cache (tmpjars) @@ -2672,7 +2672,7 @@ List\ getTags() Optional\ getTag(byte type) byte[] cloneTags() -The above APIs helps to read tags from the Cell. +The above APIs helps to read tags from the Cell. CellUtil#createCell(Cell cell, List\ tags) CellUtil#createCell(Cell cell, byte[] tags) @@ -2808,7 +2808,7 @@ Change the import order rule that now we should put the shaded import at bottom. * [HBASE-19187](https://issues.apache.org/jira/browse/HBASE-19187) | *Minor* | **Remove option to create on heap bucket cache** Removing the on heap Bucket cache feature. -The config "hbase.bucketcache.ioengine" no longer support the 'heap' value. +The config "hbase.bucketcache.ioengine" no longer support the 'heap' value. Its supported values now are 'offheap', 'file:\', 'files:\' and 'mmap:\' @@ -2964,12 +2964,12 @@ Removes blanket bypass mechanism (Observer#bypass). Instead, a curated subset of The below methods have been marked deprecated in hbase2. We would have liked to have removed them because they use IA.Private parameters but they are in use by CoreCoprocessors or are critical to downstreamers and we have no alternatives to provide currently. @Deprecated public boolean prePrepareTimeStampForDeleteVersion(final Mutation mutation, final Cell kv, final byte[] byteNow, final Get get) throws IOException { - + @Deprecated public boolean preWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) throws IOException { @Deprecated public void postWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) throws IOException { - -@Deprecated public DeleteTracker postInstantiateDeleteTracker(DeleteTracker result) throws IOException + +@Deprecated public DeleteTracker postInstantiateDeleteTracker(DeleteTracker result) throws IOException Metrics are updated now even if the Coprocessor does a bypass; e.g. The put count is updated even if a Coprocessor bypasses the core put operation (We do it this way so no need for Coprocessors to have access to our core metrics system). @@ -3000,7 +3000,7 @@ Made defaults for Server#isStopping and Server#getFileSystem. 
Should have done t * [HBASE-19047](https://issues.apache.org/jira/browse/HBASE-19047) | *Critical* | **CP exposed Scanner types should not extend Shipper** RegionObserver#preScannerOpen signature changed -RegionScanner preScannerOpen( ObserverContext\ c, Scan scan, RegionScanner s) -\> void preScannerOpen( ObserverContext\ c, Scan scan) +RegionScanner preScannerOpen( ObserverContext\ c, Scan scan, RegionScanner s) -\> void preScannerOpen( ObserverContext\ c, Scan scan) The pre hook can no longer return a RegionScanner instance. @@ -3084,12 +3084,12 @@ Add missing deprecation tag for long getRpcTimeout(TimeUnit unit) in AsyncTableB * [HBASE-18410](https://issues.apache.org/jira/browse/HBASE-18410) | *Major* | **FilterList Improvement.** -In this task, we fixed all existing bugs in FilterList, and did the code refactor which ensured interface compatibility . +In this task, we fixed all existing bugs in FilterList, and did the code refactor which ensured interface compatibility . -The primary bug fixes are : -1. For sub-filter in FilterList with MUST\_PASS\_ONE, if previous filterKeyValue() of sub-filter returns NEXT\_COL, we cannot make sure that the next cell will be the first cell in next column, because FilterList choose the minimal forward step among sub-filters, and it may return a SKIP. so here we add an extra check to ensure that the next cell will match preivous return code for sub-filters. +The primary bug fixes are : +1. For sub-filter in FilterList with MUST\_PASS\_ONE, if previous filterKeyValue() of sub-filter returns NEXT\_COL, we cannot make sure that the next cell will be the first cell in next column, because FilterList choose the minimal forward step among sub-filters, and it may return a SKIP. so here we add an extra check to ensure that the next cell will match preivous return code for sub-filters. 2. Previous logic about transforming cell of FilterList is incorrect, we should set the previous transform result (rather than the given cell in question) as the initial vaule of transform cell before call filterKeyValue() of FilterList. -3. Handle the ReturnCodes which the previous code did not handle. +3. Handle the ReturnCodes which the previous code did not handle. About code refactor, we divided the FilterList into two separated sub-classes: FilterListWithOR and FilterListWithAND, The FilterListWithOR has been optimised to choose the next minimal step to seek cell rather than SKIP cell one by one, and the FilterListWithAND has been optimised to choose the next maximal key to seek among sub-filters in filter list. All in all, The code in FilterList is clean and easier to follow now. @@ -3901,7 +3901,7 @@ Changes ObserverContext from a class to an interface and hides away constructor, * [HBASE-18649](https://issues.apache.org/jira/browse/HBASE-18649) | *Major* | **Deprecate KV Usage in MR to move to Cells in 3.0** -All the mappers and reducers output type will be now of MapReduceCell type. No more KeyValue type. How ever in branch-2 for compatibility we have allowed the older interfaces/classes that work with KeyValue to stay in the code base but they have been marked as deprecated. +All the mappers and reducers output type will be now of MapReduceCell type. No more KeyValue type. How ever in branch-2 for compatibility we have allowed the older interfaces/classes that work with KeyValue to stay in the code base but they have been marked as deprecated. 
The following interfaces/classes have been deprecated in branch-2 Import#KeyValueWritableComparablePartitioner Import#KeyValueWritableComparator @@ -3936,8 +3936,8 @@ The changes of IA.Public/IA.LimitedPrivate classes are shown below: HTableDescriptor class \* boolean hasRegionMemstoreReplication() + boolean hasRegionMemStoreReplication() -\* HTableDescriptor setRegionMemstoreReplication(boolean) -+ HTableDescriptor setRegionMemStoreReplication(boolean) +\* HTableDescriptor setRegionMemstoreReplication(boolean) ++ HTableDescriptor setRegionMemStoreReplication(boolean) RegionLoadStats class \* int getMemstoreLoad() @@ -4013,8 +4013,8 @@ HBaseTestingUtility class - void modifyTableSync(Admin admin, HTableDescriptor desc) - HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey, byte [] endKey) - HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) -- HRegion createLocalHRegion(HRegionInfo info, TableDescriptor desc) -+ HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) +- HRegion createLocalHRegion(HRegionInfo info, TableDescriptor desc) ++ HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) - HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, WAL wal) - HRegion createLocalHRegion(HRegionInfo info, TableDescriptor desc, WAL wal) + HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal) @@ -4121,7 +4121,7 @@ We used to pass the RegionServerServices (RSS) which gave Coprocesosrs (CP) all Removed method getRegionServerServices from CP exposed RegionCoprocessorEnvironment and RegionServerCoprocessorEnvironment and replaced with getCoprocessorRegionServerServices. This returns a new interface CoprocessorRegionServerServices which is only a subset of RegionServerServices. With that below methods are no longer exposed for CPs WAL getWAL(HRegionInfo regionInfo) -List\ getWALs() +List\ getWALs() FlushRequester getFlushRequester() RegionServerAccounting getRegionServerAccounting() RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() @@ -4161,8 +4161,8 @@ void addToOnlineRegions(Region region) boolean removeFromOnlineRegions(final Region r, ServerName destination) Also 3 methods name have been changed -List\ getOnlineRegions(TableName tableName) -\> List\ getRegions(TableName tableName) -List\ getOnlineRegions() -\> List\ getRegions() +List\ getOnlineRegions(TableName tableName) -\> List\ getRegions(TableName tableName) +List\ getOnlineRegions() -\> List\ getRegions() Region getFromOnlineRegions(final String encodedRegionName) -\> Region getRegion(final String encodedRegionName) @@ -4225,7 +4225,7 @@ void closeReader(boolean evictOnClose) throws IOException; void markCompactedAway(); void deleteReader() throws IOException; -Notice that these methods are still available in HStoreFile. +Notice that these methods are still available in HStoreFile. And the return value of getFirstKey and getLastKey are changed from Cell to Optional\ to better indicate that they may not be available. @@ -4528,7 +4528,7 @@ Replaces hbase-shaded-server-\.jar with hbase-shaded-mapreduce-\,SnapshotDescription,TableDescripto) + preRestoreSnapshot(ObserverContext\,List\, List\,String) ++ preGetTableDescriptors(ObserverContext\,List\, List\,String) + postGetTableDescriptors(ObserverContext\,List\, List\,String) + preGetTableNames(ObserverContext\,List\, String) + postGetTableNames(ObserverContext\,List\, String) @@ -5063,11 +5063,11 @@ Committed to master and branch-2. Thanks! In order to use this feature, a user must 
 1. Register their tables when configuring their job -
2. Create a composite key of the tablename and original rowkey to send as the mapper output key. +
2. Create a composite key of the tablename and original rowkey to send as the mapper output key. 

To register their tables (and configure their job for incremental load into multiple tables), a user must call the static MultiHFileOutputFormat.configureIncrementalLoad function to register the HBase tables that will be ingested into. 

 -To create the composite key, a helper function MultiHFileOutputFormat2.createCompositeKey should be called with the destination tablename and rowkey as arguments, and the result should be output as the mapper key. +To create the composite key, a helper function MultiHFileOutputFormat2.createCompositeKey should be called with the destination tablename and rowkey as arguments, and the result should be output as the mapper key. 
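Example (illustrative only) of the two steps above in one place. The class names MultiHFileOutputFormat / MultiHFileOutputFormat2 are taken from the note, but the configureIncrementalLoad overload and the createCompositeKey signature shown here are assumptions, as are the table/family names and the CSV input layout; consult the shipped javadoc for the exact parameter lists.

{code}
// Sketch under stated assumptions: configureIncrementalLoad(Job, List<TableName>) and
// createCompositeKey(byte[] tableName, byte[] rowKey) are assumed shapes, not the
// verified shipped signatures. Imports for the MultiHFileOutputFormat classes omitted.
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;

public class MultiTableBulkLoad {

  /** Mapper that prefixes every output key with the destination table name. */
  public static class RoutingMapper
      extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
    @Override
    protected void map(LongWritable offset, Text line, Context context)
        throws IOException, InterruptedException {
      // Hypothetical CSV input: tableName,rowKey,value
      String[] parts = line.toString().split(",");
      byte[] table = Bytes.toBytes(parts[0]);
      byte[] row = Bytes.toBytes(parts[1]);

      // Assumed helper from the note: composite key = destination table name + original row key.
      byte[] composite = MultiHFileOutputFormat2.createCompositeKey(table, row);

      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(parts[2]));
      context.write(new ImmutableBytesWritable(composite), put);
    }
  }

  /** Driver side: register every destination table for incremental load (assumed overload). */
  public static Job createJob(List<TableName> tablesToLoad) throws IOException {
    Job job = Job.getInstance(HBaseConfiguration.create(), "multi-table bulk load");
    job.setJarByClass(MultiTableBulkLoad.class);
    job.setMapperClass(RoutingMapper.class);
    MultiHFileOutputFormat.configureIncrementalLoad(job, tablesToLoad);
    return job;
  }
}
{code}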
Before this JIRA, for HFileOutputFormat2 a configuration for the storage policy was set per Column Family. This was set manually by the user. In this JIRA, this is unchanged when using HFileOutputFormat2. However, when specifically using MultiHFileOutputFormat2, the user now has to manually set the prefix by creating a composite of the table name and the column family. The user can create the new composite value by calling MultiHFileOutputFormat2.createCompositeKey with the tablename and column family as arguments. @@ -5080,9 +5080,9 @@ The configuration parameter "hbase.mapreduce.hfileoutputformat.table.name" is no * [HBASE-18229](https://issues.apache.org/jira/browse/HBASE-18229) | *Critical* | **create new Async Split API to embrace AM v2** -A new splitRegionAsync() API is added in client. The existing splitRegion() and split() API will call the new API so client does not have to change its code. +A new splitRegionAsync() API is added in client. The existing splitRegion() and split() API will call the new API so client does not have to change its code. -Move HBaseAdmin.splitXXX() logic to master, client splitXXX() API now go to master directly instead of going to RegionServer first. +Move HBaseAdmin.splitXXX() logic to master, client splitXXX() API now go to master directly instead of going to RegionServer first. Also added splitSync() API @@ -5236,7 +5236,7 @@ Add unit tests for truncate\_preserve * [HBASE-18240](https://issues.apache.org/jira/browse/HBASE-18240) | *Major* | **Add hbase-thirdparty, a project with hbase utility including an hbase-shaded-thirdparty module with guava, netty, etc.** -Adds a new project, hbase-thirdparty, at https://git-wip-us.apache.org/repos/asf/hbase-thirdparty used by core hbase. GroupID org.apache.hbase.thirdparty. Version 1.0.0. +Adds a new project, hbase-thirdparty, at https://git-wip-us.apache.org/repos/asf/hbase-thirdparty used by core hbase. GroupID org.apache.hbase.thirdparty. Version 1.0.0. This project packages relocated third-party libraries used by Apache HBase such as protobuf, guava, and netty among others. HBase core depends on it. @@ -5275,9 +5275,9 @@ After HBASE-17110 the bytable strategy for SimpleLoadBalancer will also take ser Adds clear\_compaction\_queues to the hbase shell. {code} Clear compaction queues on a regionserver. - The queue\_name contains short and long. + The queue\_name contains short and long. short is shortCompactions's queue,long is longCompactions's queue. - + Examples: hbase\> clear\_compaction\_queues 'host187.example.com,60020' hbase\> clear\_compaction\_queues 'host187.example.com,60020','long' @@ -5367,8 +5367,8 @@ Adds a sort of procedures before submission so system tables are queued first (w * [HBASE-18008](https://issues.apache.org/jira/browse/HBASE-18008) | *Major* | **Any HColumnDescriptor we give out should be immutable** -1) The HColumnDescriptor got from Admin, AsyncAdmin, and Table is immutable. -2) HColumnDescriptor have been marked as "Deprecated" and user should substituted +1) The HColumnDescriptor got from Admin, AsyncAdmin, and Table is immutable. +2) HColumnDescriptor have been marked as "Deprecated" and user should substituted ColumnFamilyDescriptor for HColumnDescriptor. 3) ColumnFamilyDescriptor is constructed through ColumnFamilyDescriptorBuilder and it contains all of the read-only methods from HColumnDescriptor 4) The value to which the IS\_MOB/MOB\_THRESHOLD is mapped is stored as String rather than Boolean/Long. 
The MOB is an new feature to 2.0 so this change should be acceptable @@ -5551,7 +5551,7 @@ The default behavior for abort() method of StateMachineProcedure class is change * [HBASE-16851](https://issues.apache.org/jira/browse/HBASE-16851) | *Major* | **User-facing documentation for the In-Memory Compaction feature** -Two blog posts on Apache HBase blog: user manual and programmer manual. +Two blog posts on Apache HBase blog: user manual and programmer manual. Ref. guide draft published: https://docs.google.com/document/d/1Xi1jh\_30NKnjE3wSR-XF5JQixtyT6H\_CdFTaVi78LKw/edit @@ -5564,18 +5564,18 @@ Ref. guide draft published: https://docs.google.com/document/d/1Xi1jh\_30NKnjE3w CompactingMemStore achieves these gains through smart use of RAM. The algorithm periodically re-organizes the in-memory data in efficient data structures and reduces redundancies. The HBase server’s memory footprint therefore periodically expands and contracts. The outcome is longer lifetime of data in memory, less I/O, and overall faster performance. More details about the algorithm and its use appear in the Apache HBase Blog: https://blogs.apache.org/hbase/ How To Use: -The in-memory compaction level can be configured both globally and per column family. The supported levels are none (DefaultMemStore), basic, and eager. +The in-memory compaction level can be configured both globally and per column family. The supported levels are none (DefaultMemStore), basic, and eager. -By default, all tables apply basic in-memory compaction. This global configuration can be overridden in hbase-site.xml, as follows: +By default, all tables apply basic in-memory compaction. This global configuration can be overridden in hbase-site.xml, as follows: \ \hbase.hregion.compacting.memstore.type\ \\\ \ -The level can also be configured in the HBase shell per column family, as follows: +The level can also be configured in the HBase shell per column family, as follows: -create ‘\’, +create ‘\’, {NAME =\> ‘\’, IN\_MEMORY\_COMPACTION =\> ‘\’} @@ -5656,7 +5656,7 @@ MVCCPreAssign is added by HBASE-16698, but pre-assign mvcc is only used in put/d * [HBASE-16466](https://issues.apache.org/jira/browse/HBASE-16466) | *Major* | **HBase snapshots support in VerifyReplication tool to reduce load on live HBase cluster with large tables** -Support for snapshots in VerifyReplication tool i.e. verifyrep can compare source table snapshot against peer table snapshot which reduces load on RS by reading data from HDFS directly using Snapshot scanners. +Support for snapshots in VerifyReplication tool i.e. verifyrep can compare source table snapshot against peer table snapshot which reduces load on RS by reading data from HDFS directly using Snapshot scanners. Instead of comparing against live tables whose state changes due to writes and compactions its better to compare HBase snapshots which are immutable in nature. @@ -5827,7 +5827,7 @@ Now small scan and limited scan could also return partial results. * [HBASE-16014](https://issues.apache.org/jira/browse/HBASE-16014) | *Major* | **Get and Put constructor argument lists are divergent** Add 2 constructors fot API Get -1. Get(byte[], int, int) +1. Get(byte[], int, int) 2. Get(ByteBuffer) @@ -5986,7 +5986,7 @@ Changes all tests to use the TestName JUnit Rule everywhere rather than hardcode The HBase cleaner chore process cleans up old WAL files and archived HFiles. Cleaner operation can affect query performance when running heavy workloads, so disable the cleaner during peak hours. 
The cleaner has the following HBase shell commands: -- cleaner\_chore\_enabled: Queries whether cleaner chore is enabled/ disabled. +- cleaner\_chore\_enabled: Queries whether cleaner chore is enabled/ disabled. - cleaner\_chore\_run: Manually runs the cleaner to remove files. - cleaner\_chore\_switch: enables or disables the cleaner and returns the previous state of the cleaner. For example, cleaner-switch true enables the cleaner. @@ -6049,8 +6049,8 @@ Now the scan.setSmall method is deprecated. Consider using scan.setLimit and sca Mob compaction partition policy can be set by hbase\> create 't1', {NAME =\> 'f1', IS\_MOB =\> true, MOB\_THRESHOLD =\> 1000000, MOB\_COMPACT\_PARTITION\_POLICY =\> 'weekly'} - -or + +or hbase\> alter 't1', {NAME =\> 'f1', IS\_MOB =\> true, MOB\_THRESHOLD =\> 1000000, MOB\_COMPACT\_PARTITION\_POLICY =\> 'monthly'} @@ -6093,16 +6093,16 @@ Fix inability at finding static content post push of parent issue moving us to j * [HBASE-9774](https://issues.apache.org/jira/browse/HBASE-9774) | *Major* | **HBase native metrics and metric collection for coprocessors** -This issue adds two new modules, hbase-metrics and hbase-metrics-api which define and implement the "new" metric system used internally within HBase. These two modules (and some other code in hbase-hadoop2-compat) module are referred as "HBase metrics framework" which is HBase-specific and independent of any other metrics library (including Hadoop metrics2 and dropwizards metrics). +This issue adds two new modules, hbase-metrics and hbase-metrics-api which define and implement the "new" metric system used internally within HBase. These two modules (and some other code in hbase-hadoop2-compat) module are referred as "HBase metrics framework" which is HBase-specific and independent of any other metrics library (including Hadoop metrics2 and dropwizards metrics). HBase Metrics API (hbase-metrics-api) contains the interface that HBase exposes internally and to third party code (including coprocessors). It is a thin -abstraction over the actual implementation for backwards compatibility guarantees. The metrics API in this hbase-metrics-api module is inspired by the Dropwizard metrics 3.1 API, however, the API is completely independent. +abstraction over the actual implementation for backwards compatibility guarantees. The metrics API in this hbase-metrics-api module is inspired by the Dropwizard metrics 3.1 API, however, the API is completely independent. -hbase-metrics module contains implementation of the "HBase Metrics API", including MetricRegistry, Counter, Histogram, etc. These are highly concurrent implementations of the Metric interfaces. Metrics in HBase are grouped into different sets (like WAL, RPC, RegionServer, etc). Each group of metrics should be tracked via a MetricRegistry specific to that group. +hbase-metrics module contains implementation of the "HBase Metrics API", including MetricRegistry, Counter, Histogram, etc. These are highly concurrent implementations of the Metric interfaces. Metrics in HBase are grouped into different sets (like WAL, RPC, RegionServer, etc). Each group of metrics should be tracked via a MetricRegistry specific to that group. Historically, HBase has been using Hadoop's Metrics2 framework [3] for collecting and reporting the metrics internally. However, due to the difficultly of dealing with the Metrics2 framework, HBase is moving away from Hadoop's metrics implementation to its custom implementation. 
The move will happen incrementally, and during the time, both Hadoop Metrics2-based metrics and hbase-metrics module based classes will be in the source code. All new implementations for metrics SHOULD use the new API and framework. -This jira also introduces the metrics API to coprocessor implementations. Coprocessor writes can export custom metrics using the API and have those collected via metrics2 sinks, as well as exported via JMX in regionserver metrics. +This jira also introduces the metrics API to coprocessor implementations. Coprocessor writes can export custom metrics using the API and have those collected via metrics2 sinks, as well as exported via JMX in regionserver metrics. More documentation available at: hbase-metrics-api/README.txt @@ -6166,7 +6166,7 @@ Move locking to be procedure (Pv2) rather than zookeeper based. All locking move * [HBASE-17470](https://issues.apache.org/jira/browse/HBASE-17470) | *Major* | **Remove merge region code from region server** -In 1.x branches, Admin.mergeRegions calls MASTER via dispatchMergingRegions RPC; when executing dispatchMergingRegions RPC, MASTER calls RS via MergeRegions to complete the merge in RS-side. +In 1.x branches, Admin.mergeRegions calls MASTER via dispatchMergingRegions RPC; when executing dispatchMergingRegions RPC, MASTER calls RS via MergeRegions to complete the merge in RS-side. With HBASE-16119, the merge logic moves to master-side. This JIRA cleans up unused RPCs (dispatchMergingRegions and MergeRegions) , removes dangerous tools such as Merge and HMerge, and deletes unused RegionServer-side merge region logic in 2.0 release. @@ -6336,7 +6336,7 @@ Possible memstore compaction policies are: Memory compaction policeman be set at the column family level at table creation time: {code} create ‘\’, - {NAME =\> ‘\’, + {NAME =\> ‘\’, IN\_MEMORY\_COMPACTION =\> ‘\’} {code} or as a property at the global configuration level by setting the property in hbase-site.xml, with BASIC being the default value: @@ -6374,7 +6374,7 @@ Provides ability to restrict table coprocessors based on HDFS path whitelist. (P * [HBASE-17221](https://issues.apache.org/jira/browse/HBASE-17221) | *Major* | **Abstract out an interface for RpcServer.Call** -Provide an interface RpcCall on the server side. +Provide an interface RpcCall on the server side. RpcServer.Call now is marked as @InterfaceAudience.Private, and implements the interface RpcCall, @@ -6682,7 +6682,7 @@ Add AsyncConnection, AsyncTable and AsyncTableRegionLocator. Now the AsyncTable This issue fix three bugs: 1. rpcTimeout configuration not work for one rpc call in AP -2. operationTimeout configuration not work for multi-request (batch, put) in AP +2. operationTimeout configuration not work for multi-request (batch, put) in AP 3. setRpcTimeout and setOperationTimeout in HTable is not worked for AP and BufferedMutator. @@ -6712,7 +6712,7 @@ exist in a cleanly closed file. If an EOF is detected due to parsing or other errors while there are still unparsed bytes before the end-of-file trailer, we now reset the WAL to the very beginning and attempt a clean read-through. Because we will retry these failures indefinitely, two additional changes are made to help with diagnostics: \* On each retry attempt, a log message like the below will be emitted at the WARN level: - + Processing end of WAL file '{}'. At position {}, which is too far away from reported file length {}. Restarting WAL reading (see HBASE-15983 for details). @@ -7035,7 +7035,7 @@ Adds logging of region and server. 
Helpful debugging. Logging now looks like thi * [HBASE-14743](https://issues.apache.org/jira/browse/HBASE-14743) | *Minor* | **Add metrics around HeapMemoryManager** -A memory metrics reveals situations happened in both MemStores and BlockCache in RegionServer. Through this metrics, users/operators can know +A memory metrics reveals situations happened in both MemStores and BlockCache in RegionServer. Through this metrics, users/operators can know 1). Current size of MemStores and BlockCache in bytes. 2). Occurrence for Memstore minor and major flush. (named unblocked flush and blocked flush respectively, shown in histogram) 3). Dynamic changes in size between MemStores and BlockCache. (with Increase/Decrease as prefix, shown in histogram). And a counter for no changes, named DoNothingCounter. @@ -7062,7 +7062,7 @@ When LocalHBaseCluster is started from the command line the Master would give up * [HBASE-16052](https://issues.apache.org/jira/browse/HBASE-16052) | *Major* | **Improve HBaseFsck Scalability** -HBASE-16052 improves the performance and scalability of HBaseFsck, especially for large clusters with a small number of large tables. +HBASE-16052 improves the performance and scalability of HBaseFsck, especially for large clusters with a small number of large tables. Searching for lingering reference files is now a multi-threaded operation. Loading HDFS region directory information is now multi-threaded at the region-level instead of the table-level to maximize concurrency. A performance bug in HBaseFsck that resulted in redundant I/O and RPCs was fixed by introducing a FileStatusFilter that filters FileStatus objects directly. @@ -7078,7 +7078,7 @@ If zk based replication queue is used and useMulti is false, we will schedule a * [HBASE-3727](https://issues.apache.org/jira/browse/HBASE-3727) | *Minor* | **MultiHFileOutputFormat** -MultiHFileOutputFormat support output of HFiles from multiple tables. It will output directories and hfiles as follow, +MultiHFileOutputFormat support output of HFiles from multiple tables. It will output directories and hfiles as follow, --table1 --family1 --family2 @@ -7102,7 +7102,7 @@ Prior to this change, the integration test clients (IntegrationTest\*) relied on * [HBASE-13823](https://issues.apache.org/jira/browse/HBASE-13823) | *Major* | **Procedure V2: unnecessaery operations on AssignmentManager#recoverTableInDisablingState() and recoverTableInEnablingState()** -For cluster upgraded from 1.0.x or older releases, master startup would not continue the in-progress enable/disable table process. If orphaned znode with ENABLING/DISABLING state exists in the cluster, run hbck or manually fix the issue. +For cluster upgraded from 1.0.x or older releases, master startup would not continue the in-progress enable/disable table process. If orphaned znode with ENABLING/DISABLING state exists in the cluster, run hbck or manually fix the issue. For new cluster or cluster upgraded from 1.1.x and newer release, there is no issue to worry about. @@ -7111,9 +7111,9 @@ For new cluster or cluster upgraded from 1.1.x and newer release, there is no is * [HBASE-16095](https://issues.apache.org/jira/browse/HBASE-16095) | *Major* | **Add priority to TableDescriptor and priority region open thread pool** -Adds a PRIORITY property to the HTableDescriptor. PRIORITY should be in the same range as the RpcScheduler defines it (HConstants.XXX\_QOS). +Adds a PRIORITY property to the HTableDescriptor. 
PRIORITY should be in the same range as the RpcScheduler defines it (HConstants.XXX\_QOS). -Table priorities are only used for region opening for now. There can be other uses later (like RpcScheduling). +Table priorities are only used for region opening for now. There can be other uses later (like RpcScheduling). Regions of high priority tables (priority \>= than HIGH\_QOS) are opened from a different thread pool than the regular region open thread pool. However, table priorities are not used as a global order for region assigning or opening. @@ -7129,7 +7129,7 @@ When a replication endpoint is sent a shutdown request by the replication source * [HBASE-16087](https://issues.apache.org/jira/browse/HBASE-16087) | *Major* | **Replication shouldn't start on a master if if only hosts system tables** -Masters will no longer start any replication threads if they are hosting only system tables. +Masters will no longer start any replication threads if they are hosting only system tables. In order to change this add something to the config for tables on master that doesn't start with "hbase:" ( Replicating system tables is something that's currently unsupported and can open up security holes, so do this at your own peril) @@ -7138,7 +7138,7 @@ In order to change this add something to the config for tables on master that do * [HBASE-14548](https://issues.apache.org/jira/browse/HBASE-14548) | *Major* | **Expand how table coprocessor jar and dependency path can be specified** -Allow a directory containing the jars or some wildcards to be specified, such as: hdfs://namenode:port/user/hadoop-user/ +Allow a directory containing the jars or some wildcards to be specified, such as: hdfs://namenode:port/user/hadoop-user/ or hdfs://namenode:port/user/hadoop-user/\*.jar @@ -7185,12 +7185,12 @@ This patch introduces a new infrastructure for creation and maintenance of Maven NOTE that this patch should introduce two new WARNINGs ("Using platform encoding ... to copy filtered resources") into the hbase install process. These warnings are hard-wired into the maven-archetype-plugin:create-from-project goal. See hbase/hbase-archetypes/README.md, footnote [6] for details. -After applying the patch, see hbase/hbase-archetypes/README.md for details regarding the new archetype infrastructure introduced by this patch. (The README text is also conveniently positioned at the top of the patch itself.) +After applying the patch, see hbase/hbase-archetypes/README.md for details regarding the new archetype infrastructure introduced by this patch. (The README text is also conveniently positioned at the top of the patch itself.) -Here is the opening paragraph of the README.md file: -================= -The hbase-archetypes subproject of hbase provides an infrastructure for creation and maintenance of Maven archetypes pertinent to HBase. Upon deployment to the archetype catalog of the central Maven repository, these archetypes may be used by end-user developers to autogenerate completely configured Maven projects (including fully-functioning sample code) through invocation of the archetype:generate goal of the maven-archetype-plugin. -======== +Here is the opening paragraph of the README.md file: +================= +The hbase-archetypes subproject of hbase provides an infrastructure for creation and maintenance of Maven archetypes pertinent to HBase. 
Upon deployment to the archetype catalog of the central Maven repository, these archetypes may be used by end-user developers to autogenerate completely configured Maven projects (including fully-functioning sample code) through invocation of the archetype:generate goal of the maven-archetype-plugin. +======== The README.md file also contains several paragraphs under the heading, "Notes for contributors and committers to the HBase project", which explains the layout of 'hbase-archetypes', and how archetypes are created and installed into the local Maven repository, ready for deployment to the central Maven repository. It also outlines how new archetypes may be developed and added to the collection in the future. @@ -7249,7 +7249,7 @@ Adds a FifoRpcSchedulerFactory so you can try the FifoRpcScheduler by setting " * [HBASE-15989](https://issues.apache.org/jira/browse/HBASE-15989) | *Major* | **Remove hbase.online.schema.update.enable** -Removes the "hbase.online.schema.update.enable" property. +Removes the "hbase.online.schema.update.enable" property. from now, every operation that alter the schema (e.g. modifyTable, addFamily, removeFamily, ...) will use the online schema update. there is no need to disable/enable the table. @@ -7318,12 +7318,12 @@ See http://mail-archives.apache.org/mod\_mbox/hbase-dev/201605.mbox/%3CCAMUu0w-Z * [HBASE-15228](https://issues.apache.org/jira/browse/HBASE-15228) | *Major* | **Add the methods to RegionObserver to trigger start/complete restoring WALs** -Added two hooks around WAL restore. +Added two hooks around WAL restore. preReplayWALs(final ObserverContext\ ctx, HRegionInfo info, Path edits) and -postReplayWALs(final ObserverContext\ ctx, HRegionInfo info, Path edits) +postReplayWALs(final ObserverContext\ ctx, HRegionInfo info, Path edits) -Will be called at start and end of restore of a WAL file. +Will be called at start and end of restore of a WAL file. The other hook around WAL restore (preWALRestore ) will be called before restore of every entry within the WAL file. @@ -7565,12 +7565,12 @@ No functional change. Added javadoc, comments, and extra trace-level logging to Use 'hbase.hstore.compaction.date.tiered.window.factory.class' to specify the window implementation you like for date tiered compaction. Now the only and default implementation is org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory. -{code} -\ -\hbase.hstore.compaction.date.tiered.window.factory.class\ -\org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory\ -\ -\ +{code} +\ +\hbase.hstore.compaction.date.tiered.window.factory.class\ +\org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory\ +\ +\ {code} @@ -7669,15 +7669,15 @@ With this patch combined with HBASE-15389, when we compact, we can output multip 2. Bulk load files and the old file generated by major compaction before upgrading to DTCP. This will change the way to enable date tiered compaction. 
-To turn it on: +To turn it on: hbase.hstore.engine.class: org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine -With tiered compaction all servers in the cluster will promote windows to higher tier at the same time, so using a compaction throttle is recommended: -hbase.regionserver.throughput.controller:org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController +With tiered compaction all servers in the cluster will promote windows to higher tier at the same time, so using a compaction throttle is recommended: +hbase.regionserver.throughput.controller:org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController hbase.hstore.compaction.throughput.higher.bound and hbase.hstore.compaction.throughput.lower.bound need to be set for desired throughput range as uncompressed rates. -Because there will most likely be more store files around, we need to adjust the configuration so that flush won't be blocked and compaction will be properly throttled: -hbase.hstore.blockingStoreFiles: change to 50 if using all default parameters when turning on date tiered compaction. Use 1.5~2 x projected file count if changing the parameters, Projected file count = windows per tier x tier count + incoming window min + files older than max age +Because there will most likely be more store files around, we need to adjust the configuration so that flush won't be blocked and compaction will be properly throttled: +hbase.hstore.blockingStoreFiles: change to 50 if using all default parameters when turning on date tiered compaction. Use 1.5~2 x projected file count if changing the parameters, Projected file count = windows per tier x tier count + incoming window min + files older than max age Because major compaction is turned on now, we also need to adjust the configuration for max file to compact according to the larger file count: hbase.hstore.compaction.max: set to the same number as hbase.hstore.blockingStoreFiles. @@ -7774,7 +7774,7 @@ Adds a configuration parameter "hbase.ipc.max.request.size" which defaults to 25 * [HBASE-15412](https://issues.apache.org/jira/browse/HBASE-15412) | *Major* | **Add average region size metric** -Adds a new metric for called "averageRegionSize" that is emitted as a regionserver metric. Metric description: +Adds a new metric for called "averageRegionSize" that is emitted as a regionserver metric. Metric description: Average region size over the region server including memstore and storefile sizes @@ -7817,7 +7817,7 @@ Fixed an issue in REST server checkAndDelete operation where the remaining cells * [HBASE-15377](https://issues.apache.org/jira/browse/HBASE-15377) | *Major* | **Per-RS Get metric is time based, per-region metric is size-based** -Per-region metrics related to Get histograms are changed from being response size based into being latency based similar to the per-regionserver metrics of the same name. +Per-region metrics related to Get histograms are changed from being response size based into being latency based similar to the per-regionserver metrics of the same name. Added GetSize histogram metrics at the per-regionserver and per-region level for the response sizes. @@ -7826,9 +7826,9 @@ Added GetSize histogram metrics at the per-regionserver and per-region level for * [HBASE-6721](https://issues.apache.org/jira/browse/HBASE-6721) | *Major* | **RegionServer Group based Assignment** -[ADVANCED USERS ONLY] This patch adds a new experimental module hbase-rsgroup. 
It is an advanced feature for partitioning regionservers into distinctive groups for strict isolation, and should only be used by users who are sophisticated enough to understand the full implications and have a sufficient background in managing HBase clusters. +[ADVANCED USERS ONLY] This patch adds a new experimental module hbase-rsgroup. It is an advanced feature for partitioning regionservers into distinctive groups for strict isolation, and should only be used by users who are sophisticated enough to understand the full implications and have a sufficient background in managing HBase clusters. -RSGroups can be defined and managed with shell commands or corresponding Java APIs. A server can be added to a group with hostname and port pair, and tables can be moved to this group so that only regionservers in the same rsgroup can host the regions of the table. RegionServers and tables can only belong to 1 group at a time. By default, all tables and regionservers belong to the "default" group. System tables can also be put into a group using the regular APIs. A custom balancer implementation tracks assignments per rsgroup and makes sure to move regions to the relevant regionservers in that group. The group information is stored in a regular HBase table, and a zookeeper-based read-only cache is used at the cluster bootstrap time. +RSGroups can be defined and managed with shell commands or corresponding Java APIs. A server can be added to a group with hostname and port pair, and tables can be moved to this group so that only regionservers in the same rsgroup can host the regions of the table. RegionServers and tables can only belong to 1 group at a time. By default, all tables and regionservers belong to the "default" group. System tables can also be put into a group using the regular APIs. A custom balancer implementation tracks assignments per rsgroup and makes sure to move regions to the relevant regionservers in that group. The group information is stored in a regular HBase table, and a zookeeper-based read-only cache is used at the cluster bootstrap time. To enable, add the following to your hbase-site.xml and restart your Master: @@ -7857,7 +7857,7 @@ This adds a group to the 'hbase:rsgroup' system table. Add a server (hostname + * [HBASE-15435](https://issues.apache.org/jira/browse/HBASE-15435) | *Major* | **Add WAL (in bytes) written metric** -Adds a new metric named "writtenBytes" as a per-regionserver metric. Metric Description: +Adds a new metric named "writtenBytes" as a per-regionserver metric. Metric Description: Size (in bytes) of the data written to the WAL. @@ -7908,7 +7908,7 @@ on branch-1, branch-1.2 and branch 1.3 we now check if the exception is meta-cle * [HBASE-15376](https://issues.apache.org/jira/browse/HBASE-15376) | *Major* | **ScanNext metric is size-based while every other per-operation metric is time based** -Removed ScanNext histogram metrics as regionserver level and per-region level metrics since the semantics is not compatible with other similar metrics (size histogram vs latency histogram). +Removed ScanNext histogram metrics as regionserver level and per-region level metrics since the semantics is not compatible with other similar metrics (size histogram vs latency histogram). Instead, this patch adds ScanTime and ScanSize histogram metrics at the regionserver and per-region level. 
@@ -7931,7 +7931,7 @@ Previously RPC request scheduler in HBase had 2 modes in could operate in: This patch adds new type of scheduler to HBase, based on the research around controlled delay (CoDel) algorithm [1], used in networking to combat bufferbloat, as well as some analysis on generalizing it to generic request queues [2]. The purpose of that work is to prevent long standing call queues caused by discrepancy between request rate and available throughput, caused by kernel/disk IO/networking stalls. -New RPC scheduler could be enabled by setting hbase.ipc.server.callqueue.type=codel in configuration. Several additional params allow to configure algorithm behavior - +New RPC scheduler could be enabled by setting hbase.ipc.server.callqueue.type=codel in configuration. Several additional params allow to configure algorithm behavior - hbase.ipc.server.callqueue.codel.target.delay hbase.ipc.server.callqueue.codel.interval @@ -8105,7 +8105,7 @@ Removed IncrementPerformanceTest. It is not as configurable as the additions mad * [HBASE-15218](https://issues.apache.org/jira/browse/HBASE-15218) | *Blocker* | **On RS crash and replay of WAL, loosing all Tags in Cells** -This issue fixes +This issue fixes - In case of normal WAL (Not encrypted) we were loosing all cell tags on WAL replay after an RS crash - In case of encrypted WAL we were not even persisting Cell tags in WAL. Tags from all unflushed (to HFile) Cells will get lost even after WAL replay recovery is done. @@ -8154,13 +8154,13 @@ If you are using co processors and refer the Cells in the read results, DO NOT s * [HBASE-15145](https://issues.apache.org/jira/browse/HBASE-15145) | *Major* | **HBCK and Replication should authenticate to zookepeer using server principal** -Added a new command line argument: --auth-as-server to enable authenticating to ZooKeeper as the HBase Server principal. This is required for secure clusters for doing replication operations like add\_peer, list\_peers, etc until HBASE-11392 is fixed. This advanced option can also be used for manually fixing secure znodes. +Added a new command line argument: --auth-as-server to enable authenticating to ZooKeeper as the HBase Server principal. This is required for secure clusters for doing replication operations like add\_peer, list\_peers, etc until HBASE-11392 is fixed. This advanced option can also be used for manually fixing secure znodes. -Commands can now be invoked like: -hbase --auth-as-server shell -hbase --auth-as-server zkcli +Commands can now be invoked like: +hbase --auth-as-server shell +hbase --auth-as-server zkcli -HBCK in secure setup also needs to authenticate to ZK using servers principals.This is turned on by default (no need to pass additional argument). +HBCK in secure setup also needs to authenticate to ZK using servers principals.This is turned on by default (no need to pass additional argument). When authenticating as server, HBASE\_SERVER\_JAAS\_OPTS is concatenated to HBASE\_OPTS if defined in hbase-env.sh. Otherwise, HBASE\_REGIONSERVER\_OPTS is concatenated. @@ -8209,7 +8209,7 @@ The \`hbase version\` command now outputs directly to stdout rather than to a lo * [HBASE-15027](https://issues.apache.org/jira/browse/HBASE-15027) | *Major* | **Refactor the way the CompactedHFileDischarger threads are created** The property 'hbase.hfile.compactions.discharger.interval' has been renamed to 'hbase.hfile.compaction.discharger.interval' that describes the interval after which the compaction discharger chore service should run. 
-The property 'hbase.hfile.compaction.discharger.thread.count' describes the thread count that does the compaction discharge work. +The property 'hbase.hfile.compaction.discharger.thread.count' describes the thread count that does the compaction discharge work. The CompactedHFilesDischarger is a chore service now started as part of the RegionServer and this chore service iterates over all the onlineRegions in that RS and uses the RegionServer's executor service to launch a set of threads that does this job of compaction files clean up. @@ -8217,8 +8217,8 @@ The CompactedHFilesDischarger is a chore service now started as part of the Regi * [HBASE-14468](https://issues.apache.org/jira/browse/HBASE-14468) | *Major* | **Compaction improvements: FIFO compaction policy** -FIFO compaction policy selects only files which have all cells expired. The column family MUST have non-default TTL. -Essentially, FIFO compactor does only one job: collects expired store files. +FIFO compaction policy selects only files which have all cells expired. The column family MUST have non-default TTL. +Essentially, FIFO compactor does only one job: collects expired store files. Because we do not do any real compaction, we do not use CPU and IO (disk and network), we do not evict hot data from a block cache. The result: improved throughput and latency both write and read. See: https://github.com/facebook/rocksdb/wiki/FIFO-compaction-style @@ -8281,7 +8281,7 @@ All clients before 1.2.0 will not get this multi request chunking based upon blo * [HBASE-14951](https://issues.apache.org/jira/browse/HBASE-14951) | *Minor* | **Make hbase.regionserver.maxlogs obsolete** -Rolling WAL events across a cluster can be highly correlated, hence flushing memstores, hence triggering minor compactions, that can be promoted to major ones. These events are highly correlated in time if there is a balanced write-load on the regions in a table. Default value for maximum WAL files (\* hbase.regionserver.maxlogs\*), which controls WAL rolling events - 32 is too small for many modern deployments. +Rolling WAL events across a cluster can be highly correlated, hence flushing memstores, hence triggering minor compactions, that can be promoted to major ones. These events are highly correlated in time if there is a balanced write-load on the regions in a table. Default value for maximum WAL files (\* hbase.regionserver.maxlogs\*), which controls WAL rolling events - 32 is too small for many modern deployments. Now we calculate this value dynamically (if not defined by user), using the following formula: maxLogs = Math.max( 32, HBASE\_HEAP\_SIZE \* memstoreRatio \* 2/ LogRollSize), where @@ -8289,7 +8289,7 @@ maxLogs = Math.max( 32, HBASE\_HEAP\_SIZE \* memstoreRatio \* 2/ LogRollSize), w memstoreRatio is \*hbase.regionserver.global.memstore.size\* LogRollSize is maximum WAL file size (default 0.95 \* HDFS block size) -We need to make sure that we avoid fully or minimize events when RS has to flush memstores prematurely only because it reached artificial limit of hbase.regionserver.maxlogs, this is why we put this 2 x multiplier in equation, this gives us maximum WAL capacity of 2 x RS memstore-size. +We need to make sure that we avoid fully or minimize events when RS has to flush memstores prematurely only because it reached artificial limit of hbase.regionserver.maxlogs, this is why we put this 2 x multiplier in equation, this gives us maximum WAL capacity of 2 x RS memstore-size. Runaway WAL files. 
@@ -8321,7 +8321,7 @@ Setting it to false ( the default ) will help ensure a more even distribution of * [HBASE-14534](https://issues.apache.org/jira/browse/HBASE-14534) | *Minor* | **Bump yammer/coda/dropwizard metrics dependency version** -Updated yammer metrics to version 3.1.2 (now it's been renamed to dropwizard). API has changed quite a bit, consult https://dropwizard.github.io/metrics/3.1.0/manual/core/ for additional information. +Updated yammer metrics to version 3.1.2 (now it's been renamed to dropwizard). API has changed quite a bit, consult https://dropwizard.github.io/metrics/3.1.0/manual/core/ for additional information. Note that among other things, in yammer 2.2.0 histograms were by default created in non-biased mode (uniform sampling), while in 3.1.0 histograms created via MetricsRegistry.histogram(...) are by default exponentially decayed. This shouldn't affect end users, though. @@ -8375,7 +8375,7 @@ Following are the additional configurations added for this enhancement, For example: If source cluster FS client configurations are copied in peer cluster under directory /home/user/dc1/ then hbase.replication.cluster.id should be configured as dc1 and hbase.replication.conf.dir as /home/user -Note: +Note: a. Any modification to source cluster FS client configuration files in peer cluster side replication configuration directory then it needs to restart all its peer(s) cluster RS with default hbase.replication.source.fs.conf.provider. b. Only 'xml' type files will be loaded by the default hbase.replication.source.fs.conf.provider. @@ -8573,7 +8573,7 @@ This patch adds shell support for region normalizer (see HBASE-13103). - 'normalizer\_switch' allows user to turn normalizer on and off - 'normalize' runs region normalizer if it's turned on. -Also 'alter' command has been extended to allow user to enable/disable region normalization per table (disabled by default). Use it as +Also 'alter' command has been extended to allow user to enable/disable region normalization per table (disabled by default). Use it as alter 'testtable', {NORMALIZATION\_MODE =\> 'true'} @@ -8871,14 +8871,14 @@ For more details on how to use the feature please consult the HBase Reference Gu Removed Table#getRowOrBefore, Region#getClosestRowBefore, Store#getRowKeyAtOrBefore, RemoteHTable#getRowOrBefore apis and Thrift support for getRowOrBefore. Also removed two coprocessor hooks preGetClosestRowBefore and postGetClosestRowBefore. -User using this api can instead use reverse scan something like below, -{code} - Scan scan = new Scan(row); - scan.setSmall(true); - scan.setCaching(1); - scan.setReversed(true); - scan.addFamily(family); -{code} +User using this api can instead use reverse scan something like below, +{code} + Scan scan = new Scan(row); + scan.setSmall(true); + scan.setCaching(1); + scan.setReversed(true); + scan.addFamily(family); +{code} pass this scan object to the scanner and retrieve the first Result from scanner output. @@ -8894,7 +8894,7 @@ Changes parameters to filterColumn so takes a Cell rather than a byte []. 
hbase-client-1.2.7-SNAPSHOT.jar, ColumnPrefixFilter.class package org.apache.hadoop.hbase.filter -ColumnPrefixFilter.filterColumn ( byte[ ] buffer, int qualifierOffset, int qualifierLength ) : Filter.ReturnCode +ColumnPrefixFilter.filterColumn ( byte[ ] buffer, int qualifierOffset, int qualifierLength ) : Filter.ReturnCode org/apache/hadoop/hbase/filter/ColumnPrefixFilter.filterColumn:([BII)Lorg/apache/hadoop/hbase/filter/Filter$ReturnCode; Ditto for filterColumnValue in SingleColumnValueFilter. Takes a Cell instead of byte array. @@ -9088,7 +9088,7 @@ hbase-shaded-client and hbase-shaded-server modules will not build the actual ja * [HBASE-13754](https://issues.apache.org/jira/browse/HBASE-13754) | *Major* | **Allow non KeyValue Cell types also to oswrite** -This jira has removed the already deprecated method +This jira has removed the already deprecated method KeyValue#oswrite(final KeyValue kv, final OutputStream out) @@ -9128,11 +9128,11 @@ Purge support for parsing zookeepers zoo.cfg deprecated since hbase-0.96.0 MOTIVATION -A pipelined scan API is introduced for speeding up applications that combine massive data traversal with compute-intensive processing. Traditional HBase scans save network trips through prefetching the data to the client side cache. However, they prefetch synchronously: the fetch request to regionserver is invoked only when the entire cache is consumed. This leads to a stop-and-wait access pattern, in which the client stalls until the next chunk of data is fetched. Applications that do significant processing can benefit from background data prefetching, which eliminates this bottleneck. The pipelined scan implementation overlaps the cache population at the client side with application processing. Namely, it issues a new scan RPC when the iteration retrieves 50% of the cache. If the application processing (that is, the time between invocations of next()) is substantial, the new chunk of data will be available before the previous one is exhausted, and the client will not experience any delay. Ideally, the prefetch and the processing times should be balanced. +A pipelined scan API is introduced for speeding up applications that combine massive data traversal with compute-intensive processing. Traditional HBase scans save network trips through prefetching the data to the client side cache. However, they prefetch synchronously: the fetch request to regionserver is invoked only when the entire cache is consumed. This leads to a stop-and-wait access pattern, in which the client stalls until the next chunk of data is fetched. Applications that do significant processing can benefit from background data prefetching, which eliminates this bottleneck. The pipelined scan implementation overlaps the cache population at the client side with application processing. Namely, it issues a new scan RPC when the iteration retrieves 50% of the cache. If the application processing (that is, the time between invocations of next()) is substantial, the new chunk of data will be available before the previous one is exhausted, and the client will not experience any delay. Ideally, the prefetch and the processing times should be balanced. API AND CONFIGURATION -Asynchronous scanning can be configured either globally for all tables and scans, or on per-scan basis via a new Scan class API. +Asynchronous scanning can be configured either globally for all tables and scans, or on per-scan basis via a new Scan class API. 
Configuration in hbase-site.xml: hbase.client.scanner.async.prefetch, default false: @@ -9175,8 +9175,8 @@ Introduces a new config hbase.fs.tmp.dir which is a directory in HDFS (or defaul * [HBASE-10800](https://issues.apache.org/jira/browse/HBASE-10800) | *Major* | **Use CellComparator instead of KVComparator** -From 2.0 branch onwards KVComparator and its subclasses MetaComparator, RawBytesComparator are all deprecated. -All the comparators are moved to CellComparator. MetaCellComparator, a subclass of CellComparator, will be used to compare hbase:meta cells. +From 2.0 branch onwards KVComparator and its subclasses MetaComparator, RawBytesComparator are all deprecated. +All the comparators are moved to CellComparator. MetaCellComparator, a subclass of CellComparator, will be used to compare hbase:meta cells. Previously exposed static instances KeyValue.COMPARATOR, KeyValue.META\_COMPARATOR and KeyValue.RAW\_COMPARATOR are deprecated instead use CellComparator.COMPARATOR and CellComparator.META\_COMPARATOR. Also note that there will be no RawBytesComparator. Where ever we need to compare raw bytes use Bytes.BYTES\_RAWCOMPARATOR. CellComparator will always operate on cells and its components, abstracting the fact that a cell can be backed by a single byte[] as opposed to how KVComparators were working. @@ -9194,7 +9194,7 @@ Adds a renewLease call to ClientScanner * [HBASE-13564](https://issues.apache.org/jira/browse/HBASE-13564) | *Major* | **Master MBeans are not published** To use the coprocessor-based JMX implementation provided by HBase for Master. -Add below property in hbase-site.xml file: +Add below property in hbase-site.xml file: \ \hbase.coprocessor.master.classes\ @@ -9310,7 +9310,7 @@ Compose thrift exception text from the text of the entire cause chain of the und * [HBASE-13275](https://issues.apache.org/jira/browse/HBASE-13275) | *Major* | **Setting hbase.security.authorization to false does not disable authorization** -Prior to this change the configuration setting 'hbase.security.authorization' had no effect if security coprocessor were installed. The act of installing the security coprocessors was assumed to indicate active authorizaton was desired and required. Now it is possible to install the security coprocessors yet have them operate in a passive state with active authorization disabled by setting 'hbase.security.authorization' to false. This can be useful but is probably not what you want. For more information, consult the Security section of the HBase online manual. +Prior to this change the configuration setting 'hbase.security.authorization' had no effect if security coprocessor were installed. The act of installing the security coprocessors was assumed to indicate active authorizaton was desired and required. Now it is possible to install the security coprocessors yet have them operate in a passive state with active authorization disabled by setting 'hbase.security.authorization' to false. This can be useful but is probably not what you want. For more information, consult the Security section of the HBase online manual. 'hbase.security.authorization' defaults to true for backwards comptatible behavior. @@ -9346,15 +9346,15 @@ Use hbase.client.scanner.max.result.size instead to enforce practical chunk size Results returned from RPC calls may now be returned as partials -When is a Result marked as a partial? +When is a Result marked as a partial? When the server must stop the scan because the max size limit has been reached. 
Means that the LAST Result returned within the ScanResult's Result array may be marked as a partial if the scan's max size limit caused it to stop in the middle of a row. Incompatible Change: The return type of InternalScanners#next and RegionScanners#nextRaw has been changed to NextState from boolean The previous boolean return value can be accessed via NextState#hasMoreValues() Provides more context as to what happened inside the scanner -Scan caching default has been changed to Integer.Max\_Value -This value works together with the new maxResultSize value from HBASE-12976 (defaults to 2MB) +Scan caching default has been changed to Integer.Max\_Value +This value works together with the new maxResultSize value from HBASE-12976 (defaults to 2MB) Results returned from server on basis of size rather than number of rows Provides better use of network since row size varies amongst tables @@ -9672,14 +9672,14 @@ This client is on by default in master branch (2.0 hbase). It is off in branch-1 Namespace auditor provides basic quota support for namespaces in terms of number of tables and number of regions. In order to use namespace quotas, quota support must be enabled by setting "hbase.quota.enabled" property to true in hbase-site.xml file. -The users can add quota information to namespace, while creating new namespaces or by altering existing ones. +The users can add quota information to namespace, while creating new namespaces or by altering existing ones. Examples: 1. create\_namespace 'ns1', {'hbase.namespace.quota.maxregions'=\>'10'} 2. create\_namespace 'ns2', {'hbase.namespace.quota.maxtables'=\>'2','hbase.namespace.quota.maxregions'=\>'5'} 3. alter\_namespace 'ns3', {METHOD =\> 'set', 'hbase.namespace.quota.maxtables'=\>'5','hbase.namespace.quota.maxregions'=\>'25'} -The quotas can be modified/added to namespace at any point of time. To remove quotas, the following command can be used: +The quotas can be modified/added to namespace at any point of time. To remove quotas, the following command can be used: alter\_namespace 'ns3', {METHOD =\> 'unset', NAME =\> 'hbase.namespace.quota.maxtables'} alter\_namespace 'ns3', {METHOD =\> 'unset', NAME =\> 'hbase.namespace.quota.maxregions'} @@ -9839,7 +9839,7 @@ NavigableMap\\> getFamilyMap() * [HBASE-12084](https://issues.apache.org/jira/browse/HBASE-12084) | *Major* | **Remove deprecated APIs from Result** The below KeyValue based APIs are removed from Result -KeyValue[] raw() +KeyValue[] raw() List\ list() List\ getColumn(byte [] family, byte [] qualifier) KeyValue getColumnLatest(byte [] family, byte [] qualifier) @@ -9854,7 +9854,7 @@ Cell getColumnLatestCell(byte [] family, int foffset, int flength, byte [] quali respectively Also the constructors which were taking KeyValues also removed -Result(KeyValue [] cells) +Result(KeyValue [] cells) Result(List\ kvs) @@ -9865,7 +9865,7 @@ Result(List\ kvs) The following APIs are removed from Filter KeyValue transform(KeyValue) KeyValue getNextKeyHint(KeyValue) -and replaced with +and replaced with Cell transformCell(Cell) Cell getNextCellHint(Cell) respectively. @@ -10012,6 +10012,3 @@ To enable zoo.cfg reading, for which support may be removed in a future release, properties from a zoo.cfg file has been deprecated. \ \ - - - diff --git a/bin/considerAsDead.sh b/bin/considerAsDead.sh index ae1b8d885bf..848e276cd00 100755 --- a/bin/considerAsDead.sh +++ b/bin/considerAsDead.sh @@ -17,7 +17,7 @@ # * See the License for the specific language governing permissions and # * limitations under the License. 
# */ -# +# usage="Usage: considerAsDead.sh --hostname serverName" @@ -50,12 +50,12 @@ do rs_parts=(${rs//,/ }) hostname=${rs_parts[0]} echo $deadhost - echo $hostname + echo $hostname if [ "$deadhost" == "$hostname" ]; then znode="$zkrs/$rs" echo "ZNode Deleting:" $znode $bin/hbase zkcli delete $znode > /dev/null 2>&1 sleep 1 - ssh $HBASE_SSH_OPTS $hostname $remote_cmd 2>&1 | sed "s/^/$hostname: /" - fi + ssh $HBASE_SSH_OPTS $hostname $remote_cmd 2>&1 | sed "s/^/$hostname: /" + fi done diff --git a/bin/hbase-cleanup.sh b/bin/hbase-cleanup.sh index 92b40cca6ae..69c1f72b607 100755 --- a/bin/hbase-cleanup.sh +++ b/bin/hbase-cleanup.sh @@ -74,7 +74,7 @@ check_for_znodes() { znodes=`"$bin"/hbase zkcli ls $zparent/$zchild 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"` if [ "$znodes" != "" ]; then echo -n "ZNode(s) [${znodes}] of $command are not expired. Exiting without cleaning hbase data." - echo #force a newline + echo #force a newline exit 1; else echo -n "All ZNode(s) of $command are expired." @@ -99,7 +99,7 @@ execute_clean_acls() { clean_up() { case $1 in - --cleanZk) + --cleanZk) execute_zk_command "deleteall ${zparent}"; ;; --cleanHdfs) @@ -120,7 +120,7 @@ clean_up() { ;; *) ;; - esac + esac } check_znode_exists() { diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh index 3e85ec59fb6..104e9a0b67c 100644 --- a/bin/hbase-config.sh +++ b/bin/hbase-config.sh @@ -103,7 +103,7 @@ do break fi done - + # Allow alternate hbase conf dir location. HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}" # List of hbase regions servers. @@ -162,7 +162,7 @@ fi # memory usage to explode. Tune the variable down to prevent vmem explosion. export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4} -# Now having JAVA_HOME defined is required +# Now having JAVA_HOME defined is required if [ -z "$JAVA_HOME" ]; then cat 1>&2 </dev/null; pwd` . "$bin"/hbase-config.sh # If the master backup file is specified in the command line, -# then it takes precedence over the definition in +# then it takes precedence over the definition in # hbase-env.sh. Save it here. HOSTLIST=$HBASE_BACKUP_MASTERS @@ -69,6 +69,6 @@ if [ -f $HOSTLIST ]; then sleep $HBASE_SLAVE_SLEEP fi done -fi +fi wait diff --git a/bin/regionservers.sh b/bin/regionservers.sh index b83c1f3c79e..b10e5a3ec9f 100755 --- a/bin/regionservers.sh +++ b/bin/regionservers.sh @@ -17,7 +17,7 @@ # * See the License for the specific language governing permissions and # * limitations under the License. # */ -# +# # Run a shell command on all regionserver hosts. # # Environment Variables @@ -45,7 +45,7 @@ bin=`cd "$bin">/dev/null; pwd` . "$bin"/hbase-config.sh # If the regionservers file is specified in the command line, -# then it takes precedence over the definition in +# then it takes precedence over the definition in # hbase-env.sh. Save it here. 
HOSTLIST=$HBASE_REGIONSERVERS diff --git a/bin/stop-hbase.sh b/bin/stop-hbase.sh index b47ae1f7743..d10e618f2d2 100755 --- a/bin/stop-hbase.sh +++ b/bin/stop-hbase.sh @@ -52,7 +52,7 @@ fi export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-master-$HOSTNAME export HBASE_LOGFILE=$HBASE_LOG_PREFIX.log -logout=$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out +logout=$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out loglog="${HBASE_LOG_DIR}/${HBASE_LOGFILE}" pid=${HBASE_PID_DIR:-/tmp}/hbase-$HBASE_IDENT_STRING-master.pid @@ -74,7 +74,7 @@ fi # distributed == false means that the HMaster will kill ZK when it exits # HBASE-6504 - only take the first line of the output in case verbose gc is on distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1` -if [ "$distMode" == 'true' ] +if [ "$distMode" == 'true' ] then "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" stop zookeeper fi diff --git a/bin/test/process_based_cluster.sh b/bin/test/process_based_cluster.sh index eb8633f502c..1c4c7253213 100755 --- a/bin/test/process_based_cluster.sh +++ b/bin/test/process_based_cluster.sh @@ -68,7 +68,7 @@ while [ $# -ne 0 ]; do -h|--help) print_usage ;; --kill) - IS_KILL=1 + IS_KILL=1 cmd_specified ;; --show) IS_SHOW=1 @@ -106,5 +106,3 @@ else echo "No command specified" >&2 exit 1 fi - - diff --git a/bin/zookeepers.sh b/bin/zookeepers.sh index 97bf41b6052..5d22d82a559 100755 --- a/bin/zookeepers.sh +++ b/bin/zookeepers.sh @@ -17,7 +17,7 @@ # * See the License for the specific language governing permissions and # * limitations under the License. # */ -# +# # Run a shell command on all zookeeper hosts. # # Environment Variables diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh index e049fd6d853..06720b4d5e3 100644 --- a/conf/hbase-env.sh +++ b/conf/hbase-env.sh @@ -33,7 +33,7 @@ # The maximum amount of heap to use. Default is left to JVM default. # export HBASE_HEAPSIZE=1G -# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of +# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of # offheap, set the value to "8G". # export HBASE_OFFHEAPSIZE=1G @@ -70,7 +70,7 @@ # export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc: -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M" # See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations -# needed setting up off-heap block caching. +# needed setting up off-heap block caching. # Uncomment and adjust to enable JMX exporting # See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access. @@ -101,7 +101,7 @@ # Where log files are stored. $HBASE_HOME/logs by default. # export HBASE_LOG_DIR=${HBASE_HOME}/logs -# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers +# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers # export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070" # export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071" # export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072" @@ -125,13 +125,13 @@ # Tell HBase whether it should manage it's own instance of ZooKeeper or not. 
# export HBASE_MANAGES_ZK=true -# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the +# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the # RFA appender. Please refer to the log4j2.properties file to see more details on this appender. # In case one needs to do log rolling on a date change, one should set the environment property # HBASE_ROOT_LOGGER to ",DRFA". # For example: # export HBASE_ROOT_LOGGER=INFO,DRFA -# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as +# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as # DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context. # Tell HBase whether it should include Hadoop's lib when start up, diff --git a/conf/hbase-policy.xml b/conf/hbase-policy.xml index bf472407d17..5a0256d5164 100644 --- a/conf/hbase-policy.xml +++ b/conf/hbase-policy.xml @@ -24,20 +24,20 @@ security.client.protocol.acl * - ACL for ClientProtocol and AdminProtocol implementations (ie. + ACL for ClientProtocol and AdminProtocol implementations (ie. clients talking to HRegionServers) - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. security.admin.protocol.acl * - ACL for HMasterInterface protocol implementation (ie. + ACL for HMasterInterface protocol implementation (ie. clients talking to HMaster for admin operations). - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. @@ -46,8 +46,8 @@ * ACL for HMasterRegionInterface protocol implementations (for HRegionServers communicating with HMaster) - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. diff --git a/dev-support/HBase Code Template.xml b/dev-support/HBase Code Template.xml index 3b666c97a8a..9c69a5a40b3 100644 --- a/dev-support/HBase Code Template.xml +++ b/dev-support/HBase Code Template.xml @@ -38,4 +38,4 @@ ${type_declaration} \ No newline at end of file +// ${todo} Implement constructor diff --git a/dev-support/HOW_TO_YETUS_LOCAL.md b/dev-support/HOW_TO_YETUS_LOCAL.md index 8d22978d422..2ac4ecd09dc 100644 --- a/dev-support/HOW_TO_YETUS_LOCAL.md +++ b/dev-support/HOW_TO_YETUS_LOCAL.md @@ -87,7 +87,7 @@ these personalities; a pre-packaged personality can be selected via the `--project` parameter. There is a provided HBase personality in Yetus, however the HBase project maintains its own within the HBase source repository. Specify the path to the personality file using `--personality`. The HBase repository -places this file under `dev-support/hbase-personality.sh`. +places this file under `dev-support/hbase-personality.sh`. 
## Docker mode diff --git a/dev-support/hbase_nightly_pseudo-distributed-test.sh b/dev-support/hbase_nightly_pseudo-distributed-test.sh index bf5051267be..8d03195715c 100755 --- a/dev-support/hbase_nightly_pseudo-distributed-test.sh +++ b/dev-support/hbase_nightly_pseudo-distributed-test.sh @@ -340,53 +340,53 @@ EOF echo "writing out example TSV to example.tsv" cat >"${working_dir}/example.tsv" < the test didn't finish notFinishedCounter=$(($notFinishedCounter + 1)) notFinishedList="$notFinishedList,$testClass" - fi + fi done #list of all tests that failed @@ -411,7 +411,7 @@ echo echo "Tests in error are: $errorPresList" echo "Tests that didn't finish are: $notFinishedPresList" echo -echo "Execution time in minutes: $exeTime" +echo "Execution time in minutes: $exeTime" echo "##########################" diff --git a/dev-support/jenkinsEnv.sh b/dev-support/jenkinsEnv.sh index d7fe87339e2..969ece4dc4c 100755 --- a/dev-support/jenkinsEnv.sh +++ b/dev-support/jenkinsEnv.sh @@ -33,4 +33,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin: export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData"}" ulimit -n - diff --git a/dev-support/make_rc.sh b/dev-support/make_rc.sh index 73691791f6a..1892d5a906e 100755 --- a/dev-support/make_rc.sh +++ b/dev-support/make_rc.sh @@ -21,7 +21,7 @@ # timestamp suffix. Deploys builds to maven. # # To finish, check what was build. If good copy to people.apache.org and -# close the maven repos. Call a vote. +# close the maven repos. Call a vote. # # Presumes that dev-support/generate-hadoopX-poms.sh has already been run. # Presumes your settings.xml all set up so can sign artifacts published to mvn, etc. diff --git a/dev-support/rebase_all_git_branches.sh b/dev-support/rebase_all_git_branches.sh index ef213c8fb3d..5c63e405469 100755 --- a/dev-support/rebase_all_git_branches.sh +++ b/dev-support/rebase_all_git_branches.sh @@ -17,11 +17,11 @@ # specific language governing permissions and limitations # under the License. -# This script assumes that your remote is called "origin" +# This script assumes that your remote is called "origin" # and that your local master branch is called "master". # I am sure it could be made more abstract but these are the defaults. -# Edit this line to point to your default directory, +# Edit this line to point to your default directory, # or always pass a directory to the script. DEFAULT_DIR="EDIT_ME" @@ -69,13 +69,13 @@ function check_git_branch_status { } function get_jira_status { - # This function expects as an argument the JIRA ID, + # This function expects as an argument the JIRA ID, # and returns 99 if resolved and 1 if it couldn't # get the status. - # The JIRA status looks like this in the HTML: + # The JIRA status looks like this in the HTML: # span id="resolution-val" class="value resolved" > - # The following is a bit brittle, but filters for lines with + # The following is a bit brittle, but filters for lines with # resolution-val returns 99 if it's resolved jira_url='https://issues.apache.org/jira/rest/api/2/issue' jira_id="$1" @@ -106,7 +106,7 @@ while getopts ":hd:" opt; do print_usage exit 0 ;; - *) + *) echo "Invalid argument: $OPTARG" >&2 print_usage >&2 exit 1 @@ -135,7 +135,7 @@ get_tracking_branches for i in "${tracking_branches[@]}"; do git checkout -q "$i" # Exit if git status is dirty - check_git_branch_status + check_git_branch_status git pull -q --rebase status=$? 
if [ "$status" -ne 0 ]; then @@ -169,7 +169,7 @@ for i in "${all_branches[@]}"; do git checkout -q "$i" # Exit if git status is dirty - check_git_branch_status + check_git_branch_status # If this branch has a remote, don't rebase it # If it has a remote, it has a log with at least one entry @@ -184,7 +184,7 @@ for i in "${all_branches[@]}"; do echo "Failed. Rolling back. Rebase $i manually." git rebase --abort fi - elif [ $status -ne 0 ]; then + elif [ $status -ne 0 ]; then # If status is 0 it means there is a remote branch, we already took care of it echo "Unknown error: $?" >&2 exit 1 @@ -195,10 +195,10 @@ done for i in "${deleted_branches[@]}"; do read -p "$i's JIRA is resolved. Delete? " yn case $yn in - [Yy]) + [Yy]) git branch -D $i ;; - *) + *) echo "To delete it manually, run git branch -D $deleted_branches" ;; esac diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index 9200e3ba921..a8a22b06ef1 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -52,7 +52,7 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then # correct place to put those files. # NOTE 2014/07/17: -# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash +# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash # causing below checks to fail. Once it is fixed, we can revert the commit and enable this again. # TMP2=/tmp/tmp.paths.2.$$ diff --git a/dev-support/test-util.sh b/dev-support/test-util.sh index 9219bb96606..b97e2de383f 100755 --- a/dev-support/test-util.sh +++ b/dev-support/test-util.sh @@ -32,7 +32,7 @@ options: -h Show this message -c Run 'mvn clean' before running the tests -f FILE Run the additional tests listed in the FILE - -u Only run unit tests. Default is to run + -u Only run unit tests. Default is to run unit and integration tests -n N Run each test N times. Default = 1. -s N Print N slowest tests @@ -92,7 +92,7 @@ do r) server=1 ;; - ?) + ?) usage exit 1 esac @@ -175,7 +175,7 @@ done # Print a report of the slowest running tests if [ ! -z $showSlowest ]; then - + testNameIdx=0 for (( i = 0; i < ${#test[@]}; i++ )) do diff --git a/dev-support/zombie-detector.sh b/dev-support/zombie-detector.sh index df4c197ce4d..3a2708a14ad 100755 --- a/dev-support/zombie-detector.sh +++ b/dev-support/zombie-detector.sh @@ -29,7 +29,7 @@ #set -x # printenv -### Setup some variables. +### Setup some variables. bindir=$(dirname $0) # This key is set by our surefire configuration up in the main pom.xml diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index 0e1910b0fb2..d8bcf07aeb7 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 2.5.0-SNAPSHOT .. diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java index c2510efb026..d9bae849063 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the client. 
This tests the hbase-client package and all of the client * tests in hbase-server. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java index 4341becbd68..a168adec08a 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to coprocessors. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java index a91033fa2d3..84f346baaea 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java index 22fbc1b724f..c23bfa298b3 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as failing commonly on public build infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java index c2375ca4e5c..8eee0e6ae4b 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and * the like. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java index 6bc712e270c..4e555b73fed 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as 'integration/system' test, meaning that the test class has the following * characteristics: *
- * <li> Possibly takes hours to complete</li>
- * <li> Can be run on a mini cluster or an actual cluster</li>
- * <li> Can make changes to the given cluster (starting stopping daemons, etc)</li>
- * <li> Should not be run in parallel of other integration tests</li>
+ * <li>Possibly takes hours to complete</li>
+ * <li>Can be run on a mini cluster or an actual cluster</li>
+ * <li>Can make changes to the given cluster (starting stopping daemons, etc)</li>
+ * <li>Should not be run in parallel of other integration tests</li>
 * </ul>
- * - * Integration / System tests should have a class name starting with "IntegrationTest", and - * should be annotated with @Category(IntegrationTests.class). Integration tests can be run - * using the IntegrationTestsDriver class or from mvn verify. - * + * Integration / System tests should have a class name starting with "IntegrationTest", and should + * be annotated with @Category(IntegrationTests.class). Integration tests can be run using the + * IntegrationTestsDriver class or from mvn verify. * @see SmallTests * @see MediumTests * @see LargeTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java index aa183d5607d..b47e5bab9a4 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'large', means that the test class has the following characteristics: *
- * <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
- * same machine simultaneously so be careful two concurrent tests end up fighting over ports
- * or other singular resources).</li>
- * <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it
- * has, will run in last less than three minutes</li>
- * <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
- * if you need to run tests longer than this.</li>
+ * <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
+ * same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+ * other singular resources).</li>
+ * <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it has,
+ * will run in last less than three minutes</li>
+ * <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
+ * if you need to run tests longer than this.</li>
 * </ul>
- * * @see SmallTests * @see MediumTests * @see IntegrationTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java index 4b49da4e4dc..0e68ab3c034 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to mapred or mapreduce. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java index e837f49a268..5dcf51b27e5 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the master. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java index 0f8055b5bab..d1f836ec004 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'medium' means that the test class has the following characteristics: *
- * <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on
- * the same machine simultaneously so be careful two concurrent tests end up fighting over ports
- * or other singular resources).</li>
- * <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
- * has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
+ * <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on the
+ * same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+ * other singular resources).</li>
+ * <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
+ * has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
 * </ul>
- * - * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. - * + * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. * @see SmallTests * @see LargeTests * @see IntegrationTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java index 59962a74c28..27beaacf963 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java index 2759bfc96df..695042e801b 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as not easily falling into any of the below categories. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java index 4edb9bf031d..929bd6487ed 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to RPC. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java index 0f03b761fcb..3439afa76eb 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the regionserver. 
- * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java index 8b8be4de812..df606c960c2 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to replication. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java index e7d1d1d4c88..a648b4c39e0 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the REST capability of HBase. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java index 5263d467cbe..a4e55ad3aba 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to security. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java index 80e6c9d2420..64d2bce381b 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,14 @@ package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'small' means that the test class has the following characteristics: *
- * <li> it can be run simultaneously with other small tests all in the same JVM</li>
- * <li> ideally, the WHOLE implementing test-suite/class, no matter how many or how few test
- * methods it has, should take less than 15 seconds to complete</li>
- * <li> it does not use a cluster</li>
+ * <li>it can be run simultaneously with other small tests all in the same JVM</li>
+ * <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test methods
+ * it has, should take less than 15 seconds to complete</li>
+ * <li>it does not use a cluster</li>
 * </ul>
- * * @see MediumTests * @see LargeTests * @see IntegrationTests */ -public interface SmallTests {} +public interface SmallTests { +} diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java index efc8d5ddc84..d1f433b9719 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** - * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build + * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build * infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java index 85507de5ad4..f556979e5b6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as region tests which takes longer than 5 minutes to run on public build * infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java index 86aa6bdc85e..9fa0579ed47 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml b/hbase-archetypes/hbase-archetype-builder/pom.xml index 2d7f3888035..cb0085c84da 100644 --- a/hbase-archetypes/hbase-archetype-builder/pom.xml +++ b/hbase-archetypes/hbase-archetype-builder/pom.xml @@ -1,6 +1,5 @@ - - + + hbase-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir} @@ -76,29 +75,30 @@ hbase-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-client.dir} - true + true + pom.xml - + hbase-shaded-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir} @@ -113,20 +113,21 @@ hbase-shaded-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-shaded-client.dir} - true + true + pom.xml - + @@ -137,10 +138,10 @@ using xml-maven-plugin for xslt transformation, below. --> hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir} @@ -149,16 +150,16 @@ pom.xml - + hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir} @@ -167,7 +168,7 @@ pom.xml - + @@ -183,10 +184,10 @@ modify-exemplar-pom-files-via-xslt - process-resources transform + process-resources @@ -213,10 +214,10 @@ prevent warnings when project is generated from archetype. --> modify-archetype-pom-files-via-xslt - package transform + package @@ -243,32 +244,32 @@ - maven-antrun-plugin + maven-antrun-plugin make-scripts-executable - process-resources run + process-resources - - + + run-createArchetypes-script - compile run + compile - - - + + + run-installArchetypes-script - install run + install - - - + + + diff --git a/hbase-archetypes/hbase-client-project/pom.xml b/hbase-archetypes/hbase-client-project/pom.xml index 08630ae8ee5..3bc71c10f59 100644 --- a/hbase-archetypes/hbase-client-project/pom.xml +++ b/hbase-archetypes/hbase-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 2.5.0-SNAPSHOT .. diff --git a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java index 5164ab21716..198ee1c7afd 100644 --- a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -61,21 +58,21 @@ public final class HelloHBase { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. */ try (Connection connection = ConnectionFactory.createConnection(); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.getClusterStatus(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -93,9 +90,8 @@ public final class HelloHBase { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. 
* @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -104,48 +100,38 @@ public final class HelloHBase { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build(); admin.createTable(desc); } } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. - * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). * @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. 
- * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -153,38 +139,32 @@ public final class HelloHBase { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. - * - * @param admin Standard Admin object + * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -195,28 +175,24 @@ public final class HelloHBase { /** * Invokes Table#delete to delete test data (i.e. the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. 
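The retrieval loop reformatted above walks Result#getNoVersionMap(), whose generic type arguments do not survive the diff rendering here. The following sketch spells out the map shape (column family to qualifier-to-value map); the helper class name is illustrative and it assumes a Table handle obtained as in the example.

import java.io.IOException;
import java.util.Map.Entry;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class RowPrinter {
  // Result#getNoVersionMap() returns NavigableMap<byte[], NavigableMap<byte[], byte[]>>:
  // column family -> (qualifier -> most recent value), which is what the loop above walks.
  static void printRow(Table table, byte[] rowId) throws IOException {
    Result row = table.get(new Get(rowId));
    if (row.isEmpty()) {
      System.out.println("Row [" + Bytes.toString(rowId) + "] not found.");
      return;
    }
    for (Entry<byte[], NavigableMap<byte[], byte[]>> family : row.getNoVersionMap().entrySet()) {
      for (Entry<byte[], byte[]> column : family.getValue().entrySet()) {
        System.out.println(Bytes.toString(family.getKey()) + ":" + Bytes.toString(column.getKey())
          + " = " + Bytes.toString(column.getValue()));
      }
    }
  }
}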
admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java index 9a92e606ffb..b08ecf7ab1b 100644 --- a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,10 +44,9 @@ public class TestHelloHBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHelloHBase.class); + HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtility TEST_UTIL - = new HBaseTestingUtility(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public class TestHelloHBase { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public class TestHelloHBase { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public class TestHelloHBase { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public class TestHelloHBase { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = 
TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). - addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml b/hbase-archetypes/hbase-shaded-client-project/pom.xml index 29f46050688..168f6b28df2 100644 --- a/hbase-archetypes/hbase-shaded-client-project/pom.xml +++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 2.5.0-SNAPSHOT .. @@ -44,16 +41,16 @@ org.apache.hbase hbase-testing-util test - - - javax.xml.bind - jaxb-api - - - javax.ws.rs - jsr311-api - - + + + javax.xml.bind + jaxb-api + + + javax.ws.rs + jsr311-api + + org.apache.hbase diff --git a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java index 94a1e711d47..44629174601 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. 
For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -60,21 +57,21 @@ public final class HelloHBase { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. */ try (Connection connection = ConnectionFactory.createConnection(); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.getClusterStatus(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -92,9 +89,8 @@ public final class HelloHBase { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. * @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -103,47 +99,38 @@ public final class HelloHBase { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); admin.createTable(new HTableDescriptor(MY_TABLE_NAME) - .addFamily(new HColumnDescriptor(MY_COLUMN_FAMILY_NAME))); + .addFamily(new HColumnDescriptor(MY_COLUMN_FAMILY_NAME))); } } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. - * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). 
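The shaded-client HelloHBase above creates its table through the older HTableDescriptor/HColumnDescriptor constructors, whereas the non-shaded variant earlier in this patch uses TableDescriptorBuilder. For comparison, here is a builder-based sketch of the same table creation; it is illustrative only, mirroring the TableDescriptorBuilder usage already shown in the patch rather than prescribing a change to the archetype.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class CreateTableSketch {
  // Builder-based equivalent of the HTableDescriptor/HColumnDescriptor call above.
  static void createTable(Admin admin, TableName tableName, byte[] family) throws IOException {
    if (!admin.tableExists(tableName)) {
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
      admin.createTable(desc);
    }
  }
}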
* @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. - * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -151,38 +138,32 @@ public final class HelloHBase { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. - * - * @param admin Standard Admin object + * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -193,28 +174,24 @@ public final class HelloHBase { /** * Invokes Table#delete to delete test data (i.e. 
the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java index 0f0f7d91ade..f87d9d7c700 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,10 +44,9 @@ public class TestHelloHBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHelloHBase.class); + HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtility TEST_UTIL - = new HBaseTestingUtility(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public class TestHelloHBase { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public class TestHelloHBase { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public class TestHelloHBase { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public class TestHelloHBase { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). 
- addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml index 9cdd4cff599..87e2f72ddb7 100644 --- a/hbase-archetypes/pom.xml +++ b/hbase-archetypes/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -68,10 +67,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 298be40c0b5..5c239643392 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-assembly - Apache HBase - Assembly - - Module that does project assembly and that is all that it does. - pom + Apache HBase - Assembly + Module that does project assembly and that is all that it does. true - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - aggregate-licenses - - process - - - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - maven-assembly-plugin - - - hbase-${project.version} - false - true - posix - - ${assembly.file} - src/main/assembly/client.xml - - - - - maven-dependency-plugin - - - - create-hbase-generated-classpath - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath.txt - jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce - - - - - - create-hbase-generated-classpath-jline - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jline.txt - jline - - - - - - create-hbase-generated-classpath-jruby - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jruby.txt - jruby-complete - - - - - - - unpack-dependency-notices - prepare-package - - unpack-dependencies - - - pom - true - **\/NOTICE,**\/NOTICE.txt - - - - - - org.codehaus.mojo - exec-maven-plugin - ${exec.maven.version} - - - concat-NOTICE-files - package - - exec - - - env - - bash - -c - cat maven-shared-archive-resources/META-INF/NOTICE \ - `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` - - - ${project.build.directory}/NOTICE.aggregate - ${project.build.directory} - - - - - - - @@ -189,7 +47,7 @@ org.apache.hbase hbase-shaded-mapreduce - + org.apache.hbase hbase-it @@ -258,16 +116,16 @@ hbase-external-blockcache - org.apache.hbase - hbase-testing-util + org.apache.hbase + hbase-testing-util - org.apache.hbase - hbase-metrics-api + org.apache.hbase + hbase-metrics-api - org.apache.hbase - hbase-metrics + org.apache.hbase + hbase-metrics org.apache.hbase @@ -278,9 +136,9 @@ hbase-protocol-shaded - org.apache.hbase - hbase-resource-bundle - true + org.apache.hbase + 
hbase-resource-bundle + true org.apache.httpcomponents @@ -379,12 +237,151 @@ log4j-1.2-api + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + aggregate-licenses + + process + + + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + maven-assembly-plugin + + + hbase-${project.version} + false + true + posix + + ${assembly.file} + src/main/assembly/client.xml + + + + + maven-dependency-plugin + + + + create-hbase-generated-classpath + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath.txt + jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce + + + + + + create-hbase-generated-classpath-jline + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jline.txt + jline + + + + + + create-hbase-generated-classpath-jruby + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jruby.txt + jruby-complete + + + + + + + unpack-dependency-notices + + unpack-dependencies + + prepare-package + + pom + true + **\/NOTICE,**\/NOTICE.txt + + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec.maven.version} + + + concat-NOTICE-files + + exec + + package + + env + + bash + -c + cat maven-shared-archive-resources/META-INF/NOTICE \ + `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` + + ${project.build.directory}/NOTICE.aggregate + ${project.build.directory} + + + + + + + rsgroup - !skip-rsgroup + !skip-rsgroup @@ -392,18 +389,18 @@ org.apache.hbase hbase-rsgroup - - junit - junit - - - org.mockito - mockito-core - - - compile - - + + junit + junit + + + org.mockito + mockito-core + + + compile + + diff --git a/hbase-asyncfs/pom.xml b/hbase-asyncfs/pom.xml index 99a325d26e8..4eb26471ceb 100644 --- a/hbase-asyncfs/pom.xml +++ b/hbase-asyncfs/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,33 +30,6 @@ hbase-asyncfs Apache HBase - Asynchronous FileSystem HBase Asynchronous FileSystem Implementation for WAL - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -169,6 +141,33 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -176,8 +175,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -265,8 +265,7 @@ lifecycle-mapping - - + diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java index 059ca00b02c..b88b32bdb81 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface for asynchronous filesystem output stream. diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java index 5b713196d0b..a530ca4a2a0 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,9 +47,9 @@ public final class AsyncFSOutputHelper { * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}. */ public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite, - boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) - throws IOException, CommonFSUtils.StreamLacksCapabilityException { + boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, + Class channelClass, StreamSlowMonitor monitor) + throws IOException, CommonFSUtils.StreamLacksCapabilityException { if (fs instanceof DistributedFileSystem) { return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f, overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java index f618158fdb3..8906f003bc8 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,9 +22,9 @@ import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHel import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile; import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease; import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE; import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.WRITER_IDLE; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import com.google.errorprone.annotations.RestrictedApi; import java.io.IOException; @@ -41,7 +41,6 @@ import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.Encryptor; import org.apache.hadoop.fs.Path; @@ -181,7 +180,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { // State for connections to DN private enum State { - STREAMING, CLOSING, BROKEN, CLOSED + STREAMING, + CLOSING, + BROKEN, + CLOSED } private volatile State state; @@ -197,7 +199,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { if (c.unfinishedReplicas.remove(channel.id())) { long current = EnvironmentEdgeManager.currentTime(); streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen, - current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); + current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); c.lastAckTimestamp = current; if (c.unfinishedReplicas.isEmpty()) { // we need to remove first before complete the future. 
It is possible that after we @@ -285,13 +287,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception { Status reply = getStatus(ack); if (reply != Status.SUCCESS) { - failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block + + " from datanode " + ctx.channel().remoteAddress())); return; } if (PipelineAck.isRestartOOBStatus(reply)) { - failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + + block + " from datanode " + ctx.channel().remoteAddress())); return; } if (ack.getSeqno() == HEART_BEAT_SEQNO) { @@ -346,10 +348,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { } } - FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs, - DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId, - LocatedBlock locatedBlock, Encryptor encryptor, Map datanodeInfoMap, - DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) { + FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client, + ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock, + Encryptor encryptor, Map datanodeInfoMap, DataChecksum summer, + ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) { this.conf = conf; this.dfs = dfs; this.client = client; @@ -404,7 +406,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { } private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, - long nextPacketOffsetInBlock, boolean syncBlock) { + long nextPacketOffsetInBlock, boolean syncBlock) { int dataLen = dataBuf.readableBytes(); int chunkLen = summer.getBytesPerChecksum(); int trailingPartialChunkLen = dataLen % chunkLen; @@ -414,13 +416,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen)); checksumBuf.writerIndex(checksumLen); PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, - nextPacketSeqno, false, dataLen, syncBlock); + nextPacketSeqno, false, dataLen, syncBlock); int headerLen = header.getSerializedSize(); ByteBuf headerBuf = alloc.buffer(headerLen); header.putInBuffer(headerBuf.nioBuffer(0, headerLen)); headerBuf.writerIndex(headerLen); - Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, - datanodeInfoMap.keySet(), dataLen); + Callback c = + new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen); waitingAckQueue.addLast(c); // recheck again after we pushed the callback to queue if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) { @@ -430,7 +432,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { return; } // TODO: we should perhaps measure time taken per DN here; - // we could collect statistics per DN, and/or exclude bad nodes in createOutput. + // we could collect statistics per DN, and/or exclude bad nodes in createOutput. 
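The ack-handling and flushBuffer code reformatted above completes a pending flush future only once every channel in unfinishedReplicas has acknowledged the packet. The sketch below shows how a caller consumes that contract through the AsyncFSOutput interface; the helper class is illustrative, these classes are HBase-internal (IA.Private), and the comment on syncBlock=false reflects my reading of the parameter rather than anything stated in this hunk.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
import org.apache.hadoop.hbase.util.Bytes;

final class AsyncFlushSketch {
  // Buffers a payload and flushes it; the returned future completes with the acked
  // length only after every datanode in the pipeline has acknowledged the packet,
  // which is the condition tracked via unfinishedReplicas in the code above.
  static CompletableFuture<Long> writeAndFlush(AsyncFSOutput out, String payload) {
    out.write(Bytes.toBytes(payload)); // buffered locally until flush
    return out.flush(false); // false: do not ask the datanodes to sync the block to disk
  }
}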
datanodeInfoMap.keySet().forEach(ch -> { ch.write(headerBuf.retainedDuplicate()); ch.write(checksumBuf.retainedDuplicate()); @@ -515,7 +517,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { } trailingPartialChunkLength = dataLen % summer.getBytesPerChecksum(); ByteBuf newBuf = alloc.directBuffer(sendBufSizePRedictor.guess(dataLen)) - .ensureWritable(trailingPartialChunkLength); + .ensureWritable(trailingPartialChunkLength); if (trailingPartialChunkLength != 0) { buf.readerIndex(dataLen - trailingPartialChunkLength).readBytes(newBuf, trailingPartialChunkLength); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 45ff1cb145f..3596438c153 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -116,7 +116,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; @InterfaceAudience.Private public final class FanOutOneBlockAsyncDFSOutputHelper { private static final Logger LOG = - LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class); + LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class); private FanOutOneBlockAsyncDFSOutputHelper() { } @@ -154,9 +154,8 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { // helper class for creating files. private interface FileCreator { default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked, - String clientName, EnumSetWritable flag, boolean createParent, - short replication, long blockSize, CryptoProtocolVersion[] supportedVersions) - throws Exception { + String clientName, EnumSetWritable flag, boolean createParent, short replication, + long blockSize, CryptoProtocolVersion[] supportedVersions) throws Exception { try { return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent, replication, blockSize, supportedVersions); @@ -170,8 +169,8 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { }; Object createObject(ClientProtocol instance, String src, FsPermission masked, String clientName, - EnumSetWritable flag, boolean createParent, short replication, long blockSize, - CryptoProtocolVersion[] supportedVersions) throws Exception; + EnumSetWritable flag, boolean createParent, short replication, long blockSize, + CryptoProtocolVersion[] supportedVersions) throws Exception; } private static final FileCreator FILE_CREATOR; @@ -199,7 +198,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { private static LeaseManager createLeaseManager() throws NoSuchMethodException { Method beginFileLeaseMethod = - DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class); + DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class); beginFileLeaseMethod.setAccessible(true); Method endFileLeaseMethod = DFSClient.class.getDeclaredMethod("endFileLease", long.class); endFileLeaseMethod.setAccessible(true); @@ -227,13 +226,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { private static FileCreator createFileCreator3_3() 
throws NoSuchMethodException { Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class, - String.class, EnumSetWritable.class, boolean.class, short.class, long.class, - CryptoProtocolVersion[].class, String.class, String.class); + String.class, EnumSetWritable.class, boolean.class, short.class, long.class, + CryptoProtocolVersion[].class, String.class, String.class); return (instance, src, masked, clientName, flag, createParent, replication, blockSize, - supportedVersions) -> { + supportedVersions) -> { return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, - createParent, replication, blockSize, supportedVersions, null, null); + createParent, replication, blockSize, supportedVersions, null, null); }; } @@ -243,7 +242,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { CryptoProtocolVersion[].class, String.class); return (instance, src, masked, clientName, flag, createParent, replication, blockSize, - supportedVersions) -> { + supportedVersions) -> { return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, createParent, replication, blockSize, supportedVersions, null); }; @@ -255,7 +254,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { CryptoProtocolVersion[].class); return (instance, src, masked, clientName, flag, createParent, replication, blockSize, - supportedVersions) -> { + supportedVersions) -> { return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, createParent, replication, blockSize, supportedVersions); }; @@ -307,9 +306,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { FILE_CREATOR = createFileCreator(); SHOULD_REPLICATE_FLAG = loadShouldReplicateFlag(); } catch (Exception e) { - String msg = "Couldn't properly initialize access to HDFS internals. Please " + - "update your WAL Provider to not make use of the 'asyncfs' provider. See " + - "HBASE-16110 for more information."; + String msg = "Couldn't properly initialize access to HDFS internals. Please " + + "update your WAL Provider to not make use of the 'asyncfs' provider. 
See " + + "HBASE-16110 for more information."; LOG.error(msg, e); throw new Error(msg, e); } @@ -340,7 +339,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { } private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnInfo, - Promise promise, int timeoutMs) { + Promise promise, int timeoutMs) { channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS), new ProtobufVarint32FrameDecoder(), new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()), @@ -348,7 +347,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { @Override protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp) - throws Exception { + throws Exception { Status pipelineStatus = resp.getStatus(); if (PipelineAck.isRestartOOBStatus(pipelineStatus)) { throw new IOException("datanode " + dnInfo + " is restarting"); @@ -356,11 +355,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink(); if (resp.getStatus() != Status.SUCCESS) { if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException("Got access token error" + ", status message " + - resp.getMessage() + ", " + logInfo); + throw new InvalidBlockTokenException("Got access token error" + ", status message " + + resp.getMessage() + ", " + logInfo); } else { - throw new IOException("Got error" + ", status=" + resp.getStatus().name() + - ", status message " + resp.getMessage() + ", " + logInfo); + throw new IOException("Got error" + ", status=" + resp.getStatus().name() + + ", status message " + resp.getMessage() + ", " + logInfo); } } // success @@ -387,7 +386,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) { promise - .tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response")); + .tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response")); } else { super.userEventTriggered(ctx, evt); } @@ -401,7 +400,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { } private static void requestWriteBlock(Channel channel, StorageType storageType, - OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException { + OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException { OpWriteBlockProto proto = writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build(); int protoLen = proto.getSerializedSize(); @@ -414,9 +413,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { } private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo, - StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs, - DFSClient client, Token accessToken, Promise promise) - throws IOException { + StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs, + DFSClient client, Token accessToken, Promise promise) + throws IOException { Promise saslPromise = channel.eventLoop().newPromise(); trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise); saslPromise.addListener(new FutureListener() { @@ -435,13 +434,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { } private static List> connectToDataNodes(Configuration conf, DFSClient client, - String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, 
long latestGS, - BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup, - Class channelClass) { + String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, + BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup, + Class channelClass) { StorageType[] storageTypes = locatedBlock.getStorageTypes(); DatanodeInfo[] datanodeInfos = locatedBlock.getLocations(); boolean connectToDnViaHostname = - conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); + conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT); ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock()); blockCopy.setNumBytes(locatedBlock.getBlockSize()); @@ -450,11 +449,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { .setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))) .setClientName(clientName).build(); ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer); - OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder() - .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())) - .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()) - .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS) - .setRequestedChecksum(checksumProto) + OpWriteBlockProto.Builder writeBlockProtoBuilder = + OpWriteBlockProto.newBuilder().setHeader(header) + .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1) + .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd) + .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto) .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build()); List> futureList = new ArrayList<>(datanodeInfos.length); for (int i = 0; i < datanodeInfos.length; i++) { @@ -464,26 +463,26 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { futureList.add(promise); String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname); new Bootstrap().group(eventLoopGroup).channel(channelClass) - .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer() { + .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer() { - @Override - protected void initChannel(Channel ch) throws Exception { - // we need to get the remote address of the channel so we can only move on after - // channel connected. Leave an empty implementation here because netty does not allow - // a null handler. - } - }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() { + @Override + protected void initChannel(Channel ch) throws Exception { + // we need to get the remote address of the channel so we can only move on after + // channel connected. Leave an empty implementation here because netty does not allow + // a null handler. 
+ } + }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (future.isSuccess()) { - initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, - timeoutMs, client, locatedBlock.getBlockToken(), promise); - } else { - promise.tryFailure(future.cause()); - } + @Override + public void operationComplete(ChannelFuture future) throws Exception { + if (future.isSuccess()) { + initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, + timeoutMs, client, locatedBlock.getBlockToken(), promise); + } else { + promise.tryFailure(future.cause()); } - }); + } + }); } return futureList; } @@ -513,21 +512,21 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { } private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src, - boolean overwrite, boolean createParent, short replication, long blockSize, - EventLoopGroup eventLoopGroup, Class channelClass, - StreamSlowMonitor monitor) throws IOException { + boolean overwrite, boolean createParent, short replication, long blockSize, + EventLoopGroup eventLoopGroup, Class channelClass, StreamSlowMonitor monitor) + throws IOException { Configuration conf = dfs.getConf(); DFSClient client = dfs.getClient(); String clientName = client.getClientName(); ClientProtocol namenode = client.getNamenode(); - int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, - DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES); + int createMaxRetries = + conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES); ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager(); Set toExcludeNodes = new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); for (int retry = 0;; retry++) { LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, - toExcludeNodes, retry); + toExcludeNodes, retry); HdfsFileStatus stat; try { stat = FILE_CREATOR.create(namenode, src, @@ -616,14 +615,14 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { * inside an {@link EventLoop}. 
*/ public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f, - boolean overwrite, boolean createParent, short replication, long blockSize, - EventLoopGroup eventLoopGroup, Class channelClass, - final StreamSlowMonitor monitor) throws IOException { + boolean overwrite, boolean createParent, short replication, long blockSize, + EventLoopGroup eventLoopGroup, Class channelClass, + final StreamSlowMonitor monitor) throws IOException { return new FileSystemLinkResolver() { @Override public FanOutOneBlockAsyncDFSOutput doCall(Path p) - throws IOException, UnresolvedLinkException { + throws IOException, UnresolvedLinkException { return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor); } @@ -643,7 +642,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { } static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName, - ExtendedBlock block, long fileId) { + ExtendedBlock block, long fileId) { for (int retry = 0;; retry++) { try { if (namenode.complete(src, clientName, block, fileId)) { diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java index 090b9b4a63f..89f386c8d64 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -104,7 +104,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; @InterfaceAudience.Private public final class FanOutOneBlockAsyncDFSOutputSaslHelper { private static final Logger LOG = - LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class); + LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class); private FanOutOneBlockAsyncDFSOutputSaslHelper() { } @@ -129,21 +129,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { private interface TransparentCryptoHelper { Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) - throws IOException; + throws IOException; } private static final TransparentCryptoHelper TRANSPARENT_CRYPTO_HELPER; private static SaslAdaptor createSaslAdaptor() - throws NoSuchFieldException, NoSuchMethodException { + throws NoSuchFieldException, NoSuchMethodException { Field saslPropsResolverField = - SaslDataTransferClient.class.getDeclaredField("saslPropsResolver"); + SaslDataTransferClient.class.getDeclaredField("saslPropsResolver"); saslPropsResolverField.setAccessible(true); Field trustedChannelResolverField = - SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver"); + SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver"); trustedChannelResolverField.setAccessible(true); Field fallbackToSimpleAuthField = - SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth"); + SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth"); fallbackToSimpleAuthField.setAccessible(true); return new SaslAdaptor() { @@ -177,7 +177,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } private static 
TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396() - throws NoSuchMethodException { + throws NoSuchMethodException { Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class); decryptEncryptedDataEncryptionKeyMethod.setAccessible(true); @@ -185,7 +185,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { @Override public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, - DFSClient client) throws IOException { + DFSClient client) throws IOException { try { KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo); @@ -206,7 +206,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } private static TransparentCryptoHelper createTransparentCryptoHelperWithHDFS12396() - throws ClassNotFoundException, NoSuchMethodException { + throws ClassNotFoundException, NoSuchMethodException { Class hdfsKMSUtilCls = Class.forName("org.apache.hadoop.hdfs.HdfsKMSUtil"); Method decryptEncryptedDataEncryptionKeyMethod = hdfsKMSUtilCls.getDeclaredMethod( "decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class, KeyProvider.class); @@ -215,7 +215,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { @Override public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, - DFSClient client) throws IOException { + DFSClient client) throws IOException { try { KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod .invoke(null, feInfo, client.getKeyProvider()); @@ -236,12 +236,12 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } private static TransparentCryptoHelper createTransparentCryptoHelper() - throws NoSuchMethodException, ClassNotFoundException { + throws NoSuchMethodException, ClassNotFoundException { try { return createTransparentCryptoHelperWithoutHDFS12396(); } catch (NoSuchMethodException e) { - LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + - " should be hadoop version with HDFS-12396", e); + LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + + " should be hadoop version with HDFS-12396", e); } return createTransparentCryptoHelperWithHDFS12396(); } @@ -252,8 +252,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { TRANSPARENT_CRYPTO_HELPER = createTransparentCryptoHelper(); } catch (Exception e) { String msg = "Couldn't properly initialize access to HDFS internals. Please " - + "update your WAL Provider to not make use of the 'asyncfs' provider. See " - + "HBASE-16110 for more information."; + + "update your WAL Provider to not make use of the 'asyncfs' provider. See " + + "HBASE-16110 for more information."; LOG.error(msg, e); throw new Error(msg, e); } @@ -324,8 +324,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { private int step = 0; public SaslNegotiateHandler(Configuration conf, String username, char[] password, - Map saslProps, int timeoutMs, Promise promise, - DFSClient dfsClient) throws SaslException { + Map saslProps, int timeoutMs, Promise promise, DFSClient dfsClient) + throws SaslException { this.conf = conf; this.saslProps = saslProps; this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL, @@ -355,8 +355,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } /** - * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. 
- * After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. + * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After + * Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. * Use Reflection to check which ones to use. */ private static class BuilderPayloadSetter { @@ -366,13 +366,11 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { /** * Create a ByteString from byte array without copying (wrap), and then set it as the payload * for the builder. - * * @param builder builder for HDFS DataTransferEncryptorMessage. - * @param payload byte array of payload. - * @throws IOException + * @param payload byte array of payload. n */ - static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload) - throws IOException { + static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, + byte[] payload) throws IOException { Object byteStringObject; try { // byteStringObject = new LiteralByteString(payload); @@ -396,18 +394,18 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { try { // See if it can load the relocated ByteString, which comes from hadoop-thirdparty. byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString"); - LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + - " Assuming this is Hadoop 3.3.0+."); + LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + + " Assuming this is Hadoop 3.3.0+."); } catch (ClassNotFoundException e) { - LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + - " Assuming this is below Hadoop 3.3.0", e); + LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + + " Assuming this is below Hadoop 3.3.0", e); } // LiteralByteString is a package private class in protobuf. Make it accessible. 
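As a reading aid for this hunk: BuilderPayloadSetter here, like ProtobufDecoder later in this patch, probes at class-load time whether the relocated hadoop-thirdparty protobuf is on the classpath and only falls back to unshaded com.google.protobuf otherwise; the LiteralByteString lookup that follows uses the same probe-by-reflection idea. A minimal standalone sketch of that detection step, assuming only slf4j is available; the class and method names below are invented for illustration and are not code from the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class ProtobufRelocationProbe {
  private static final Logger LOG = LoggerFactory.getLogger(ProtobufRelocationProbe.class);

  private ProtobufRelocationProbe() {
  }

  // Prefer the relocated ByteString shipped with Hadoop 3.3.0+ (hadoop-thirdparty),
  // otherwise assume an older Hadoop and use the unshaded protobuf class.
  static Class<?> pickByteStringClass() throws ClassNotFoundException {
    try {
      Class<?> relocated = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString");
      LOG.debug("Found relocated ByteString class from hadoop-thirdparty; assuming Hadoop 3.3.0+.");
      return relocated;
    } catch (ClassNotFoundException e) {
      LOG.debug("No relocated ByteString class; assuming Hadoop below 3.3.0.", e);
      return Class.forName("com.google.protobuf.ByteString");
    }
  }
}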
Class literalByteStringClass; try { - literalByteStringClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); + literalByteStringClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found."); } catch (ClassNotFoundException e) { try { @@ -435,9 +433,9 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload, - List options) throws IOException { + List options) throws IOException { DataTransferEncryptorMessageProto.Builder builder = - DataTransferEncryptorMessageProto.newBuilder(); + DataTransferEncryptorMessageProto.newBuilder(); builder.setStatus(DataTransferEncryptorStatus.SUCCESS); if (payload != null) { BuilderPayloadSetter.wrapAndSetPayload(builder, payload); @@ -486,7 +484,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { private boolean requestedQopContainsPrivacy() { Set requestedQop = - ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); + ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); return requestedQop.contains("auth-conf"); } @@ -495,15 +493,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { throw new IOException("Failed to complete SASL handshake"); } Set requestedQop = - ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); + ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); String negotiatedQop = getNegotiatedQop(); LOG.debug( "Verifying QOP, requested QOP = " + requestedQop + ", negotiated QOP = " + negotiatedQop); if (!requestedQop.contains(negotiatedQop)) { throw new IOException(String.format("SASL handshake completed, but " - + "channel does not have acceptable quality of protection, " - + "requested = %s, negotiated = %s", - requestedQop, negotiatedQop)); + + "channel does not have acceptable quality of protection, " + + "requested = %s, negotiated = %s", requestedQop, negotiatedQop)); } } @@ -522,13 +519,13 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { outKey = saslClient.unwrap(outKey, 0, outKey.length); } return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey, - option.getOutIv()); + option.getOutIv()); } private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto, - boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException { + boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException { List cipherOptions = - PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList()); + PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList()); if (cipherOptions == null || cipherOptions.isEmpty()) { return null; } @@ -558,7 +555,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { assert response == null; checkSaslComplete(); CipherOption cipherOption = - getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient); + getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient); ChannelPipeline p = ctx.pipeline(); while (p.first() != null) { p.removeFirst(); @@ -639,7 +636,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof ByteBuf) { ByteBuf buf = (ByteBuf) msg; cBuf.addComponent(buf); @@ -676,7 +673,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper 
{ private final Decryptor decryptor; public DecryptHandler(CryptoCodec codec, byte[] key, byte[] iv) - throws GeneralSecurityException, IOException { + throws GeneralSecurityException, IOException { this.decryptor = codec.createDecryptor(); this.decryptor.init(key, Arrays.copyOf(iv, iv.length)); } @@ -709,14 +706,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { private final Encryptor encryptor; public EncryptHandler(CryptoCodec codec, byte[] key, byte[] iv) - throws GeneralSecurityException, IOException { + throws GeneralSecurityException, IOException { this.encryptor = codec.createEncryptor(); this.encryptor.init(key, Arrays.copyOf(iv, iv.length)); } @Override protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) - throws Exception { + throws Exception { if (preferDirect) { return ctx.alloc().directBuffer(msg.readableBytes()); } else { @@ -747,7 +744,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { private static String getUserNameFromEncryptionKey(DataEncryptionKey encryptionKey) { return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER - + Base64.getEncoder().encodeToString(encryptionKey.nonce); + + Base64.getEncoder().encodeToString(encryptionKey.nonce); } private static char[] encryptionKeyToPassword(byte[] encryptionKey) { @@ -771,26 +768,26 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs, - String username, char[] password, Map saslProps, Promise saslPromise, - DFSClient dfsClient) { + String username, char[] password, Map saslProps, Promise saslPromise, + DFSClient dfsClient) { try { channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS), new ProtobufVarint32FrameDecoder(), new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()), new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise, - dfsClient)); + dfsClient)); } catch (SaslException e) { saslPromise.tryFailure(e); } } static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo, - int timeoutMs, DFSClient client, Token accessToken, - Promise saslPromise) throws IOException { + int timeoutMs, DFSClient client, Token accessToken, + Promise saslPromise) throws IOException { SaslDataTransferClient saslClient = client.getSaslDataTransferClient(); SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient); TrustedChannelResolver trustedChannelResolver = - SASL_ADAPTOR.getTrustedChannelResolver(saslClient); + SASL_ADAPTOR.getTrustedChannelResolver(saslClient); AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient); InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress(); if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) { @@ -805,24 +802,23 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey), encryptionKeyToPassword(encryptionKey.encryptionKey), - createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, - client); + createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client); } else if (!UserGroupInformation.isSecurityEnabled()) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr - + 
", datanodeId = " + dnInfo); + + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } else if (dnInfo.getXferPort() < 1024) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in secured configuration with " - + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo); + + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in secured configuration with " - + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo); + + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } else if (saslPropsResolver != null) { @@ -832,21 +828,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper { } doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken), buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise, - client); + client); } else { // It's a secured cluster using non-privileged ports, but no SASL. The only way this can // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare // edge case. if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in secured configuration with no SASL " - + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo); + + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } } static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client) - throws IOException { + throws IOException { FileEncryptionInfo feInfo = stat.getFileEncryptionInfo(); if (feInfo == null) { return null; diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java index 3be9a2e49c1..a0b5cc00841 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,33 +17,29 @@ */ package org.apache.hadoop.hbase.io.asyncfs; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.List; /** - * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. - * The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf). 
- * - * Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and - * so we must use reflection to detect which one (relocated or not) to use. - * - * Do not use this to process HBase's shaded protobuf messages. This is meant to process the - * protobuf messages in HDFS for the asyncfs use case. - * */ + * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode + * supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates + * protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect + * which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages. + * This is meant to process the protobuf messages in HDFS for the asyncfs use case. + */ @InterfaceAudience.Private public class ProtobufDecoder extends MessageToMessageDecoder { - private static final Logger LOG = - LoggerFactory.getLogger(ProtobufDecoder.class); + private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class); private static Class protobufMessageLiteClass = null; private static Class protobufMessageLiteBuilderClass = null; @@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder { private Object parser; private Object builder; - public ProtobufDecoder(Object prototype) { try { - Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod( - "getDefaultInstanceForType"); - Object prototype1 = getDefaultInstanceForTypeMethod - .invoke(ObjectUtil.checkNotNull(prototype, "prototype")); + Method getDefaultInstanceForTypeMethod = + protobufMessageLiteClass.getMethod("getDefaultInstanceForType"); + Object prototype1 = + getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype")); // parser = prototype.getParserForType() parser = getParserForTypeMethod.invoke(prototype1); - parseFromMethod = parser.getClass().getMethod( - "parseFrom", byte[].class, int.class, int.class); + parseFromMethod = + parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class); // builder = prototype.newBuilderForType(); builder = newBuilderForTypeMethod.invoke(prototype1); - mergeFromMethod = builder.getClass().getMethod( - "mergeFrom", byte[].class, int.class, int.class); + mergeFromMethod = + builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class); // All protobuf message builders inherits from MessageLite.Builder buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build"); @@ -88,8 +83,7 @@ public class ProtobufDecoder extends MessageToMessageDecoder { } } - protected void decode( - ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { int length = msg.readableBytes(); byte[] array; int offset; @@ -122,8 +116,8 @@ public class ProtobufDecoder extends MessageToMessageDecoder { try { protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite"); - protobufMessageLiteBuilderClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); + protobufMessageLiteBuilderClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); LOG.debug("Hadoop 3.3 and above shades protobuf."); } catch (ClassNotFoundException e) { LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e); diff --git 
a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java index 2f652440e38..d5dbfb02abc 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java index c7cc1fcfcb4..0297285b93e 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -50,7 +49,7 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput { public WrapperAsyncFSOutput(Path file, FSDataOutputStream out) { this.out = out; this.executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build()); + .setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build()); } @Override @@ -95,8 +94,8 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput { } long pos = out.getPos(); /** - * This flush0 method could only be called by single thread, so here we could - * safely overwrite without any synchronization. + * This flush0 method could only be called by single thread, so here we could safely overwrite + * without any synchronization. 
*/ this.syncedLength = pos; future.complete(pos); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java index 80748cad609..61f75582a1c 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java @@ -56,24 +56,23 @@ public class ExcludeDatanodeManager implements ConfigurationObserver { private final int maxExcludeDNCount; private final Configuration conf; // This is a map of providerId->StreamSlowMonitor - private final Map streamSlowMonitors = - new ConcurrentHashMap<>(1); + private final Map streamSlowMonitors = new ConcurrentHashMap<>(1); public ExcludeDatanodeManager(Configuration conf) { this.conf = conf; this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT); this.excludeDNsCache = CacheBuilder.newBuilder() - .expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) - .maximumSize(this.maxExcludeDNCount) - .build(); + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.maxExcludeDNCount).build(); } /** * Try to add a datanode to the regionserver excluding cache * @param datanodeInfo the datanode to be added to the excluded cache - * @param cause the cause that the datanode is hope to be excluded + * @param cause the cause that the datanode is hope to be excluded * @return True if the datanode is added to the regionserver excluding cache, false otherwise */ public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) { @@ -85,15 +84,15 @@ public class ExcludeDatanodeManager implements ConfigurationObserver { datanodeInfo, cause, excludeDNsCache.size()); return true; } - LOG.debug("Try add datanode {} to exclude cache by [{}] failed, " - + "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet()); + LOG.debug( + "Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}", + datanodeInfo, cause, getExcludeDNs().keySet()); return false; } public StreamSlowMonitor getStreamSlowMonitor(String name) { String key = name == null || name.isEmpty() ? 
"defaultMonitorName" : name; - return streamSlowMonitors - .computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); + return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); } public Map getExcludeDNs() { @@ -105,10 +104,12 @@ public class ExcludeDatanodeManager implements ConfigurationObserver { for (StreamSlowMonitor monitor : streamSlowMonitors.values()) { monitor.onConfigurationChange(conf); } - this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite( - this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), - TimeUnit.HOURS).maximumSize(this.conf - .getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) + this.excludeDNsCache = CacheBuilder.newBuilder() + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, + DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) .build(); } } diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java index 73cce189574..c415706aa6a 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java @@ -38,18 +38,16 @@ import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; /** - * Class for monitor the wal file flush performance. - * Each active wal file has a StreamSlowMonitor. + * Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor. */ @InterfaceAudience.Private public class StreamSlowMonitor implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class); /** - * Configure for the min count for a datanode detected slow. - * If a datanode is detected slow times up to this count, then it will be added to the exclude - * datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} - * of this regionsever. + * Configure for the min count for a datanode detected slow. If a datanode is detected slow times + * up to this count, then it will be added to the exclude datanode cache by + * {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever. */ private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY = "hbase.regionserver.async.wal.min.slow.detect.count"; @@ -63,9 +61,9 @@ public class StreamSlowMonitor implements ConfigurationObserver { private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms /** - * Configure for the speed check of packet min length. - * For packets whose data length smaller than this value, check slow by processing time. - * While for packets whose data length larger than this value, check slow by flushing speed. + * Configure for the speed check of packet min length. For packets whose data length smaller than + * this value, check slow by processing time. While for packets whose data length larger than this + * value, check slow by flushing speed. 
*/ private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY = "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min"; @@ -73,8 +71,8 @@ public class StreamSlowMonitor implements ConfigurationObserver { private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024; /** - * Configure for the slow packet process time, a duration from send to ACK. - * The processing time check is for packets that data length smaller than + * Configure for the slow packet process time, a duration from send to ACK. The processing time + * check is for packets that data length smaller than * {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY} */ public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY = @@ -105,15 +103,16 @@ public class StreamSlowMonitor implements ConfigurationObserver { private long minLengthForSpeedCheck; public StreamSlowMonitor(Configuration conf, String name, - ExcludeDatanodeManager excludeDatanodeManager) { + ExcludeDatanodeManager excludeDatanodeManager) { setConf(conf); this.name = name; this.excludeDatanodeManager = excludeDatanodeManager; this.datanodeSlowDataQueue = CacheBuilder.newBuilder() .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) - .expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) + .expireAfterWrite( + conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) .build(new CacheLoader>() { @Override public Deque load(DatanodeInfo key) throws Exception { @@ -129,30 +128,33 @@ public class StreamSlowMonitor implements ConfigurationObserver { /** * Check if the packet process time shows that the relevant datanode is a slow node. - * @param datanodeInfo the datanode that processed the packet - * @param packetDataLen the data length of the packet (in bytes) - * @param processTimeMs the process time (in ms) of the packet on the datanode, + * @param datanodeInfo the datanode that processed the packet + * @param packetDataLen the data length of the packet (in bytes) + * @param processTimeMs the process time (in ms) of the packet on the datanode, * @param lastAckTimestamp the last acked timestamp of the packet on another datanode - * @param unfinished if the packet is unfinished flushed to the datanode replicas + * @param unfinished if the packet is unfinished flushed to the datanode replicas */ public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen, - long processTimeMs, long lastAckTimestamp, int unfinished) { + long processTimeMs, long lastAckTimestamp, int unfinished) { long current = EnvironmentEdgeManager.currentTime(); // Here are two conditions used to determine whether a datanode is slow, // 1. For small packet, we just have a simple time limit, without considering // the size of the packet. // 2. For large packet, we will calculate the speed, and check if the speed is too slow. - boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || ( - packetDataLen > minLengthForSpeedCheck + boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) + || (packetDataLen > minLengthForSpeedCheck && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs); if (slow) { // Check if large diff ack timestamp between replicas, // should try to avoid misjudgments that caused by GC STW. 
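For reviewers tracing the re-wrapped conditions in this hunk, the decision logic is unchanged: a packet at or below the length threshold counts as slow purely by ack time, a larger packet by flush speed, and a slow packet is only recorded when the ack-timestamp guard that follows rules out a GC-pause misjudgment. A self-contained sketch of those two predicates, mirroring the expressions in checkProcessTimeAndSpeed; the class name and method shapes are illustrative, not code from the patch:

final class SlowPacketChecks {
  private SlowPacketChecks() {
  }

  // Small packets: judge by processing time alone. Large packets: judge by flush speed,
  // i.e. bytes per millisecond compared against the configured minimum.
  static boolean isSlow(long packetDataLen, long processTimeMs, long minLengthForSpeedCheck,
      long slowPacketAckMs, double minPacketFlushSpeedKBs) {
    return (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
      || (packetDataLen > minLengthForSpeedCheck
        && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
  }

  // Only count a slow packet when the gap to the previous replica ack is large enough,
  // or when nothing is outstanding, to avoid blaming a datanode for a GC pause.
  static boolean shouldRecord(long currentMs, long lastAckTimestamp, long slowPacketAckMs,
      int unfinished) {
    return (lastAckTimestamp > 0 && currentMs - lastAckTimestamp > slowPacketAckMs / 2)
      || (lastAckTimestamp <= 0 && unfinished == 0);
  }
}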
- if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || ( - lastAckTimestamp <= 0 && unfinished == 0)) { - LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " - + "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs, - unfinished, lastAckTimestamp, this.name); + if ( + (lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) + || (lastAckTimestamp <= 0 && unfinished == 0) + ) { + LOG.info( + "Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " + + "lastAckTimestamp={}, monitor name: {}", + datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name); if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) { excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack"); } @@ -168,8 +170,10 @@ public class StreamSlowMonitor implements ConfigurationObserver { private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long processTime) { Deque slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo); long current = EnvironmentEdgeManager.currentTime(); - while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl - || slowDNQueue.size() >= minSlowDetectCount)) { + while ( + !slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl + || slowDNQueue.size() >= minSlowDetectCount) + ) { slowDNQueue.removeFirst(); } slowDNQueue.addLast(new PacketAckData(dataLength, processTime)); @@ -177,13 +181,13 @@ public class StreamSlowMonitor implements ConfigurationObserver { } private void setConf(Configuration conf) { - this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, - DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); + this.minSlowDetectCount = + conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL); this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY, - DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); - this.minLengthForSpeedCheck = conf.getLong( - DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, + DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); + this.minLengthForSpeedCheck = + conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH); this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY, DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java index 91c003cb6dd..0f80f874a31 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,8 @@ package org.apache.hadoop.hbase.util; import org.apache.yetus.audience.InterfaceAudience; /** - * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns - * a boolean to support canceling the operation. + * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support + * canceling the operation. *
<p>
* Used for doing updating of OPENING znode during log replay on region open. */ @@ -30,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience; public interface CancelableProgressable { /** - * Report progress. Returns true if operations should continue, false if the - * operation should be canceled and rolled back. + * Report progress. Returns true if operations should continue, false if the operation should be + * canceled and rolled back. * @return whether to continue (true) or cancel (false) the operation */ boolean progress(); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java index 9c3da1658c7..e4a410aa9c3 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -120,8 +120,10 @@ public final class RecoverLeaseFSUtils { // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though. long localStartWaiting = EnvironmentEdgeManager.currentTime(); - while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase * - nbAttempt) { + while ( + (EnvironmentEdgeManager.currentTime() - localStartWaiting) + < subsequentPauseBase * nbAttempt + ) { Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); if (findIsFileClosedMeth) { try { @@ -152,10 +154,10 @@ public final class RecoverLeaseFSUtils { private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, final int nbAttempt, final Path p, final long startWaiting) { if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) { - LOG.warn("Cannot recoverLease after trying for " + - conf.getInt("hbase.lease.recovery.timeout", 900000) + - "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.warn("Cannot recoverLease after trying for " + + conf.getInt("hbase.lease.recovery.timeout", 900000) + + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + + getLogMessageDetail(nbAttempt, p, startWaiting)); return true; } return false; @@ -170,8 +172,8 @@ public final class RecoverLeaseFSUtils { boolean recovered = false; try { recovered = dfs.recoverLease(p); - LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.info((recovered ? 
"Recovered lease, " : "Failed to recover lease, ") + + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { // This exception comes out instead of FNFE, fix it @@ -189,8 +191,8 @@ public final class RecoverLeaseFSUtils { */ private static String getLogMessageDetail(final int nbAttempt, final Path p, final long startWaiting) { - return "attempt=" + nbAttempt + " on file=" + p + " after " + - (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; + return "attempt=" + nbAttempt + " on file=" + p + " after " + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; } /** diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java index e1bc83ca684..0bac2fb00a3 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java index a3da52ef335..f7ca1639ec6 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.asyncfs; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -44,19 +45,15 @@ public class TestExcludeDatanodeManager { StreamSlowMonitor streamSlowDNsMonitor = excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); assertEquals(1, 
excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } @@ -68,19 +65,15 @@ public class TestExcludeDatanodeManager { StreamSlowMonitor streamSlowDNsMonitor = excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java index d363282921c..26cbbe034a5 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,6 +57,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; @@ -240,9 +241,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase { StreamSlowMonitor streamSlowDNsMonitor = excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, - f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, - CHANNEL_CLASS, streamSlowDNsMonitor)) { + try (FanOutOneBlockAsyncDFSOutput output = + FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) { // should exclude the dead dn when retry so here we only have 2 DNs in pipeline assertEquals(2, output.getPipeline().length); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java index 8ee838449e1..3a9c2979b6c 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -70,10 +71,10 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class); + HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class); private static final Logger LOG = - LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class); + LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class); private static DistributedFileSystem FS; diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java index 66735a3fc8e..ae0ddb1e4d7 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java index 592598c8bb4..7a3a6de10f0 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java index ab23b741b26..356fc664166 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY; + import java.io.File; import java.io.IOException; import java.lang.reflect.Method; @@ -62,6 +63,7 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java index 55ef0b72b52..07fc3afbf2f 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public class TestSendBufSizePredictor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSendBufSizePredictor.class); + HBaseClassTestRule.forClass(TestSendBufSizePredictor.class); @Test public void test() { diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java index 5d0b2ebfff3..af5154b9460 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java @@ -110,9 +110,9 @@ public final class HBaseKerberosUtils { /** * Set up configuration for a secure HDFS+HBase cluster. - * @param conf configuration object. + * @param conf configuration object. * @param servicePrincipal service principal used by NN, HM and RS. - * @param spnegoPrincipal SPNEGO principal used by NN web UI. + * @param spnegoPrincipal SPNEGO principal used by NN web UI. */ public static void setSecuredConfiguration(Configuration conf, String servicePrincipal, String spnegoPrincipal) { @@ -156,7 +156,7 @@ public final class HBaseKerberosUtils { /** * Set up SSL configuration for HDFS NameNode and DataNode. * @param utility a HBaseTestingUtility object. - * @param clazz the caller test class. + * @param clazz the caller test class. * @throws Exception if unable to set up SSL configuration */ public static void setSSLConfiguration(HBaseCommonTestingUtility utility, Class clazz) diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java index 3931dfd5ba2..953d66b3fa4 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util; import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -69,8 +68,8 @@ public class TestRecoverLeaseFSUtils { Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // invocations will happen pretty fast... the we fall into the longer wait loop). - assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 * - HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); + assertTrue((EnvironmentEdgeManager.currentTime() - startTime) + > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); } /** diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index dd5002ff458..49437d96f49 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 2.5.0-SNAPSHOT .. 
hbase-build-configuration - Apache HBase - Build Configuration - Configure the build-support artifacts for maven build pom + Apache HBase - Build Configuration + Configure the build-support artifacts for maven build + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.yetus + audience-annotations + + @@ -50,18 +62,6 @@ - - - org.apache.hbase - hbase-annotations - test-jar - test - - - org.apache.yetus - audience-annotations - - diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml index 6771dc8ebb4..1618e8f13d3 100644 --- a/hbase-checkstyle/pom.xml +++ b/hbase-checkstyle/pom.xml @@ -1,7 +1,5 @@ - + -4.0.0 -org.apache.hbase -hbase-checkstyle -2.5.0-SNAPSHOT -Apache HBase - Checkstyle -Module to hold Checkstyle properties for HBase. - + 4.0.0 + - hbase org.apache.hbase + hbase 2.5.0-SNAPSHOT .. + org.apache.hbase + hbase-checkstyle + 2.5.0-SNAPSHOT + Apache HBase - Checkstyle + Module to hold Checkstyle properties for HBase. - - + + - - org.apache.maven.plugins - maven-site-plugin - - true - - - - - maven-assembly-plugin - - true - - - - + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + maven-assembly-plugin + + true + + + + diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index 93432a213b1..e2f9d2ed10d 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,28 +30,6 @@ hbase-client Apache HBase - Client Client of HBase - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - @@ -221,6 +198,28 @@ + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -242,8 +241,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -398,8 +398,7 @@ lifecycle-mapping - - + diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java index b137a7da2ce..b9736d57345 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Interface to support the aborting of a given server or client. *
<p>
- * This is used primarily for ZooKeeper usage when we could get an unexpected - * and fatal exception, requiring an abort. + * This is used primarily for ZooKeeper usage when we could get an unexpected and fatal exception, + * requiring an abort. *
<p>
* Implemented by the Master, RegionServer, and TableServers (client). */ @@ -33,13 +32,12 @@ public interface Abortable { /** * Abort the server or client. * @param why Why we're aborting. - * @param e Throwable that caused abort. Can be null. + * @param e Throwable that caused abort. Can be null. */ void abort(String why, Throwable e); /** - * It just call another abort method and the Throwable - * parameter is null. + * It just call another abort method and the Throwable parameter is null. * @param why Why we're aborting. * @see Abortable#abort(String, Throwable) */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java index b1fcd945b7d..367d4dad785 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,21 +63,20 @@ public class AsyncMetaTableAccessor { private static final Logger LOG = LoggerFactory.getLogger(AsyncMetaTableAccessor.class); - /** The delimiter for meta columns for replicaIds > 0 */ private static final char META_REPLICA_ID_DELIMITER = '_'; /** A regex for parsing server columns from meta. See above javadoc for meta layout */ - private static final Pattern SERVER_COLUMN_PATTERN = Pattern - .compile("^server(_[0-9a-fA-F]{4})?$"); + private static final Pattern SERVER_COLUMN_PATTERN = + Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); public static CompletableFuture tableExists(AsyncTable metaTable, - TableName tableName) { + TableName tableName) { return getTableState(metaTable, tableName).thenApply(Optional::isPresent); } public static CompletableFuture> getTableState(AsyncTable metaTable, - TableName tableName) { + TableName tableName) { CompletableFuture> future = new CompletableFuture<>(); Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn()); long time = EnvironmentEdgeManager.currentTime(); @@ -101,13 +100,12 @@ public class AsyncMetaTableAccessor { } /** - * Returns the HRegionLocation from meta for the given region - * @param metaTable - * @param regionName region we're looking for + * Returns the HRegionLocation from meta for the given region n * @param regionName region we're + * looking for * @return HRegionLocation for the given region */ - public static CompletableFuture> getRegionLocation( - AsyncTable metaTable, byte[] regionName) { + public static CompletableFuture> + getRegionLocation(AsyncTable metaTable, byte[] regionName) { CompletableFuture> future = new CompletableFuture<>(); try { RegionInfo parsedRegionInfo = MetaTableAccessor.parseRegionInfoFromRegionName(regionName); @@ -128,13 +126,12 @@ public class AsyncMetaTableAccessor { } /** - * Returns the HRegionLocation from meta for the given encoded region name - * @param metaTable - * @param encodedRegionName region we're looking for + * Returns the HRegionLocation from meta for the given encoded region name n * @param + * encodedRegionName region we're looking for * @return HRegionLocation for the given region */ - public static CompletableFuture> getRegionLocationWithEncodedName( - AsyncTable metaTable, byte[] encodedRegionName) { + public static CompletableFuture> + getRegionLocationWithEncodedName(AsyncTable metaTable, byte[] 
encodedRegionName) { CompletableFuture> future = new CompletableFuture<>(); addListener( metaTable @@ -149,8 +146,10 @@ public class AsyncMetaTableAccessor { .filter(result -> MetaTableAccessor.getRegionInfo(result) != null).forEach(result -> { getRegionLocations(result).ifPresent(locations -> { for (HRegionLocation location : locations.getRegionLocations()) { - if (location != null && - encodedRegionNameStr.equals(location.getRegion().getEncodedName())) { + if ( + location != null + && encodedRegionNameStr.equals(location.getRegion().getEncodedName()) + ) { future.complete(Optional.of(location)); return; } @@ -166,24 +165,22 @@ public class AsyncMetaTableAccessor { Cell cell = r.getColumnLatestCell(getTableFamily(), getStateColumn()); if (cell == null) return Optional.empty(); try { - return Optional.of(TableState.parseFrom( - TableName.valueOf(r.getRow()), - Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(), cell.getValueOffset() - + cell.getValueLength()))); + return Optional.of( + TableState.parseFrom(TableName.valueOf(r.getRow()), Arrays.copyOfRange(cell.getValueArray(), + cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength()))); } catch (DeserializationException e) { throw new IOException("Failed to parse table state from result: " + r, e); } } /** - * Used to get all region locations for the specific table. - * @param metaTable - * @param tableName table we're looking for, can be null for getting all regions + * Used to get all region locations for the specific table. n * @param tableName table we're + * looking for, can be null for getting all regions * @return the list of region locations. The return value will be wrapped by a * {@link CompletableFuture}. */ public static CompletableFuture> getTableHRegionLocations( - AsyncTable metaTable, TableName tableName) { + AsyncTable metaTable, TableName tableName) { CompletableFuture> future = new CompletableFuture<>(); addListener(getTableRegionsAndLocations(metaTable, tableName, true), (locations, err) -> { if (err != null) { @@ -201,54 +198,53 @@ public class AsyncMetaTableAccessor { } /** - * Used to get table regions' info and server. - * @param metaTable - * @param tableName table we're looking for, can be null for getting all regions + * Used to get table regions' info and server. n * @param tableName table we're looking for, can + * be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return the list of regioninfos and server. The return value will be wrapped by a * {@link CompletableFuture}. 
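Before the reformatted visitor implementation that follows, a condensed sketch of what it collects from each visited meta row: every non-null replica location in the row's RegionLocations becomes a (RegionInfo, ServerName) pair. Illustrative only, using the HBase client types named in this file; the helper class and method are not code from the patch:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Pair;

final class RegionLocationPairs {
  private RegionLocationPairs() {
  }

  // Flatten one meta row's RegionLocations into (RegionInfo, ServerName) pairs,
  // skipping replica slots that have no location.
  static List<Pair<RegionInfo, ServerName>> toPairs(RegionLocations locations) {
    List<Pair<RegionInfo, ServerName>> results = new ArrayList<>();
    if (locations == null) {
      return results;
    }
    for (HRegionLocation loc : locations.getRegionLocations()) {
      if (loc != null) {
        results.add(new Pair<>(loc.getRegion(), loc.getServerName()));
      }
    }
    return results;
  }
}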
*/ private static CompletableFuture>> getTableRegionsAndLocations( - final AsyncTable metaTable, - final TableName tableName, final boolean excludeOfflinedSplitParents) { + final AsyncTable metaTable, final TableName tableName, + final boolean excludeOfflinedSplitParents) { CompletableFuture>> future = new CompletableFuture<>(); if (TableName.META_TABLE_NAME.equals(tableName)) { future.completeExceptionally(new IOException( - "This method can't be used to locate meta regions;" + " use MetaTableLocator instead")); + "This method can't be used to locate meta regions;" + " use MetaTableLocator instead")); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress CollectingVisitor> visitor = new CollectingVisitor>() { - private RegionLocations current = null; + private RegionLocations current = null; - @Override - public boolean visit(Result r) throws IOException { - Optional currentRegionLocations = getRegionLocations(r); - current = currentRegionLocations.orElse(null); - if (current == null || current.getRegionLocation().getRegion() == null) { - LOG.warn("No serialized RegionInfo in " + r); - return true; + @Override + public boolean visit(Result r) throws IOException { + Optional currentRegionLocations = getRegionLocations(r); + current = currentRegionLocations.orElse(null); + if (current == null || current.getRegionLocation().getRegion() == null) { + LOG.warn("No serialized RegionInfo in " + r); + return true; + } + RegionInfo hri = current.getRegionLocation().getRegion(); + if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; + // Else call super and add this Result to the collection. + return super.visit(r); } - RegionInfo hri = current.getRegionLocation().getRegion(); - if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; - // Else call super and add this Result to the collection. - return super.visit(r); - } - @Override - void add(Result r) { - if (current == null) { - return; - } - for (HRegionLocation loc : current.getRegionLocations()) { - if (loc != null) { - this.results.add(new Pair(loc.getRegion(), loc - .getServerName())); + @Override + void add(Result r) { + if (current == null) { + return; + } + for (HRegionLocation loc : current.getRegionLocations()) { + if (loc != null) { + this.results + .add(new Pair(loc.getRegion(), loc.getServerName())); + } } } - } - }; + }; addListener(scanMeta(metaTable, tableName, QueryType.REGION, visitor), (v, error) -> { if (error != null) { @@ -261,29 +257,25 @@ public class AsyncMetaTableAccessor { } /** - * Performs a scan of META table for given table. - * @param metaTable - * @param tableName table withing we scan - * @param type scanned part of meta + * Performs a scan of META table for given table. n * @param tableName table withing we scan + * @param type scanned part of meta * @param visitor Visitor invoked against each row */ private static CompletableFuture scanMeta(AsyncTable metaTable, - TableName tableName, QueryType type, final Visitor visitor) { + TableName tableName, QueryType type, final Visitor visitor) { return scanMeta(metaTable, getTableStartRowForMeta(tableName, type), getTableStopRowForMeta(tableName, type), type, Integer.MAX_VALUE, visitor); } /** - * Performs a scan of META table for given table. - * @param metaTable - * @param startRow Where to start the scan + * Performs a scan of META table for given table. 
n * @param startRow Where to start the scan * @param stopRow Where to stop the scan - * @param type scanned part of meta + * @param type scanned part of meta * @param maxRows maximum rows to return * @param visitor Visitor invoked against each row */ private static CompletableFuture scanMeta(AsyncTable metaTable, - byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { + byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; Scan scan = getMetaScan(metaTable, rowUpperLimit); for (byte[] family : type.getFamilies()) { @@ -298,8 +290,8 @@ public class AsyncMetaTableAccessor { if (LOG.isDebugEnabled()) { LOG.debug("Scanning META" + " starting at row=" + Bytes.toStringBinary(scan.getStartRow()) - + " stopping at row=" + Bytes.toStringBinary(scan.getStopRow()) + " for max=" - + rowUpperLimit + " with caching=" + scan.getCaching()); + + " stopping at row=" + Bytes.toStringBinary(scan.getStopRow()) + " for max=" + + rowUpperLimit + " with caching=" + scan.getCaching()); } CompletableFuture future = new CompletableFuture(); @@ -318,7 +310,7 @@ public class AsyncMetaTableAccessor { private final CompletableFuture future; MetaTableScanResultConsumer(int rowUpperLimit, Visitor visitor, - CompletableFuture future) { + CompletableFuture future) { this.rowUpperLimit = rowUpperLimit; this.visitor = visitor; this.future = future; @@ -332,7 +324,7 @@ public class AsyncMetaTableAccessor { @Override @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NONNULL_PARAM_VIOLATION", - justification = "https://github.com/findbugsproject/findbugs/issues/79") + justification = "https://github.com/findbugsproject/findbugs/issues/79") public void onComplete() { future.complete(null); } @@ -366,8 +358,10 @@ public class AsyncMetaTableAccessor { Scan scan = new Scan(); int scannerCaching = metaTable.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING); - if (metaTable.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS)) { + if ( + metaTable.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, + HConstants.DEFAULT_USE_META_REPLICAS) + ) { scan.setConsistency(Consistency.TIMELINE); } if (rowUpperLimit <= scannerCaching) { @@ -423,16 +417,15 @@ public class AsyncMetaTableAccessor { } /** - * Returns the HRegionLocation parsed from the given meta row Result - * for the given regionInfo and replicaId. The regionInfo can be the default region info - * for the replica. - * @param r the meta row result + * Returns the HRegionLocation parsed from the given meta row Result for the given regionInfo and + * replicaId. The regionInfo can be the default region info for the replica. 
+ * @param r the meta row result * @param regionInfo RegionInfo for default replica - * @param replicaId the replicaId for the HRegionLocation + * @param replicaId the replicaId for the HRegionLocation * @return HRegionLocation parsed from the given meta row Result for the given replicaId */ private static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo, - final int replicaId) { + final int replicaId) { Optional serverName = getServerName(r, replicaId); long seqNum = getSeqNumDuringOpen(r, replicaId); RegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId); @@ -448,8 +441,8 @@ public class AsyncMetaTableAccessor { byte[] serverColumn = getServerColumn(replicaId); Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn); if (cell == null || cell.getValueLength() == 0) return Optional.empty(); - String hostAndPort = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); + String hostAndPort = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); byte[] startcodeColumn = getStartCodeColumn(replicaId); cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn); if (cell == null || cell.getValueLength() == 0) return Optional.empty(); @@ -463,8 +456,8 @@ public class AsyncMetaTableAccessor { } /** - * The latest seqnum that the server writing to meta observed when opening the region. - * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written. + * The latest seqnum that the server writing to meta observed when opening the region. E.g. the + * seqNum when the result of {@link #getServerName(Result, int)} was written. * @param r Result to pull the seqNum from * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written. */ @@ -533,7 +526,7 @@ public class AsyncMetaTableAccessor { /** * Returns the RegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and * qualifier of the catalog table result. - * @param r a Result object from the catalog table scan + * @param r a Result object from the catalog table scan * @param qualifier Column family qualifier * @return An RegionInfo instance. */ @@ -585,7 +578,7 @@ public class AsyncMetaTableAccessor { return replicaId == 0 ? HConstants.SERVER_QUALIFIER : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -597,7 +590,7 @@ public class AsyncMetaTableAccessor { return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -609,12 +602,12 @@ public class AsyncMetaTableAccessor { return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** - * Parses the replicaId from the server column qualifier. See top of the class javadoc - * for the actual meta layout + * Parses the replicaId from the server column qualifier. 
See top of the class javadoc for the + * actual meta layout * @param serverColumn the column qualifier * @return an int for the replicaId */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java index 91cedd60299..615b3a467e6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ package org.apache.hadoop.hbase; import java.util.Collections; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -56,9 +54,8 @@ public final class CacheEvictionStats { private String getFailedRegions() { return exceptions.keySet().stream() - .map(regionName -> RegionInfo.prettyPrint(RegionInfo.encodeRegionName(regionName))) - .collect(Collectors.toList()) - .toString(); + .map(regionName -> RegionInfo.prettyPrint(RegionInfo.encodeRegionName(regionName))) + .collect(Collectors.toList()).toString(); } @InterfaceAudience.Private @@ -68,11 +65,8 @@ public final class CacheEvictionStats { @Override public String toString() { - return "CacheEvictionStats{" + - "evictedBlocks=" + evictedBlocks + - ", maxCacheSize=" + maxCacheSize + - ", failedRegionsSize=" + getExceptionCount() + - ", failedRegions=" + getFailedRegions() + - '}'; + return "CacheEvictionStats{" + "evictedBlocks=" + evictedBlocks + ", maxCacheSize=" + + maxCacheSize + ", failedRegionsSize=" + getExceptionCount() + ", failedRegions=" + + getFailedRegions() + '}'; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java index 85d68dcc08b..fabe7f03027 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,4 +38,4 @@ public class CacheEvictionStatsAggregator { public synchronized CacheEvictionStats sum() { return this.builder.build(); } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java index d9e1400da16..4b31d98611b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -42,7 +40,7 @@ public final class CacheEvictionStatsBuilder { return this; } - public void addException(byte[] regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java index 3feaaaf17a8..8bfde779176 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Returned to the clients when their request was discarded due to server being overloaded. - * Clients should retry upon receiving it. + * Returned to the clients when their request was discarded due to server being overloaded. Clients + * should retry upon receiving it. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java index 6bf68bc4ad0..ecad4d9f0bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Returned to clients when their request was dropped because the call queue was too big to - * accept a new call. Clients should retry upon receiving it. + * Returned to clients when their request was dropped because the call queue was too big to accept a + * new call. Clients should retry upon receiving it. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java index a63ca6936ec..1afcb30ece0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,10 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server clock skew is - * too high. + * This exception is thrown by the master when a region server clock skew is too high. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index 1dd01faf808..e769e80847f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -15,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; import java.util.UUID; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * The identifier for this cluster. - * It is serialized to the filesystem and up into zookeeper. This is a container for the id. - * Also knows how to serialize and deserialize the cluster id. + * The identifier for this cluster. It is serialized to the filesystem and up into zookeeper. This + * is a container for the id. Also knows how to serialize and deserialize the cluster id. */ @InterfaceAudience.Private public class ClusterId { private final String id; /** - * New ClusterID. Generates a uniqueid. + * New ClusterID. Generates a uniqueid. 
*/ public ClusterId() { this(UUID.randomUUID().toString()); @@ -50,17 +48,15 @@ public class ClusterId { /** * @return The clusterid serialized using pb w/ pb magic prefix */ - public byte [] toByteArray() { + public byte[] toByteArray() { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } /** * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix - * @return An instance of {@link ClusterId} made from bytes - * @throws DeserializationException - * @see #toByteArray() + * @return An instance of {@link ClusterId} made from bytes n * @see #toByteArray() */ - public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException { + public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { int pblen = ProtobufUtil.lengthOfPBMagic(); ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); @@ -87,8 +83,7 @@ public class ClusterId { } /** - * @param cid - * @return A {@link ClusterId} made from the passed in cid + * n * @return A {@link ClusterId} made from the passed in cid */ public static ClusterId convert(final ClusterIdProtos.ClusterId cid) { return new ClusterId(cid.getClusterId()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java index 29679e6fb6f..769d48496af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -39,28 +37,32 @@ import org.apache.yetus.audience.InterfaceAudience; *

 * <li>The average cluster load.</li>
 * <li>The number of regions deployed on the cluster.</li>
 * <li>The number of requests since last report.</li>
- * <li>Detailed region server loading and resource usage information,
- * per server and per region.</li>
+ * <li>Detailed region server loading and resource usage information, per server and per
+ * region.</li>
 * <li>Regions in transition at master</li>
 * <li>The unique cluster ID</li>
 * </ul>
- * {@link Option} provides a way to get desired ClusterStatus information.
- * The following codes will get all the cluster information.
+ * {@link Option} provides a way to get desired ClusterStatus information. The following
+ * codes will get all the cluster information.
+ *
 * <pre>
    - * {@code
    - * // Original version still works
    - * Admin admin = connection.getAdmin();
    - * ClusterMetrics metrics = admin.getClusterStatus();
    - * // or below, a new version which has the same effects
    - * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
    + * {
    + *   @code
    + *   // Original version still works
    + *   Admin admin = connection.getAdmin();
    + *   ClusterMetrics metrics = admin.getClusterStatus();
    + *   // or below, a new version which has the same effects
    + *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
      * }
 * </pre>
- * If information about live servers is the only wanted.
- * then codes in the following way:
+ *
+ * If information about live servers is the only wanted. then codes in the following way:
+ *
 * <pre>
    - * {@code
    - * Admin admin = connection.getAdmin();
    - * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
    + * {
    + *   @code
    + *   Admin admin = connection.getAdmin();
    + *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
      * }
 * </pre>
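The two usage snippets above still go through the deprecated Admin#getClusterStatus entry point. Below is a small standalone sketch of the same two calls against the replacement Admin#getClusterMetrics API; the connection setup, class name, and printed fields are illustrative assumptions, not part of this patch.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterMetricsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Request only what is needed, mirroring EnumSet.of(Option.LIVE_SERVERS) above.
      ClusterMetrics live =
        admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
      System.out.println("live region servers: " + live.getLiveServerMetrics().size());
      // Or request everything, mirroring EnumSet.allOf(Option.class) above.
      ClusterMetrics all = admin.getClusterMetrics(EnumSet.allOf(ClusterMetrics.Option.class));
      System.out.println("regions: " + all.getRegionCount()
        + ", requests since last report: " + all.getRequestCount());
    }
  }
}

Asking only for the Option values you need keeps the returned payload small, which is the point of the EnumSet overload.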
    */ @@ -88,7 +90,7 @@ public interface ClusterMetrics { */ default int getRegionCount() { return getLiveServerMetrics().entrySet().stream() - .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum(); + .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum(); } /** @@ -96,8 +98,8 @@ public interface ClusterMetrics { */ default long getRequestCount() { return getLiveServerMetrics().entrySet().stream() - .flatMap(v -> v.getValue().getRegionMetrics().values().stream()) - .mapToLong(RegionMetrics::getRequestCount).sum(); + .flatMap(v -> v.getValue().getRegionMetrics().values().stream()) + .mapToLong(RegionMetrics::getRequestCount).sum(); } /** @@ -122,17 +124,15 @@ public interface ClusterMetrics { default long getLastMajorCompactionTimestamp(TableName table) { return getLiveServerMetrics().values().stream() - .flatMap(s -> s.getRegionMetrics().values().stream()) - .filter(r -> RegionInfo.getTable(r.getRegionName()).equals(table)) - .mapToLong(RegionMetrics::getLastMajorCompactionTimestamp).min().orElse(0); + .flatMap(s -> s.getRegionMetrics().values().stream()) + .filter(r -> RegionInfo.getTable(r.getRegionName()).equals(table)) + .mapToLong(RegionMetrics::getLastMajorCompactionTimestamp).min().orElse(0); } default long getLastMajorCompactionTimestamp(byte[] regionName) { return getLiveServerMetrics().values().stream() - .filter(s -> s.getRegionMetrics().containsKey(regionName)) - .findAny() - .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp()) - .orElse(0L); + .filter(s -> s.getRegionMetrics().containsKey(regionName)).findAny() + .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp()).orElse(0L); } @Nullable @@ -150,13 +150,12 @@ public interface ClusterMetrics { if (serverSize == 0) { return 0; } - return (double)getRegionCount() / (double)serverSize; + return (double) getRegionCount() / (double) serverSize; } /** - * Provide region states count for given table. - * e.g howmany regions of give table are opened/closed/rit etc - * + * Provide region states count for given table. e.g howmany regions of give table are + * opened/closed/rit etc * @return map of table to region states count */ Map getTableRegionStatesCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java index 011f93f9fe9..7ef8a208611 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -26,13 +24,13 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.RegionState; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Option; @@ -43,42 +41,34 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; public final class ClusterMetricsBuilder { public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) { - ClusterStatusProtos.ClusterStatus.Builder builder - = ClusterStatusProtos.ClusterStatus.newBuilder() - .addAllBackupMasters(metrics.getBackupMasterNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) - .addAllDeadServers(metrics.getDeadServerNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) + ClusterStatusProtos.ClusterStatus.Builder builder = + ClusterStatusProtos.ClusterStatus.newBuilder() + .addAllBackupMasters(metrics.getBackupMasterNames().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .addAllDeadServers(metrics.getDeadServerNames().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream() - .map(s -> ClusterStatusProtos.LiveServerInfo - .newBuilder() - .setServer(ProtobufUtil.toServerName(s.getKey())) - .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())) - .build()) - .collect(Collectors.toList())) + .map(s -> ClusterStatusProtos.LiveServerInfo.newBuilder() + .setServer(ProtobufUtil.toServerName(s.getKey())) + .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())).build()) + .collect(Collectors.toList())) .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) - .collect(Collectors.toList())) + .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList())) .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream() - .map(r -> ClusterStatusProtos.RegionInTransition - .newBuilder() - .setSpec(HBaseProtos.RegionSpecifier - .newBuilder() - .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())) - .build()) - .setRegionState(r.convert()) - .build()) - .collect(Collectors.toList())) + .map(r -> ClusterStatusProtos.RegionInTransition.newBuilder() + .setSpec(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())).build()) + .setRegionState(r.convert()).build()) + .collect(Collectors.toList())) .setMasterInfoPort(metrics.getMasterInfoPort()) .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream() - .map(status -> - 
ClusterStatusProtos.TableRegionStatesCount.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) - .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())) - .build()) + .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) + .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build()) .collect(Collectors.toList())); if (metrics.getMasterName() != null) { builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); @@ -95,40 +85,33 @@ public final class ClusterMetricsBuilder { } if (metrics.getHBaseVersion() != null) { builder.setHbaseVersion( - FSProtos.HBaseVersionFileContent.newBuilder() - .setVersion(metrics.getHBaseVersion())); + FSProtos.HBaseVersionFileContent.newBuilder().setVersion(metrics.getHBaseVersion())); } return builder.build(); } - public static ClusterMetrics toClusterMetrics( - ClusterStatusProtos.ClusterStatus proto) { + public static ClusterMetrics toClusterMetrics(ClusterStatusProtos.ClusterStatus proto) { ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder(); - builder.setLiveServerMetrics(proto.getLiveServersList().stream() + builder + .setLiveServerMetrics(proto.getLiveServersList().stream() .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()), - ServerMetricsBuilder::toServerMetrics))) - .setDeadServerNames(proto.getDeadServersList().stream() - .map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .setBackerMasterNames(proto.getBackupMastersList().stream() - .map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .setRegionsInTransition(proto.getRegionsInTransitionList().stream() - .map(ClusterStatusProtos.RegionInTransition::getRegionState) - .map(RegionState::convert) - .collect(Collectors.toList())) - .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream() - .map(HBaseProtos.Coprocessor::getName) - .collect(Collectors.toList())) - .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .setTableRegionStatesCount( - proto.getTableRegionStatesCountList().stream() - .collect(Collectors.toMap( - e -> ProtobufUtil.toTableName(e.getTableName()), - e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) - .setMasterTasks(proto.getMasterTasksList().stream() - .map(t -> ProtobufUtil.getServerTask(t)).collect(Collectors.toList())); + ServerMetricsBuilder::toServerMetrics))) + .setDeadServerNames(proto.getDeadServersList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setBackerMasterNames(proto.getBackupMastersList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setRegionsInTransition(proto.getRegionsInTransitionList().stream() + .map(ClusterStatusProtos.RegionInTransition::getRegionState).map(RegionState::convert) + .collect(Collectors.toList())) + .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream() + .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) + .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setTableRegionStatesCount(proto.getTableRegionStatesCountList().stream() + .collect(Collectors.toMap(e -> ProtobufUtil.toTableName(e.getTableName()), + e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) + .setMasterTasks(proto.getMasterTasksList().stream().map(t -> 
ProtobufUtil.getServerTask(t)) + .collect(Collectors.toList())); if (proto.hasClusterId()) { builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString()); } @@ -158,21 +141,35 @@ public final class ClusterMetricsBuilder { */ public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) { switch (option) { - case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterMetrics.Option.MASTER; - case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON; - case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterMetrics.Option.TASKS; + case HBASE_VERSION: + return ClusterMetrics.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterMetrics.Option.LIVE_SERVERS; + case DEAD_SERVERS: + return ClusterMetrics.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + return ClusterMetrics.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterMetrics.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterMetrics.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterMetrics.Option.MASTER; + case BACKUP_MASTERS: + return ClusterMetrics.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterMetrics.Option.BALANCER_ON; + case SERVERS_NAME: + return ClusterMetrics.Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterMetrics.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterMetrics.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -183,21 +180,35 @@ public final class ClusterMetricsBuilder { */ public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) { switch (option) { - case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterStatusProtos.Option.MASTER; - case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; - case SERVERS_NAME: return Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterStatusProtos.Option.TASKS; + case HBASE_VERSION: + return ClusterStatusProtos.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterStatusProtos.Option.LIVE_SERVERS; + case DEAD_SERVERS: + return 
ClusterStatusProtos.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterStatusProtos.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterStatusProtos.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterStatusProtos.Option.MASTER; + case BACKUP_MASTERS: + return ClusterStatusProtos.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterStatusProtos.Option.BALANCER_ON; + case SERVERS_NAME: + return Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterStatusProtos.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterStatusProtos.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -208,7 +219,7 @@ public final class ClusterMetricsBuilder { */ public static EnumSet toOptions(List options) { return options.stream().map(ClusterMetricsBuilder::toOption) - .collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class))); + .collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class))); } /** @@ -223,6 +234,7 @@ public final class ClusterMetricsBuilder { public static ClusterMetricsBuilder newBuilder() { return new ClusterMetricsBuilder(); } + @Nullable private String hbaseVersion; private List deadServerNames = Collections.emptyList(); @@ -244,10 +256,12 @@ public final class ClusterMetricsBuilder { private ClusterMetricsBuilder() { } + public ClusterMetricsBuilder setHBaseVersion(String value) { this.hbaseVersion = value; return this; } + public ClusterMetricsBuilder setDeadServerNames(List value) { this.deadServerNames = value; return this; @@ -262,62 +276,59 @@ public final class ClusterMetricsBuilder { this.masterName = value; return this; } + public ClusterMetricsBuilder setBackerMasterNames(List value) { this.backupMasterNames = value; return this; } + public ClusterMetricsBuilder setRegionsInTransition(List value) { this.regionsInTransition = value; return this; } + public ClusterMetricsBuilder setClusterId(String value) { this.clusterId = value; return this; } + public ClusterMetricsBuilder setMasterCoprocessorNames(List value) { this.masterCoprocessorNames = value; return this; } + public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) { this.balancerOn = value; return this; } + public ClusterMetricsBuilder setMasterInfoPort(int value) { this.masterInfoPort = value; return this; } + public ClusterMetricsBuilder setServerNames(List serversName) { this.serversName = serversName; return this; } + public ClusterMetricsBuilder setMasterTasks(List masterTasks) { this.masterTasks = masterTasks; return this; } - public ClusterMetricsBuilder setTableRegionStatesCount( - Map tableRegionStatesCount) { + public ClusterMetricsBuilder + setTableRegionStatesCount(Map tableRegionStatesCount) { this.tableRegionStatesCount = tableRegionStatesCount; return this; } public ClusterMetrics build() { - return new ClusterMetricsImpl( - hbaseVersion, - deadServerNames, - liveServerMetrics, - masterName, - backupMasterNames, - regionsInTransition, - clusterId, - masterCoprocessorNames, - balancerOn, - masterInfoPort, - serversName, - tableRegionStatesCount, - masterTasks - ); + return new ClusterMetricsImpl(hbaseVersion, deadServerNames, liveServerMetrics, masterName, + backupMasterNames, 
regionsInTransition, clusterId, masterCoprocessorNames, balancerOn, + masterInfoPort, serversName, tableRegionStatesCount, masterTasks); } + private static class ClusterMetricsImpl implements ClusterMetrics { @Nullable private final String hbaseVersion; @@ -338,17 +349,11 @@ public final class ClusterMetricsBuilder { private final List masterTasks; ClusterMetricsImpl(String hbaseVersion, List deadServerNames, - Map liveServerMetrics, - ServerName masterName, - List backupMasterNames, - List regionsInTransition, - String clusterId, - List masterCoprocessorNames, - Boolean balancerOn, - int masterInfoPort, - List serversName, - Map tableRegionStatesCount, - List masterTasks) { + Map liveServerMetrics, ServerName masterName, + List backupMasterNames, List regionsInTransition, String clusterId, + List masterCoprocessorNames, Boolean balancerOn, int masterInfoPort, + List serversName, Map tableRegionStatesCount, + List masterTasks) { this.hbaseVersion = hbaseVersion; this.deadServerNames = Preconditions.checkNotNull(deadServerNames); this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics); @@ -437,15 +442,15 @@ public final class ClusterMetricsBuilder { int backupMastersSize = getBackupMasterNames().size(); sb.append("\nNumber of backup masters: " + backupMastersSize); if (backupMastersSize > 0) { - for (ServerName serverName: getBackupMasterNames()) { + for (ServerName serverName : getBackupMasterNames()) { sb.append("\n " + serverName); } } int serversSize = getLiveServerMetrics().size(); int serversNameSize = getServersName().size(); - sb.append("\nNumber of live region servers: " - + (serversSize > 0 ? serversSize : serversNameSize)); + sb.append( + "\nNumber of live region servers: " + (serversSize > 0 ? serversSize : serversNameSize)); if (serversSize > 0) { for (ServerName serverName : getLiveServerMetrics().keySet()) { sb.append("\n " + serverName.getServerName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 6a51db08d8c..b202f0ec43e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -26,7 +24,6 @@ import java.util.Collection; import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.RegionState; import org.apache.yetus.audience.InterfaceAudience; @@ -45,32 +42,37 @@ import org.apache.hbase.thirdparty.com.google.common.base.Objects; *
 * <li>The average cluster load.</li>
 * <li>The number of regions deployed on the cluster.</li>
 * <li>The number of requests since last report.</li>
- * <li>Detailed region server loading and resource usage information,
- * per server and per region.</li>
+ * <li>Detailed region server loading and resource usage information, per server and per
+ * region.</li>
 * <li>Regions in transition at master</li>
 * <li>The unique cluster ID</li>
 * </ul>
 * {@link ClusterMetrics.Option} provides a way to get desired ClusterStatus information.
 * The following codes will get all the cluster information.
+ *
 * <pre>
    - * {@code
    - * // Original version still works
    - * Admin admin = connection.getAdmin();
    - * ClusterStatus status = admin.getClusterStatus();
    - * // or below, a new version which has the same effects
    - * ClusterStatus status = admin.getClusterStatus(EnumSet.allOf(Option.class));
    + * {
    + *   @code
    + *   // Original version still works
    + *   Admin admin = connection.getAdmin();
    + *   ClusterStatus status = admin.getClusterStatus();
    + *   // or below, a new version which has the same effects
    + *   ClusterStatus status = admin.getClusterStatus(EnumSet.allOf(Option.class));
      * }
 * </pre>
- * If information about live servers is the only wanted.
- * then codes in the following way:
+ *
+ * If information about live servers is the only wanted. then codes in the following way:
+ *
 * <pre>
    - * {@code
    - * Admin admin = connection.getAdmin();
    - * ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
    + * {
    + *   @code
    + *   Admin admin = connection.getAdmin();
    + *   ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
      * }
 * </pre>
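ClusterStatus itself is deprecated in favour of ClusterMetrics (see the @deprecated note just below), and since ClusterStatus implements ClusterMetrics the migration is mostly a matter of switching accessors. A hedged sketch of that switch, assuming a ClusterMetrics instance already obtained from Admin#getClusterMetrics; the class and method names are made up for illustration.

import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;

public class ClusterStatusMigrationSketch {
  // Replaces the pre-2.0 pattern of iterating status.getServers() and calling status.getLoad(sn).
  static void printRegionCounts(ClusterMetrics metrics) {
    for (Map.Entry<ServerName, ServerMetrics> e : metrics.getLiveServerMetrics().entrySet()) {
      System.out.println(e.getKey().getServerName() + " serves "
        + e.getValue().getRegionMetrics().size() + " regions");
    }
  }
}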
    - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link ClusterMetrics} instead. + * + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link ClusterMetrics} + * instead. */ @InterfaceAudience.Public @Deprecated @@ -86,26 +88,18 @@ public class ClusterStatus implements ClusterMetrics { */ @Deprecated public ClusterStatus(final String hbaseVersion, final String clusterid, - final Map servers, - final Collection deadServers, - final ServerName master, - final Collection backupMasters, - final List rit, - final String[] masterCoprocessors, - final Boolean balancerOn, - final int masterInfoPort) { + final Map servers, final Collection deadServers, + final ServerName master, final Collection backupMasters, + final List rit, final String[] masterCoprocessors, final Boolean balancerOn, + final int masterInfoPort) { // TODO: make this constructor private this(ClusterMetricsBuilder.newBuilder().setHBaseVersion(hbaseVersion) .setDeadServerNames(new ArrayList<>(deadServers)) - .setLiveServerMetrics(servers.entrySet().stream() - .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()))) + .setLiveServerMetrics( + servers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()))) .setBackerMasterNames(new ArrayList<>(backupMasters)).setBalancerOn(balancerOn) - .setClusterId(clusterid) - .setMasterCoprocessorNames(Arrays.asList(masterCoprocessors)) - .setMasterName(master) - .setMasterInfoPort(masterInfoPort) - .setRegionsInTransition(rit) - .build()); + .setClusterId(clusterid).setMasterCoprocessorNames(Arrays.asList(masterCoprocessors)) + .setMasterName(master).setMasterInfoPort(masterInfoPort).setRegionsInTransition(rit).build()); } @InterfaceAudience.Private @@ -127,10 +121,10 @@ public class ClusterStatus implements ClusterMetrics { } /** - * @return the number of region servers in the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLiveServerMetrics()}. - */ + * @return the number of region servers in the cluster + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLiveServerMetrics()}. + */ @Deprecated public int getServersSize() { return metrics.getLiveServerMetrics().size(); @@ -139,8 +133,8 @@ public class ClusterStatus implements ClusterMetrics { /** * @return the number of dead region servers in the cluster * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-13656). - * Use {@link #getDeadServerNames()}. + * (HBASE-13656). Use + * {@link #getDeadServerNames()}. */ @Deprecated public int getDeadServers() { @@ -149,8 +143,8 @@ public class ClusterStatus implements ClusterMetrics { /** * @return the number of dead region servers in the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getDeadServerNames()}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getDeadServerNames()}. */ @Deprecated public int getDeadServersSize() { @@ -159,8 +153,8 @@ public class ClusterStatus implements ClusterMetrics { /** * @return the number of regions deployed on the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionCount()}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionCount()}. 
*/ @Deprecated public int getRegionsCount() { @@ -169,8 +163,8 @@ public class ClusterStatus implements ClusterMetrics { /** * @return the number of requests since last report - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRequestCount()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRequestCount()} instead. */ @Deprecated public int getRequestsCount() { @@ -214,14 +208,14 @@ public class ClusterStatus implements ClusterMetrics { return false; } ClusterStatus other = (ClusterStatus) o; - return Objects.equal(getHBaseVersion(), other.getHBaseVersion()) && - Objects.equal(getLiveServerLoads(), other.getLiveServerLoads()) && - getDeadServerNames().containsAll(other.getDeadServerNames()) && - Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) && - Objects.equal(getMaster(), other.getMaster()) && - getBackupMasters().containsAll(other.getBackupMasters()) && - Objects.equal(getClusterId(), other.getClusterId()) && - getMasterInfoPort() == other.getMasterInfoPort(); + return Objects.equal(getHBaseVersion(), other.getHBaseVersion()) + && Objects.equal(getLiveServerLoads(), other.getLiveServerLoads()) + && getDeadServerNames().containsAll(other.getDeadServerNames()) + && Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) + && Objects.equal(getMaster(), other.getMaster()) + && getBackupMasters().containsAll(other.getBackupMasters()) + && Objects.equal(getClusterId(), other.getClusterId()) + && getMasterInfoPort() == other.getMasterInfoPort(); } @Override @@ -239,8 +233,8 @@ public class ClusterStatus implements ClusterMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLiveServerMetrics()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLiveServerMetrics()} instead. */ @Deprecated public Collection getServers() { @@ -250,8 +244,8 @@ public class ClusterStatus implements ClusterMetrics { /** * Returns detailed information about the current master {@link ServerName}. * @return current master information if it exists - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMasterName} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link #getMasterName} + * instead. */ @Deprecated public ServerName getMaster() { @@ -260,8 +254,8 @@ public class ClusterStatus implements ClusterMetrics { /** * @return the number of backup masters in the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getBackupMasterNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getBackupMasterNames} instead. */ @Deprecated public int getBackupMastersSize() { @@ -270,8 +264,8 @@ public class ClusterStatus implements ClusterMetrics { /** * @return the names of backup masters - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getBackupMasterNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getBackupMasterNames} instead. */ @Deprecated public List getBackupMasters() { @@ -279,10 +273,9 @@ public class ClusterStatus implements ClusterMetrics { } /** - * @param sn - * @return Server's load or null if not found. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLiveServerMetrics} instead. 
+ * n * @return Server's load or null if not found. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLiveServerMetrics} instead. */ @Deprecated public ServerLoad getLoad(final ServerName sn) { @@ -300,8 +293,8 @@ public class ClusterStatus implements ClusterMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMasterCoprocessorNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getMasterCoprocessorNames} instead. */ @Deprecated public String[] getMasterCoprocessors() { @@ -310,8 +303,8 @@ public class ClusterStatus implements ClusterMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLastMajorCompactionTimestamp(TableName)} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLastMajorCompactionTimestamp(TableName)} instead. */ @Deprecated public long getLastMajorCompactionTsForTable(TableName table) { @@ -319,8 +312,8 @@ public class ClusterStatus implements ClusterMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLastMajorCompactionTimestamp(byte[])} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLastMajorCompactionTimestamp(byte[])} instead. */ @Deprecated public long getLastMajorCompactionTsForRegion(final byte[] region) { @@ -328,8 +321,7 @@ public class ClusterStatus implements ClusterMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean isBalancerOn() { @@ -369,15 +361,15 @@ public class ClusterStatus implements ClusterMetrics { int backupMastersSize = getBackupMastersSize(); sb.append("\nNumber of backup masters: " + backupMastersSize); if (backupMastersSize > 0) { - for (ServerName serverName: metrics.getBackupMasterNames()) { + for (ServerName serverName : metrics.getBackupMasterNames()) { sb.append("\n " + serverName); } } int serversSize = getServersSize(); int serversNameSize = getServersName().size(); - sb.append("\nNumber of live region servers: " - + (serversSize > 0 ? serversSize : serversNameSize)); + sb.append( + "\nNumber of live region servers: " + (serversSize > 0 ? 
serversSize : serversNameSize)); if (serversSize > 0) { for (ServerName serverName : metrics.getLiveServerMetrics().keySet()) { sb.append("\n " + serverName.getServerName()); @@ -403,7 +395,7 @@ public class ClusterStatus implements ClusterMetrics { int ritSize = metrics.getRegionStatesInTransition().size(); sb.append("\nNumber of regions in transition: " + ritSize); if (ritSize > 0) { - for (RegionState state: metrics.getRegionStatesInTransition()) { + for (RegionState state : metrics.getRegionStatesInTransition()) { sb.append("\n " + state.toDescriptiveString()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java index 86aca2bc817..b8b2519dc09 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java index c0d9b603a8a..20cc35da042 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java @@ -7,33 +7,28 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase; +import com.google.protobuf.Service; import java.io.IOException; import java.util.Collections; - -import com.google.protobuf.Service; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * Base interface for the 4 coprocessors - MasterCoprocessor, RegionCoprocessor, - * RegionServerCoprocessor, and WALCoprocessor. - * Do NOT implement this interface directly. Unless an implementation implements one (or more) of - * the above mentioned 4 coprocessors, it'll fail to be loaded by any coprocessor host. + * RegionServerCoprocessor, and WALCoprocessor. Do NOT implement this interface directly. Unless an + * implementation implements one (or more) of the above mentioned 4 coprocessors, it'll fail to be + * loaded by any coprocessor host. Example: Building a coprocessor to observe Master operations. * - * Example: - * Building a coprocessor to observe Master operations. *
      * class MyMasterCoprocessor implements MasterCoprocessor {
      *   @Override
    @@ -48,6 +43,7 @@ import org.apache.yetus.audience.InterfaceStability;
  * </pre>
  *
  * Building a Service which can be loaded by both Master and RegionServer
+ *
  * <pre>
      * class MyCoprocessorService implements MasterCoprocessor, RegionServerCoprocessor {
      *   @Override
    @@ -87,18 +83,19 @@ public interface Coprocessor {
        * Called by the {@link CoprocessorEnvironment} during it's own startup to initialize the
        * coprocessor.
        */
    -  default void start(CoprocessorEnvironment env) throws IOException {}
    +  default void start(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
    -   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the
    -   * coprocessor.
    +   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the coprocessor.
        */
    -  default void stop(CoprocessorEnvironment env) throws IOException {}
    +  default void stop(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
        * Coprocessor endpoints providing protobuf services should override this method.
    -   * @return Iterable of {@link Service}s or empty collection. Implementations should never
    -   * return null.
    +   * @return Iterable of {@link Service}s or empty collection. Implementations should never return
    +   *         null.
        */
  default Iterable<Service> getServices() {
         return Collections.EMPTY_SET;
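A minimal sketch of the pattern the Coprocessor Javadoc above describes: implement one of the four sub-interfaces rather than Coprocessor itself, and override getServices() only when exposing a protobuf endpoint. The class name and service wiring below are illustrative assumptions, not code from this patch.

// Illustrative sketch: a Master-side endpoint built on the documented contract.
import java.util.Collections;

import com.google.protobuf.Service;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;

public class ExampleMasterEndpoint implements MasterCoprocessor {
  // In a real endpoint this would be a protobuf-generated Service instance.
  private final Service service = null;

  @Override
  public Iterable<Service> getServices() {
    if (service == null) {
      // Never return null; an empty collection is the documented fallback.
      return Collections.emptySet();
    }
    return Collections.singleton(service);
  }
}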
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    index 4fab7333dcd..edbc5f479d6 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    @@ -7,16 +7,14 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
    -
     package org.apache.hadoop.hbase;
     
     import org.apache.hadoop.conf.Configuration;
    @@ -46,8 +44,8 @@ public interface CoprocessorEnvironment {
       int getLoadSequence();
     
       /**
    -   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try
    -   *   to set a configuration.
    +   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
    +   *         set a configuration.
        */
       Configuration getConfiguration();
     
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    index 509844e367d..7e1821de7d4 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -41,7 +40,7 @@ public class DoNotRetryIOException extends HBaseIOException {
       }
     
       /**
    -   * @param message the message for this exception
    +   * @param message   the message for this exception
        * @param throwable the {@link Throwable} to use for this exception
        */
       public DoNotRetryIOException(String message, Throwable throwable) {
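A hedged sketch of the usual role of DoNotRetryIOException, marking failures the client should not retry; the validation rule below is invented purely for illustration.

// Illustrative sketch: fail fast with a non-retryable exception.
import org.apache.hadoop.hbase.DoNotRetryIOException;

final class RequestValidator {
  static void checkBatchSize(int actual, int max) throws DoNotRetryIOException {
    if (actual > max) {
      // Retrying would hit the same limit, so surface a non-retryable failure.
      throw new DoNotRetryIOException("batch size " + actual + " exceeds limit " + max);
    }
  }
}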
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    index 76f374c412f..f4391f1025c 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    @@ -7,24 +7,22 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
     package org.apache.hadoop.hbase;
     
     import java.io.IOException;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Thrown during flush if the possibility snapshot content was not properly
    - * persisted into store files.  Response should include replay of wal content.
    + * Thrown during flush if the possibility snapshot content was not properly persisted into store
    + * files. Response should include replay of wal content.
      */
     @InterfaceAudience.Public
     public class DroppedSnapshotException extends IOException {
    @@ -43,9 +41,8 @@ public class DroppedSnapshotException extends IOException {
     
       /**
        * DroppedSnapshotException with cause
    -   *
        * @param message the message for this exception
    -   * @param cause the cause for this exception
    +   * @param cause   the cause for this exception
        */
       public DroppedSnapshotException(String message, Throwable cause) {
         super(message, cause);
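The message-plus-cause constructor documented above is typically used to wrap the underlying flush failure; a sketch under that assumption, with hypothetical method and region names.

// Illustrative sketch: preserve the original failure as the cause for later diagnosis.
import java.io.IOException;
import org.apache.hadoop.hbase.DroppedSnapshotException;

final class FlushErrorExample {
  static DroppedSnapshotException wrap(String regionName, IOException cause) {
    return new DroppedSnapshotException("region: " + regionName, cause);
  }
}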
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java
    index 6445be9cfaf..e5e2f7b7cca 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java
    @@ -7,35 +7,31 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
     package org.apache.hadoop.hbase;
     
    -
     import java.io.IOException;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Throw when failed cleanup unsuccessful initialized wal
      */
     @InterfaceAudience.Public
    -public class FailedCloseWALAfterInitializedErrorException
    -  extends IOException {
    +public class FailedCloseWALAfterInitializedErrorException extends IOException {
     
       private static final long serialVersionUID = -5463156587431677322L;
     
       /**
        * constructor with error msg and throwable
        * @param msg message
    -   * @param t throwable
    +   * @param t   throwable
        */
       public FailedCloseWALAfterInitializedErrorException(String msg, Throwable t) {
         super(msg, t);
    @@ -55,4 +51,4 @@ public class FailedCloseWALAfterInitializedErrorException
       public FailedCloseWALAfterInitializedErrorException() {
         super();
       }
    -}
    \ No newline at end of file
    +}
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
    index c72ed19e486..2e4ebbd0baa 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -20,8 +20,8 @@ package org.apache.hadoop.hbase;
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Base class for exceptions thrown by an HBase server. May contain extra info about
    - * the state of the server when the exception was thrown.
    + * Base class for exceptions thrown by an HBase server. May contain extra info about the state of
    + * the server when the exception was thrown.
      */
     @InterfaceAudience.Public
     public class HBaseServerException extends HBaseIOException {
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    index 2f21d60878b..4694afbb0b1 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,8 +18,6 @@
     package org.apache.hadoop.hbase;
     
     import java.util.Map;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
     import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
     import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
    @@ -32,30 +29,39 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
     import org.apache.hadoop.hbase.regionserver.BloomType;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * An HColumnDescriptor contains information about a column family such as the
    - * number of versions, compression settings, etc.
    - *
    - * It is used as input when creating a table or adding a column.
    + * An HColumnDescriptor contains information about a column family such as the number of versions,
    + * compression settings, etc. It is used as input when creating a table or adding a column.
      */
     @InterfaceAudience.Public
     @Deprecated // remove it in 3.0
 public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    -  public static final String IN_MEMORY_COMPACTION = ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION;
    +  public static final String IN_MEMORY_COMPACTION =
    +    ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION;
       public static final String COMPRESSION = ColumnFamilyDescriptorBuilder.COMPRESSION;
    -  public static final String COMPRESSION_COMPACT = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT;
    -  public static final String COMPRESSION_COMPACT_MAJOR = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MAJOR;
    -  public static final String COMPRESSION_COMPACT_MINOR = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MINOR;
    +  public static final String COMPRESSION_COMPACT =
    +    ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT;
    +  public static final String COMPRESSION_COMPACT_MAJOR =
    +    ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MAJOR;
    +  public static final String COMPRESSION_COMPACT_MINOR =
    +    ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MINOR;
       public static final String ENCODE_ON_DISK = "ENCODE_ON_DISK";
    -  public static final String DATA_BLOCK_ENCODING = ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING;
    +  public static final String DATA_BLOCK_ENCODING =
    +    ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING;
       public static final String BLOCKCACHE = ColumnFamilyDescriptorBuilder.BLOCKCACHE;
    -  public static final String CACHE_DATA_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_DATA_ON_WRITE;
    -  public static final String CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE;
    -  public static final String CACHE_BLOOMS_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE;
    -  public static final String EVICT_BLOCKS_ON_CLOSE = ColumnFamilyDescriptorBuilder.EVICT_BLOCKS_ON_CLOSE;
    +  public static final String CACHE_DATA_ON_WRITE =
    +    ColumnFamilyDescriptorBuilder.CACHE_DATA_ON_WRITE;
    +  public static final String CACHE_INDEX_ON_WRITE =
    +    ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE;
    +  public static final String CACHE_BLOOMS_ON_WRITE =
    +    ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE;
    +  public static final String EVICT_BLOCKS_ON_CLOSE =
    +    ColumnFamilyDescriptorBuilder.EVICT_BLOCKS_ON_CLOSE;
       public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
    -  public static final String PREFETCH_BLOCKS_ON_OPEN = ColumnFamilyDescriptorBuilder.PREFETCH_BLOCKS_ON_OPEN;
    +  public static final String PREFETCH_BLOCKS_ON_OPEN =
    +    ColumnFamilyDescriptorBuilder.PREFETCH_BLOCKS_ON_OPEN;
       public static final String BLOCKSIZE = ColumnFamilyDescriptorBuilder.BLOCKSIZE;
       public static final String LENGTH = "LENGTH";
       public static final String TTL = ColumnFamilyDescriptorBuilder.TTL;
@@ -72,46 +78,62 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    +   * Construct a column descriptor specifying only the family name The other attributes are
    +   * defaulted.
    +   * @param familyName Column family name. Must be 'printable' -- digit or letter -- and may not
    +   *                   contain a :
        * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-18433).
    -   *             Use {@link ColumnFamilyDescriptorBuilder#of(String)}.
    +   *             (HBASE-18433). Use
    +   *             {@link ColumnFamilyDescriptorBuilder#of(String)}.
        */
       @Deprecated
       public HColumnDescriptor(final String familyName) {
@@ -119,29 +141,26 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    +   * Construct a column descriptor specifying only the family name The other attributes are
    +   * defaulted.
    +   * @param familyName Column family name. Must be 'printable' -- digit or letter -- and may not
    +   *                   contain a :
        * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-18433).
    -   *             Use {@link ColumnFamilyDescriptorBuilder#of(byte[])}.
    +   *             (HBASE-18433). Use
    +   *             {@link ColumnFamilyDescriptorBuilder#of(byte[])}.
        */
       @Deprecated
    -  public HColumnDescriptor(final byte [] familyName) {
    +  public HColumnDescriptor(final byte[] familyName) {
         this(new ModifyableColumnFamilyDescriptor(familyName));
       }
     
       /**
    -   * Constructor.
    -   * Makes a deep copy of the supplied descriptor.
    -   * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
    -   *
    +   * Constructor. Makes a deep copy of the supplied descriptor. Can make a modifiable descriptor
    +   * from an UnmodifyableHColumnDescriptor.
        * @param desc The descriptor.
        * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-18433).
    -   *             Use {@link ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}.
    +   *             (HBASE-18433). Use
    +   *             {@link ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}.
        */
       @Deprecated
       public HColumnDescriptor(HColumnDescriptor desc) {
@@ -149,8 +168,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    -   * @throws IllegalArgumentException If not null and not a legitimate family
    -   * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
    -   * b can be null when deserializing).  Cannot start with a '.'
    -   * either. Also Family can not be an empty value or equal "recovered.edits".
    +   * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable'
    +   *                                  and ends in a ':' (Null passes are allowed because
    +   *                                  b can be null when deserializing). Cannot start
    +   *                                  with a '.' either. Also Family can not be an empty value or
    +   *                                  equal "recovered.edits".
        * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
    -   *   {@link ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])} instead.
    +   *             {@link ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])} instead.
        * @see ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])
        * @see HBASE-18008
        */
       @Deprecated
    -  public static byte [] isLegalFamilyName(final byte [] b) {
    +  public static byte[] isLegalFamilyName(final byte[] b) {
         return ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b);
       }
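As the deprecation note above suggests, new code can validate family names through ColumnFamilyDescriptorBuilder directly; a small sketch, with an arbitrary example name.

// Illustrative sketch: the replacement validation call named in the deprecation note.
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class FamilyNameCheck {
  static byte[] checkedFamily(String name) {
    // Throws IllegalArgumentException for names containing ':', starting with '.',
    // empty values, or the reserved "recovered.edits".
    return ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes(name));
  }
}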
     
@@ -178,7 +197,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   *             (HBASE-13655).
    -   *             Use {@link #getCompressionType()}.
    +   *             (HBASE-13655). Use
    +   *             {@link #getCompressionType()}.
        */
       @Deprecated
       public Compression.Algorithm getCompression() {
@@ -252,10 +271,10 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   *             (HBASE-13655).
    -   *             Use {@link #getCompactionCompressionType()}.
    +   * @return compression type being used for the column family for major compaction
    +   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    +   *             (HBASE-13655). Use
    +   *             {@link #getCompactionCompressionType()}.
        */
       @Deprecated
       public Compression.Algorithm getCompactionCompression() {
@@ -278,7 +297,6 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    +      throw new IllegalArgumentException(
    +        "Unable to set MaxVersion to " + maxVersions + " and set MinVersion to " + minVersions
    +          + ", as maximum versions must be >= minimum versions.");
         }
         setMinVersions(minVersions);
         setMaxVersions(maxVersions);
@@ -306,8 +324,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   * See LZO Compression
    -   * for how to enable it.
    +   * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. See
    +   * LZO Compression for how to
    +   * enable it.
        * @param value Compression type setting.
        * @return this (for chained invocation)
        */
@@ -355,10 +371,8 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   * See LZO Compression
    -   * for how to enable it.
    +   * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. See
    +   * LZO Compression for how to
    +   * enable it.
        * @param value Compression type setting.
        * @return this (for chained invocation)
        */
@@ -415,7 +428,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    -   * @throws DeserializationException
    -   * @see #toByteArray()
    +   * @return An instance of {@link HColumnDescriptor} made from bytes n * @see
    +   *         #toByteArray()
        */
    -  public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
    +  public static HColumnDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
         ColumnFamilyDescriptor desc = ColumnFamilyDescriptorBuilder.parseFrom(bytes);
         if (desc instanceof ModifyableColumnFamilyDescriptor) {
           return new HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
@@ -721,7 +727,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    -   *          "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD"
    +   *              "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD"
        */
       public HColumnDescriptor setStoragePolicy(String value) {
         getDelegateeForModification().setStoragePolicy(value);
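Every deprecation note in this file points at ColumnFamilyDescriptorBuilder, so a hedged sketch of that replacement path follows; the family name and settings are illustrative, not taken from the patch.

// Illustrative sketch: building a ColumnFamilyDescriptor instead of an HColumnDescriptor.
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;

final class FamilyDescriptorExample {
  static ColumnFamilyDescriptor infoFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
      .setMaxVersions(3)
      // GZ ships with HBase; LZO does not (see the compression note above).
      .setCompressionType(Compression.Algorithm.GZ)
      .build();
  }
}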
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    index 2a0e804ff7a..1769704a992 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -24,7 +23,6 @@ import java.util.ArrayList;
     import java.util.Arrays;
     import java.util.List;
     import java.util.stream.Collectors;
    -
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hbase.KeyValue.KVComparator;
     import org.apache.hadoop.hbase.client.RegionInfo;
    @@ -38,40 +36,38 @@ import org.apache.hadoop.io.DataInputBuffer;
     import org.apache.yetus.audience.InterfaceAudience;
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
    +
     import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
     import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
     
     /**
      * Information about a region. A region is a range of keys in the whole keyspace of a table, an
    - * identifier (a timestamp) for differentiating between subset ranges (after region split)
    - * and a replicaId for differentiating the instance for the same range and some status information
    - * about the region.
    - *
    - * The region has a unique name which consists of the following fields:
    + * identifier (a timestamp) for differentiating between subset ranges (after region split) and a
    + * replicaId for differentiating the instance for the same range and some status information about
    + * the region. The region has a unique name which consists of the following fields:
  * <ul>
- * <li>tableName : The name of the table</li>
- * <li>startKey : The startKey for the region.</li>
- * <li>regionId : A timestamp when the region is created.</li>
- * <li>replicaId : An id starting from 0 to differentiate replicas of the same region range
- * but hosted in separated servers. The same region range can be hosted in multiple locations.</li>
- * <li>encodedName : An MD5 encoded string for the region name.</li>
+ * <li>tableName : The name of the table</li>
+ * <li>startKey : The startKey for the region.</li>
+ * <li>regionId : A timestamp when the region is created.</li>
+ * <li>replicaId : An id starting from 0 to differentiate replicas of the same region range but
+ * hosted in separated servers. The same region range can be hosted in multiple locations.</li>
+ * <li>encodedName : An MD5 encoded string for the region name.</li>
  * </ul>
- *
- * <p>Other than the fields in the region name, region info contains:
+ * <p>
+ * Other than the fields in the region name, region info contains:
  * <ul>
- * <li>endKey : the endKey for the region (exclusive)</li>
- * <li>split : Whether the region is split</li>
- * <li>offline : Whether the region is offline</li>
+ * <li>endKey : the endKey for the region (exclusive)</li>
+ * <li>split : Whether the region is split</li>
+ * <li>offline : Whether the region is offline</li>
  * </ul>
    - * * In 0.98 or before, a list of table's regions would fully cover the total keyspace, and at any * point in time, a row key always belongs to a single region, which is hosted in a single server. * In 0.99+, a region can have multiple instances (called replicas), and thus a range (or row) can * correspond to multiple HRegionInfo's. These HRI's share the same fields however except the * replicaId field. If the replicaId is not set, it defaults to 0, which is compatible with the * previous behavior of a range corresponding to 1 region. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * use {@link RegionInfoBuilder} to build {@link RegionInfo}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. use + * {@link RegionInfoBuilder} to build {@link RegionInfo}. */ @Deprecated @InterfaceAudience.Public @@ -79,41 +75,31 @@ public class HRegionInfo implements RegionInfo { private static final Logger LOG = LoggerFactory.getLogger(HRegionInfo.class); /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. - * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *

    - * **NOTE** - * - * The first hbase:meta region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. + * The new format for a region name contains its encodedName at the end. The encoded name also + * serves as the directory name for the region in the filesystem. New region name format: + * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. where, <encodedName> + * is a hex version of the MD5 hash of <tablename>,<startkey>,<regionIdTimestamp> The old + * region name format: <tablename>,<startkey>,<regionIdTimestamp> For region names in the + * old format, the encoded name is a 32-bit JenkinsHash integer value (in its decimal notation, + * string form). + *

    + * **NOTE** The first hbase:meta region, and regions created by an older version of HBase (0.20 or + * prior) will continue to use the old region name format. */ /** A non-capture group so that this can be embedded. */ - public static final String ENCODED_REGION_NAME_REGEX = RegionInfoBuilder.ENCODED_REGION_NAME_REGEX; + public static final String ENCODED_REGION_NAME_REGEX = + RegionInfoBuilder.ENCODED_REGION_NAME_REGEX; private static final int MAX_REPLICA_ID = 0xFFFF; /** - * @param regionName - * @return the encodedName - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#encodeRegionName(byte[])}. + * n * @return the encodedName + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#encodeRegionName(byte[])}. */ @Deprecated - public static String encodeRegionName(final byte [] regionName) { + public static String encodeRegionName(final byte[] regionName) { return RegionInfo.encodeRegionName(regionName); } @@ -126,19 +112,19 @@ public class HRegionInfo implements RegionInfo { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(RegionInfo...)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(RegionInfo...)}. */ @Deprecated - public static String getShortNameToLog(HRegionInfo...hris) { + public static String getShortNameToLog(HRegionInfo... hris) { return RegionInfo.getShortNameToLog(Arrays.asList(hris)); } /** - * @return Return a String of short, printable names for hris - * (usually encoded name) for us logging. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(List)})}. + * @return Return a String of short, printable names for hris (usually encoded name) + * for us logging. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(List)})}. */ @Deprecated public static String getShortNameToLog(final List hris) { @@ -149,9 +135,9 @@ public class HRegionInfo implements RegionInfo { * Use logging. * @param encodedRegionName The encoded regionname. * @return hbase:meta if passed 1028785192 else returns - * encodedRegionName - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#prettyPrint(String)}. + * encodedRegionName + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#prettyPrint(String)}. */ @Deprecated @InterfaceAudience.Private @@ -159,7 +145,7 @@ public class HRegionInfo implements RegionInfo { return RegionInfo.prettyPrint(encodedRegionName); } - private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; + private byte[] endKey = HConstants.EMPTY_BYTE_ARRAY; // This flag is in the parent of a split while the parent is still referenced by daughter regions. // We USED to set this flag when we disabled a table but now table state is kept up in zookeeper // as of 0.90.0 HBase. And now in DisableTableProcedure, finally we will create bunch of @@ -167,14 +153,14 @@ public class HRegionInfo implements RegionInfo { // will not change the offLine flag. 
private boolean offLine = false; private long regionId = -1; - private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY; + private transient byte[] regionName = HConstants.EMPTY_BYTE_ARRAY; private boolean split = false; - private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + private byte[] startKey = HConstants.EMPTY_BYTE_ARRAY; private int hashCode = -1; - //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. + // TODO: Move NO_HASH to HStoreFile which is really the only place it is used. public static final String NO_HASH = null; private String encodedName = null; - private byte [] encodedNameAsBytes = null; + private byte[] encodedNameAsBytes = null; private int replicaId = DEFAULT_REPLICA_ID; // Current TableName @@ -188,7 +174,7 @@ public class HRegionInfo implements RegionInfo { /** HRegionInfo for first meta region */ // TODO: How come Meta regions still do not have encoded region names? Fix. public static final HRegionInfo FIRST_META_REGIONINFO = - new HRegionInfo(1L, TableName.META_TABLE_NAME); + new HRegionInfo(1L, TableName.META_TABLE_NAME); private void setHashCode() { int result = Arrays.hashCode(this.regionName); @@ -202,8 +188,7 @@ public class HRegionInfo implements RegionInfo { } /** - * Private constructor used constructing HRegionInfo for the - * first meta regions + * Private constructor used constructing HRegionInfo for the first meta regions */ private HRegionInfo(long regionId, TableName tableName) { this(regionId, tableName, DEFAULT_REPLICA_ID); @@ -225,66 +210,54 @@ public class HRegionInfo implements RegionInfo { /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table name - * @param startKey first key in region - * @param endKey end of key range - * @throws IllegalArgumentException + * @param startKey first key in region + * @param endKey end of key range n */ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey) - throws IllegalArgumentException { + throws IllegalArgumentException { this(tableName, startKey, endKey, false); } /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @throws IllegalArgumentException + * @param startKey first key in region + * @param endKey end of key range + * @param split true if this region has split and we have daughter regions regions that may or + * may not hold references to this region. n */ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split) - throws IllegalArgumentException { + final boolean split) throws IllegalArgumentException { this(tableName, startKey, endKey, split, EnvironmentEdgeManager.currentTime()); } /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @param regionid Region id to use. - * @throws IllegalArgumentException + * @param startKey first key in region + * @param endKey end of key range + * @param split true if this region has split and we have daughter regions regions that may or + * may not hold references to this region. 
+ * @param regionid Region id to use. n */ - public HRegionInfo(final TableName tableName, final byte[] startKey, - final byte[] endKey, final boolean split, final long regionid) - throws IllegalArgumentException { + public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, + final boolean split, final long regionid) throws IllegalArgumentException { this(tableName, startKey, endKey, split, regionid, DEFAULT_REPLICA_ID); } /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @param regionid Region id to use. - * @param replicaId the replicaId to use - * @throws IllegalArgumentException + * @param startKey first key in region + * @param endKey end of key range + * @param split true if this region has split and we have daughter regions regions that may or + * may not hold references to this region. + * @param regionid Region id to use. + * @param replicaId the replicaId to use n */ - public HRegionInfo(final TableName tableName, final byte[] startKey, - final byte[] endKey, final boolean split, final long regionid, - final int replicaId) - throws IllegalArgumentException { + public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, + final boolean split, final long regionid, final int replicaId) throws IllegalArgumentException { super(); if (tableName == null) { throw new IllegalArgumentException("TableName cannot be null"); @@ -300,17 +273,14 @@ public class HRegionInfo implements RegionInfo { this.regionName = createRegionName(this.tableName, startKey, regionId, replicaId, true); this.split = split; - this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); - this.startKey = startKey == null? - HConstants.EMPTY_START_ROW: startKey.clone(); + this.endKey = endKey == null ? HConstants.EMPTY_END_ROW : endKey.clone(); + this.startKey = startKey == null ? HConstants.EMPTY_START_ROW : startKey.clone(); this.tableName = tableName; setHashCode(); } /** - * Costruct a copy of another HRegionInfo - * - * @param other + * Costruct a copy of another HRegionInfo n */ public HRegionInfo(RegionInfo other) { super(); @@ -333,93 +303,82 @@ public class HRegionInfo implements RegionInfo { } /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * Make a region name of passed parameters. n * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], long, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], long, boolean)}. 
*/ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final long regionid, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, Long.toString(regionid), newFormat); } /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * Make a region name of passed parameters. n * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], String, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], String, boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final String id, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final String id, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); } /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param replicaId - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * Make a region name of passed parameters. n * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). n * @param + * newFormat should we create the region name in the new format (such that it + * contains its encoded name?). * @return Region name made of passed tableName, startKey, id and replicaId - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final long regionid, int replicaId, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, int replicaId, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), - replicaId, newFormat); + replicaId, newFormat); } /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). 
+ * Make a region name of passed parameters. n * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], byte[], boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final byte [] id, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final byte[] id, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, id, DEFAULT_REPLICA_ID, newFormat); } + /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param replicaId - * @param newFormat should we create the region name in the new format + * Make a region name of passed parameters. n * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). n * @param newFormat + * should we create the region name in the new format * @return Region name made of passed tableName, startKey, id and replicaId - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final byte [] id, final int replicaId, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final byte[] id, final int replicaId, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, id, replicaId, newFormat); } @@ -427,20 +386,18 @@ public class HRegionInfo implements RegionInfo { * Gets the table name from the specified region name. * @param regionName to extract the table name from * @return Table name - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getTable(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getTable(byte[])}. */ @Deprecated - public static TableName getTable(final byte [] regionName) { + public static TableName getTable(final byte[] regionName) { return RegionInfo.getTable(regionName); } /** - * Gets the start key from the specified region name. - * @param regionName - * @return Start key. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getStartKey(byte[])}. + * Gets the start key from the specified region name. n * @return Start key. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getStartKey(byte[])}. 
*/ @Deprecated public static byte[] getStartKey(final byte[] regionName) throws IOException { @@ -448,26 +405,20 @@ public class HRegionInfo implements RegionInfo { } /** - * Separate elements of a regionName. - * @param regionName - * @return Array of byte[] containing tableName, startKey and id - * @throws IOException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#parseRegionName(byte[])}. + * Separate elements of a regionName. n * @return Array of byte[] containing tableName, startKey + * and id n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#parseRegionName(byte[])}. */ @Deprecated @InterfaceAudience.Private - public static byte [][] parseRegionName(final byte [] regionName) throws IOException { + public static byte[][] parseRegionName(final byte[] regionName) throws IOException { return RegionInfo.parseRegionName(regionName); } /** - * - * @param regionName - * @return if region name is encoded. - * @throws IOException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}. + * n * @return if region name is encoded. n * @deprecated As of release 2.0.0, this will be + * removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}. */ @Deprecated public static boolean isEncodedRegionName(byte[] regionName) throws IOException { @@ -476,7 +427,7 @@ public class HRegionInfo implements RegionInfo { /** @return the regionId */ @Override - public long getRegionId(){ + public long getRegionId() { return regionId; } @@ -485,7 +436,7 @@ public class HRegionInfo implements RegionInfo { * @see #getRegionNameAsString() */ @Override - public byte [] getRegionName(){ + public byte[] getRegionName() { return regionName; } @@ -517,7 +468,7 @@ public class HRegionInfo implements RegionInfo { } @Override - public synchronized byte [] getEncodedNameAsBytes() { + public synchronized byte[] getEncodedNameAsBytes() { if (this.encodedNameAsBytes == null) { this.encodedNameAsBytes = Bytes.toBytes(getEncodedName()); } @@ -528,7 +479,7 @@ public class HRegionInfo implements RegionInfo { * @return the startKey */ @Override - public byte [] getStartKey(){ + public byte[] getStartKey() { return startKey; } @@ -536,18 +487,17 @@ public class HRegionInfo implements RegionInfo { * @return the endKey */ @Override - public byte [] getEndKey(){ + public byte[] getEndKey() { return endKey; } /** - * Get current table name of the region - * @return TableName + * Get current table name of the region n */ @Override public TableName getTable() { // This method name should be getTableName but there was already a method getTableName - // that returned a byte array. It is unfortunate given everywhere else, getTableName returns + // that returned a byte array. It is unfortunate given everywhere else, getTableName returns // a TableName instance. if (tableName == null || tableName.getName().length == 0) { tableName = getTable(getRegionName()); @@ -556,24 +506,21 @@ public class HRegionInfo implements RegionInfo { } /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * Returns true if the given inclusive range of rows is fully contained by this region. 
For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return + * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. end < start) */ @Override public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); } boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0; boolean lastKeyInRange = - Bytes.compareTo(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + Bytes.compareTo(rangeEndKey, endKey) < 0 || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); return firstKeyInRange && lastKeyInRange; } @@ -582,9 +529,8 @@ public class HRegionInfo implements RegionInfo { */ @Override public boolean containsRow(byte[] row) { - return Bytes.compareTo(row, startKey) >= 0 && - (Bytes.compareTo(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + return Bytes.compareTo(row, startKey) >= 0 + && (Bytes.compareTo(row, endKey) < 0 || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); } /** @@ -599,7 +545,7 @@ public class HRegionInfo implements RegionInfo { */ @Override public boolean isMetaRegion() { - return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTable()); + return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTable()); } /** @@ -633,8 +579,8 @@ public class HRegionInfo implements RegionInfo { } /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. + * The parent of a region split is offline while split daughters hold references to the parent. + * Offlined regions are closed. * @param offLine Set online/offline status. */ public void setOffline(boolean offLine) { @@ -667,14 +613,11 @@ public class HRegionInfo implements RegionInfo { */ @Override public String toString() { - return "{ENCODED => " + getEncodedName() + ", " + - HConstants.NAME + " => '" + Bytes.toStringBinary(this.regionName) - + "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + "'" + - (isOffline()? ", OFFLINE => true": "") + - (isSplit()? ", SPLIT => true": "") + - ((replicaId > 0)? ", REPLICA_ID => " + replicaId : "") + "}"; + return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" + + Bytes.toStringBinary(this.regionName) + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + + "'" + (isOffline() ? ", OFFLINE => true" : "") + (isSplit() ? ", SPLIT => true" : "") + + ((replicaId > 0) ? ", REPLICA_ID => " + replicaId : "") + "}"; } /** @@ -691,7 +634,7 @@ public class HRegionInfo implements RegionInfo { if (!(o instanceof HRegionInfo)) { return false; } - return this.compareTo((HRegionInfo)o) == 0; + return this.compareTo((HRegionInfo) o) == 0; } /** @@ -704,17 +647,15 @@ public class HRegionInfo implements RegionInfo { /** * @return Comparator to use comparing {@link KeyValue}s. - * @deprecated Use Region#getCellComparator(). deprecated for hbase 2.0, remove for hbase 3.0 + * @deprecated Use Region#getCellComparator(). 
deprecated for hbase 2.0, remove for hbase 3.0 */ @Deprecated public KVComparator getComparator() { - return isMetaRegion()? - KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; + return isMetaRegion() ? KeyValue.META_COMPARATOR : KeyValue.COMPARATOR; } /** * Convert a HRegionInfo to the protobuf RegionInfo - * * @return the converted RegionInfo */ HBaseProtos.RegionInfo convert() { @@ -723,12 +664,11 @@ public class HRegionInfo implements RegionInfo { /** * Convert a HRegionInfo to a RegionInfo - * * @param info the HRegionInfo to convert * @return the converted RegionInfo - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo) - * in org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo) in + * org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. */ @Deprecated @InterfaceAudience.Private @@ -738,12 +678,11 @@ public class HRegionInfo implements RegionInfo { /** * Convert a RegionInfo to a HRegionInfo - * * @param proto the RegionInfo to convert * @return the converted HRegionInfo - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use toRegionInfo(HBaseProtos.RegionInfo) - * in org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * toRegionInfo(HBaseProtos.RegionInfo) in + * org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. */ @Deprecated @InterfaceAudience.Private @@ -753,17 +692,12 @@ public class HRegionInfo implements RegionInfo { // RegionInfo into HRegionInfo which is what is wanted here. HRegionInfo hri; if (ri.isMetaRegion()) { - hri = ri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID ? - HRegionInfo.FIRST_META_REGIONINFO : - new HRegionInfo(ri.getRegionId(), ri.getTable(), ri.getReplicaId()); + hri = ri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID + ? HRegionInfo.FIRST_META_REGIONINFO + : new HRegionInfo(ri.getRegionId(), ri.getTable(), ri.getReplicaId()); } else { - hri = new HRegionInfo( - ri.getTable(), - ri.getStartKey(), - ri.getEndKey(), - ri.isSplit(), - ri.getRegionId(), - ri.getReplicaId()); + hri = new HRegionInfo(ri.getTable(), ri.getStartKey(), ri.getEndKey(), ri.isSplit(), + ri.getRegionId(), ri.getReplicaId()); if (proto.hasOffline()) { hri.setOffline(proto.getOffline()); } @@ -774,36 +708,36 @@ public class HRegionInfo implements RegionInfo { /** * @return This instance serialized as protobuf w/ a magic pb prefix. * @see #parseFrom(byte[]) - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#toByteArray(RegionInfo)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#toByteArray(RegionInfo)}. */ @Deprecated - public byte [] toByteArray() { + public byte[] toByteArray() { return RegionInfo.toByteArray(this); } /** - * @return A deserialized {@link HRegionInfo} - * or null if we failed deserialize or passed bytes null + * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes + * null * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[])}. 
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[])}. */ @Deprecated - public static HRegionInfo parseFromOrNull(final byte [] bytes) { + public static HRegionInfo parseFromOrNull(final byte[] bytes) { if (bytes == null) return null; return parseFromOrNull(bytes, 0, bytes.length); } /** - * @return A deserialized {@link HRegionInfo} or null - * if we failed deserialize or passed bytes null + * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes + * null * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[], int, int)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[], int, int)}. */ @Deprecated - public static HRegionInfo parseFromOrNull(final byte [] bytes, int offset, int len) { + public static HRegionInfo parseFromOrNull(final byte[] bytes, int offset, int len) { if (bytes == null || len <= 0) return null; try { return parseFrom(bytes, offset, len); @@ -814,30 +748,26 @@ public class HRegionInfo implements RegionInfo { /** * @param bytes A pb RegionInfo serialized with a pb magic prefix. - * @return A deserialized {@link HRegionInfo} - * @throws DeserializationException - * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[])}. + * @return A deserialized {@link HRegionInfo} n * @see #toByteArray() + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[])}. */ - public static HRegionInfo parseFrom(final byte [] bytes) throws DeserializationException { + public static HRegionInfo parseFrom(final byte[] bytes) throws DeserializationException { if (bytes == null) return null; return parseFrom(bytes, 0, bytes.length); } /** - * @param bytes A pb RegionInfo serialized with a pb magic prefix. + * @param bytes A pb RegionInfo serialized with a pb magic prefix. * @param offset starting point in the byte array - * @param len length to read on the byte array - * @return A deserialized {@link HRegionInfo} - * @throws DeserializationException - * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[], int, int)}. + * @param len length to read on the byte array + * @return A deserialized {@link HRegionInfo} n * @see #toByteArray() + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[], int, int)}. */ @Deprecated - public static HRegionInfo parseFrom(final byte [] bytes, int offset, int len) - throws DeserializationException { + public static HRegionInfo parseFrom(final byte[] bytes, int offset, int len) + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes, offset, len)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -854,44 +784,37 @@ public class HRegionInfo implements RegionInfo { } /** - * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use - * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). 
- * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. - * @throws IOException - * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#toDelimitedByteArray(RegionInfo)}. + * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use the pb + * mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). + * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see + * #toByteArray() + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#toDelimitedByteArray(RegionInfo)}. */ @Deprecated - public byte [] toDelimitedByteArray() throws IOException { + public byte[] toDelimitedByteArray() throws IOException { return RegionInfo.toDelimitedByteArray(this); } /** - * Get the descriptive name as {@link RegionState} does it but with hidden - * startkey optionally - * @param state - * @param conf - * @return descriptive string - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getDescriptiveNameFromRegionStateForDisplay(RegionState, Configuration) - * over in hbase-server module. + * Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally nn + * * @return descriptive string + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getDescriptiveNameFromRegionStateForDisplay(RegionState, + * Configuration) over in hbase-server module. */ @Deprecated @InterfaceAudience.Private public static String getDescriptiveNameFromRegionStateForDisplay(RegionState state, - Configuration conf) { + Configuration conf) { return RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf); } /** - * Get the end key for display. Optionally hide the real end key. - * @param hri - * @param conf - * @return the endkey - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getEndKeyForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * Get the end key for display. Optionally hide the real end key. nn * @return the endkey + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getEndKeyForDisplay(RegionInfo, Configuration) over in + * hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -900,13 +823,10 @@ public class HRegionInfo implements RegionInfo { } /** - * Get the start key for display. Optionally hide the real start key. - * @param hri - * @param conf - * @return the startkey - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * Get the start key for display. Optionally hide the real start key. nn * @return the startkey + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, Configuration) over in + * hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -915,13 +835,11 @@ public class HRegionInfo implements RegionInfo { } /** - * Get the region name for display. Optionally hide the start key. 
- * @param hri - * @param conf - * @return region name as String - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * Get the region name for display. Optionally hide the start key. nn * @return region name as + * String + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, Configuration) over + * in hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -930,13 +848,10 @@ public class HRegionInfo implements RegionInfo { } /** - * Get the region name for display. Optionally hide the start key. - * @param hri - * @param conf - * @return region name bytes - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * Get the region name for display. Optionally hide the start key. nn * @return region name bytes + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, Configuration) over in + * hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -945,13 +860,10 @@ public class HRegionInfo implements RegionInfo { } /** - * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was - * serialized to the stream with {@link #toDelimitedByteArray()} - * @param in - * @return An instance of HRegionInfo. - * @throws IOException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#parseFrom(DataInputStream)}. + * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was + * serialized to the stream with {@link #toDelimitedByteArray()} n * @return An instance of + * HRegionInfo. n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#parseFrom(DataInputStream)}. */ @Deprecated @InterfaceAudience.Private @@ -959,12 +871,12 @@ public class HRegionInfo implements RegionInfo { // I need to be able to move back in the stream if this is not a pb serialization so I can // do the Writable decoding instead. int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; - if (in.markSupported()) { //read it with mark() + byte[] pbuf = new byte[pblen]; + if (in.markSupported()) { // read it with mark() in.mark(pblen); } - //assumption: if Writable serialization, it should be longer than pblen. + // assumption: if Writable serialization, it should be longer than pblen. in.readFully(pbuf, 0, pblen); if (ProtobufUtil.isPBMagicPrefix(pbuf)) { return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in)); @@ -976,14 +888,13 @@ public class HRegionInfo implements RegionInfo { /** * Serializes given HRegionInfo's as a byte array. Use this instead of {@link #toByteArray()} when * writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads - * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can - * be used to read back the instances. + * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can be + * used to read back the instances. * @param infos HRegionInfo objects to serialize - * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. 
- * @throws IOException - * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#toDelimitedByteArray(RegionInfo...)}. + * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see + * #toByteArray() + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#toDelimitedByteArray(RegionInfo...)}. */ @Deprecated @InterfaceAudience.Private @@ -994,16 +905,16 @@ public class HRegionInfo implements RegionInfo { /** * Parses all the HRegionInfo instances from the passed in stream until EOF. Presumes the * HRegionInfo's were serialized to the stream with {@link #toDelimitedByteArray()} - * @param bytes serialized bytes + * @param bytes serialized bytes * @param offset the start offset into the byte[] buffer * @param length how far we should read into the byte[] buffer * @return All the hregioninfos that are in the byte array. Keeps reading till we hit the end. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#parseDelimitedFrom(byte[], int, int)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#parseDelimitedFrom(byte[], int, int)}. */ @Deprecated public static List parseDelimitedFrom(final byte[] bytes, final int offset, - final int length) throws IOException { + final int length) throws IOException { if (bytes == null) { throw new IllegalArgumentException("Can't build an object with empty bytes array"); } @@ -1022,12 +933,9 @@ public class HRegionInfo implements RegionInfo { } /** - * Check whether two regions are adjacent - * @param regionA - * @param regionB - * @return true if two regions are adjacent - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#areAdjacent(RegionInfo, RegionInfo)}. + * Check whether two regions are adjacent nn * @return true if two regions are adjacent + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#areAdjacent(RegionInfo, RegionInfo)}. */ @Deprecated public static boolean areAdjacent(HRegionInfo regionA, HRegionInfo regionB) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index fd679bd0cbc..6ae93bb3954 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,17 +23,13 @@ import org.apache.hadoop.hbase.util.Addressing; import org.apache.yetus.audience.InterfaceAudience; /** - * Data structure to hold RegionInfo and the address for the hosting - * HRegionServer. Immutable. Comparable, but we compare the 'location' only: - * i.e. the hostname and port, and *not* the regioninfo. This means two - * instances are the same if they refer to the same 'location' (the same - * hostname and port), though they may be carrying different regions. - * - * On a big cluster, each client will have thousands of instances of this object, often - * 100 000 of them if not million. It's important to keep the object size as small - * as possible. - * - *
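The delimited replacements named in the @deprecated tags above can be exercised roughly as follows. This is a sketch assuming RegionInfo.toDelimitedByteArray(RegionInfo...) and RegionInfo.parseDelimitedFrom(byte[], int, int) keep the signatures referenced in these hunks; the table and key names are placeholders:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class DelimitedRegionInfoSketch {
  public static void main(String[] args) throws IOException {
    RegionInfo a = RegionInfoBuilder.newBuilder(TableName.valueOf("t1"))
      .setEndKey(Bytes.toBytes("m")).build();
    RegionInfo b = RegionInfoBuilder.newBuilder(TableName.valueOf("t1"))
      .setStartKey(Bytes.toBytes("m")).build();
    // Each record is written delimited (with the pb magic prefix), so a reader does not have to
    // consume the stream to EOF the way a plain pb merge would.
    byte[] delimited = RegionInfo.toDelimitedByteArray(a, b);
    List<RegionInfo> back = RegionInfo.parseDelimitedFrom(delimited, 0, delimited.length);
    System.out.println(back.size());   // expected: 2
  }
}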
    This interface has been marked InterfaceAudience.Public in 0.96 and 0.98. + * Data structure to hold RegionInfo and the address for the hosting HRegionServer. Immutable. + * Comparable, but we compare the 'location' only: i.e. the hostname and port, and *not* the + * regioninfo. This means two instances are the same if they refer to the same 'location' (the same + * hostname and port), though they may be carrying different regions. On a big cluster, each client + * will have thousands of instances of this object, often 100 000 of them if not million. It's + * important to keep the object size as small as possible.
    + * This interface has been marked InterfaceAudience.Public in 0.96 and 0.98. */ @InterfaceAudience.Public public class HRegionLocation implements Comparable { @@ -58,7 +53,7 @@ public class HRegionLocation implements Comparable { @Override public String toString() { return "region=" + (this.regionInfo == null ? "null" : this.regionInfo.getRegionNameAsString()) - + ", hostname=" + this.serverName + ", seqNum=" + seqNum; + + ", hostname=" + this.serverName + ", seqNum=" + seqNum; } /** @@ -75,7 +70,7 @@ public class HRegionLocation implements Comparable { if (!(o instanceof HRegionLocation)) { return false; } - return this.compareTo((HRegionLocation)o) == 0; + return this.compareTo((HRegionLocation) o) == 0; } /** @@ -87,19 +82,18 @@ public class HRegionLocation implements Comparable { } /** - * * @return Immutable HRegionInfo * @deprecated Since 2.0.0. Will remove in 3.0.0. Use {@link #getRegion()}} instead. */ @Deprecated - public HRegionInfo getRegionInfo(){ + public HRegionInfo getRegionInfo() { return regionInfo == null ? null : new ImmutableHRegionInfo(regionInfo); } /** - * @return regionInfo + * n */ - public RegionInfo getRegion(){ + public RegionInfo getRegion() { return regionInfo; } @@ -116,8 +110,8 @@ public class HRegionLocation implements Comparable { } /** - * @return String made of hostname and port formatted as - * per {@link Addressing#createHostAndPortStr(String, int)} + * @return String made of hostname and port formatted as per + * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 8f9e77ac648..c48b31c1d1c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,12 +41,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * HTableDescriptor contains the details about an HBase table such as the descriptors of - * all the column families, is the table a catalog table, hbase:meta , - * if the table is read only, the maximum size of the memstore, - * when the region split should occur, coprocessors associated with it etc... - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link TableDescriptorBuilder} to build {@link HTableDescriptor}. + * HTableDescriptor contains the details about an HBase table such as the descriptors of all the + * column families, is the table a catalog table, hbase:meta , if the table is read + * only, the maximum size of the memstore, when the region split should occur, coprocessors + * associated with it etc... + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link TableDescriptorBuilder} to build {@link HTableDescriptor}. 
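The HRegionLocation javadoc above stresses that comparison is by 'location' (hostname and port) only, never by region. A small sketch demonstrating the point, with placeholder host and table names:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public final class LocationEqualitySketch {
  public static void main(String[] args) {
    ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1234567890L);
    RegionInfo r1 = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();
    RegionInfo r2 = RegionInfoBuilder.newBuilder(TableName.valueOf("t2")).build();
    // Same hostname/port but different regions: the two locations still compare equal because
    // only the ServerName participates in equals/compareTo.
    HRegionLocation a = new HRegionLocation(r1, sn);
    HRegionLocation b = new HRegionLocation(r2, sn);
    System.out.println(a.equals(b));   // expected: true
  }
}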
*/ @Deprecated @InterfaceAudience.Public @@ -66,26 +65,32 @@ public class HTableDescriptor implements TableDescriptor, ComparableHADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug + * @see HADOOP-1581 HBASE: (HBASE-174) + * Un-openable tablename bug */ public HTableDescriptor(final TableName name) { this(new ModifyableTableDescriptor(name)); @@ -94,8 +99,8 @@ public class HTableDescriptor implements TableDescriptor, Comparable - * Makes a deep copy of the supplied descriptor. - * Can make a modifiable descriptor from an ImmutableHTableDescriptor. + * Makes a deep copy of the supplied descriptor. Can make a modifiable descriptor from an + * ImmutableHTableDescriptor. * @param desc The descriptor. */ public HTableDescriptor(final HTableDescriptor desc) { @@ -103,8 +108,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable - * Makes a deep copy of the supplied descriptor. - * Can make a modifiable descriptor from an ImmutableHTableDescriptor. + * Makes a deep copy of the supplied descriptor. Can make a modifiable descriptor from an + * ImmutableHTableDescriptor. * @param name Table name. * @param desc The descriptor. */ @@ -130,7 +134,6 @@ public class HTableDescriptor implements TableDescriptor, Comparable hbase:meta - * region. - * - * @return true if this table is hbase:meta - * region + * Checks if this table is hbase:meta region. + * @return true if this table is hbase:meta region */ @Override public boolean isMetaRegion() { @@ -151,7 +151,6 @@ public class HTableDescriptor implements TableDescriptor, Comparablehbase:meta table - * * @return true if table is hbase:meta region. */ @Override @@ -169,8 +168,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable - * This is not an absolute value and might vary. Assume that a single row exceeds - * the maxFileSize then the storeFileSize will be greater than maxFileSize since - * a single row cannot be split across multiple regions + * This is not an absolute value and might vary. Assume that a single row exceeds the maxFileSize + * then the storeFileSize will be greater than maxFileSize since a single row cannot be split + * across multiple regions *

    - * - * @param maxFileSize The maximum file size that a store file can grow to - * before a split is triggered. + * @param maxFileSize The maximum file size that a store file can grow to before a split is + * triggered. */ public HTableDescriptor setMaxFileSize(long maxFileSize) { getDelegateeForModification().setMaxFileSize(maxFileSize); @@ -461,9 +430,7 @@ public class HTableDescriptor implements TableDescriptor, ComparableHBASE-18008 */ @Deprecated public Collection getFamilies() { - return Stream.of(delegatee.getColumnFamilies()) - .map(this::toHColumnDescriptor) - .collect(Collectors.toList()); + return Stream.of(delegatee.getColumnFamilies()).map(this::toHColumnDescriptor) + .collect(Collectors.toList()); } /** @@ -641,8 +601,8 @@ public class HTableDescriptor implements TableDescriptor, ComparableHBASE-18008). - * Use {@link #getColumnFamilyNames()}. + * (HBASE-18008). Use + * {@link #getColumnFamilyNames()}. */ @Deprecated public Set getFamiliesKeys() { @@ -708,7 +664,6 @@ public class HTableDescriptor implements TableDescriptor, Comparable new HColumnDescriptor[size]); + return Stream.of(delegatee.getColumnFamilies()).map(this::toHColumnDescriptor) + .toArray(size -> new HColumnDescriptor[size]); } /** - * Returns the HColumnDescriptor for a specific column family with name as - * specified by the parameter column. + * Returns the HColumnDescriptor for a specific column family with name as specified by the + * parameter column. * @param column Column family name - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getColumnFamily(byte[])} - * instead. + * instead. * @see #getColumnFamily(byte[]) * @see HBASE-18008 */ @@ -749,16 +700,13 @@ public class HTableDescriptor implements TableDescriptor, Comparable kvs) - throws IOException { - getDelegateeForModification().setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(className) - .setJarPath(jarFilePath == null ? null : jarFilePath.toString()) - .setPriority(priority) - .setProperties(kvs == null ? Collections.emptyMap() : kvs) - .build()); + public HTableDescriptor addCoprocessor(String className, Path jarFilePath, int priority, + final Map kvs) throws IOException { + getDelegateeForModification().setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className) + .setJarPath(jarFilePath == null ? null : jarFilePath.toString()).setPriority(priority) + .setProperties(kvs == null ? Collections.emptyMap() : kvs).build()); return this; } /** - * Add a table coprocessor to this table. The coprocessor - * type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. - * It won't check if the class can be loaded or not. - * Whether a coprocessor is loadable or not will be determined when - * a region is opened. + * Add a table coprocessor to this table. The coprocessor type must be + * org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. It won't check if the class can be + * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is + * opened. 
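The @deprecated tags on these HTableDescriptor mutators all point at TableDescriptorBuilder. A hedged sketch of the builder-based equivalent; the table name, family name, size and coprocessor class are placeholders, and setCoprocessor is assumed to still declare IOException as in 2.x:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class TableDescriptorSketch {
  public static void main(String[] args) throws IOException {
    // Immutable descriptor assembled via the builder instead of HTableDescriptor setters.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setMaxFileSize(10L * 1024 * 1024 * 1024)          // split trigger, roughly 10 GB per store file
      .setCoprocessor("org.example.MyRegionObserver")    // hypothetical RegionCoprocessor class name
      .build();
    System.out.println(td.getTableName() + " maxFileSize=" + td.getMaxFileSize());
  }
}

The resulting TableDescriptor can then be handed to Admin.createTable the same way an HTableDescriptor used to be.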
* @param specStr The Coprocessor specification all in in one String formatted so matches - * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN} - * @throws IOException + * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN} n */ public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException { getDelegateeForModification().setCoprocessorWithSpec(specStr); @@ -835,7 +773,6 @@ public class HTableDescriptor implements TableDescriptor, Comparablebytes - * @throws DeserializationException - * @throws IOException - * @see #toByteArray() + * @return An instance of {@link HTableDescriptor} made from bytes nn * @see + * #toByteArray() */ - public static HTableDescriptor parseFrom(final byte [] bytes) - throws DeserializationException, IOException { + public static HTableDescriptor parseFrom(final byte[] bytes) + throws DeserializationException, IOException { TableDescriptor desc = TableDescriptorBuilder.parseFrom(bytes); if (desc instanceof ModifyableTableDescriptor) { return new HTableDescriptor((ModifyableTableDescriptor) desc); @@ -932,16 +869,14 @@ public class HTableDescriptor implements TableDescriptor, Comparable getConfiguration() { - return delegatee.getValues().entrySet().stream() - .collect(Collectors.toMap( - e -> Bytes.toString(e.getKey().get(), e.getKey().getOffset(), e.getKey().getLength()), - e -> Bytes.toString(e.getValue().get(), e.getValue().getOffset(), e.getValue().getLength()) - )); + return delegatee.getValues().entrySet().stream().collect(Collectors.toMap( + e -> Bytes.toString(e.getKey().get(), e.getKey().getOffset(), e.getKey().getLength()), + e -> Bytes.toString(e.getValue().get(), e.getValue().getOffset(), e.getValue().getLength()))); } /** * Setter for storing a configuration setting in map. - * @param key Config key. Same as XML config key e.g. hbase.something.or.other. + * @param key Config key. Same as XML config key e.g. hbase.something.or.other. * @param value String value. If null, removes the setting. */ public HTableDescriptor setConfiguration(String key, String value) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java index 63c26e2c393..2a099157bc7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,13 @@ package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a request is table schema modification is requested but - * made for an invalid family name. + * Thrown if a request is table schema modification is requested but made for an invalid family + * name. 
*/ @InterfaceAudience.Public public class InvalidFamilyOperationException extends DoNotRetryIOException { private static final long serialVersionUID = (1L << 22) - 1L; + /** default constructor */ public InvalidFamilyOperationException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java index dd19fa1c227..2ae80cade98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,27 +23,25 @@ import org.apache.yetus.audience.InterfaceAudience; * Ways to keep cells marked for delete around. */ /* - * Don't change the TRUE/FALSE labels below, these have to be called - * this way for backwards compatibility. + * Don't change the TRUE/FALSE labels below, these have to be called this way for backwards + * compatibility. */ @InterfaceAudience.Public public enum KeepDeletedCells { /** Deleted Cells are not retained. */ FALSE, /** - * Deleted Cells are retained until they are removed by other means - * such TTL or VERSIONS. - * If no TTL is specified or no new versions of delete cells are - * written, they are retained forever. + * Deleted Cells are retained until they are removed by other means such TTL or VERSIONS. If no + * TTL is specified or no new versions of delete cells are written, they are retained forever. */ TRUE, /** - * Deleted Cells are retained until the delete marker expires due to TTL. - * This is useful when TTL is combined with MIN_VERSIONS and one - * wants to keep a minimum number of versions around but at the same - * time remove deleted cells after the TTL. + * Deleted Cells are retained until the delete marker expires due to TTL. This is useful when TTL + * is combined with MIN_VERSIONS and one wants to keep a minimum number of versions around but at + * the same time remove deleted cells after the TTL. */ TTL; + public static KeepDeletedCells getValue(String val) { return valueOf(val.toUpperCase()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java index 35cdecba9bb..86e394e3340 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
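The KeepDeletedCells hunk above describes the FALSE/TRUE/TTL retention modes. A small sketch of wiring the TTL mode into a column family with ColumnFamilyDescriptorBuilder, assuming the 2.x builder API; the family name, TTL and MIN_VERSIONS values are illustrative only:

import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class KeepDeletedCellsSketch {
  public static void main(String[] args) {
    // TTL mode: deleted cells stick around until the delete marker itself expires, which pairs
    // with MIN_VERSIONS when some history is wanted without unbounded retention.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
      .setKeepDeletedCells(KeepDeletedCells.TTL)
      .setTimeToLive(7 * 24 * 60 * 60)   // seven days, in seconds
      .setMinVersions(1)
      .build();
    System.out.println(cf.getKeepDeletedCells());
  }
}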
See the NOTICE file * distributed with this work for additional information @@ -26,6 +25,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public class MasterNotRunningException extends HBaseIOException { private static final long serialVersionUID = (1L << 23) - 1L; + /** default constructor */ public MasterNotRunningException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java index 099ea405459..b913ac0506c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,16 +30,15 @@ public enum MemoryCompactionPolicy { NONE, /** * Basic policy applies optimizations which modify the index to a more compacted representation. - * This is beneficial in all access patterns. The smaller the cells are the greater the - * benefit of this policy. - * This is the default policy. + * This is beneficial in all access patterns. The smaller the cells are the greater the benefit of + * this policy. This is the default policy. */ BASIC, /** - * In addition to compacting the index representation as the basic policy, eager policy - * eliminates duplication while the data is still in memory (much like the - * on-disk compaction does after the data is flushed to disk). This policy is most useful for - * applications with high data churn or small working sets. + * In addition to compacting the index representation as the basic policy, eager policy eliminates + * duplication while the data is still in memory (much like the on-disk compaction does after the + * data is flushed to disk). This policy is most useful for applications with high data churn or + * small working sets. */ EAGER, /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index c55086c7fbe..223d5a1ac58 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -72,11 +72,12 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Throwables; + /** *
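For the MemoryCompactionPolicy hunk above, the policy is normally chosen per column family. A sketch assuming ColumnFamilyDescriptorBuilder.setInMemoryCompaction is available as in 2.x; the family name is a placeholder:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class InMemoryCompactionSketch {
  public static void main(String[] args) {
    // EAGER also deduplicates data while it is still in the memstore, which the javadoc above
    // recommends for high-churn or small working-set workloads; BASIC is the default trade-off.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
      .setInMemoryCompaction(MemoryCompactionPolicy.EAGER)
      .build();
    System.out.println(cf.getInMemoryCompaction());
  }
}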

    * Read/write operations on hbase:meta region as well as assignment information stored @@ -174,8 +175,8 @@ public class MetaTableAccessor { static final char META_REPLICA_ID_DELIMITER = '_'; /** A regex for parsing server columns from meta. See above javadoc for meta layout */ - private static final Pattern SERVER_COLUMN_PATTERN - = Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); + private static final Pattern SERVER_COLUMN_PATTERN = + Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); //////////////////////// // Reading operations // @@ -184,10 +185,10 @@ public class MetaTableAccessor { /** * Performs a full scan of hbase:meta for regions. * @param connection connection we're using - * @param visitor Visitor invoked against each row in regions family. + * @param visitor Visitor invoked against each row in regions family. */ public static void fullScanRegions(Connection connection, final Visitor visitor) - throws IOException { + throws IOException { scanMeta(connection, null, null, QueryType.REGION, visitor); } @@ -202,17 +203,17 @@ public class MetaTableAccessor { /** * Performs a full scan of hbase:meta for tables. * @param connection connection we're using - * @param visitor Visitor invoked against each row in tables family. + * @param visitor Visitor invoked against each row in tables family. */ public static void fullScanTables(Connection connection, final Visitor visitor) - throws IOException { + throws IOException { scanMeta(connection, null, null, QueryType.TABLE, visitor); } /** * Performs a full scan of hbase:meta. * @param connection connection we're using - * @param type scanned part of meta + * @param type scanned part of meta * @return List of {@link Result} */ private static List fullScan(Connection connection, QueryType type) throws IOException { @@ -257,12 +258,10 @@ public class MetaTableAccessor { * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead */ @Deprecated - public static Pair getRegion(Connection connection, byte [] regionName) + public static Pair getRegion(Connection connection, byte[] regionName) throws IOException { HRegionLocation location = getRegionLocation(connection, regionName); - return location == null - ? null - : new Pair<>(location.getRegionInfo(), location.getServerName()); + return location == null ? null : new Pair<>(location.getRegionInfo(), location.getServerName()); } /** @@ -272,7 +271,7 @@ public class MetaTableAccessor { * @return HRegionLocation for the given region */ public static HRegionLocation getRegionLocation(Connection connection, byte[] regionName) - throws IOException { + throws IOException { byte[] row = regionName; RegionInfo parsedInfo = null; try { @@ -287,7 +286,8 @@ public class MetaTableAccessor { get.addFamily(HConstants.CATALOG_FAMILY); Result r = get(getMetaHTable(connection), get); RegionLocations locations = getRegionLocations(r); - return locations == null ? null + return locations == null + ? null : locations.getRegionLocation(parsedInfo == null ? 
0 : parsedInfo.getReplicaId()); } @@ -298,16 +298,16 @@ public class MetaTableAccessor { * @return HRegionLocation for the given region */ public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo) - throws IOException { - return getRegionLocation(getCatalogFamilyRow(connection, regionInfo), - regionInfo, regionInfo.getReplicaId()); + throws IOException { + return getRegionLocation(getCatalogFamilyRow(connection, regionInfo), regionInfo, + regionInfo.getReplicaId()); } /** * @return Return the {@link HConstants#CATALOG_FAMILY} row from hbase:meta. */ public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri) - throws IOException { + throws IOException { Get get = new Get(getMetaKeyForRegion(ri)); get.addFamily(HConstants.CATALOG_FAMILY); return get(getMetaHTable(connection), get); @@ -318,17 +318,17 @@ public class MetaTableAccessor { return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName(); } - /** Returns an HRI parsed from this regionName. Not all the fields of the HRI - * is stored in the name, so the returned object should only be used for the fields - * in the regionName. + /** + * Returns an HRI parsed from this regionName. Not all the fields of the HRI is stored in the + * name, so the returned object should only be used for the fields in the regionName. */ // This should be moved to RegionInfo? TODO. public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws IOException { byte[][] fields = RegionInfo.parseRegionName(regionName); long regionId = Long.parseLong(Bytes.toString(fields[2])); int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0; - return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])) - .setStartKey(fields[1]).setRegionId(regionId).setReplicaId(replicaId).build(); + return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1]) + .setRegionId(regionId).setReplicaId(replicaId).build(); } /** @@ -337,41 +337,40 @@ public class MetaTableAccessor { * @param regionName region we're looking for * @return result of the specified region */ - public static Result getRegionResult(Connection connection, - byte[] regionName) throws IOException { + public static Result getRegionResult(Connection connection, byte[] regionName) + throws IOException { Get get = new Get(regionName); get.addFamily(HConstants.CATALOG_FAMILY); return get(getMetaHTable(connection), get); } /** - * Scans META table for a row whose key contains the specified regionEncodedName, - * returning a single related Result instance if any row is found, null otherwise. - * - * @param connection the connection to query META table. + * Scans META table for a row whose key contains the specified regionEncodedName, returning + * a single related Result instance if any row is found, null otherwise. + * @param connection the connection to query META table. * @param regionEncodedName the region encoded name to look for at META. * @return Result instance with the row related info in META, null otherwise. * @throws IOException if any errors occur while querying META. 
*/ - public static Result scanByRegionEncodedName(Connection connection, - String regionEncodedName) throws IOException { - RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, - new SubstringComparator(regionEncodedName)); + public static Result scanByRegionEncodedName(Connection connection, String regionEncodedName) + throws IOException { + RowFilter rowFilter = + new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setFilter(rowFilter); try (Table table = getMetaHTable(connection); - ResultScanner resultScanner = table.getScanner(scan)) { + ResultScanner resultScanner = table.getScanner(scan)) { return resultScanner.next(); } } /** - * @return Return all regioninfos listed in the 'info:merge*' columns of - * the regionName row. + * @return Return all regioninfos listed in the 'info:merge*' columns of the + * regionName row. */ @Nullable public static List getMergeRegions(Connection connection, byte[] regionName) - throws IOException { + throws IOException { return getMergeRegions(getRegionResult(connection, regionName).rawCells()); } @@ -387,12 +386,12 @@ public class MetaTableAccessor { * match the regex 'info:merge.*' in array of cells. */ @Nullable - public static Map getMergeRegionsWithName(Cell [] cells) { + public static Map getMergeRegionsWithName(Cell[] cells) { if (cells == null) { return null; } Map regionsToMerge = null; - for (Cell cell: cells) { + for (Cell cell : cells) { if (!isMergeQualifierPrefix(cell)) { continue; } @@ -410,21 +409,21 @@ public class MetaTableAccessor { } /** - * @return Deserialized regioninfo values taken from column values that match - * the regex 'info:merge.*' in array of cells. + * @return Deserialized regioninfo values taken from column values that match the regex + * 'info:merge.*' in array of cells. */ @Nullable - public static List getMergeRegions(Cell [] cells) { + public static List getMergeRegions(Cell[] cells) { Map mergeRegionsWithName = getMergeRegionsWithName(cells); return (mergeRegionsWithName == null) ? null : new ArrayList<>(mergeRegionsWithName.values()); } /** - * @return True if any merge regions present in cells; i.e. - * the column in cell matches the regex 'info:merge.*'. + * @return True if any merge regions present in cells; i.e. the column in + * cell matches the regex 'info:merge.*'. */ - public static boolean hasMergeRegions(Cell [] cells) { - for (Cell cell: cells) { + public static boolean hasMergeRegions(Cell[] cells) { + for (Cell cell : cells) { if (!isMergeQualifierPrefix(cell)) { continue; } @@ -438,60 +437,57 @@ public class MetaTableAccessor { */ private static boolean isMergeQualifierPrefix(Cell cell) { // Check to see if has family and that qualifier starts with the merge qualifier 'merge' - return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) && - PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); + return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) + && PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); } /** * Lists all of the regions currently in META. - * - * @param connection to connect with + * @param connection to connect with * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions, * true and we'll leave out offlined regions from returned list * @return List of all user-space regions. 
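scanByRegionEncodedName, shown above, looks a region up in hbase:meta by its encoded name alone. A usage sketch; note that MetaTableAccessor is an internal (IA.Private) class, and the encoded name below is a placeholder:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;

public final class EncodedNameLookupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // The encoded name is the 32-character hash that shows up in region directory names;
      // this value is invented for illustration.
      String encodedName = "abcdef1234567890abcdef1234567890";
      Result row = MetaTableAccessor.scanByRegionEncodedName(conn, encodedName);
      RegionInfo ri = row == null ? null : MetaTableAccessor.getRegionInfo(row);
      System.out.println(ri == null ? "no such region" : ri.getRegionNameAsString());
    }
  }
}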
*/ public static List getAllRegions(Connection connection, - boolean excludeOfflinedSplitParents) - throws IOException { + boolean excludeOfflinedSplitParents) throws IOException { List> result; - result = getTableRegionsAndLocations(connection, null, - excludeOfflinedSplitParents); + result = getTableRegionsAndLocations(connection, null, excludeOfflinedSplitParents); return getListOfRegionInfos(result); } /** - * Gets all of the regions of the specified table. Do not use this method - * to get meta table regions, use methods in MetaTableLocator instead. + * Gets all of the regions of the specified table. Do not use this method to get meta table + * regions, use methods in MetaTableLocator instead. * @param connection connection we're using - * @param tableName table we're looking for + * @param tableName table we're looking for * @return Ordered list of {@link RegionInfo}. */ public static List getTableRegions(Connection connection, TableName tableName) - throws IOException { + throws IOException { return getTableRegions(connection, tableName, false); } /** - * Gets all of the regions of the specified table. Do not use this method - * to get meta table regions, use methods in MetaTableLocator instead. - * @param connection connection we're using - * @param tableName table we're looking for - * @param excludeOfflinedSplitParents If true, do not include offlined split - * parents in the return. + * Gets all of the regions of the specified table. Do not use this method to get meta table + * regions, use methods in MetaTableLocator instead. + * @param connection connection we're using + * @param tableName table we're looking for + * @param excludeOfflinedSplitParents If true, do not include offlined split parents in the + * return. * @return Ordered list of {@link RegionInfo}. 
*/ public static List getTableRegions(Connection connection, TableName tableName, - final boolean excludeOfflinedSplitParents) throws IOException { + final boolean excludeOfflinedSplitParents) throws IOException { List> result = getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents); return getListOfRegionInfos(result); } - private static List getListOfRegionInfos( - final List> pairs) { + private static List + getListOfRegionInfos(final List> pairs) { if (pairs == null || pairs.isEmpty()) { return Collections.emptyList(); } @@ -511,16 +507,16 @@ public class MetaTableAccessor { return null; } switch (type) { - case REGION: - byte[] startRow = new byte[tableName.getName().length + 2]; - System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length); - startRow[startRow.length - 2] = HConstants.DELIMITER; - startRow[startRow.length - 1] = HConstants.DELIMITER; - return startRow; - case ALL: - case TABLE: - default: - return tableName.getName(); + case REGION: + byte[] startRow = new byte[tableName.getName().length + 2]; + System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length); + startRow[startRow.length - 2] = HConstants.DELIMITER; + startRow[startRow.length - 1] = HConstants.DELIMITER; + return startRow; + case ALL: + case TABLE: + default: + return tableName.getName(); } } @@ -534,30 +530,28 @@ public class MetaTableAccessor { } final byte[] stopRow; switch (type) { - case REGION: - stopRow = new byte[tableName.getName().length + 3]; - System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); - stopRow[stopRow.length - 3] = ' '; - stopRow[stopRow.length - 2] = HConstants.DELIMITER; - stopRow[stopRow.length - 1] = HConstants.DELIMITER; - break; - case ALL: - case TABLE: - default: - stopRow = new byte[tableName.getName().length + 1]; - System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); - stopRow[stopRow.length - 1] = ' '; - break; + case REGION: + stopRow = new byte[tableName.getName().length + 3]; + System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); + stopRow[stopRow.length - 3] = ' '; + stopRow[stopRow.length - 2] = HConstants.DELIMITER; + stopRow[stopRow.length - 1] = HConstants.DELIMITER; + break; + case ALL: + case TABLE: + default: + stopRow = new byte[tableName.getName().length + 1]; + System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); + stopRow[stopRow.length - 1] = ' '; + break; } return stopRow; } /** - * This method creates a Scan object that will only scan catalog rows that - * belong to the specified table. It doesn't specify any columns. - * This is a better alternative to just using a start row and scan until - * it hits a new table since that requires parsing the HRI to get the table - * name. + * This method creates a Scan object that will only scan catalog rows that belong to the specified + * table. It doesn't specify any columns. This is a better alternative to just using a start row + * and scan until it hits a new table since that requires parsing the HRI to get the table name. * @param tableName bytes of table's name * @return configured Scan object */ @@ -591,29 +585,28 @@ public class MetaTableAccessor { /** * Do not use this method to get meta table regions, use methods in MetaTableLocator instead. * @param connection connection we're using - * @param tableName table we're looking for + * @param tableName table we're looking for * @return Return list of regioninfos and server. 
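getTableRegions above is the documented way to list a table's regions from hbase:meta (never the meta table itself). A sketch, with a made-up table name and the usual ConnectionFactory bootstrap assumed:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class ListTableRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // true = leave out offlined split parents, so only the live, serving regions come back.
      List<RegionInfo> regions =
        MetaTableAccessor.getTableRegions(conn, TableName.valueOf("example_table"), true);
      regions.forEach(r -> System.out.println(r.getRegionNameAsString()));
    }
  }
}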
*/ public static List> - getTableRegionsAndLocations(Connection connection, TableName tableName) - throws IOException { + getTableRegionsAndLocations(Connection connection, TableName tableName) throws IOException { return getTableRegionsAndLocations(connection, tableName, true); } /** * Do not use this method to get meta table regions, use methods in MetaTableLocator instead. - * @param connection connection we're using - * @param tableName table to work with, can be null for getting all regions + * @param connection connection we're using + * @param tableName table to work with, can be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return Return list of regioninfos and server addresses. */ // What happens here when 1M regions in hbase:meta? This won't scale? public static List> getTableRegionsAndLocations( - Connection connection, @Nullable final TableName tableName, - final boolean excludeOfflinedSplitParents) throws IOException { + Connection connection, @Nullable final TableName tableName, + final boolean excludeOfflinedSplitParents) throws IOException { if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { - throw new IOException("This method can't be used to locate meta regions;" - + " use MetaTableLocator instead"); + throw new IOException( + "This method can't be used to locate meta regions;" + " use MetaTableLocator instead"); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress CollectingVisitor> visitor = @@ -645,23 +638,18 @@ public class MetaTableAccessor { } } }; - scanMeta(connection, - getTableStartRowForMeta(tableName, QueryType.REGION), - getTableStopRowForMeta(tableName, QueryType.REGION), - QueryType.REGION, visitor); + scanMeta(connection, getTableStartRowForMeta(tableName, QueryType.REGION), + getTableStopRowForMeta(tableName, QueryType.REGION), QueryType.REGION, visitor); return visitor.getResults(); } /** * @param connection connection we're using * @param serverName server whose regions we're interested in - * @return List of user regions installed on this server (does not include - * catalog regions). - * @throws IOException + * @return List of user regions installed on this server (does not include catalog regions). n */ - public static NavigableMap - getServerUserRegions(Connection connection, final ServerName serverName) - throws IOException { + public static NavigableMap getServerUserRegions(Connection connection, + final ServerName serverName) throws IOException { final NavigableMap hris = new TreeMap<>(); // Fill the above hris map with entries from hbase:meta that have the passed // servername. 
@@ -684,10 +672,9 @@ public class MetaTableAccessor { return hris; } - public static void fullScanMetaAndPrint(Connection connection) - throws IOException { + public static void fullScanMetaAndPrint(Connection connection) throws IOException { Visitor v = r -> { - if (r == null || r.isEmpty()) { + if (r == null || r.isEmpty()) { return true; } LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); @@ -711,18 +698,18 @@ public class MetaTableAccessor { } public static void scanMetaForTableRegions(Connection connection, Visitor visitor, - TableName tableName) throws IOException { + TableName tableName) throws IOException { scanMeta(connection, tableName, QueryType.REGION, Integer.MAX_VALUE, visitor); } private static void scanMeta(Connection connection, TableName table, QueryType type, int maxRows, - final Visitor visitor) throws IOException { + final Visitor visitor) throws IOException { scanMeta(connection, getTableStartRowForMeta(table, type), getTableStopRowForMeta(table, type), type, maxRows, visitor); } private static void scanMeta(Connection connection, @Nullable final byte[] startRow, - @Nullable final byte[] stopRow, QueryType type, final Visitor visitor) throws IOException { + @Nullable final byte[] stopRow, QueryType type, final Visitor visitor) throws IOException { scanMeta(connection, startRow, stopRow, type, Integer.MAX_VALUE, visitor); } @@ -735,7 +722,7 @@ public class MetaTableAccessor { * @param rowLimit max number of rows to return */ public static void scanMeta(Connection connection, final Visitor visitor, - final TableName tableName, final byte[] row, final int rowLimit) throws IOException { + final TableName tableName, final byte[] row, final int rowLimit) throws IOException { byte[] startRow = null; byte[] stopRow = null; if (tableName != null) { @@ -753,23 +740,21 @@ public class MetaTableAccessor { /** * Performs a scan of META table. * @param connection connection we're using - * @param startRow Where to start the scan. Pass null if want to begin scan - * at first row. - * @param stopRow Where to stop the scan. Pass null if want to scan all rows - * from the start one - * @param type scanned part of meta - * @param maxRows maximum rows to return - * @param visitor Visitor invoked against each row. + * @param startRow Where to start the scan. Pass null if want to begin scan at first row. + * @param stopRow Where to stop the scan. Pass null if want to scan all rows from the start one + * @param type scanned part of meta + * @param maxRows maximum rows to return + * @param visitor Visitor invoked against each row. */ static void scanMeta(Connection connection, @Nullable final byte[] startRow, - @Nullable final byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) - throws IOException { + @Nullable final byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) + throws IOException { scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor); } private static void scanMeta(Connection connection, @Nullable final byte[] startRow, - @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, - final Visitor visitor) throws IOException { + @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, + final Visitor visitor) throws IOException { int rowUpperLimit = maxRows > 0 ? 
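The scan helpers in these hunks all funnel rows through the Visitor callback, which returns true to continue and false to stop early. A sketch that counts regions with fullScanRegions; it assumes a reachable cluster via the default configuration:

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class MetaVisitorSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      AtomicInteger count = new AtomicInteger();
      // Visitor is a functional interface: return true to keep scanning.
      MetaTableAccessor.fullScanRegions(conn, r -> {
        RegionInfo ri = MetaTableAccessor.getRegionInfo(r);
        if (ri != null) {
          count.incrementAndGet();
        }
        return true;
      });
      System.out.println("regions in hbase:meta: " + count.get());
    }
  }
}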
maxRows : Integer.MAX_VALUE; Scan scan = getMetaScan(connection.getConfiguration(), rowUpperLimit); @@ -787,9 +772,9 @@ public class MetaTableAccessor { } if (LOG.isTraceEnabled()) { - LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + - " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + - " with caching=" + scan.getCaching()); + LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + + " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + + " with caching=" + scan.getCaching()); } int currentRow = 0; @@ -819,7 +804,7 @@ public class MetaTableAccessor { */ @NonNull private static RegionInfo getClosestRegionInfo(Connection connection, - @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException { + @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException { byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setReversed(true); @@ -827,13 +812,13 @@ public class MetaTableAccessor { try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { Result result = resultScanner.next(); if (result == null) { - throw new TableNotFoundException("Cannot find row in META " + - " for table: " + tableName + ", row=" + Bytes.toStringBinary(row)); + throw new TableNotFoundException("Cannot find row in META " + " for table: " + tableName + + ", row=" + Bytes.toStringBinary(row)); } RegionInfo regionInfo = getRegionInfo(result); if (regionInfo == null) { - throw new IOException("RegionInfo was null or empty in Meta for " + - tableName + ", row=" + Bytes.toStringBinary(row)); + throw new IOException("RegionInfo was null or empty in Meta for " + tableName + ", row=" + + Bytes.toStringBinary(row)); } return regionInfo; } @@ -885,9 +870,10 @@ public class MetaTableAccessor { * @return a byte[] for state qualifier */ public static byte[] getRegionStateColumn(int replicaId) { - return replicaId == 0 ? HConstants.STATE_QUALIFIER - : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 + ? HConstants.STATE_QUALIFIER + : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -896,9 +882,10 @@ public class MetaTableAccessor { * @return a byte[] for sn column qualifier */ public static byte[] getServerNameColumn(int replicaId) { - return replicaId == 0 ? HConstants.SERVERNAME_QUALIFIER - : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 + ? HConstants.SERVERNAME_QUALIFIER + : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -910,7 +897,7 @@ public class MetaTableAccessor { return replicaId == 0 ? HConstants.SERVER_QUALIFIER : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -922,7 +909,7 @@ public class MetaTableAccessor { return replicaId == 0 ? 
HConstants.STARTCODE_QUALIFIER : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -934,12 +921,12 @@ public class MetaTableAccessor { return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** - * Parses the replicaId from the server column qualifier. See top of the class javadoc - * for the actual meta layout + * Parses the replicaId from the server column qualifier. See top of the class javadoc for the + * actual meta layout * @param serverColumn the column qualifier * @return an int for the replicaId */ @@ -969,14 +956,14 @@ public class MetaTableAccessor { byte[] serverColumn = getServerColumn(replicaId); Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn); if (cell == null || cell.getValueLength() == 0) return null; - String hostAndPort = Bytes.toString( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + String hostAndPort = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); byte[] startcodeColumn = getStartCodeColumn(replicaId); cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn); if (cell == null || cell.getValueLength() == 0) return null; try { return ServerName.valueOf(hostAndPort, - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } catch (IllegalArgumentException e) { LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e); return null; @@ -987,15 +974,14 @@ public class MetaTableAccessor { * Returns the {@link ServerName} from catalog table {@link Result} where the region is * transitioning on. It should be the same as {@link MetaTableAccessor#getServerName(Result,int)} * if the server is at OPEN state. - * * @param r Result to pull the transitioning server name from - * @return A ServerName instance or {@link MetaTableAccessor#getServerName(Result,int)} - * if necessary fields not found or empty. + * @return A ServerName instance or {@link MetaTableAccessor#getServerName(Result,int)} if + * necessary fields not found or empty. */ @Nullable public static ServerName getTargetServerName(final Result r, final int replicaId) { - final Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, - getServerNameColumn(replicaId)); + final Cell cell = + r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId)); if (cell == null || cell.getValueLength() == 0) { RegionLocations locations = MetaTableAccessor.getRegionLocations(r); if (locations != null) { @@ -1006,13 +992,13 @@ public class MetaTableAccessor { } return null; } - return ServerName.parseServerName(Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + return ServerName.parseServerName( + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } /** - * The latest seqnum that the server writing to meta observed when opening the region. - * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written. + * The latest seqnum that the server writing to meta observed when opening the region. E.g. 
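The replica-aware column helpers above derive their qualifiers from RegionInfo.REPLICA_ID_FORMAT: a bare qualifier for replica 0 and a _%04X suffix otherwise. A sketch, assuming getServerColumn, getRegionStateColumn and parseReplicaIdFromServerColumn are publicly accessible as they appear here; the printed values are what that format string implies:

import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.util.Bytes;

public final class ReplicaColumnSketch {
  public static void main(String[] args) {
    System.out.println(Bytes.toString(MetaTableAccessor.getServerColumn(0)));        // expected: server
    System.out.println(Bytes.toString(MetaTableAccessor.getServerColumn(2)));        // expected: server_0002
    System.out.println(Bytes.toString(MetaTableAccessor.getRegionStateColumn(2)));   // expected: state_0002
    // Round-trip the replica id back out of the qualifier.
    System.out.println(MetaTableAccessor.parseReplicaIdFromServerColumn(
      MetaTableAccessor.getServerColumn(2)));                                        // expected: 2
  }
}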
the + * seqNum when the result of {@link #getServerName(Result, int)} was written. * @param r Result to pull the seqNum from * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written. */ @@ -1023,8 +1009,7 @@ public class MetaTableAccessor { } /** - * Returns the daughter regions by reading the corresponding columns of the catalog table - * Result. + * Returns the daughter regions by reading the corresponding columns of the catalog table Result. * @param data a Result object from the catalog table scan * @return pair of RegionInfo or PairOfSameType(null, null) if region is not a split parent */ @@ -1036,8 +1021,8 @@ public class MetaTableAccessor { /** * Returns an HRegionLocationList extracted from the result. - * @return an HRegionLocationList containing all locations for the region range or null if - * we can't deserialize the result. + * @return an HRegionLocationList containing all locations for the region range or null if we + * can't deserialize the result. */ @Nullable public static RegionLocations getRegionLocations(final Result r) { @@ -1046,7 +1031,7 @@ public class MetaTableAccessor { if (regionInfo == null) return null; List locations = new ArrayList<>(1); - NavigableMap> familyMap = r.getNoVersionMap(); + NavigableMap> familyMap = r.getNoVersionMap(); locations.add(getRegionLocation(r, regionInfo, 0)); @@ -1080,16 +1065,15 @@ public class MetaTableAccessor { } /** - * Returns the HRegionLocation parsed from the given meta row Result - * for the given regionInfo and replicaId. The regionInfo can be the default region info - * for the replica. - * @param r the meta row result + * Returns the HRegionLocation parsed from the given meta row Result for the given regionInfo and + * replicaId. The regionInfo can be the default region info for the replica. + * @param r the meta row result * @param regionInfo RegionInfo for default replica - * @param replicaId the replicaId for the HRegionLocation + * @param replicaId the replicaId for the HRegionLocation * @return HRegionLocation parsed from the given meta row Result for the given replicaId */ private static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo, - final int replicaId) { + final int replicaId) { ServerName serverName = getServerName(r, replicaId); long seqNum = getSeqNumDuringOpen(r, replicaId); RegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId); @@ -1098,8 +1082,7 @@ public class MetaTableAccessor { /** * Returns RegionInfo object from the column - * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog - * table Result. + * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog table Result. * @param data a Result object from the catalog table scan * @return RegionInfo or null */ @@ -1110,26 +1093,25 @@ public class MetaTableAccessor { /** * Returns the RegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and * qualifier of the catalog table result. - * @param r a Result object from the catalog table scan + * @param r a Result object from the catalog table scan * @param qualifier Column family qualifier * @return An RegionInfo instance or null. 
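getRegionLocations above turns one hbase:meta row into a RegionLocations object holding every replica's location. A sketch that fetches a row with getRegionResult and walks the replicas; the region name is a placeholder:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public final class RegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      byte[] regionName = Bytes.toBytes(
        "example_table,,1600000000000.abcdef1234567890abcdef1234567890.");   // made-up name
      Result row = MetaTableAccessor.getRegionResult(conn, regionName);
      RegionLocations locations = MetaTableAccessor.getRegionLocations(row);
      if (locations != null) {
        // One entry per replica present in the row; entries can be null for unassigned replicas.
        for (HRegionLocation loc : locations.getRegionLocations()) {
          System.out.println(loc);
        }
      }
    }
  }
}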
*/ @Nullable - public static RegionInfo getRegionInfo(final Result r, byte [] qualifier) { + public static RegionInfo getRegionInfo(final Result r, byte[] qualifier) { Cell cell = r.getColumnLatestCell(getCatalogFamily(), qualifier); if (cell == null) return null; - return RegionInfo.parseFromOrNull(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength()); + return RegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); } /** * Fetch table state for given table from META table - * @param conn connection to use + * @param conn connection to use * @param tableName table to fetch state for */ @Nullable - public static TableState getTableState(Connection conn, TableName tableName) - throws IOException { + public static TableState getTableState(Connection conn, TableName tableName) throws IOException { if (tableName.equals(TableName.META_TABLE_NAME)) { return new TableState(tableName, TableState.State.ENABLED); } @@ -1144,8 +1126,7 @@ public class MetaTableAccessor { * @param conn connection to use * @return map {tableName -> state} */ - public static Map getTableStates(Connection conn) - throws IOException { + public static Map getTableStates(Connection conn) throws IOException { final Map states = new LinkedHashMap<>(); Visitor collector = r -> { TableState state = getTableState(r); @@ -1159,19 +1140,17 @@ public class MetaTableAccessor { } /** - * Updates state in META - * Do not use. For internal use only. - * @param conn connection to use + * Updates state in META Do not use. For internal use only. + * @param conn connection to use * @param tableName table to look for */ - public static void updateTableState(Connection conn, TableName tableName, - TableState.State actual) throws IOException { + public static void updateTableState(Connection conn, TableName tableName, TableState.State actual) + throws IOException { updateTableState(conn, new TableState(tableName, actual)); } /** - * Decode table state from META Result. - * Should contain cell from HConstants.TABLE_FAMILY + * Decode table state from META Result. Should contain cell from HConstants.TABLE_FAMILY * @return null if not found */ @Nullable @@ -1196,8 +1175,7 @@ public class MetaTableAccessor { /** * Visit the catalog table row. * @param r A row from catalog table - * @return True if we are to proceed scanning the table, else false if - * we are to stop now. + * @return True if we are to proceed scanning the table, else false if we are to stop now. */ boolean visit(final Result r) throws IOException; } @@ -1213,6 +1191,7 @@ public class MetaTableAccessor { */ static abstract class CollectingVisitor implements Visitor { final List results = new ArrayList<>(); + @Override public boolean visit(Result r) throws IOException { if (r != null && !r.isEmpty()) { @@ -1224,8 +1203,7 @@ public class MetaTableAccessor { abstract void add(Result r); /** - * @return Collected results; wait till visits complete to collect all - * possible results + * @return Collected results; wait till visits complete to collect all possible results */ List getResults() { return this.results; @@ -1260,7 +1238,7 @@ public class MetaTableAccessor { return true; } - //skip over offline and split regions + // skip over offline and split regions if (!(info.isOffline() || info.isSplit())) { return visitInternal(rowResult); } @@ -1269,10 +1247,10 @@ public class MetaTableAccessor { } /** - * A Visitor for a table. 
Provides a consistent view of the table's - * hbase:meta entries during concurrent splits (see HBASE-5986 for details). This class - * does not guarantee ordered traversal of meta entries, and can block until the - * hbase:meta entries for daughters are available during splits. + * A Visitor for a table. Provides a consistent view of the table's hbase:meta entries during + * concurrent splits (see HBASE-5986 for details). This class does not guarantee ordered traversal + * of meta entries, and can block until the hbase:meta entries for daughters are available during + * splits. */ public static abstract class TableVisitorBase extends DefaultVisitorBase { private TableName tableName; @@ -1321,26 +1299,18 @@ public class MetaTableAccessor { * Adds split daughters to the Put */ private static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) - throws IOException { + throws IOException { if (splitA != null) { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(HConstants.SPLITA_QUALIFIER) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(RegionInfo.toByteArray(splitA)) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitA)) + .build()); } if (splitB != null) { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(HConstants.SPLITB_QUALIFIER) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(RegionInfo.toByteArray(splitB)) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitB)) + .build()); } return put; } @@ -1348,7 +1318,7 @@ public class MetaTableAccessor { /** * Put the passed p to the hbase:meta table. * @param connection connection we're using - * @param p Put to add to hbase:meta + * @param p Put to add to hbase:meta */ private static void putToMetaTable(Connection connection, Put p) throws IOException { try (Table table = getMetaHTable(connection)) { @@ -1368,10 +1338,10 @@ public class MetaTableAccessor { /** * Put the passed ps to the hbase:meta table. * @param connection connection we're using - * @param ps Put to add to hbase:meta + * @param ps Put to add to hbase:meta */ public static void putsToMetaTable(final Connection connection, final List ps) - throws IOException { + throws IOException { if (ps.isEmpty()) { return; } @@ -1389,10 +1359,10 @@ public class MetaTableAccessor { /** * Delete the passed d from the hbase:meta table. * @param connection connection we're using - * @param d Delete to add to hbase:meta + * @param d Delete to add to hbase:meta */ private static void deleteFromMetaTable(final Connection connection, final Delete d) - throws IOException { + throws IOException { List dels = new ArrayList<>(1); dels.add(d); deleteFromMetaTable(connection, dels); @@ -1401,10 +1371,10 @@ public class MetaTableAccessor { /** * Delete the passed deletes from the hbase:meta table. * @param connection connection we're using - * @param deletes Deletes to add to hbase:meta This list should support #remove. 
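As a usage sketch for the table-state helpers above (getTableState, getTableStates), again internal, InterfaceAudience.Private calls; conn and the table name are assumed for illustration only.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableState;

static void dumpTableStates(Connection conn) throws IOException {
  // Single table: null is returned when no state cell exists in hbase:meta.
  TableState state = MetaTableAccessor.getTableState(conn, TableName.valueOf("demo_table"));
  System.out.println("demo_table -> " + (state == null ? "unknown" : state.getState()));

  // All tables that have a state cell in hbase:meta.
  Map<TableName, TableState> states = MetaTableAccessor.getTableStates(conn);
  states.forEach((table, ts) -> System.out.println(table + " -> " + ts.getState()));
}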
+ * @param deletes Deletes to add to hbase:meta This list should support #remove. */ private static void deleteFromMetaTable(final Connection connection, final List deletes) - throws IOException { + throws IOException { try (Table t = getMetaHTable(connection)) { debugLogMutations(deletes); t.delete(deletes); @@ -1412,14 +1382,10 @@ public class MetaTableAccessor { } private static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(getRegionStateColumn()) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(state.name())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getRegionStateColumn()) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name())) + .build()); return put; } @@ -1427,26 +1393,26 @@ public class MetaTableAccessor { * Update state column in hbase:meta. */ public static void updateRegionState(Connection connection, RegionInfo ri, - RegionState.State state) throws IOException { + RegionState.State state) throws IOException { Put put = new Put(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionName()); MetaTableAccessor.putsToMetaTable(connection, - Collections.singletonList(addRegionStateToPut(put, state))); + Collections.singletonList(addRegionStateToPut(put, state))); } /** * Adds daughter region infos to hbase:meta row for the specified region. Note that this does not * add its daughter's as different rows, but adds information about the daughters in the same row * as the parent. Use - * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} - * if you want to do that. + * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} if + * you want to do that. * @param connection connection we're using * @param regionInfo RegionInfo of parent region - * @param splitA first split daughter of the parent regionInfo - * @param splitB second split daughter of the parent regionInfo + * @param splitA first split daughter of the parent regionInfo + * @param splitB second split daughter of the parent regionInfo * @throws IOException if problem connecting or updating meta */ public static void addSplitsToParent(Connection connection, RegionInfo regionInfo, - RegionInfo splitA, RegionInfo splitB) throws IOException { + RegionInfo splitA, RegionInfo splitB) throws IOException { try (Table meta = getMetaHTable(connection)) { Put put = makePutFromRegionInfo(regionInfo, EnvironmentEdgeManager.currentTime()); addDaughtersToPut(put, splitA, splitB); @@ -1458,42 +1424,42 @@ public class MetaTableAccessor { /** * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this - * does not add its daughter's as different rows, but adds information about the daughters - * in the same row as the parent. Use - * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} - * if you want to do that. + * does not add its daughter's as different rows, but adds information about the daughters in the + * same row as the parent. Use + * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} if + * you want to do that. 
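updateRegionState above writes the region-state qualifier for a region; a minimal sketch under the assumption that conn and regionInfo already exist.

import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RegionState;

static void markRegionClosed(Connection conn, RegionInfo regionInfo) throws IOException {
  // The state cell is keyed on the default replica's row, even for secondary replicas.
  MetaTableAccessor.updateRegionState(conn, regionInfo, RegionState.State.CLOSED);
}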
* @param connection connection we're using * @param regionInfo region information * @throws IOException if problem connecting or updating meta */ public static void addRegionToMeta(Connection connection, RegionInfo regionInfo) - throws IOException { + throws IOException { addRegionsToMeta(connection, Collections.singletonList(regionInfo), 1); } /** - * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions - * is CLOSED. - * @param connection connection we're using + * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is + * CLOSED. + * @param connection connection we're using * @param regionInfos region information list * @throws IOException if problem connecting or updating meta */ public static void addRegionsToMeta(Connection connection, List regionInfos, - int regionReplication) throws IOException { + int regionReplication) throws IOException { addRegionsToMeta(connection, regionInfos, regionReplication, EnvironmentEdgeManager.currentTime()); } /** - * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions - * is CLOSED. - * @param connection connection we're using + * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is + * CLOSED. + * @param connection connection we're using * @param regionInfos region information list - * @param ts desired timestamp + * @param ts desired timestamp * @throws IOException if problem connecting or updating meta */ private static void addRegionsToMeta(Connection connection, List regionInfos, - int regionReplication, long ts) throws IOException { + int regionReplication, long ts) throws IOException { List puts = new ArrayList<>(); for (RegionInfo regionInfo : regionInfos) { if (RegionReplicaUtil.isDefaultReplica(regionInfo)) { @@ -1517,41 +1483,36 @@ public class MetaTableAccessor { int max = mergeRegions.size(); if (max > limit) { // Should never happen!!!!! But just in case. - throw new RuntimeException("Can't merge " + max + " regions in one go; " + limit + - " is upper-limit."); + throw new RuntimeException( + "Can't merge " + max + " regions in one go; " + limit + " is upper-limit."); } int counter = 0; - for (RegionInfo ri: mergeRegions) { + for (RegionInfo ri : mergeRegions) { String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter++); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY). - setRow(put.getRow()). - setFamily(HConstants.CATALOG_FAMILY). - setQualifier(Bytes.toBytes(qualifier)). - setTimestamp(put.getTimestamp()). - setType(Type.Put). - setValue(RegionInfo.toByteArray(ri)). - build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(Bytes.toBytes(qualifier)) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(ri)) + .build()); } return put; } /** - * Merge regions into one in an atomic operation. Deletes the merging regions in - * hbase:meta and adds the merged region. - * @param connection connection we're using + * Merge regions into one in an atomic operation. Deletes the merging regions in hbase:meta and + * adds the merged region. + * @param connection connection we're using * @param mergedRegion the merged region - * @param parentSeqNum Parent regions to merge and their next open sequence id used - * by serial replication. Set to -1 if not needed by this table. 
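A rough sketch of addRegionsToMeta above, which writes one hbase:meta row per new region in CLOSED state; conn is assumed, and the table name and split point are purely illustrative.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

static void seedMetaRows(Connection conn) throws IOException {
  TableName tn = TableName.valueOf("demo_table"); // illustrative table name
  RegionInfo first = RegionInfoBuilder.newBuilder(tn)
      .setStartKey(HConstants.EMPTY_BYTE_ARRAY)
      .setEndKey(Bytes.toBytes("m"))
      .build();
  RegionInfo second = RegionInfoBuilder.newBuilder(tn)
      .setStartKey(Bytes.toBytes("m"))
      .setEndKey(HConstants.EMPTY_BYTE_ARRAY)
      .build();
  // Rows are added in CLOSED state; assignment fills in the location columns later.
  MetaTableAccessor.addRegionsToMeta(conn, Arrays.asList(first, second), 1);
}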
- * @param sn the location of the region + * @param parentSeqNum Parent regions to merge and their next open sequence id used by serial + * replication. Set to -1 if not needed by this table. + * @param sn the location of the region */ public static void mergeRegions(Connection connection, RegionInfo mergedRegion, - Map parentSeqNum, ServerName sn, int regionReplication) - throws IOException { + Map parentSeqNum, ServerName sn, int regionReplication) throws IOException { try (Table meta = getMetaHTable(connection)) { long time = HConstants.LATEST_TIMESTAMP; List mutations = new ArrayList<>(); List replicationParents = new ArrayList<>(); - for (Map.Entry e: parentSeqNum.entrySet()) { + for (Map.Entry e : parentSeqNum.entrySet()) { RegionInfo ri = e.getKey(); long seqNum = e.getValue(); // Deletes for merging regions @@ -1597,23 +1558,21 @@ public class MetaTableAccessor { * Splits the region into two in an atomic operation. Offlines the parent region with the * information that it is split into two, and also adds the daughter regions. Does not add the * location information to the daughter regions since they are not open yet. - * @param connection connection we're using - * @param parent the parent region which is split + * @param connection connection we're using + * @param parent the parent region which is split * @param parentOpenSeqNum the next open sequence id for parent region, used by serial - * replication. -1 if not necessary. - * @param splitA Split daughter region A - * @param splitB Split daughter region B - * @param sn the location of the region + * replication. -1 if not necessary. + * @param splitA Split daughter region A + * @param splitB Split daughter region B + * @param sn the location of the region */ public static void splitRegion(Connection connection, RegionInfo parent, long parentOpenSeqNum, - RegionInfo splitA, RegionInfo splitB, ServerName sn, int regionReplication) - throws IOException { + RegionInfo splitA, RegionInfo splitB, ServerName sn, int regionReplication) throws IOException { try (Table meta = getMetaHTable(connection)) { long time = EnvironmentEdgeManager.currentTime(); // Put for parent - Put putParent = makePutFromRegionInfo(RegionInfoBuilder.newBuilder(parent) - .setOffline(true) - .setSplit(true).build(), time); + Put putParent = makePutFromRegionInfo( + RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), time); addDaughtersToPut(putParent, splitA, splitB); // Puts for daughters @@ -1650,7 +1609,7 @@ public class MetaTableAccessor { /** * Update state of the table in meta. 
* @param connection what we use for update - * @param state new state + * @param state new state */ private static void updateTableState(Connection connection, TableState state) throws IOException { Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime()); @@ -1671,10 +1630,9 @@ public class MetaTableAccessor { /** * Remove state for table from meta * @param connection to use for deletion - * @param table to delete state for + * @param table to delete state for */ - public static void deleteTableState(Connection connection, TableName table) - throws IOException { + public static void deleteTableState(Connection connection, TableName table) throws IOException { long time = EnvironmentEdgeManager.currentTime(); Delete delete = new Delete(table.getName()); delete.addColumns(getTableFamily(), getTableStateColumn(), time); @@ -1682,18 +1640,18 @@ public class MetaTableAccessor { LOG.info("Deleted table " + table + " state from META"); } - private static void multiMutate(Table table, byte[] row, - Mutation... mutations) throws IOException { + private static void multiMutate(Table table, byte[] row, Mutation... mutations) + throws IOException { multiMutate(table, row, Arrays.asList(mutations)); } /** - * Performs an atomic multi-mutate operation against the given table. Used by the likes of - * merge and split as these want to make atomic mutations across multiple rows. + * Performs an atomic multi-mutate operation against the given table. Used by the likes of merge + * and split as these want to make atomic mutations across multiple rows. * @throws IOException even if we encounter a RuntimeException, we'll still wrap it in an IOE. */ static void multiMutate(final Table table, byte[] row, final List mutations) - throws IOException { + throws IOException { debugLogMutations(mutations); Batch.Call callable = instance -> { MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder(); @@ -1738,14 +1696,14 @@ public class MetaTableAccessor { *

    * Uses passed catalog tracker to get a connection to the server hosting hbase:meta and makes * edits to that region. - * @param connection connection we're using - * @param regionInfo region to update location of - * @param openSeqNum the latest sequence number obtained when the region was open - * @param sn Server name + * @param connection connection we're using + * @param regionInfo region to update location of + * @param openSeqNum the latest sequence number obtained when the region was open + * @param sn Server name * @param masterSystemTime wall clock time from master if passed in the open region RPC */ public static void updateRegionLocation(Connection connection, RegionInfo regionInfo, - ServerName sn, long openSeqNum, long masterSystemTime) throws IOException { + ServerName sn, long openSeqNum, long masterSystemTime) throws IOException { updateLocation(connection, regionInfo, sn, openSeqNum, masterSystemTime); } @@ -1754,16 +1712,16 @@ public class MetaTableAccessor { *

    * Connects to the specified server which should be hosting the specified catalog region name to * perform the edit. - * @param connection connection we're using - * @param regionInfo region to update location of - * @param sn Server name - * @param openSeqNum the latest sequence number obtained when the region was open + * @param connection connection we're using + * @param regionInfo region to update location of + * @param sn Server name + * @param openSeqNum the latest sequence number obtained when the region was open * @param masterSystemTime wall clock time from master if passed in the open region RPC * @throws IOException In particular could throw {@link java.net.ConnectException} if the server - * is down on other end. + * is down on other end. */ private static void updateLocation(Connection connection, RegionInfo regionInfo, ServerName sn, - long openSeqNum, long masterSystemTime) throws IOException { + long openSeqNum, long masterSystemTime) throws IOException { // region replicas are kept in the primary region's row Put put = new Put(getMetaKeyForRegion(regionInfo), masterSystemTime); addRegionInfo(put, regionInfo); @@ -1778,7 +1736,7 @@ public class MetaTableAccessor { * @param regionInfo region to be deleted from META */ public static void deleteRegionInfo(Connection connection, RegionInfo regionInfo) - throws IOException { + throws IOException { Delete delete = new Delete(regionInfo.getRegionName()); delete.addFamily(getCatalogFamily(), HConstants.LATEST_TIMESTAMP); deleteFromMetaTable(connection, delete); @@ -1787,22 +1745,21 @@ public class MetaTableAccessor { /** * Deletes the specified regions from META. - * @param connection connection we're using + * @param connection connection we're using * @param regionsInfo list of regions to be deleted from META */ public static void deleteRegionInfos(Connection connection, List regionsInfo) - throws IOException { + throws IOException { deleteRegionInfos(connection, regionsInfo, EnvironmentEdgeManager.currentTime()); } /** * Deletes the specified regions from META. - * @param connection connection we're using + * @param connection connection we're using * @param regionsInfo list of regions to be deleted from META */ private static void deleteRegionInfos(Connection connection, List regionsInfo, - long ts) - throws IOException { + long ts) throws IOException { List deletes = new ArrayList<>(regionsInfo.size()); for (RegionInfo hri : regionsInfo) { Delete e = new Delete(hri.getRegionName()); @@ -1817,11 +1774,11 @@ public class MetaTableAccessor { /** * Overwrites the specified regions from hbase:meta. Deletes old rows for the given regions and * adds new ones. Regions added back have state CLOSED. - * @param connection connection we're using + * @param connection connection we're using * @param regionInfos list of regions to be added to META */ public static void overwriteRegions(Connection connection, List regionInfos, - int regionReplication) throws IOException { + int regionReplication) throws IOException { // use master time for delete marker and the Put long now = EnvironmentEdgeManager.currentTime(); deleteRegionInfos(connection, regionInfos, now); @@ -1838,11 +1795,11 @@ public class MetaTableAccessor { /** * Deletes merge qualifiers for the specified merge region. 
- * @param connection connection we're using + * @param connection connection we're using * @param mergeRegion the merged region */ public static void deleteMergeQualifiers(Connection connection, final RegionInfo mergeRegion) - throws IOException { + throws IOException { Delete delete = new Delete(mergeRegion.getRegionName()); // NOTE: We are doing a new hbase:meta read here. Cell[] cells = getRegionResult(connection, mergeRegion.getRegionName()).rawCells(); @@ -1863,60 +1820,42 @@ public class MetaTableAccessor { // the previous GCMultipleMergedRegionsProcedure is still going on, in this case, the second // GCMultipleMergedRegionsProcedure could delete the merged region by accident! if (qualifiers.isEmpty()) { - LOG.info("No merged qualifiers for region " + mergeRegion.getRegionNameAsString() + - " in meta table, they are cleaned up already, Skip."); + LOG.info("No merged qualifiers for region " + mergeRegion.getRegionNameAsString() + + " in meta table, they are cleaned up already, Skip."); return; } deleteFromMetaTable(connection, delete); - LOG.info("Deleted merge references in " + mergeRegion.getRegionNameAsString() + - ", deleted qualifiers " + qualifiers.stream().map(Bytes::toStringBinary). - collect(Collectors.joining(", "))); + LOG.info( + "Deleted merge references in " + mergeRegion.getRegionNameAsString() + ", deleted qualifiers " + + qualifiers.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", "))); } - public static Put addRegionInfo(final Put p, final RegionInfo hri) - throws IOException { - p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(HConstants.REGIONINFO_QUALIFIER) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - // Serialize the Default Replica HRI otherwise scan of hbase:meta - // shows an info:regioninfo value with encoded name and region - // name that differs from that of the hbase;meta row. - .setValue(RegionInfo.toByteArray(RegionReplicaUtil.getRegionInfoForDefaultReplica(hri))) - .build()); + public static Put addRegionInfo(final Put p, final RegionInfo hri) throws IOException { + p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) + .setFamily(getCatalogFamily()).setQualifier(HConstants.REGIONINFO_QUALIFIER) + .setTimestamp(p.getTimestamp()).setType(Type.Put) + // Serialize the Default Replica HRI otherwise scan of hbase:meta + // shows an info:regioninfo value with encoded name and region + // name that differs from that of the hbase;meta row. 
+ .setValue(RegionInfo.toByteArray(RegionReplicaUtil.getRegionInfoForDefaultReplica(hri))) + .build()); return p; } public static Put addLocation(Put p, ServerName sn, long openSeqNum, int replicaId) - throws IOException { + throws IOException { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - return p.add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getServerColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(sn.getAddress().toString())) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getStartCodeColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(sn.getStartcode())) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)) - .build()); + return p + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(sn.getAddress().toString())).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(sn.getStartcode())).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()).setType(Type.Put) + .setValue(Bytes.toBytes(openSeqNum)).build()); } private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName) { @@ -1969,7 +1908,7 @@ public class MetaTableAccessor { } public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openSeqNum, long ts) - throws IOException { + throws IOException { Put put = new Put(regionInfo.getRegionName(), ts); addReplicationBarrier(put, openSeqNum); return put; @@ -1979,39 +1918,24 @@ public class MetaTableAccessor { * See class comment on SerialReplicationChecker */ public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.REPLICATION_BARRIER_FAMILY) - .setQualifier(HConstants.SEQNUM_QUALIFIER) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)) + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) .build()); } public static Put addEmptyLocation(Put p, int replicaId) throws IOException { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - return p.add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getServerColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getStartCodeColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - 
.setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .build()); + return p + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()).setType(Type.Put) + .build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).build()); } public static final class ReplicationBarrierResult { @@ -2039,10 +1963,10 @@ public class MetaTableAccessor { @Override public String toString() { - return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + - state + ", parentRegionNames=" + - parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + - "]"; + return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + state + + ", parentRegionNames=" + + parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + + "]"; } } @@ -2068,7 +1992,7 @@ public class MetaTableAccessor { } public static ReplicationBarrierResult getReplicationBarrierResult(Connection conn, - TableName tableName, byte[] row, byte[] encodedRegionName) throws IOException { + TableName tableName, byte[] row, byte[] encodedRegionName) throws IOException { byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); byte[] metaStopKey = RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); @@ -2086,8 +2010,9 @@ public class MetaTableAccessor { // TODO: we may look up a region which has already been split or merged so we need to check // whether the encoded name matches. Need to find a way to quit earlier when there is no // record for the given region, for now it will scan to the end of the table. 
- if (!Bytes.equals(encodedRegionName, - Bytes.toBytes(RegionInfo.encodeRegionName(regionName)))) { + if ( + !Bytes.equals(encodedRegionName, Bytes.toBytes(RegionInfo.encodeRegionName(regionName))) + ) { continue; } return getReplicationBarrierResult(result); @@ -2096,7 +2021,7 @@ public class MetaTableAccessor { } public static long[] getReplicationBarrier(Connection conn, byte[] regionName) - throws IOException { + throws IOException { try (Table table = getMetaHTable(conn)) { Result result = table.get(new Get(regionName) .addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) @@ -2106,7 +2031,7 @@ public class MetaTableAccessor { } public static List> getTableEncodedRegionNameAndLastBarrier(Connection conn, - TableName tableName) throws IOException { + TableName tableName) throws IOException { List> list = new ArrayList<>(); scanMeta(conn, getTableStartRowForMeta(tableName, QueryType.REPLICATION), getTableStopRowForMeta(tableName, QueryType.REPLICATION), QueryType.REPLICATION, r -> { @@ -2124,7 +2049,7 @@ public class MetaTableAccessor { } public static List getTableEncodedRegionNamesForSerialReplication(Connection conn, - TableName tableName) throws IOException { + TableName tableName) throws IOException { List list = new ArrayList<>(); scanMeta(conn, getTableStartRowForMeta(tableName, QueryType.REPLICATION), getTableStopRowForMeta(tableName, QueryType.REPLICATION), QueryType.REPLICATION, @@ -2151,13 +2076,9 @@ public class MetaTableAccessor { } private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException { - return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(p.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)) - .build()); + return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getSeqNumColumn(replicaId)) + .setTimestamp(p.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .build()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java index 3e06f4250af..a49575849b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Exception thrown when the result needs to be chunked on the server side. 
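The replication-barrier helpers above back serial replication; getReplicationBarrier returns the open sequence ids recorded under HConstants.REPLICATION_BARRIER_FAMILY for a region. A small sketch, with conn and regionInfo assumed to exist.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;

static void printBarriers(Connection conn, RegionInfo regionInfo) throws IOException {
  // Each value is an openSeqNum recorded when the region was (re)opened; empty if none written.
  long[] barriers = MetaTableAccessor.getReplicationBarrier(conn, regionInfo.getRegionName());
  System.out.println(regionInfo.getEncodedName() + " barriers=" + Arrays.toString(barriers));
}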
- * It signals that retries should happen right away and not count against the number of - * retries because some of the multi was a success. + * Exception thrown when the result needs to be chunked on the server side. It signals that retries + * should happen right away and not count against the number of retries because some of the multi + * was a success. */ @InterfaceAudience.Public public class MultiActionResultTooLarge extends RetryImmediatelyException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java index 5263523417e..83e29fd9edc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java index 72ff1e61b84..0af01d23bdd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java index c51fccb5955..a15833ac17a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; @@ -27,6 +25,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException { private static final long serialVersionUID = 6439786157874827523L; + /** * default constructor */ @@ -35,7 +34,7 @@ public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException { } /** - * @param message + * n */ public NotAllMetaRegionsOnlineException(String message) { super(message); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java index 918408778c0..aa138478b4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java index e887928da82..473947b8f76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,10 @@ package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server was shut down and - * restarted so fast that the master still hasn't processed the server shutdown - * of the first instance, or when master is initializing and client call admin - * operations, or when an operation is performed on a region server that is still starting. + * This exception is thrown by the master when a region server was shut down and restarted so fast + * that the master still hasn't processed the server shutdown of the first instance, or when master + * is initializing and client call admin operations, or when an operation is performed on a region + * server that is still starting. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java index 62f84e9495b..5e60e44243a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java index 406c41ee52c..ba1ccfa5d9c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; @@ -34,14 +32,14 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Read rs group information from hbase:rsgroup. + * Read rs group information from hbase:rsgroup. */ @InterfaceAudience.Private public final class RSGroupTableAccessor { - //Assigned before user tables + // Assigned before user tables private static final TableName RSGROUP_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); private static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); private static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i"); @@ -52,8 +50,7 @@ public final class RSGroupTableAccessor { return connection.getAdmin().tableExists(RSGROUP_TABLE_NAME); } - public static List getAllRSGroupInfo(Connection connection) - throws IOException { + public static List getAllRSGroupInfo(Connection connection) throws IOException { try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)) { List rsGroupInfos = new ArrayList<>(); for (Result result : rsGroupTable.getScanner(new Scan())) { @@ -71,14 +68,13 @@ public final class RSGroupTableAccessor { if (rsGroupInfo == null) { return null; } - RSGroupProtos.RSGroupInfo proto = - RSGroupProtos.RSGroupInfo.parseFrom(rsGroupInfo); + RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(rsGroupInfo); return ProtobufUtil.toGroupInfo(proto); } public static RSGroupInfo getRSGroupInfo(Connection connection, byte[] rsGroupName) - throws IOException { - try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)){ + throws IOException { + try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)) { Result result = rsGroupTable.get(new Get(rsGroupName)); return getRSGroupInfo(result); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java index 8a8d2151aa2..aff9ff8af47 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,7 @@ package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when something happens related to region handling. 
- * Subclasses have to be more specific. + * Thrown when something happens related to region handling. Subclasses have to be more specific. */ @InterfaceAudience.Public public class RegionException extends HBaseIOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index e05a0e6f109..6184940313d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.List; @@ -28,12 +25,13 @@ import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** * Encapsulates per-region load metrics. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link RegionMetrics} + * instead. */ @InterfaceAudience.Public @Deprecated @@ -43,7 +41,7 @@ public class RegionLoad implements RegionMetrics { protected ClusterStatusProtos.RegionLoad regionLoadPB; private final RegionMetrics metrics; - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) { this.regionLoadPB = regionLoadPB; this.metrics = RegionMetricsBuilder.toRegionMetrics(regionLoadPB); @@ -56,8 +54,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the region name - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionName} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link #getRegionName} + * instead. */ @Deprecated public byte[] getName() { @@ -151,8 +149,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the number of stores - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link #getStoreCount} + * instead. */ @Deprecated public int getStores() { @@ -161,8 +159,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the number of storefiles - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileCount} instead. */ @Deprecated public int getStorefiles() { @@ -171,8 +169,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the total size of the storefiles, in MB - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileSize} instead. 
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileSize} instead. */ @Deprecated public int getStorefileSizeMB() { @@ -181,8 +179,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the memstore size, in MB - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMemStoreSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getMemStoreSize} instead. */ @Deprecated public int getMemStoreSizeMB() { @@ -191,8 +189,8 @@ public class RegionLoad implements RegionMetrics { /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * ((HBASE-3935)). - * Use {@link #getStoreFileRootLevelIndexSize} instead. + * ((HBASE-3935)). Use + * {@link #getStoreFileRootLevelIndexSize} instead. */ @Deprecated public int getStorefileIndexSizeMB() { @@ -201,8 +199,8 @@ public class RegionLoad implements RegionMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileRootLevelIndexSize()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileRootLevelIndexSize()} instead. */ @Deprecated public int getStorefileIndexSizeKB() { @@ -211,8 +209,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the number of requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRequestCount()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRequestCount()} instead. */ @Deprecated public long getRequestsCount() { @@ -221,8 +219,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the number of read requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getReadRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getReadRequestCount} instead. */ @Deprecated public long getReadRequestsCount() { @@ -231,8 +229,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the number of filtered read requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getFilteredReadRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getFilteredReadRequestCount} instead. */ @Deprecated public long getFilteredReadRequestsCount() { @@ -241,8 +239,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the number of write requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getWriteRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getWriteRequestCount} instead. */ @Deprecated public long getWriteRequestsCount() { @@ -251,8 +249,8 @@ public class RegionLoad implements RegionMetrics { /** * @return The current total size of root-level indexes for the region, in KB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileRootLevelIndexSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileRootLevelIndexSize} instead. 
*/ @Deprecated public int getRootIndexSizeKB() { @@ -261,8 +259,8 @@ public class RegionLoad implements RegionMetrics { /** * @return The total size of all index blocks, not just the root level, in KB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileUncompressedDataIndexSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileUncompressedDataIndexSize} instead. */ @Deprecated public int getTotalStaticIndexSizeKB() { @@ -270,10 +268,9 @@ public class RegionLoad implements RegionMetrics { } /** - * @return The total size of all Bloom filter blocks, not just loaded into the - * block cache, in KB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getBloomFilterSize} instead. + * @return The total size of all Bloom filter blocks, not just loaded into the block cache, in KB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getBloomFilterSize} instead. */ @Deprecated public int getTotalStaticBloomSizeKB() { @@ -282,8 +279,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the total number of kvs in current compaction - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCompactingCellCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCompactingCellCount} instead. */ @Deprecated public long getTotalCompactingKVs() { @@ -292,8 +289,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the number of already compacted kvs in current compaction - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCompactedCellCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCompactedCellCount} instead. */ @Deprecated public long getCurrentCompactedKVs() { @@ -303,8 +300,8 @@ public class RegionLoad implements RegionMetrics { /** * This does not really belong inside RegionLoad but its being done in the name of expediency. * @return the completed sequence Id for the region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCompletedSequenceId} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCompletedSequenceId} instead. */ @Deprecated public long getCompleteSequenceId() { @@ -313,23 +310,22 @@ public class RegionLoad implements RegionMetrics { /** * @return completed sequence id per store. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreSequenceId} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreSequenceId} instead. */ @Deprecated public List getStoreCompleteSequenceId() { return metrics.getStoreSequenceId().entrySet().stream() - .map(s -> ClusterStatusProtos.StoreSequenceId.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(s.getKey())) - .setSequenceId(s.getValue()) - .build()) - .collect(Collectors.toList()); + .map(s -> ClusterStatusProtos.StoreSequenceId.newBuilder() + .setFamilyName(UnsafeByteOperations.unsafeWrap(s.getKey())).setSequenceId(s.getValue()) + .build()) + .collect(Collectors.toList()); } /** * @return the uncompressed size of the storefiles in MB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getUncompressedStoreFileSize} instead. 
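Since RegionLoad is deprecated in favor of RegionMetrics, new code would normally go through ClusterMetrics rather than construct RegionLoad at all. A sketch of that replacement path, assuming only an open Connection named conn; Option.LIVE_SERVERS keeps the RPC payload limited to per-server/per-region load.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

static void printRegionMetrics(Connection conn) throws IOException {
  try (Admin admin = conn.getAdmin()) {
    ClusterMetrics cluster =
        admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
    for (ServerMetrics server : cluster.getLiveServerMetrics().values()) {
      for (RegionMetrics region : server.getRegionMetrics().values()) {
        System.out.println(region.getNameAsString()
            + " storeFiles=" + region.getStoreFileCount()
            + " storeFileSizeMB=" + region.getStoreFileSize().get(Size.Unit.MEGABYTE)
            + " readRequests=" + region.getReadRequestCount());
      }
    }
  }
}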
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getUncompressedStoreFileSize} instead. */ @Deprecated public int getStoreUncompressedSizeMB() { @@ -351,8 +347,8 @@ public class RegionLoad implements RegionMetrics { /** * @return the timestamp of the oldest hfile for any store of this region. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLastMajorCompactionTimestamp} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLastMajorCompactionTimestamp} instead. */ @Deprecated public long getLastMajorCompactionTs() { @@ -401,47 +397,33 @@ public class RegionLoad implements RegionMetrics { */ @Override public String toString() { - StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "numberOfStores", - this.getStores()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "numberOfStores", this.getStores()); Strings.appendKeyValue(sb, "numberOfStorefiles", this.getStorefiles()); Strings.appendKeyValue(sb, "storeRefCount", this.getStoreRefCount()); - Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - this.getStoreUncompressedSizeMB()); - Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", - this.getLastMajorCompactionTs()); + Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", this.getStoreUncompressedSizeMB()); + Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", this.getLastMajorCompactionTs()); Strings.appendKeyValue(sb, "storefileSizeMB", this.getStorefileSizeMB()); if (this.getStoreUncompressedSizeMB() != 0) { - Strings.appendKeyValue(sb, "compressionRatio", - String.format("%.4f", (float) this.getStorefileSizeMB() / - (float) this.getStoreUncompressedSizeMB())); + Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", + (float) this.getStorefileSizeMB() / (float) this.getStoreUncompressedSizeMB())); } - Strings.appendKeyValue(sb, "memstoreSizeMB", - this.getMemStoreSizeMB()); - Strings.appendKeyValue(sb, "readRequestsCount", - this.getReadRequestsCount()); - Strings.appendKeyValue(sb, "writeRequestsCount", - this.getWriteRequestsCount()); - Strings.appendKeyValue(sb, "rootIndexSizeKB", - this.getRootIndexSizeKB()); - Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", - this.getTotalStaticIndexSizeKB()); - Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", - this.getTotalStaticBloomSizeKB()); - Strings.appendKeyValue(sb, "totalCompactingKVs", - this.getTotalCompactingKVs()); - Strings.appendKeyValue(sb, "currentCompactedKVs", - this.getCurrentCompactedKVs()); + Strings.appendKeyValue(sb, "memstoreSizeMB", this.getMemStoreSizeMB()); + Strings.appendKeyValue(sb, "readRequestsCount", this.getReadRequestsCount()); + Strings.appendKeyValue(sb, "writeRequestsCount", this.getWriteRequestsCount()); + Strings.appendKeyValue(sb, "rootIndexSizeKB", this.getRootIndexSizeKB()); + Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", this.getTotalStaticIndexSizeKB()); + Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", this.getTotalStaticBloomSizeKB()); + Strings.appendKeyValue(sb, "totalCompactingKVs", this.getTotalCompactingKVs()); + Strings.appendKeyValue(sb, "currentCompactedKVs", this.getCurrentCompactedKVs()); float compactionProgressPct = Float.NaN; if (this.getTotalCompactingKVs() > 0) { - compactionProgressPct = ((float) this.getCurrentCompactedKVs() / - (float) this.getTotalCompactingKVs()); + compactionProgressPct = + ((float) this.getCurrentCompactedKVs() / (float) 
this.getTotalCompactingKVs()); } - Strings.appendKeyValue(sb, "compactionProgressPct", - compactionProgressPct); - Strings.appendKeyValue(sb, "completeSequenceId", - this.getCompleteSequenceId()); - Strings.appendKeyValue(sb, "dataLocality", - this.getDataLocality()); + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "completeSequenceId", this.getCompleteSequenceId()); + Strings.appendKeyValue(sb, "dataLocality", this.getDataLocality()); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 0d3a464e0f8..4d6dd6d43fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Container for holding a list of {@link HRegionLocation}'s that correspond to the - * same range. The list is indexed by the replicaId. This is an immutable list, - * however mutation operations are provided which returns a new List via copy-on-write - * (assuming small number of locations) + * Container for holding a list of {@link HRegionLocation}'s that correspond to the same range. The + * list is indexed by the replicaId. This is an immutable list, however mutation operations are + * provided which returns a new List via copy-on-write (assuming small number of locations) */ @InterfaceAudience.Private public class RegionLocations implements Iterable { @@ -45,10 +42,9 @@ public class RegionLocations implements Iterable { private final HRegionLocation[] locations; // replicaId -> HRegionLocation. /** - * Constructs the region location list. The locations array should - * contain all the locations for known replicas for the region, and should be - * sorted in replicaId ascending order, although it can contain nulls indicating replicaIds - * that the locations of which are not known. + * Constructs the region location list. The locations array should contain all the locations for + * known replicas for the region, and should be sorted in replicaId ascending order, although it + * can contain nulls indicating replicaIds that the locations of which are not known. * @param locations an array of HRegionLocations for the same region range */ public RegionLocations(HRegionLocation... 
locations) { @@ -66,7 +62,7 @@ public class RegionLocations implements Iterable { index++; } // account for the null elements in the array after maxReplicaIdIndex - maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1) ); + maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1)); if (maxReplicaId + 1 == locations.length) { this.locations = locations; @@ -79,7 +75,7 @@ public class RegionLocations implements Iterable { } } for (HRegionLocation loc : this.locations) { - if (loc != null && loc.getServerName() != null){ + if (loc != null && loc.getServerName() != null) { numNonNullElements++; } } @@ -91,8 +87,7 @@ public class RegionLocations implements Iterable { } /** - * Returns the size of the list even if some of the elements - * might be null. + * Returns the size of the list even if some of the elements might be null. * @return the size of the list (corresponding to the max replicaId) */ public int size() { @@ -116,18 +111,18 @@ public class RegionLocations implements Iterable { } /** - * Returns a new RegionLocations with the locations removed (set to null) - * which have the destination server as given. + * Returns a new RegionLocations with the locations removed (set to null) which have the + * destination server as given. * @param serverName the serverName to remove locations of - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations removeByServer(ServerName serverName) { HRegionLocation[] newLocations = null; for (int i = 0; i < locations.length; i++) { // check whether something to remove if (locations[i] != null && serverName.equals(locations[i].getServerName())) { - if (newLocations == null) { //first time + if (newLocations == null) { // first time newLocations = new HRegionLocation[locations.length]; System.arraycopy(locations, 0, newLocations, 0, i); } @@ -142,8 +137,8 @@ public class RegionLocations implements Iterable { /** * Removes the given location from the list * @param location the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(HRegionLocation location) { if (location == null) return this; @@ -153,9 +148,12 @@ public class RegionLocations implements Iterable { // check whether something to remove. HRL.compareTo() compares ONLY the // serverName. We want to compare the HRI's as well. 
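The class Javadoc above describes RegionLocations as an immutable, replicaId-indexed list whose mutation-style methods return a new instance via copy-on-write. A minimal sketch of that behaviour, purely illustrative since RegionLocations is IA.Private; the table and server names are placeholders:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class RegionLocationsSketch {
  public static void main(String[] args) {
    RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();
    ServerName rs1 = ServerName.valueOf("rs1.example.com", 16020, 1L);

    // The backing array is indexed by replicaId; here only replica 0 is known.
    RegionLocations locs = new RegionLocations(new HRegionLocation(primary, rs1, 10L));

    // Copy-on-write: removeByServer() does not mutate 'locs', it returns a new list
    // (or the same instance when nothing matched).
    RegionLocations afterRemove = locs.removeByServer(rs1);

    System.out.println(locs.size());                            // 1
    System.out.println(locs.getRegionLocation(0) != null);      // true, original untouched
    System.out.println(afterRemove.getRegionLocation(0) == null); // true, copy has the slot cleared
  }
}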
- if (locations[replicaId] == null - || RegionInfo.COMPARATOR.compare(location.getRegion(), locations[replicaId].getRegion()) != 0 - || !location.equals(locations[replicaId])) { + if ( + locations[replicaId] == null + || RegionInfo.COMPARATOR.compare(location.getRegion(), locations[replicaId].getRegion()) + != 0 + || !location.equals(locations[replicaId]) + ) { return this; } @@ -169,8 +167,8 @@ public class RegionLocations implements Iterable { /** * Removes location of the given replicaId from the list * @param replicaId the replicaId of the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(int replicaId) { if (getRegionLocation(replicaId) == null) { @@ -204,13 +202,11 @@ public class RegionLocations implements Iterable { } /** - * Merges this RegionLocations list with the given list assuming - * same range, and keeping the most up to date version of the - * HRegionLocation entries from either list according to seqNum. If seqNums - * are equal, the location from the argument (other) is taken. + * Merges this RegionLocations list with the given list assuming same range, and keeping the most + * up to date version of the HRegionLocation entries from either list according to seqNum. If + * seqNums are equal, the location from the argument (other) is taken. * @param other the locations to merge with - * @return an RegionLocations object with merged locations or the same object - * if nothing is merged + * @return an RegionLocations object with merged locations or the same object if nothing is merged */ public RegionLocations mergeLocations(RegionLocations other) { assert other != null; @@ -231,8 +227,7 @@ public class RegionLocations implements Iterable { regionInfo = otherLoc.getRegion(); } - HRegionLocation selectedLoc = selectRegionLocation(thisLoc, - otherLoc, true, false); + HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); if (selectedLoc != thisLoc) { if (newLocations == null) { @@ -247,10 +242,9 @@ public class RegionLocations implements Iterable { // ensure that all replicas share the same start code. Otherwise delete them if (newLocations != null && regionInfo != null) { - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { - if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, - newLocations[i].getRegion())) { + if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegion())) { newLocations[i] = null; } } @@ -261,7 +255,7 @@ public class RegionLocations implements Iterable { } private HRegionLocation selectRegionLocation(HRegionLocation oldLocation, - HRegionLocation location, boolean checkForEquals, boolean force) { + HRegionLocation location, boolean checkForEquals, boolean force) { if (location == null) { return oldLocation == null ? null : oldLocation; } @@ -270,44 +264,44 @@ public class RegionLocations implements Iterable { return location; } - if (force - || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { + if (force || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { return location; } return oldLocation; } /** - * Updates the location with new only if the new location has a higher - * seqNum than the old one or force is true. 
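The mergeLocations/updateLocation Javadoc above states the selection rule: a location only replaces the existing one when its seqNum is higher (or force is set), and the result is a fresh copy-on-write instance. A small sketch of that rule under the same caveats (internal class, placeholder names, made-up seqNums):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class SeqNumUpdateSketch {
  public static void main(String[] args) {
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();
    ServerName oldRs = ServerName.valueOf("rs1.example.com", 16020, 1L);
    ServerName newRs = ServerName.valueOf("rs2.example.com", 16020, 1L);

    RegionLocations locs = new RegionLocations(new HRegionLocation(region, oldRs, 10L));

    // A stale location (lower seqNum) is ignored: the same instance comes back.
    RegionLocations ignored =
      locs.updateLocation(new HRegionLocation(region, newRs, 5L), false, false);
    System.out.println(ignored == locs); // true

    // A newer location (higher seqNum) wins and yields a new copy-on-write instance.
    RegionLocations updated =
      locs.updateLocation(new HRegionLocation(region, newRs, 20L), false, false);
    System.out.println(updated == locs); // false
    System.out.println(updated.getRegionLocation(0).getServerName()); // rs2.example.com,...
  }
}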
- * @param location the location to add or update - * @param checkForEquals whether to update the location if seqNums for the - * HRegionLocations for the old and new location are the same - * @param force whether to force update - * @return an RegionLocations object with updated locations or the same object - * if nothing is updated + * Updates the location with new only if the new location has a higher seqNum than the old one or + * force is true. + * @param location the location to add or update + * @param checkForEquals whether to update the location if seqNums for the HRegionLocations for + * the old and new location are the same + * @param force whether to force update + * @return an RegionLocations object with updated locations or the same object if nothing is + * updated */ - public RegionLocations updateLocation(HRegionLocation location, - boolean checkForEquals, boolean force) { + public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, + boolean force) { assert location != null; int replicaId = location.getRegion().getReplicaId(); HRegionLocation oldLoc = getRegionLocation(location.getRegion().getReplicaId()); - HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, - checkForEquals, force); + HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, checkForEquals, force); if (selectedLoc == oldLoc) { return this; } - HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId +1)]; + HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId + 1)]; System.arraycopy(locations, 0, newLocations, 0, locations.length); newLocations[replicaId] = location; // ensure that all replicas share the same start code. Otherwise delete them - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { - if (!RegionReplicaUtil.isReplicasForSameRegion(location.getRegion(), - newLocations[i].getRegion())) { + if ( + !RegionReplicaUtil.isReplicasForSameRegion(location.getRegion(), + newLocations[i].getRegion()) + ) { newLocations[i] = null; } } @@ -327,16 +321,18 @@ public class RegionLocations implements Iterable { } /** - * Returns the region location from the list for matching regionName, which can - * be regionName or encodedRegionName + * Returns the region location from the list for matching regionName, which can be regionName or + * encodedRegionName * @param regionName regionName or encodedRegionName * @return HRegionLocation found or null */ public HRegionLocation getRegionLocationByRegionName(byte[] regionName) { for (HRegionLocation loc : locations) { if (loc != null) { - if (Bytes.equals(loc.getRegion().getRegionName(), regionName) - || Bytes.equals(loc.getRegion().getEncodedNameAsBytes(), regionName)) { + if ( + Bytes.equals(loc.getRegion().getRegionName(), regionName) + || Bytes.equals(loc.getRegion().getEncodedNameAsBytes(), regionName) + ) { return loc; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 1a8e6c8c655..73d036e7370 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; @@ -26,8 +23,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Encapsulates per-region load metrics. - */ + * Encapsulates per-region load metrics. + */ @InterfaceAudience.Public public interface RegionMetrics { @@ -87,8 +84,8 @@ public interface RegionMetrics { /** * TODO: why we pass the same value to different counters? Currently, the value from - * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() - * see HRegionServer#createRegionLoad. + * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() see + * HRegionServer#createRegionLoad. * @return The current total size of root-level indexes for the region */ Size getStoreFileIndexSize(); @@ -129,7 +126,6 @@ public interface RegionMetrics { */ Map getStoreSequenceId(); - /** * @return the uncompressed size of the storefiles */ @@ -151,8 +147,8 @@ public interface RegionMetrics { int getStoreRefCount(); /** - * @return the max reference count for any store file among all compacted stores files - * of this region + * @return the max reference count for any store file among all compacted stores files of this + * region */ int getMaxCompactedStoreFileRefCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index cca6686f586..15a9c48bfbe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Collections; @@ -39,96 +36,89 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @InterfaceAudience.Private public final class RegionMetricsBuilder { - public static List toRegionMetrics( - AdminProtos.GetRegionLoadResponse regionLoadResponse) { + public static List + toRegionMetrics(AdminProtos.GetRegionLoadResponse regionLoadResponse) { return regionLoadResponse.getRegionLoadsList().stream() - .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); + .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); } public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) { return RegionMetricsBuilder - .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray()) - .setBloomFilterSize(new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE)) - .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs()) - .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs()) - .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId()) - .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f) - .setDataLocalityForSsd(regionLoadPB.hasDataLocalityForSsd() ? 
- regionLoadPB.getDataLocalityForSsd() : 0.0f) - .setBlocksLocalWeight(regionLoadPB.hasBlocksLocalWeight() ? - regionLoadPB.getBlocksLocalWeight() : 0) - .setBlocksLocalWithSsdWeight(regionLoadPB.hasBlocksLocalWithSsdWeight() ? - regionLoadPB.getBlocksLocalWithSsdWeight() : 0) - .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) - .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad( - regionLoadPB.getCompactionState())) - .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) - .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs()) - .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE)) - .setReadRequestCount(regionLoadPB.getReadRequestsCount()) - .setWriteRequestCount(regionLoadPB.getWriteRequestsCount()) - .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreCount(regionLoadPB.getStores()) - .setStoreFileCount(regionLoadPB.getStorefiles()) - .setStoreRefCount(regionLoadPB.getStoreRefCount()) - .setMaxCompactedStoreFileRefCount(regionLoadPB.getMaxCompactedStoreFileRefCount()) - .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE)) - .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream() - .collect(Collectors.toMap( - (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), - ClusterStatusProtos.StoreSequenceId::getSequenceId))) - .setUncompressedStoreFileSize( - new Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE)) - .build(); + .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray()) + .setBloomFilterSize(new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE)) + .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs()) + .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs()) + .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId()) + .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f) + .setDataLocalityForSsd( + regionLoadPB.hasDataLocalityForSsd() ? regionLoadPB.getDataLocalityForSsd() : 0.0f) + .setBlocksLocalWeight( + regionLoadPB.hasBlocksLocalWeight() ? regionLoadPB.getBlocksLocalWeight() : 0) + .setBlocksLocalWithSsdWeight( + regionLoadPB.hasBlocksLocalWithSsdWeight() ? 
regionLoadPB.getBlocksLocalWithSsdWeight() : 0) + .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) + .setCompactionState( + ProtobufUtil.createCompactionStateForRegionLoad(regionLoadPB.getCompactionState())) + .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) + .setStoreFileUncompressedDataIndexSize( + new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE)) + .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs()) + .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE)) + .setReadRequestCount(regionLoadPB.getReadRequestsCount()) + .setWriteRequestCount(regionLoadPB.getWriteRequestsCount()) + .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreFileRootLevelIndexSize( + new Size(regionLoadPB.getRootIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreCount(regionLoadPB.getStores()).setStoreFileCount(regionLoadPB.getStorefiles()) + .setStoreRefCount(regionLoadPB.getStoreRefCount()) + .setMaxCompactedStoreFileRefCount(regionLoadPB.getMaxCompactedStoreFileRefCount()) + .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE)) + .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream() + .collect(Collectors.toMap( + (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), + ClusterStatusProtos.StoreSequenceId::getSequenceId))) + .setUncompressedStoreFileSize( + new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE)) + .build(); } - private static List toStoreSequenceId( - Map ids) { + private static List + toStoreSequenceId(Map ids) { return ids.entrySet().stream() - .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())) - .setSequenceId(e.getValue()) - .build()) - .collect(Collectors.toList()); + .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder() + .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())).setSequenceId(e.getValue()) + .build()) + .collect(Collectors.toList()); } public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { return ClusterStatusProtos.RegionLoad.newBuilder() - .setRegionSpecifier(HBaseProtos.RegionSpecifier - .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) - .build()) - .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() - .get(Size.Unit.KILOBYTE)) - .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) - .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) - .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) - .setDataLocality(regionMetrics.getDataLocality()) - .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) - .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() - .get(Size.Unit.KILOBYTE)) - .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) - .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) - .setReadRequestsCount(regionMetrics.getReadRequestCount()) - .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) - .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() - .get(Size.Unit.KILOBYTE)) - .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() - .get(Size.Unit.KILOBYTE)) - .setStores(regionMetrics.getStoreCount()) - 
.setStorefiles(regionMetrics.getStoreFileCount()) - .setStoreRefCount(regionMetrics.getStoreRefCount()) - .setMaxCompactedStoreFileRefCount(regionMetrics.getMaxCompactedStoreFileRefCount()) - .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) - .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) - .setStoreUncompressedSizeMB( - (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) - .build(); + .setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())).build()) + .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize().get(Size.Unit.KILOBYTE)) + .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) + .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) + .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) + .setDataLocality(regionMetrics.getDataLocality()) + .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) + .setTotalStaticIndexSizeKB( + (int) regionMetrics.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE)) + .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) + .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) + .setReadRequestsCount(regionMetrics.getReadRequestCount()) + .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) + .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)) + .setRootIndexSizeKB( + (int) regionMetrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)) + .setStores(regionMetrics.getStoreCount()).setStorefiles(regionMetrics.getStoreFileCount()) + .setStoreRefCount(regionMetrics.getStoreRefCount()) + .setMaxCompactedStoreFileRefCount(regionMetrics.getMaxCompactedStoreFileRefCount()) + .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) + .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) + .setStoreUncompressedSizeMB( + (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) + .build(); } public static RegionMetricsBuilder newBuilder(byte[] name) { @@ -161,6 +151,7 @@ public final class RegionMetricsBuilder { private long blocksLocalWithSsdWeight; private long blocksTotalWeight; private CompactionState compactionState; + private RegionMetricsBuilder(byte[] name) { this.name = name; } @@ -169,130 +160,135 @@ public final class RegionMetricsBuilder { this.storeCount = value; return this; } + public RegionMetricsBuilder setStoreFileCount(int value) { this.storeFileCount = value; return this; } + public RegionMetricsBuilder setStoreRefCount(int value) { this.storeRefCount = value; return this; } + public RegionMetricsBuilder setMaxCompactedStoreFileRefCount(int value) { this.maxCompactedStoreFileRefCount = value; return this; } + public RegionMetricsBuilder setCompactingCellCount(long value) { this.compactingCellCount = value; return this; } + public RegionMetricsBuilder setCompactedCellCount(long value) { this.compactedCellCount = value; return this; } + public RegionMetricsBuilder setStoreFileSize(Size value) { this.storeFileSize = value; return this; } + public RegionMetricsBuilder setMemStoreSize(Size value) { this.memStoreSize = value; return this; } + public RegionMetricsBuilder setStoreFileIndexSize(Size value) { this.indexSize = value; return this; } + 
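All the setters that follow return the builder, so a RegionMetrics can be assembled fluently. A minimal sketch using only setters visible in this patch; RegionMetricsBuilder is IA.Private, and the region name and numbers are made up:

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionMetricsBuilderSketch {
  public static void main(String[] args) {
    // newBuilder takes the region name as bytes; every setter returns the builder.
    RegionMetrics metrics = RegionMetricsBuilder.newBuilder(Bytes.toBytes("test-region"))
      .setStoreCount(1)
      .setStoreFileCount(3)
      .setStoreFileSize(new Size(128, Size.Unit.MEGABYTE))
      .setMemStoreSize(new Size(16, Size.Unit.MEGABYTE))
      .setReadRequestCount(1000L)
      .setWriteRequestCount(250L)
      .setDataLocality(1.0f)
      .build();

    System.out.println(Bytes.toStringBinary(metrics.getRegionName()));
    System.out.println(metrics.getStoreFileSize().get(Size.Unit.MEGABYTE));
  }
}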
public RegionMetricsBuilder setStoreFileRootLevelIndexSize(Size value) { this.rootLevelIndexSize = value; return this; } + public RegionMetricsBuilder setStoreFileUncompressedDataIndexSize(Size value) { this.uncompressedDataIndexSize = value; return this; } + public RegionMetricsBuilder setBloomFilterSize(Size value) { this.bloomFilterSize = value; return this; } + public RegionMetricsBuilder setUncompressedStoreFileSize(Size value) { this.uncompressedStoreFileSize = value; return this; } + public RegionMetricsBuilder setWriteRequestCount(long value) { this.writeRequestCount = value; return this; } + public RegionMetricsBuilder setReadRequestCount(long value) { this.readRequestCount = value; return this; } + public RegionMetricsBuilder setFilteredReadRequestCount(long value) { this.filteredReadRequestCount = value; return this; } + public RegionMetricsBuilder setCompletedSequenceId(long value) { this.completedSequenceId = value; return this; } + public RegionMetricsBuilder setStoreSequenceIds(Map value) { this.storeSequenceIds = value; return this; } + public RegionMetricsBuilder setDataLocality(float value) { this.dataLocality = value; return this; } + public RegionMetricsBuilder setLastMajorCompactionTimestamp(long value) { this.lastMajorCompactionTimestamp = value; return this; } + public RegionMetricsBuilder setDataLocalityForSsd(float value) { this.dataLocalityForSsd = value; return this; } + public RegionMetricsBuilder setBlocksLocalWeight(long value) { this.blocksLocalWeight = value; return this; } + public RegionMetricsBuilder setBlocksLocalWithSsdWeight(long value) { this.blocksLocalWithSsdWeight = value; return this; } + public RegionMetricsBuilder setBlocksTotalWeight(long value) { this.blocksTotalWeight = value; return this; } + public RegionMetricsBuilder setCompactionState(CompactionState compactionState) { this.compactionState = compactionState; return this; } public RegionMetrics build() { - return new RegionMetricsImpl(name, - storeCount, - storeFileCount, - storeRefCount, - maxCompactedStoreFileRefCount, - compactingCellCount, - compactedCellCount, - storeFileSize, - memStoreSize, - indexSize, - rootLevelIndexSize, - uncompressedDataIndexSize, - bloomFilterSize, - uncompressedStoreFileSize, - writeRequestCount, - readRequestCount, - filteredReadRequestCount, - completedSequenceId, - storeSequenceIds, - dataLocality, - lastMajorCompactionTimestamp, - dataLocalityForSsd, - blocksLocalWeight, - blocksLocalWithSsdWeight, - blocksTotalWeight, - compactionState); + return new RegionMetricsImpl(name, storeCount, storeFileCount, storeRefCount, + maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, storeFileSize, + memStoreSize, indexSize, rootLevelIndexSize, uncompressedDataIndexSize, bloomFilterSize, + uncompressedStoreFileSize, writeRequestCount, readRequestCount, filteredReadRequestCount, + completedSequenceId, storeSequenceIds, dataLocality, lastMajorCompactionTimestamp, + dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, blocksTotalWeight, + compactionState); } private static class RegionMetricsImpl implements RegionMetrics { @@ -322,32 +318,15 @@ public final class RegionMetricsBuilder { private final long blocksLocalWithSsdWeight; private final long blocksTotalWeight; private final CompactionState compactionState; - RegionMetricsImpl(byte[] name, - int storeCount, - int storeFileCount, - int storeRefCount, - int maxCompactedStoreFileRefCount, - final long compactingCellCount, - long compactedCellCount, - Size storeFileSize, - Size 
memStoreSize, - Size indexSize, - Size rootLevelIndexSize, - Size uncompressedDataIndexSize, - Size bloomFilterSize, - Size uncompressedStoreFileSize, - long writeRequestCount, - long readRequestCount, - long filteredReadRequestCount, - long completedSequenceId, - Map storeSequenceIds, - float dataLocality, - long lastMajorCompactionTimestamp, - float dataLocalityForSsd, - long blocksLocalWeight, - long blocksLocalWithSsdWeight, - long blocksTotalWeight, - CompactionState compactionState) { + + RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, int storeRefCount, + int maxCompactedStoreFileRefCount, final long compactingCellCount, long compactedCellCount, + Size storeFileSize, Size memStoreSize, Size indexSize, Size rootLevelIndexSize, + Size uncompressedDataIndexSize, Size bloomFilterSize, Size uncompressedStoreFileSize, + long writeRequestCount, long readRequestCount, long filteredReadRequestCount, + long completedSequenceId, Map storeSequenceIds, float dataLocality, + long lastMajorCompactionTimestamp, float dataLocalityForSsd, long blocksLocalWeight, + long blocksLocalWithSsdWeight, long blocksTotalWeight, CompactionState compactionState) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; this.storeFileCount = storeFileCount; @@ -508,63 +487,43 @@ public final class RegionMetricsBuilder { @Override public String toString() { - StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", - this.getStoreCount()); - Strings.appendKeyValue(sb, "storeFileCount", - this.getStoreFileCount()); - Strings.appendKeyValue(sb, "storeRefCount", - this.getStoreRefCount()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "storeCount", this.getStoreCount()); + Strings.appendKeyValue(sb, "storeFileCount", this.getStoreFileCount()); + Strings.appendKeyValue(sb, "storeRefCount", this.getStoreRefCount()); Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", this.getMaxCompactedStoreFileRefCount()); - Strings.appendKeyValue(sb, "uncompressedStoreFileSize", - this.getUncompressedStoreFileSize()); + Strings.appendKeyValue(sb, "uncompressedStoreFileSize", this.getUncompressedStoreFileSize()); Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", - this.getLastMajorCompactionTimestamp()); - Strings.appendKeyValue(sb, "storeFileSize", - this.getStoreFileSize()); + this.getLastMajorCompactionTimestamp()); + Strings.appendKeyValue(sb, "storeFileSize", this.getStoreFileSize()); if (this.getUncompressedStoreFileSize().get() != 0) { Strings.appendKeyValue(sb, "compressionRatio", - String.format("%.4f", - (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) / - (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); + String.format("%.4f", (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) + / (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); } - Strings.appendKeyValue(sb, "memStoreSize", - this.getMemStoreSize()); - Strings.appendKeyValue(sb, "readRequestCount", - this.getReadRequestCount()); - Strings.appendKeyValue(sb, "writeRequestCount", - this.getWriteRequestCount()); - Strings.appendKeyValue(sb, "rootLevelIndexSize", - this.getStoreFileRootLevelIndexSize()); + Strings.appendKeyValue(sb, "memStoreSize", this.getMemStoreSize()); + Strings.appendKeyValue(sb, "readRequestCount", this.getReadRequestCount()); + Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); + Strings.appendKeyValue(sb, "rootLevelIndexSize", this.getStoreFileRootLevelIndexSize()); 
Strings.appendKeyValue(sb, "uncompressedDataIndexSize", - this.getStoreFileUncompressedDataIndexSize()); - Strings.appendKeyValue(sb, "bloomFilterSize", - this.getBloomFilterSize()); - Strings.appendKeyValue(sb, "compactingCellCount", - this.getCompactingCellCount()); - Strings.appendKeyValue(sb, "compactedCellCount", - this.getCompactedCellCount()); + this.getStoreFileUncompressedDataIndexSize()); + Strings.appendKeyValue(sb, "bloomFilterSize", this.getBloomFilterSize()); + Strings.appendKeyValue(sb, "compactingCellCount", this.getCompactingCellCount()); + Strings.appendKeyValue(sb, "compactedCellCount", this.getCompactedCellCount()); float compactionProgressPct = Float.NaN; if (this.getCompactingCellCount() > 0) { - compactionProgressPct = ((float) this.getCompactedCellCount() / - (float) this.getCompactingCellCount()); + compactionProgressPct = + ((float) this.getCompactedCellCount() / (float) this.getCompactingCellCount()); } - Strings.appendKeyValue(sb, "compactionProgressPct", - compactionProgressPct); - Strings.appendKeyValue(sb, "completedSequenceId", - this.getCompletedSequenceId()); - Strings.appendKeyValue(sb, "dataLocality", - this.getDataLocality()); - Strings.appendKeyValue(sb, "dataLocalityForSsd", - this.getDataLocalityForSsd()); - Strings.appendKeyValue(sb, "blocksLocalWeight", - blocksLocalWeight); - Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", - blocksLocalWithSsdWeight); - Strings.appendKeyValue(sb, "blocksTotalWeight", - blocksTotalWeight); - Strings.appendKeyValue(sb, "compactionState", - compactionState); + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "completedSequenceId", this.getCompletedSequenceId()); + Strings.appendKeyValue(sb, "dataLocality", this.getDataLocality()); + Strings.appendKeyValue(sb, "dataLocalityForSsd", this.getDataLocalityForSsd()); + Strings.appendKeyValue(sb, "blocksLocalWeight", blocksLocalWeight); + Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", blocksLocalWithSsdWeight); + Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); + Strings.appendKeyValue(sb, "compactionState", compactionState); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java index 3024962ebd6..4cdb4ea2ade 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,14 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown by a region server if it will block and wait to serve a request. - * For example, the client wants to insert something to a region while the - * region is compacting. Keep variance in the passed 'msg' low because its msg is used as a key - * over in {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} - * grouping failure types. + * Thrown by a region server if it will block and wait to serve a request. For example, the client + * wants to insert something to a region while the region is compacting. 
Keep variance in the passed + * 'msg' low because its msg is used as a key over in + * {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} grouping failure + * types. */ @InterfaceAudience.Public public class RegionTooBusyException extends IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java index 6f02df2028f..4d1deebb4e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java index 9df4f893c71..46cc77c61b8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 7b8f713ddb7..2320c8e908e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Arrays; @@ -33,13 +30,13 @@ import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Objects; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** * This class is used for exporting current state of load on a RegionServer. - * - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link ServerMetrics} instead. 
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link ServerMetrics} + * instead. */ @InterfaceAudience.Public @Deprecated @@ -68,7 +65,7 @@ public class ServerLoad implements ServerMetrics { this(ServerName.valueOf("localhost,1,1"), serverLoad); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") @InterfaceAudience.Private public ServerLoad(ServerName name, ClusterStatusProtos.ServerLoad serverLoad) { this(ServerMetricsBuilder.toServerMetrics(name, serverLoad)); @@ -112,9 +109,9 @@ public class ServerLoad implements ServerMetrics { protected ClusterStatusProtos.ServerLoad serverLoad; /** - * @return number of requests since last report. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getRequestCountPerSecond} instead. + * @return number of requests since last report. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getRequestCountPerSecond} instead. */ @Deprecated public long getNumberOfRequests() { @@ -122,8 +119,7 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasNumberOfRequests() { @@ -132,8 +128,8 @@ public class ServerLoad implements ServerMetrics { /** * @return total Number of requests from the start of the region server. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getRequestCount} instead. */ @Deprecated public long getTotalNumberOfRequests() { @@ -141,8 +137,7 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasTotalNumberOfRequests() { @@ -151,8 +146,8 @@ public class ServerLoad implements ServerMetrics { /** * @return the amount of used heap, in MB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getUsedHeapSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getUsedHeapSize} instead. */ @Deprecated public int getUsedHeapMB() { @@ -160,8 +155,7 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasUsedHeapMB() { @@ -170,8 +164,8 @@ public class ServerLoad implements ServerMetrics { /** * @return the maximum allowable size of the heap, in MB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMaxHeapSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getMaxHeapSize} instead. 
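ServerLoad now only delegates to ServerMetrics, so the deprecated scalar getters map directly onto the replacement API. A rough sketch, assuming a ServerMetrics instance obtained elsewhere (typically from ClusterMetrics, not shown here):

import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.Size;

public class ServerLoadMigrationSketch {
  /** Prints ServerMetrics values that replace the deprecated ServerLoad getters. */
  static void printReplacements(ServerMetrics metrics) {
    // getNumberOfRequests()      -> getRequestCountPerSecond()
    System.out.println(metrics.getRequestCountPerSecond());
    // getTotalNumberOfRequests() -> getRequestCount()
    System.out.println(metrics.getRequestCount());
    // getUsedHeapMB()            -> getUsedHeapSize()
    System.out.println(metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE));
    // getMaxHeapMB()             -> getMaxHeapSize()
    System.out.println(metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE));
    // getNumberOfRegions()       -> size of the per-region map
    System.out.println(metrics.getRegionMetrics().size());
  }
}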
*/ @Deprecated public int getMaxHeapMB() { @@ -179,8 +173,7 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasMaxHeapMB() { @@ -188,8 +181,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStores() { @@ -197,8 +190,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefiles() { @@ -206,8 +199,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStoreUncompressedSizeMB() { @@ -215,8 +208,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileSizeInMB() { @@ -224,8 +217,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileSizeMB() { @@ -233,8 +226,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getMemstoreSizeInMB() { @@ -242,8 +235,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getMemStoreSizeMB() { @@ -251,8 +244,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileIndexSizeInMB() { @@ -261,8 +254,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. 
*/ @Deprecated public long getStorefileIndexSizeKB() { @@ -270,8 +263,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getReadRequestsCount() { @@ -279,8 +272,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getFilteredReadRequestsCount() { @@ -288,8 +281,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getWriteRequestsCount() { @@ -297,8 +290,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getRootIndexSizeKB() { @@ -306,8 +299,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getTotalStaticIndexSizeKB() { @@ -315,8 +308,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getTotalStaticBloomSizeKB() { @@ -324,8 +317,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getTotalCompactingKVs() { @@ -333,8 +326,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getCurrentCompactedKVs() { @@ -342,8 +335,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. 
*/ @Deprecated public int getNumberOfRegions() { @@ -399,8 +392,7 @@ public class ServerLoad implements ServerMetrics { } /** - * Call directly from client such as hbase shell - * @return ReplicationLoadSink + * Call directly from client such as hbase shell n */ @Override public ReplicationLoadSink getReplicationLoadSink() { @@ -412,7 +404,8 @@ public class ServerLoad implements ServerMetrics { return metrics.getRegionMetrics(); } - @Override public Map getUserMetrics() { + @Override + public Map getUserMetrics() { return metrics.getUserMetrics(); } @@ -437,15 +430,13 @@ public class ServerLoad implements ServerMetrics { } /** - * Originally, this method factored in the effect of requests going to the - * server as well. However, this does not interact very well with the current - * region rebalancing code, which only factors number of regions. For the - * interim, until we can figure out how to make rebalancing use all the info - * available, we're just going to make load purely the number of regions. - * + * Originally, this method factored in the effect of requests going to the server as well. + * However, this does not interact very well with the current region rebalancing code, which only + * factors number of regions. For the interim, until we can figure out how to make rebalancing use + * all the info available, we're just going to make load purely the number of regions. * @return load factor for this server. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getNumberOfRegions} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getNumberOfRegions} instead. */ @Deprecated public int getLoad() { @@ -457,21 +448,20 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public Map getRegionsLoad() { return getRegionMetrics().entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> new RegionLoad(e.getValue()), - (v1, v2) -> { - throw new RuntimeException("key collisions?"); - }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); + .collect(Collectors.toMap(Map.Entry::getKey, e -> new RegionLoad(e.getValue()), (v1, v2) -> { + throw new RuntimeException("key collisions?"); + }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCoprocessorNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCoprocessorNames} instead. */ @Deprecated public String[] getRegionServerCoprocessors() { @@ -479,8 +469,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCoprocessorNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCoprocessorNames} instead. */ @Deprecated public String[] getRsCoprocessors() { @@ -488,8 +478,8 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRequestCountPerSecond} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRequestCountPerSecond} instead. 
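The remaining per-server aggregates above are all deprecated in favour of getRegionMetrics(), which getRegionsLoad() itself wraps. A small sketch of iterating the per-region map directly; the summing logic is only an illustration of replacing the old per-server MB getters, not the method HBase uses internally:

import java.util.Map;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.util.Bytes;

public class PerRegionMetricsSketch {
  /** Sums store file size across regions and prints per-region request counts. */
  static double totalStoreFileSizeMB(ServerMetrics server) {
    double totalMb = 0;
    for (Map.Entry<byte[], RegionMetrics> entry : server.getRegionMetrics().entrySet()) {
      RegionMetrics region = entry.getValue();
      totalMb += region.getStoreFileSize().get(Size.Unit.MEGABYTE);
      System.out.println(Bytes.toStringBinary(entry.getKey()) + " -> "
        + region.getReadRequestCount() + " reads, " + region.getWriteRequestCount() + " writes");
    }
    return totalMb;
  }
}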
*/ @Deprecated public double getRequestsPerSecond() { @@ -509,30 +499,29 @@ public class ServerLoad implements ServerMetrics { Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores)); Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles)); Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - Integer.valueOf(this.storeUncompressedSizeMB)); + Integer.valueOf(this.storeUncompressedSizeMB)); Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB)); if (this.storeUncompressedSizeMB != 0) { - Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", - (float) this.storefileSizeMB / (float) this.storeUncompressedSizeMB)); + Strings.appendKeyValue(sb, "compressionRatio", + String.format("%.4f", (float) this.storefileSizeMB / (float) this.storeUncompressedSizeMB)); } Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB)); - Strings.appendKeyValue(sb, "storefileIndexSizeKB", - Long.valueOf(this.storefileIndexSizeKB)); + Strings.appendKeyValue(sb, "storefileIndexSizeKB", Long.valueOf(this.storefileIndexSizeKB)); Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount)); Strings.appendKeyValue(sb, "filteredReadRequestsCount", - Long.valueOf(this.filteredReadRequestsCount)); + Long.valueOf(this.filteredReadRequestsCount)); Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount)); Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB)); Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", - Integer.valueOf(this.totalStaticIndexSizeKB)); + Integer.valueOf(this.totalStaticIndexSizeKB)); Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", - Integer.valueOf(this.totalStaticBloomSizeKB)); + Integer.valueOf(this.totalStaticBloomSizeKB)); Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs)); Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs)); float compactionProgressPct = Float.NaN; if (this.totalCompactingKVs > 0) { compactionProgressPct = - Float.valueOf((float) this.currentCompactedKVs / this.totalCompactingKVs); + Float.valueOf((float) this.currentCompactedKVs / this.totalCompactingKVs); } Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); @@ -544,17 +533,16 @@ public class ServerLoad implements ServerMetrics { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link ServerMetricsBuilder#of(ServerName)} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link ServerMetricsBuilder#of(ServerName)} instead. */ @Deprecated - public static final ServerLoad EMPTY_SERVERLOAD = - new ServerLoad(ServerName.valueOf("localhost,1,1"), - ClusterStatusProtos.ServerLoad.newBuilder().build()); + public static final ServerLoad EMPTY_SERVERLOAD = new ServerLoad( + ServerName.valueOf("localhost,1,1"), ClusterStatusProtos.ServerLoad.newBuilder().build()); /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getReportTimestamp} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getReportTimestamp} instead. 
*/ @Deprecated public long getReportTime() { @@ -563,11 +551,10 @@ public class ServerLoad implements ServerMetrics { @Override public int hashCode() { - return Objects - .hashCode(stores, storefiles, storeUncompressedSizeMB, storefileSizeMB, memstoreSizeMB, - storefileIndexSizeKB, readRequestsCount, filteredReadRequestsCount, writeRequestsCount, - rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, - currentCompactedKVs); + return Objects.hashCode(stores, storefiles, storeUncompressedSizeMB, storefileSizeMB, + memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, filteredReadRequestsCount, + writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, + totalCompactingKVs, currentCompactedKVs); } @Override @@ -576,16 +563,16 @@ public class ServerLoad implements ServerMetrics { if (other instanceof ServerLoad) { ServerLoad sl = ((ServerLoad) other); return stores == sl.stores && storefiles == sl.storefiles - && storeUncompressedSizeMB == sl.storeUncompressedSizeMB - && storefileSizeMB == sl.storefileSizeMB && memstoreSizeMB == sl.memstoreSizeMB - && storefileIndexSizeKB == sl.storefileIndexSizeKB - && readRequestsCount == sl.readRequestsCount - && filteredReadRequestsCount == sl.filteredReadRequestsCount - && writeRequestsCount == sl.writeRequestsCount && rootIndexSizeKB == sl.rootIndexSizeKB - && totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB - && totalStaticBloomSizeKB == sl.totalStaticBloomSizeKB - && totalCompactingKVs == sl.totalCompactingKVs - && currentCompactedKVs == sl.currentCompactedKVs; + && storeUncompressedSizeMB == sl.storeUncompressedSizeMB + && storefileSizeMB == sl.storefileSizeMB && memstoreSizeMB == sl.memstoreSizeMB + && storefileIndexSizeKB == sl.storefileIndexSizeKB + && readRequestsCount == sl.readRequestsCount + && filteredReadRequestsCount == sl.filteredReadRequestsCount + && writeRequestsCount == sl.writeRequestsCount && rootIndexSizeKB == sl.rootIndexSizeKB + && totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB + && totalStaticBloomSizeKB == sl.totalStaticBloomSizeKB + && totalCompactingKVs == sl.totalCompactingKVs + && currentCompactedKVs == sl.currentCompactedKVs; } return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java index 893534cba5f..46678ac2460 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -82,8 +82,7 @@ public interface ServerMetrics { Map> getReplicationLoadSourceMap(); /** - * Call directly from client such as hbase shell - * @return ReplicationLoadSink + * Call directly from client such as hbase shell n */ @Nullable ReplicationLoadSink getReplicationLoadSink(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index 89cee9ffaf6..72f78220c1a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,18 +6,18 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -37,6 +36,7 @@ import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -62,12 +62,12 @@ public final class ServerMetricsBuilder { } public static ServerMetrics toServerMetrics(ServerName serverName, - ClusterStatusProtos.ServerLoad serverLoadPB) { + ClusterStatusProtos.ServerLoad serverLoadPB) { return toServerMetrics(serverName, 0, "0.0.0", serverLoadPB); } public static ServerMetrics toServerMetrics(ServerName serverName, int versionNumber, - String version, ClusterStatusProtos.ServerLoad serverLoadPB) { + String version, ClusterStatusProtos.ServerLoad serverLoadPB) { return ServerMetricsBuilder.newBuilder(serverName) .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests()) .setRequestCount(serverLoadPB.getTotalNumberOfRequests()) @@ -78,48 +78,43 @@ public final class ServerMetricsBuilder { .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream() .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList())) - .setUserMetrics(serverLoadPB.getUserLoadsList().stream() - .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) + .setUserMetrics(serverLoadPB.getUserLoadsList().stream() + .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + 
.map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() ? ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) : null) - .setTasks(serverLoadPB.getTasksList().stream() - .map(ProtobufUtil::getServerTask).collect(Collectors.toList())) + .setTasks(serverLoadPB.getTasksList().stream().map(ProtobufUtil::getServerTask) + .collect(Collectors.toList())) .setReportTimestamp(serverLoadPB.getReportEndTime()) .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber) .setVersion(version).build(); } public static List toCoprocessor(Collection names) { - return names.stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) - .collect(Collectors.toList()); + return names.stream().map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList()); } public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) { ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder() - .setNumberOfRequests(metrics.getRequestCountPerSecond()) - .setTotalNumberOfRequests(metrics.getRequestCount()) - .setInfoServerPort(metrics.getInfoServerPort()) - .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE)) - .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE)) - .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())) - .addAllRegionLoads( - metrics.getRegionMetrics().values().stream().map(RegionMetricsBuilder::toRegionLoad) - .collect(Collectors.toList())) - .addAllUserLoads( - metrics.getUserMetrics().values().stream().map(UserMetricsBuilder::toUserMetrics) - .collect(Collectors.toList())) - .addAllReplLoadSource( - metrics.getReplicationLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) - .addAllTasks( - metrics.getTasks().stream().map(ProtobufUtil::toServerTask) - .collect(Collectors.toList())) - .setReportStartTime(metrics.getLastReportTimestamp()) - .setReportEndTime(metrics.getReportTimestamp()); + .setNumberOfRequests(metrics.getRequestCountPerSecond()) + .setTotalNumberOfRequests(metrics.getRequestCount()) + .setInfoServerPort(metrics.getInfoServerPort()) + .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE)) + .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE)) + .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())) + .addAllRegionLoads(metrics.getRegionMetrics().values().stream() + .map(RegionMetricsBuilder::toRegionLoad).collect(Collectors.toList())) + .addAllUserLoads(metrics.getUserMetrics().values().stream() + .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) + .addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + .addAllTasks( + metrics.getTasks().stream().map(ProtobufUtil::toServerTask).collect(Collectors.toList())) + .setReportStartTime(metrics.getLastReportTimestamp()) + .setReportEndTime(metrics.getReportTimestamp()); if (metrics.getReplicationLoadSink() != null) { builder.setReplLoadSink(ProtobufUtil.toReplicationLoadSink(metrics.getReplicationLoadSink())); } @@ -228,23 +223,9 @@ public final class ServerMetricsBuilder { } public ServerMetrics build() { - return new ServerMetricsImpl( - serverName, - versionNumber, - version, - requestCountPerSecond, - requestCount, - usedHeapSize, - maxHeapSize, - infoServerPort, - sources, - 
sink, - regionStatus, - coprocessorNames, - reportTimestamp, - lastReportTimestamp, - userMetrics, - tasks); + return new ServerMetricsImpl(serverName, versionNumber, version, requestCountPerSecond, + requestCount, usedHeapSize, maxHeapSize, infoServerPort, sources, sink, regionStatus, + coprocessorNames, reportTimestamp, lastReportTimestamp, userMetrics, tasks); } private static class ServerMetricsImpl implements ServerMetrics { @@ -267,11 +248,10 @@ public final class ServerMetricsBuilder { private final List tasks; ServerMetricsImpl(ServerName serverName, int versionNumber, String version, - long requestCountPerSecond, long requestCount, Size usedHeapSize, Size maxHeapSize, - int infoServerPort, List sources, ReplicationLoadSink sink, - Map regionStatus, Set coprocessorNames, - long reportTimestamp, long lastReportTimestamp, Map userMetrics, - List tasks) { + long requestCountPerSecond, long requestCount, Size usedHeapSize, Size maxHeapSize, + int infoServerPort, List sources, ReplicationLoadSink sink, + Map regionStatus, Set coprocessorNames, long reportTimestamp, + long lastReportTimestamp, Map userMetrics, List tasks) { this.serverName = Preconditions.checkNotNull(serverName); this.versionNumber = versionNumber; this.version = version; @@ -284,7 +264,7 @@ public final class ServerMetricsBuilder { this.sink = sink; this.regionStatus = Preconditions.checkNotNull(regionStatus); this.userMetrics = Preconditions.checkNotNull(userMetrics); - this.coprocessorNames =Preconditions.checkNotNull(coprocessorNames); + this.coprocessorNames = Preconditions.checkNotNull(coprocessorNames); this.reportTimestamp = reportTimestamp; this.lastReportTimestamp = lastReportTimestamp; this.tasks = tasks; @@ -335,11 +315,11 @@ public final class ServerMetricsBuilder { } @Override - public Map> getReplicationLoadSourceMap(){ - Map> sourcesMap = new HashMap<>(); - for(ReplicationLoadSource loadSource : sources){ - sourcesMap.computeIfAbsent(loadSource.getPeerID(), - peerId -> new ArrayList<>()).add(loadSource); + public Map> getReplicationLoadSourceMap() { + Map> sourcesMap = new HashMap<>(); + for (ReplicationLoadSource loadSource : sources) { + sourcesMap.computeIfAbsent(loadSource.getPeerID(), peerId -> new ArrayList<>()) + .add(loadSource); } return sourcesMap; } @@ -401,8 +381,8 @@ public final class ServerMetricsBuilder { storeFileCount += r.getStoreFileCount(); storeRefCount += r.getStoreRefCount(); int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount(); - maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, - currentMaxCompactedStoreFileRefCount); + maxCompactedStoreFileRefCount = + Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); @@ -416,21 +396,20 @@ public final class ServerMetricsBuilder { compactingCellCount += r.getCompactingCellCount(); } StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", - Double.valueOf(getRequestCountPerSecond())); + Double.valueOf(getRequestCountPerSecond())); Strings.appendKeyValue(sb, "numberOfOnlineRegions", - Integer.valueOf(getRegionMetrics().size())); + Integer.valueOf(getRegionMetrics().size())); Strings.appendKeyValue(sb, "usedHeapMB", getUsedHeapSize()); Strings.appendKeyValue(sb, "maxHeapMB", getMaxHeapSize()); Strings.appendKeyValue(sb, 
"numberOfStores", storeCount); Strings.appendKeyValue(sb, "numberOfStorefiles", storeFileCount); Strings.appendKeyValue(sb, "storeRefCount", storeRefCount); - Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", - maxCompactedStoreFileRefCount); + Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", maxCompactedStoreFileRefCount); Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", uncompressedStoreFileSizeMB); Strings.appendKeyValue(sb, "storefileSizeMB", storeFileSizeMB); if (uncompressedStoreFileSizeMB != 0) { - Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", - (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); + Strings.appendKeyValue(sb, "compressionRatio", + String.format("%.4f", (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); } Strings.appendKeyValue(sb, "memstoreSizeMB", memStoreSizeMB); Strings.appendKeyValue(sb, "readRequestsCount", readRequestsCount); @@ -443,8 +422,7 @@ public final class ServerMetricsBuilder { Strings.appendKeyValue(sb, "currentCompactedKVs", compactedCellCount); float compactionProgressPct = Float.NaN; if (compactingCellCount > 0) { - compactionProgressPct = - Float.valueOf((float) compactedCellCount / compactingCellCount); + compactionProgressPct = Float.valueOf((float) compactedCellCount / compactingCellCount); } Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); Strings.appendKeyValue(sb, "coprocessors", getCoprocessorNames()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java index e791093e43d..cd6d41169bb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java index d4937373789..3ecd0c16cd9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +33,8 @@ public final class ServerTaskBuilder { private long startTime; private long completionTime; - private ServerTaskBuilder() { } + private ServerTaskBuilder() { + } private static final class ServerTaskImpl implements ServerTask { @@ -44,7 +45,7 @@ public final class ServerTaskBuilder { private final long completionTime; private ServerTaskImpl(final String description, final String status, - final ServerTask.State state, final long startTime, final long completionTime) { + final ServerTask.State state, final long startTime, final long completionTime) { this.description = description; this.status = status; this.state = state; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java index 0e7716a0a61..c248849e363 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.math.BigDecimal; @@ -24,8 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * It is used to represent the size with different units. - * This class doesn't serve for the precise computation. + * It is used to represent the size with different units. This class doesn't serve for the precise + * computation. */ @InterfaceAudience.Public public final class Size implements Comparable { @@ -40,6 +40,7 @@ public final class Size implements Comparable { MEGABYTE(97, "MB"), KILOBYTE(96, "KB"), BYTE(95, "B"); + private final int orderOfSize; private final String simpleName; @@ -91,7 +92,6 @@ public final class Size implements Comparable { /** * get the value which is converted to specified unit. 
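The Size hunk above only reflows the class Javadoc, but the behaviour it documents (a value plus a unit, with approximate cross-unit conversion via get(Unit)) is easy to show. A small usage sketch, assuming nothing beyond the public constructor and the Unit enum visible in the hunk; the chosen values are illustrative.

import org.apache.hadoop.hbase.Size;

public class SizeSketch {
  public static void main(String[] args) {
    // A heap size expressed in megabytes.
    Size heap = new Size(2048, Size.Unit.MEGABYTE);
    // get(Unit) converts to the requested unit; the class is not meant for precise computation.
    System.out.println(heap.get(Size.Unit.GIGABYTE)); // 2.0
    System.out.println(heap.get(Size.Unit.KILOBYTE)); // 2097152.0
  }
}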
- * * @param unit size unit * @return the converted value */ @@ -146,7 +146,7 @@ public final class Size implements Comparable { return true; } if (obj instanceof Size) { - return compareTo((Size)obj) == 0; + return compareTo((Size) obj) == 0; } return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java index 9d67a37695c..ae6721813a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java index a113f7c67bf..98e958aa65b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class TableInfoMissingException extends HBaseIOException { } /** - * @param message the message for this exception + * @param message the message for this exception * @param throwable the {@link Throwable} to use for this exception */ public TableInfoMissingException(String message, Throwable throwable) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java index 7e5046538ab..54f44405c58 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java index 90c015674ca..14720811ca1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public class TableNotEnabledException extends DoNotRetryIOException { private static final long serialVersionUID = 262144L; + /** default constructor */ public TableNotEnabledException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java index ae114fed0e6..416d8601fc3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java index 850cd960062..dfe5f682f38 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ public class UnknownRegionException extends DoNotRetryRegionException { /** * Constructs a new UnknownRegionException with the specified detail message. - * * @param message the detail message */ public UnknownRegionException(String message) { @@ -39,9 +37,8 @@ public class UnknownRegionException extends DoNotRetryRegionException { /** * Constructs a new UnknownRegionException with the specified detail message and cause. - * * @param message the detail message - * @param cause the cause of the exception + * @param cause the cause of the exception */ public UnknownRegionException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java index 14afb977b5d..fec8e57bee2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a region server is passed an unknown scanner ID. - * This usually means that the client has taken too long between checkins and so the - * scanner lease on the server-side has expired OR the server-side is closing - * down and has cancelled all leases. + * Thrown if a region server is passed an unknown scanner ID. This usually means that the client has + * taken too long between checkins and so the scanner lease on the server-side has expired OR the + * server-side is closing down and has cancelled all leases. */ @InterfaceAudience.Public public class UnknownScannerException extends DoNotRetryIOException { @@ -42,7 +40,7 @@ public class UnknownScannerException extends DoNotRetryIOException { } /** - * @param message the message for this exception + * @param message the message for this exception * @param exception the exception to grab data from */ public UnknownScannerException(String message, Exception exception) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java index 6c2ba07cc3d..2710aa9be27 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulates per-user load metrics. - */ + * Encapsulates per-user load metrics. + */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface UserMetrics { @@ -60,8 +56,8 @@ public interface UserMetrics { long getWriteRequestCount(); /** - * @return the number of write requests and read requests and coprocessor - * service requests made by the user + * @return the number of write requests and read requests and coprocessor service requests made by + * the user */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java index 70d28883c26..ab63f19fec8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; @@ -35,23 +31,24 @@ public final class UserMetricsBuilder { public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) { UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes()); - userLoad.getClientMetricsList().stream().map( - clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), - clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), - clientMetrics.getFilteredRequestsCount())).forEach(builder::addClientMetris); + userLoad.getClientMetricsList().stream() + .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), + clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), + clientMetrics.getFilteredRequestsCount())) + .forEach(builder::addClientMetris); return builder.build(); } public static ClusterStatusProtos.UserLoad toUserMetrics(UserMetrics userMetrics) { ClusterStatusProtos.UserLoad.Builder builder = - ClusterStatusProtos.UserLoad.newBuilder().setUserName(userMetrics.getNameAsString()); - userMetrics.getClientMetrics().values().stream().map( - clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() - .setHostName(clientMetrics.getHostName()) - .setWriteRequestsCount(clientMetrics.getWriteRequestsCount()) - .setReadRequestsCount(clientMetrics.getReadRequestsCount()) - .setFilteredRequestsCount(clientMetrics.getFilteredReadRequestsCount()).build()) - .forEach(builder::addClientMetrics); + ClusterStatusProtos.UserLoad.newBuilder().setUserName(userMetrics.getNameAsString()); + userMetrics.getClientMetrics().values().stream() + .map(clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() + .setHostName(clientMetrics.getHostName()) + .setWriteRequestsCount(clientMetrics.getWriteRequestsCount()) + .setReadRequestsCount(clientMetrics.getReadRequestsCount()) + .setFilteredRequestsCount(clientMetrics.getFilteredReadRequestsCount()).build()) + .forEach(builder::addClientMetrics); return builder.build(); } @@ -59,9 +56,9 @@ public final class UserMetricsBuilder { return new UserMetricsBuilder(name); } - private final byte[] name; private Map clientMetricsMap = new HashMap<>(); + private UserMetricsBuilder(byte[] name) { this.name = name; } @@ -82,26 +79,30 @@ public final class UserMetricsBuilder { private final long writeRequestCount; public ClientMetricsImpl(String hostName, long readRequest, long writeRequest, - long filteredReadRequestsCount) { + long filteredReadRequestsCount) { this.hostName = hostName; this.readRequestCount = readRequest; this.writeRequestCount = writeRequest; this.filteredReadRequestsCount = filteredReadRequestsCount; } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestCount; } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestCount; } - @Override public long getFilteredReadRequestsCount() { + @Override + public long getFilteredReadRequestsCount() { return filteredReadRequestsCount; } } @@ -115,33 +116,38 @@ public final class UserMetricsBuilder { this.clientMetricsMap = clientMetricsMap; } - @Override public byte[] getUserName() { + @Override + public byte[] getUserName() { return name; } - @Override 
public long getReadRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getReadRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()).reduce(0L, + Long::sum); } - @Override public long getWriteRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getWriteRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()).reduce(0L, + Long::sum); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return this.clientMetricsMap; } - @Override public long getFilteredReadRequests() { + @Override + public long getFilteredReadRequests() { return clientMetricsMap.values().stream().map(c -> c.getFilteredReadRequestsCount()) - .reduce(0L, Long::sum); + .reduce(0L, Long::sum); } @Override public String toString() { - StringBuilder sb = Strings - .appendKeyValue(new StringBuilder(), "readRequestCount", this.getReadRequestCount()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "readRequestCount", this.getReadRequestCount()); Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); Strings.appendKeyValue(sb, "filteredReadRequestCount", this.getFilteredReadRequests()); return sb.toString(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java index 4dc44b4c3c6..f361d43f61d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -42,8 +40,7 @@ public class ZooKeeperConnectionException extends IOException { /** * Constructor taking another exception. - * - * @param message the message for this exception + * @param message the message for this exception * @param exception the exception to grab data from */ public ZooKeeperConnectionException(String message, Exception exception) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java index 92b04643625..48cec12f43c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.yetus.audience.InterfaceAudience; /** * Helper class for custom client scanners. 
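The UserMetrics and UserMetricsBuilder hunks above are pure reformatting, but they document a small aggregation API: per-user read and write counts summed across client hosts, with getRequestCount() defined as reads plus writes. A hedged sketch of reading those numbers from a ServerMetrics instance; the helper class and method names are illustrative.

import java.util.Map;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.UserMetrics;

public class UserLoadSketch {
  static void printPerUserLoad(ServerMetrics server) {
    for (Map.Entry<byte[], UserMetrics> entry : server.getUserMetrics().entrySet()) {
      UserMetrics user = entry.getValue();
      // getRequestCount() is the default method shown above: read requests plus write requests.
      System.out.println(user.getNameAsString() + " reads=" + user.getReadRequestCount()
        + " writes=" + user.getWriteRequestCount() + " total=" + user.getRequestCount());
    }
  }
}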
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java index 9e33a12af6b..bb44defbac6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,10 +27,11 @@ abstract class AbstractResponse { public enum ResponseType { - SINGLE (0), - MULTI (1); + SINGLE(0), + MULTI(1); - ResponseType(int value) {} + ResponseType(int value) { + } } public abstract ResponseType type(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 54138d30516..bc1febe3803 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -222,7 +222,7 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") Set getParsedServers() { return addr2Stub.keySet(); } @@ -244,8 +244,7 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry . call( (c, s, d) -> s.getMetaRegionLocations(c, GetMetaRegionLocationsRequest.getDefaultInstance(), d), - r -> r.getMetaLocationsCount() != 0, - "getMetaLocationsCount") + r -> r.getMetaLocationsCount() != 0, "getMetaLocationsCount") .thenApply(AbstractRpcBasedConnectionRegistry::transformMetaRegionLocations), getClass().getSimpleName() + ".getMetaRegionLocations"); } @@ -265,7 +264,7 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry public CompletableFuture getActiveMaster() { return tracedFuture( () -> this - .call( + . call( (c, s, d) -> s.getActiveMaster(c, GetActiveMasterRequest.getDefaultInstance(), d), GetActiveMasterResponse::hasServerName, "getActiveMaster()") .thenApply(resp -> ProtobufUtil.toServerName(resp.getServerName())), diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java index 4496a9e9855..6d57764491b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,9 +21,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; /** - * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by - * {@link Table#batch} to associate the action with it's region and maintain - * the index from the original request. 
+ * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by + * {@link Table#batch} to associate the action with it's region and maintain the index from the + * original request. */ @InterfaceAudience.Private public class Action implements Comparable { @@ -46,7 +45,7 @@ public class Action implements Comparable { /** * Creates an action for a particular replica from original action. - * @param action Original action. + * @param action Original action. * @param replicaId Replica id for the new action. */ public Action(Action action, int replicaId) { @@ -76,7 +75,9 @@ public class Action implements Comparable { return replicaId; } - public int getPriority() { return priority; } + public int getPriority() { + return priority; + } @Override public int compareTo(Action other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 0be8173dcdd..8279a8efa2f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,11 +72,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and - * call {@link #close()} when done. - *
<p>
    Admin can be used to create, drop, list, enable and disable and otherwise modify tables, - * as well as perform other administrative operations. - * + * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and call + * {@link #close()} when done. + *
<p>
    + * Admin can be used to create, drop, list, enable and disable and otherwise modify tables, as well + * as perform other administrative operations. * @see ConnectionFactory * @see Connection * @see Table @@ -126,11 +126,10 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables. - * * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors()}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors()}. * @see #listTableDescriptors() */ @Deprecated @@ -138,7 +137,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables. - * * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ @@ -146,13 +144,12 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables that match the given pattern. - * * @param pattern The compiled regular expression to match against * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTables() - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors(java.util.regex.Pattern)}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors(java.util.regex.Pattern)}. * @see #listTableDescriptors(Pattern) */ @Deprecated @@ -160,7 +157,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables that match the given pattern. - * * @param pattern The compiled regular expression to match against * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs @@ -172,7 +168,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables matching the given regular expression. - * * @param regex The regular expression to match against * @return a list of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs @@ -185,50 +180,44 @@ public interface Admin extends Abortable, Closeable { /** * List all the tables matching the given pattern. - * - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables false to match only against userspace tables * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTables() - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors(java.util.regex.Pattern, boolean)}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors(java.util.regex.Pattern, boolean)}. * @see #listTableDescriptors(java.util.regex.Pattern, boolean) */ @Deprecated - HTableDescriptor[] listTables(Pattern pattern, boolean includeSysTables) - throws IOException; + HTableDescriptor[] listTables(Pattern pattern, boolean includeSysTables) throws IOException; /** * List all the tables matching the given pattern. 
- * - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables false to match only against userspace tables * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTables() */ List listTableDescriptors(Pattern pattern, boolean includeSysTables) - throws IOException; + throws IOException; /** * List all the tables matching the given pattern. - * - * @param regex The regular expression to match against + * @param regex The regular expression to match against * @param includeSysTables false to match only against userspace tables * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTables(java.util.regex.Pattern, boolean) - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors(Pattern, boolean)}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors(Pattern, boolean)}. */ @Deprecated - HTableDescriptor[] listTables(String regex, boolean includeSysTables) - throws IOException; + HTableDescriptor[] listTables(String regex, boolean includeSysTables) throws IOException; /** * List all of the names of userspace tables. - * * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ @@ -257,17 +246,16 @@ public interface Admin extends Abortable, Closeable { /** * List all of the names of userspace tables. - * @param pattern The regular expression to match against + * @param pattern The regular expression to match against * @param includeSysTables false to match only against userspace tables * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ - TableName[] listTableNames(Pattern pattern, boolean includeSysTables) - throws IOException; + TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException; /** * List all of the names of userspace tables. - * @param regex The regular expression to match against + * @param regex The regular expression to match against * @param includeSysTables false to match only against userspace tables * @return TableName[] table names * @throws IOException if a remote or network exception occurs @@ -275,81 +263,87 @@ public interface Admin extends Abortable, Closeable { * {@link #listTableNames(Pattern, boolean)} instead. */ @Deprecated - TableName[] listTableNames(String regex, boolean includeSysTables) - throws IOException; + TableName[] listTableNames(String regex, boolean includeSysTables) throws IOException; /** * Get a table descriptor. - * * @param tableName as a {@link TableName} * @return the read-only tableDescriptor * @throws org.apache.hadoop.hbase.TableNotFoundException - * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #getDescriptor(TableName)}. + * @throws IOException if a remote or network exception occurs + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #getDescriptor(TableName)}. */ @Deprecated HTableDescriptor getTableDescriptor(TableName tableName) - throws TableNotFoundException, IOException; + throws TableNotFoundException, IOException; /** * Get a table descriptor. 
- * * @param tableName as a {@link TableName} * @return the tableDescriptor * @throws org.apache.hadoop.hbase.TableNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs */ - TableDescriptor getDescriptor(TableName tableName) - throws TableNotFoundException, IOException; + TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException; /** * Creates a new table. Synchronous operation. - * * @param desc table descriptor for table - * @throws IllegalArgumentException if the table name is reserved + * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). - * @throws IOException if a remote or network exception occurs + * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * concurrent threads, the table may + * have been created between + * test-for-existence and + * attempt-at-creation). + * @throws IOException if a remote or network exception + * occurs */ default void createTable(TableDescriptor desc) throws IOException { get(createTableAsync(desc), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** - * Creates a new table with the specified number of regions. The start key specified will become + * Creates a new table with the specified number of regions. The start key specified will become * the end key of the first region of the table, and the end key specified will become the start * key of the last region of the table (the first region has a null start key and the last region * has a null end key). BigInteger math will be used to divide the key range specified into enough * segments to make the required number of total regions. Synchronous operation. - * - * @param desc table descriptor for table - * @param startKey beginning of key range - * @param endKey end of key range + * @param desc table descriptor for table + * @param startKey beginning of key range + * @param endKey end of key range * @param numRegions the total number of regions to create - * @throws IllegalArgumentException if the table name is reserved - * @throws IOException if a remote or network exception occurs + * @throws IllegalArgumentException if the table name is reserved + * @throws IOException if a remote or network exception + * occurs * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). + * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * concurrent threads, the table may + * have been created between + * test-for-existence and + * attempt-at-creation). */ void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) - throws IOException; + throws IOException; /** * Creates a new table with an initial set of empty regions defined by the specified split keys. * The total number of regions created will be the number of split keys plus one. Synchronous * operation. Note : Avoid passing empty split key. 
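The Admin hunks above reflow the Javadoc for the table-listing and descriptor-lookup methods, including the deprecation of getTableDescriptor(TableName) in favour of getDescriptor(TableName). A short sketch of the non-deprecated calls; the table-name pattern and helper class name are assumptions for illustration.

import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorLookupSketch {
  static void describeMatching(Admin admin) throws IOException {
    // includeSysTables = false restricts the listing to userspace tables, as documented above.
    for (TableName name : admin.listTableNames(Pattern.compile("demo_.*"), false)) {
      TableDescriptor td = admin.getDescriptor(name);
      System.out.println(td.getTableName() + " families=" + td.getColumnFamilies().length);
    }
  }
}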
- * - * @param desc table descriptor for table + * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table - * @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated - * and if the split key has empty byte array. + * @throws IllegalArgumentException if the table name is reserved, if the + * split keys are repeated and if the + * split key has empty byte array. * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). - * @throws IOException if a remote or network exception occurs + * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * concurrent threads, the table may + * have been created between + * test-for-existence and + * attempt-at-creation). + * @throws IOException if a remote or network exception + * occurs */ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException { get(createTableAsync(desc, splitKeys), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); @@ -378,7 +372,7 @@ public interface Admin extends Abortable, Closeable { *

    * Throws IllegalArgumentException Bad table name, if the split keys are repeated and if the split * key has empty byte array. - * @param desc table descriptor for table + * @param desc table descriptor for table * @param splitKeys keys to check if the table has been created with all split keys * @throws IOException if a remote or network exception occurs * @return the result of the async creation. You can use Future.get(long, TimeUnit) to wait on the @@ -396,62 +390,53 @@ public interface Admin extends Abortable, Closeable { } /** - * Deletes the table but does not block and wait for it to be completely removed. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Deletes the table but does not block and wait for it to be completely removed. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async delete. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future deleteTableAsync(TableName tableName) throws IOException; /** * Deletes tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(Pattern)} - * and {@link #deleteTable(org.apache.hadoop.hbase.TableName)} - * + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(Pattern)} and + * {@link #deleteTable(org.apache.hadoop.hbase.TableName)} * @param regex The regular expression to match table names against - * @return Table descriptors for tables that couldn't be deleted. - * The return htds are read-only + * @return Table descriptors for tables that couldn't be deleted. The return htds are read-only * @throws IOException if a remote or network exception occurs * @see #deleteTables(java.util.regex.Pattern) * @see #deleteTable(org.apache.hadoop.hbase.TableName) - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(Pattern)} - * and {@link #deleteTable(TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using {@link #listTableDescriptors(Pattern)} and + * {@link #deleteTable(TableName)} */ @Deprecated HTableDescriptor[] deleteTables(String regex) throws IOException; /** * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(java.util.regex.Pattern)} and + * carefully, there is no prompting and the effect is immediate. 
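The hunks above reflow the Javadoc for createTableAsync(TableDescriptor, byte[][]) and deleteTableAsync(TableName), both of which return a Future that the caller waits on with Future.get(long, TimeUnit). A hedged sketch of that pattern; the table name, column family, split keys and timeout are illustrative values, not taken from this patch.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncTableLifecycleSketch {
  static void createAndDrop(Admin admin) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_presplit"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
    // Three split keys => four initial regions, per the createTable Javadoc above.
    byte[][] splits = { Bytes.toBytes("b"), Bytes.toBytes("m"), Bytes.toBytes("t") };
    Future<Void> create = admin.createTableAsync(desc, splits);
    // Bounded wait, as documented; may throw ExecutionException or TimeoutException.
    create.get(60, TimeUnit.SECONDS);

    // A table must be disabled before it can be deleted.
    admin.disableTable(desc.getTableName());
    Future<Void> delete = admin.deleteTableAsync(desc.getTableName());
    delete.get(60, TimeUnit.SECONDS);
  }
}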
Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and * {@link #deleteTable(org.apache.hadoop.hbase.TableName)} - * * @param pattern The pattern to match table names against - * @return Table descriptors for tables that couldn't be deleted - * The return htds are read-only + * @return Table descriptors for tables that couldn't be deleted The return htds are read-only * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(java.util.regex.Pattern)} - * and {@link #deleteTable(TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and + * {@link #deleteTable(TableName)} */ @Deprecated HTableDescriptor[] deleteTables(Pattern pattern) throws IOException; /** - * Truncate a table. - * Synchronous operation. - * - * @param tableName name of table to truncate + * Truncate a table. Synchronous operation. + * @param tableName name of table to truncate * @param preserveSplits true if the splits should be preserved * @throws IOException if a remote or network exception occurs */ @@ -464,14 +449,13 @@ public interface Admin extends Abortable, Closeable { * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param tableName name of table to delete + * @param tableName name of table to delete * @param preserveSplits true if the splits should be preserved * @throws IOException if a remote or network exception occurs * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on the * operation to complete. */ - Future truncateTableAsync(TableName tableName, boolean preserveSplits) - throws IOException; + Future truncateTableAsync(TableName tableName, boolean preserveSplits) throws IOException; /** * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)} @@ -479,8 +463,8 @@ public interface Admin extends Abortable, Closeable { * disabled state for it to be enabled. * @param tableName name of the table * @throws IOException if a remote or network exception occurs There could be couple types of - * IOException TableNotFoundException means the table doesn't exist. - * TableNotDisabledException means the table isn't in disabled state. + * IOException TableNotFoundException means the table doesn't exist. + * TableNotDisabledException means the table isn't in disabled state. * @see #isTableEnabled(org.apache.hadoop.hbase.TableName) * @see #disableTable(org.apache.hadoop.hbase.TableName) * @see #enableTableAsync(org.apache.hadoop.hbase.TableName) @@ -490,67 +474,59 @@ public interface Admin extends Abortable, Closeable { } /** - * Enable the table but does not block and wait for it to be completely enabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Enable the table but does not block and wait for it to be completely enabled. 
You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async enable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async enable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future enableTableAsync(TableName tableName) throws IOException; /** * Enable tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(Pattern)} and {@link #enableTable(org.apache.hadoop.hbase.TableName)} - * + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(Pattern)} and + * {@link #enableTable(org.apache.hadoop.hbase.TableName)} * @param regex The regular expression to match table names against * @throws IOException if a remote or network exception occurs - * @return Table descriptors for tables that couldn't be enabled. - * The return HTDs are read-only. + * @return Table descriptors for tables that couldn't be enabled. The return HTDs are read-only. * @see #enableTables(java.util.regex.Pattern) * @see #enableTable(org.apache.hadoop.hbase.TableName) - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(Pattern)} - * and {@link #enableTable(org.apache.hadoop.hbase.TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using {@link #listTableDescriptors(Pattern)} and + * {@link #enableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] enableTables(String regex) throws IOException; /** * Enable tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(java.util.regex.Pattern)} and + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and * {@link #enableTable(org.apache.hadoop.hbase.TableName)} - * * @param pattern The pattern to match table names against * @throws IOException if a remote or network exception occurs - * @return Table descriptors for tables that couldn't be enabled. - * The return HTDs are read-only. - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(java.util.regex.Pattern)} - * and {@link #enableTable(org.apache.hadoop.hbase.TableName)} + * @return Table descriptors for tables that couldn't be enabled. The return HTDs are read-only. + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. 
Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and + * {@link #enableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] enableTables(Pattern pattern) throws IOException; /** - * Disable the table but does not block and wait for it to be completely disabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Disable the table but does not block and wait for it to be completely disabled. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async disable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async disable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future disableTableAsync(TableName tableName) throws IOException; @@ -558,10 +534,9 @@ public interface Admin extends Abortable, Closeable { * Disable table and wait on completion. May timeout eventually. Use * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in - * enabled state for it to be disabled. - * @param tableName - * @throws IOException There could be couple types of IOException TableNotFoundException means the - * table doesn't exist. TableNotEnabledException means the table isn't in enabled state. + * enabled state for it to be disabled. n * @throws IOException There could be couple types of + * IOException TableNotFoundException means the table doesn't exist. TableNotEnabledException + * means the table isn't in enabled state. */ default void disableTable(TableName tableName) throws IOException { get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); @@ -569,37 +544,33 @@ public interface Admin extends Abortable, Closeable { /** * Disable tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(Pattern)} and {@link #disableTable(org.apache.hadoop.hbase.TableName)} - * + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(Pattern)} and + * {@link #disableTable(org.apache.hadoop.hbase.TableName)} * @param regex The regular expression to match table names against - * @return Table descriptors for tables that couldn't be disabled - * The return htds are read-only + * @return Table descriptors for tables that couldn't be disabled The return htds are read-only * @throws IOException if a remote or network exception occurs * @see #disableTables(java.util.regex.Pattern) * @see #disableTable(org.apache.hadoop.hbase.TableName) - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. 
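[Editor's note, not part of the patch: a minimal sketch of the synchronous disable path described above, guarded by the table-state check the javadoc points to. The table name is an illustrative assumption.]

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ToggleTableExample {
  static void ensureDisabled(Admin admin, TableName tableName) throws IOException {
    if (admin.isTableEnabled(tableName)) {
      // disableTable wraps disableTableAsync and waits up to the configured sync wait timeout.
      admin.disableTable(tableName);
    }
  }
}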
- * Consider using {@link #listTableDescriptors(Pattern)} - * and {@link #disableTable(org.apache.hadoop.hbase.TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using {@link #listTableDescriptors(Pattern)} and + * {@link #disableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] disableTables(String regex) throws IOException; /** * Disable tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(java.util.regex.Pattern)} and + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and * {@link #disableTable(org.apache.hadoop.hbase.TableName)} - * * @param pattern The pattern to match table names against - * @return Table descriptors for tables that couldn't be disabled - * The return htds are read-only + * @return Table descriptors for tables that couldn't be disabled The return htds are read-only * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(java.util.regex.Pattern)} - * and {@link #disableTable(org.apache.hadoop.hbase.TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and + * {@link #disableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] disableTables(Pattern pattern) throws IOException; @@ -629,7 +600,6 @@ public interface Admin extends Abortable, Closeable { * Use this api to check if the table has been created with the specified number of splitkeys * which was used while creating the given table. Note : If this api is used after a table's * region gets splitted, the api may return false. - * * @param tableName name of table to check * @param splitKeys keys to check if the table has been created with all split keys * @throws IOException if a remote or network excpetion occurs @@ -641,13 +611,12 @@ public interface Admin extends Abortable, Closeable { /** * Get the status of an alter (a.k.a modify) command - indicates how * many regions have received the updated schema Asynchronous operation. - * * @param tableName TableName instance * @return Pair indicating the number of regions updated Pair.getFirst() is the regions that are - * yet to be updated Pair.getSecond() is the total number of regions of the table + * yet to be updated Pair.getSecond() is the total number of regions of the table * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future - * on an operation. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future on an + * operation. */ @Deprecated Pair getAlterStatus(TableName tableName) throws IOException; @@ -655,28 +624,25 @@ public interface Admin extends Abortable, Closeable { /** * Get the status of alter (a.k.a modify) command - indicates how many * regions have received the updated schema Asynchronous operation. 
- * * @param tableName name of the table to get the status of * @return Pair indicating the number of regions updated Pair.getFirst() is the regions that are - * yet to be updated Pair.getSecond() is the total number of regions of the table + * yet to be updated Pair.getSecond() is the total number of regions of the table * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future - * on an operation. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future on an + * operation. */ @Deprecated Pair getAlterStatus(byte[] tableName) throws IOException; /** - * Add a column family to an existing table. Synchronous operation. - * Use {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * - * @param tableName name of the table to add column family to + * Add a column family to an existing table. Synchronous operation. Use + * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns a + * {@link Future} from which you can learn whether success or failure. + * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0. - * This will be removed in HBase 3.0.0. - * Use {@link #addColumnFamily(TableName, ColumnFamilyDescriptor)}. + * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0. Use + * {@link #addColumnFamily(TableName, ColumnFamilyDescriptor)}. */ @Deprecated default void addColumn(TableName tableName, ColumnFamilyDescriptor columnFamily) @@ -685,55 +651,50 @@ public interface Admin extends Abortable, Closeable { } /** - * Add a column family to an existing table. Synchronous operation. - * Use {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * - * @param tableName name of the table to add column family to + * Add a column family to an existing table. Synchronous operation. Use + * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns a + * {@link Future} from which you can learn whether success or failure. + * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs */ default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { get(addColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** - * Add a column family to an existing table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * - * @param tableName name of the table to add column family to + * Add a column family to an existing table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. 
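[Editor's note, not part of the patch: a minimal sketch of the addColumnFamily call described above, building the descriptor with ColumnFamilyDescriptorBuilder. The family name and version setting are illustrative assumptions.]

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AddFamilyExample {
  static void addFamily(Admin admin, TableName tableName) throws IOException {
    ColumnFamilyDescriptor cf =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf2")).setMaxVersions(3).build();
    admin.addColumnFamily(tableName, cf); // synchronous; wraps addColumnFamilyAsync
  }
}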
It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. + * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs * @return the result of the async add column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. */ Future addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException; + throws IOException; /** - * Delete a column family from a table. Synchronous operation. - * Use {@link #deleteColumnFamily(TableName, byte[])} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * - * @param tableName name of table + * Delete a column family from a table. Synchronous operation. Use + * {@link #deleteColumnFamily(TableName, byte[])} instead because it returns a {@link Future} from + * which you can learn whether success or failure. + * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0. - * This will be removed in HBase 3.0.0. - * Use {@link #deleteColumnFamily(TableName, byte[])}}. + * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0. Use + * {@link #deleteColumnFamily(TableName, byte[])}}. */ @Deprecated void deleteColumn(TableName tableName, byte[] columnFamily) throws IOException; /** - * Delete a column family from a table. Synchronous operation. - * Use {@link #deleteColumnFamily(TableName, byte[])} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * @param tableName name of table + * Delete a column family from a table. Synchronous operation. Use + * {@link #deleteColumnFamily(TableName, byte[])} instead because it returns a {@link Future} from + * which you can learn whether success or failure. + * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs */ @@ -743,73 +704,67 @@ public interface Admin extends Abortable, Closeable { } /** - * Delete a column family from a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * - * @param tableName name of table + * Delete a column family from a table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. + * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs * @return the result of the async delete column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. 
*/ - Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) - throws IOException; + Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) throws IOException; /** * Modify an existing column family on a table. Synchronous operation. Use * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns * a {@link Future} from which you can learn whether success or failure. - * @param tableName name of table + * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0. - * This will be removed in HBase 3.0.0. - * Use {@link #modifyColumnFamily(TableName, ColumnFamilyDescriptor)}. + * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0. Use + * {@link #modifyColumnFamily(TableName, ColumnFamilyDescriptor)}. */ @Deprecated default void modifyColumn(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { modifyColumnFamily(tableName, columnFamily); } /** - * Modify an existing column family on a table. Synchronous operation. - * Use {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * @param tableName name of table + * Modify an existing column family on a table. Synchronous operation. Use + * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns + * a {@link Future} from which you can learn whether success or failure. + * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs */ default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { get(modifyColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** - * Modify an existing column family on a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * - * @param tableName name of table + * Modify an existing column family on a table. Asynchronous operation. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. + * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs * @return the result of the async modify column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. */ Future modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException; + throws IOException; /** * Change the store file tracker of the given table's given family. 
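[Editor's note, not part of the patch: a minimal sketch of the modifyColumnFamily flow described above, rebuilding the existing family descriptor with one changed setting. The family name and the TTL attribute chosen here are illustrative assumptions.]

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ModifyFamilyExample {
  static void setFamilyTtl(Admin admin, TableName tableName, int ttlSeconds) throws IOException {
    // Start from the current descriptor so other family settings are preserved.
    ColumnFamilyDescriptor current =
      admin.getDescriptor(tableName).getColumnFamily(Bytes.toBytes("cf"));
    ColumnFamilyDescriptor updated =
      ColumnFamilyDescriptorBuilder.newBuilder(current).setTimeToLive(ttlSeconds).build();
    admin.modifyColumnFamily(tableName, updated);
  }
}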
* @param tableName the table you want to change - * @param family the family you want to change - * @param dstSFT the destination store file tracker + * @param family the family you want to change + * @param dstSFT the destination store file tracker * @throws IOException if a remote or network exception occurs */ default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT) @@ -821,8 +776,8 @@ public interface Admin extends Abortable, Closeable { /** * Change the store file tracker of the given table's given family. * @param tableName the table you want to change - * @param family the family you want to change - * @param dstSFT the destination store file tracker + * @param family the family you want to change + * @param dstSFT the destination store file tracker * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the * operation to complete * @throws IOException if a remote or network exception occurs @@ -832,53 +787,50 @@ public interface Admin extends Abortable, Closeable { /** * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param regionname region name to close * @param serverName Deprecated. Not used. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #unassign(byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #unassign(byte[], boolean)}. */ @Deprecated void closeRegion(String regionname, String serverName) throws IOException; /** * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param regionname region name to close * @param serverName Deprecated. Not used. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #unassign(byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #unassign(byte[], boolean)}. */ @Deprecated void closeRegion(byte[] regionname, String serverName) throws IOException; /** * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. - * @param serverName Deprecated. Not used. + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. + * @param serverName Deprecated. Not used. * @return Deprecated. Returns true always. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #unassign(byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #unassign(byte[], boolean)}. */ @Deprecated boolean closeRegionWithEncodedRegionName(String encodedRegionName, String serverName) - throws IOException; + throws IOException; /** * Used {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param sn Deprecated. Not used. 
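[Editor's note, not part of the patch: a minimal sketch of the store file tracker switch described above. The family name is illustrative, and "FILE" is assumed to be one of the built-in tracker names; check StoreFileTrackerFactory in your release for the supported values.]

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileTrackerExample {
  static void switchToFileTracker(Admin admin, TableName tableName) throws IOException {
    // "FILE" is an assumed tracker name; this call blocks like the other sync modify methods.
    admin.modifyColumnFamilyStoreFileTracker(tableName, Bytes.toBytes("cf"), "FILE");
  }
}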
* @throws IOException if a remote or network exception occurs * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-18231). - * Use {@link #unassign(byte[], boolean)}. + * (HBASE-18231). Use + * {@link #unassign(byte[], boolean)}. */ @Deprecated void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException; @@ -887,15 +839,14 @@ public interface Admin extends Abortable, Closeable { * Get all the online regions on a region server. * @throws IOException if a remote or network exception occurs * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-17980). - * Use {@link #getRegions(ServerName sn)}. + * (HBASE-17980). Use + * {@link #getRegions(ServerName sn)}. */ @Deprecated List getOnlineRegions(ServerName sn) throws IOException; /** * Get all the online regions on a region server. - * * @return List of {@link RegionInfo} * @throws IOException if a remote or network exception occurs */ @@ -903,17 +854,15 @@ public interface Admin extends Abortable, Closeable { /** * Flush a table. Synchronous operation. - * * @param tableName table to flush * @throws IOException if a remote or network exception occurs */ void flush(TableName tableName) throws IOException; /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. - * - * @param tableName table to flush + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. + * @param tableName table to flush * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ @@ -921,7 +870,6 @@ public interface Admin extends Abortable, Closeable { /** * Flush an individual region. Synchronous operation. - * * @param regionName region to flush * @throws IOException if a remote or network exception occurs */ @@ -929,8 +877,7 @@ public interface Admin extends Abortable, Closeable { /** * Flush a column family within a region. Synchronous operation. - * - * @param regionName region to flush + * @param regionName region to flush * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ @@ -944,10 +891,8 @@ public interface Admin extends Abortable, Closeable { void flushRegionServer(ServerName serverName) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). * @param tableName table to compact * @throws IOException if a remote or network exception occurs */ @@ -955,9 +900,8 @@ public interface Admin extends Abortable, Closeable { /** * Compact an individual region. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compaction run and then it returns. It does not wait on the completion of Compaction (it can + * take a while). * @param regionName region to compact * @throws IOException if a remote or network exception occurs */ @@ -965,122 +909,103 @@ public interface Admin extends Abortable, Closeable { /** * Compact a column family within a table. 
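[Editor's note, not part of the patch: a minimal sketch of the flush and compact calls described above. The family name is an illustrative assumption; note that compact only requests a compaction and returns without waiting.]

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushCompactExample {
  static void flushThenCompact(Admin admin, TableName tableName) throws IOException {
    admin.flush(tableName);                        // synchronous flush of all regions
    admin.compact(tableName, Bytes.toBytes("cf")); // compaction request only, returns immediately
  }
}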
Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). + * @param tableName table to compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void compact(TableName tableName, byte[] columnFamily) - throws IOException; + void compact(TableName tableName, byte[] columnFamily) throws IOException; /** * Compact a column family within a region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param regionName region to compact + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). + * @param regionName region to compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void compactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). + * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException + * @throws IOException if a remote or network exception occurs n */ void compact(TableName tableName, CompactType compactType) throws IOException, InterruptedException; /** - * Compact a column family within a table. Asynchronous operation in that this method - * requests that a Compaction run and then it returns. It does not wait on the - * completion of Compaction (it can take a while). - * - * @param tableName table to compact + * Compact a column family within a table. Asynchronous operation in that this method requests + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). + * @param tableName table to compact * @param columnFamily column family within a table - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if not a mob column family or if a remote or network exception occurs - * @throws InterruptedException + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * @throws IOException if not a mob column family or if a remote or network exception occurs n */ void compact(TableName tableName, byte[] columnFamily, CompactType compactType) throws IOException, InterruptedException; /** - * Major compact a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table. 
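[Editor's note, not part of the patch: a minimal sketch of the CompactType overload described above, used here for a MOB column family. CompactType.MOB and the family name are assumptions about the deployment; the call still only requests the compaction.]

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.util.Bytes;

public class MobCompactExample {
  static void requestMobCompaction(Admin admin, TableName tableName)
    throws IOException, InterruptedException {
    // Throws IOException if "mobCf" is not a MOB-enabled column family.
    admin.compact(tableName, Bytes.toBytes("mobCf"), CompactType.MOB);
  }
}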
Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). * @param tableName table to major compact * @throws IOException if a remote or network exception occurs */ void majorCompact(TableName tableName) throws IOException; /** - * Major compact a table or an individual region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table or an individual region. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param regionName region to major compact * @throws IOException if a remote or network exception occurs */ void majorCompactRegion(byte[] regionName) throws IOException; /** - * Major compact a column family within a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to major compact + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). + * @param tableName table to major compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void majorCompact(TableName tableName, byte[] columnFamily) - throws IOException; + void majorCompact(TableName tableName, byte[] columnFamily) throws IOException; /** - * Major compact a column family within region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param regionName egion to major compact + * Major compact a column family within region. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). + * @param regionName egion to major compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void majorCompactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Major compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * Major compact a table. Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). + * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException + * @throws IOException if a remote or network exception occurs n */ void majorCompact(TableName tableName, CompactType compactType) throws IOException, InterruptedException; /** - * Major compact a column family within a table. 
Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). + * @param tableName table to compact * @param columnFamily column family within a table - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if not a mob column family or if a remote or network exception occurs - * @throws InterruptedException + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * @throws IOException if not a mob column family or if a remote or network exception occurs n */ void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) throws IOException, InterruptedException; @@ -1089,17 +1014,16 @@ public interface Admin extends Abortable, Closeable { * Compact all regions on the region server. Asynchronous operation in that this method requests * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it * can take a while). - * @param sn the region server name + * @param sn the region server name * @param major if it's major compaction - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use - * {@link #compactRegionServer(ServerName)} or - * {@link #majorCompactRegionServer(ServerName)}. + * @throws IOException if a remote or network exception occurs n * @deprecated As of release + * 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #compactRegionServer(ServerName)} or + * {@link #majorCompactRegionServer(ServerName)}. */ @Deprecated - default void compactRegionServer(ServerName sn, boolean major) throws IOException, - InterruptedException { + default void compactRegionServer(ServerName sn, boolean major) + throws IOException, InterruptedException { if (major) { majorCompactRegionServer(sn); } else { @@ -1109,16 +1033,15 @@ public interface Admin extends Abortable, Closeable { /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers */ Map compactionSwitch(boolean switchState, List serverNamesList) - throws IOException; + throws IOException; /** * Compact all regions on the region server. Asynchronous operation in that this method requests @@ -1141,9 +1064,10 @@ public interface Admin extends Abortable, Closeable { /** * Move the region encodedRegionName to a random server. * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. 
if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. * @throws IOException if we can't find a region named encodedRegionName */ void move(byte[] encodedRegionName) throws IOException; @@ -1151,16 +1075,18 @@ public interface Admin extends Abortable, Closeable { /** * Move the region rencodedRegionName to destServerName. * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. - * @param destServerName The servername of the destination regionserver. If passed the empty byte - * array we'll assign to a random server. A server name is made of host, port and - * startcode. Here is an example: host187.example.com,60020,1289493121758 + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. + * @param destServerName The servername of the destination regionserver. If passed the empty + * byte array we'll assign to a random server. A server name is made of + * host, port and startcode. Here is an example: + * host187.example.com,60020,1289493121758 * @throws IOException if we can't find a region named encodedRegionName * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link #move(byte[], ServerName)} - * instead. And if you want to move the region to a random server, please use - * {@link #move(byte[])}. + * instead. And if you want to move the region to a random server, please use + * {@link #move(byte[])}. * @see HBASE-22108 */ @Deprecated @@ -1175,12 +1101,13 @@ public interface Admin extends Abortable, Closeable { /** * Move the region rencodedRegionName to destServerName. * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. - * @param destServerName The servername of the destination regionserver. A server name is made of - * host, port and startcode. Here is an example: - * host187.example.com,60020,1289493121758 + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. + * @param destServerName The servername of the destination regionserver. A server name is made + * of host, port and startcode. Here is an example: + * host187.example.com,60020,1289493121758 * @throws IOException if we can't find a region named encodedRegionName */ void move(byte[] encodedRegionName, ServerName destServerName) throws IOException; @@ -1200,16 +1127,15 @@ public interface Admin extends Abortable, Closeable { void unassign(byte[] regionName) throws IOException; /** - * Unassign a region from current hosting regionserver. Region will then be assigned to a - * regionserver chosen at random. Region could be reassigned back to the same server. Use {@link - * #move(byte[], ServerName)} if you want to control the region movement. 
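[Editor's note, not part of the patch: a minimal sketch of move(byte[], ServerName) as described above, using the encoded-region-name and host,port,startcode formats given in the javadoc. The concrete region and server names are the javadoc's own examples and are illustrative only.]

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class MoveRegionExample {
  static void moveRegion(Admin admin) throws IOException {
    // Encoded region name is the hash suffix of the full region name.
    byte[] encodedRegionName = Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396");
    ServerName dest = ServerName.valueOf("host187.example.com,60020,1289493121758");
    admin.move(encodedRegionName, dest);
  }
}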
- * + * Unassign a region from current hosting regionserver. Region will then be assigned to a + * regionserver chosen at random. Region could be reassigned back to the same server. Use + * {@link #move(byte[], ServerName)} if you want to control the region movement. * @param regionName Region to unassign. Will clear any existing RegionPlan if one found. - * @param force If true, force unassign (Will remove region from regions-in-transition too if - * present. If results in double assignment use hbck -fix to resolve. To be used by experts). + * @param force If true, force unassign (Will remove region from + * regions-in-transition too if present. If results in double assignment use + * hbck -fix to resolve. To be used by experts). * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. * @see HBASE-24875 */ @Deprecated @@ -1223,7 +1149,6 @@ public interface Admin extends Abortable, Closeable { * still online as per Master's in memory state. If this API is incorrectly used on active region * then master will loose track of that region. This is a special method that should be used by * experts or hbck. - * * @param regionName Region to offline. * @throws IOException if a remote or network exception occurs */ @@ -1231,13 +1156,12 @@ public interface Admin extends Abortable, Closeable { /** * Turn the load balancer on or off. - * - * @param synchronous If true, it waits until current balance() call, if - * outstanding, to return. + * @param synchronous If true, it waits until current balance() call, if outstanding, + * to return. * @return Previous balancer value * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #balancerSwitch(boolean, boolean)} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #balancerSwitch(boolean, boolean)} instead. */ @Deprecated default boolean setBalancerRunning(boolean on, boolean synchronous) throws IOException { @@ -1246,23 +1170,20 @@ public interface Admin extends Abortable, Closeable { /** * Turn the load balancer on or off. - * @param onOrOff Set to true to enable, false to disable. - * @param synchronous If true, it waits until current balance() call, if - * outstanding, to return. + * @param onOrOff Set to true to enable, false to disable. + * @param synchronous If true, it waits until current balance() call, if outstanding, + * to return. * @return Previous balancer value * @throws IOException if a remote or network exception occurs */ - boolean balancerSwitch(boolean onOrOff, boolean synchronous) - throws IOException; + boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the - * reassignments. Can NOT run for various reasons. Check logs. - * + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. Can NOT run for various reasons. Check logs. * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #balance()} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #balance()} instead. 
*/ @Deprecated default boolean balancer() throws IOException { @@ -1270,21 +1191,18 @@ public interface Admin extends Abortable, Closeable { } /** - * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the - * reassignments. Can NOT run for various reasons. Check logs. - * + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. Can NOT run for various reasons. Check logs. * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs */ default boolean balance() throws IOException { - return balance(BalanceRequest.defaultInstance()) - .isBalancerRan(); + return balance(BalanceRequest.defaultInstance()).isBalancerRan(); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. * @throws IOException if a remote or network exception occurs @@ -1292,15 +1210,14 @@ public interface Admin extends Abortable, Closeable { BalanceResponse balance(BalanceRequest request) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. If there is region in transition, force parameter of true - * would still run balancer. Can *not* run for other reasons. Check - * logs. + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. If there is region in transition, force parameter of true would still run + * balancer. Can *not* run for other reasons. Check logs. * @param force whether we should force balance even if there is region in transition * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ @Deprecated default boolean balancer(boolean force) throws IOException { @@ -1308,39 +1225,33 @@ public interface Admin extends Abortable, Closeable { } /** - * Invoke the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. If there is region in transition, force parameter of true - * would still run balancer. Can *not* run for other reasons. Check - * logs. + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. If there is region in transition, force parameter of true would still run + * balancer. Can *not* run for other reasons. Check logs. * @param force whether we should force balance even if there is region in transition * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. 
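[Editor's note, not part of the patch: a minimal sketch of the BalanceRequest-based invocation that the deprecation notes above recommend, here ignoring regions in transition.]

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;

public class BalanceExample {
  static boolean forceBalance(Admin admin) throws IOException {
    BalanceRequest request =
      BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build();
    BalanceResponse response = admin.balance(request);
    return response.isBalancerRan(); // true if the balancer actually ran
  }
}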
*/ @Deprecated default boolean balance(boolean force) throws IOException { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(force) - .build() - ).isBalancerRan(); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(force).build()) + .isBalancerRan(); } /** * Query the current state of the balancer. - * * @return true if the balancer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ boolean isBalancerEnabled() throws IOException; /** - * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. - * Calling this API will drop all the cached blocks specific to a table from BlockCache. - * This can significantly impact the query performance as the subsequent queries will - * have to retrieve the blocks from underlying filesystem. - * + * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. Calling + * this API will drop all the cached blocks specific to a table from BlockCache. This can + * significantly impact the query performance as the subsequent queries will have to retrieve the + * blocks from underlying filesystem. * @param tableName table to clear block cache * @return CacheEvictionStats related to the eviction * @throws IOException if a remote or network exception occurs @@ -1348,11 +1259,9 @@ public interface Admin extends Abortable, Closeable { CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException; /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs */ @@ -1361,11 +1270,9 @@ public interface Admin extends Abortable, Closeable { } /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @param ntfp limit to tables matching the specified filter. * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs @@ -1374,7 +1281,6 @@ public interface Admin extends Abortable, Closeable { /** * Query the current state of the region normalizer. - * * @return true if region normalizer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ @@ -1382,11 +1288,10 @@ public interface Admin extends Abortable, Closeable { /** * Turn region normalizer on or off. 
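[Editor's note, not part of the patch: a minimal sketch of the region normalizer calls described above. normalize() only submits the request; which regions were split or merged is reported in the master logs.]

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;

public class NormalizerExample {
  static void runNormalizer(Admin admin) throws IOException {
    if (!admin.isNormalizerEnabled()) {
      admin.normalizerSwitch(true);
    }
    boolean submitted = admin.normalize(); // non-blocking submission of a normalizer run
    System.out.println("normalizer run submitted: " + submitted);
  }
}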
- * * @return Previous normalizer value * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #normalizerSwitch(boolean)}} - * instead. + * instead. */ @Deprecated default boolean setNormalizerRunning(boolean on) throws IOException { @@ -1395,20 +1300,18 @@ public interface Admin extends Abortable, Closeable { /** * Turn region normalizer on or off. - * * @return Previous normalizer value * @throws IOException if a remote or network exception occurs */ - boolean normalizerSwitch (boolean on) throws IOException; + boolean normalizerSwitch(boolean on) throws IOException; /** * Enable/Disable the catalog janitor. - * * @param enable if true enables the catalog janitor * @return the previous state * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #catalogJanitorSwitch(boolean)}} - * instead. + * instead. */ @Deprecated default boolean enableCatalogJanitor(boolean enable) throws IOException { @@ -1417,7 +1320,6 @@ public interface Admin extends Abortable, Closeable { /** * Enable/Disable the catalog janitor/ - * * @param onOrOff if true enables the catalog janitor * @return the previous state * @throws IOException if a remote or network exception occurs @@ -1426,11 +1328,9 @@ public interface Admin extends Abortable, Closeable { /** * Ask for a scan of the catalog table. - * * @return the number of entries cleaned. Returns -1 if previous run is in progress. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #runCatalogJanitor()}} - * instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #runCatalogJanitor()}} instead. */ @Deprecated default int runCatalogScan() throws IOException { @@ -1439,7 +1339,6 @@ public interface Admin extends Abortable, Closeable { /** * Ask for a scan of the catalog table. - * * @return the number of entries cleaned * @throws IOException if a remote or network exception occurs */ @@ -1447,19 +1346,17 @@ public interface Admin extends Abortable, Closeable { /** * Query on the catalog janitor state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCatalogJanitorEnabled() throws IOException; /** * Enable/Disable the cleaner chore. - * * @param on if true enables the cleaner chore * @return the previous state * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #cleanerChoreSwitch(boolean)}} - * instead. + * instead. */ @Deprecated default boolean setCleanerChoreRunning(boolean on) throws IOException { @@ -1468,7 +1365,6 @@ public interface Admin extends Abortable, Closeable { /** * Enable/Disable the cleaner chore. - * * @param onOrOff if true enables the cleaner chore * @return the previous state * @throws IOException if a remote or network exception occurs @@ -1477,7 +1373,6 @@ public interface Admin extends Abortable, Closeable { /** * Ask for cleaner chore to run. - * * @return true if cleaner chore ran, false otherwise * @throws IOException if a remote or network exception occurs */ @@ -1485,39 +1380,37 @@ public interface Admin extends Abortable, Closeable { /** * Query on the cleaner chore state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCleanerChoreEnabled() throws IOException; /** * Merge two regions. Asynchronous operation. 
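[Editor's note, not part of the patch: a minimal sketch of the catalog janitor and cleaner chore switches described above, followed by one manual run of each.]

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;

public class JanitorExample {
  static void runHousekeeping(Admin admin) throws IOException {
    admin.catalogJanitorSwitch(true);
    int cleaned = admin.runCatalogJanitor(); // -1 if a previous run is still in progress
    admin.cleanerChoreSwitch(true);
    boolean choreRan = admin.runCleanerChore();
    System.out.println("catalog entries cleaned: " + cleaned + ", cleaner chore ran: " + choreRan);
  }
}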
- * * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge two - * adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge + * two adjacent regions * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0. Will be removed in 3.0. Use - * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. + * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. */ @Deprecated - void mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB, - boolean forcible) throws IOException; + void mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB, boolean forcible) + throws IOException; /** * Merge two regions. Asynchronous operation. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge two - * adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge + * two adjacent regions * @throws IOException if a remote or network exception occurs * @deprecated since 2.3.0 and will be removed in 4.0.0. Multi-region merge feature is now * supported. Use {@link #mergeRegionsAsync(byte[][], boolean)} instead. */ @Deprecated default Future mergeRegionsAsync(byte[] nameOfRegionA, byte[] nameOfRegionB, - boolean forcible) throws IOException { + boolean forcible) throws IOException { byte[][] nameofRegionsToMerge = new byte[2][]; nameofRegionsToMerge[0] = nameOfRegionA; nameofRegionsToMerge[1] = nameOfRegionB; @@ -1527,16 +1420,16 @@ public interface Admin extends Abortable, Closeable { /** * Merge multiple regions (>=2). Asynchronous operation. * @param nameofRegionsToMerge encoded or full name of daughter regions - * @param forcible true if do a compulsory merge, otherwise we will only merge - * adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only + * merge adjacent regions * @throws IOException if a remote or network exception occurs */ Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) - throws IOException; + throws IOException; /** - * Split a table. The method will execute split action for each region in table. - * Asynchronous operation. + * Split a table. The method will execute split action for each region in table. Asynchronous + * operation. * @param tableName table to split * @throws IOException if a remote or network exception occurs */ @@ -1544,19 +1437,17 @@ public interface Admin extends Abortable, Closeable { /** * Split an individual region. Asynchronous operation. - * * @param regionName region to split * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #splitRegionAsync(byte[], byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #splitRegionAsync(byte[], byte[])}. */ @Deprecated void splitRegion(byte[] regionName) throws IOException; /** * Split a table. Asynchronous operation. - * - * @param tableName table to split + * @param tableName table to split * @param splitPoint the explicit position to split on * @throws IOException if a remote or network exception occurs */ @@ -1564,16 +1455,14 @@ public interface Admin extends Abortable, Closeable { /** * Split an individual region. Asynchronous operation. 
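[Editor's note, not part of the patch: a minimal sketch of the multi-region mergeRegionsAsync described above, merging the first two regions of a table. It assumes getRegions returns regions in start-key order so the two are adjacent; the wait timeout is illustrative.]

import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeRegionsExample {
  static void mergeFirstTwoRegions(Admin admin, TableName tableName) throws Exception {
    List<RegionInfo> regions = admin.getRegions(tableName);
    byte[][] toMerge = new byte[][] { regions.get(0).getEncodedNameAsBytes(),
      regions.get(1).getEncodedNameAsBytes() };
    // forcible=false: only adjacent regions are merged.
    admin.mergeRegionsAsync(toMerge, false).get(5, TimeUnit.MINUTES);
  }
}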
- * * @param regionName region to split * @param splitPoint the explicit position to split on * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #splitRegionAsync(byte[], byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #splitRegionAsync(byte[], byte[])}. */ @Deprecated - void splitRegion(byte[] regionName, byte[] splitPoint) - throws IOException; + void splitRegion(byte[] regionName, byte[] splitPoint) throws IOException; /** * Split an individual region. Asynchronous operation. @@ -1593,7 +1482,7 @@ public interface Admin extends Abortable, Closeable { /** * Modify an existing table, more IRB friendly version. * @param tableName name of table. - * @param td modified description of the table + * @param td modified description of the table * @throws IOException if a remote or network exception occurs * @deprecated since 2.0 version and will be removed in 3.0 version. use * {@link #modifyTable(TableDescriptor)} @@ -1601,8 +1490,8 @@ public interface Admin extends Abortable, Closeable { @Deprecated default void modifyTable(TableName tableName, TableDescriptor td) throws IOException { if (!tableName.equals(td.getTableName())) { - throw new IllegalArgumentException("the specified table name '" + tableName + - "' doesn't match with the HTD one: " + td.getTableName()); + throw new IllegalArgumentException("the specified table name '" + tableName + + "' doesn't match with the HTD one: " + td.getTableName()); } modifyTable(td); } @@ -1617,39 +1506,35 @@ public interface Admin extends Abortable, Closeable { } /** - * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that - * it may be a while before your schema change is updated across all of the table. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that it + * may be a while before your schema change is updated across all of the table. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table. - * @param td modified description of the table + * @param td modified description of the table * @throws IOException if a remote or network exception occurs * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the - * operation to complete - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #modifyTableAsync(TableDescriptor)} + * operation to complete + * @deprecated since 2.0 version and will be removed in 3.0 version. 
use + * {@link #modifyTableAsync(TableDescriptor)} */ @Deprecated default Future modifyTableAsync(TableName tableName, TableDescriptor td) - throws IOException { + throws IOException { if (!tableName.equals(td.getTableName())) { - throw new IllegalArgumentException("the specified table name '" + tableName + - "' doesn't match with the HTD one: " + td.getTableName()); + throw new IllegalArgumentException("the specified table name '" + tableName + + "' doesn't match with the HTD one: " + td.getTableName()); } return modifyTableAsync(td); } /** - * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means that - * it may be a while before your schema change is updated across all of the table. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means + * that it may be a while before your schema change is updated across all of the table. You can + * use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param td description of the table * @throws IOException if a remote or network exception occurs * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the @@ -1660,7 +1545,7 @@ public interface Admin extends Abortable, Closeable { /** * Change the store file tracker of the given table. * @param tableName the table you want to change - * @param dstSFT the destination store file tracker + * @param dstSFT the destination store file tracker * @throws IOException if a remote or network exception occurs */ default void modifyTableStoreFileTracker(TableName tableName, String dstSFT) throws IOException { @@ -1671,7 +1556,7 @@ public interface Admin extends Abortable, Closeable { /** * Change the store file tracker of the given table. * @param tableName the table you want to change - * @param dstSFT the destination store file tracker + * @param dstSFT the destination store file tracker * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the * operation to complete * @throws IOException if a remote or network exception occurs @@ -1700,22 +1585,21 @@ public interface Admin extends Abortable, Closeable { /** * Check whether Master is in maintenance mode. - * * @throws IOException if a remote or network exception occurs */ - boolean isMasterInMaintenanceMode() throws IOException; + boolean isMasterInMaintenanceMode() throws IOException; /** * Stop the designated regionserver. - * * @param hostnamePort Hostname and port delimited by a : as in - * example.org:1234 + * example.org:1234 * @throws IOException if a remote or network exception occurs */ void stopRegionServer(String hostnamePort) throws IOException; /** * Get whole cluster status, containing status about: + * *

    * <pre>
    * hbase version
    * cluster id
@@ -1725,10 +1609,11 @@ public interface Admin extends Abortable, Closeable {
    * balancer
    * regions in transition
    * </pre>
    + * * @return cluster status * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #getClusterMetrics()} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #getClusterMetrics()} */ @Deprecated default ClusterStatus getClusterStatus() throws IOException { @@ -1737,6 +1622,7 @@ public interface Admin extends Abortable, Closeable { /** * Get whole cluster metrics, containing status about: + * *
    * <pre>
    * hbase version
    * cluster id
@@ -1746,6 +1632,7 @@ public interface Admin extends Abortable, Closeable {
    * balancer
    * regions in transition
    * </pre>
    + * * @return cluster metrics * @throws IOException if a remote or network exception occurs */ @@ -1785,29 +1672,26 @@ public interface Admin extends Abortable, Closeable { } /** - * Retrieve all current live region servers including decommissioned - * if excludeDecommissionedRS is false, else non-decommissioned ones only - * + * Retrieve all current live region servers including decommissioned if excludeDecommissionedRS is + * false, else non-decommissioned ones only * @param excludeDecommissionedRS should we exclude decommissioned RS nodes * @return all current live region servers including/excluding decommissioned hosts * @throws IOException if a remote or network exception occurs */ default Collection getRegionServers(boolean excludeDecommissionedRS) - throws IOException { + throws IOException { List allServers = getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName(); if (!excludeDecommissionedRS) { return allServers; } List decommissionedRegionServers = listDecommissionedRegionServers(); - return allServers.stream() - .filter(s -> !decommissionedRegionServers.contains(s)) + return allServers.stream().filter(s -> !decommissionedRegionServers.contains(s)) .collect(ImmutableList.toImmutableList()); } /** * Get {@link RegionMetrics} of all regions hosted on a regionserver. - * * @param serverName region server from which {@link RegionMetrics} is required. * @return a {@link RegionMetrics} list of all regions hosted on a region server * @throws IOException if a remote or network exception occurs @@ -1818,14 +1702,13 @@ public interface Admin extends Abortable, Closeable { /** * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table. - * * @param serverName region server from which {@link RegionMetrics} is required. - * @param tableName get {@link RegionMetrics} of regions belonging to the table + * @param tableName get {@link RegionMetrics} of regions belonging to the table * @return region metrics map of all regions of a table hosted on a region server * @throws IOException if a remote or network exception occurs */ - List getRegionMetrics(ServerName serverName, - TableName tableName) throws IOException; + List getRegionMetrics(ServerName serverName, TableName tableName) + throws IOException; /** * @return Configuration used by the instance. @@ -1894,14 +1777,14 @@ public interface Admin extends Abortable, Closeable { * @param name name of namespace descriptor * @return A descriptor * @throws org.apache.hadoop.hbase.NamespaceNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception + * occurs */ NamespaceDescriptor getNamespaceDescriptor(String name) - throws NamespaceNotFoundException, IOException; + throws NamespaceNotFoundException, IOException; /** * List available namespaces - * * @return List of namespace names * @throws IOException if a remote or network exception occurs */ @@ -1909,25 +1792,21 @@ public interface Admin extends Abortable, Closeable { /** * List available namespace descriptors - * * @return List of descriptors * @throws IOException if a remote or network exception occurs */ - NamespaceDescriptor[] listNamespaceDescriptors() - throws IOException; + NamespaceDescriptor[] listNamespaceDescriptors() throws IOException; /** * Get list of table descriptors by namespace. 
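getClusterMetrics(EnumSet) is meant to let callers fetch only the options they need, as the default methods in the hunk above already do with Option.SERVERS_NAME. A small sketch in the same spirit (not part of the diff), assuming a reachable cluster:

import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterMetricsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Fetch only the live-server list instead of the whole ClusterMetrics payload.
      ClusterMetrics metrics = admin.getClusterMetrics(EnumSet.of(Option.SERVERS_NAME));
      List<ServerName> servers = metrics.getServersName();
      for (ServerName server : servers) {
        // Region-level metrics for every region hosted on this region server.
        List<RegionMetrics> regionMetrics = admin.getRegionMetrics(server);
        System.out.println(server + " hosts " + regionMetrics.size() + " regions");
      }
    }
  }
}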
- * * @param name namespace name * @return HTD[] the read-only tableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #listTableDescriptorsByNamespace(byte[])} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #listTableDescriptorsByNamespace(byte[])} */ @Deprecated - HTableDescriptor[] listTableDescriptorsByNamespace(String name) - throws IOException; + HTableDescriptor[] listTableDescriptorsByNamespace(String name) throws IOException; /** * Get list of table descriptors by namespace. @@ -1947,21 +1826,18 @@ public interface Admin extends Abortable, Closeable { /** * Get the regions of a given table. - * * @param tableName the name of the table * @return List of {@link HRegionInfo}. * @throws IOException if a remote or network exception occurs * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-17980). - * Use {@link #getRegions(TableName)}. + * (HBASE-17980). Use + * {@link #getRegions(TableName)}. */ @Deprecated - List getTableRegions(TableName tableName) - throws IOException; + List getTableRegions(TableName tableName) throws IOException; /** * Get the regions of a given table. - * * @param tableName the name of the table * @return List of {@link RegionInfo}. * @throws IOException if a remote or network exception occurs @@ -1973,45 +1849,39 @@ public interface Admin extends Abortable, Closeable { /** * Get tableDescriptors. - * * @param tableNames List of table names * @return HTD[] the read-only tableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #listTableDescriptors(List)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #listTableDescriptors(List)} */ @Deprecated - HTableDescriptor[] getTableDescriptorsByTableName(List tableNames) - throws IOException; + HTableDescriptor[] getTableDescriptorsByTableName(List tableNames) throws IOException; /** * Get tableDescriptors. - * * @param tableNames List of table names * @return returns a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ - List listTableDescriptors(List tableNames) - throws IOException; + List listTableDescriptors(List tableNames) throws IOException; /** * Get tableDescriptors. - * * @param names List of table names * @return HTD[] the read-only tableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #listTableDescriptors(List)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #listTableDescriptors(List)} */ @Deprecated - HTableDescriptor[] getTableDescriptors(List names) - throws IOException; + HTableDescriptor[] getTableDescriptors(List names) throws IOException; /** * Abort a procedure. *

    * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * @param procId ID of the procedure to abort + * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if aborted, false if procedure already completed or does * not exist @@ -2026,24 +1896,22 @@ public interface Admin extends Abortable, Closeable { } /** - * Abort a procedure but does not block and wait for completion. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * - * @param procId ID of the procedure to abort + * Abort a procedure but does not block and wait for completion. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. Do not use. Usually it is ignored but if not, it can + * do more damage than good. See hbck2. + * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? - * @return true if aborted, false if procedure already completed or does not exist + * @return true if aborted, false if procedure already completed or does + * not exist * @throws IOException if a remote or network exception occurs * @deprecated since 2.1.1 and will be removed in 4.0.0. * @see HBASE-21223 */ @Deprecated - Future abortProcedureAsync( - long procId, - boolean mayInterruptIfRunning) throws IOException; + Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) + throws IOException; /** * Get procedures. @@ -2061,13 +1929,12 @@ public interface Admin extends Abortable, Closeable { /** * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file. - * * Note that the actual rolling of the log writer is asynchronous and may not be complete when - * this method returns. As a side effect of this call, the named region server may schedule - * store flushes at the request of the wal. - * + * this method returns. As a side effect of this call, the named region server may schedule store + * flushes at the request of the wal. * @param serverName The servername of the regionserver. - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network + * exception occurs * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException */ void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException; @@ -2077,8 +1944,8 @@ public interface Admin extends Abortable, Closeable { * @return an array of master coprocessors * @throws IOException if a remote or network exception occurs * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames() - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #getMasterCoprocessorNames()} + * @deprecated since 2.0 version and will be removed in 3.0 version. 
use + * {@link #getMasterCoprocessorNames()} */ @Deprecated default String[] getMasterCoprocessors() throws IOException { @@ -2092,14 +1959,12 @@ public interface Admin extends Abortable, Closeable { * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames() */ default List getMasterCoprocessorNames() throws IOException { - return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)) - .getMasterCoprocessorNames(); + return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)).getMasterCoprocessorNames(); } /** * Get the current compaction state of a table. It could be in a major compaction, a minor * compaction, both, or none. - * * @param tableName table to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -2108,19 +1973,17 @@ public interface Admin extends Abortable, Closeable { /** * Get the current compaction state of a table. It could be in a compaction, or none. - * - * @param tableName table to examine + * @param tableName table to examine * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @return the current compaction state * @throws IOException if a remote or network exception occurs */ - CompactionState getCompactionState(TableName tableName, - CompactType compactType) throws IOException; + CompactionState getCompactionState(TableName tableName, CompactType compactType) + throws IOException; /** * Get the current compaction state of region. It could be in a major compaction, a minor * compaction, both, or none. - * * @param regionName region to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -2128,11 +1991,8 @@ public interface Admin extends Abortable, Closeable { CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed table - * - * The timestamp of the oldest HFile resulting from a major compaction of that table, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed table The timestamp of the oldest + * HFile resulting from a major compaction of that table, or 0 if no such HFile could be found. * @param tableName table to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -2140,11 +2000,9 @@ public interface Admin extends Abortable, Closeable { long getLastMajorCompactionTimestamp(TableName tableName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed region. - * - * The timestamp of the oldest HFile resulting from a major compaction of that region, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed region. The timestamp of the + * oldest HFile resulting from a major compaction of that region, or 0 if no such HFile could be + * found. * @param regionName region to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -2160,13 +2018,15 @@ public interface Admin extends Abortable, Closeable { * naming. Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. 
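The compaction javadoc above covers per-table state queries and the last major compaction timestamp. A brief sketch (illustrative only), with a placeholder table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("demo_table"); // hypothetical table name
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Major, minor, both, or none, as described in the javadoc above.
      CompactionState state = admin.getCompactionState(table);
      // 0 if no HFile produced by a major compaction could be found for the table.
      long lastMajor = admin.getLastMajorCompactionTimestamp(table);
      System.out.println(table + ": compaction=" + state + ", lastMajorCompaction=" + lastMajor);
    }
  }
}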
* @param snapshotName name of the snapshot to be created - * @param tableName name of the table for which snapshot is created - * @throws IOException if a remote or network exception occurs + * @param tableName name of the table for which snapshot is created + * @throws IOException if a remote or network + * exception occurs * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is + * formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName) - throws IOException, SnapshotCreationException, IllegalArgumentException { + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(snapshotName, tableName, SnapshotType.FLUSH); } @@ -2177,16 +2037,16 @@ public interface Admin extends Abortable, Closeable { * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in * HBase. * @param snapshotName name of the snapshot to be created - * @param tableName name of the table for which snapshot is created - * @throws IOException if a remote or network exception occurs + * @param tableName name of the table for which snapshot is created + * @throws IOException if a remote or network exception occurs * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly * @deprecated since 2.3.0, will be removed in 3.0.0. Use {@link #snapshot(String, TableName)} * instead. */ @Deprecated default void snapshot(byte[] snapshotName, TableName tableName) - throws IOException, SnapshotCreationException, IllegalArgumentException { + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(Bytes.toString(snapshotName), tableName); } @@ -2197,27 +2057,26 @@ public interface Admin extends Abortable, Closeable { * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot - * @param type type of snapshot to take - * @throws IOException we fail to reach the master + * snapshots stored on the cluster + * @param tableName name of the table to snapshot + * @param type type of snapshot to take + * @throws IOException we fail to reach the master * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName, SnapshotType type) - throws IOException, SnapshotCreationException, IllegalArgumentException { + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, type)); } /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. 
Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. - * + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. Snapshot can + * live with ttl seconds. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other * snapshots stored on the cluster * @param tableName name of the table to snapshot @@ -2228,20 +2087,19 @@ public interface Admin extends Abortable, Closeable { * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName, SnapshotType type, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, type, snapshotProps)); } /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. - * + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. Snapshot can + * live with ttl seconds. * @param snapshotName name to give the snapshot on the filesystem. 
Must be unique from all other * snapshots stored on the cluster * @param tableName name of the table to snapshot @@ -2250,9 +2108,8 @@ public interface Admin extends Abortable, Closeable { * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - default void snapshot(String snapshotName, TableName tableName, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + default void snapshot(String snapshotName, TableName tableName, Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, SnapshotType.FLUSH, snapshotProps)); } @@ -2268,28 +2125,28 @@ public interface Admin extends Abortable, Closeable { * {@link #snapshot(byte[], org.apache.hadoop.hbase.TableName)} unless you are sure about the type * of snapshot that you want to take. * @param snapshot snapshot to take - * @throws IOException or we lose contact with the master. + * @throws IOException or we lose contact with the master. * @throws SnapshotCreationException if snapshot failed to be taken - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ void snapshot(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException, IllegalArgumentException; + throws IOException, SnapshotCreationException, IllegalArgumentException; /** * Take a snapshot without waiting for the server to complete that snapshot (asynchronous) Only a * single snapshot should be taken at a time, or results may be undefined. - * * @param snapshot snapshot to take - * @throws IOException if the snapshot did not succeed or we lose contact with the master. + * @throws IOException if the snapshot did not succeed or we lose contact with the + * master. * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #snapshotAsync(SnapshotDescription)} instead. + * {@link #snapshotAsync(SnapshotDescription)} instead. */ @Deprecated @SuppressWarnings("FutureReturnValueIgnored") default void takeSnapshotAsync(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException { + throws IOException, SnapshotCreationException { snapshotAsync(snapshot); } @@ -2297,31 +2154,34 @@ public interface Admin extends Abortable, Closeable { * Take a snapshot without waiting for the server to complete that snapshot (asynchronous) Only a * single snapshot should be taken at a time, or results may be undefined. * @param snapshot snapshot to take - * @throws IOException if the snapshot did not succeed or we lose contact with the master. + * @throws IOException if the snapshot did not succeed or we lose contact with the + * master. * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ Future snapshotAsync(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException; + throws IOException, SnapshotCreationException; /** - * Check the current state of the passed snapshot. 
There are three possible states:

      - *
-   * <ol>
-   * <li>running - returns false</li>
-   * <li>finished - returns true</li>
-   * <li>finished with error - throws the exception that caused the snapshot to fail</li>
-   * </ol>
-   * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
-   * run/started since the snapshot you are checking, you will receive an {@link
-   * org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
-   *
+   * Check the current state of the passed snapshot. There are three possible states:
+   * <ol>
+   * <li>running - returns false</li>
+   * <li>finished - returns true</li>
+   * <li>finished with error - throws the exception that caused the snapshot to fail</li>
+   * </ol>
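The three states listed above are exactly what isSnapshotFinished(SnapshotDescription) reports once a snapshot has been submitted with snapshotAsync. A polling sketch (not part of the patch), using placeholder snapshot and table names:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotPollingExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("demo_table"); // hypothetical table name
    SnapshotDescription snapshot =
      new SnapshotDescription("demo_snapshot", table, SnapshotType.FLUSH);
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Kick off the snapshot without blocking on its completion.
      admin.snapshotAsync(snapshot);
      // Poll until it reaches the finished state; a failed snapshot throws here instead.
      while (!admin.isSnapshotFinished(snapshot)) {
        TimeUnit.SECONDS.sleep(1);
      }
      System.out.println("Snapshot " + snapshot.getName() + " is complete");
    }
  }
}

Remember that the cluster only tracks the most recent snapshot, so polling a stale description may raise UnknownSnapshotException.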
    + * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been + * run/started since the snapshot you are checking, you will receive an + * {@link org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}. * @param snapshot description of the snapshot to check * @return true if the snapshot is completed, false if the snapshot is still - * running - * @throws IOException if we have a network issue - * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed + * running + * @throws IOException if we have a network issue + * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is - * unknown + * unknown */ boolean isSnapshotFinished(SnapshotDescription snapshot) - throws IOException, HBaseSnapshotException, UnknownSnapshotException; + throws IOException, HBaseSnapshotException, UnknownSnapshotException; /** * Restore the specified snapshot on the original table. (The table must be disabled) If the @@ -2330,10 +2190,12 @@ public interface Admin extends Abortable, Closeable { * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore * completes without problem the failsafe snapshot is deleted. * @param snapshotName name of the snapshot to restore - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network + * exception occurs * @throws org.apache.hadoop.hbase.snapshot.RestoreSnapshotException if snapshot failed to be - * restored - * @throws IllegalArgumentException if the restore request is formatted incorrectly + * restored + * @throws IllegalArgumentException if the restore request is + * formatted incorrectly * @deprecated since 2.3.0, will be removed in 3.0.0. Use {@link #restoreSnapshot(String)} * instead. */ @@ -2349,7 +2211,7 @@ public interface Admin extends Abortable, Closeable { * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore * completes without problem the failsafe snapshot is deleted. * @param snapshotName name of the snapshot to restore - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ @@ -2362,7 +2224,7 @@ public interface Admin extends Abortable, Closeable { * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore * completes without problem the failsafe snapshot is deleted. * @param snapshotName name of the snapshot to restore - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @return the result of the async restore snapshot. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. @@ -2371,7 +2233,7 @@ public interface Admin extends Abortable, Closeable { */ @Deprecated Future restoreSnapshotAsync(String snapshotName) - throws IOException, RestoreSnapshotException; + throws IOException, RestoreSnapshotException; /** * Restore the specified snapshot on the original table. (The table must be disabled) If @@ -2380,9 +2242,9 @@ public interface Admin extends Abortable, Closeable { * be restored. 
If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly * @deprecated since 2.3.0, will be removed in 3.0.0. Use @@ -2390,7 +2252,7 @@ public interface Admin extends Abortable, Closeable { */ @Deprecated default void restoreSnapshot(byte[] snapshotName, boolean takeFailSafeSnapshot) - throws IOException, RestoreSnapshotException { + throws IOException, RestoreSnapshotException { restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot); } @@ -2401,14 +2263,14 @@ public interface Admin extends Abortable, Closeable { * be restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) - throws IOException, RestoreSnapshotException { + throws IOException, RestoreSnapshotException { restoreSnapshot(snapshotName, takeFailSafeSnapshot, false); } @@ -2419,22 +2281,22 @@ public interface Admin extends Abortable, Closeable { * be restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken - * @param restoreAcl true to restore acl of snapshot - * @throws IOException if a remote or network exception occurs + * @param restoreAcl true to restore acl of snapshot + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ - void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl) throws IOException, RestoreSnapshotException; + void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl) + throws IOException, RestoreSnapshotException; /** * Create a new table by cloning the snapshot content. 
* @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name * @deprecated since 2.3.0, will be removed in 3.0.0. Use @@ -2442,38 +2304,37 @@ public interface Admin extends Abortable, Closeable { */ @Deprecated default void cloneSnapshot(byte[] snapshotName, TableName tableName) - throws IOException, TableExistsException, RestoreSnapshotException { + throws IOException, TableExistsException, RestoreSnapshotException { cloneSnapshot(Bytes.toString(snapshotName), tableName); } /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName) - throws IOException, TableExistsException, RestoreSnapshotException { + throws IOException, TableExistsException, RestoreSnapshotException { cloneSnapshot(snapshotName, tableName, false, null); } /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @param customSFT specify the StoreFileTracker used for the table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @param customSFT specify the StoreFileTracker used for the table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl, - String customSFT) - throws IOException, TableExistsException, RestoreSnapshotException { + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException { get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -2481,15 +2342,15 @@ public interface Admin extends Abortable, Closeable { /** * Create a new table by cloning the snapshot content. 
* @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl) - throws IOException, TableExistsException, RestoreSnapshotException { + throws IOException, TableExistsException, RestoreSnapshotException { get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -2501,41 +2362,40 @@ public interface Admin extends Abortable, Closeable { * TimeoutException in case the wait timeout was not long enough to allow the operation to * complete. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @throws IOException if a remote or network exception occurs + * @param tableName name of the table where the snapshot will be restored + * @throws IOException if a remote or network exception occurs * @throws TableExistsException if table to be cloned already exists * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit) to wait * on the operation to complete. */ default Future cloneSnapshotAsync(String snapshotName, TableName tableName) - throws IOException, TableExistsException { + throws IOException, TableExistsException { return cloneSnapshotAsync(snapshotName, tableName, false); } /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default Future cloneSnapshotAsync(String snapshotName, TableName tableName, - boolean restoreAcl) - throws IOException, TableExistsException, RestoreSnapshotException { + boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException { return cloneSnapshotAsync(snapshotName, tableName, restoreAcl, null); } /** * Create a new table by cloning the snapshot content. 
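cloneSnapshotAsync and restoreSnapshot, whose javadoc is reflowed above, differ in that a clone materializes the snapshot as a new table while a restore rewrites the original table, which must be disabled first. A sketch with placeholder names; disableTable and enableTable are ordinary Admin calls that simply do not appear in this hunk:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneRestoreSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String snapshotName = "demo_snapshot"; // hypothetical snapshot name
    TableName original = TableName.valueOf("demo_table"); // hypothetical source table
    TableName clone = TableName.valueOf("demo_table_clone"); // hypothetical clone target
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Materialize the snapshot as a brand-new table and wait for the clone to finish.
      admin.cloneSnapshotAsync(snapshotName, clone).get(10, TimeUnit.MINUTES);
      // Restoring onto the original table requires it to be disabled first;
      // takeFailSafeSnapshot=true keeps a failsafe snapshot around in case the restore fails.
      admin.disableTable(original);
      admin.restoreSnapshot(snapshotName, true);
      admin.enableTable(original);
    }
  }
}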
* @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @param customSFT specify the StroreFileTracker used for the table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @param customSFT specify the StroreFileTracker used for the table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ @@ -2544,66 +2404,66 @@ public interface Admin extends Abortable, Closeable { /** * Execute a distributed procedure on a cluster. - * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure * @throws IOException if a remote or network exception occurs */ void execProcedure(String signature, String instance, Map props) - throws IOException; + throws IOException; /** * Execute a distributed procedure on a cluster. - * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure * @return data returned after procedure execution. null if no return data. * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #execProcedureWithReturn(String, String, Map)} } instead. + * {@link #execProcedureWithReturn(String, String, Map)} } instead. */ @Deprecated default byte[] execProcedureWithRet(String signature, String instance, Map props) - throws IOException { + throws IOException { return execProcedureWithReturn(signature, instance, props); } /** * Execute a distributed procedure on a cluster. * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure * @return data returned after procedure execution. 
null if no return data. * @throws IOException if a remote or network exception occurs */ byte[] execProcedureWithReturn(String signature, String instance, Map props) - throws IOException; + throws IOException; /** - * Check the current state of the specified procedure. There are three possible states:
      - *
-   * <ol>
-   * <li>running - returns false</li>
-   * <li>finished - returns true</li>
-   * <li>finished with error - throws the exception that caused the procedure to fail</li>
-   * </ol>
    - * + * Check the current state of the specified procedure. There are three possible states: + *
      + *
+   * <ol>
+   * <li>running - returns false</li>
+   * <li>finished - returns true</li>
+   * <li>finished with error - throws the exception that caused the procedure to fail</li>
+   * </ol>
    * @param signature The signature that uniquely identifies a procedure - * @param instance The instance name of the procedure - * @param props Property/Value pairs of properties passing to the procedure - * @return true if the specified procedure is finished successfully, false if it is still running + * @param instance The instance name of the procedure + * @param props Property/Value pairs of properties passing to the procedure + * @return true if the specified procedure is finished successfully, + * false if it is still running * @throws IOException if the specified procedure finished with error */ boolean isProcedureFinished(String signature, String instance, Map props) - throws IOException; + throws IOException; /** * List completed snapshots. - * * @return a list of snapshot descriptors for completed snapshots * @throws IOException if a network error occurs */ @@ -2611,19 +2471,17 @@ public interface Admin extends Abortable, Closeable { /** * List all the completed snapshots matching the given regular expression. - * * @param regex The regular expression to match against * @return list of SnapshotDescription * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listSnapshots(Pattern)} instead. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listSnapshots(Pattern)} instead. */ @Deprecated List listSnapshots(String regex) throws IOException; /** * List all the completed snapshots matching the given pattern. - * * @param pattern The compiled regular expression to match against * @return list of SnapshotDescription * @throws IOException if a remote or network exception occurs @@ -2633,31 +2491,30 @@ public interface Admin extends Abortable, Closeable { /** * List all the completed snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNameRegex The table name regular expression to match against + * @param tableNameRegex The table name regular expression to match against * @param snapshotNameRegex The snapshot name regular expression to match against * @return list of completed SnapshotDescription * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableSnapshots(Pattern, Pattern)} instead. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableSnapshots(Pattern, Pattern)} instead. */ @Deprecated - List listTableSnapshots(String tableNameRegex, - String snapshotNameRegex) throws IOException; + List listTableSnapshots(String tableNameRegex, String snapshotNameRegex) + throws IOException; /** * List all the completed snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNamePattern The compiled table name regular expression to match against + * @param tableNamePattern The compiled table name regular expression to match against * @param snapshotNamePattern The compiled snapshot name regular expression to match against * @return list of completed SnapshotDescription * @throws IOException if a remote or network exception occurs */ List listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) throws IOException; + Pattern snapshotNamePattern) throws IOException; /** * Delete an existing snapshot. 
- * * @param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs * @deprecated Since 2.2.0. Will be removed in 3.0.0. Use {@link #deleteSnapshot(String)} instead. @@ -2667,7 +2524,6 @@ public interface Admin extends Abortable, Closeable { /** * Delete an existing snapshot. - * * @param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs */ @@ -2675,18 +2531,16 @@ public interface Admin extends Abortable, Closeable { /** * Delete existing snapshots whose names match the pattern passed. - * * @param regex The regular expression to match against * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #deleteSnapshots(Pattern)} instead. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #deleteSnapshots(Pattern)} instead. */ @Deprecated void deleteSnapshots(String regex) throws IOException; /** * Delete existing snapshots whose names match the pattern passed. - * * @param pattern pattern for names of the snapshot to match * @throws IOException if a remote or network exception occurs */ @@ -2695,11 +2549,11 @@ public interface Admin extends Abortable, Closeable { /** * Delete all existing snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNameRegex The table name regular expression to match against + * @param tableNameRegex The table name regular expression to match against * @param snapshotNameRegex The snapshot name regular expression to match against * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #deleteTableSnapshots(Pattern, Pattern)} instead. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #deleteTableSnapshots(Pattern, Pattern)} instead. */ @Deprecated void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex) throws IOException; @@ -2707,16 +2561,15 @@ public interface Admin extends Abortable, Closeable { /** * Delete all existing snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNamePattern The compiled table name regular expression to match against + * @param tableNamePattern The compiled table name regular expression to match against * @param snapshotNamePattern The compiled snapshot name regular expression to match against * @throws IOException if a remote or network exception occurs */ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) - throws IOException; + throws IOException; /** * Apply the new quota settings. - * * @param quota the quota settings * @throws IOException if a remote or network exception occurs */ @@ -2742,60 +2595,60 @@ public interface Admin extends Abortable, Closeable { /** * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active - * master.

    The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access - * a published coprocessor {@link com.google.protobuf.Service} using standard protobuf service - * invocations:

    - *
    +   * master.
    +   * 

    + * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published + * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: + *

    + *
    + * + *
        * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
        * MyCallRequest request = MyCallRequest.newBuilder()
        *     ...
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
    -   * 
    + *
    * + *
    * @return A MasterCoprocessorRpcChannel instance */ CoprocessorRpcChannel coprocessorService(); - /** - * Creates and returns a {@link com.google.protobuf.RpcChannel} instance - * connected to the passed region server. - * + * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the passed + * region server. *

    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
    * <p>
    * <pre>
    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    * MyCallRequest request = MyCallRequest.newBuilder()
    *     ...
    *     .build();
    * MyCallResponse response = service.myCall(null, request);
    * </pre>
    * + * * @param serverName the server name to which the endpoint call is made * @return A RegionServerCoprocessorRpcChannel instance */ CoprocessorRpcChannel coprocessorService(ServerName serverName); - /** - * Update the configuration and trigger an online config change - * on the regionserver. + * Update the configuration and trigger an online config change on the regionserver. * @param server : The server whose config needs to be updated. * @throws IOException if a remote or network exception occurs */ void updateConfiguration(ServerName server) throws IOException; - /** - * Update the configuration and trigger an online config change - * on all the regionservers. + * Update the configuration and trigger an online config change on all the regionservers. * @throws IOException if a remote or network exception occurs */ void updateConfiguration() throws IOException; @@ -2811,16 +2664,15 @@ public interface Admin extends Abortable, Closeable { /** * Return the set of supported security capabilities. - * @throws IOException if a remote or network exception occurs - * @throws UnsupportedOperationException + * @throws IOException if a remote or network exception occurs n */ List getSecurityCapabilities() throws IOException; /** * Turn the Split or Merge switches on or off. - * @param enabled enabled or not + * @param enabled enabled or not * @param synchronous If true, it waits until current split() call, if outstanding, - * to return. + * to return. * @param switchTypes switchType list {@link MasterSwitchType} * @return Previous switch value array * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #splitSwitch(boolean, boolean)} @@ -2829,7 +2681,7 @@ public interface Admin extends Abortable, Closeable { */ @Deprecated default boolean[] setSplitOrMergeEnabled(boolean enabled, boolean synchronous, - MasterSwitchType... switchTypes) throws IOException { + MasterSwitchType... switchTypes) throws IOException { boolean[] preValues = new boolean[switchTypes.length]; for (int i = 0; i < switchTypes.length; i++) { switch (switchTypes[i]) { @@ -2848,9 +2700,9 @@ public interface Admin extends Abortable, Closeable { /** * Turn the split switch on or off. - * @param enabled enabled or not + * @param enabled enabled or not * @param synchronous If true, it waits until current split() call, if outstanding, - * to return. + * to return. * @return Previous switch value * @throws IOException if a remote or network exception occurs */ @@ -2858,9 +2710,9 @@ public interface Admin extends Abortable, Closeable { /** * Turn the merge switch on or off. - * @param enabled enabled or not + * @param enabled enabled or not * @param synchronous If true, it waits until current merge() call, if outstanding, - * to return. + * to return. * @return Previous switch value * @throws IOException if a remote or network exception occurs */ @@ -2868,11 +2720,10 @@ public interface Admin extends Abortable, Closeable { /** * Query the current state of the switch. - * * @return true if the switch is enabled, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #isSplitEnabled()} or {@link #isMergeEnabled()} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #isSplitEnabled()} or + * {@link #isMergeEnabled()} instead. 
*/ @Deprecated default boolean isSplitOrMergeEnabled(MasterSwitchType switchType) throws IOException { @@ -2903,24 +2754,24 @@ public interface Admin extends Abortable, Closeable { /** * Add a new replication peer for replicating data to slave cluster. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer * @throws IOException if a remote or network exception occurs */ default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig) - throws IOException { + throws IOException { addReplicationPeer(peerId, peerConfig, true); } /** * Add a new replication peer for replicating data to slave cluster. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer - * @param enabled peer state, true if ENABLED and false if DISABLED + * @param enabled peer state, true if ENABLED and false if DISABLED * @throws IOException if a remote or network exception occurs */ default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) - throws IOException { + throws IOException { get(addReplicationPeerAsync(peerId, peerConfig, enabled), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -2931,13 +2782,13 @@ public interface Admin extends Abortable, Closeable { * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer * @return the result of the async operation * @throws IOException IOException if a remote or network exception occurs */ default Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig) - throws IOException { + throws IOException { return addReplicationPeerAsync(peerId, peerConfig, true); } @@ -2947,14 +2798,14 @@ public interface Admin extends Abortable, Closeable { * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer - * @param enabled peer state, true if ENABLED and false if DISABLED + * @param enabled peer state, true if ENABLED and false if DISABLED * @return the result of the async operation * @throws IOException IOException if a remote or network exception occurs */ Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, - boolean enabled) throws IOException; + boolean enabled) throws IOException; /** * Remove a peer and stop the replication. 
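A minimal usage sketch may help readers of the replication-peer methods above; it is illustrative only. The connected Admin instance `admin`, the peer id `peer_1`, and the ZooKeeper cluster key are placeholder assumptions, with the usual org.apache.hadoop.hbase client and replication imports taken as given.

static void addAndRemovePeer(Admin admin) throws IOException {
  // Build the peer configuration; the cluster key below is a placeholder.
  ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
      .setClusterKey("zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase")
      .build();
  // The third argument is the initial peer state (true = ENABLED).
  admin.addReplicationPeer("peer_1", peerConfig, true);
  // Tear the peer down again and stop replicating to it.
  admin.removeReplicationPeer("peer_1");
}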
@@ -2962,8 +2813,7 @@ public interface Admin extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs */ default void removeReplicationPeer(String peerId) throws IOException { - get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), - TimeUnit.MILLISECONDS); + get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** @@ -3030,12 +2880,12 @@ public interface Admin extends Abortable, Closeable { /** * Update the peerConfig for the specified peer. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig new config for the replication peer * @throws IOException if a remote or network exception occurs */ default void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws IOException { + throws IOException { get(updateReplicationPeerConfigAsync(peerId, peerConfig), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -3046,23 +2896,23 @@ public interface Admin extends Abortable, Closeable { * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig new config for the replication peer * @return the result of the async operation * @throws IOException IOException if a remote or network exception occurs */ Future updateReplicationPeerConfigAsync(String peerId, ReplicationPeerConfig peerConfig) - throws IOException; + throws IOException; /** * Append the replicable table column family config from the specified peer. - * @param id a short that identifies the cluster + * @param id a short that identifies the cluster * @param tableCfs A map from tableName to column family names * @throws ReplicationException if tableCfs has conflict with existing config - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs */ default void appendReplicationPeerTableCFs(String id, Map> tableCfs) - throws ReplicationException, IOException { + throws ReplicationException, IOException { if (tableCfs == null) { throw new ReplicationException("tableCfs is null"); } @@ -3074,13 +2924,13 @@ public interface Admin extends Abortable, Closeable { /** * Remove some table-cfs from config of the specified peer. - * @param id a short name that identifies the cluster + * @param id a short name that identifies the cluster * @param tableCfs A map from tableName to column family names * @throws ReplicationException if tableCfs has conflict with existing config - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs */ default void removeReplicationPeerTableCFs(String id, Map> tableCfs) - throws ReplicationException, IOException { + throws ReplicationException, IOException { if (tableCfs == null) { throw new ReplicationException("tableCfs is null"); } @@ -3106,10 +2956,10 @@ public interface Admin extends Abortable, Closeable { List listReplicationPeers(Pattern pattern) throws IOException; /** - * Mark region server(s) as decommissioned to prevent additional regions from getting - * assigned to them. Optionally unload the regions on the servers. 
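The table-cf variants just above take a map from table to column families; a hedged sketch, assuming the same placeholder peer `peer_1` and an illustrative table `demo`:

static void adjustPeerTableCFs(Admin admin) throws IOException, ReplicationException {
  // Map each table to the column families that should be replicated to the peer.
  Map<TableName, List<String>> tableCfs = new HashMap<>();
  tableCfs.put(TableName.valueOf("demo"), Arrays.asList("cf1", "cf2"));
  admin.appendReplicationPeerTableCFs("peer_1", tableCfs);
  // Removing the same mapping reverts the peer configuration.
  admin.removeReplicationPeerTableCFs("peer_1", tableCfs);
}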
If there are multiple servers - * to be decommissioned, decommissioning them at the same time can prevent wasteful region - * movements. Region unloading is asynchronous. + * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to + * them. Optionally unload the regions on the servers. If there are multiple servers to be + * decommissioned, decommissioning them at the same time can prevent wasteful region movements. + * Region unloading is asynchronous. * @param servers The list of servers to decommission. * @param offload True to offload the regions from the decommissioned servers * @throws IOException if a remote or network exception occurs @@ -3124,15 +2974,14 @@ public interface Admin extends Abortable, Closeable { List listDecommissionedRegionServers() throws IOException; /** - * Remove decommission marker from a region server to allow regions assignments. - * Load regions onto the server if a list of regions is given. Region loading is - * asynchronous. - * @param server The server to recommission. + * Remove decommission marker from a region server to allow regions assignments. Load regions onto + * the server if a list of regions is given. Region loading is asynchronous. + * @param server The server to recommission. * @param encodedRegionNames Regions to load onto the server. * @throws IOException if a remote or network exception occurs */ void recommissionRegionServer(ServerName server, List encodedRegionNames) - throws IOException; + throws IOException; /** * Find all table and column families that are replicated from this cluster @@ -3158,9 +3007,8 @@ public interface Admin extends Abortable, Closeable { /** * Clear compacting queues on a regionserver. * @param serverName the region server name - * @param queues the set of queue name - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException + * @param queues the set of queue name + * @throws IOException if a remote or network exception occurs n */ void clearCompactionQueues(ServerName serverName, Set queues) throws IOException, InterruptedException; @@ -3184,13 +3032,13 @@ public interface Admin extends Abortable, Closeable { /** * Create a new table by cloning the existent table schema. - * @param tableName name of the table to be cloned - * @param newTableName name of the new table where the table will be created + * @param tableName name of the table to be cloned + * @param newTableName name of the new table where the table will be created * @param preserveSplits True if the splits should be preserved * @throws IOException if a remote or network exception occurs */ void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits) - throws IOException; + throws IOException; /** * Switch the rpc throttle enable state. @@ -3208,8 +3056,8 @@ public interface Admin extends Abortable, Closeable { boolean isRpcThrottleEnabled() throws IOException; /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. 
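Since decommissioning and recommissioning are described across several methods here, a short illustrative fragment (the server name string is a made-up host,port,startcode triple):

static void drainAndRestore(Admin admin) throws IOException {
  ServerName rs = ServerName.valueOf("host1.example.com,16020,1589793181700");
  // Mark the server as decommissioned and offload its regions; unloading is asynchronous.
  admin.decommissionRegionServers(Collections.singletonList(rs), true);
  System.out.println("currently drained: " + admin.listDecommissionedRegionServers());
  // Clear the marker so regions can be assigned again; an empty list loads nothing back.
  admin.recommissionRegionServer(rs, Collections.emptyList());
}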
* @return Previous exceed throttle enabled value * @throws IOException if a remote or network exception occurs @@ -3226,8 +3074,8 @@ public interface Admin extends Abortable, Closeable { * Fetches the observed {@link SpaceQuotaSnapshotView}s observed by a RegionServer. * @throws IOException if a remote or network exception occurs */ - Map getRegionServerSpaceQuotaSnapshots( - ServerName serverName) throws IOException; + Map + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException; /** * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has @@ -3245,10 +3093,10 @@ public interface Admin extends Abortable, Closeable { /** * Grants user specific permissions - * @param userPermission user name and the specific permission + * @param userPermission user name and the specific permission * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. + * previous granted permissions. otherwise, it'll merge with + * previous granted permissions. * @throws IOException if a remote or network exception occurs */ void grant(UserPermission userPermission, boolean mergeExistingPermissions) throws IOException; @@ -3263,22 +3111,22 @@ public interface Admin extends Abortable, Closeable { /** * Get the global/namespace/table permissions for user * @param getUserPermissionsRequest A request contains which user, global, namespace or table - * permissions needed + * permissions needed * @return The user and permission list * @throws IOException if a remote or network exception occurs */ List getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) - throws IOException; + throws IOException; /** * Check if the user has specific permissions - * @param userName the user name + * @param userName the user name * @param permissions the specific permission list * @return True if user has the specific permissions * @throws IOException if a remote or network exception occurs */ List hasUserPermissions(String userName, List permissions) - throws IOException; + throws IOException; /** * Check if call user has specific permissions @@ -3292,40 +3140,34 @@ public interface Admin extends Abortable, Closeable { /** * Turn on or off the auto snapshot cleanup based on TTL. - * - * @param on Set to true to enable, false to disable. + * @param on Set to true to enable, false to disable. * @param synchronous If true, it waits until current snapshot cleanup is completed, - * if outstanding. + * if outstanding. * @return Previous auto snapshot cleanup value * @throws IOException if a remote or network exception occurs */ - boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) - throws IOException; + boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) throws IOException; /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, - * false otherwise. + * @return true if the auto snapshot cleanup is enabled, false + * otherwise. 
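For the ACL methods (grant, hasUserPermissions) reformatted above, a sketch assuming the 2.x Permission/UserPermission builder API and made-up user `bob` and table `demo`:

static void grantAndCheck(Admin admin) throws IOException {
  Permission readCf = Permission.newBuilder(TableName.valueOf("demo"))
      .withFamily(Bytes.toBytes("cf"))
      .withActions(Permission.Action.READ)
      .build();
  // false: replace rather than merge any previously granted permissions for this user.
  admin.grant(new UserPermission("bob", readCf), false);
  List<Boolean> allowed = admin.hasUserPermissions("bob", Collections.singletonList(readCf));
  System.out.println("bob can read demo:cf -> " + allowed.get(0));
}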
* @throws IOException if a remote or network exception occurs */ boolean isSnapshotCleanupEnabled() throws IOException; - /** - * Retrieves online slow/large RPC logs from the provided list of - * RegionServers - * - * @param serverNames Server names to get slowlog responses from + * Retrieves online slow/large RPC logs from the provided list of RegionServers + * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided (determines slow / large RPC logs) * @return online slowlog response list * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. */ @Deprecated default List getSlowLogResponses(final Set serverNames, - final LogQueryFilter logQueryFilter) throws IOException { + final LogQueryFilter logQueryFilter) throws IOException { String logType; if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) { logType = "LARGE_LOG"; @@ -3338,40 +3180,34 @@ public interface Admin extends Abortable, Closeable { filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - List logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); + List logEntries = getLogEntries(serverNames, logType, ServerType.REGION_SERVER, + logQueryFilter.getLimit(), filterParams); return logEntries.stream().map(logEntry -> (OnlineLogRecord) logEntry) .collect(Collectors.toList()); } /** - * Clears online slow/large RPC logs from the provided list of - * RegionServers - * + * Clears online slow/large RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer * @throws IOException if a remote or network exception occurs */ - List clearSlowLogResponses(final Set serverNames) - throws IOException; - + List clearSlowLogResponses(final Set serverNames) throws IOException; /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. - * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. - * @param logType string representing type of log records - * @param serverType enum for server type: HMaster or RegionServer - * @param limit put a limit to list of records that server should send in response + * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of + * servertype=MASTER, logs will only come from the currently active master. 
+ * @param logType string representing type of log records + * @param serverType enum for server type: HMaster or RegionServer + * @param limit put a limit to list of records that server should send in response * @param filterParams additional filter params * @return Log entries representing online records from servers * @throws IOException if a remote or network exception occurs */ - List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) throws IOException; + List getLogEntries(Set serverNames, String logType, ServerType serverType, + int limit, Map filterParams) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java index 10933abf3cf..091024105a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Optional; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -93,10 +92,10 @@ public interface AdvancedScanResultConsumer extends ScanResultConsumerBase { /** * Indicate that we have receive some data. - * @param results the data fetched from HBase service. + * @param results the data fetched from HBase service. * @param controller used to suspend or terminate the scan. Notice that the {@code controller} - * instance is only valid within scope of onNext method. You can only call its method in - * onNext, do NOT store it and call it later outside onNext. + * instance is only valid within scope of onNext method. You can only call its + * method in onNext, do NOT store it and call it later outside onNext. */ void onNext(Result[] results, ScanController controller); @@ -113,8 +112,9 @@ public interface AdvancedScanResultConsumer extends ScanResultConsumerBase { *
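Stepping back to Admin#getLogEntries, whose reflowed javadoc closes the Admin.java hunk above, a hedged sketch; the server name, table filter and limit are illustrative, and "SLOW_LOG" mirrors the log type strings used by the deprecated default method:

static void fetchSlowLogs(Admin admin) throws IOException {
  Set<ServerName> servers =
      Collections.singleton(ServerName.valueOf("host1.example.com,16020,1589793181700"));
  Map<String, Object> filterParams = new HashMap<>();
  filterParams.put("tableName", "demo"); // same filter key the deprecated default method populates
  List<LogEntry> slowLogs =
      admin.getLogEntries(servers, "SLOW_LOG", ServerType.REGION_SERVER, 100, filterParams);
  slowLogs.forEach(entry -> System.out.println(entry));
}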

    * This method give you a chance to terminate a slow scan operation. * @param controller used to suspend or terminate the scan. Notice that the {@code controller} - * instance is only valid within the scope of onHeartbeat method. You can only call its - * method in onHeartbeat, do NOT store it and call it later outside onHeartbeat. + * instance is only valid within the scope of onHeartbeat method. You can only + * call its method in onHeartbeat, do NOT store it and call it later outside + * onHeartbeat. */ default void onHeartbeat(ScanController controller) { } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java index 8d21994c23e..3ef28308f1c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.filterCells; import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 3a08d687fbb..c83bc76f56b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -36,12 +36,12 @@ import org.slf4j.LoggerFactory; /** * Performs Append operations on a single row. *

- * This operation ensures atomicty to readers. Appends are done
- * under a single row lock, so write operations to a row are synchronized, and
- * readers are guaranteed to see this operation fully completed.
+ * This operation ensures atomicity to readers. Appends are done under a single row lock, so write
+ * operations to a row are synchronized, and readers are guaranteed to see this operation fully
+ * completed.
 *

    - * To append to a set of columns of a row, instantiate an Append object with the - * row to append to. At least one column to append must be specified using the + * To append to a set of columns of a row, instantiate an Append object with the row to append to. + * At least one column to append must be specified using the * {@link #addColumn(byte[], byte[], byte[])} method. */ @InterfaceAudience.Public @@ -53,17 +53,15 @@ public class Append extends Mutation { /** * Sets the TimeRange to be used on the Get for this append. *

    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this append, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this append, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *

    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @return this + * @param maxStamp maximum timestamp value, exclusive n */ public Append setTimeRange(long minStamp, long maxStamp) { tr = new TimeRange(minStamp, maxStamp); @@ -71,23 +69,20 @@ public class Append extends Mutation { } /** - * Gets the TimeRange used for this append. - * @return TimeRange + * Gets the TimeRange used for this append. n */ public TimeRange getTimeRange() { return this.tr; } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } /** - * @param returnResults - * True (default) if the append operation should return the results. - * A client that is not interested in the result can save network - * bandwidth setting this to false. + * n * True (default) if the append operation should return the results. A client that is not + * interested in the result can save network bandwidth setting this to false. */ @Override public Append setReturnResults(boolean returnResults) { @@ -113,6 +108,7 @@ public class Append extends Mutation { public Append(byte[] row) { this(row, 0, row.length); } + /** * Copy constructor * @param appendToCopy append to copy @@ -122,50 +118,46 @@ public class Append extends Mutation { this.tr = appendToCopy.getTimeRange(); } - /** Create a Append operation for the specified row. + /** + * Create a Append operation for the specified row. *

    * At least one column must be appended to. - * @param rowArray Makes a copy out of this buffer. - * @param rowOffset - * @param rowLength + * @param rowArray Makes a copy out of this buffer. nn */ - public Append(final byte [] rowArray, final int rowOffset, final int rowLength) { + public Append(final byte[] rowArray, final int rowOffset, final int rowLength) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); } /** - * Construct the Append with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Append with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Append(byte[] row, long ts, NavigableMap> familyMap) { + public Append(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Add the specified column and value to this Append operation. - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param value value to append to specified column - * @return this - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #addColumn(byte[], byte[], byte[])} instead + * @param value value to append to specified column n * @deprecated As of release 2.0.0, this + * will be removed in HBase 3.0.0. Use {@link #addColumn(byte[], byte[], byte[])} + * instead */ @Deprecated - public Append add(byte [] family, byte [] qualifier, byte [] value) { + public Append add(byte[] family, byte[] qualifier, byte[] value) { return this.addColumn(family, qualifier, value); } /** * Add the specified column and value to this Append operation. - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param value value to append to specified column - * @return this + * @param value value to append to specified column n */ public Append addColumn(byte[] family, byte[] qualifier, byte[] value) { KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value); @@ -173,9 +165,7 @@ public class Append extends Mutation { } /** - * Add column and value to this Append operation. - * @param cell - * @return This instance + * Add column and value to this Append operation. n * @return This instance */ @SuppressWarnings("unchecked") public Append add(final Cell cell) { @@ -211,8 +201,8 @@ public class Append extends Mutation { /** * Method for setting the Append's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Append#Append(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 
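The Append constructors and mutators covered in this hunk compose as follows; a brief sketch, assuming a connected Table named `table` and illustrative row/family/qualifier names:

static void appendSuffix(Table table) throws IOException {
  Append append = new Append(Bytes.toBytes("row1"));
  // Only cells whose timestamps fall in [minStamp, maxStamp) are considered by the implicit Get.
  append.setTimeRange(0, System.currentTimeMillis());
  append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(",entry"));
  // Skip shipping the resulting cell back to the client to save bandwidth.
  append.setReturnResults(false);
  table.append(append);
}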
Use + * {@link Append#Append(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 62c9e32f8dc..fabdf629d7b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -84,12 +84,12 @@ public interface AsyncAdmin { /** * List all the tables matching the given pattern. - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}. */ CompletableFuture> listTableDescriptors(Pattern pattern, - boolean includeSysTables); + boolean includeSysTables); /** * List specific tables including system tables. @@ -123,7 +123,7 @@ public interface AsyncAdmin { /** * List all of the names of userspace tables. - * @param pattern The regular expression to match against + * @param pattern The regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a list of table names wrapped by a {@link CompletableFuture}. */ @@ -155,19 +155,19 @@ public interface AsyncAdmin { * key of the last region of the table (the first region has a null start key and the last region * has a null end key). BigInteger math will be used to divide the key range specified into enough * segments to make the required number of total regions. - * @param desc table descriptor for table - * @param startKey beginning of key range - * @param endKey end of key range + * @param desc table descriptor for table + * @param startKey beginning of key range + * @param endKey end of key range * @param numRegions the total number of regions to create */ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, - int numRegions); + int numRegions); /** * Creates a new table with an initial set of empty regions defined by the specified split keys. - * The total number of regions created will be the number of split keys plus one. - * Note : Avoid passing empty split key. - * @param desc table descriptor for table + * The total number of regions created will be the number of split keys plus one. Note : Avoid + * passing empty split key. + * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table */ CompletableFuture createTable(TableDescriptor desc, byte[][] splitKeys); @@ -181,7 +181,7 @@ public interface AsyncAdmin { /** * Change the store file tracker of the given table. * @param tableName the table you want to change - * @param dstSFT the destination store file tracker + * @param dstSFT the destination store file tracker */ CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT); @@ -193,7 +193,7 @@ public interface AsyncAdmin { /** * Truncate a table. - * @param tableName name of table to truncate + * @param tableName name of table to truncate * @param preserveSplits True if the splits should be preserved */ CompletableFuture truncateTable(TableName tableName, boolean preserveSplits); @@ -205,8 +205,7 @@ public interface AsyncAdmin { CompletableFuture enableTable(TableName tableName); /** - * Disable a table. The table has to be in enabled state for it to be disabled. 
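AsyncAdmin#createTable with explicit split keys, as documented above, can be exercised like this; a sketch using the standard descriptor builders, with `asyncAdmin`, the table name and the split points as assumptions:

static void createPreSplitTable(AsyncAdmin asyncAdmin) {
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .build();
  // Two split keys yield three regions: (-inf,"g"), ["g","p"), ["p",+inf). Avoid empty split keys.
  byte[][] splitKeys = { Bytes.toBytes("g"), Bytes.toBytes("p") };
  asyncAdmin.createTable(desc, splitKeys).join();
}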
- * @param tableName + * Disable a table. The table has to be in enabled state for it to be disabled. n */ CompletableFuture disableTable(TableName tableName); @@ -245,32 +244,31 @@ public interface AsyncAdmin { /** * Add a column family to an existing table. - * @param tableName name of the table to add column family to + * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added */ - CompletableFuture addColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily); + CompletableFuture addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily); /** * Delete a column family from a table. - * @param tableName name of table + * @param tableName name of table * @param columnFamily name of column family to be deleted */ CompletableFuture deleteColumnFamily(TableName tableName, byte[] columnFamily); /** * Modify an existing column family on a table. - * @param tableName name of table + * @param tableName name of table * @param columnFamily new column family descriptor to use */ CompletableFuture modifyColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily); + ColumnFamilyDescriptor columnFamily); /** * Change the store file tracker of the given table's given family. * @param tableName the table you want to change - * @param family the family you want to change - * @param dstSFT the destination store file tracker + * @param family the family you want to change + * @param dstSFT the destination store file tracker */ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT); @@ -329,9 +327,9 @@ public interface AsyncAdmin { CompletableFuture flush(TableName tableName); /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. - * @param tableName table to flush + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. + * @param tableName table to flush * @param columnFamily column family within a table */ CompletableFuture flush(TableName tableName, byte[] columnFamily); @@ -344,9 +342,9 @@ public interface AsyncAdmin { /** * Flush a column family within a region. - * @param regionName region to flush + * @param regionName region to flush * @param columnFamily column family within a region. If not present, flush the region's all - * column families. + * column families. */ CompletableFuture flushRegion(byte[] regionName, byte[] columnFamily); @@ -358,8 +356,8 @@ public interface AsyncAdmin { /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to compact */ default CompletableFuture compact(TableName tableName) { @@ -369,11 +367,10 @@ public interface AsyncAdmin { /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. - * @param tableName table to compact + * operation. 
Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * @param tableName table to compact * @param columnFamily column family within a table. If not present, compact the table's all - * column families. + * column families. */ default CompletableFuture compact(TableName tableName, byte[] columnFamily) { return compact(tableName, columnFamily, CompactType.NORMAL); @@ -381,10 +378,10 @@ public interface AsyncAdmin { /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. - * @param tableName table to compact + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. + * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture compact(TableName tableName, CompactType compactType); @@ -392,15 +389,14 @@ public interface AsyncAdmin { /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction type. - * @param tableName table to compact + * @param tableName table to compact * @param columnFamily column family within a table - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture compact(TableName tableName, byte[] columnFamily, - CompactType compactType); + CompactType compactType); /** * Compact an individual region. When the returned CompletableFuture is done, it only means the @@ -413,16 +409,16 @@ public interface AsyncAdmin { * Compact a column family within a region. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact * operation. - * @param regionName region to compact + * @param regionName region to compact * @param columnFamily column family within a region. If not present, compact the region's all - * column families. + * column families. */ CompletableFuture compactRegion(byte[] regionName, byte[] columnFamily); /** * Major compact a table. When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to major compact */ default CompletableFuture majorCompact(TableName tableName) { @@ -432,12 +428,11 @@ public interface AsyncAdmin { /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. 
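Because the compact/majorCompact futures above complete when the request is accepted rather than when compaction finishes, callers typically poll the compaction state afterwards; an illustrative sketch:

static void requestMajorCompaction(AsyncAdmin asyncAdmin) {
  TableName tn = TableName.valueOf("demo");
  asyncAdmin.majorCompact(tn, Bytes.toBytes("cf"))
      // The future only signals that the request was sent; query the state separately.
      .thenCompose(v -> asyncAdmin.getCompactionState(tn, CompactType.NORMAL))
      .thenAccept(state -> System.out.println("compaction state after request: " + state))
      .join();
}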
- * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction. type. - * @param tableName table to major compact + * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact the table's all - * column families. + * column families. */ default CompletableFuture majorCompact(TableName tableName, byte[] columnFamily) { return majorCompact(tableName, columnFamily, CompactType.NORMAL); @@ -445,10 +440,10 @@ public interface AsyncAdmin { /** * Major compact a table. When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. - * @param tableName table to major compact + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. + * @param tableName table to major compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture majorCompact(TableName tableName, CompactType compactType); @@ -456,15 +451,14 @@ public interface AsyncAdmin { /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. - * @param tableName table to major compact + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact the table's all - * column families. - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * column families. + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture majorCompact(TableName tableName, byte[] columnFamily, - CompactType compactType); + CompactType compactType); /** * Major compact a region. When the returned CompletableFuture is done, it only means the compact @@ -477,9 +471,9 @@ public interface AsyncAdmin { * Major compact a column family within region. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact * operation. - * @param regionName region to major compact + * @param regionName region to major compact * @param columnFamily column family within a region. If not present, major compact the region's - * all column families. + * all column families. */ CompletableFuture majorCompactRegion(byte[] regionName, byte[] columnFamily); @@ -510,9 +504,9 @@ public interface AsyncAdmin { * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code drainMerges} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * @param enabled enabled or not + * @param enabled enabled or not * @param drainMerges If true, it waits until current merge() call, if outstanding, - * to return. + * to return. 
* @return Previous switch value wrapped by a {@link CompletableFuture} */ CompletableFuture mergeSwitch(boolean enabled, boolean drainMerges); @@ -539,9 +533,9 @@ public interface AsyncAdmin { * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code drainSplits} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * @param enabled enabled or not + * @param enabled enabled or not * @param drainSplits If true, it waits until current split() call, if outstanding, - * to return. + * to return. * @return Previous switch value wrapped by a {@link CompletableFuture} */ CompletableFuture splitSwitch(boolean enabled, boolean drainSplits); @@ -557,22 +551,22 @@ public interface AsyncAdmin { * Merge two regions. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent - * regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent + * regions * @deprecated since 2.3.0 and will be removed in 4.0.0.Use {@link #mergeRegions(List, boolean)} * instead. */ @Deprecated default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB, - boolean forcible) { + boolean forcible) { return mergeRegions(Arrays.asList(nameOfRegionA, nameOfRegionB), forcible); } /** * Merge multiple regions (>=2). * @param nameOfRegionsToMerge encoded or full name of daughter regions - * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent - * regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two + * adjacent regions */ CompletableFuture mergeRegions(List nameOfRegionsToMerge, boolean forcible); @@ -590,7 +584,7 @@ public interface AsyncAdmin { /** * Split a table. - * @param tableName table to split + * @param tableName table to split * @param splitPoint the explicit position to split on */ CompletableFuture split(TableName tableName, byte[] splitPoint); @@ -599,7 +593,7 @@ public interface AsyncAdmin { * Split an individual region. * @param regionName region to split * @param splitPoint the explicit position to split on. If not present, it will decide by region - * server. + * server. */ CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint); @@ -618,12 +612,11 @@ public interface AsyncAdmin { * regionserver chosen at random. Region could be reassigned back to the same server. Use * {@link #move(byte[], ServerName)} if you want to control the region movement. * @param regionName Encoded or full name of region to unassign. Will clear any existing - * RegionPlan if one found. - * @param forcible If true, force unassign (Will remove region from regions-in-transition too if - * present. If results in double assignment use hbck -fix to resolve. To be used by - * experts). - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * RegionPlan if one found. + * @param forcible If true, force unassign (Will remove region from regions-in-transition too if + * present. If results in double assignment use hbck -fix to resolve. To be used + * by experts). + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. * @see HBASE-24875 */ @Deprecated @@ -649,10 +642,11 @@ public interface AsyncAdmin { /** * Move the region r to dest. 
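The merge/split methods in this hunk can be chained off getRegions; a sketch that assumes the table already has at least two regions and uses an arbitrary split point:

static void mergeThenSplit(AsyncAdmin asyncAdmin) {
  TableName tn = TableName.valueOf("demo");
  asyncAdmin.getRegions(tn).thenCompose(regions -> {
    // Merge the first two regions; false means only adjacent regions may be merged.
    List<byte[]> toMerge =
        Arrays.asList(regions.get(0).getRegionName(), regions.get(1).getRegionName());
    return asyncAdmin.mergeRegions(toMerge, false);
  }).join();
  // Ask for a split of the table at an explicit point.
  asyncAdmin.split(tn, Bytes.toBytes("m")).join();
}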
- * @param regionName Encoded or full name of region to move. + * @param regionName Encoded or full name of region to move. * @param destServerName The servername of the destination regionserver. If not present, we'll - * assign to a random server. A server name is made of host, port and startcode. Here is - * an example: host187.example.com,60020,1289493121758 + * assign to a random server. A server name is made of host, port and + * startcode. Here is an example: + * host187.example.com,60020,1289493121758 */ CompletableFuture move(byte[] regionName, ServerName destServerName); @@ -671,22 +665,22 @@ public interface AsyncAdmin { /** * Add a new replication peer for replicating data to slave cluster - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication slave cluster */ default CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig) { + ReplicationPeerConfig peerConfig) { return addReplicationPeer(peerId, peerConfig, true); } /** * Add a new replication peer for replicating data to slave cluster - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication slave cluster - * @param enabled peer state, true if ENABLED and false if DISABLED + * @param enabled peer state, true if ENABLED and false if DISABLED */ - CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled); + CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled); /** * Remove a peer and stop the replication @@ -715,27 +709,27 @@ public interface AsyncAdmin { /** * Update the peerConfig for the specified peer - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig new config for the peer */ CompletableFuture updateReplicationPeerConfig(String peerId, - ReplicationPeerConfig peerConfig); + ReplicationPeerConfig peerConfig); /** * Append the replicable table-cf config of the specified peer - * @param peerId a short that identifies the cluster + * @param peerId a short that identifies the cluster * @param tableCfs A map from tableName to column family names */ CompletableFuture appendReplicationPeerTableCFs(String peerId, - Map> tableCfs); + Map> tableCfs); /** * Remove some table-cfs from config of the specified peer - * @param peerId a short name that identifies the cluster + * @param peerId a short name that identifies the cluster * @param tableCfs A map from tableName to column family names */ CompletableFuture removeReplicationPeerTableCFs(String peerId, - Map> tableCfs); + Map> tableCfs); /** * Return a list of replication peers. @@ -780,7 +774,7 @@ public interface AsyncAdmin { * naming. Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name of the snapshot to be created - * @param tableName name of the table for which snapshot is created + * @param tableName name of the table for which snapshot is created */ default CompletableFuture snapshot(String snapshotName, TableName tableName) { return snapshot(snapshotName, tableName, SnapshotType.FLUSH); @@ -794,12 +788,12 @@ public interface AsyncAdmin { * naming. Snapshot names follow the same naming constraints as tables in HBase. 
See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot - * @param type type of snapshot to take + * snapshots stored on the cluster + * @param tableName name of the table to snapshot + * @param type type of snapshot to take */ default CompletableFuture snapshot(String snapshotName, TableName tableName, - SnapshotType type) { + SnapshotType type) { return snapshot(new SnapshotDescription(snapshotName, tableName, type)); } @@ -851,11 +845,11 @@ public interface AsyncAdmin { * restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken */ default CompletableFuture restoreSnapshot(String snapshotName, - boolean takeFailSafeSnapshot) { + boolean takeFailSafeSnapshot) { return restoreSnapshot(snapshotName, takeFailSafeSnapshot, false); } @@ -866,17 +860,17 @@ public interface AsyncAdmin { * restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken - * @param restoreAcl true to restore acl of snapshot + * @param restoreAcl true to restore acl of snapshot */ CompletableFuture restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl); + boolean restoreAcl); /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored + * @param tableName name of the table where the snapshot will be restored */ default CompletableFuture cloneSnapshot(String snapshotName, TableName tableName) { return cloneSnapshot(snapshotName, tableName, false); @@ -885,23 +879,23 @@ public interface AsyncAdmin { /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to restore acl of snapshot + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to restore acl of snapshot */ default CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl) { + boolean restoreAcl) { return cloneSnapshot(snapshotName, tableName, restoreAcl, null); } /** * Create a new table by cloning the snapshot content. 
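The snapshot/clone/restore trio above fits together as below; a sketch with made-up snapshot and table names, noting that restoreSnapshot expects the table to be disabled first:

static void snapshotCloneRestore(AsyncAdmin asyncAdmin) {
  TableName tn = TableName.valueOf("demo");
  asyncAdmin.snapshot("demo_snap", tn).join(); // FLUSH-type snapshot by default
  asyncAdmin.cloneSnapshot("demo_snap", TableName.valueOf("demo_clone")).join();
  // true = take a failsafe snapshot before restoring, as described above.
  asyncAdmin.disableTable(tn)
      .thenCompose(v -> asyncAdmin.restoreSnapshot("demo_snap", true))
      .join();
}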
* @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to restore acl of snapshot - * @param customSFT specify the StroreFileTracker used for the table + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to restore acl of snapshot + * @param customSFT specify the StroreFileTracker used for the table */ CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT); + boolean restoreAcl, String customSFT); /** * List completed snapshots. @@ -928,13 +922,13 @@ public interface AsyncAdmin { /** * List all the completed snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNamePattern The compiled table name regular expression to match against + * @param tableNamePattern The compiled table name regular expression to match against * @param snapshotNamePattern The compiled snapshot name regular expression to match against * @return - returns a List of completed SnapshotDescription wrapped by a * {@link CompletableFuture} */ CompletableFuture> listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern); + Pattern snapshotNamePattern); /** * Delete an existing snapshot. @@ -962,34 +956,34 @@ public interface AsyncAdmin { /** * Delete all existing snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNamePattern The compiled table name regular expression to match against + * @param tableNamePattern The compiled table name regular expression to match against * @param snapshotNamePattern The compiled snapshot name regular expression to match against */ CompletableFuture deleteTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern); + Pattern snapshotNamePattern); /** * Execute a distributed procedure on a cluster. * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure */ CompletableFuture execProcedure(String signature, String instance, - Map props); + Map props); /** * Execute a distributed procedure on a cluster. * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure * @return data returned after procedure execution. null if no return data. */ CompletableFuture execProcedureWithReturn(String signature, String instance, - Map props); + Map props); /** * Check the current state of the specified procedure. 
There are three possible states: @@ -999,18 +993,18 @@ public interface AsyncAdmin { *

 * <li>finished with error - throws the exception that caused the procedure to fail</li>
  • * * @param signature The signature that uniquely identifies a procedure - * @param instance The instance name of the procedure - * @param props Property/Value pairs of properties passing to the procedure + * @param instance The instance name of the procedure + * @param props Property/Value pairs of properties passing to the procedure * @return true if the specified procedure is finished successfully, false if it is still running. * The value is wrapped by {@link CompletableFuture} */ CompletableFuture isProcedureFinished(String signature, String instance, - Map props); + Map props); /** - * Abort a procedure - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * @param procId ID of the procedure to abort + * Abort a procedure Do not use. Usually it is ignored but if not, it can do more damage than + * good. See hbck2. + * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if aborted, false if procedure already completed or does not exist. the value is * wrapped by {@link CompletableFuture} @@ -1033,10 +1027,10 @@ public interface AsyncAdmin { CompletableFuture getLocks(); /** - * Mark region server(s) as decommissioned to prevent additional regions from getting - * assigned to them. Optionally unload the regions on the servers. If there are multiple servers - * to be decommissioned, decommissioning them at the same time can prevent wasteful region - * movements. Region unloading is asynchronous. + * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to + * them. Optionally unload the regions on the servers. If there are multiple servers to be + * decommissioned, decommissioning them at the same time can prevent wasteful region movements. + * Region unloading is asynchronous. * @param servers The list of servers to decommission. * @param offload True to offload the regions from the decommissioned servers */ @@ -1051,11 +1045,11 @@ public interface AsyncAdmin { /** * Remove decommission marker from a region server to allow regions assignments. Load regions onto * the server if a list of regions is given. Region loading is asynchronous. - * @param server The server to recommission. + * @param server The server to recommission. * @param encodedRegionNames Regions to load onto the server. 
*/ CompletableFuture recommissionRegionServer(ServerName server, - List encodedRegionNames); + List encodedRegionNames); /** * @return cluster status wrapped by {@link CompletableFuture} @@ -1087,11 +1081,11 @@ public interface AsyncAdmin { */ default CompletableFuture> getRegionServers() { return getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)) - .thenApply(ClusterMetrics::getServersName); + .thenApply(ClusterMetrics::getServersName); } - default CompletableFuture> getRegionServers( - boolean excludeDecommissionedRS) { + default CompletableFuture> + getRegionServers(boolean excludeDecommissionedRS) { CompletableFuture> future = new CompletableFuture<>(); addListener( getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).thenApply(ClusterMetrics::getServersName), @@ -1121,7 +1115,7 @@ public interface AsyncAdmin { */ default CompletableFuture> getMasterCoprocessorNames() { return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)) - .thenApply(ClusterMetrics::getMasterCoprocessorNames); + .thenApply(ClusterMetrics::getMasterCoprocessorNames); } /** @@ -1129,8 +1123,8 @@ public interface AsyncAdmin { * @return master info port */ default CompletableFuture getMasterInfoPort() { - return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).thenApply( - ClusterMetrics::getMasterInfoPort); + return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)) + .thenApply(ClusterMetrics::getMasterInfoPort); } /** @@ -1144,8 +1138,7 @@ public interface AsyncAdmin { CompletableFuture stopMaster(); /** - * Stop the designated regionserver. - * @param serverName + * Stop the designated regionserver. n */ CompletableFuture stopRegionServer(ServerName serverName); @@ -1173,24 +1166,19 @@ public interface AsyncAdmin { CompletableFuture rollWALWriter(ServerName serverName); /** - * Clear compacting queues on a region server. - * @param serverName - * @param queues the set of queue name + * Clear compacting queues on a region server. n * @param queues the set of queue name */ CompletableFuture clearCompactionQueues(ServerName serverName, Set queues); /** - * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver. - * @param serverName - * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} + * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver. n * @return a + * list of {@link RegionMetrics} wrapped by {@link CompletableFuture} */ CompletableFuture> getRegionMetrics(ServerName serverName); /** - * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver for a table. - * @param serverName - * @param tableName - * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} + * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver for a table. nn + * * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} */ CompletableFuture> getRegionMetrics(ServerName serverName, TableName tableName); @@ -1215,12 +1203,12 @@ public interface AsyncAdmin { /** * Get the current compaction state of a table. It could be in a major compaction, a minor * compaction, both, or none. - * @param tableName table to examine + * @param tableName table to examine * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @return the current compaction state wrapped by a {@link CompletableFuture} */ CompletableFuture getCompactionState(TableName tableName, - CompactType compactType); + CompactType compactType); /** * Get the current compaction state of region. 
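Illustration only, not from this patch: because region unloading is asynchronous, a real caller would wait for the offload to finish before recommissioning; the sketch below simply chains the two calls to show the shape of the APIs.

// Sketch only: drain one server, then add it back and let regions return lazily.
static CompletableFuture<Void> drainAndRestore(AsyncAdmin admin, ServerName server) {
  return admin.decommissionRegionServers(Collections.singletonList(server), true) // offload = true
      .thenCompose(v -> admin.recommissionRegionServer(server, Collections.emptyList()));
}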
It could be in a major compaction, a minor @@ -1271,9 +1259,9 @@ public interface AsyncAdmin { * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code drainRITs} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * @param on Set to true to enable, false to disable. + * @param on Set to true to enable, false to disable. * @param drainRITs If true, it waits until current balance() call, if outstanding, - * to return. + * to return. * @return Previous balancer value wrapped by a {@link CompletableFuture}. */ CompletableFuture balancerSwitch(boolean on, boolean drainRITs); @@ -1285,8 +1273,7 @@ public interface AsyncAdmin { * {@link CompletableFuture}. */ default CompletableFuture balance() { - return balance(BalanceRequest.defaultInstance()) - .thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.defaultInstance()).thenApply(BalanceResponse::isBalancerRan); } /** @@ -1296,21 +1283,17 @@ public interface AsyncAdmin { * @param forcible whether we should force balance even if there is region in transition. * @return True if balancer ran, false otherwise. The return value will be wrapped by a * {@link CompletableFuture}. - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ default CompletableFuture balance(boolean forcible) { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(forcible) - .build() - ).thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(forcible).build()) + .thenApply(BalanceResponse::isBalancerRan); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. */ @@ -1318,8 +1301,8 @@ public interface AsyncAdmin { /** * Query the current state of the balancer. - * @return true if the balance switch is on, false otherwise. The return value will be wrapped by a - * {@link CompletableFuture}. + * @return true if the balance switch is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture}. */ CompletableFuture isBalancerEnabled(); @@ -1355,16 +1338,15 @@ public interface AsyncAdmin { CompletableFuture normalize(NormalizeTableFilterParams ntfp); /** - * Turn the cleaner chore on/off. - * @param on - * @return Previous cleaner state wrapped by a {@link CompletableFuture} + * Turn the cleaner chore on/off. n * @return Previous cleaner state wrapped by a + * {@link CompletableFuture} */ CompletableFuture cleanerChoreSwitch(boolean on); /** * Query the current state of the cleaner chore. - * @return true if cleaner chore is on, false otherwise. The return value will be wrapped by - * a {@link CompletableFuture} + * @return true if cleaner chore is on, false otherwise. 
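Illustration only, not from this patch: the boolean balance(forcible) overload above is deprecated in favour of the BalanceRequest form, which a caller can use directly, for example:

// Sketch only: make sure the balancer is on, then request one balancing round.
static CompletableFuture<BalanceResponse> enableAndBalance(AsyncAdmin admin) {
  return admin.balancerSwitch(true, /* drainRITs = */ false)
      .thenCompose(previouslyEnabled -> admin.balance(
          BalanceRequest.newBuilder().setIgnoreRegionsInTransition(false).build()));
}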
The return value will be wrapped by a + * {@link CompletableFuture} */ CompletableFuture isCleanerChoreEnabled(); @@ -1376,16 +1358,15 @@ public interface AsyncAdmin { CompletableFuture runCleanerChore(); /** - * Turn the catalog janitor on/off. - * @param on - * @return the previous state wrapped by a {@link CompletableFuture} + * Turn the catalog janitor on/off. n * @return the previous state wrapped by a + * {@link CompletableFuture} */ CompletableFuture catalogJanitorSwitch(boolean on); /** * Query on the catalog janitor state. - * @return true if the catalog janitor is on, false otherwise. The return value will be - * wrapped by a {@link CompletableFuture} + * @return true if the catalog janitor is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture} */ CompletableFuture isCatalogJanitorEnabled(); @@ -1407,16 +1388,17 @@ public interface AsyncAdmin { * channel -> xxxService.newStub(channel) * * + * * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. - * @param the type of the asynchronous stub - * @param the type of the return value + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. + * @param the type of the asynchronous stub + * @param the type of the return value * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}. * @see ServiceCaller */ CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable); + ServiceCaller callable); /** * Execute the given coprocessor call on the given region server. @@ -1429,12 +1411,13 @@ public interface AsyncAdmin { * channel -> xxxService.newStub(channel) * * - * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. + * + * @param stubMaker a delegation to the actual {@code newStub} call. + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. * @param serverName the given region server - * @param the type of the asynchronous stub - * @param the type of the return value + * @param the type of the asynchronous stub + * @param the type of the return value * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}. * @see ServiceCaller */ @@ -1446,7 +1429,7 @@ public interface AsyncAdmin { */ default CompletableFuture> listDeadServers() { return this.getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS)) - .thenApply(ClusterMetrics::getDeadServerNames); + .thenApply(ClusterMetrics::getDeadServerNames); } /** @@ -1468,26 +1451,24 @@ public interface AsyncAdmin { /** * Create a new table by cloning the existent table schema. 
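Illustration only, not from this patch: the chore switches above return the previous state, which makes a run-once-and-restore pattern easy to express.

// Sketch only: force one cleaner chore run, then put the switch back as it was.
static CompletableFuture<Boolean> runCleanerOnce(AsyncAdmin admin) {
  return admin.cleanerChoreSwitch(true)
      .thenCompose(previous -> admin.runCleanerChore()
          .thenCompose(ran -> admin.cleanerChoreSwitch(previous)
              .thenApply(ignored -> ran))); // true if the chore actually ran
}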
- * - * @param tableName name of the table to be cloned - * @param newTableName name of the new table where the table will be created + * @param tableName name of the table to be cloned + * @param newTableName name of the new table where the table will be created * @param preserveSplits True if the splits should be preserved */ - CompletableFuture cloneTableSchema(final TableName tableName, - final TableName newTableName, final boolean preserveSplits); + CompletableFuture cloneTableSchema(final TableName tableName, final TableName newTableName, + final boolean preserveSplits); /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers */ CompletableFuture> compactionSwitch(boolean switchState, - List serverNamesList); + List serverNamesList); /** * Switch the rpc throttle enabled state. @@ -1503,8 +1484,8 @@ public interface AsyncAdmin { CompletableFuture isRpcThrottleEnabled(); /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. * @return Previous exceed throttle enabled value */ @@ -1519,28 +1500,28 @@ public interface AsyncAdmin { * Fetches the observed {@link SpaceQuotaSnapshotView}s observed by a RegionServer. */ CompletableFuture> - getRegionServerSpaceQuotaSnapshots(ServerName serverName); + getRegionServerSpaceQuotaSnapshots(ServerName serverName); /** * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has * no quota information on that namespace. */ CompletableFuture - getCurrentSpaceQuotaSnapshot(String namespace); + getCurrentSpaceQuotaSnapshot(String namespace); /** * Returns the Master's view of a quota on the given {@code tableName} or null if the Master has * no quota information on that table. */ - CompletableFuture getCurrentSpaceQuotaSnapshot( - TableName tableName); + CompletableFuture + getCurrentSpaceQuotaSnapshot(TableName tableName); /** * Grants user specific permissions - * @param userPermission user name and the specific permission + * @param userPermission user name and the specific permission * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. + * previous granted permissions. otherwise, it'll merge with + * previous granted permissions. 
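Illustration only, not from this patch, and it assumes (not stated here) that the master rejects enabling the exceed-throttle quota while RPC throttling itself is off, so the sketch checks that first.

// Sketch only: allow quotas to be exceeded only when RPC throttling is active.
static CompletableFuture<Boolean> allowQuotaOverflow(AsyncAdmin admin) {
  return admin.isRpcThrottleEnabled()
      .thenCompose(throttleOn -> throttleOn
          ? admin.exceedThrottleQuotaSwitch(true)              // returns the previous value
          : CompletableFuture.completedFuture(Boolean.FALSE)); // nothing to do
}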
*/ CompletableFuture grant(UserPermission userPermission, boolean mergeExistingPermissions); @@ -1553,20 +1534,20 @@ public interface AsyncAdmin { /** * Get the global/namespace/table permissions for user * @param getUserPermissionsRequest A request contains which user, global, namespace or table - * permissions needed + * permissions needed * @return The user and permission list */ CompletableFuture> - getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest); + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest); /** * Check if the user has specific permissions - * @param userName the user name + * @param userName the user name * @param permissions the specific permission list * @return True if user has the specific permissions */ CompletableFuture> hasUserPermissions(String userName, - List permissions); + List permissions); /** * Check if call user has specific permissions @@ -1583,35 +1564,31 @@ public interface AsyncAdmin { * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code sync} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * - * @param on Set to true to enable, false to disable. - * @param sync If true, it waits until current snapshot cleanup is completed, - * if outstanding. + * @param on Set to true to enable, false to disable. + * @param sync If true, it waits until current snapshot cleanup is completed, if + * outstanding. * @return Previous auto snapshot cleanup value wrapped by a {@link CompletableFuture}. */ CompletableFuture snapshotCleanupSwitch(boolean on, boolean sync); /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, false otherwise. - * The return value will be wrapped by a {@link CompletableFuture}. + * @return true if the auto snapshot cleanup is enabled, false otherwise. The return value will be + * wrapped by a {@link CompletableFuture}. */ CompletableFuture isSnapshotCleanupEnabled(); /** - * Retrieves online slow RPC logs from the provided list of - * RegionServers - * - * @param serverNames Server names to get slowlog responses from + * Retrieves online slow RPC logs from the provided list of RegionServers + * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided * @return Online slowlog response list. The return value wrapped by a {@link CompletableFuture} - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. 
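Illustration only, not from this patch: the permission types come from org.apache.hadoop.hbase.security.access rather than this file, and the user and table names below are invented.

// Sketch only: grant READ on a table, then read back the table's permission list.
static CompletableFuture<List<UserPermission>> grantRead(AsyncAdmin admin) {
  TableName table = TableName.valueOf("demo_table");
  UserPermission permission = new UserPermission("demo_user",
      Permission.newBuilder(table).withActions(Permission.Action.READ).build());
  return admin.grant(permission, /* mergeExistingPermissions = */ true)
      .thenCompose(v -> admin.getUserPermissions(
          GetUserPermissionsRequest.newBuilder(table).build()));
}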
*/ @Deprecated - default CompletableFuture> getSlowLogResponses( - final Set serverNames, final LogQueryFilter logQueryFilter) { + default CompletableFuture> + getSlowLogResponses(final Set serverNames, final LogQueryFilter logQueryFilter) { String logType; if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) { logType = "LARGE_LOG"; @@ -1624,34 +1601,29 @@ public interface AsyncAdmin { filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - CompletableFuture> logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); - return logEntries.thenApply( - logEntryList -> logEntryList.stream().map(logEntry -> (OnlineLogRecord) logEntry) - .collect(Collectors.toList())); + CompletableFuture> logEntries = getLogEntries(serverNames, logType, + ServerType.REGION_SERVER, logQueryFilter.getLimit(), filterParams); + return logEntries.thenApply(logEntryList -> logEntryList.stream() + .map(logEntry -> (OnlineLogRecord) logEntry).collect(Collectors.toList())); } /** - * Clears online slow RPC logs from the provided list of - * RegionServers - * + * Clears online slow RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer. The return value wrapped by a {@link CompletableFuture} + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer. The return value wrapped by a {@link CompletableFuture} */ CompletableFuture> clearSlowLogResponses(final Set serverNames); /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. - * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. - * @param logType string representing type of log records - * @param serverType enum for server type: HMaster or RegionServer - * @param limit put a limit to list of records that server should send in response + * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of + * servertype=MASTER, logs will only come from the currently active master. 
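Illustration only, not from this patch: new callers can go straight to getLogEntries instead of the deprecated slow-log method; the "SLOW_LOG" literal, the limit and the filter value below are assumptions mirroring what the deprecated default builds.

// Sketch only: fetch up to 100 slow-log records from the given region servers.
static CompletableFuture<List<LogEntry>> fetchSlowLogs(AsyncAdmin admin, Set<ServerName> servers) {
  Map<String, Object> filterParams = new HashMap<>();
  filterParams.put("tableName", "demo_table"); // same key the deprecated path populates above
  return admin.getLogEntries(servers, "SLOW_LOG", ServerType.REGION_SERVER, 100, filterParams);
}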
+ * @param logType string representing type of log records + * @param serverType enum for server type: HMaster or RegionServer + * @param limit put a limit to list of records that server should send in response * @param filterParams additional filter params * @return Log entries representing online records from servers */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java index c55977dba5e..798bd7b4664 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseServerException; import org.apache.yetus.audience.InterfaceAudience; @@ -56,18 +55,17 @@ public interface AsyncAdminBuilder { AsyncAdminBuilder setRetryPause(long timeout, TimeUnit unit); /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time from this base when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time from this base when retrying. *

    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) * @deprecated Since 2.5.0, will be removed in 4.0.0. Please use - * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. + * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. */ @Deprecated default AsyncAdminBuilder setRetryPauseForCQTBE(long pause, TimeUnit unit) { @@ -75,15 +73,14 @@ public interface AsyncAdminBuilder { } /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time when retrying. *

    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) */ AsyncAdminBuilder setRetryPauseForServerOverloaded(long pause, TimeUnit unit); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java index cd023d8134d..60dccffd451 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java index f03e8b5cacb..de6e967f21c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,8 @@ public class AsyncAdminRequestRetryingCaller extends AsyncRpcRetryingCaller callable) { + long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, + long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java index 6e4ed552931..0798915c08d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -63,7 +63,9 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.util.Timer; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @@ -148,9 +150,8 @@ class AsyncBatchRpcRetryingCaller { } public AsyncBatchRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - TableName tableName, List actions, long pauseNs, - long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, - long rpcTimeoutNs, int startLogErrorsCnt) { + TableName tableName, List actions, long pauseNs, long pauseNsForServerOverloaded, + int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { this.retryTimer = retryTimer; this.conn = conn; this.tableName = tableName; @@ -214,13 +215,13 @@ class AsyncBatchRpcRetryingCaller { } private void logException(int tries, Supplier> regionsSupplier, - Throwable error, ServerName serverName) { + Throwable error, ServerName serverName) { if (tries > startLogErrorsCnt) { String regions = regionsSupplier.get().map(r -> "'" + r.loc.getRegion().getRegionNameAsString() + "'") .collect(Collectors.joining(",", "[", "]")); - LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName + - " failed, tries=" + tries, error); + LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName + + " failed, tries=" + tries, error); } } @@ -275,7 +276,7 @@ class AsyncBatchRpcRetryingCaller { } private ClientProtos.MultiRequest buildReq(Map actionsByRegion, - List cells, Map indexMap) throws IOException { + List cells, Map indexMap) throws IOException { ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder(); ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder(); ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); @@ -289,21 +290,21 @@ class AsyncBatchRpcRetryingCaller { entry.getValue().actions.stream() .sorted((a1, a2) -> Integer.compare(a1.getOriginalIndex(), a2.getOriginalIndex())) .collect(Collectors.toList()), - cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, - nonceGroup, indexMap); + cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, nonceGroup, + indexMap); } return multiRequestBuilder.build(); } @SuppressWarnings("unchecked") private void onComplete(Action action, RegionRequest regionReq, int tries, ServerName serverName, - RegionResult regionResult, List failedActions, Throwable regionException, - MutableBoolean retryImmediately) { + RegionResult regionResult, List failedActions, Throwable regionException, + MutableBoolean retryImmediately) { Object result = regionResult.result.getOrDefault(action.getOriginalIndex(), regionException); if (result == null) { - LOG.error("Server " + serverName + " sent us neither result nor exception for row '" + - Bytes.toStringBinary(action.getAction().getRow()) + "' of " + - regionReq.loc.getRegion().getRegionNameAsString()); + LOG.error("Server " + serverName + " sent us neither result nor exception for row '" + + Bytes.toStringBinary(action.getAction().getRow()) + "' of " + + 
regionReq.loc.getRegion().getRegionNameAsString()); addError(action, new RuntimeException("Invalid response"), serverName); failedActions.add(action); } else if (result instanceof Throwable) { @@ -325,7 +326,7 @@ class AsyncBatchRpcRetryingCaller { } private void onComplete(Map actionsByRegion, int tries, - ServerName serverName, MultiResponse resp) { + ServerName serverName, MultiResponse resp) { ConnectionUtils.updateStats(conn.getStatisticsTracker(), conn.getConnectionMetrics(), serverName, resp); List failedActions = new ArrayList<>(); @@ -405,8 +406,8 @@ class AsyncBatchRpcRetryingCaller { onError(serverReq.actionsByRegion, tries, controller.getFailed(), serverName); } else { try { - onComplete(serverReq.actionsByRegion, tries, serverName, ResponseConverter.getResults(req, - indexMap, resp, controller.cellScanner())); + onComplete(serverReq.actionsByRegion, tries, serverName, + ResponseConverter.getResults(req, indexMap, resp, controller.cellScanner())); } catch (Exception e) { onError(serverReq.actionsByRegion, tries, e, serverName); return; @@ -451,7 +452,7 @@ class AsyncBatchRpcRetryingCaller { } private void onError(Map actionsByRegion, int tries, Throwable t, - ServerName serverName) { + ServerName serverName) { Throwable error = translateException(t); logException(tries, () -> actionsByRegion.values().stream(), error, serverName); actionsByRegion.forEach( @@ -469,7 +470,7 @@ class AsyncBatchRpcRetryingCaller { } private void tryResubmit(Stream actions, int tries, boolean immediately, - boolean isServerOverloaded) { + boolean isServerOverloaded) { if (immediately) { groupAndSend(actions, tries); return; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java index 7b21eb5fa13..e5f28d2e060 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java index ea2528d5152..ed21fb8e23e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java index cd0496377bc..ede5b359e83 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ class AsyncBufferedMutatorBuilderImpl implements AsyncBufferedMutatorBuilder { private int maxKeyValueSize; public AsyncBufferedMutatorBuilderImpl(AsyncConnectionConfiguration connConf, - AsyncTableBuilder tableBuilder, HashedWheelTimer periodicalFlushTimer) { + AsyncTableBuilder tableBuilder, HashedWheelTimer periodicalFlushTimer) { this.tableBuilder = tableBuilder; this.writeBufferSize = connConf.getWriteBufferSize(); this.periodicFlushTimeoutNs = connConf.getWriteBufferPeriodicFlushTimeoutNs(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java index a7e5f3ff6d1..ce4193d9138 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,9 +30,10 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; -import org.apache.yetus.audience.InterfaceAudience; /** * The implementation of {@link AsyncBufferedMutator}. Simply wrap an {@link AsyncTable}. 
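Illustration only, not from this patch: a typical use of the buffered mutator whose builder and implementation are reformatted above; the buffer size, flush interval and names are invented.

// Sketch only: buffer puts and let size-based or periodic flushing push them out.
static CompletableFuture<Void> bufferedPut(AsyncConnection conn) {
  AsyncBufferedMutator mutator = conn.getBufferedMutatorBuilder(TableName.valueOf("demo_table"))
      .setWriteBufferSize(4 * 1024 * 1024)              // flush once ~4 MB are buffered
      .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS) // ...or at least once per second
      .build();
  Put put = new Put(Bytes.toBytes("row-1"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
  CompletableFuture<Void> done = mutator.mutate(put);
  mutator.close(); // close() flushes anything still buffered
  return done;
}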
@@ -61,7 +62,7 @@ class AsyncBufferedMutatorImpl implements AsyncBufferedMutator { Timeout periodicFlushTask; AsyncBufferedMutatorImpl(HashedWheelTimer periodicalFlushTimer, AsyncTable table, - long writeBufferSize, long periodicFlushTimeoutNs, int maxKeyValueSize) { + long writeBufferSize, long periodicFlushTimeoutNs, int maxKeyValueSize) { this.periodicalFlushTimer = periodicalFlushTimer; this.table = table; this.writeBufferSize = writeBufferSize; @@ -116,7 +117,7 @@ class AsyncBufferedMutatorImpl implements AsyncBufferedMutator { for (Mutation mutation : mutations) { heapSize += mutation.heapSize(); if (mutation instanceof Put) { - validatePut((Put)mutation, maxKeyValueSize); + validatePut((Put) mutation, maxKeyValueSize); } } synchronized (this) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java index 80e3b0ce047..ed381df7e0d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java @@ -93,8 +93,8 @@ class AsyncClientScanner { private final Span span; public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableName tableName, - AsyncConnectionImpl conn, Timer retryTimer, long pauseNs, long pauseNsForServerOverloaded, - int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { + AsyncConnectionImpl conn, Timer retryTimer, long pauseNs, long pauseNsForServerOverloaded, + int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { if (scan.getStartRow() == null) { scan.withStartRow(EMPTY_START_ROW, scan.includeStartRow()); } @@ -146,7 +146,7 @@ class AsyncClientScanner { public final ScanResponse resp; public OpenScannerResponse(HRegionLocation loc, boolean isRegionServerRemote, Interface stub, - HBaseRpcController controller, ScanResponse resp) { + HBaseRpcController controller, ScanResponse resp) { this.loc = loc; this.isRegionServerRemote = isRegionServerRemote; this.stub = stub; @@ -158,7 +158,7 @@ class AsyncClientScanner { private final AtomicInteger openScannerTries = new AtomicInteger(); private CompletableFuture callOpenScanner(HBaseRpcController controller, - HRegionLocation loc, ClientService.Interface stub) { + HRegionLocation loc, ClientService.Interface stub) { try (Scope ignored = span.makeCurrent()) { boolean isRegionServerRemote = isRemote(loc.getHostname()); incRPCCallsMetrics(scanMetrics, isRegionServerRemote); @@ -178,8 +178,8 @@ class AsyncClientScanner { span.end(); return; } - future.complete( - new OpenScannerResponse(loc, isRegionServerRemote, stub, controller, resp)); + future + .complete(new OpenScannerResponse(loc, isRegionServerRemote, stub, controller, resp)); } }); } catch (IOException e) { @@ -191,17 +191,15 @@ class AsyncClientScanner { } private void startScan(OpenScannerResponse resp) { - addListener( - conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId()).location(resp.loc) - .remote(resp.isRegionServerRemote) - .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) - .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) - .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts) - 
.startLogErrorsCnt(startLogErrorsCnt).start(resp.controller, resp.resp), - (hasMore, error) -> { + addListener(conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId()) + .location(resp.loc).remote(resp.isRegionServerRemote) + .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) + .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) + .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) + .start(resp.controller, resp.resp), (hasMore, error) -> { try (Scope ignored = span.makeCurrent()) { if (error != null) { try { @@ -229,19 +227,19 @@ class AsyncClientScanner { private CompletableFuture openScanner(int replicaId) { try (Scope ignored = span.makeCurrent()) { return conn.callerFactory. single().table(tableName) - .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan)) - .priority(scan.getPriority()).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS) - .pause(pauseNs, TimeUnit.NANOSECONDS) - .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) - .action(this::callOpenScanner).call(); + .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan)) + .priority(scan.getPriority()).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) + .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner) + .call(); } } private long getPrimaryTimeoutNs() { - return TableName.isMetaTableName(tableName) ? conn.connConf.getPrimaryMetaScanTimeoutNs() - : conn.connConf.getPrimaryScanTimeoutNs(); + return TableName.isMetaTableName(tableName) + ? conn.connConf.getPrimaryMetaScanTimeoutNs() + : conn.connConf.getPrimaryScanTimeoutNs(); } private void openScanner() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java index d04b5f2cebe..5ed2bae3cea 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -97,7 +97,7 @@ public interface AsyncConnection extends Closeable { * This method no longer checks table existence. An exception will be thrown if the table does not * exist only when the first operation is attempted. * @param tableName the name of the table - * @param pool the thread pool to use for executing callback + * @param pool the thread pool to use for executing callback * @return an AsyncTable to use for interactions with this table */ default AsyncTable getTable(TableName tableName, ExecutorService pool) { @@ -110,7 +110,7 @@ public interface AsyncConnection extends Closeable { * This method no longer checks table existence. 
An exception will be thrown if the table does not * exist only when the first operation is attempted. * @param tableName the name of the table - * @param pool the thread pool to use for executing callback + * @param pool the thread pool to use for executing callback */ AsyncTableBuilder getTableBuilder(TableName tableName, ExecutorService pool); @@ -181,7 +181,7 @@ public interface AsyncConnection extends Closeable { * {@link #getBufferedMutatorBuilder(TableName, ExecutorService)} if you want to customize some * configs. * @param tableName the name of the table - * @param pool the thread pool to use for executing callback + * @param pool the thread pool to use for executing callback * @return an {@link AsyncBufferedMutator} for the supplied tableName. */ default AsyncBufferedMutator getBufferedMutator(TableName tableName, ExecutorService pool) { @@ -191,7 +191,7 @@ public interface AsyncConnection extends Closeable { /** * Returns an {@link AsyncBufferedMutatorBuilder} for creating {@link AsyncBufferedMutator}. * @param tableName the name of the table - * @param pool the thread pool to use for executing callback + * @param pool the thread pool to use for executing callback */ AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName, ExecutorService pool); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java index 69616649d57..ffccc127914 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PE import static org.apache.hadoop.hbase.HConstants.HBASE_META_SCANNER_CACHING; import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY; import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY; + import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; @@ -34,13 +35,13 @@ import org.apache.yetus.audience.InterfaceAudience; class AsyncConnectionConfiguration { /** - * Configure the number of failures after which the client will start logging. A few failures - * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable - * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at - * this stage. + * Configure the number of failures after which the client will start logging. A few failures is + * fine: region moved, then is not opened, then is overloaded. We try to have an acceptable + * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at this + * stage. 
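Illustration only, not from this patch: the ExecutorService accepted by the overloads above is used purely to run callbacks, so any application pool can be passed in; the table and row below are invented.

// Sketch only: read one row, running the callback on the caller-supplied pool.
static CompletableFuture<Result> readRow(AsyncConnection conn, ExecutorService callbackPool) {
  AsyncTable<?> table = conn.getTable(TableName.valueOf("demo_table"), callbackPool);
  return table.get(new Get(Bytes.toBytes("row-1")));
}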
*/ public static final String START_LOG_ERRORS_AFTER_COUNT_KEY = - "hbase.client.start.log.errors.counter"; + "hbase.client.start.log.errors.counter"; public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 5; private final long metaOperationTimeoutNs; @@ -106,28 +107,25 @@ class AsyncConnectionConfiguration { this.maxRetries = connectionConf.getRetriesNumber(); // fields from connection configuration that need to be converted to nanos - this.metaOperationTimeoutNs = TimeUnit.MILLISECONDS.toNanos( - connectionConf.getMetaOperationTimeout()); + this.metaOperationTimeoutNs = + TimeUnit.MILLISECONDS.toNanos(connectionConf.getMetaOperationTimeout()); this.operationTimeoutNs = TimeUnit.MILLISECONDS.toNanos(connectionConf.getOperationTimeout()); this.rpcTimeoutNs = TimeUnit.MILLISECONDS.toNanos(connectionConf.getRpcTimeout()); - this.readRpcTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, - connectionConf.getReadRpcTimeout())); - this.writeRpcTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY, - connectionConf.getWriteRpcTimeout())); + this.readRpcTimeoutNs = TimeUnit.MILLISECONDS + .toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, connectionConf.getReadRpcTimeout())); + this.writeRpcTimeoutNs = TimeUnit.MILLISECONDS + .toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY, connectionConf.getWriteRpcTimeout())); this.pauseNs = TimeUnit.MILLISECONDS.toNanos(connectionConf.getPauseMillis()); - this.pauseNsForServerOverloaded = TimeUnit.MILLISECONDS.toNanos( - connectionConf.getPauseMillisForServerOverloaded()); - this.primaryCallTimeoutNs = TimeUnit.MICROSECONDS.toNanos( - connectionConf.getPrimaryCallTimeoutMicroSecond()); - this.primaryScanTimeoutNs = TimeUnit.MICROSECONDS.toNanos( - connectionConf.getReplicaCallTimeoutMicroSecondScan()); + this.pauseNsForServerOverloaded = + TimeUnit.MILLISECONDS.toNanos(connectionConf.getPauseMillisForServerOverloaded()); + this.primaryCallTimeoutNs = + TimeUnit.MICROSECONDS.toNanos(connectionConf.getPrimaryCallTimeoutMicroSecond()); + this.primaryScanTimeoutNs = + TimeUnit.MICROSECONDS.toNanos(connectionConf.getReplicaCallTimeoutMicroSecondScan()); this.primaryMetaScanTimeoutNs = TimeUnit.MICROSECONDS.toNanos(connectionConf.getMetaReplicaCallTimeoutMicroSecondScan()); - this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos( - conf.getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)); + this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf + .getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)); // fields not in connection configuration this.startLogErrorsCnt = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index cbc3eaa2e46..bc1ad4aed5a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -27,6 +27,7 @@ import static org.apache.hadoop.hbase.client.MetricsConnection.CLIENT_SIDE_METRI import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLED_KEY; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.util.Optional; @@ -56,8 +57,10 @@ import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; @@ -116,7 +119,7 @@ public class AsyncConnectionImpl implements AsyncConnection { private final ClusterStatusListener clusterStatusListener; public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId, - User user) { + User user) { this.conf = conf; this.user = user; @@ -175,8 +178,7 @@ public class AsyncConnectionImpl implements AsyncConnection { } /** - * If choreService has not been created yet, create the ChoreService. - * @return ChoreService + * If choreService has not been created yet, create the ChoreService. n */ synchronized ChoreService getChoreService() { if (isClosed()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index e2f8094bd75..ad554d9bed6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.RpcChannel; - import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -86,7 +85,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture> listTableDescriptors(Pattern pattern, - boolean includeSysTables) { + boolean includeSysTables) { return wrap(rawAdmin.listTableDescriptors(pattern, includeSysTables)); } @@ -107,7 +106,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture> listTableNames(Pattern pattern, - boolean includeSysTables) { + boolean includeSysTables) { return wrap(rawAdmin.listTableNames(pattern, includeSysTables)); } @@ -128,7 +127,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, - int numRegions) { + int numRegions) { return wrap(rawAdmin.createTable(desc, startKey, endKey, numRegions)); } @@ -189,7 +188,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture addColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily) { + ColumnFamilyDescriptor columnFamily) { return wrap(rawAdmin.addColumnFamily(tableName, columnFamily)); } @@ -200,7 +199,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture modifyColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily) { + ColumnFamilyDescriptor columnFamily) { return wrap(rawAdmin.modifyColumnFamily(tableName, columnFamily)); } @@ -276,14 +275,13 @@ class AsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture compact(TableName tableName, - CompactType compactType) { + public CompletableFuture compact(TableName tableName, CompactType compactType) { return wrap(rawAdmin.compact(tableName, compactType)); } @Override - public CompletableFuture compact(TableName tableName, - byte[] columnFamily, CompactType compactType) { + public 
CompletableFuture compact(TableName tableName, byte[] columnFamily, + CompactType compactType) { return wrap(rawAdmin.compact(tableName, columnFamily, compactType)); } @@ -304,7 +302,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture majorCompact(TableName tableName, byte[] columnFamily, - CompactType compactType) { + CompactType compactType) { return wrap(rawAdmin.majorCompact(tableName, columnFamily, compactType)); } @@ -409,8 +407,8 @@ class AsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled) { + public CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) { return wrap(rawAdmin.addReplicationPeer(peerId, peerConfig, enabled)); } @@ -436,19 +434,19 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture updateReplicationPeerConfig(String peerId, - ReplicationPeerConfig peerConfig) { + ReplicationPeerConfig peerConfig) { return wrap(rawAdmin.updateReplicationPeerConfig(peerId, peerConfig)); } @Override public CompletableFuture appendReplicationPeerTableCFs(String peerId, - Map> tableCfs) { + Map> tableCfs) { return wrap(rawAdmin.appendReplicationPeerTableCFs(peerId, tableCfs)); } @Override public CompletableFuture removeReplicationPeerTableCFs(String peerId, - Map> tableCfs) { + Map> tableCfs) { return wrap(rawAdmin.removeReplicationPeerTableCFs(peerId, tableCfs)); } @@ -500,7 +498,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT) { + boolean restoreAcl, String customSFT) { return wrap(rawAdmin.cloneSnapshot(snapshotName, tableName, restoreAcl, customSFT)); } @@ -521,7 +519,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture> listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { return wrap(rawAdmin.listTableSnapshots(tableNamePattern, snapshotNamePattern)); } @@ -547,25 +545,25 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture deleteTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { return wrap(rawAdmin.deleteTableSnapshots(tableNamePattern, snapshotNamePattern)); } @Override public CompletableFuture execProcedure(String signature, String instance, - Map props) { + Map props) { return wrap(rawAdmin.execProcedure(signature, instance, props)); } @Override public CompletableFuture execProcedureWithReturn(String signature, String instance, - Map props) { + Map props) { return wrap(rawAdmin.execProcedureWithReturn(signature, instance, props)); } @Override public CompletableFuture isProcedureFinished(String signature, String instance, - Map props) { + Map props) { return wrap(rawAdmin.isProcedureFinished(signature, instance, props)); } @@ -586,7 +584,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture decommissionRegionServers(List servers, - boolean offload) { + boolean offload) { return wrap(rawAdmin.decommissionRegionServers(servers, offload)); } @@ -597,7 +595,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture recommissionRegionServer(ServerName server, - List encodedRegionNames) { + List encodedRegionNames) { return wrap(rawAdmin.recommissionRegionServer(server, encodedRegionNames)); } @@ 
-658,7 +656,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture> getRegionMetrics(ServerName serverName, - TableName tableName) { + TableName tableName) { return wrap(rawAdmin.getRegionMetrics(serverName, tableName)); } @@ -668,8 +666,8 @@ class AsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture getCompactionState( - TableName tableName, CompactType compactType) { + public CompletableFuture getCompactionState(TableName tableName, + CompactType compactType) { return wrap(rawAdmin.getCompactionState(tableName, compactType)); } @@ -684,8 +682,8 @@ class AsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture> getLastMajorCompactionTimestampForRegion( - byte[] regionName) { + public CompletableFuture> + getLastMajorCompactionTimestampForRegion(byte[] regionName) { return wrap(rawAdmin.getLastMajorCompactionTimestampForRegion(regionName)); } @@ -751,13 +749,13 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable) { + ServiceCaller callable) { return wrap(rawAdmin.coprocessorService(stubMaker, callable)); } @Override public CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, ServerName serverName) { + ServiceCaller callable, ServerName serverName) { return wrap(rawAdmin.coprocessorService(stubMaker, callable, serverName)); } @@ -778,13 +776,13 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture cloneTableSchema(TableName tableName, TableName newTableName, - boolean preserveSplits) { + boolean preserveSplits) { return wrap(rawAdmin.cloneTableSchema(tableName, newTableName, preserveSplits)); } @Override public CompletableFuture> compactionSwitch(boolean switchState, - List serverNamesList) { + List serverNamesList) { return wrap(rawAdmin.compactionSwitch(switchState, serverNamesList)); } @@ -809,8 +807,8 @@ class AsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture> getRegionServerSpaceQuotaSnapshots( - ServerName serverName) { + public CompletableFuture> + getRegionServerSpaceQuotaSnapshots(ServerName serverName) { return wrap(rawAdmin.getRegionServerSpaceQuotaSnapshots(serverName)); } @@ -826,7 +824,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture grant(UserPermission userPermission, - boolean mergeExistingPermissions) { + boolean mergeExistingPermissions) { return wrap(rawAdmin.grant(userPermission, mergeExistingPermissions)); } @@ -837,19 +835,18 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture> - getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) { + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) { return wrap(rawAdmin.getUserPermissions(getUserPermissionsRequest)); } @Override public CompletableFuture> hasUserPermissions(String userName, - List permissions) { + List permissions) { return wrap(rawAdmin.hasUserPermissions(userName, permissions)); } @Override - public CompletableFuture snapshotCleanupSwitch(final boolean on, - final boolean sync) { + public CompletableFuture snapshotCleanupSwitch(final boolean on, final boolean sync) { return wrap(rawAdmin.snapshotCleanupSwitch(on, sync)); } @@ -865,8 +862,7 @@ class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture> getLogEntries(Set serverNames, - String logType, ServerType serverType, int limit, - Map filterParams) { + String logType, 
ServerType serverType, int limit, Map filterParams) { return wrap(rawAdmin.getLogEntries(serverNames, logType, serverType, limit, filterParams)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java index 976e9e78477..c02b80c666a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,8 @@ public class AsyncMasterRequestRpcRetryingCaller extends AsyncRpcRetryingCall private final Callable callable; public AsyncMasterRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, - int maxRetries, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { + Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, + int maxRetries, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxRetries, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.callable = callable; @@ -53,8 +53,10 @@ public class AsyncMasterRequestRpcRetryingCaller extends AsyncRpcRetryingCall private void clearMasterStubCacheOnError(MasterService.Interface stub, Throwable error) { // ServerNotRunningYetException may because it is the backup master. - if (ClientExceptionsUtil.isConnectionException(error) || - error instanceof ServerNotRunningYetException) { + if ( + ClientExceptionsUtil.isConnectionException(error) + || error instanceof ServerNotRunningYetException + ) { conn.clearMasterStubCache(stub); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java index 9df8efb8a63..136d18a9618 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -77,8 +77,10 @@ class AsyncMetaRegionLocator { } } HRegionLocation oldLoc = oldLocs.getRegionLocation(replicaId); - if (oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() || - oldLoc.getServerName().equals(loc.getServerName()))) { + if ( + oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() + || oldLoc.getServerName().equals(loc.getServerName())) + ) { return; } RegionLocations newLocs = replaceRegionLocation(oldLocs, loc); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 6896a2970b0..b44892d78a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,11 +59,12 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Scan.ReadType; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.thirdparty.com.google.common.base.Objects; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Objects; + /** * The asynchronous locator for regions other than meta. */ @@ -157,7 +158,7 @@ class AsyncNonMetaRegionLocator { } private boolean tryComplete(LocateRequest req, CompletableFuture future, - RegionLocations locations) { + RegionLocations locations) { if (future.isDone()) { return true; } @@ -177,8 +178,8 @@ class AsyncNonMetaRegionLocator { // endKey. byte[] endKey = loc.getRegion().getEndKey(); int c = Bytes.compareTo(endKey, req.row); - completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey)) && - Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0); + completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey)) + && Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0); } else { completed = loc.getRegion().containsRow(req.row); } @@ -199,21 +200,21 @@ class AsyncNonMetaRegionLocator { conn.getConfiguration().getInt(LOCATE_PREFETCH_LIMIT, DEFAULT_LOCATE_PREFETCH_LIMIT); // Get the region locator's meta replica mode. - this.metaReplicaMode = CatalogReplicaMode.fromString(conn.getConfiguration() - .get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString())); + this.metaReplicaMode = CatalogReplicaMode.fromString( + conn.getConfiguration().get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString())); switch (this.metaReplicaMode) { case LOAD_BALANCE: - String replicaSelectorClass = conn.getConfiguration(). 
- get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, - CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + String replicaSelectorClass = + conn.getConfiguration().get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); - this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory.createSelector( - replicaSelectorClass, META_TABLE_NAME, conn.getChoreService(), () -> { + this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, conn.getChoreService(), () -> { int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { - RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( - conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + RegionLocations metaLocations = conn.registry.getMetaRegionLocations() + .get(conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); numOfReplicas = metaLocations.size(); } catch (Exception e) { LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); @@ -224,8 +225,8 @@ class AsyncNonMetaRegionLocator { case NONE: // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config. - boolean useMetaReplicas = conn.getConfiguration().getBoolean(USE_META_REPLICAS, - DEFAULT_USE_META_REPLICAS); + boolean useMetaReplicas = + conn.getConfiguration().getBoolean(USE_META_REPLICAS, DEFAULT_USE_META_REPLICAS); if (useMetaReplicas) { this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ; } @@ -286,9 +287,9 @@ class AsyncNonMetaRegionLocator { RegionLocations mergedLocs = oldLocs.mergeLocations(locs); if (isEqual(mergedLocs, oldLocs)) { // the merged one is the same with the old one, give up - LOG.trace("Will not add {} to cache because the old value {} " + - " is newer than us or has the same server name." + - " Maybe it is updated before we replace it", locs, oldLocs); + LOG.trace("Will not add {} to cache because the old value {} " + + " is newer than us or has the same server name." + + " Maybe it is updated before we replace it", locs, oldLocs); return oldLocs; } if (tableCache.cache.replace(startKey, oldLocs, mergedLocs)) { @@ -298,8 +299,8 @@ class AsyncNonMetaRegionLocator { // the region is different, here we trust the one we fetched. 
This maybe wrong but finally // the upper layer can detect this and trigger removal of the wrong locations if (LOG.isDebugEnabled()) { - LOG.debug("The newnly fetch region {} is different from the old one {} for row '{}'," + - " try replaing the old one...", region, oldRegion, Bytes.toStringBinary(startKey)); + LOG.debug("The newnly fetch region {} is different from the old one {} for row '{}'," + + " try replaing the old one...", region, oldRegion, Bytes.toStringBinary(startKey)); } if (tableCache.cache.replace(startKey, oldLocs, locs)) { return locs; @@ -309,10 +310,10 @@ class AsyncNonMetaRegionLocator { } private void complete(TableName tableName, LocateRequest req, RegionLocations locs, - Throwable error) { + Throwable error) { if (error != null) { - LOG.warn("Failed to locate region in '" + tableName + "', row='" + - Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, error); + LOG.warn("Failed to locate region in '" + tableName + "', row='" + + Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, error); } Optional toSend = Optional.empty(); TableCache tableCache = getTableCache(tableName); @@ -392,7 +393,7 @@ class AsyncNonMetaRegionLocator { } private RegionLocations locateRowInCache(TableCache tableCache, TableName tableName, byte[] row, - int replicaId) { + int replicaId) { Map.Entry entry = tableCache.cache.floorEntry(row); if (entry == null) { recordCacheMiss(); @@ -419,7 +420,7 @@ class AsyncNonMetaRegionLocator { } private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName tableName, - byte[] row, int replicaId) { + byte[] row, int replicaId) { boolean isEmptyStopRow = isEmptyStopRow(row); Map.Entry entry = isEmptyStopRow ? tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row); @@ -433,8 +434,10 @@ class AsyncNonMetaRegionLocator { recordCacheMiss(); return null; } - if (isEmptyStopRow(loc.getRegion().getEndKey()) || - (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) { + if ( + isEmptyStopRow(loc.getRegion().getEndKey()) + || (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0) + ) { if (LOG.isTraceEnabled()) { LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName, Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId); @@ -449,8 +452,8 @@ class AsyncNonMetaRegionLocator { private void locateInMeta(TableName tableName, LocateRequest req) { if (LOG.isTraceEnabled()) { - LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) + - "', locateType=" + req.locateType + " in meta"); + LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) + + "', locateType=" + req.locateType + " in meta"); } byte[] metaStartKey; if (req.locateType.equals(RegionLocateType.BEFORE)) { @@ -549,7 +552,7 @@ class AsyncNonMetaRegionLocator { } private RegionLocations locateInCache(TableCache tableCache, TableName tableName, byte[] row, - int replicaId, RegionLocateType locateType) { + int replicaId, RegionLocateType locateType) { return locateType.equals(RegionLocateType.BEFORE) ? locateRowBeforeInCache(tableCache, tableName, row, replicaId) : locateRowInCache(tableCache, tableName, row, replicaId); @@ -559,7 +562,7 @@ class AsyncNonMetaRegionLocator { // placed before it. Used for reverse scan. See the comment of // AsyncRegionLocator.getPreviousRegionLocation. 
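The hunks above show how the locator picks its catalog replica mode from client configuration (LOCATOR_META_REPLICAS_MODE, with a fallback to the legacy USE_META_REPLICAS flag when no mode is set). A minimal client-side sketch of that configuration follows; the class name and the literal configuration keys are assumptions standing in for those constants, and the snippet is an illustration rather than part of this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MetaReplicaModeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed string key behind RegionLocator.LOCATOR_META_REPLICAS_MODE; the value is parsed by
        // CatalogReplicaMode.fromString() in the hunk above ("LoadBalance" and "HedgedRead" are the
        // assumed spellings of the non-default modes).
        conf.set("hbase.locator.meta.replicas.mode", "LoadBalance");
        // Assumed string key behind USE_META_REPLICAS; the legacy switch that is only consulted when
        // no explicit locator mode is configured.
        conf.setBoolean("hbase.meta.replicas.use", true);
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          // Region lookups made through this connection now go through AsyncNonMetaRegionLocator
          // using the configured catalog replica mode.
        }
      }
    }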
private CompletableFuture getRegionLocationsInternal(TableName tableName, - byte[] row, int replicaId, RegionLocateType locateType, boolean reload) { + byte[] row, int replicaId, RegionLocateType locateType, boolean reload) { // AFTER should be convert to CURRENT before calling this method assert !locateType.equals(RegionLocateType.AFTER); TableCache tableCache = getTableCache(tableName); @@ -598,7 +601,7 @@ class AsyncNonMetaRegionLocator { } CompletableFuture getRegionLocations(TableName tableName, byte[] row, - int replicaId, RegionLocateType locateType, boolean reload) { + int replicaId, RegionLocateType locateType, boolean reload) { // as we know the exact row after us, so we can just create the new row, and use the same // algorithm to locate it. if (locateType.equals(RegionLocateType.AFTER)) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 2b155842999..46abc3753cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -30,7 +28,6 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -47,33 +44,32 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This class allows a continuous flow of requests. It's written to be compatible with a - * synchronous caller such as HTable. + * This class allows a continuous flow of requests. It's written to be compatible with a synchronous + * caller such as HTable. *

- * The caller sends a buffer of operation, by calling submit. This class extract from this list
- * the operations it can send, i.e. the operations that are on region that are not considered
- * as busy. The process is asynchronous, i.e. it returns immediately when if has finished to
- * iterate on the list. If, and only if, the maximum number of current task is reached, the call
- * to submit will block. Alternatively, the caller can call submitAll, in which case all the
- * operations will be sent. Each call to submit returns a future-like object that can be used
- * to track operation progress.
+ * The caller sends a buffer of operation, by calling submit. This class extract from this list the
+ * operations it can send, i.e. the operations that are on region that are not considered as busy.
+ * The process is asynchronous, i.e. it returns immediately when if has finished to iterate on the
+ * list. If, and only if, the maximum number of current task is reached, the call to submit will
+ * block. Alternatively, the caller can call submitAll, in which case all the operations will be
+ * sent. Each call to submit returns a future-like object that can be used to track operation
+ * progress.
  * </p>
  * <p>
  * The class manages internally the retries.
  * </p>
  * <p>
- * The errors are tracked inside the Future object that is returned.
- * The results are always tracked inside the Future object and can be retrieved when the call
- * has finished. Partial results can also be retrieved if some part of multi-request failed.
+ * The errors are tracked inside the Future object that is returned. The results are always tracked
+ * inside the Future object and can be retrieved when the call has finished. Partial results can
+ * also be retrieved if some part of multi-request failed.
  * </p>
  * <p>
- * This class is thread safe.
- * Internally, the class is thread safe enough to manage simultaneously new submission and results
- * arising from older operations.
+ * This class is thread safe. Internally, the class is thread safe enough to manage simultaneously
+ * new submission and results arising from older operations.
  * </p>
  * <p>
- * Internally, this class works with {@link Row}, this mean it could be theoretically used for
- * gets as well.
+ * Internally, this class works with {@link Row}, this mean it could be theoretically used for gets
+ * as well.
  * </p>

    */ @InterfaceAudience.Private @@ -85,13 +81,13 @@ class AsyncProcess { public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget"; /** - * Configure the number of failures after which the client will start logging. A few failures - * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable - * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at - * this stage. + * Configure the number of failures after which the client will start logging. A few failures is + * fine: region moved, then is not opened, then is overloaded. We try to have an acceptable + * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at this + * stage. */ public static final String START_LOG_ERRORS_AFTER_COUNT_KEY = - "hbase.client.start.log.errors.counter"; + "hbase.client.start.log.errors.counter"; public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 5; /** @@ -156,13 +152,13 @@ class AsyncProcess { private static final int DEFAULT_LOG_DETAILS_PERIOD = 10000; private final int periodToLog; - AsyncProcess(ClusterConnection hc, Configuration conf, - RpcRetryingCallerFactory rpcCaller, RpcControllerFactory rpcFactory) { + AsyncProcess(ClusterConnection hc, Configuration conf, RpcRetryingCallerFactory rpcCaller, + RpcControllerFactory rpcFactory) { this(hc, conf, rpcCaller, rpcFactory, hc.getConnectionConfiguration().getRetriesNumber()); } - AsyncProcess(ClusterConnection hc, Configuration conf, - RpcRetryingCallerFactory rpcCaller, RpcControllerFactory rpcFactory, int retriesNumber) { + AsyncProcess(ClusterConnection hc, Configuration conf, RpcRetryingCallerFactory rpcCaller, + RpcControllerFactory rpcFactory, int retriesNumber) { if (hc == null) { throw new IllegalArgumentException("ClusterConnection cannot be null."); } @@ -176,7 +172,7 @@ class AsyncProcess { this.numTries = retriesNumber + 1; this.primaryCallTimeoutMicroseconds = conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000); this.startLogErrorsCnt = - conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT); + conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT); this.periodToLog = conf.getInt(LOG_DETAILS_PERIOD, DEFAULT_LOG_DETAILS_PERIOD); // Server tracker allows us to do faster, and yet useful (hopefully), retries. // However, if we are too useful, we might fail very quickly due to retry count limit. @@ -199,18 +195,19 @@ class AsyncProcess { } /** - * The submitted task may be not accomplished at all if there are too many running tasks or - * other limits. + * The submitted task may be not accomplished at all if there are too many running tasks or other + * limits. * @param The class to cast the result - * @param task The setting and data - * @return AsyncRequestFuture + * @param task The setting and data n */ - public AsyncRequestFuture submit(AsyncProcessTask task) throws InterruptedIOException { + public AsyncRequestFuture submit(AsyncProcessTask task) + throws InterruptedIOException { AsyncRequestFuture reqFuture = checkTask(task); if (reqFuture != null) { return reqFuture; } - SubmittedRows submittedRows = task.getSubmittedRows() == null ? SubmittedRows.ALL : task.getSubmittedRows(); + SubmittedRows submittedRows = + task.getSubmittedRows() == null ? 
SubmittedRows.ALL : task.getSubmittedRows(); switch (submittedRows) { case ALL: return submitAll(task); @@ -222,15 +219,14 @@ class AsyncProcess { } /** - * Extract from the rows list what we can submit. The rows we can not submit are kept in the - * list. Does not send requests to replicas (not currently used for anything other - * than streaming puts anyway). - * - * @param task The setting and data + * Extract from the rows list what we can submit. The rows we can not submit are kept in the list. + * Does not send requests to replicas (not currently used for anything other than streaming puts + * anyway). + * @param task The setting and data * @param atLeastOne true if we should submit at least a subset. */ - private AsyncRequestFuture submit(AsyncProcessTask task, - boolean atLeastOne) throws InterruptedIOException { + private AsyncRequestFuture submit(AsyncProcessTask task, boolean atLeastOne) + throws InterruptedIOException { TableName tableName = task.getTableName(); RowAccess rows = task.getRowAccess(); Map actionsByServer = new HashMap<>(); @@ -260,11 +256,11 @@ class AsyncProcess { throw new IllegalArgumentException("#" + id + ", row cannot be null"); } // Make sure we get 0-s replica. - RegionLocations locs = connection.locateRegion( - tableName, r.getRow(), true, true, RegionReplicaUtil.DEFAULT_REPLICA_ID); + RegionLocations locs = connection.locateRegion(tableName, r.getRow(), true, true, + RegionReplicaUtil.DEFAULT_REPLICA_ID); if (locs == null || locs.isEmpty() || locs.getDefaultRegionLocation() == null) { throw new IOException("#" + id + ", no location found, aborting submit for" - + " tableName=" + tableName + " rowkey=" + Bytes.toStringBinary(r.getRow())); + + " tableName=" + tableName + " rowkey=" + Bytes.toStringBinary(r.getRow())); } loc = locs.getDefaultRegionLocation(); } catch (IOException ex) { @@ -307,21 +303,22 @@ class AsyncProcess { if (retainedActions.isEmpty()) return NO_REQS_RESULT; - return submitMultiActions(task, retainedActions, nonceGroup, - locationErrors, locationErrorRows, actionsByServer); + return submitMultiActions(task, retainedActions, nonceGroup, locationErrors, locationErrorRows, + actionsByServer); } AsyncRequestFuture submitMultiActions(AsyncProcessTask task, - List retainedActions, long nonceGroup, List locationErrors, - List locationErrorRows, Map actionsByServer) { - AsyncRequestFutureImpl ars = createAsyncRequestFuture(task, retainedActions, nonceGroup); + List retainedActions, long nonceGroup, List locationErrors, + List locationErrorRows, Map actionsByServer) { + AsyncRequestFutureImpl ars = + createAsyncRequestFuture(task, retainedActions, nonceGroup); // Add location errors if any if (locationErrors != null) { for (int i = 0; i < locationErrors.size(); ++i) { int originalIndex = locationErrorRows.get(i); Row row = retainedActions.get(originalIndex).getAction(); - ars.manageError(originalIndex, row, - AsyncRequestFutureImpl.Retry.NO_LOCATION_PROBLEM, locationErrors.get(i), null); + ars.manageError(originalIndex, row, AsyncRequestFutureImpl.Retry.NO_LOCATION_PROBLEM, + locationErrors.get(i), null); } } ars.sendMultiAction(actionsByServer, 1, null, false); @@ -330,15 +327,14 @@ class AsyncProcess { /** * Helper that is used when grouping the actions per region server. 
- * - * @param server - server - * @param regionName - regionName - * @param action - the action to add to the multiaction + * @param server - server + * @param regionName - regionName + * @param action - the action to add to the multiaction * @param actionsByServer the multiaction per server - * @param nonceGroup Nonce group. + * @param nonceGroup Nonce group. */ static void addAction(ServerName server, byte[] regionName, Action action, - Map actionsByServer, long nonceGroup) { + Map actionsByServer, long nonceGroup) { MultiAction multiAction = actionsByServer.get(server); if (multiAction == null) { multiAction = new MultiAction(); @@ -369,7 +365,8 @@ class AsyncProcess { if (r instanceof Put) { Put put = (Put) r; if (put.isEmpty()) { - throw new IllegalArgumentException("No columns to insert for #" + (posInList+1)+ " item"); + throw new IllegalArgumentException( + "No columns to insert for #" + (posInList + 1) + " item"); } highestPriority = Math.max(put.getPriority(), highestPriority); } @@ -377,7 +374,8 @@ class AsyncProcess { setNonce(ng, r, action); actions.add(action); } - AsyncRequestFutureImpl ars = createAsyncRequestFuture(task, actions, ng.getNonceGroup()); + AsyncRequestFutureImpl ars = + createAsyncRequestFuture(task, actions, ng.getNonceGroup()); ars.groupAndSendMultiAction(actions, 1); return ars; } @@ -420,11 +418,12 @@ class AsyncProcess { private int checkTimeout(String name, int timeout) { if (timeout < 0) { - throw new RuntimeException("The " + name + " must be bigger than zero," - + "current value is" + timeout); + throw new RuntimeException( + "The " + name + " must be bigger than zero," + "current value is" + timeout); } return timeout; } + private int checkOperationTimeout(int operationTimeout) { return checkTimeout("operation timeout", operationTimeout); } @@ -433,24 +432,25 @@ class AsyncProcess { return checkTimeout("rpc timeout", rpcTimeout); } - AsyncRequestFutureImpl createAsyncRequestFuture( - AsyncProcessTask task, List actions, long nonceGroup) { + AsyncRequestFutureImpl createAsyncRequestFuture(AsyncProcessTask task, + List actions, long nonceGroup) { return new AsyncRequestFutureImpl<>(task, actions, nonceGroup, this); } /** Wait until the async does not have more than max tasks in progress. */ protected void waitForMaximumCurrentTasks(int max, TableName tableName) - throws InterruptedIOException { - requestController.waitForMaximumCurrentTasks(max, id, periodToLog, - getLogger(tableName, max)); + throws InterruptedIOException { + requestController.waitForMaximumCurrentTasks(max, id, periodToLog, getLogger(tableName, max)); } private Consumer getLogger(TableName tableName, long max) { return (currentInProgress) -> { - LOG.info("#" + id + (max < 0 ? - ", waiting for any free slot" : - ", waiting for some tasks to finish. Expected max=" + max) + ", tasksInProgress=" - + currentInProgress + (tableName == null ? "" : ", tableName=" + tableName)); + LOG.info("#" + id + + (max < 0 + ? ", waiting for any free slot" + : ", waiting for some tasks to finish. Expected max=" + max) + + ", tasksInProgress=" + currentInProgress + + (tableName == null ? "" : ", tableName=" + tableName)); }; } @@ -458,7 +458,6 @@ class AsyncProcess { requestController.incTaskCounters(regions, sn); } - void decTaskCounters(Collection regions, ServerName sn) { requestController.decTaskCounters(regions, sn); } @@ -466,25 +465,25 @@ class AsyncProcess { /** * Create a caller. Isolated to be easily overridden in the tests. 
*/ - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { return rpcCallerFactory. newCaller(checkRpcTimeout(rpcTimeout)); } /** - * Creates the server error tracker to use inside process. - * Currently, to preserve the main assumption about current retries, and to work well with - * the retry-limit-based calculation, the calculation is local per Process object. - * We may benefit from connection-wide tracking of server errors. + * Creates the server error tracker to use inside process. Currently, to preserve the main + * assumption about current retries, and to work well with the retry-limit-based calculation, the + * calculation is local per Process object. We may benefit from connection-wide tracking of server + * errors. * @return ServerErrorTracker to use, null if there is no ServerErrorTracker on this connection */ ConnectionImplementation.ServerErrorTracker createServerErrorTracker() { - return new ConnectionImplementation.ServerErrorTracker( - this.serverTrackerTimeout, this.numTries); + return new ConnectionImplementation.ServerErrorTracker(this.serverTrackerTimeout, + this.numTries); } static boolean isReplicaGet(Row row) { - return (row instanceof Get) && (((Get)row).getConsistency() == Consistency.TIMELINE); + return (row instanceof Get) && (((Get) row).getConsistency() == Consistency.TIMELINE); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java index 5a2bbfebc08..49b04b35753 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,39 +15,38 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Iterator; import java.util.List; import java.util.concurrent.ExecutorService; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.client.coprocessor.Batch; /** - * Contains the attributes of a task which will be executed - * by {@link org.apache.hadoop.hbase.client.AsyncProcess}. - * The attributes will be validated by AsyncProcess. - * It's intended for advanced client applications. + * Contains the attributes of a task which will be executed by + * {@link org.apache.hadoop.hbase.client.AsyncProcess}. The attributes will be validated by + * AsyncProcess. It's intended for advanced client applications. * @param The type of response from server-side */ @InterfaceAudience.Private @InterfaceStability.Evolving public class AsyncProcessTask { /** - * The number of processed rows. - * The AsyncProcess has traffic control which may reject some rows. + * The number of processed rows. The AsyncProcess has traffic control which may reject some rows. 
*/ public enum SubmittedRows { ALL, AT_LEAST_ONE, NORMAL } + public static Builder newBuilder(final Batch.Callback callback) { return new Builder<>(callback); } + public static Builder newBuilder() { return new Builder(); } @@ -127,10 +125,11 @@ public class AsyncProcessTask { } public AsyncProcessTask build() { - return new AsyncProcessTask<>(pool, tableName, rows, submittedRows, - callback, callable, needResults, rpcTimeout, operationTimeout, results); + return new AsyncProcessTask<>(pool, tableName, rows, submittedRows, callback, callable, + needResults, rpcTimeout, operationTimeout, results); } } + private final ExecutorService pool; private final TableName tableName; private final RowAccess rows; @@ -141,16 +140,16 @@ public class AsyncProcessTask { private final int rpcTimeout; private final int operationTimeout; private final Object[] results; + AsyncProcessTask(AsyncProcessTask task) { - this(task.getPool(), task.getTableName(), task.getRowAccess(), - task.getSubmittedRows(), task.getCallback(), task.getCallable(), - task.getNeedResults(), task.getRpcTimeout(), task.getOperationTimeout(), - task.getResults()); + this(task.getPool(), task.getTableName(), task.getRowAccess(), task.getSubmittedRows(), + task.getCallback(), task.getCallable(), task.getNeedResults(), task.getRpcTimeout(), + task.getOperationTimeout(), task.getResults()); } - AsyncProcessTask(ExecutorService pool, TableName tableName, - RowAccess rows, SubmittedRows size, Batch.Callback callback, - CancellableRegionServerCallable callable, boolean needResults, - int rpcTimeout, int operationTimeout, Object[] results) { + + AsyncProcessTask(ExecutorService pool, TableName tableName, RowAccess rows, + SubmittedRows size, Batch.Callback callback, CancellableRegionServerCallable callable, + boolean needResults, int rpcTimeout, int operationTimeout, Object[] results) { this.pool = pool; this.tableName = tableName; this.rows = rows; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java index 09cae3571b1..c3839aedf91 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java @@ -45,12 +45,13 @@ import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FutureUtils; -import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; -import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; +import org.apache.hbase.thirdparty.io.netty.util.Timeout; + /** * The asynchronous region locator. 
*/ @@ -98,11 +99,8 @@ class AsyncRegionLocator { return TableName.isMetaTableName(tableName); } - private CompletableFuture tracedLocationFuture( - Supplier> action, - Function> getRegionNames, - Supplier spanSupplier - ) { + private CompletableFuture tracedLocationFuture(Supplier> action, + Function> getRegionNames, Supplier spanSupplier) { final Span span = spanSupplier.get(); try (Scope scope = span.makeCurrent()) { CompletableFuture future = action.get(); @@ -126,50 +124,44 @@ class AsyncRegionLocator { if (locs == null || locs.getRegionLocations() == null) { return Collections.emptyList(); } - return Arrays.stream(locs.getRegionLocations()) - .filter(Objects::nonNull) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) + return Arrays.stream(locs.getRegionLocations()).filter(Objects::nonNull) + .map(HRegionLocation::getRegion).map(RegionInfo::getRegionNameAsString) .collect(Collectors.toList()); } static List getRegionNames(HRegionLocation location) { - return Optional.ofNullable(location) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .map(Collections::singletonList) + return Optional.ofNullable(location).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).map(Collections::singletonList) .orElseGet(Collections::emptyList); } CompletableFuture getRegionLocations(TableName tableName, byte[] row, RegionLocateType type, boolean reload, long timeoutNs) { final Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.getRegionLocations") - .setTableName(tableName); + .setName("AsyncRegionLocator.getRegionLocations").setTableName(tableName); return tracedLocationFuture(() -> { - CompletableFuture future = isMeta(tableName) ? - metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) : - nonMetaRegionLocator.getRegionLocations(tableName, row, + CompletableFuture future = isMeta(tableName) + ? metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) + : nonMetaRegionLocator.getRegionLocations(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload); return withTimeout(future, timeoutNs, - () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + - "ms) waiting for region locations for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "'"); + () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + "ms) waiting for region locations for " + tableName + ", row='" + + Bytes.toStringBinary(row) + "'"); }, AsyncRegionLocator::getRegionNames, supplier); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, int replicaId, RegionLocateType type, boolean reload, long timeoutNs) { final Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.getRegionLocation") - .setTableName(tableName); + .setName("AsyncRegionLocator.getRegionLocation").setTableName(tableName); return tracedLocationFuture(() -> { // meta region can not be split right now so we always call the same method. // Change it later if the meta table can have more than one regions. CompletableFuture future = new CompletableFuture<>(); - CompletableFuture locsFuture = - isMeta(tableName) ? metaRegionLocator.getRegionLocations(replicaId, reload) : - nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload); + CompletableFuture locsFuture = isMeta(tableName) + ? 
metaRegionLocator.getRegionLocations(replicaId, reload) + : nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload); addListener(locsFuture, (locs, error) -> { if (error != null) { future.completeExceptionally(error); @@ -178,21 +170,21 @@ class AsyncRegionLocator { HRegionLocation loc = locs.getRegionLocation(replicaId); if (loc == null) { future.completeExceptionally( - new RegionOfflineException("No location for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "', locateType=" + type + ", replicaId=" + replicaId)); + new RegionOfflineException("No location for " + tableName + ", row='" + + Bytes.toStringBinary(row) + "', locateType=" + type + ", replicaId=" + replicaId)); } else if (loc.getServerName() == null) { future.completeExceptionally( - new RegionOfflineException("No server address listed for region '" + - loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row) + - "', locateType=" + type + ", replicaId=" + replicaId)); + new RegionOfflineException("No server address listed for region '" + + loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row) + + "', locateType=" + type + ", replicaId=" + replicaId)); } else { future.complete(loc); } }); return withTimeout(future, timeoutNs, - () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + - "ms) waiting for region location for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "', replicaId=" + replicaId); + () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + "ms) waiting for region location for " + tableName + ", row='" + + Bytes.toStringBinary(row) + "', replicaId=" + replicaId); }, AsyncRegionLocator::getRegionNames, supplier); } @@ -221,9 +213,8 @@ class AsyncRegionLocator { } void clearCache(TableName tableName) { - Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache") - .setTableName(tableName); + Supplier supplier = + new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", tableName); if (tableName.equals(META_TABLE_NAME)) { @@ -235,9 +226,9 @@ class AsyncRegionLocator { } void clearCache(ServerName serverName) { - Supplier supplier = new ConnectionSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache") - .addAttribute(SERVER_NAME_KEY, serverName.getServerName()); + Supplier supplier = + new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache") + .addAttribute(SERVER_NAME_KEY, serverName.getServerName()); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", serverName); metaRegionLocator.clearCache(serverName); @@ -247,8 +238,8 @@ class AsyncRegionLocator { } void clearCache() { - Supplier supplier = new ConnectionSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache"); + Supplier supplier = + new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache"); TraceUtil.trace(() -> { metaRegionLocator.clearCache(); nonMetaRegionLocator.clearCache(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java index 4c6cd5a0117..cc0eccca6e2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client; import static 
org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.findException; import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.isMetaClearingException; + import java.util.Arrays; import java.util.function.Consumer; import java.util.function.Function; @@ -50,14 +51,14 @@ final class AsyncRegionLocatorHelper { if (oldLoc == null || oldLoc.getServerName() == null) { return false; } - return oldLoc.getSeqNum() <= loc.getSeqNum() && - oldLoc.getServerName().equals(loc.getServerName()); + return oldLoc.getSeqNum() <= loc.getSeqNum() + && oldLoc.getServerName().equals(loc.getServerName()); } static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception, - Function cachedLocationSupplier, - Consumer addToCache, Consumer removeFromCache, - MetricsConnection metrics) { + Function cachedLocationSupplier, + Consumer addToCache, Consumer removeFromCache, + MetricsConnection metrics) { HRegionLocation oldLoc = cachedLocationSupplier.apply(loc); if (LOG.isDebugEnabled()) { LOG.debug("Try updating {} , the old value is {}, error={}", loc, oldLoc, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java index b91e094d340..cbad11ffd98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.InterruptedIOException; import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; /** * The context used to wait for results from one submit call. If submit call is made with @@ -31,9 +29,13 @@ import java.util.List; @InterfaceAudience.Private public interface AsyncRequestFuture { public boolean hasError(); + public RetriesExhaustedWithDetailsException getErrors(); + public List getFailedOperations(); + public Object[] getResults() throws InterruptedIOException; + /** Wait until all tasks are executed, successfully or not. */ public void waitUntilDone() throws InterruptedIOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java index d5da4db36fc..cfcb8daf488 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -54,13 +52,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * The context, and return value, for a single submit/submitAll call. - * Note on how this class (one AP submit) works. 
Initially, all requests are split into groups - * by server; request is sent to each server in parallel; the RPC calls are not async so a - * thread per server is used. Every time some actions fail, regions/locations might have - * changed, so we re-group them by server and region again and send these groups in parallel - * too. The result, in case of retries, is a "tree" of threads, with parent exiting after - * scheduling children. This is why lots of code doesn't require any synchronization. + * The context, and return value, for a single submit/submitAll call. Note on how this class (one AP + * submit) works. Initially, all requests are split into groups by server; request is sent to each + * server in parallel; the RPC calls are not async so a thread per server is used. Every time some + * actions fail, regions/locations might have changed, so we re-group them by server and region + * again and send these groups in parallel too. The result, in case of retries, is a "tree" of + * threads, with parent exiting after scheduling children. This is why lots of code doesn't require + * any synchronization. */ @InterfaceAudience.Private class AsyncRequestFutureImpl implements AsyncRequestFuture { @@ -70,11 +68,11 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { private RetryingTimeTracker tracker; /** - * Runnable (that can be submitted to thread pool) that waits for when it's time - * to issue replica calls, finds region replicas, groups the requests by replica and - * issues the calls (on separate threads, via sendMultiAction). - * This is done on a separate thread because we don't want to wait on user thread for - * our asynchronous call, and usually we have to wait before making replica calls. + * Runnable (that can be submitted to thread pool) that waits for when it's time to issue replica + * calls, finds region replicas, groups the requests by replica and issues the calls (on separate + * threads, via sendMultiAction). This is done on a separate thread because we don't want to wait + * on user thread for our asynchronous call, and usually we have to wait before making replica + * calls. */ private final class ReplicaCallIssuingRunnable implements Runnable { private final long startTime; @@ -125,11 +123,11 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { /** * Add replica actions to action map by server. - * @param index Index of the original action. + * @param index Index of the original action. * @param actionsByServer The map by server to add it to. */ private void addReplicaActions(int index, Map actionsByServer, - List unknownReplicaActions) { + List unknownReplicaActions) { if (results[index] != null) return; // opportunistic. Never goes from non-null to null. 
Action action = initialActions.get(index); RegionLocations loc = findAllLocationsOrFail(action, true); @@ -152,37 +150,37 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { Action replicaAction = new Action(action, i); if (locs[i] != null) { asyncProcess.addAction(locs[i].getServerName(), locs[i].getRegionInfo().getRegionName(), - replicaAction, actionsByServer, nonceGroup); + replicaAction, actionsByServer, nonceGroup); } else { unknownReplicaActions.add(replicaAction); } } } - private void addReplicaActionsAgain( - Action action, Map actionsByServer) { + private void addReplicaActionsAgain(Action action, + Map actionsByServer) { if (action.getReplicaId() == RegionReplicaUtil.DEFAULT_REPLICA_ID) { throw new AssertionError("Cannot have default replica here"); } HRegionLocation loc = getReplicaLocationOrFail(action); if (loc == null) return; - asyncProcess.addAction(loc.getServerName(), loc.getRegionInfo().getRegionName(), - action, actionsByServer, nonceGroup); + asyncProcess.addAction(loc.getServerName(), loc.getRegionInfo().getRegionName(), action, + actionsByServer, nonceGroup); } } /** - * Runnable (that can be submitted to thread pool) that submits MultiAction to a - * single server. The server call is synchronous, therefore we do it on a thread pool. + * Runnable (that can be submitted to thread pool) that submits MultiAction to a single server. + * The server call is synchronous, therefore we do it on a thread pool. */ final class SingleServerRequestRunnable implements Runnable { private final MultiAction multiAction; private final int numAttempt; private final ServerName server; private final Set callsInProgress; - SingleServerRequestRunnable( - MultiAction multiAction, int numAttempt, ServerName server, - Set callsInProgress) { + + SingleServerRequestRunnable(MultiAction multiAction, int numAttempt, ServerName server, + Set callsInProgress) { this.multiAction = multiAction; this.numAttempt = numAttempt; this.server = server; @@ -198,7 +196,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (callable == null) { callable = createCallable(server, tableName, multiAction); } - RpcRetryingCaller caller = asyncProcess.createCaller(callable,rpcTimeout); + RpcRetryingCaller caller = + asyncProcess.createCaller(callable, rpcTimeout); try { if (callsInProgress != null) { callsInProgress.add(callable); @@ -210,13 +209,13 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } } catch (IOException e) { // The service itself failed . It may be an error coming from the communication - // layer, but, as well, a functional error raised by the server. + // layer, but, as well, a functional error raised by the server. receiveGlobalFailure(multiAction, server, numAttempt, e); return; } catch (Throwable t) { // This should not happen. Let's log & retry anyway. - LOG.error("id=" + asyncProcess.id + ", caught throwable. Unexpected." + - " Retrying. Server=" + server + ", tableName=" + tableName, t); + LOG.error("id=" + asyncProcess.id + ", caught throwable. Unexpected." + + " Retrying. Server=" + server + ", tableName=" + tableName, t); receiveGlobalFailure(multiAction, server, numAttempt, t); return; } @@ -249,23 +248,22 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { private final ExecutorService pool; private final Set callsInProgress; - private final TableName tableName; private final AtomicLong actionsInProgress = new AtomicLong(-1); /** - * The lock controls access to results. 
It is only held when populating results where - * there might be several callers (eventual consistency gets). For other requests, - * there's one unique call going on per result index. + * The lock controls access to results. It is only held when populating results where there might + * be several callers (eventual consistency gets). For other requests, there's one unique call + * going on per result index. */ private final Object replicaResultLock = new Object(); /** - * Result array. Null if results are not needed. Otherwise, each index corresponds to - * the action index in initial actions submitted. For most request types, has null-s for - * requests that are not done, and result/exception for those that are done. - * For eventual-consistency gets, initially the same applies; at some point, replica calls - * might be started, and ReplicaResultState is put at the corresponding indices. The - * returning calls check the type to detect when this is the case. After all calls are done, - * ReplicaResultState-s are replaced with results for the user. + * Result array. Null if results are not needed. Otherwise, each index corresponds to the action + * index in initial actions submitted. For most request types, has null-s for requests that are + * not done, and result/exception for those that are done. For eventual-consistency gets, + * initially the same applies; at some point, replica calls might be started, and + * ReplicaResultState is put at the corresponding indices. The returning calls check the type to + * detect when this is the case. After all calls are done, ReplicaResultState-s are replaced with + * results for the user. */ private final Object[] results; /** @@ -291,10 +289,12 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { NO_OTHER_SUCCEEDED } - /** Sync point for calls to multiple replicas for the same user request (Get). - * Created and put in the results array (we assume replica calls require results) when - * the replica calls are launched. See results for details of this process. - * POJO, all fields are public. To modify them, the object itself is locked. */ + /** + * Sync point for calls to multiple replicas for the same user request (Get). Created and put in + * the results array (we assume replica calls require results) when the replica calls are + * launched. See results for details of this process. POJO, all fields are public. To modify them, + * the object itself is locked. + */ private static class ReplicaResultState { public ReplicaResultState(int callCount) { this.callCount = callCount; @@ -302,8 +302,10 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { /** Number of calls outstanding, or 0 if a call succeeded (even with others outstanding). */ int callCount; - /** Errors for which it is not decided whether we will report them to user. If one of the - * calls succeeds, we will discard the errors that may have happened in the other calls. */ + /** + * Errors for which it is not decided whether we will report them to user. If one of the calls + * succeeds, we will discard the errors that may have happened in the other calls. 
+ */ BatchErrors replicaErrors = null; @Override @@ -312,8 +314,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } } - public AsyncRequestFutureImpl(AsyncProcessTask task, List actions, - long nonceGroup, AsyncProcess asyncProcess) { + public AsyncRequestFutureImpl(AsyncProcessTask task, List actions, long nonceGroup, + AsyncProcess asyncProcess) { this.pool = task.getPool(); this.callback = task.getCallback(); this.nonceGroup = nonceGroup; @@ -375,9 +377,10 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } else { this.replicaGetIndices = null; } - this.callsInProgress = !hasAnyReplicaGets ? null : - Collections.newSetFromMap( - new ConcurrentHashMap()); + this.callsInProgress = !hasAnyReplicaGets + ? null + : Collections + .newSetFromMap(new ConcurrentHashMap()); this.asyncProcess = asyncProcess; this.errorsByServer = createServerErrorTracker(); this.errors = new BatchErrors(); @@ -393,16 +396,15 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { return callsInProgress; } - SingleServerRequestRunnable createSingleServerRequest(MultiAction multiAction, int numAttempt, ServerName server, - Set callsInProgress) { + SingleServerRequestRunnable createSingleServerRequest(MultiAction multiAction, int numAttempt, + ServerName server, Set callsInProgress) { return new SingleServerRequestRunnable(multiAction, numAttempt, server, callsInProgress); } /** * Group a list of actions per region servers, and send them. - * * @param currentActions - the list of row to submit - * @param numAttempt - the current numAttempt (first attempt is 1) + * @param numAttempt - the current numAttempt (first attempt is 1) */ void groupAndSendMultiAction(List currentActions, int numAttempt) { Map actionsByServer = new HashMap<>(); @@ -431,7 +433,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } } else { byte[] regionName = loc.getRegionInfo().getRegionName(); - AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, nonceGroup); + AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, + nonceGroup); } } boolean doStartReplica = (numAttempt == 1 && !isReplica && hasAnyReplicaGets); @@ -439,8 +442,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (!actionsByServer.isEmpty()) { // If this is a first attempt to group and send, no replicas, we need replica thread. - sendMultiAction(actionsByServer, numAttempt, (doStartReplica && !hasUnknown) - ? currentActions : null, numAttempt > 1 && !hasUnknown); + sendMultiAction(actionsByServer, numAttempt, + (doStartReplica && !hasUnknown) ? currentActions : null, numAttempt > 1 && !hasUnknown); } if (hasUnknown) { @@ -449,11 +452,11 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { HRegionLocation loc = getReplicaLocationOrFail(action); if (loc == null) continue; byte[] regionName = loc.getRegionInfo().getRegionName(); - AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, nonceGroup); + AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, + nonceGroup); } if (!actionsByServer.isEmpty()) { - sendMultiAction( - actionsByServer, numAttempt, doStartReplica ? currentActions : null, true); + sendMultiAction(actionsByServer, numAttempt, doStartReplica ? 
currentActions : null, true); } } } @@ -478,23 +481,22 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } private void manageLocationError(Action action, Exception ex) { - String msg = "Cannot get replica " + action.getReplicaId() - + " location for " + action.getAction(); + String msg = + "Cannot get replica " + action.getReplicaId() + " location for " + action.getAction(); LOG.error(msg); if (ex == null) { ex = new IOException(msg); } - manageError(action.getOriginalIndex(), action.getAction(), - Retry.NO_LOCATION_PROBLEM, ex, null); + manageError(action.getOriginalIndex(), action.getAction(), Retry.NO_LOCATION_PROBLEM, ex, null); } private RegionLocations findAllLocationsOrFail(Action action, boolean useCache) { - if (action.getAction() == null) throw new IllegalArgumentException("#" + asyncProcess.id + - ", row cannot be null"); + if (action.getAction() == null) + throw new IllegalArgumentException("#" + asyncProcess.id + ", row cannot be null"); RegionLocations loc = null; try { - loc = asyncProcess.connection.locateRegion( - tableName, action.getAction().getRow(), useCache, true, action.getReplicaId()); + loc = asyncProcess.connection.locateRegion(tableName, action.getAction().getRow(), useCache, + true, action.getReplicaId()); } catch (IOException ex) { manageLocationError(action, ex); } @@ -502,15 +504,14 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } /** - * Send a multi action structure to the servers, after a delay depending on the attempt - * number. Asynchronous. - * - * @param actionsByServer the actions structured by regions - * @param numAttempt the attempt number. + * Send a multi action structure to the servers, after a delay depending on the attempt number. + * Asynchronous. + * @param actionsByServer the actions structured by regions + * @param numAttempt the attempt number. * @param actionsForReplicaThread original actions for replica thread; null on non-first call. */ - void sendMultiAction(Map actionsByServer, - int numAttempt, List actionsForReplicaThread, boolean reuseThread) { + void sendMultiAction(Map actionsByServer, int numAttempt, + List actionsForReplicaThread, boolean reuseThread) { // Run the last item on the same thread if we are already on a send thread. // We hope most of the time it will be the only item, so we can cut down on threads. 
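    // In other words: every runnable except the last one of this round is handed to the pool; the
    // last runnable runs inline on the current send thread only when reuseThread is set and
    // numAttempt is not an exact multiple of HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER, so
    // periodic attempts still go through the pool and the stack depth built up by repeated inline
    // execution stays bounded (HBASE-17475).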
int actionsRemaining = actionsByServer.size(); @@ -518,8 +519,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { for (Map.Entry e : actionsByServer.entrySet()) { ServerName server = e.getKey(); MultiAction multiAction = e.getValue(); - Collection runnables = getNewMultiActionRunnable(server, multiAction, - numAttempt); + Collection runnables = + getNewMultiActionRunnable(server, multiAction, numAttempt); // make sure we correctly count the number of runnables before we try to reuse the send // thread, in case we had to split the request into different runnables because of backoff if (runnables.size() > actionsRemaining) { @@ -527,11 +528,14 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } // run all the runnables - // HBASE-17475: Do not reuse the thread after stack reach a certain depth to prevent stack overflow + // HBASE-17475: Do not reuse the thread after stack reach a certain depth to prevent stack + // overflow // for now, we use HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER to control the depth for (Runnable runnable : runnables) { - if ((--actionsRemaining == 0) && reuseThread - && numAttempt % HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER != 0) { + if ( + (--actionsRemaining == 0) && reuseThread + && numAttempt % HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER != 0 + ) { runnable.run(); } else { try { @@ -540,8 +544,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (t instanceof RejectedExecutionException) { // This should never happen. But as the pool is provided by the end user, // let's secure this a little. - LOG.warn("id=" + asyncProcess.id + ", task rejected by pool. Unexpected." + - " Server=" + server.getServerName(), t); + LOG.warn("id=" + asyncProcess.id + ", task rejected by pool. Unexpected." 
+ " Server=" + + server.getServerName(), t); } else { // see #HBASE-14359 for more details LOG.warn("Caught unexpected exception/error: ", t); @@ -561,16 +565,15 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } private Collection getNewMultiActionRunnable(ServerName server, - MultiAction multiAction, - int numAttempt) { + MultiAction multiAction, int numAttempt) { // no stats to manage, just do the standard action if (asyncProcess.connection.getStatisticsTracker() == null) { if (asyncProcess.connection.getConnectionMetrics() != null) { asyncProcess.connection.getConnectionMetrics().incrNormalRunners(); } asyncProcess.incTaskCounters(multiAction.getRegions(), server); - SingleServerRequestRunnable runnable = createSingleServerRequest( - multiAction, numAttempt, server, callsInProgress); + SingleServerRequestRunnable runnable = + createSingleServerRequest(multiAction, numAttempt, server, callsInProgress); // remove trace for runnable because HBASE-25373 and OpenTelemetry do not cover TraceRunnable return Collections.singletonList(runnable); @@ -593,7 +596,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { List toReturn = new ArrayList<>(actions.size()); for (DelayingRunner runner : actions.values()) { asyncProcess.incTaskCounters(runner.getActions().getRegions(), server); - Runnable runnable = createSingleServerRequest(runner.getActions(), numAttempt, server, callsInProgress); + Runnable runnable = + createSingleServerRequest(runner.getActions(), numAttempt, server, callsInProgress); // use a delay runner only if we need to sleep for some time if (runner.getSleepTime() > 0) { runner.setRunner(runnable); @@ -615,16 +619,15 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } /** - * @param server server location where the target region is hosted + * @param server server location where the target region is hosted * @param regionName name of the region which we are going to write some data - * @return the amount of time the client should wait until it submit a request to the - * specified server and region + * @return the amount of time the client should wait until it submit a request to the specified + * server and region */ private Long getBackoff(ServerName server, byte[] regionName) { ServerStatisticTracker tracker = asyncProcess.connection.getStatisticsTracker(); ServerStatistics stats = tracker.getStats(server); - return asyncProcess.connection.getBackoffPolicy() - .getBackoffTime(server, regionName, stats); + return asyncProcess.connection.getBackoffPolicy().getBackoffTime(server, regionName, stats); } /** @@ -632,8 +635,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { */ private void startWaitingForReplicaCalls(List actionsForReplicaThread) { long startTime = EnvironmentEdgeManager.currentTime(); - ReplicaCallIssuingRunnable replicaRunnable = new ReplicaCallIssuingRunnable( - actionsForReplicaThread, startTime); + ReplicaCallIssuingRunnable replicaRunnable = + new ReplicaCallIssuingRunnable(actionsForReplicaThread, startTime); if (asyncProcess.primaryCallTimeoutMicroseconds == 0) { // Start replica calls immediately. replicaRunnable.run(); @@ -650,7 +653,6 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { /** * Check that we can retry acts accordingly: logs, set the error status. - * * @param originalIndex the position in the list sent * @param row the row * @param canRetry if false, we won't retry whatever the settings. 
@@ -658,10 +660,9 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { * @param server the location, if any (can be null) * @return true if the action can be retried, false otherwise. */ - Retry manageError(int originalIndex, Row row, Retry canRetry, - Throwable throwable, ServerName server) { - if (canRetry == Retry.YES - && throwable != null && throwable instanceof DoNotRetryIOException) { + Retry manageError(int originalIndex, Row row, Retry canRetry, Throwable throwable, + ServerName server) { + if (canRetry == Retry.YES && throwable != null && throwable instanceof DoNotRetryIOException) { canRetry = Retry.NO_NOT_RETRIABLE; } @@ -676,17 +677,15 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { /** * Resubmit all the actions from this multiaction after a failure. - * * @param rsActions the actions still to do from the initial list - * @param server the destination + * @param server the destination * @param numAttempt the number of attempts so far - * @param t the throwable (if any) that caused the resubmit + * @param t the throwable (if any) that caused the resubmit */ - private void receiveGlobalFailure( - MultiAction rsActions, ServerName server, int numAttempt, Throwable t) { + private void receiveGlobalFailure(MultiAction rsActions, ServerName server, int numAttempt, + Throwable t) { errorsByServer.reportServerError(server); - Retry canRetry = errorsByServer.canTryMore(numAttempt) - ? Retry.YES : Retry.NO_RETRIES_EXHAUSTED; + Retry canRetry = errorsByServer.canTryMore(numAttempt) ? Retry.YES : Retry.NO_RETRIES_EXHAUSTED; cleanServerCache(server, t); int failed = 0; @@ -700,8 +699,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { updateCachedLocations(server, regionName, row, ClientExceptionsUtil.isMetaClearingException(t) ? null : t); for (Action action : e.getValue()) { - Retry retry = manageError( - action.getOriginalIndex(), action.getAction(), canRetry, t, server); + Retry retry = + manageError(action.getOriginalIndex(), action.getAction(), canRetry, t, server); if (retry == Retry.YES) { toReplay.add(action); } else if (retry == Retry.NO_OTHER_SUCCEEDED) { @@ -720,19 +719,19 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } /** - * Log as much info as possible, and, if there is something to replay, - * submit it again after a back off sleep. + * Log as much info as possible, and, if there is something to replay, submit it again after a + * back off sleep. */ - private void resubmit(ServerName oldServer, List toReplay, - int numAttempt, int failureCount, Throwable throwable) { + private void resubmit(ServerName oldServer, List toReplay, int numAttempt, + int failureCount, Throwable throwable) { // We have something to replay. We're going to sleep a little before. // We have two contradicting needs here: - // 1) We want to get the new location after having slept, as it may change. - // 2) We want to take into account the location when calculating the sleep time. - // 3) If all this is just because the response needed to be chunked try again FAST. + // 1) We want to get the new location after having slept, as it may change. + // 2) We want to take into account the location when calculating the sleep time. + // 3) If all this is just because the response needed to be chunked try again FAST. // It should be possible to have some heuristics to take the right decision. Short term, - // we go for one. + // we go for one. 
boolean retryImmediately = throwable instanceof RetryImmediatelyException; int nextAttemptNumber = retryImmediately ? numAttempt : numAttempt + 1; long backOffTime; @@ -749,9 +748,9 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } if (numAttempt > asyncProcess.startLogErrorsCnt) { // We use this value to have some logs when we have multiple failures, but not too many - // logs, as errors are to be expected when a region moves, splits and so on - LOG.info(createLog(numAttempt, failureCount, toReplay.size(), - oldServer, throwable, backOffTime, true, null, -1, -1)); + // logs, as errors are to be expected when a region moves, splits and so on + LOG.info(createLog(numAttempt, failureCount, toReplay.size(), oldServer, throwable, + backOffTime, true, null, -1, -1)); } try { @@ -759,7 +758,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { Thread.sleep(backOffTime); } } catch (InterruptedException e) { - LOG.warn("#" + asyncProcess.id + ", not sent: " + toReplay.size() + " operations, " + oldServer, e); + LOG.warn( + "#" + asyncProcess.id + ", not sent: " + toReplay.size() + " operations, " + oldServer, e); Thread.currentThread().interrupt(); return; } @@ -767,12 +767,12 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { groupAndSendMultiAction(toReplay, nextAttemptNumber); } - private void logNoResubmit(ServerName oldServer, int numAttempt, - int failureCount, Throwable throwable, int failed, int stopped) { + private void logNoResubmit(ServerName oldServer, int numAttempt, int failureCount, + Throwable throwable, int failed, int stopped) { if (failureCount != 0 || numAttempt > asyncProcess.startLogErrorsCnt + 1) { String timeStr = new Date(errorsByServer.getStartTrackingTime()).toString(); - String logMessage = createLog(numAttempt, failureCount, 0, oldServer, - throwable, -1, false, timeStr, failed, stopped); + String logMessage = createLog(numAttempt, failureCount, 0, oldServer, throwable, -1, false, + timeStr, failed, stopped); if (failed != 0) { // Only log final failures as warning LOG.warn(logMessage); @@ -784,21 +784,20 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { /** * Called when we receive the result of a server query. - * - * @param multiAction - the multiAction we sent - * @param server - the location. It's used as a server name. - * @param responses - the response, if any - * @param numAttempt - the attempt + * @param multiAction - the multiAction we sent + * @param server - the location. It's used as a server name. + * @param responses - the response, if any + * @param numAttempt - the attempt */ private void receiveMultiAction(MultiAction multiAction, ServerName server, - MultiResponse responses, int numAttempt) { + MultiResponse responses, int numAttempt) { assert responses != null; updateStats(server, responses); // Success or partial success // Analyze detailed results. We can still have individual failures to be redo. 
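[Editor's illustration, not part of the patch] The resubmit() path above sleeps an increasing amount between attempts and skips the sleep entirely for RetryImmediatelyException (e.g. when a response merely needed to be chunked). A minimal, self-contained sketch of that idea follows; the constants and the jitter policy are assumptions for the example and are not the connection's actual backoff logic.

import java.util.concurrent.ThreadLocalRandom;

final class BackoffSketch {
  private static final long BASE_PAUSE_MS = 100; // assumed base pause, not HBase's default
  private static final int MAX_TRIES = 10;       // assumed retry budget

  private BackoffSketch() {
  }

  /** Returns the sleep (ms) before the next attempt, or -1 if retries are exhausted. */
  static long nextBackoffMs(int numAttempt, boolean retryImmediately) {
    if (numAttempt >= MAX_TRIES) {
      return -1L;                                // caller should stop resubmitting
    }
    if (retryImmediately) {
      return 0L;                                 // response only needed to be chunked; retry fast
    }
    long expo = BASE_PAUSE_MS * (1L << Math.min(numAttempt, 10));
    // add jitter so many clients do not retry in lock step
    return expo / 2 + ThreadLocalRandom.current().nextLong(expo / 2 + 1);
  }
}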
// two specific throwables are managed: - // - DoNotRetryIOException: we continue to retry for other actions - // - RegionMovedException: we update the cache with the new region location + // - DoNotRetryIOException: we continue to retry for other actions + // - RegionMovedException: we update the cache with the new region location Map results = responses.getResults(); List toReplay = new ArrayList<>(); Throwable lastException = null; @@ -823,8 +822,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (result == null) { if (regionException == null) { LOG.error("Server sent us neither results nor exceptions for " - + Bytes.toStringBinary(regionName) - + ", numAttempt:" + numAttempt); + + Bytes.toStringBinary(regionName) + ", numAttempt:" + numAttempt); regionException = new RuntimeException("Invalid response"); } // If the row operation encounters the region-lever error, the exception of action may be @@ -835,7 +833,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (result instanceof Throwable) { Throwable actionException = (Throwable) result; Row row = sentAction.getAction(); - lastException = regionException != null ? regionException + lastException = regionException != null + ? regionException : ClientExceptionsUtil.findException(actionException); // Register corresponding failures once per server/once per region. if (!regionFailureRegistered) { @@ -845,12 +844,10 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (retry == null) { errorsByServer.reportServerError(server); // We determine canRetry only once for all calls, after reporting server failure. - retry = errorsByServer.canTryMore(numAttempt) ? - Retry.YES : Retry.NO_RETRIES_EXHAUSTED; + retry = errorsByServer.canTryMore(numAttempt) ? Retry.YES : Retry.NO_RETRIES_EXHAUSTED; } ++failureCount; - switch (manageError(sentAction.getOriginalIndex(), row, retry, actionException, - server)) { + switch (manageError(sentAction.getOriginalIndex(), row, retry, actionException, server)) { case YES: toReplay.add(sentAction); break; @@ -880,8 +877,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { return; } try { - asyncProcess.connection - .updateCachedLocations(tableName, regionName, row, rowException, server); + asyncProcess.connection.updateCachedLocations(tableName, regionName, row, rowException, + server); } catch (Throwable ex) { // That should never happen, but if it did, we want to make sure // we still process errors @@ -892,12 +889,13 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { private void invokeCallBack(byte[] regionName, byte[] row, CResult result) { if (callback != null) { try { - //noinspection unchecked + // noinspection unchecked // TODO: would callback expect a replica region name if it gets one? 
this.callback.update(regionName, row, result); } catch (Throwable t) { - LOG.error("User callback threw an exception for " - + Bytes.toStringBinary(regionName) + ", ignoring", t); + LOG.error( + "User callback threw an exception for " + Bytes.toStringBinary(regionName) + ", ignoring", + t); } } } @@ -916,16 +914,16 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } private String createLog(int numAttempt, int failureCount, int replaySize, ServerName sn, - Throwable error, long backOffTime, boolean willRetry, String startTime, - int failed, int stopped) { + Throwable error, long backOffTime, boolean willRetry, String startTime, int failed, + int stopped) { StringBuilder sb = new StringBuilder(); - sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName). - append(", attempt=").append(numAttempt).append("/").append(asyncProcess.numTries). - append(", "); + sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName) + .append(", attempt=").append(numAttempt).append("/").append(asyncProcess.numTries) + .append(", "); - if (failureCount > 0 || error != null){ - sb.append("failureCount=").append(failureCount).append("ops").append(", last exception="). - append(error); + if (failureCount > 0 || error != null) { + sb.append("failureCount=").append(failureCount).append("ops").append(", last exception=") + .append(error); } else { sb.append("succeeded"); } @@ -933,12 +931,12 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { sb.append(" on ").append(sn).append(", tracking started ").append(startTime); if (willRetry) { - sb.append(", retrying after=").append(backOffTime).append("ms"). - append(", operationsToReplay=").append(replaySize); + sb.append(", retrying after=").append(backOffTime).append("ms") + .append(", operationsToReplay=").append(replaySize); } else if (failureCount > 0) { if (stopped > 0) { - sb.append("; NOT retrying, stopped=").append(stopped). - append(" because successful operation on other replica"); + sb.append("; NOT retrying, stopped=").append(stopped) + .append(" because successful operation on other replica"); } if (failed > 0) { sb.append("; NOT retrying, failed=").append(failed).append(" -- final attempt!"); @@ -991,10 +989,10 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { /** * Sets the error from a particular action. - * @param index Original action index. - * @param row Original request. + * @param index Original action index. + * @param row Original request. * @param throwable The resulting error. - * @param server The source server. + * @param server The source server. */ private void setError(int index, Row row, Throwable throwable, ServerName server) { ReplicaResultState state = null; @@ -1014,7 +1012,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { boolean isActionDone = false; synchronized (state) { switch (state.callCount) { - case 0: return; // someone already set the result + case 0: + return; // someone already set the result case 1: { // All calls failed, we are the last error. target = errors; isActionDone = true; @@ -1048,25 +1047,25 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } /** - * Checks if the action is complete; used on error to prevent needless retries. - * Does not synchronize, assuming element index/field accesses are atomic. - * This is an opportunistic optimization check, doesn't have to be strict. + * Checks if the action is complete; used on error to prevent needless retries. 
Does not + * synchronize, assuming element index/field accesses are atomic. This is an opportunistic + * optimization check, doesn't have to be strict. * @param index Original action index. - * @param row Original request. + * @param row Original request. */ private boolean isActionComplete(int index, Row row) { if (!AsyncProcess.isReplicaGet(row)) return false; Object resObj = results[index]; - return (resObj != null) && (!(resObj instanceof ReplicaResultState) - || ((ReplicaResultState)resObj).callCount == 0); + return (resObj != null) + && (!(resObj instanceof ReplicaResultState) || ((ReplicaResultState) resObj).callCount == 0); } /** * Tries to set the result or error for a particular action as if there were no replica calls. * @return null if successful; replica state if there were in fact replica calls. */ - private ReplicaResultState trySetResultSimple(int index, Row row, boolean isError, - Object result, ServerName server, boolean isFromReplica) { + private ReplicaResultState trySetResultSimple(int index, Row row, boolean isError, Object result, + ServerName server, boolean isFromReplica) { Object resObj = null; if (!AsyncProcess.isReplicaGet(row)) { if (isFromReplica) { @@ -1086,10 +1085,10 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } ReplicaResultState rrs = - (resObj instanceof ReplicaResultState) ? (ReplicaResultState)resObj : null; + (resObj instanceof ReplicaResultState) ? (ReplicaResultState) resObj : null; if (rrs == null && isError) { // The resObj is not replica state (null or already set). - errors.add((Throwable)result, row, server); + errors.add((Throwable) result, row, server); } if (resObj == null) { @@ -1115,7 +1114,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { private String buildDetailedErrorMsg(String string, int index) { StringBuilder error = new StringBuilder(128); error.append(string).append("; called for ").append(index).append(", actionsInProgress ") - .append(actionsInProgress.get()).append("; replica gets: "); + .append(actionsInProgress.get()).append("; replica gets: "); if (replicaGetIndices != null) { for (int i = 0; i < replicaGetIndices.length; ++i) { error.append(replicaGetIndices[i]).append(", "); @@ -1158,7 +1157,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } } - private boolean waitUntilDone(long cutoff) throws InterruptedException{ + private boolean waitUntilDone(long cutoff) throws InterruptedException { boolean hasWait = cutoff != Long.MAX_VALUE; long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress; @@ -1171,7 +1170,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (now > lastLog + 10000) { lastLog = now; LOG.info("#" + asyncProcess.id + ", waiting for " + currentInProgress - + " actions to finish on table: " + tableName); + + " actions to finish on table: " + tableName); } } synchronized (actionsInProgress) { @@ -1209,32 +1208,31 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } /** - * Creates the server error tracker to use inside process. - * Currently, to preserve the main assumption about current retries, and to work well with - * the retry-limit-based calculation, the calculation is local per Process object. - * We may benefit from connection-wide tracking of server errors. + * Creates the server error tracker to use inside process. Currently, to preserve the main + * assumption about current retries, and to work well with the retry-limit-based calculation, the + * calculation is local per Process object. 
We may benefit from connection-wide tracking of server + * errors. * @return ServerErrorTracker to use, null if there is no ServerErrorTracker on this connection */ private ConnectionImplementation.ServerErrorTracker createServerErrorTracker() { - return new ConnectionImplementation.ServerErrorTracker( - asyncProcess.serverTrackerTimeout, asyncProcess.numTries); + return new ConnectionImplementation.ServerErrorTracker(asyncProcess.serverTrackerTimeout, + asyncProcess.numTries); } /** * Create a callable. Isolated to be easily overridden in the tests. */ private MultiServerCallable createCallable(final ServerName server, TableName tableName, - final MultiAction multi) { - return new MultiServerCallable(asyncProcess.connection, tableName, server, - multi, asyncProcess.rpcFactory.newController(), rpcTimeout, tracker, multi.getPriority()); + final MultiAction multi) { + return new MultiServerCallable(asyncProcess.connection, tableName, server, multi, + asyncProcess.rpcFactory.newController(), rpcTimeout, tracker, multi.getPriority()); } private void updateResult(int index, Object result) { Object current = results[index]; if (current != null) { if (LOG.isDebugEnabled()) { - LOG.debug("The result is assigned repeatedly! current:" + current - + ", new:" + result); + LOG.debug("The result is assigned repeatedly! current:" + current + ", new:" + result); } } results[index] = result; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java index 82ab90a47fa..95ed97e1811 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS; @@ -44,6 +42,7 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.util.Timer; @InterfaceAudience.Private @@ -80,8 +79,8 @@ public abstract class AsyncRpcRetryingCaller { protected final HBaseRpcController controller; public AsyncRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, int priority, - long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, - long rpcTimeoutNs, int startLogErrorsCnt) { + long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, + long rpcTimeoutNs, int startLogErrorsCnt) { this.retryTimer = retryTimer; this.conn = conn; this.priority = priority; @@ -126,8 +125,8 @@ public abstract class AsyncRpcRetryingCaller { } private void tryScheduleRetry(Throwable error) { - long pauseNsToUse = HBaseServerException.isServerOverloaded(error) ? - pauseNsForServerOverloaded : pauseNs; + long pauseNsToUse = + HBaseServerException.isServerOverloaded(error) ? 
pauseNsForServerOverloaded : pauseNs; long delayNs; if (operationTimeoutNs > 0) { long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS; @@ -148,7 +147,7 @@ public abstract class AsyncRpcRetryingCaller { } protected final void onError(Throwable t, Supplier errMsg, - Consumer updateCachedLocation) { + Consumer updateCachedLocation) { if (future.isDone()) { // Give up if the future is already done, this is possible if user has already canceled the // future. And for timeline consistent read, we will also cancel some requests if we have @@ -168,9 +167,9 @@ public abstract class AsyncRpcRetryingCaller { return; } if (tries > startLogErrorsCnt) { - LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts + - ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + - " ms, time elapsed = " + elapsedMs() + " ms", error); + LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts + + ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + + " ms, time elapsed = " + elapsedMs() + " ms", error); } updateCachedLocation.accept(error); RetriesExhaustedException.ThrowableWithExtraContext qt = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java index d501998f868..2d8e7b7aabe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -93,8 +93,8 @@ class AsyncRpcRetryingCallerFactory { return this; } - public SingleRequestCallerBuilder action( - AsyncSingleRequestRpcRetryingCaller.Callable callable) { + public SingleRequestCallerBuilder + action(AsyncSingleRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -243,7 +243,7 @@ class AsyncRpcRetryingCallerFactory { } public ScanSingleRegionCallerBuilder scannerLeaseTimeoutPeriod(long scannerLeaseTimeoutPeriod, - TimeUnit unit) { + TimeUnit unit) { this.scannerLeaseTimeoutPeriodNs = unit.toNanos(scannerLeaseTimeoutPeriod); return this; } @@ -300,7 +300,7 @@ class AsyncRpcRetryingCallerFactory { * Short cut for {@code build().start(HBaseRpcController, ScanResponse)}. 
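[Editor's illustration, not part of the patch] The tryScheduleRetry() logic above picks the longer "server overloaded" pause when HBaseServerException reports overload and then bounds the delay by the time still left in the overall operation timeout. A hedged, simplified sketch of that selection is below; the real caller additionally grows the pause with the attempt count, and SLEEP_DELTA_NS here is an assumed safety margin.

import java.util.concurrent.TimeUnit;

final class RetryDelaySketch {
  // Assumed safety margin; the actual caller keeps its own SLEEP_DELTA_NS constant.
  private static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1);

  private RetryDelaySketch() {
  }

  /**
   * Picks the next retry delay: the longer "overloaded" pause when the server reported
   * overload, clamped so the retry still fits inside the remaining operation timeout.
   * Returns -1 when no time is left and the caller should fail with a timeout.
   */
  static long nextDelayNs(boolean serverOverloaded, long pauseNs,
      long pauseNsForServerOverloaded, long remainingTimeNs) {
    long pauseToUse = serverOverloaded ? pauseNsForServerOverloaded : pauseNs;
    long maxDelayNs = remainingTimeNs - SLEEP_DELTA_NS;
    if (maxDelayNs <= 0) {
      return -1L;
    }
    return Math.min(pauseToUse, maxDelayNs);
  }
}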
*/ public CompletableFuture start(HBaseRpcController controller, - ScanResponse respWhenOpen) { + ScanResponse respWhenOpen) { return build().start(controller, respWhenOpen); } } @@ -386,8 +386,8 @@ class AsyncRpcRetryingCallerFactory { private int priority = PRIORITY_UNSET; - public MasterRequestCallerBuilder action( - AsyncMasterRequestRpcRetryingCaller.Callable callable) { + public MasterRequestCallerBuilder + action(AsyncMasterRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -468,8 +468,8 @@ class AsyncRpcRetryingCallerFactory { private int priority; - public AdminRequestCallerBuilder action( - AsyncAdminRequestRetryingCaller.Callable callable) { + public AdminRequestCallerBuilder + action(AsyncAdminRequestRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -540,8 +540,8 @@ class AsyncRpcRetryingCallerFactory { private ServerName serverName; - public ServerRequestCallerBuilder action( - AsyncServerRequestRpcRetryingCaller.Callable callable) { + public ServerRequestCallerBuilder + action(AsyncServerRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -583,9 +583,9 @@ class AsyncRpcRetryingCallerFactory { public AsyncServerRequestRpcRetryingCaller build() { return new AsyncServerRequestRpcRetryingCaller(retryTimer, conn, pauseNs, - pauseNsForServerOverloaded, - maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, - checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); + pauseNsForServerOverloaded, maxAttempts, operationTimeoutNs, rpcTimeoutNs, + startLogErrorsCnt, checkNotNull(serverName, "serverName is null"), + checkNotNull(callable, "action is null")); } public CompletableFuture call() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java index 973c0539cf4..dbaae5c26e2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java @@ -75,7 +75,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRespon class AsyncScanSingleRegionRpcRetryingCaller { private static final Logger LOG = - LoggerFactory.getLogger(AsyncScanSingleRegionRpcRetryingCaller.class); + LoggerFactory.getLogger(AsyncScanSingleRegionRpcRetryingCaller.class); private final Timer retryTimer; @@ -130,7 +130,10 @@ class AsyncScanSingleRegionRpcRetryingCaller { private long nextCallSeq = -1L; private enum ScanControllerState { - INITIALIZED, SUSPENDED, TERMINATED, DESTROYED + INITIALIZED, + SUSPENDED, + TERMINATED, + DESTROYED } // Since suspend and terminate should only be called within onNext or onHeartbeat(see the comments @@ -173,7 +176,7 @@ class AsyncScanSingleRegionRpcRetryingCaller { private void preCheck() { Preconditions.checkState(Thread.currentThread() == callerThread, "The current thread is %s, expected thread is %s, " - + "you should not call this method outside onNext or onHeartbeat", + + "you should not call this method outside onNext or onHeartbeat", Thread.currentThread(), callerThread); Preconditions.checkState(state.equals(ScanControllerState.INITIALIZED), "Invalid Stopper state %s", state); @@ -208,7 +211,9 @@ class AsyncScanSingleRegionRpcRetryingCaller { } private enum ScanResumerState { - 
INITIALIZED, SUSPENDED, RESUMED + INITIALIZED, + SUSPENDED, + RESUMED } // The resume method is allowed to be called in another thread so here we also use the @@ -307,11 +312,11 @@ class AsyncScanSingleRegionRpcRetryingCaller { } public AsyncScanSingleRegionRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - Scan scan, ScanMetrics scanMetrics, long scannerId, ScanResultCache resultCache, - AdvancedScanResultConsumer consumer, Interface stub, HRegionLocation loc, - boolean isRegionServerRemote, int priority, long scannerLeaseTimeoutPeriodNs, long pauseNs, - long pauseNsForServerOverloaded, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, - int startLogErrorsCnt) { + Scan scan, ScanMetrics scanMetrics, long scannerId, ScanResultCache resultCache, + AdvancedScanResultConsumer consumer, Interface stub, HRegionLocation loc, + boolean isRegionServerRemote, int priority, long scannerLeaseTimeoutPeriodNs, long pauseNs, + long pauseNsForServerOverloaded, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, + int startLogErrorsCnt) { this.retryTimer = retryTimer; this.scan = scan; this.scanMetrics = scanMetrics; @@ -355,9 +360,8 @@ class AsyncScanSingleRegionRpcRetryingCaller { stub.scan(controller, req, resp -> { if (controller.failed()) { LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId - + " for " + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() - + " failed, ignore, probably already closed", - controller.getFailed()); + + " for " + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, ignore, probably already closed", controller.getFailed()); } }); } @@ -395,26 +399,25 @@ class AsyncScanSingleRegionRpcRetryingCaller { error = translateException(error); if (tries > startLogErrorsCnt) { LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " - + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() - + " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " - + TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() - + " ms", - error); + + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " + + TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + + " ms", error); } - boolean scannerClosed = error instanceof UnknownScannerException - || error instanceof NotServingRegionException + boolean scannerClosed = + error instanceof UnknownScannerException || error instanceof NotServingRegionException || error instanceof RegionServerStoppedException || error instanceof ScannerResetException; RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(error, - EnvironmentEdgeManager.currentTime(), ""); + new RetriesExhaustedException.ThrowableWithExtraContext(error, + EnvironmentEdgeManager.currentTime(), ""); exceptions.add(qt); if (tries >= maxAttempts) { completeExceptionally(!scannerClosed); return; } long delayNs; - long pauseNsToUse = HBaseServerException.isServerOverloaded(error) ? - pauseNsForServerOverloaded : pauseNs; + long pauseNsToUse = + HBaseServerException.isServerOverloaded(error) ? 
pauseNsForServerOverloaded : pauseNs; if (scanTimeoutNs > 0) { long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS; if (maxDelayNs <= 0) { @@ -511,8 +514,7 @@ class AsyncScanSingleRegionRpcRetryingCaller { ScanControllerImpl scanController; if (results.length > 0) { scanController = new ScanControllerImpl( - resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor())) - : Optional.empty()); + resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor())) : Optional.empty()); updateNextStartRowWhenError(results[results.length - 1]); consumer.onNext(results, scanController); } else { @@ -597,7 +599,7 @@ class AsyncScanSingleRegionRpcRetryingCaller { nextCallSeq++; resetController(controller, rpcTimeoutNs, priority); ScanRequest req = - RequestConverter.buildScanRequest(scannerId, 0, false, nextCallSeq, false, true, -1); + RequestConverter.buildScanRequest(scannerId, 0, false, nextCallSeq, false, true, -1); stub.scan(controller, req, resp -> { }); } @@ -610,7 +612,7 @@ class AsyncScanSingleRegionRpcRetryingCaller { * @return {@code true} if we should continue, otherwise {@code false}. */ public CompletableFuture start(HBaseRpcController controller, - ScanResponse respWhenOpen) { + ScanResponse respWhenOpen) { onComplete(controller, respWhenOpen); return future; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java index 8c6cf81f4c7..40cd3b87e92 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ import org.apache.hbase.thirdparty.io.netty.util.Timer; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; /** - * Retry caller for a request call to region server. - * Now only used for coprocessor call to region server. + * Retry caller for a request call to region server. Now only used for coprocessor call to region + * server. 
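[Editor's illustration, not part of the patch] The controller/resumer state machines above back the observer-style scan API exposed to users through AdvancedScanResultConsumer. A hedged sketch of a consumer built on that API follows; the class name and the row limit are made up, and it assumes the consumer callbacks are invoked serially as the async scan delivers batches.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
import org.apache.hadoop.hbase.client.Result;

public class FirstRowsConsumer implements AdvancedScanResultConsumer {
  private static final int LIMIT = 10; // assumed: stop after ten rows

  private final CompletableFuture<Integer> done = new CompletableFuture<>();
  private int seen;

  @Override
  public void onNext(Result[] results, ScanController controller) {
    seen += results.length;
    if (seen >= LIMIT) {
      controller.terminate(); // ask the caller to close the remote scanner early
    }
  }

  @Override
  public void onError(Throwable error) {
    done.completeExceptionally(error);
  }

  @Override
  public void onComplete() {
    done.complete(seen);
  }

  public CompletableFuture<Integer> rowCount() {
    return done;
  }
}

Such a consumer would be handed to AsyncTable#scan(Scan, consumer), with the future completing once the scan finishes or is terminated.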
*/ @InterfaceAudience.Private public class AsyncServerRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { @@ -46,10 +46,10 @@ public class AsyncServerRequestRpcRetryingCaller extends AsyncRpcRetryingCall private ServerName serverName; public AsyncServerRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, - long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { - super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseNsForServerOverloaded, - maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, + long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { + super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseNsForServerOverloaded, maxAttempts, + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java index 31fa1834bb7..9c115af97b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ class AsyncSingleRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { @FunctionalInterface public interface Callable { CompletableFuture call(HBaseRpcController controller, HRegionLocation loc, - ClientService.Interface stub); + ClientService.Interface stub); } private final TableName tableName; @@ -55,9 +55,9 @@ class AsyncSingleRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { private final Callable callable; public AsyncSingleRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - TableName tableName, byte[] row, int replicaId, RegionLocateType locateType, - Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, - int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { + TableName tableName, byte[] row, int replicaId, RegionLocateType locateType, + Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, + int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.tableName = tableName; @@ -73,8 +73,8 @@ class AsyncSingleRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { stub = conn.getRegionServerStub(loc.getServerName()); } catch (IOException e) { onError(e, - () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + - "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + + "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } @@ -82,8 +82,8 @@ class 
AsyncSingleRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { addListener(callable.call(controller, loc, stub), (result, error) -> { if (error != null) { onError(error, - () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + - loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index c7003e05237..da1ca4afa1f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -154,7 +154,7 @@ public interface AsyncTable { * write operations to a row are synchronized, but readers do not take row locks so get and scan * operations can see this operation partially completed. * @param append object that specifies the columns and amounts to be used for the increment - * operations + * operations * @return values of columns after the append operation (maybe null). The return value will be * wrapped by a {@link CompletableFuture}. */ @@ -167,7 +167,7 @@ public interface AsyncTable { * so write operations to a row are synchronized, but readers do not take row locks so get and * scan operations can see this operation partially completed. * @param increment object that specifies the columns and amounts to be used for the increment - * operations + * operations * @return values of columns after the increment. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -177,10 +177,11 @@ public interface AsyncTable { * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} *

    * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the amount is negative). + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @return The new value, post increment. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -196,10 +197,11 @@ public interface AsyncTable { *

    * Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose * any increments that have not been flushed. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. - * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the amount is negative). + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @param durability The persistence guarantee for this increment. * @return The new value, post increment. The return value will be wrapped by a * {@link CompletableFuture}. @@ -234,16 +236,15 @@ public interface AsyncTable { * * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family); /** * A helper class for sending checkAndMutate request. - * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated interface CheckAndMutateBuilder { @@ -273,7 +274,7 @@ public interface AsyncTable { /** * @param compareOp comparison operator to use - * @param value the expected value + * @param value the expected value */ CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value); @@ -320,16 +321,15 @@ public interface AsyncTable { * * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter); /** * A helper class for sending checkAndMutate request with a filter. - * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated interface CheckAndMutateWithFilterBuilder { @@ -399,7 +399,7 @@ public interface AsyncTable { /** * The scan API uses the observer pattern. - * @param scan A configured {@link Scan} object. + * @param scan A configured {@link Scan} object. * @param consumer the consumer used to receive results. * @see ScanResultConsumer * @see AdvancedScanResultConsumer @@ -417,7 +417,7 @@ public interface AsyncTable { /** * Gets a scanner on the current table for the given family and qualifier. - * @param family The column family to scan. + * @param family The column family to scan. * @param qualifier The column qualifier to scan. * @return A scanner. */ @@ -463,7 +463,7 @@ public interface AsyncTable { * a {@code ResultScanner} or let you pass in a {@code ScanResultConsumer}. There is no * performance difference between these scan methods so do not worry. * @param scan A configured {@link Scan} object. So if you use this method to fetch a really large - * result set, it is likely to cause OOM. + * result set, it is likely to cause OOM. * @return The results of this small scan operation. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -580,11 +580,11 @@ public interface AsyncTable { * * * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. 
- * @param row The row key used to identify the remote region location - * @param the type of the asynchronous stub - * @param the type of the return value + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. + * @param row The row key used to identify the remote region location + * @param the type of the asynchronous stub + * @param the type of the return value * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}. * @see ServiceCaller */ @@ -640,13 +640,13 @@ public interface AsyncTable { /** * @param region the region that the response belongs to - * @param resp the response of the coprocessor call + * @param resp the response of the coprocessor call */ void onRegionComplete(RegionInfo region, R resp); /** * @param region the region that the error belongs to - * @param error the response error of the coprocessor call + * @param error the response error of the coprocessor call */ void onRegionError(RegionInfo region, Throwable error); @@ -682,7 +682,7 @@ public interface AsyncTable { } /** - * @param startKey start region selection with region containing this row + * @param startKey start region selection with region containing this row * @param inclusive whether to include the startKey */ CoprocessorServiceBuilder fromRow(byte[] startKey, boolean inclusive); @@ -695,7 +695,7 @@ public interface AsyncTable { } /** - * @param endKey select regions up to and including the region containing this row + * @param endKey select regions up to and including the region containing this row * @param inclusive whether to include the endKey */ CoprocessorServiceBuilder toRow(byte[] endKey, boolean inclusive); @@ -722,10 +722,10 @@ public interface AsyncTable { * * * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. - * @param callback callback to get the response. See the comment of {@link CoprocessorCallback} - * for more details. + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. + * @param callback callback to get the response. See the comment of {@link CoprocessorCallback} + * for more details. */ CoprocessorServiceBuilder coprocessorService(Function stubMaker, ServiceCaller callable, CoprocessorCallback callback); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java index ebf98f98bc3..f6db89f82bf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
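[Editor's illustration, not part of the patch] For the atomic increment described in the AsyncTable javadoc above, a short usage sketch is shown below; the table, family, and qualifier names are assumptions, and the example blocks on the CompletableFuture only to keep it self-contained.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncIncrementExample {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncTable<?> table = conn.getTable(TableName.valueOf("my_table")); // assumed table name
      long newValue = table
        .incrementColumnValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
          Bytes.toBytes("counter"), 1L)
        .get(); // join the future just for the example; callers normally chain on it
      System.out.println("counter is now " + newValue);
    }
  }
}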
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseServerException; import org.apache.yetus.audience.InterfaceAudience; @@ -82,18 +81,17 @@ public interface AsyncTableBuilder { AsyncTableBuilder setRetryPause(long pause, TimeUnit unit); /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time when retrying. *

    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) * @deprecated Since 2.5.0, will be removed in 4.0.0. Please use - * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. + * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. */ @Deprecated default AsyncTableBuilder setRetryPauseForCQTBE(long pause, TimeUnit unit) { @@ -101,15 +99,14 @@ public interface AsyncTableBuilder { } /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time when retrying. *

    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) */ AsyncTableBuilder setRetryPauseForServerOverloaded(long pause, TimeUnit unit); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java index bec9f123690..7c58e8c672f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -29,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private abstract class AsyncTableBuilderBase - implements AsyncTableBuilder { + implements AsyncTableBuilder { protected TableName tableName; @@ -53,7 +52,8 @@ abstract class AsyncTableBuilderBase AsyncTableBuilderBase(TableName tableName, AsyncConnectionConfiguration connConf) { this.tableName = tableName; - this.operationTimeoutNs = tableName.isSystemTable() ? connConf.getMetaOperationTimeoutNs() + this.operationTimeoutNs = tableName.isSystemTable() + ? 
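[Editor's illustration, not part of the patch] A hedged sketch of configuring the per-table retry pauses discussed above through the builder; the table name and all timing values are assumptions chosen so the overloaded pause is larger than the normal pause, as the javadoc recommends.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;

public class TableBuilderExample {
  static AsyncTable<?> buildTable(AsyncConnection conn) {
    return conn.getTableBuilder(TableName.valueOf("my_table")) // assumed table name
      .setRetryPause(100, TimeUnit.MILLISECONDS)               // normal retry pause
      .setRetryPauseForServerOverloaded(1, TimeUnit.SECONDS)   // larger pause when overloaded
      .setOperationTimeout(30, TimeUnit.SECONDS)
      .setMaxRetries(10)
      .build();
  }
}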
connConf.getMetaOperationTimeoutNs() : connConf.getOperationTimeoutNs(); this.scanTimeoutNs = connConf.getScanTimeoutNs(); this.rpcTimeoutNs = connConf.getRpcTimeoutNs(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java index 9f780fbf9e9..7f5e6562c02 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java @@ -212,7 +212,7 @@ class AsyncTableImpl implements AsyncTable { @Override public List> - checkAndMutate(List checkAndMutates) { + checkAndMutate(List checkAndMutates) { return rawTable.checkAndMutate(checkAndMutates).stream().map(this::wrap).collect(toList()); } @@ -279,14 +279,14 @@ class AsyncTableImpl implements AsyncTable { @Override public CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, byte[] row) { + ServiceCaller callable, byte[] row) { return wrap(rawTable.coprocessorService(stubMaker, callable, row)); } @Override public CoprocessorServiceBuilder coprocessorService( - Function stubMaker, ServiceCaller callable, - CoprocessorCallback callback) { + Function stubMaker, ServiceCaller callable, + CoprocessorCallback callback) { final Context context = Context.current(); CoprocessorCallback wrappedCallback = new CoprocessorCallback() { @@ -311,7 +311,7 @@ class AsyncTableImpl implements AsyncTable { } }; CoprocessorServiceBuilder builder = - rawTable.coprocessorService(stubMaker, callable, wrappedCallback); + rawTable.coprocessorService(stubMaker, callable, wrappedCallback); return new CoprocessorServiceBuilder() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java index 321f44e87b5..11867ca333b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public interface AsyncTableRegionLocator { * Finds the region on which the given row is being served. *

    * Returns the location of the region to which the row belongs. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information */ default CompletableFuture getRegionLocation(byte[] row, boolean reload) { @@ -67,7 +67,7 @@ public interface AsyncTableRegionLocator { *

    * Returns the location of the region with the given replicaId to which the row * belongs. - * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id of the region */ default CompletableFuture getRegionLocation(byte[] row, int replicaId) { @@ -79,9 +79,9 @@ public interface AsyncTableRegionLocator { *

    * Returns the location of the region with the given replicaId to which the row * belongs. - * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id of the region - * @param reload true to reload information or false to use cached information + * @param reload true to reload information or false to use cached information */ CompletableFuture getRegionLocation(byte[] row, int replicaId, boolean reload); @@ -97,7 +97,7 @@ public interface AsyncTableRegionLocator { /** * Find all the replicas for the region on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Locations for all the replicas of the row. * @throws IOException if a remote or network exception occurs diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java index db7d78cab93..06c39f6593e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ class AsyncTableRegionLocatorImpl implements AsyncTableRegionLocator { } CompletableFuture> future = AsyncMetaTableAccessor .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); - addListener(future, (locs, error) -> locs.forEach(loc -> conn.getLocator() - .getNonMetaRegionLocator().addLocationToCache(loc))); + addListener(future, (locs, error) -> locs + .forEach(loc -> conn.getLocator().getNonMetaRegionLocator().addLocationToCache(loc))); return future; }, getClass().getSimpleName() + ".getAllRegionLocations"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java index 39427fbcd4c..85f344b10b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java @@ -77,7 +77,7 @@ class AsyncTableResultScanner implements ResultScanner, AdvancedScanResultConsum if (LOG.isDebugEnabled()) { LOG.debug( "{} stop prefetching when scanning {} as the cache size {}" - + " is greater than the maxCacheSize {}", + + " is greater than the maxCacheSize {}", String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, maxCacheSize); } resumer = controller.suspend(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java index d693cb329b3..259807866d8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,20 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. 
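[Editor's illustration, not part of the patch] A short usage sketch for the region-lookup API above; the table and row values are assumptions, and the location arrives asynchronously through the returned CompletableFuture.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTableRegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRowExample {
  static void printLocation(AsyncConnection conn) {
    AsyncTableRegionLocator locator = conn.getRegionLocator(TableName.valueOf("my_table"));
    locator.getRegionLocation(Bytes.toBytes("row1"), true) // reload=true bypasses the cache
      .thenAccept((HRegionLocation loc) ->
        System.out.println("row1 is served by " + loc.getServerName()));
  }
}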
*/ - package org.apache.hadoop.hbase.client; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public interface Attributes { /** - * Sets an attribute. - * In case value = null attribute is removed from the attributes map. - * Attribute names starting with _ indicate system attributes. - * @param name attribute name + * Sets an attribute. In case value = null attribute is removed from the attributes map. Attribute + * names starting with _ indicate system attributes. + * @param name attribute name * @param value attribute value */ Attributes setAttribute(String name, byte[] value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java index 4e67bcedbd8..70a809da05b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,19 +34,15 @@ public final class BalanceRequest { private boolean dryRun = false; private boolean ignoreRegionsInTransition = false; - private Builder() {} + private Builder() { + } /** - * Updates BalancerRequest to run the balancer in dryRun mode. - * In this mode, the balancer will try to find a plan but WILL NOT - * execute any region moves or call any coprocessors. - * - * You can run in dryRun mode regardless of whether the balancer switch - * is enabled or disabled, but dryRun mode will not run over an existing - * request or chore. - * - * Dry run is useful for testing out new balance configs. See the logs - * on the active HMaster for the results of the dry run. + * Updates BalancerRequest to run the balancer in dryRun mode. In this mode, the balancer will + * try to find a plan but WILL NOT execute any region moves or call any coprocessors. You can + * run in dryRun mode regardless of whether the balancer switch is enabled or disabled, but + * dryRun mode will not run over an existing request or chore. Dry run is useful for testing out + * new balance configs. See the logs on the active HMaster for the results of the dry run. */ public Builder setDryRun(boolean dryRun) { this.dryRun = dryRun; @@ -55,10 +50,8 @@ public final class BalanceRequest { } /** - * Updates BalancerRequest to run the balancer even if there are regions - * in transition. - * - * WARNING: Advanced usage only, this could cause more issues than it fixes. + * Updates BalancerRequest to run the balancer even if there are regions in transition. WARNING: + * Advanced usage only, this could cause more issues than it fixes. */ public Builder setIgnoreRegionsInTransition(boolean ignoreRegionsInTransition) { this.ignoreRegionsInTransition = ignoreRegionsInTransition; @@ -81,8 +74,8 @@ public final class BalanceRequest { } /** - * Get a BalanceRequest for a default run of the balancer. The default mode executes - * any moves calculated and will not run if regions are already in transition. + * Get a BalanceRequest for a default run of the balancer. The default mode executes any moves + * calculated and will not run if regions are already in transition. 
*/ public static BalanceRequest defaultInstance() { return DEFAULT; @@ -97,16 +90,16 @@ public final class BalanceRequest { } /** - * Returns true if the balancer should run in dry run mode, otherwise false. In - * dry run mode, moves will be calculated but not executed. + * Returns true if the balancer should run in dry run mode, otherwise false. In dry run mode, + * moves will be calculated but not executed. */ public boolean isDryRun() { return dryRun; } /** - * Returns true if the balancer should execute even if regions are in transition, otherwise - * false. This is an advanced usage feature, as it can cause more issues than it fixes. + * Returns true if the balancer should execute even if regions are in transition, otherwise false. + * This is an advanced usage feature, as it can cause more issues than it fixes. */ public boolean isIgnoreRegionsInTransition() { return ignoreRegionsInTransition; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java index 143878209d1..c7914f150de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; public final class BalanceResponse { /** - * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance invocation to callers + * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance + * invocation to callers */ @InterfaceAudience.Private public final static class Builder { @@ -35,13 +35,13 @@ public final class BalanceResponse { private int movesCalculated; private int movesExecuted; - private Builder() {} + private Builder() { + } /** * Set true if the balancer ran, otherwise false. The balancer may not run in some - * circumstances, such as if a balance is already running or there are regions already - * in transition. - * + * circumstances, such as if a balance is already running or there are regions already in + * transition. * @param balancerRan true if balancer ran, false otherwise */ public Builder setBalancerRan(boolean balancerRan) { @@ -52,7 +52,6 @@ public final class BalanceResponse { /** * Set how many moves were calculated by the balancer. This will be zero if the cluster is * already balanced. - * * @param movesCalculated moves calculated by the balance run */ public Builder setMovesCalculated(int movesCalculated) { @@ -64,7 +63,6 @@ public final class BalanceResponse { * Set how many of the calculated moves were actually executed by the balancer. This should be * zero if the balancer is run with {@link BalanceRequest#isDryRun()}. It may also not equal * movesCalculated if the balancer ran out of time while executing the moves. - * * @param movesExecuted moves executed by the balance run */ public Builder setMovesExecuted(int movesExecuted) { @@ -98,9 +96,9 @@ public final class BalanceResponse { } /** - * Returns true if the balancer ran, otherwise false. The balancer may not run for a - * variety of reasons, such as: another balance is running, there are regions in - * transition, the cluster is in maintenance mode, etc. + * Returns true if the balancer ran, otherwise false. 
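To show how the request and response types in this hunk fit together from the client side, here is a hedged sketch that submits a dry-run balance through Admin and reads the resulting BalanceResponse. The BalanceRequest.newBuilder() entry point and the Admin#balance(BalanceRequest) overload are assumed from the surrounding client API rather than shown in this hunk.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.BalanceRequest;
    import org.apache.hadoop.hbase.client.BalanceResponse;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DryRunBalanceExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
          Admin admin = conn.getAdmin()) {
          // Compute a balancing plan without moving any regions or invoking coprocessors.
          BalanceRequest request = BalanceRequest.newBuilder().setDryRun(true).build();
          BalanceResponse response = admin.balance(request);
          System.out.println("balancer ran: " + response.isBalancerRan());
          System.out.println("moves calculated: " + response.getMovesCalculated());
          System.out.println("moves executed: " + response.getMovesExecuted());
        }
      }
    }

As the javadoc above notes, the details of a dry run are only logged on the active HMaster; the response carries just the counters.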
The balancer may not run for a variety of + * reasons, such as: another balance is running, there are regions in transition, the cluster is + * in maintenance mode, etc. */ public boolean isBalancerRan() { return balancerRan; @@ -115,10 +113,10 @@ public final class BalanceResponse { } /** - * The number of moves actually executed by the balancer if it ran. This will be - * zero if {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} - * was true. It may also not be equal to {@link #getMovesCalculated()} if the balancer - * was interrupted midway through executing the moves due to max run time. + * The number of moves actually executed by the balancer if it ran. This will be zero if + * {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} was true. It may + * also not be equal to {@link #getMovesCalculated()} if the balancer was interrupted midway + * through executing the moves due to max run time. */ public int getMovesExecuted() { return movesExecuted; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java index e2bf2e28e0e..4d66da19402 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -45,17 +42,15 @@ final public class BalancerDecision extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(BalancerDecision.class, (JsonSerializer) - (balancerDecision, type, jsonSerializationContext) -> { + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(BalancerDecision.class, + (JsonSerializer) (balancerDecision, type, jsonSerializationContext) -> { Gson gson = new Gson(); return gson.toJsonTree(balancerDecision); }).create(); private BalancerDecision(String initialFunctionCosts, String finalFunctionCosts, - double initTotalCost, double computedTotalCost, List regionPlans, - long computedSteps) { + double initTotalCost, double computedTotalCost, List regionPlans, long computedSteps) { this.initialFunctionCosts = initialFunctionCosts; this.finalFunctionCosts = finalFunctionCosts; this.initTotalCost = initTotalCost; @@ -90,14 +85,10 @@ final public class BalancerDecision extends LogEntry { @Override public String toString() { - return new ToStringBuilder(this) - .append("initialFunctionCosts", initialFunctionCosts) - .append("finalFunctionCosts", finalFunctionCosts) - .append("initTotalCost", initTotalCost) - .append("computedTotalCost", computedTotalCost) - .append("computedSteps", computedSteps) - .append("regionPlans", regionPlans) - .toString(); + return new ToStringBuilder(this).append("initialFunctionCosts", initialFunctionCosts) + .append("finalFunctionCosts", 
finalFunctionCosts).append("initTotalCost", initTotalCost) + .append("computedTotalCost", computedTotalCost).append("computedSteps", computedSteps) + .append("regionPlans", regionPlans).toString(); } @Override @@ -144,8 +135,8 @@ final public class BalancerDecision extends LogEntry { } public BalancerDecision build() { - return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, - initTotalCost, computedTotalCost, regionPlans, computedSteps); + return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, initTotalCost, + computedTotalCost, regionPlans, computedSteps); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java index d6e6cee20fc..eb5f7ff7ad2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -37,27 +34,25 @@ import org.apache.hbase.thirdparty.com.google.gson.JsonSerializer; @InterfaceAudience.Public @InterfaceStability.Evolving final public class BalancerRejection extends LogEntry { - //The reason why balancer was rejected + // The reason why balancer was rejected private final String reason; private final List costFuncInfoList; // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .disableHtmlEscaping() - .registerTypeAdapter(BalancerRejection.class, (JsonSerializer) - (balancerRejection, type, jsonSerializationContext) -> { + private static final Gson GSON = GsonUtil.createGson().setPrettyPrinting().disableHtmlEscaping() + .registerTypeAdapter(BalancerRejection.class, + (JsonSerializer) (balancerRejection, type, jsonSerializationContext) -> { Gson gson = new Gson(); return gson.toJsonTree(balancerRejection); - }).create(); + }) + .create(); private BalancerRejection(String reason, List costFuncInfoList) { this.reason = reason; - if(costFuncInfoList == null){ + if (costFuncInfoList == null) { this.costFuncInfoList = Collections.emptyList(); - } - else { + } else { this.costFuncInfoList = costFuncInfoList; } } @@ -72,10 +67,8 @@ final public class BalancerRejection extends LogEntry { @Override public String toString() { - return new ToStringBuilder(this) - .append("reason", reason) - .append("costFuncInfoList", costFuncInfoList.toString()) - .toString(); + return new ToStringBuilder(this).append("reason", reason) + .append("costFuncInfoList", costFuncInfoList.toString()).toString(); } @Override @@ -92,19 +85,15 @@ final public class BalancerRejection extends LogEntry { return this; } - public void addCostFuncInfo(String funcName, double cost, float multiplier){ - if(costFuncInfoList == null){ + public void addCostFuncInfo(String funcName, double cost, float multiplier) { + if (costFuncInfoList == 
null) { costFuncInfoList = new ArrayList<>(); } - costFuncInfoList.add( - new StringBuilder() - .append(funcName) - .append(" cost:").append(cost) - .append(" multiplier:").append(multiplier) - .toString()); + costFuncInfoList.add(new StringBuilder().append(funcName).append(" cost:").append(cost) + .append(" multiplier:").append(multiplier).toString()); } - public Builder setCostFuncInfoList(List costFuncInfoList){ + public Builder setCostFuncInfoList(List costFuncInfoList) { this.costFuncInfoList = costFuncInfoList; return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java index d3cdc74fdf6..44d5139afbd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.ServerName; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.hbase.ServerName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; class BatchErrors { private static final Logger LOG = LoggerFactory.getLogger(BatchErrors.class); @@ -33,7 +30,7 @@ class BatchErrors { final List addresses = new ArrayList<>(); public synchronized void add(Throwable ex, Row row, ServerName serverName) { - if (row == null){ + if (row == null) { throw new IllegalArgumentException("row cannot be null. location=" + serverName); } @@ -48,11 +45,10 @@ class BatchErrors { synchronized RetriesExhaustedWithDetailsException makeException(boolean logDetails) { if (logDetails) { - LOG.error("Exception occurred! Exception details: " + throwables + ";\nActions: " - + actions); + LOG.error("Exception occurred! Exception details: " + throwables + ";\nActions: " + actions); } return new RetriesExhaustedWithDetailsException(new ArrayList<>(throwables), - new ArrayList<>(actions), new ArrayList<>(addresses)); + new ArrayList<>(actions), new ArrayList<>(addresses)); } public synchronized void clear() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java index 3b27298585e..b0423c6c5ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
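The BalancerDecision and BalancerRejection records in this hunk reach clients as LogEntry objects. The sketch below assumes the Admin#getLogEntries call with the "BALANCER_DECISION" log type and a null server list for master-side entries, which is how this reporting is commonly retrieved; it simply pretty-prints whatever comes back.

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.LogEntry;
    import org.apache.hadoop.hbase.client.ServerType;

    public class PrintBalancerDecisions {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
          Admin admin = conn.getAdmin()) {
          // Fetch up to 25 of the most recent balancer decisions recorded on the active master.
          List<LogEntry> entries = admin.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER,
            25, Collections.emptyMap());
          entries.forEach(entry -> System.out.println(entry.toJsonPrettyPrint()));
        }
      }
    }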
See the NOTICE file * distributed with this work for additional information @@ -24,11 +24,10 @@ import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Deque; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache for batched scan, i.e, @@ -142,8 +141,9 @@ public class BatchScanResultCache implements ScanResultCache { numberOfCompleteRows++; } // check if we have a row change - if (!partialResults.isEmpty() && - !Bytes.equals(partialResults.peek().getRow(), result.getRow())) { + if ( + !partialResults.isEmpty() && !Bytes.equals(partialResults.peek().getRow(), result.getRow()) + ) { regroupedResults.add(createCompletedResult()); } Result regroupedResult = regroupResults(result); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java index 7805f77e30e..2cba44baf9d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,34 +25,38 @@ import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; /** - *

<p>Used to communicate with a single HBase table similar to {@link Table} but meant for
- * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call
- * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via
- * the {@link BufferedMutatorParams}.
+ * <p>
+ * Used to communicate with a single HBase table similar to {@link Table} but meant for batched,
+ * asynchronous puts. Obtain an instance from a {@link Connection} and call {@link #close()}
+ * afterwards. Customizations can be applied to the {@code BufferedMutator} via the
+ * {@link BufferedMutatorParams}.
 * </p>
- *
- * <p>Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}.
- * The default implementation is to throw the exception upon receipt. This behavior can be
- * overridden with a custom implementation, provided as a parameter with
- * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.</p>
- *
- * <p>Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs
- * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the
- * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size
- * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue
- * without interruption.
+ * <p>
+ * Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}. The
+ * default implementation is to throw the exception upon receipt. This behavior can be overridden
+ * with a custom implementation, provided as a parameter with
+ * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.
 * </p>
- *
- * <p>{@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs
- * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can
- * also be effectively used in high volume online systems to batch puts, with the caveat that
- * extreme circumstances, such as JVM or machine failure, may cause some data loss.</p>
- *
- * <p>NOTE: This class replaces the functionality that used to be available via
+ * <p>
+ * Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs benefit
+ * from batching, but have no natural flush point. {@code BufferedMutator} receives the puts from
+ * the M/R job and will batch puts based on some heuristic, such as the accumulated size of the
+ * puts, and submit batches of puts asynchronously so that the M/R logic can continue without
+ * interruption.
+ * </p>
+ * <p>
+ * {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs will
+ * have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can also be
+ * effectively used in high volume online systems to batch puts, with the caveat that extreme
+ * circumstances, such as JVM or machine failure, may cause some data loss.
+ * </p>
+ * <p>
+ * NOTE: This class replaces the functionality that used to be available via
 * HTable#setAutoFlush(boolean) set to {@code false}.
 * </p>
- *
- * <p>See also the {@code BufferedMutatorExample} in the hbase-examples module.</p>
+ * <p>
+ * See also the {@code BufferedMutatorExample} in the hbase-examples module.
+ * </p>
    * @see ConnectionFactory * @see Connection * @since 1.0.0 @@ -66,8 +69,8 @@ public interface BufferedMutator extends Closeable { String CLASSNAME_KEY = "hbase.client.bufferedmutator.classname"; /** - * Having the timer tick run more often that once every 100ms is needless and will - * probably cause too many timer events firing having a negative impact on performance. + * Having the timer tick run more often that once every 100ms is needless and will probably cause + * too many timer events firing having a negative impact on performance. */ long MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = 100; @@ -79,25 +82,22 @@ public interface BufferedMutator extends Closeable { /** * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance. *

    - * The reference returned is not a copy, so any change made to it will - * affect this instance. + * The reference returned is not a copy, so any change made to it will affect this instance. */ Configuration getConfiguration(); /** - * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the - * wire as part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. - * + * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the wire as + * part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. * @param mutation The data to send. * @throws IOException if a remote or network exception occurs. */ void mutate(Mutation mutation) throws IOException; /** - * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the - * wire as part of a batch. There is no guarantee of sending entire content of {@code mutations} - * in a single batch; it will be broken up according to the write buffer capacity. - * + * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the wire + * as part of a batch. There is no guarantee of sending entire content of {@code mutations} in a + * single batch; it will be broken up according to the write buffer capacity. * @param mutations The data to send. * @throws IOException if a remote or network exception occurs. */ @@ -105,24 +105,22 @@ public interface BufferedMutator extends Closeable { /** * Performs a {@link #flush()} and releases any resources held. - * * @throws IOException if a remote or network exception occurs. */ @Override void close() throws IOException; /** - * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they - * are done. - * + * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they are + * done. * @throws IOException if a remote or network exception occurs. */ void flush() throws IOException; /** * Sets the maximum time before the buffer is automatically flushed checking once per second. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. */ default void setWriteBufferPeriodicFlush(long timeoutMs) { setWriteBufferPeriodicFlush(timeoutMs, 1000L); @@ -130,16 +128,16 @@ public interface BufferedMutator extends Closeable { /** * Sets the maximum time before the buffer is automatically flushed. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. - * @param timerTickMs The number of milliseconds between each check if the - * timeout has been exceeded. Must be 100ms (as defined in - * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) - * or larger to avoid performance problems. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. + * @param timerTickMs The number of milliseconds between each check if the timeout has been + * exceeded. Must be 100ms (as defined in + * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) or larger to avoid + * performance problems. 
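A short usage sketch of the interface documented above: puts are buffered and shipped in batches, with an explicit flush() before close(). The table name, column family, and row layout are made up for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedMutatorUsage {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
          BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("demo"))) {
          for (int i = 0; i < 1000; i++) {
            Put put = new Put(Bytes.toBytes("row-" + i));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
            // Buffered locally; batches are sent asynchronously as the buffer fills up.
            mutator.mutate(put);
          }
          // close() also flushes, but flushing explicitly surfaces any failures before teardown.
          mutator.flush();
        }
      }
    }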
*/ default void setWriteBufferPeriodicFlush(long timeoutMs, long timerTickMs) { throw new UnsupportedOperationException( - "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); + "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); } /** @@ -151,22 +149,22 @@ public interface BufferedMutator extends Closeable { /** * Returns the current periodic flush timeout value in milliseconds. - * @return The maximum number of milliseconds how long records may be buffered before they - * are flushed. The value 0 means this is disabled. + * @return The maximum number of milliseconds how long records may be buffered before they are + * flushed. The value 0 means this is disabled. */ default long getWriteBufferPeriodicFlushTimeoutMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); } /** * Returns the current periodic flush timertick interval in milliseconds. - * @return The number of milliseconds between each check if the timeout has been exceeded. - * This value only has a real meaning if the timeout has been set to > 0 + * @return The number of milliseconds between each check if the timeout has been exceeded. This + * value only has a real meaning if the timeout has been set to > 0 */ default long getWriteBufferPeriodicFlushTimerTickMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); } /** @@ -192,7 +190,7 @@ public interface BufferedMutator extends Closeable { */ @InterfaceAudience.Public interface ExceptionListener { - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException; + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java index d3b18864f2a..e0795c7c4c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java @@ -1,16 +1,18 @@ -/** +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.client; @@ -43,19 +45,17 @@ import org.slf4j.LoggerFactory; /** *

<p>
- * Used to communicate with a single HBase table similar to {@link Table}
- * but meant for batched, potentially asynchronous puts. Obtain an instance from
- * a {@link Connection} and call {@link #close()} afterwards. Provide an alternate
- * to this implementation by setting {@link BufferedMutatorParams#implementationClassName(String)}
- * or by setting alternate classname via the key {} in Configuration.
+ * Used to communicate with a single HBase table similar to {@link Table} but meant for batched,
+ * potentially asynchronous puts. Obtain an instance from a {@link Connection} and call
+ * {@link #close()} afterwards. Provide an alternate to this implementation by setting
+ * {@link BufferedMutatorParams#implementationClassName(String)} or by setting alternate classname
+ * via the key {} in Configuration.
 * </p>
- *
 * <p>
- * While this can be used across threads, great care should be used when doing so.
- * Errors are global to the buffered mutator and the Exceptions can be thrown on any
- * thread that causes the flush for requests.
+ * While this can be used across threads, great care should be used when doing so. Errors are global
+ * to the buffered mutator and the Exceptions can be thrown on any thread that causes the flush for
+ * requests.
 * </p>
    - * * @see ConnectionFactory * @see Connection * @since 1.0.0 @@ -74,15 +74,15 @@ public class BufferedMutatorImpl implements BufferedMutator { private final ConcurrentLinkedQueue writeAsyncBuffer = new ConcurrentLinkedQueue<>(); private final AtomicLong currentWriteBufferSize = new AtomicLong(0); /** - * Count the size of {@link BufferedMutatorImpl#writeAsyncBuffer}. - * The {@link ConcurrentLinkedQueue#size()} is NOT a constant-time operation. + * Count the size of {@link BufferedMutatorImpl#writeAsyncBuffer}. The + * {@link ConcurrentLinkedQueue#size()} is NOT a constant-time operation. */ private final AtomicInteger undealtMutationCount = new AtomicInteger(0); private final long writeBufferSize; private final AtomicLong writeBufferPeriodicFlushTimeoutMs = new AtomicLong(0); private final AtomicLong writeBufferPeriodicFlushTimerTickMs = - new AtomicLong(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + new AtomicLong(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); private Timer writeBufferPeriodicFlushTimer = null; private final int maxKeyValueSize; @@ -108,38 +108,38 @@ public class BufferedMutatorImpl implements BufferedMutator { cleanupPoolOnClose = false; } ConnectionConfiguration tableConf = new ConnectionConfiguration(conf); - this.writeBufferSize = - params.getWriteBufferSize() != UNSET ? - params.getWriteBufferSize() : tableConf.getWriteBufferSize(); + this.writeBufferSize = params.getWriteBufferSize() != UNSET + ? params.getWriteBufferSize() + : tableConf.getWriteBufferSize(); // Set via the setter because it does value validation and starts/stops the TimerTask long newWriteBufferPeriodicFlushTimeoutMs = - params.getWriteBufferPeriodicFlushTimeoutMs() != UNSET - ? params.getWriteBufferPeriodicFlushTimeoutMs() - : tableConf.getWriteBufferPeriodicFlushTimeoutMs(); + params.getWriteBufferPeriodicFlushTimeoutMs() != UNSET + ? params.getWriteBufferPeriodicFlushTimeoutMs() + : tableConf.getWriteBufferPeriodicFlushTimeoutMs(); long newWriteBufferPeriodicFlushTimerTickMs = - params.getWriteBufferPeriodicFlushTimerTickMs() != UNSET - ? params.getWriteBufferPeriodicFlushTimerTickMs() - : tableConf.getWriteBufferPeriodicFlushTimerTickMs(); - this.setWriteBufferPeriodicFlush( - newWriteBufferPeriodicFlushTimeoutMs, - newWriteBufferPeriodicFlushTimerTickMs); + params.getWriteBufferPeriodicFlushTimerTickMs() != UNSET + ? params.getWriteBufferPeriodicFlushTimerTickMs() + : tableConf.getWriteBufferPeriodicFlushTimerTickMs(); + this.setWriteBufferPeriodicFlush(newWriteBufferPeriodicFlushTimeoutMs, + newWriteBufferPeriodicFlushTimerTickMs); - this.maxKeyValueSize = - params.getMaxKeyValueSize() != UNSET ? - params.getMaxKeyValueSize() : tableConf.getMaxKeyValueSize(); + this.maxKeyValueSize = params.getMaxKeyValueSize() != UNSET + ? params.getMaxKeyValueSize() + : tableConf.getMaxKeyValueSize(); - this.rpcTimeout = new AtomicInteger( - params.getRpcTimeout() != UNSET ? - params.getRpcTimeout() : conn.getConnectionConfiguration().getWriteRpcTimeout()); + this.rpcTimeout = new AtomicInteger(params.getRpcTimeout() != UNSET + ? params.getRpcTimeout() + : conn.getConnectionConfiguration().getWriteRpcTimeout()); - this.operationTimeout = new AtomicInteger( - params.getOperationTimeout() != UNSET ? - params.getOperationTimeout() : conn.getConnectionConfiguration().getOperationTimeout()); + this.operationTimeout = new AtomicInteger(params.getOperationTimeout() != UNSET + ? 
params.getOperationTimeout() + : conn.getConnectionConfiguration().getOperationTimeout()); this.ap = ap; } + BufferedMutatorImpl(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory, - RpcControllerFactory rpcFactory, BufferedMutatorParams params) { + RpcControllerFactory rpcFactory, BufferedMutatorParams params) { this(conn, params, // puts need to track errors globally due to how the APIs currently work. new AsyncProcess(conn, conn.getConfiguration(), rpcCallerFactory, rpcFactory)); @@ -170,14 +170,14 @@ public class BufferedMutatorImpl implements BufferedMutator { } @Override - public void mutate(Mutation m) throws InterruptedIOException, - RetriesExhaustedWithDetailsException { + public void mutate(Mutation m) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { mutate(Collections.singletonList(m)); } @Override - public void mutate(List ms) throws InterruptedIOException, - RetriesExhaustedWithDetailsException { + public void mutate(List ms) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { checkClose(); long toAddSize = 0; @@ -251,12 +251,8 @@ public class BufferedMutatorImpl implements BufferedMutator { } private AsyncProcessTask createTask(QueueRowAccess access) { - return new AsyncProcessTask(AsyncProcessTask.newBuilder() - .setPool(pool) - .setTableName(tableName) - .setRowAccess(access) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE) - .build()) { + return new AsyncProcessTask(AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName) + .setRowAccess(access).setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE).build()) { @Override public int getRpcTimeout() { return rpcTimeout.get(); @@ -277,12 +273,11 @@ public class BufferedMutatorImpl implements BufferedMutator { /** * Send the operations in the buffer to the servers. - * * @param flushAll - if true, sends all the writes and wait for all of them to finish before * returning. Otherwise, flush until buffer size is smaller than threshold */ - private void doFlush(boolean flushAll) throws InterruptedIOException, - RetriesExhaustedWithDetailsException { + private void doFlush(boolean flushAll) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { List errors = new ArrayList<>(); while (true) { if (!flushAll && currentWriteBufferSize.get() <= writeBufferSize) { @@ -308,15 +303,15 @@ public class BufferedMutatorImpl implements BufferedMutator { RetriesExhaustedWithDetailsException exception = makeException(errors); if (exception == null) { return; - } else if(listener == null) { + } else if (listener == null) { throw exception; } else { listener.onException(exception, this); } } - private static RetriesExhaustedWithDetailsException makeException( - List errors) { + private static RetriesExhaustedWithDetailsException + makeException(List errors) { switch (errors.size()) { case 0: return null; @@ -345,17 +340,19 @@ public class BufferedMutatorImpl implements BufferedMutator { @Override public synchronized void setWriteBufferPeriodicFlush(long timeoutMs, long timerTickMs) { - long originalTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs.get(); + long originalTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs.get(); long originalTimerTickMs = this.writeBufferPeriodicFlushTimerTickMs.get(); // Both parameters have minimal values. 
writeBufferPeriodicFlushTimeoutMs.set(Math.max(0, timeoutMs)); - writeBufferPeriodicFlushTimerTickMs.set( - Math.max(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, timerTickMs)); + writeBufferPeriodicFlushTimerTickMs + .set(Math.max(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, timerTickMs)); // If something changed we stop the old Timer. - if (writeBufferPeriodicFlushTimeoutMs.get() != originalTimeoutMs || - writeBufferPeriodicFlushTimerTickMs.get() != originalTimerTickMs) { + if ( + writeBufferPeriodicFlushTimeoutMs.get() != originalTimeoutMs + || writeBufferPeriodicFlushTimerTickMs.get() != originalTimerTickMs + ) { if (writeBufferPeriodicFlushTimer != null) { writeBufferPeriodicFlushTimer.cancel(); writeBufferPeriodicFlushTimer = null; @@ -363,16 +360,14 @@ public class BufferedMutatorImpl implements BufferedMutator { } // If we have the need for a timer and there is none we start it - if (writeBufferPeriodicFlushTimer == null && - writeBufferPeriodicFlushTimeoutMs.get() > 0) { + if (writeBufferPeriodicFlushTimer == null && writeBufferPeriodicFlushTimeoutMs.get() > 0) { writeBufferPeriodicFlushTimer = new Timer(true); // Create Timer running as Daemon. writeBufferPeriodicFlushTimer.schedule(new TimerTask() { @Override public void run() { BufferedMutatorImpl.this.timerCallbackForWriteBufferPeriodicFlush(); } - }, writeBufferPeriodicFlushTimerTickMs.get(), - writeBufferPeriodicFlushTimerTickMs.get()); + }, writeBufferPeriodicFlushTimerTickMs.get(), writeBufferPeriodicFlushTimerTickMs.get()); } } @@ -446,10 +441,12 @@ public class BufferedMutatorImpl implements BufferedMutator { public Iterator iterator() { return new Iterator() { private int countDown = remainder; + @Override public boolean hasNext() { return countDown > 0; } + @Override public Row next() { restoreLastMutation(); @@ -464,6 +461,7 @@ public class BufferedMutatorImpl implements BufferedMutator { --countDown; return last; } + @Override public void remove() { if (last == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java index 43495b3fab7..da3aa7f0975 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.concurrent.ExecutorService; @@ -43,8 +41,7 @@ public class BufferedMutatorParams implements Cloneable { private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() { @Override public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator bufferedMutator) - throws RetriesExhaustedWithDetailsException { + BufferedMutator bufferedMutator) throws RetriesExhaustedWithDetailsException { throw exception; } }; @@ -141,8 +138,8 @@ public class BufferedMutatorParams implements Cloneable { } /** - * Override the default executor pool defined by the {@code hbase.htable.threads.*} - * configuration values. + * Override the default executor pool defined by the {@code hbase.htable.threads.*} configuration + * values. 
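Tying the parameters above together, a hedged sketch that installs a custom ExceptionListener and enables periodic flushing. The buffer size, the 30 second timeout, and the log-and-continue policy in the listener are illustrative choices, not values taken from this patch.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.BufferedMutatorParams;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TunedBufferedMutator {
      public static void main(String[] args) throws Exception {
        // Log failed mutations and keep going instead of rethrowing (the default behavior).
        BufferedMutator.ExceptionListener listener = (exception, mutator) -> {
          for (int i = 0; i < exception.getNumExceptions(); i++) {
            System.err.println("Failed mutation " + exception.getRow(i) + ": "
              + exception.getCause(i));
          }
        };
        BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("demo"))
          .writeBufferSize(4L * 1024 * 1024) // 4 MB buffer, an illustrative value
          .listener(listener);
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
          BufferedMutator mutator = conn.getBufferedMutator(params)) {
          // Also flush anything buffered at least every 30 seconds, checked once per second.
          mutator.setWriteBufferPeriodicFlush(30_000L);
          // ... mutate() calls as in the basic example ...
        }
      }
    }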
*/ public BufferedMutatorParams pool(ExecutorService pool) { this.pool = pool; @@ -150,8 +147,8 @@ public class BufferedMutatorParams implements Cloneable { } /** - * @return Name of the class we will use when we construct a - * {@link BufferedMutator} instance or null if default implementation. + * @return Name of the class we will use when we construct a {@link BufferedMutator} instance or + * null if default implementation. */ public String getImplementationClassName() { return this.implementationClassName; @@ -180,21 +177,20 @@ public class BufferedMutatorParams implements Cloneable { /* * (non-Javadoc) - * * @see java.lang.Object#clone() */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL", - justification="The clone below is complete") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "CN_IDIOM_NO_SUPER_CALL", + justification = "The clone below is complete") @Override public BufferedMutatorParams clone() { BufferedMutatorParams clone = new BufferedMutatorParams(this.tableName); - clone.writeBufferSize = this.writeBufferSize; - clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; + clone.writeBufferSize = this.writeBufferSize; + clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; clone.writeBufferPeriodicFlushTimerTickMs = this.writeBufferPeriodicFlushTimerTickMs; - clone.maxKeyValueSize = this.maxKeyValueSize; - clone.pool = this.pool; - clone.listener = this.listener; - clone.implementationClassName = this.implementationClassName; + clone.maxKeyValueSize = this.maxKeyValueSize; + clone.pool = this.pool; + clone.listener = this.listener; + clone.implementationClassName = this.implementationClassName; return clone; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java index 5095c96ab32..d766872defb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +16,16 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; /** - * This should be implemented by the Get/Scan implementations that - * talk to replica regions. When an RPC response is received from one - * of the replicas, the RPCs to the other replicas are cancelled. + * This should be implemented by the Get/Scan implementations that talk to replica regions. When an + * RPC response is received from one of the replicas, the RPCs to the other replicas are cancelled. 
*/ @InterfaceAudience.Private interface Cancellable { public void cancel(); + public boolean isCancelled(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java index 6ad9254e35e..3919311abf8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,36 +19,37 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.io.InterruptedIOException; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + /** - * This class is used to unify HTable calls with AsyncProcess Framework. HTable can use - * AsyncProcess directly though this class. Also adds global timeout tracking on top of - * RegionServerCallable and implements Cancellable. - * Global timeout tracking conflicts with logic in RpcRetryingCallerImpl's callWithRetries. So you - * can only use this callable in AsyncProcess which only uses callWithoutRetries and retries in its - * own implementation. + * This class is used to unify HTable calls with AsyncProcess Framework. HTable can use AsyncProcess + * directly though this class. Also adds global timeout tracking on top of RegionServerCallable and + * implements Cancellable. Global timeout tracking conflicts with logic in RpcRetryingCallerImpl's + * callWithRetries. So you can only use this callable in AsyncProcess which only uses + * callWithoutRetries and retries in its own implementation. 
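Cancellable and CancellableRegionServerCallable are internal, but the pattern they support, sending the same read to several replicas and cancelling the rest once one of them answers, can be sketched with plain CompletableFutures. This is an illustration of the idea only, not how the HBase client implements it; handling of the all-replicas-failed case is omitted for brevity.

    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    public class FirstReplicaWins {
      /** Completes with the first successful reply and cancels the remaining in-flight calls. */
      static <T> CompletableFuture<T> firstOf(List<CompletableFuture<T>> replicaCalls) {
        CompletableFuture<T> winner = new CompletableFuture<>();
        for (CompletableFuture<T> call : replicaCalls) {
          call.whenComplete((result, error) -> {
            // First successful reply wins; later replies and failures are ignored here.
            if (error == null && winner.complete(result)) {
              replicaCalls.stream().filter(other -> other != call)
                .forEach(other -> other.cancel(true));
            }
          });
        }
        return winner;
      }
    }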
*/ @InterfaceAudience.Private -abstract class CancellableRegionServerCallable extends ClientServiceCallable implements - Cancellable { +abstract class CancellableRegionServerCallable extends ClientServiceCallable + implements Cancellable { private final RetryingTimeTracker tracker; private final int rpcTimeout; + CancellableRegionServerCallable(Connection connection, TableName tableName, byte[] row, - RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) { + RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) { super(connection, tableName, row, rpcController, priority); this.rpcTimeout = rpcTimeout; this.tracker = tracker; } - /* Override so can mess with the callTimeout. - * (non-Javadoc) + /* + * Override so can mess with the callTimeout. (non-Javadoc) * @see org.apache.hadoop.hbase.client.RegionServerCallable#rpcCall(int) */ @Override @@ -92,30 +94,30 @@ abstract class CancellableRegionServerCallable extends ClientServiceCallable< } protected ClientProtos.MultiResponse doMulti(ClientProtos.MultiRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().multi(getRpcController(), request); } protected ClientProtos.ScanResponse doScan(ClientProtos.ScanRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().scan(getRpcController(), request); } - protected ClientProtos.PrepareBulkLoadResponse doPrepareBulkLoad( - ClientProtos.PrepareBulkLoadRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + protected ClientProtos.PrepareBulkLoadResponse + doPrepareBulkLoad(ClientProtos.PrepareBulkLoadRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().prepareBulkLoad(getRpcController(), request); } - protected ClientProtos.BulkLoadHFileResponse doBulkLoadHFile( - ClientProtos.BulkLoadHFileRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + protected ClientProtos.BulkLoadHFileResponse + doBulkLoadHFile(ClientProtos.BulkLoadHFileRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().bulkLoadHFile(getRpcController(), request); } - protected ClientProtos.CleanupBulkLoadResponse doCleanupBulkLoad( - ClientProtos.CleanupBulkLoadRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + protected ClientProtos.CleanupBulkLoadResponse + doCleanupBulkLoad(ClientProtos.CleanupBulkLoadRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().cleanupBulkLoad(getRpcController(), request); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index 27be88a9def..c6bf6b5d59e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -39,10 +39,9 @@ interface CatalogReplicaLoadBalanceSelector { /** * Select a catalog replica region where client go to loop up the input row key. 
- * - * @param tablename table name - * @param row key to look up - * @param locateType locate type + * @param tablename table name + * @param row key to look up + * @param locateType locate type * @return replica id */ int select(TableName tablename, byte[] row, RegionLocateType locateType); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java index 485afb40d26..9aac16d1588 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java @@ -36,10 +36,10 @@ final class CatalogReplicaLoadBalanceSelectorFactory { /** * Create a CatalogReplicaLoadBalanceReplicaSelector based on input config. - * @param replicaSelectorClass Selector classname. - * @param tableName System table name. - * @param choreService {@link ChoreService} - * @return {@link CatalogReplicaLoadBalanceSelector} + * @param replicaSelectorClass Selector classname. + * @param tableName System table name. + * @param choreService {@link ChoreService} + * @return {@link CatalogReplicaLoadBalanceSelector} */ public static CatalogReplicaLoadBalanceSelector createSelector(String replicaSelectorClass, TableName tableName, ChoreService choreService, IntSupplier getReplicaCount) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index c03d8310994..1a148153bba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow; import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; + import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -39,33 +40,34 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - *

<p>CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load
- * balancing algorithm. It maintains a stale location cache for each table. Whenever client looks
- * up location, it first check if the row is the stale location cache. If yes, the location from
- * catalog replica is stale, it will go to the primary region to look up update-to-date location;
- * otherwise, it will randomly pick up a replica region or primary region for lookup. When clients
- * receive RegionNotServedException from region servers, it will add these region locations to the
- * stale location cache. The stale cache will be cleaned up periodically by a chore.</p>
- *
+ * <p>
+ * CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load balancing
+ * algorithm. It maintains a stale location cache for each table. Whenever client looks up location,
+ * it first check if the row is the stale location cache. If yes, the location from catalog replica
+ * is stale, it will go to the primary region to look up update-to-date location; otherwise, it will
+ * randomly pick up a replica region or primary region for lookup. When clients receive
+ * RegionNotServedException from region servers, it will add these region locations to the stale
+ * location cache. The stale cache will be cleaned up periodically by a chore.
+ * </p>
  * It follows a simple algorithm to choose a meta replica region (including primary meta) to go:
- *
  * <ol>
- * <li>If there is no stale location entry for rows it looks up, it will randomly
- * pick a meta replica region (including primary meta) to do lookup.</li>
- * <li>If the location from the replica region is stale, client gets RegionNotServedException
- * from region server, in this case, it will create StaleLocationCacheEntry in
- * CatalogReplicaLoadBalanceReplicaSimpleSelector.</li>
- * <li>When client tries to do location lookup, it checks StaleLocationCache first for rows it
- * tries to lookup, if entry exists, it will go with primary meta region to do lookup;
- * otherwise, it will follow step 1.</li>
- * <li>A chore will periodically run to clean up cache entries in the StaleLocationCache.</li>
+ * <li>If there is no stale location entry for rows it looks up, it will randomly pick a meta
+ * replica region (including primary meta) to do lookup.</li>
+ * <li>If the location from the replica region is stale, client gets RegionNotServedException from
+ * region server, in this case, it will create StaleLocationCacheEntry in
+ * CatalogReplicaLoadBalanceReplicaSimpleSelector.</li>
+ * <li>When client tries to do location lookup, it checks StaleLocationCache first for rows it tries
+ * to lookup, if entry exists, it will go with primary meta region to do lookup; otherwise, it will
+ * follow step 1.</li>
+ * <li>A chore will periodically run to clean up cache entries in the StaleLocationCache.</li>
  * </ol>
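For readers skimming the hunk above, a minimal standalone sketch of the selection flow this javadoc describes: a stale-cache hit goes to the primary replica, otherwise a random replica is chosen. It uses only JDK types; the class and field names are illustrative and are not the HBase internals touched by this patch.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;

// Illustrative only: mirrors the documented algorithm, not the real selector class.
class SimpleReplicaSelectorSketch {
  private static final int PRIMARY_REPLICA_ID = 0;
  private static final long STALE_TIMEOUT_MS = 3000; // same 3-second idea as the real stale-cache timeout
  private final Map<String, Long> staleSince = new ConcurrentHashMap<>(); // rowKey -> time marked stale
  private volatile int numOfReplicas = 3;

  int select(String rowKey) {
    Long ts = staleSince.get(rowKey);
    if (ts != null) {
      if (System.currentTimeMillis() - ts >= STALE_TIMEOUT_MS) {
        staleSince.remove(rowKey);   // entry timed out, fall back to a random pick
      } else {
        return PRIMARY_REPLICA_ID;   // known-stale row: ask the primary meta region
      }
    }
    return ThreadLocalRandom.current().nextInt(numOfReplicas); // random pick, primary included
  }

  void onError(String rowKey) {      // called when a lookup hits a stale replica location
    staleSince.putIfAbsent(rowKey, System.currentTimeMillis());
  }
}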
    */ -class CatalogReplicaLoadBalanceSimpleSelector implements - CatalogReplicaLoadBalanceSelector, Stoppable { +class CatalogReplicaLoadBalanceSimpleSelector + implements CatalogReplicaLoadBalanceSelector, Stoppable { private static final Logger LOG = LoggerFactory.getLogger(CatalogReplicaLoadBalanceSimpleSelector.class); private final long STALE_CACHE_TIMEOUT_IN_MILLISECONDS = 3000; // 3 seconds @@ -96,10 +98,8 @@ class CatalogReplicaLoadBalanceSimpleSelector implements @Override public String toString() { - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("endKey", endKey) - .append("timestamp", timestamp) - .toString(); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("endKey", endKey) + .append("timestamp", timestamp).toString(); } } @@ -125,24 +125,22 @@ class CatalogReplicaLoadBalanceSimpleSelector implements } /** - * When a client runs into RegionNotServingException, it will call this method to - * update Selector's internal state. + * When a client runs into RegionNotServingException, it will call this method to update + * Selector's internal state. * @param loc the location which causes exception. */ public void onError(HRegionLocation loc) { - ConcurrentNavigableMap tableCache = - computeIfAbsent(staleCache, loc.getRegion().getTable(), - () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); + ConcurrentNavigableMap tableCache = computeIfAbsent(staleCache, + loc.getRegion().getTable(), () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); byte[] startKey = loc.getRegion().getStartKey(); - tableCache.putIfAbsent(startKey, - new StaleLocationCacheEntry(loc.getRegion().getEndKey())); + tableCache.putIfAbsent(startKey, new StaleLocationCacheEntry(loc.getRegion().getEndKey())); LOG.debug("Add entry to stale cache for table {} with startKey {}, {}", loc.getRegion().getTable(), startKey, loc.getRegion().getEndKey()); } /** - * Select an random replica id (including the primary replica id). In case there is no replica region configured, return - * the primary replica id. + * Select an random replica id (including the primary replica id). In case there is no replica + * region configured, return the primary replica id. * @return Replica id */ private int getRandomReplicaId() { @@ -159,20 +157,18 @@ class CatalogReplicaLoadBalanceSimpleSelector implements } /** - * When it looks up a location, it will call this method to find a replica region to go. - * For a normal case, > 99% of region locations from catalog/meta replica will be up to date. - * In extreme cases such as region server crashes, it will depends on how fast replication - * catches up. - * - * @param tablename table name it looks up - * @param row key it looks up. + * When it looks up a location, it will call this method to find a replica region to go. For a + * normal case, > 99% of region locations from catalog/meta replica will be up to date. In extreme + * cases such as region server crashes, it will depends on how fast replication catches up. + * @param tablename table name it looks up + * @param row key it looks up. * @param locateType locateType, Only BEFORE and CURRENT will be passed in. 
* @return catalog replica id */ public int select(final TableName tablename, final byte[] row, final RegionLocateType locateType) { - Preconditions.checkArgument(locateType == RegionLocateType.BEFORE || - locateType == RegionLocateType.CURRENT, + Preconditions.checkArgument( + locateType == RegionLocateType.BEFORE || locateType == RegionLocateType.CURRENT, "Expected type BEFORE or CURRENT but got: %s", locateType); ConcurrentNavigableMap tableCache = staleCache.get(tablename); @@ -200,15 +196,17 @@ class CatalogReplicaLoadBalanceSimpleSelector implements // long comparing is faster than comparing byte arrays(in most cases). It could remove // stale entries faster. If the possible match entry does not time out, it will check if // the entry is a match for the row passed in and select the replica id accordingly. - if ((EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if ( + (EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) + >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS + ) { LOG.debug("Entry for table {} with startKey {}, {} times out", tablename, entry.getKey(), entry); tableCache.remove(entry.getKey()); return getRandomReplicaId(); } - byte[] endKey = entry.getValue().getEndKey(); + byte[] endKey = entry.getValue().getEndKey(); // The following logic is borrowed from AsyncNonMetaRegionLocator. if (isEmptyStopRow(endKey)) { @@ -247,12 +245,12 @@ class CatalogReplicaLoadBalanceSimpleSelector implements private void cleanupReplicaReplicaStaleCache() { long curTimeInMills = EnvironmentEdgeManager.currentTime(); for (ConcurrentNavigableMap tableCache : staleCache.values()) { - Iterator> it = - tableCache.entrySet().iterator(); + Iterator> it = tableCache.entrySet().iterator(); while (it.hasNext()) { Map.Entry entry = it.next(); - if (curTimeInMills - entry.getValue().getTimestamp() >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if ( + curTimeInMills - entry.getValue().getTimestamp() >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS + ) { LOG.debug("clean entry {}, {} from stale cache", entry.getKey(), entry.getValue()); it.remove(); } @@ -271,15 +269,17 @@ class CatalogReplicaLoadBalanceSimpleSelector implements } int cachedNumOfReplicas = this.numOfReplicas; - if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - (cachedNumOfReplicas != newNumOfReplicas)) { + if ( + (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) + || (cachedNumOfReplicas != newNumOfReplicas) + ) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; } - private ScheduledChore getCacheCleanupChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getCacheCleanupChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { return new ScheduledChore("CleanupCatalogReplicaStaleCache", this, STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS) { @Override @@ -289,8 +289,8 @@ class CatalogReplicaLoadBalanceSimpleSelector implements }; } - private ScheduledChore getRefreshReplicaCountChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getRefreshReplicaCountChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { return new ScheduledChore("RefreshReplicaCountChore", this, REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS) { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java index 40062e32e83..647d5dcf38f 
100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,18 +20,16 @@ package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - *

<p>There are two modes with catalog replica support.</p>
- *
+ * <p>
+ * There are two modes with catalog replica support.
+ * </p>
  * <ol>
- * <li>HEDGED_READ - Client sends requests to the primary region first, within a
- * configured amount of time, if there is no response coming back,
- * client sends requests to all replica regions and takes the first
- * response.</li>
- *
- * <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode,
- * if results from replica regions are stale, next time, client sends requests for
- * these stale locations to the primary region. In this mode, scan
- * requests are load balanced across all replica regions.</li>
+ * <li>HEDGED_READ - Client sends requests to the primary region first, within a configured amount
+ * of time, if there is no response coming back, client sends requests to all replica regions and
+ * takes the first response.</li>
+ * <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode, if results
+ * from replica regions are stale, next time, client sends requests for these stale locations to the
+ * primary region. In this mode, scan requests are load balanced across all replica regions.</li>
  * </ol>
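As a usage note that is not taken from this patch: a client opts into one of these modes through its Configuration. The property key below is an assumption based on the meta-replica locator feature in recent HBase releases; verify it against the version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CatalogReplicaModeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property key; "LoadBalance" asks the locator to round-robin reads of meta replicas.
    conf.set("hbase.locator.meta.replicas.mode", "LoadBalance");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Region lookups made through this connection may now consult meta replicas.
    }
  }
}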
    */ @InterfaceAudience.Private @@ -54,7 +54,7 @@ enum CatalogReplicaMode { }; public static CatalogReplicaMode fromString(final String value) { - for(CatalogReplicaMode mode : values()) { + for (CatalogReplicaMode mode : values()) { if (mode.toString().equalsIgnoreCase(value)) { return mode; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java index 47bbc53797b..9202e310e7f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java @@ -23,15 +23,15 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -import java.util.Arrays; -import java.util.Objects; /** * Used to perform CheckAndMutate operations. *

- * Use the builder class to instantiate a CheckAndMutate object.
- * This builder class is fluent style APIs, the code are like:
+ * Use the builder class to instantiate a CheckAndMutate object. This builder class is fluent style
+ * APIs, the code are like:
+ *
  * <pre>
  * <code>
      * // A CheckAndMutate operation where do the specified action if the column (specified by the
    @@ -77,8 +77,7 @@ public final class CheckAndMutate implements Row {
     
         /**
          * Check for lack of column
    -     *
    -     * @param family family to check
    +     * @param family    family to check
          * @param qualifier qualifier to check
          * @return the CheckAndMutate object
          */
    @@ -88,10 +87,9 @@ public final class CheckAndMutate implements Row {
     
         /**
          * Check for equality
    -     *
    -     * @param family family to check
    +     * @param family    family to check
          * @param qualifier qualifier to check
    -     * @param value the expected value
    +     * @param value     the expected value
          * @return the CheckAndMutate object
          */
         public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) {
    @@ -99,10 +97,10 @@ public final class CheckAndMutate implements Row {
         }
     
         /**
    -     * @param family family to check
    +     * @param family    family to check
          * @param qualifier qualifier to check
          * @param compareOp comparison operator to use
    -     * @param value the expected value
    +     * @param value     the expected value
          * @return the CheckAndMutate object
          */
         public Builder ifMatches(byte[] family, byte[] qualifier, CompareOperator compareOp,
    @@ -135,13 +133,14 @@ public final class CheckAndMutate implements Row {
         private void preCheck(Row action) {
           Preconditions.checkNotNull(action, "action is null");
           if (!Bytes.equals(row, action.getRow())) {
    -        throw new IllegalArgumentException("The row of the action <" +
    -          Bytes.toStringBinary(action.getRow()) + "> doesn't match the original one <" +
    -          Bytes.toStringBinary(this.row) + ">");
    +        throw new IllegalArgumentException(
    +          "The row of the action <" + Bytes.toStringBinary(action.getRow())
    +            + "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">");
           }
    -      Preconditions.checkState(op != null || filter != null, "condition is null. You need to"
    -        + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    -        + " CheckAndMutate object");
    +      Preconditions.checkState(op != null || filter != null,
    +        "condition is null. You need to"
    +          + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    +          + " CheckAndMutate object");
         }
     
         /**
    @@ -212,7 +211,6 @@ public final class CheckAndMutate implements Row {
     
       /**
        * returns a builder object to build a CheckAndMutate object
    -   *
        * @param row row
        * @return a builder object
        */
    @@ -229,7 +227,7 @@ public final class CheckAndMutate implements Row {
       private final TimeRange timeRange;
       private final Row action;
     
    -  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier,final CompareOperator op,
    +  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier, final CompareOperator op,
         byte[] value, TimeRange timeRange, Row action) {
         this.row = row;
         this.family = family;
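The fluent builder described in the CheckAndMutate javadoc above is used roughly as in the sketch below. This is an illustration rather than code from the patch; the table, family and qualifier names are placeholders.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateExample {
  static CheckAndMutateResult putIfColumnMissing(Connection connection) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("q");
    // Perform the Put only if cf:q does not exist yet for this row.
    CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
      .ifNotExists(family, qualifier)
      .build(new Put(row).addColumn(family, qualifier, Bytes.toBytes("value")));
    try (Table table = connection.getTable(TableName.valueOf("test_table"))) {
      return table.checkAndMutate(checkAndMutate);
    }
  }
}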
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
    index e8d453f83c9..928dd957e12 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
    @@ -62,11 +62,11 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
       private final Condition notFull = lock.newCondition();
     
       public ClientAsyncPrefetchScanner(Configuration configuration, Scan scan, TableName name,
    -      ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
    -      RpcControllerFactory rpcControllerFactory, ExecutorService pool,
    -      int replicaCallTimeoutMicroSecondScan) throws IOException {
    +    ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
    +    RpcControllerFactory rpcControllerFactory, ExecutorService pool,
    +    int replicaCallTimeoutMicroSecondScan) throws IOException {
         super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool,
    -        replicaCallTimeoutMicroSecondScan);
    +      replicaCallTimeoutMicroSecondScan);
         exceptionsQueue = new ConcurrentLinkedQueue<>();
         final Context context = Context.current();
         final Runnable runnable = context.wrap(new PrefetchRunnable());
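A brief aside, not from the patch: application code normally reaches ClientAsyncPrefetchScanner by enabling asynchronous prefetch on the Scan. A minimal sketch, assuming the Scan#setAsyncPrefetch flag available in HBase 2.x:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class AsyncPrefetchScanExample {
  static long countRows(Table table) throws Exception {
    // Prefetch the next batch in the background while the caller drains the current cache.
    Scan scan = new Scan().setCaching(500).setAsyncPrefetch(true);
    long rows = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        rows++;
      }
    }
    return rows;
  }
}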
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    index 11b2f7883a0..36722c2894d 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client;
     
     import com.google.protobuf.RpcCallback;
     import com.google.protobuf.RpcController;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    index 9125132e66c..758cf508578 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -20,27 +19,27 @@ package org.apache.hadoop.hbase.client;
     
     import java.io.IOException;
     import java.lang.management.ManagementFactory;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
     import org.apache.hadoop.hbase.util.Addressing;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    +import org.apache.yetus.audience.InterfaceAudience;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
     
     /**
    - * The class that is able to determine some unique strings for the client,
    - * such as an IP address, PID, and composite deterministic ID.
    + * The class that is able to determine some unique strings for the client, such as an IP address,
    + * PID, and composite deterministic ID.
      */
     @InterfaceAudience.Private
     final class ClientIdGenerator {
       private static final Logger LOG = LoggerFactory.getLogger(ClientIdGenerator.class);
     
    -  private ClientIdGenerator() {}
    +  private ClientIdGenerator() {
    +  }
     
       /**
    -   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill...
    -   * Note though that new UUID in java by default is just a random number.
    +   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill... Note
    +   *         though that new UUID in java by default is just a random number.
        */
       public static byte[] generateClientId() {
         byte[] selfBytes = getIpAddressBytes();
    @@ -78,8 +77,8 @@ final class ClientIdGenerator {
       }
     
       /**
    -   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual
    -   *         and not a loopback address. Empty array if none can be found or error occurred.
    +   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual and not
    +   *         a loopback address. Empty array if none can be found or error occurred.
        */
       public static byte[] getIpAddressBytes() {
         try {
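For context only: a rough, self-contained illustration of the id composition the ClientIdGenerator javadoc above describes (IP address, PID and a timestamp). The real class also mixes in the thread id and relies on HBase utility classes; nothing below is taken from it.

import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.nio.ByteBuffer;

public class ClientIdSketch {
  static byte[] composeClientId() throws Exception {
    byte[] ip = InetAddress.getLocalHost().getAddress();                 // IP address bytes
    // Runtime name is typically "pid@host"; parsing it this way is a simplification.
    long pid = Long.parseLong(ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
    long now = System.currentTimeMillis();                               // timer component
    ByteBuffer buf = ByteBuffer.allocate(ip.length + Long.BYTES * 2);
    buf.put(ip).putLong(pid).putLong(now);
    return buf.array();
  }
}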
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
    index 904df3b78ad..dbc03fce1d5 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
    @@ -93,15 +93,15 @@ public abstract class ClientScanner extends AbstractClientScanner {
       /**
        * Create a new ClientScanner for the specified table Note that the passed {@link Scan}'s start
        * row maybe changed changed.
    -   * @param conf The {@link Configuration} to use.
    -   * @param scan {@link Scan} to use in this scanner
    -   * @param tableName The table that we wish to scan
    +   * @param conf       The {@link Configuration} to use.
    +   * @param scan       {@link Scan} to use in this scanner
    +   * @param tableName  The table that we wish to scan
        * @param connection Connection identifying the cluster
        */
       public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
    -      ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
    -      RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
    -      throws IOException {
    +    ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
    +    RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
    +    throws IOException {
         if (LOG.isTraceEnabled()) {
           LOG.trace(
             "Scan table=" + tableName + ", startRow=" + Bytes.toStringBinary(scan.getStartRow()));
    @@ -243,14 +243,14 @@ public abstract class ClientScanner extends AbstractClientScanner {
           // Only worth logging if NOT first region in scan.
           LOG.debug(
             "Advancing internal scanner to startKey at '" + Bytes.toStringBinary(scan.getStartRow())
    -            + "', " + (scan.includeStartRow() ? "inclusive" : "exclusive"));
    +          + "', " + (scan.includeStartRow() ? "inclusive" : "exclusive"));
         }
         // clear the current region, we will set a new value to it after the first call of the new
         // callable.
         this.currentRegion = null;
         this.callable =
    -        new ScannerCallableWithReplicas(getTable(), getConnection(), createScannerCallable(), pool,
    -            primaryOperationTimeout, scan, getRetries(), scannerTimeout, caching, conf, caller);
    +      new ScannerCallableWithReplicas(getTable(), getConnection(), createScannerCallable(), pool,
    +        primaryOperationTimeout, scan, getRetries(), scannerTimeout, caching, conf, caller);
         this.callable.setCaching(this.caching);
         incRegionCountMetrics(scanMetrics);
         return true;
    @@ -261,7 +261,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
       }
     
       private Result[] call(ScannerCallableWithReplicas callable, RpcRetryingCaller caller,
    -      int scannerTimeout, boolean updateCurrentRegion) throws IOException {
    +    int scannerTimeout, boolean updateCurrentRegion) throws IOException {
         if (Thread.interrupted()) {
           throw new InterruptedIOException();
         }
    @@ -336,7 +336,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
         // keep compatible with the old logic. Should remove the isOpenScanner in the future.
         // 2. Server tells us that it has no more results for this region.
         return (values.length == 0 && !callable.isHeartbeatMessage())
    -        || callable.moreResultsInRegion() == MoreResults.NO;
    +      || callable.moreResultsInRegion() == MoreResults.NO;
       }
     
       private void closeScannerIfExhausted(boolean exhausted) throws IOException {
    @@ -346,7 +346,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
       }
     
       private void handleScanError(DoNotRetryIOException e,
    -      MutableBoolean retryAfterOutOfOrderException, int retriesLeft) throws DoNotRetryIOException {
    +    MutableBoolean retryAfterOutOfOrderException, int retriesLeft) throws DoNotRetryIOException {
         // An exception was thrown which makes any partial results that we were collecting
         // invalid. The scanner will need to be reset to the beginning of a row.
         scanResultCache.clear();
    @@ -366,10 +366,12 @@ public abstract class ClientScanner extends AbstractClientScanner {
         // If exception is any but the list below throw it back to the client; else setup
         // the scanner and retry.
         Throwable cause = e.getCause();
    -    if ((cause != null && cause instanceof NotServingRegionException)
    +    if (
    +      (cause != null && cause instanceof NotServingRegionException)
             || (cause != null && cause instanceof RegionServerStoppedException)
             || e instanceof OutOfOrderScannerNextException || e instanceof UnknownScannerException
    -        || e instanceof ScannerResetException || e instanceof LeaseException) {
    +        || e instanceof ScannerResetException || e instanceof LeaseException
    +    ) {
           // Pass. It is easier writing the if loop test as list of what is allowed rather than
           // as a list of what is not allowed... so if in here, it means we do not throw.
           if (retriesLeft <= 0) {
    @@ -395,7 +397,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
           } else {
             // TODO: Why wrap this in a DNRIOE when it already is a DNRIOE?
             throw new DoNotRetryIOException(
    -            "Failed after retry of OutOfOrderScannerNextException: was there a rpc timeout?", e);
    +          "Failed after retry of OutOfOrderScannerNextException: was there a rpc timeout?", e);
           }
         }
         // Clear region.
    @@ -464,9 +466,9 @@ public abstract class ClientScanner extends AbstractClientScanner {
           // caller, all book keeping will be performed within this method.
           int numberOfCompleteRowsBefore = scanResultCache.numberOfCompleteRows();
           Result[] resultsToAddToCache =
    -          scanResultCache.addAndGet(values, callable.isHeartbeatMessage());
    +        scanResultCache.addAndGet(values, callable.isHeartbeatMessage());
           int numberOfCompleteRows =
    -          scanResultCache.numberOfCompleteRows() - numberOfCompleteRowsBefore;
    +        scanResultCache.numberOfCompleteRows() - numberOfCompleteRowsBefore;
           for (Result rs : resultsToAddToCache) {
             cache.add(rs);
             long estimatedHeapSizeOfResult = calcEstimatedSize(rs);
    @@ -494,7 +496,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
               // loop until a limit (e.g. size or caching) is reached, break out early to avoid causing
               // unnecesary delays to the caller
               LOG.trace("Heartbeat message received and cache contains Results. "
    -              + "Breaking out of scan loop");
    +            + "Breaking out of scan loop");
               // we know that the region has not been exhausted yet so just break without calling
               // closeScannerIfExhausted
               break;
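An aside rather than part of the patch: the retry and heartbeat handling reworked above is internal to ClientScanner. What callers usually tune are the standard client properties for scanner timeout and retry count, for example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ScannerTuningExample {
  static Configuration scannerTunedConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.scanner.timeout.period", 120_000); // scanner lease/RPC timeout, ms
    conf.setInt("hbase.client.retries.number", 10);              // retries before giving up
    return conf;
  }
}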
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
    index f118e7a03f7..367ad3bda48 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
    @@ -1,12 +1,13 @@
     /*
    - * Licensed to the Apache Software Foundation (ASF) under one or more
    - * contributor license agreements.  See the NOTICE file distributed with
    - * this work for additional information regarding copyright ownership.
    - * The ASF licenses this file to you under the Apache License, Version 2.0
    - * (the "License"); you may not use this file except in compliance with
    - * the License.  You may obtain a copy of the License at
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
      *
    - * http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
      * Unless required by applicable law or agreed to in writing, software
      * distributed under the License is distributed on an "AS IS" BASIS,
    @@ -17,25 +18,25 @@
     package org.apache.hadoop.hbase.client;
     
     import java.io.IOException;
    -
     import org.apache.hadoop.hbase.ServerName;
     import org.apache.hadoop.hbase.TableName;
     import org.apache.yetus.audience.InterfaceAudience;
    -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    +
     import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
     
    +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    +
     /**
    - * A RegionServerCallable set to use the Client protocol.
    - * Also includes some utility methods so can hide protobuf references here rather than have them
    - * sprinkled about the code base.
    + * A RegionServerCallable set to use the Client protocol. Also includes some utility methods so can
    + * hide protobuf references here rather than have them sprinkled about the code base.
      * @param 
      */
     @InterfaceAudience.Private
    -public abstract class ClientServiceCallable extends
    -    RegionServerCallable {
    +public abstract class ClientServiceCallable
    +  extends RegionServerCallable {
     
    -  public ClientServiceCallable(Connection connection, TableName tableName, byte [] row,
    -      RpcController rpcController, int priority) {
    +  public ClientServiceCallable(Connection connection, TableName tableName, byte[] row,
    +    RpcController rpcController, int priority) {
         super(connection, tableName, row, rpcController, priority);
       }
     
    @@ -46,12 +47,12 @@ public abstract class ClientServiceCallable extends
     
       // Below here are simple methods that contain the stub and the rpcController.
       protected ClientProtos.GetResponse doGet(ClientProtos.GetRequest request)
    -  throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
    +    throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
         return getStub().get(getRpcController(), request);
       }
     
       protected ClientProtos.MutateResponse doMutate(ClientProtos.MutateRequest request)
    -  throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
    +    throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
         return getStub().mutate(getRpcController(), request);
       }
     }
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
    index 2211f8696ef..a1530c9bb7c 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -23,25 +23,23 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForSca
     
     import java.io.IOException;
     import java.util.concurrent.ExecutorService;
    -
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hbase.TableName;
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * ClientSimpleScanner implements a sync scanner behaviour.
    - * The cache is a simple list.
    - * The prefetch is invoked only when the application finished processing the entire cache.
    + * ClientSimpleScanner implements a sync scanner behaviour. The cache is a simple list. The prefetch
    + * is invoked only when the application finished processing the entire cache.
      */
     @InterfaceAudience.Private
     public class ClientSimpleScanner extends ClientScanner {
       public ClientSimpleScanner(Configuration configuration, Scan scan, TableName name,
    -      ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
    -      RpcControllerFactory rpcControllerFactory, ExecutorService pool,
    -      int replicaCallTimeoutMicroSecondScan) throws IOException {
    +    ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
    +    RpcControllerFactory rpcControllerFactory, ExecutorService pool,
    +    int replicaCallTimeoutMicroSecondScan) throws IOException {
         super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool,
    -        replicaCallTimeoutMicroSecondScan);
    +      replicaCallTimeoutMicroSecondScan);
       }
     
       @Override
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    index ba447d5a81b..44eef0668f0 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -25,7 +25,6 @@ import org.apache.yetus.audience.InterfaceAudience;
     @InterfaceAudience.Private
     public class ClientUtil {
     
    -
       public static boolean areScanStartRowAndStopRowEqual(byte[] startRow, byte[] stopRow) {
         return startRow != null && startRow.length > 0 && Bytes.equals(startRow, stopRow);
       }
    @@ -35,19 +34,23 @@ public class ClientUtil {
       }
     
       /**
-   * <p>When scanning for a prefix the scan should stop immediately after the the last row that
-   * has the specified prefix. This method calculates the closest next rowKey immediately following
-   * the given rowKeyPrefix.</p>
-   * <p>IMPORTANT: This converts a rowKeyPrefix into a rowKey.</p>
-   * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
-   * simply increment the last byte of the array.
-   * But if your application uses real binary rowids you may run into the scenario that your
-   * prefix is something like:</p>
+   * <p>
+   * When scanning for a prefix the scan should stop immediately after the the last row that has the
+   * specified prefix. This method calculates the closest next rowKey immediately following the
+   * given rowKeyPrefix.
+   * </p>
+   * <p>
+   * IMPORTANT: This converts a rowKeyPrefix into a rowKey.
+   * </p>
+   * <p>
+   * If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can simply
+   * increment the last byte of the array. But if your application uses real binary rowids you may
+   * run into the scenario that your prefix is something like:
+   * </p>
   *    { 0x12, 0x23, 0xFF, 0xFF }
   * Then this stopRow needs to be fed into the actual scan
   *    { 0x12, 0x24 } (Notice that it is shorter now)
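To make the worked example above concrete, here is an illustrative helper that mirrors the documented behaviour of ClientUtil's calculateTheClosestNextRowKeyForPrefix. It is a sketch, not code from this patch.

import java.util.Arrays;

public class PrefixStopRowSketch {
  static byte[] closestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
    // Walk backwards past any trailing 0xFF bytes; they roll over and are dropped.
    int offset = rowKeyPrefix.length;
    while (offset > 0 && rowKeyPrefix[offset - 1] == (byte) 0xFF) {
      offset--;
    }
    if (offset == 0) {
      return new byte[0]; // prefix is all 0xFF: scan to the end of the table (empty stop row)
    }
    byte[] stopRow = Arrays.copyOf(rowKeyPrefix, offset);
    stopRow[offset - 1]++; // increment the last non-0xFF byte
    return stopRow;
  }

  public static void main(String[] args) {
    // { 0x12, 0x23, 0xFF, 0xFF } -> { 0x12, 0x24 }, matching the javadoc example above.
    byte[] stop = closestNextRowKeyForPrefix(new byte[] { 0x12, 0x23, (byte) 0xFF, (byte) 0xFF });
    System.out.println(Arrays.toString(stop)); // prints [18, 36]
  }
}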
    * This method calculates the correct stop row value for this usecase. - * * @param rowKeyPrefix the rowKeyPrefix. * @return the closest next rowKey immediately following the given rowKeyPrefix. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index 45f9402ea01..1167b3a7889 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -1,6 +1,4 @@ -/** - * - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,8 +41,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientServ public interface ClusterConnection extends Connection { /** - * Key for configuration in Configuration whose value is the class we implement making a - * new Connection instance. + * Key for configuration in Configuration whose value is the class we implement making a new + * Connection instance. */ String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; @@ -53,28 +51,19 @@ public interface ClusterConnection extends Connection { * @deprecated this has been deprecated without a replacement */ @Deprecated - boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException; + boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException; /** - * Use this api to check if the table has been created with the specified number of - * splitkeys which was used while creating the given table. - * Note : If this api is used after a table's region gets splitted, the api may return - * false. - * @param tableName - * tableName - * @param splitKeys - * splitKeys used while creating table - * @throws IOException - * if a remote or network exception occurs + * Use this api to check if the table has been created with the specified number of splitkeys + * which was used while creating the given table. Note : If this api is used after a table's + * region gets splitted, the api may return false. n * tableName n * splitKeys used while creating + * table n * if a remote or network exception occurs */ - boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws - IOException; + boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException; /** - * A table that isTableEnabled == false and isTableDisabled == false - * is possible. This happens when a table has a lot of regions - * that must be processed. + * A table that isTableEnabled == false and isTableDisabled == false is possible. This happens + * when a table has a lot of regions that must be processed. * @param tableName table name * @return true if the table is enabled, false otherwise * @throws IOException if a remote or network exception occurs @@ -93,19 +82,16 @@ public interface ClusterConnection extends Connection { * @param tableName table state for * @return state of the table */ - TableState getTableState(TableName tableName) throws IOException; + TableState getTableState(TableName tableName) throws IOException; /** - * Find the location of the region of tableName that row - * lives in. + * Find the location of the region of tableName that row lives in. 
* @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question + * @param row row key you're trying to find the region of + * @return HRegionLocation that describes where to find the region in question * @throws IOException if a remote or network exception occurs */ - HRegionLocation locateRegion(final TableName tableName, - final byte [] row) throws IOException; + HRegionLocation locateRegion(final TableName tableName, final byte[] row) throws IOException; /** * @deprecated {@link #clearRegionLocationCache()} instead. @@ -118,10 +104,8 @@ public interface ClusterConnection extends Connection { void cacheLocation(final TableName tableName, final RegionLocations location); /** - * Allows flushing the region cache of all locations that pertain to - * tableName - * @param tableName Name of the table whose regions we are to remove from - * cache. + * Allows flushing the region cache of all locations that pertain to tableName + * @param tableName Name of the table whose regions we are to remove from cache. */ void clearRegionCache(final TableName tableName); @@ -132,47 +116,43 @@ public interface ClusterConnection extends Connection { void deleteCachedRegionLocation(final HRegionLocation location); /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. + * Find the location of the region of tableName that row lives in, ignoring any + * value that might be in the cache. * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question + * @param row row key you're trying to find the region of + * @return HRegionLocation that describes where to find the region in question * @throws IOException if a remote or network exception occurs */ - HRegionLocation relocateRegion(final TableName tableName, - final byte [] row) throws IOException; + HRegionLocation relocateRegion(final TableName tableName, final byte[] row) throws IOException; /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. + * Find the location of the region of tableName that row lives in, ignoring any + * value that might be in the cache. * @param tableName name of the table row is in - * @param row row key you're trying to find the region of + * @param row row key you're trying to find the region of * @param replicaId the replicaId of the region - * @return RegionLocations that describe where to find the region in - * question + * @return RegionLocations that describe where to find the region in question * @throws IOException if a remote or network exception occurs */ - RegionLocations relocateRegion(final TableName tableName, - final byte [] row, int replicaId) throws IOException; + RegionLocations relocateRegion(final TableName tableName, final byte[] row, int replicaId) + throws IOException; /** * Update the location cache. This is used internally by HBase, in most cases it should not be - * used by the client application. - * @param tableName the table name + * used by the client application. + * @param tableName the table name * @param regionName the region name - * @param rowkey the row - * @param exception the exception if any. Can be null. 
- * @param source the previous location + * @param rowkey the row + * @param exception the exception if any. Can be null. + * @param source the previous location */ void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey, - Object exception, ServerName source); + Object exception, ServerName source); /** * Gets the location of the region of regionName. * @param regionName name of the region to locate - * @return HRegionLocation that describes where to find the region in - * question + * @return HRegionLocation that describes where to find the region in question * @throws IOException if a remote or network exception occurs */ HRegionLocation locateRegion(final byte[] regionName) throws IOException; @@ -188,40 +168,37 @@ public interface ClusterConnection extends Connection { /** * Gets the locations of all regions in the specified table, tableName. * @param tableName table to get regions of - * @param useCache Should we use the cache to retrieve the region information. - * @param offlined True if we are to include offlined regions, false and we'll leave out offlined - * regions from returned list. + * @param useCache Should we use the cache to retrieve the region information. + * @param offlined True if we are to include offlined regions, false and we'll leave out offlined + * regions from returned list. * @return list of region locations for all regions of table * @throws IOException if IO failure occurs */ - List locateRegions(final TableName tableName, - final boolean useCache, - final boolean offlined) throws IOException; + List locateRegions(final TableName tableName, final boolean useCache, + final boolean offlined) throws IOException; /** - * * @param tableName table to get regions of - * @param row the row - * @param useCache Should we use the cache to retrieve the region information. - * @param retry do we retry + * @param row the row + * @param useCache Should we use the cache to retrieve the region information. + * @param retry do we retry * @return region locations for this row. * @throws IOException if IO failure occurs */ - RegionLocations locateRegion(TableName tableName, - byte[] row, boolean useCache, boolean retry) throws IOException; + RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry) + throws IOException; - /** - * - * @param tableName table to get regions of - * @param row the row - * @param useCache Should we use the cache to retrieve the region information. - * @param retry do we retry - * @param replicaId the replicaId for the region - * @return region locations for this row. - * @throws IOException if IO failure occurs - */ + /** + * @param tableName table to get regions of + * @param row the row + * @param useCache Should we use the cache to retrieve the region information. + * @param retry do we retry + * @param replicaId the replicaId for the region + * @return region locations for this row. + * @throws IOException if IO failure occurs + */ RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry, - int replicaId) throws IOException; + int replicaId) throws IOException; /** * Returns a {@link MasterKeepAliveConnection} to the active master @@ -242,26 +219,24 @@ public interface ClusterConnection extends Connection { AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; /** - * Establishes a connection to the region server at the specified address, and returns - * a region client protocol. 
- * + * Establishes a connection to the region server at the specified address, and returns a region + * client protocol. * @param serverName the region server to connect to * @return ClientProtocol proxy for RegionServer * @throws IOException if a remote or network exception occurs - * */ ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; /** * Find region location hosting passed row * @param tableName table name - * @param row Row to find. - * @param reload If true do not use cache, otherwise bypass. + * @param row Row to find. + * @param reload If true do not use cache, otherwise bypass. * @return Location of row. * @throws IOException if a remote or network exception occurs */ HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reload) - throws IOException; + throws IOException; /** * Clear any caches that pertain to server name sn. @@ -280,11 +255,10 @@ public interface ClusterConnection extends Connection { AsyncProcess getAsyncProcess(); /** - * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. - * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be + * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. This + * RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be * intercepted with the configured {@link RetryingCallerInterceptor} - * @param conf configuration - * @return RpcRetryingCallerFactory + * @param conf configuration n */ RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index 1370d07c5fb..cc34d59c732 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.io.Closeable; @@ -37,6 +35,10 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; @@ -48,15 +50,13 @@ import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramChannel; import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramPacket; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * A class that receives the cluster status, and provide it as a set of service to the client. - * Today, manages only the dead server list. - * The class is abstract to allow multiple implementations, from ZooKeeper to multicast based. + * Today, manages only the dead server list. The class is abstract to allow multiple + * implementations, from ZooKeeper to multicast based. */ @InterfaceAudience.Private class ClusterStatusListener implements Closeable { @@ -70,7 +70,7 @@ class ClusterStatusListener implements Closeable { */ public static final String STATUS_LISTENER_CLASS = "hbase.status.listener.class"; public static final Class DEFAULT_STATUS_LISTENER_CLASS = - MulticastListener.class; + MulticastListener.class; /** * Class to be extended to manage a new dead server. @@ -80,13 +80,11 @@ class ClusterStatusListener implements Closeable { /** * Called when a server is identified as dead. Called only once even if we receive the * information multiple times. - * * @param sn - the server name */ void newDead(ServerName sn); } - /** * The interface to be implemented by a listener of a cluster status event. */ @@ -99,7 +97,6 @@ class ClusterStatusListener implements Closeable { /** * Called to connect. - * * @param conf Configuration to use. * @throws IOException if failing to connect */ @@ -107,11 +104,11 @@ class ClusterStatusListener implements Closeable { } public ClusterStatusListener(DeadServerHandler dsh, Configuration conf, - Class listenerClass) throws IOException { + Class listenerClass) throws IOException { this.deadServerHandler = dsh; try { Constructor ctor = - listenerClass.getConstructor(ClusterStatusListener.class); + listenerClass.getConstructor(ClusterStatusListener.class); this.listener = ctor.newInstance(this); } catch (InstantiationException e) { throw new IOException("Can't create listener " + listenerClass.getName(), e); @@ -128,7 +125,6 @@ class ClusterStatusListener implements Closeable { /** * Acts upon the reception of a new cluster status. - * * @param ncs the cluster status */ public void receive(ClusterMetrics ncs) { @@ -152,7 +148,6 @@ class ClusterStatusListener implements Closeable { /** * Check if we know if a server is dead. - * * @param sn the server name to check. * @return true if we know for sure that the server is dead, false otherwise. 
*/ @@ -162,9 +157,10 @@ class ClusterStatusListener implements Closeable { } for (ServerName dead : deadServers) { - if (dead.getStartcode() >= sn.getStartcode() && - dead.getPort() == sn.getPort() && - dead.getHostname().equals(sn.getHostname())) { + if ( + dead.getStartcode() >= sn.getStartcode() && dead.getPort() == sn.getPort() + && dead.getHostname().equals(sn.getHostname()) + ) { return true; } } @@ -172,7 +168,6 @@ class ClusterStatusListener implements Closeable { return false; } - /** * An implementation using a multicast message between the master & the client. */ @@ -189,12 +184,12 @@ class ClusterStatusListener implements Closeable { @Override public void connect(Configuration conf) throws IOException { - String mcAddress = conf.get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); + String mcAddress = + conf.get(HConstants.STATUS_MULTICAST_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); String bindAddress = conf.get(HConstants.STATUS_MULTICAST_BIND_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_BIND_ADDRESS); - int port = conf.getInt(HConstants.STATUS_MULTICAST_PORT, - HConstants.DEFAULT_STATUS_MULTICAST_PORT); + int port = + conf.getInt(HConstants.STATUS_MULTICAST_PORT, HConstants.DEFAULT_STATUS_MULTICAST_PORT); String niName = conf.get(HConstants.STATUS_MULTICAST_NI_NAME); InetAddress ina; @@ -207,11 +202,9 @@ class ClusterStatusListener implements Closeable { try { Bootstrap b = new Bootstrap(); - b.group(group) - .channel(NioDatagramChannel.class) - .option(ChannelOption.SO_REUSEADDR, true) + b.group(group).channel(NioDatagramChannel.class).option(ChannelOption.SO_REUSEADDR, true) .handler(new ClusterStatusHandler()); - channel = (DatagramChannel)b.bind(bindAddress, port).sync().channel(); + channel = (DatagramChannel) b.bind(bindAddress, port).sync().channel(); } catch (InterruptedException e) { close(); throw ExceptionUtil.asInterrupt(e); @@ -228,7 +221,6 @@ class ClusterStatusListener implements Closeable { channel.joinGroup(ina, ni, null, channel.newPromise()); } - @Override public void close() { if (channel != null) { @@ -238,17 +230,13 @@ class ClusterStatusListener implements Closeable { group.shutdownGracefully(); } - - /** * Class, conforming to the Netty framework, that manages the message received. */ private class ClusterStatusHandler extends SimpleChannelInboundHandler { @Override - public void exceptionCaught( - ChannelHandlerContext ctx, Throwable cause) - throws Exception { + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { LOG.error("Unexpected exception, continuing.", cause); } @@ -257,7 +245,6 @@ class ClusterStatusListener implements Closeable { return super.acceptInboundMessage(msg); } - @Override protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket dp) throws Exception { ByteBufInputStream bis = new ByteBufInputStream(dp.content()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java index 001d672620e..5f11f8d258a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,71 +20,68 @@ package org.apache.hadoop.hbase.client; import java.util.Comparator; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.MemoryCompactionPolicy; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * An ColumnFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. - * - * To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods + * An ColumnFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods * @since 2.0.0 */ @InterfaceAudience.Public public interface ColumnFamilyDescriptor { @InterfaceAudience.Private - static final Comparator COMPARATOR - = (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { - int result = Bytes.compareTo(lhs.getName(), rhs.getName()); - if (result != 0) { - return result; - } - // punt on comparison for ordering, just calculate difference. - result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); - if (result != 0) { - return result; - } - return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); - }; + static final Comparator COMPARATOR = + (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { + int result = Bytes.compareTo(lhs.getName(), rhs.getName()); + if (result != 0) { + return result; + } + // punt on comparison for ordering, just calculate difference. 
+ result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); + if (result != 0) { + return result; + } + return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); + }; - static final Bytes REPLICATION_SCOPE_BYTES = new Bytes( - Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); + static final Bytes REPLICATION_SCOPE_BYTES = + new Bytes(Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); @InterfaceAudience.Private - static final Comparator COMPARATOR_IGNORE_REPLICATION = ( - ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { - int result = Bytes.compareTo(lcf.getName(), rcf.getName()); - if (result != 0) { - return result; - } - // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove - // REPLICATION_SCOPE_BYTES - Map lValues = new HashMap<>(); - lValues.putAll(lcf.getValues()); - lValues.remove(REPLICATION_SCOPE_BYTES); - Map rValues = new HashMap<>(); - rValues.putAll(rcf.getValues()); - rValues.remove(REPLICATION_SCOPE_BYTES); - result = lValues.hashCode() - rValues.hashCode(); - if (result != 0) { - return result; - } - return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); - }; + static final Comparator COMPARATOR_IGNORE_REPLICATION = + (ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { + int result = Bytes.compareTo(lcf.getName(), rcf.getName()); + if (result != 0) { + return result; + } + // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove + // REPLICATION_SCOPE_BYTES + Map lValues = new HashMap<>(); + lValues.putAll(lcf.getValues()); + lValues.remove(REPLICATION_SCOPE_BYTES); + Map rValues = new HashMap<>(); + rValues.putAll(rcf.getValues()); + rValues.remove(REPLICATION_SCOPE_BYTES); + result = lValues.hashCode() - rValues.hashCode(); + if (result != 0) { + return result; + } + return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); + }; /** * @return The storefile/hfile blocksize for this column family. */ int getBlocksize(); + /** * @return bloom filter type used for new StoreFiles in ColumnFamily */ @@ -114,20 +111,23 @@ public interface ColumnFamilyDescriptor { * @return an unmodifiable map. */ Map getConfiguration(); + /** * @param key the key whose associated value is to be returned * @return accessing the configuration value by key. */ String getConfigurationValue(String key); + /** * @return replication factor set for this CF */ short getDFSReplication(); + /** - * @return the data block encoding algorithm used in block cache and - * optionally on disk + * @return the data block encoding algorithm used in block cache and optionally on disk */ DataBlockEncoding getDataBlockEncoding(); + /** * @return Return the raw crypto key attribute for the family, or null if not set */ @@ -137,35 +137,40 @@ public interface ColumnFamilyDescriptor { * @return Return the encryption algorithm in use by this family */ String getEncryptionType(); + /** - * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for - * for this column family + * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for for + * this column family */ MemoryCompactionPolicy getInMemoryCompaction(); + /** * @return return the KeepDeletedCells */ KeepDeletedCells getKeepDeletedCells(); + /** * @return maximum number of versions */ int getMaxVersions(); + /** * @return The minimum number of versions to keep. 
*/ int getMinVersions(); + /** - * Get the mob compact partition policy for this family - * @return MobCompactPartitionPolicy + * Get the mob compact partition policy for this family n */ MobCompactPartitionPolicy getMobCompactPartitionPolicy(); + /** - * Gets the mob threshold of the family. - * If the size of a cell value is larger than this threshold, it's regarded as a mob. - * The default threshold is 1024*100(100K)B. + * Gets the mob threshold of the family. If the size of a cell value is larger than this + * threshold, it's regarded as a mob. The default threshold is 1024*100(100K)B. * @return The mob threshold. */ long getMobThreshold(); + /** * @return a copy of Name of this column family */ @@ -176,45 +181,53 @@ public interface ColumnFamilyDescriptor { */ String getNameAsString(); - /** - * @return the scope tag - */ + /** + * @return the scope tag + */ int getScope(); + /** * Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see * org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details. * @return Return the storage policy in use by this family */ String getStoragePolicy(); - /** + + /** * @return Time-to-live of cell contents, in seconds. */ int getTimeToLive(); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ Bytes getValue(Bytes key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ String getValue(String key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ byte[] getValue(byte[] key); + /** * It clone all bytes of all elements. * @return All values */ Map getValues(); + /** * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX - * and BLOOM type blocks). + * and BLOOM type blocks). */ boolean isBlockCacheEnabled(); + /** * @return true if we should cache bloomfilter blocks on write */ @@ -224,29 +237,35 @@ public interface ColumnFamilyDescriptor { * @return true if we should cache data blocks on write */ boolean isCacheDataOnWrite(); + /** * @return true if we should cache index blocks on write */ boolean isCacheIndexesOnWrite(); + /** * @return Whether KV tags should be compressed along with DataBlockEncoding. When no * DataBlockEncoding is been used, this is having no effect. */ boolean isCompressTags(); + /** * @return true if we should evict cached blocks from the blockcache on close */ boolean isEvictBlocksOnClose(); + /** - * @return True if we are to favor keeping all values for this column family in the - * HRegionServer cache. + * @return True if we are to favor keeping all values for this column family in the HRegionServer + * cache. */ boolean isInMemory(); + /** * Gets whether the mob is enabled for the family. * @return True if the mob is enabled for the family. */ boolean isMobEnabled(); + /** * @return true if we should prefetch blocks into the blockcache on open */ @@ -258,9 +277,9 @@ public interface ColumnFamilyDescriptor { String toStringCustomizedValues(); /** - * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts will + * mask a later Put with lower ts. Set this to true to enable new semantics of versions. 
We will + * also consider mvcc in versions. See HBASE-15968 for details. */ boolean isNewVersionBehavior(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 5dccd0b40c5..37a63c955b2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,20 +49,21 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamil public class ColumnFamilyDescriptorBuilder { // For future backward compatibility - // Version 3 was when column names become byte arrays and when we picked up - // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. - // Version 5 was when bloom filter descriptors were removed. - // Version 6 adds metadata as a map where keys and values are byte[]. - // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) - // Version 8 -- reintroduction of bloom filters, changed from boolean to enum - // Version 9 -- add data block encoding + // Version 3 was when column names become byte arrays and when we picked up + // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. + // Version 5 was when bloom filter descriptors were removed. + // Version 6 adds metadata as a map where keys and values are byte[]. + // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) + // Version 8 -- reintroduction of bloom filters, changed from boolean to enum + // Version 9 -- add data block encoding // Version 10 -- change metadata to standard type. // Version 11 -- add column family level configuration. 
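As a quick illustration of how the attributes handled by this builder are set from client code, a minimal sketch (the family name "cf" and the specific values below are placeholders chosen for the example, not part of this change):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnFamilyExample {
      // Builds a descriptor the usual way: start from newBuilder(name),
      // chain the setters for the attributes discussed above, then build().
      public static ColumnFamilyDescriptor exampleFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(3)                                 // VERSIONS
          .setTimeToLive(7 * 24 * 60 * 60)                   // TTL, in seconds
          .setBlocksize(64 * 1024)                           // BLOCKSIZE (a soft limit)
          .setCompressionType(Compression.Algorithm.SNAPPY)  // COMPRESSION
          .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF) // DATA_BLOCK_ENCODING
          .setBloomFilterType(BloomType.ROW)                 // BLOOMFILTER
          .setMobEnabled(true)                               // IS_MOB
          .setMobThreshold(100 * 1024L)                      // MOB_THRESHOLD, default 100K
          .build();
      }
    }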
private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11; @InterfaceAudience.Private public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; - private static final Bytes IN_MEMORY_COMPACTION_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); + private static final Bytes IN_MEMORY_COMPACTION_BYTES = + new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); @InterfaceAudience.Private public static final String IN_MEMORY = HConstants.IN_MEMORY; @@ -74,53 +75,59 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes COMPRESSION_BYTES = new Bytes(Bytes.toBytes(COMPRESSION)); @InterfaceAudience.Private public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; - private static final Bytes COMPRESSION_COMPACT_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); + private static final Bytes COMPRESSION_COMPACT_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); public static final String COMPRESSION_COMPACT_MAJOR = "COMPRESSION_COMPACT_MAJOR"; - private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); + private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); public static final String COMPRESSION_COMPACT_MINOR = "COMPRESSION_COMPACT_MINOR"; - private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); + private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); @InterfaceAudience.Private public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING"; - private static final Bytes DATA_BLOCK_ENCODING_BYTES = new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); + private static final Bytes DATA_BLOCK_ENCODING_BYTES = + new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); /** - * Key for the BLOCKCACHE attribute. A more exact name would be - * CACHE_DATA_ON_READ because this flag sets whether or not we cache DATA - * blocks. We always cache INDEX and BLOOM blocks; caching these blocks cannot - * be disabled. + * Key for the BLOCKCACHE attribute. A more exact name would be CACHE_DATA_ON_READ because this + * flag sets whether or not we cache DATA blocks. We always cache INDEX and BLOOM blocks; caching + * these blocks cannot be disabled. 
*/ @InterfaceAudience.Private public static final String BLOCKCACHE = "BLOCKCACHE"; private static final Bytes BLOCKCACHE_BYTES = new Bytes(Bytes.toBytes(BLOCKCACHE)); @InterfaceAudience.Private public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE"; - private static final Bytes CACHE_DATA_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); + private static final Bytes CACHE_DATA_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE"; - private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); + private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE"; - private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); + private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); @InterfaceAudience.Private public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE"; - private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); + private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = + new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); /** - * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, - * and DATA blocks of HFiles belonging to this family will be loaded into the - * cache as soon as the file is opened. These loads will not count as cache - * misses. + * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, and DATA blocks of + * HFiles belonging to this family will be loaded into the cache as soon as the file is opened. + * These loads will not count as cache misses. */ @InterfaceAudience.Private public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN"; - private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); + private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = + new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); /** - * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. - * Use smaller block sizes for faster random-access at expense of larger - * indices (more memory consumption). Note that this is a soft limit and that - * blocks have overhead (metadata, CRCs) so blocks will tend to be the size - * specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k - * means hbase data will align with an SSDs 4k page accesses (TODO). + * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. Use smaller block + * sizes for faster random-access at expense of larger indices (more memory consumption). Note + * that this is a soft limit and that blocks have overhead (metadata, CRCs) so blocks will tend to + * be the size specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k means + * hbase data will align with an SSDs 4k page accesses (TODO). 
*/ @InterfaceAudience.Private public static final String BLOCKSIZE = "BLOCKSIZE"; @@ -141,13 +148,14 @@ public class ColumnFamilyDescriptorBuilder { public static final String MIN_VERSIONS = "MIN_VERSIONS"; private static final Bytes MIN_VERSIONS_BYTES = new Bytes(Bytes.toBytes(MIN_VERSIONS)); /** - * Retain all cells across flushes and compactions even if they fall behind a - * delete tombstone. To see all retained cells, do a 'raw' scan; see - * Scan#setRaw or pass RAW => true attribute in the shell. + * Retain all cells across flushes and compactions even if they fall behind a delete tombstone. To + * see all retained cells, do a 'raw' scan; see Scan#setRaw or pass RAW => true attribute in + * the shell. */ @InterfaceAudience.Private public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; - private static final Bytes KEEP_DELETED_CELLS_BYTES = new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); + private static final Bytes KEEP_DELETED_CELLS_BYTES = + new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); @InterfaceAudience.Private public static final String COMPRESS_TAGS = "COMPRESS_TAGS"; private static final Bytes COMPRESS_TAGS_BYTES = new Bytes(Bytes.toBytes(COMPRESS_TAGS)); @@ -168,9 +176,10 @@ public class ColumnFamilyDescriptorBuilder { public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k @InterfaceAudience.Private public static final String MOB_COMPACT_PARTITION_POLICY = "MOB_COMPACT_PARTITION_POLICY"; - private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); - public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY - = MobCompactPartitionPolicy.DAILY; + private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = + new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); + public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY = + MobCompactPartitionPolicy.DAILY; @InterfaceAudience.Private public static final String DFS_REPLICATION = "DFS_REPLICATION"; private static final Bytes DFS_REPLICATION_BYTES = new Bytes(Bytes.toBytes(DFS_REPLICATION)); @@ -180,7 +189,8 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes STORAGE_POLICY_BYTES = new Bytes(Bytes.toBytes(STORAGE_POLICY)); public static final String NEW_VERSION_BEHAVIOR = "NEW_VERSION_BEHAVIOR"; - private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); + private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = + new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); public static final boolean DEFAULT_NEW_VERSION_BEHAVIOR = false; /** * Default compression type. @@ -203,8 +213,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_MIN_VERSIONS = 0; /** - * Default setting for whether to try and serve this column family from memory - * or not. + * Default setting for whether to try and serve this column family from memory or not. */ public static final boolean DEFAULT_IN_MEMORY = false; @@ -219,14 +228,12 @@ public class ColumnFamilyDescriptorBuilder { public static final boolean DEFAULT_BLOCKCACHE = true; /** - * Default setting for whether to cache data blocks on write if block caching - * is enabled. + * Default setting for whether to cache data blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; /** - * Default setting for whether to cache index blocks on write if block caching - * is enabled. 
+ * Default setting for whether to cache index blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false; @@ -241,8 +248,7 @@ public class ColumnFamilyDescriptorBuilder { public static final BloomType DEFAULT_BLOOMFILTER = BloomType.ROW; /** - * Default setting for whether to cache bloom filter blocks on write if block - * caching is enabled. + * Default setting for whether to cache bloom filter blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; @@ -257,8 +263,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL; /** - * Default setting for whether to evict cached blocks from the blockcache on - * close. + * Default setting for whether to evict cached blocks from the blockcache on close. */ public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false; @@ -276,7 +281,8 @@ public class ColumnFamilyDescriptorBuilder { private static Map getDefaultValuesBytes() { Map values = new HashMap<>(); - DEFAULT_VALUES.forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); + DEFAULT_VALUES + .forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); return values; } @@ -326,10 +332,11 @@ public class ColumnFamilyDescriptorBuilder { /** * @param b Family name. * @return b - * @throws IllegalArgumentException If not null and not a legitimate family - * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because - * b can be null when deserializing). Cannot start with a '.' - * either. Also Family can not be an empty value or equal "recovered.edits". + * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable' + * and ends in a ':' (Null passes are allowed because + * b can be null when deserializing). Cannot start + * with a '.' either. Also Family can not be an empty value or + * equal "recovered.edits". */ public static byte[] isLegalColumnFamilyName(final byte[] b) { if (b == null) { @@ -337,27 +344,28 @@ public class ColumnFamilyDescriptorBuilder { } Preconditions.checkArgument(b.length != 0, "Column Family name can not be empty"); if (b[0] == '.') { - throw new IllegalArgumentException("Column Family names cannot start with a " - + "period: " + Bytes.toString(b)); + throw new IllegalArgumentException( + "Column Family names cannot start with a " + "period: " + Bytes.toString(b)); } for (int i = 0; i < b.length; i++) { if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') { throw new IllegalArgumentException("Illegal character <" + b[i] - + ">. Column Family names cannot contain control characters or colons: " - + Bytes.toString(b)); + + ">. 
Column Family names cannot contain control characters or colons: " + + Bytes.toString(b)); } } byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR); if (Bytes.equals(recoveredEdit, b)) { - throw new IllegalArgumentException("Column Family name cannot be: " - + HConstants.RECOVERED_EDITS_DIR); + throw new IllegalArgumentException( + "Column Family name cannot be: " + HConstants.RECOVERED_EDITS_DIR); } return b; } private final ModifyableColumnFamilyDescriptor desc; - public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) throws DeserializationException { + public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) + throws DeserializationException { return ModifyableColumnFamilyDescriptor.parseFrom(pbBytes); } @@ -453,12 +461,14 @@ public class ColumnFamilyDescriptorBuilder { return this; } - public ColumnFamilyDescriptorBuilder setMajorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMajorCompactionCompressionType(Compression.Algorithm value) { desc.setMajorCompactionCompressionType(value); return this; } - public ColumnFamilyDescriptorBuilder setMinorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMinorCompactionCompressionType(Compression.Algorithm value) { desc.setMinorCompactionCompressionType(value); return this; } @@ -532,7 +542,8 @@ public class ColumnFamilyDescriptorBuilder { return this; } - public ColumnFamilyDescriptorBuilder setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { + public ColumnFamilyDescriptorBuilder + setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { desc.setMobCompactPartitionPolicy(value); return this; } @@ -593,21 +604,19 @@ public class ColumnFamilyDescriptorBuilder { } public ColumnFamilyDescriptorBuilder setVersionsWithTimeToLive(final int retentionInterval, - final int versionAfterInterval) { + final int versionAfterInterval) { desc.setVersionsWithTimeToLive(retentionInterval, versionAfterInterval); return this; } /** - * An ModifyableFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. - * TODO: make this package-private after removing the HColumnDescriptor + * An ModifyableFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. TODO: make this package-private after removing the HColumnDescriptor */ @InterfaceAudience.Private public static class ModifyableColumnFamilyDescriptor - implements ColumnFamilyDescriptor, Comparable { + implements ColumnFamilyDescriptor, Comparable { // Column family name private final byte[] name; @@ -616,20 +625,17 @@ public class ColumnFamilyDescriptorBuilder { private final Map values = new HashMap<>(); /** - * A map which holds the configuration specific to the column family. The - * keys of the map have the same names as config keys and override the - * defaults with cf-specific settings. Example usage may be for compactions, - * etc. + * A map which holds the configuration specific to the column family. The keys of the map have + * the same names as config keys and override the defaults with cf-specific settings. Example + * usage may be for compactions, etc. 
*/ private final Map configuration = new HashMap<>(); /** - * Construct a column descriptor specifying only the family name The other - * attributes are defaulted. - * - * @param name Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - * TODO: make this private after the HCD is removed. + * Construct a column descriptor specifying only the family name The other attributes are + * defaulted. + * @param name Column family name. Must be 'printable' -- digit or letter -- and may not contain + * a : TODO: make this private after the HCD is removed. */ @InterfaceAudience.Private public ModifyableColumnFamilyDescriptor(final byte[] name) { @@ -637,8 +643,8 @@ public class ColumnFamilyDescriptorBuilder { } /** - * Constructor. Makes a deep copy of the supplied descriptor. - * TODO: make this private after the HCD is removed. + * Constructor. Makes a deep copy of the supplied descriptor. TODO: make this private after the + * HCD is removed. * @param desc The descriptor. */ @InterfaceAudience.Private @@ -646,7 +652,8 @@ public class ColumnFamilyDescriptorBuilder { this(desc.getName(), desc.getValues(), desc.getConfiguration()); } - private ModifyableColumnFamilyDescriptor(byte[] name, Map values, Map config) { + private ModifyableColumnFamilyDescriptor(byte[] name, Map values, + Map config) { this.name = name; this.values.putAll(values); this.configuration.putAll(config); @@ -685,12 +692,13 @@ public class ColumnFamilyDescriptorBuilder { } /** - * @param key The key. + * @param key The key. * @param value The value. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setValue(byte[] key, byte[] value) { - return setValue(toBytesOrNull(key, Function.identity()), toBytesOrNull(value, Function.identity())); + return setValue(toBytesOrNull(key, Function.identity()), + toBytesOrNull(value, Function.identity())); } public ModifyableColumnFamilyDescriptor setValue(String key, String value) { @@ -700,8 +708,9 @@ public class ColumnFamilyDescriptorBuilder { private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } + /** - * @param key The key. + * @param key The key. * @param value The value. * @return this (for chained invocation) */ @@ -715,7 +724,6 @@ public class ColumnFamilyDescriptorBuilder { } /** - * * @param key Key whose key and value we're to remove from HCD parameters. * @return this (for chained invocation) */ @@ -760,9 +768,9 @@ public class ColumnFamilyDescriptorBuilder { throw new IllegalArgumentException("Maximum versions must be positive"); } if (maxVersions < this.getMinVersions()) { - throw new IllegalArgumentException("Set MaxVersion to " + maxVersions - + " while minVersion is " + this.getMinVersions() - + ". Maximum versions must be >= minimum versions "); + throw new IllegalArgumentException( + "Set MaxVersion to " + maxVersions + " while minVersion is " + this.getMinVersions() + + ". 
Maximum versions must be >= minimum versions "); } setValue(MAX_VERSIONS_BYTES, Integer.toString(maxVersions)); return this; @@ -770,7 +778,6 @@ public class ColumnFamilyDescriptorBuilder { /** * Set minimum and maximum versions to keep - * * @param minVersions minimal number of versions * @param maxVersions maximum number of versions * @return this (for chained invocation) @@ -783,24 +790,22 @@ public class ColumnFamilyDescriptorBuilder { } if (maxVersions < minVersions) { - throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions - + " and set MinVersion to " + minVersions - + ", as maximum versions must be >= minimum versions."); + throw new IllegalArgumentException( + "Unable to set MaxVersion to " + maxVersions + " and set MinVersion to " + minVersions + + ", as maximum versions must be >= minimum versions."); } setMinVersions(minVersions); setMaxVersions(maxVersions); return this; } - @Override public int getBlocksize() { return getStringOrDefault(BLOCKSIZE_BYTES, Integer::valueOf, DEFAULT_BLOCKSIZE); } /** - * @param s Blocksize to use when writing out storefiles/hfiles on this - * column family. + * @param s Blocksize to use when writing out storefiles/hfiles on this column family. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlocksize(int s) { @@ -808,8 +813,8 @@ public class ColumnFamilyDescriptorBuilder { } public ModifyableColumnFamilyDescriptor setBlocksize(String blocksize) throws HBaseException { - return setBlocksize(Integer.parseInt(PrettyPrinter. - valueOf(blocksize, PrettyPrinter.Unit.BYTE))); + return setBlocksize( + Integer.parseInt(PrettyPrinter.valueOf(blocksize, PrettyPrinter.Unit.BYTE))); } @Override @@ -819,12 +824,9 @@ public class ColumnFamilyDescriptorBuilder { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * LZO - * Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See LZO Compression for how + * to enable it. * @param type Compression type setting. * @return this (for chained invocation) */ @@ -840,20 +842,18 @@ public class ColumnFamilyDescriptorBuilder { /** * Set data block encoding algorithm used in block cache. - * * @param type What kind of data block encoding will be used. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDataBlockEncoding(DataBlockEncoding type) { - return setValue(DATA_BLOCK_ENCODING_BYTES, type == null ? DataBlockEncoding.NONE.name() : type.name()); + return setValue(DATA_BLOCK_ENCODING_BYTES, + type == null ? DataBlockEncoding.NONE.name() : type.name()); } /** - * Set whether the tags should be compressed along with DataBlockEncoding. - * When no DataBlockEncoding is been used, this is having no effect. - * - * @param compressTags - * @return this (for chained invocation) + * Set whether the tags should be compressed along with DataBlockEncoding. When no + * DataBlockEncoding is been used, this is having no effect. 
n * @return this (for chained + * invocation) */ public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) { return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags)); @@ -861,8 +861,7 @@ public class ColumnFamilyDescriptorBuilder { @Override public boolean isCompressTags() { - return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, - DEFAULT_COMPRESS_TAGS); + return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, DEFAULT_COMPRESS_TAGS); } @Override @@ -884,27 +883,24 @@ public class ColumnFamilyDescriptorBuilder { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * LZO - * Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See LZO Compression for how + * to enable it. * @param type Compression type setting. * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMajorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMajorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MAJOR_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMinorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMinorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MINOR_BYTES, type.name()); } @@ -914,8 +910,8 @@ public class ColumnFamilyDescriptorBuilder { } /** - * @param inMemory True if we are to favor keeping all values for this - * column family in the HRegionServer cache + * @param inMemory True if we are to favor keeping all values for this column family in the + * HRegionServer cache * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setInMemory(boolean inMemory) { @@ -929,23 +925,22 @@ public class ColumnFamilyDescriptorBuilder { } /** - * @param inMemoryCompaction the prefered in-memory compaction policy for - * this column family + * @param inMemoryCompaction the prefered in-memory compaction policy for this column family * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { + public ModifyableColumnFamilyDescriptor + setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { return setValue(IN_MEMORY_COMPACTION_BYTES, inMemoryCompaction.name()); } @Override public KeepDeletedCells getKeepDeletedCells() { - return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, - KeepDeletedCells::getValue, DEFAULT_KEEP_DELETED); + return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, KeepDeletedCells::getValue, + DEFAULT_KEEP_DELETED); } /** - * @param keepDeletedCells True if deleted rows should not be collected - * immediately. + * @param keepDeletedCells True if deleted rows should not be collected immediately. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) { @@ -954,13 +949,13 @@ public class ColumnFamilyDescriptorBuilder { /** * By default, HBase only consider timestamp in versions. 
So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. We + * will also consider mvcc in versions. See HBASE-15968 for details. */ @Override public boolean isNewVersionBehavior() { - return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, - Boolean::parseBoolean, DEFAULT_NEW_VERSION_BEHAVIOR); + return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, Boolean::parseBoolean, + DEFAULT_NEW_VERSION_BEHAVIOR); } public ModifyableColumnFamilyDescriptor setNewVersionBehavior(boolean newVersionBehavior) { @@ -995,8 +990,7 @@ public class ColumnFamilyDescriptorBuilder { } /** - * @param minVersions The minimum number of versions to keep. (used when - * timeToLive is set) + * @param minVersions The minimum number of versions to keep. (used when timeToLive is set) * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { @@ -1004,15 +998,14 @@ public class ColumnFamilyDescriptorBuilder { } /** - * Retain all versions for a given TTL(retentionInterval), and then only a specific number - * of versions(versionAfterInterval) after that interval elapses. - * - * @param retentionInterval Retain all versions for this interval + * Retain all versions for a given TTL(retentionInterval), and then only a specific number of + * versions(versionAfterInterval) after that interval elapses. + * @param retentionInterval Retain all versions for this interval * @param versionAfterInterval Retain no of versions to retain after retentionInterval * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive( - final int retentionInterval, final int versionAfterInterval) { + public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive(final int retentionInterval, + final int versionAfterInterval) { ModifyableColumnFamilyDescriptor modifyableColumnFamilyDescriptor = setVersions(versionAfterInterval, Integer.MAX_VALUE); modifyableColumnFamilyDescriptor.setTimeToLive(retentionInterval); @@ -1026,8 +1019,8 @@ public class ColumnFamilyDescriptorBuilder { } /** - * @param blockCacheEnabled True if hfile DATA type blocks should be cached - * (We always cache INDEX and BLOOM blocks; you cannot turn this off). + * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache + * INDEX and BLOOM blocks; you cannot turn this off). 
* @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { @@ -1046,7 +1039,8 @@ public class ColumnFamilyDescriptorBuilder { @Override public int getScope() { - return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, DEFAULT_REPLICATION_SCOPE); + return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, + DEFAULT_REPLICATION_SCOPE); } /** @@ -1059,7 +1053,8 @@ public class ColumnFamilyDescriptorBuilder { @Override public boolean isCacheDataOnWrite() { - return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_ON_WRITE); + return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_DATA_ON_WRITE); } /** @@ -1072,7 +1067,8 @@ public class ColumnFamilyDescriptorBuilder { @Override public boolean isCacheIndexesOnWrite() { - return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE); + return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_INDEX_ON_WRITE); } /** @@ -1085,7 +1081,8 @@ public class ColumnFamilyDescriptorBuilder { @Override public boolean isCacheBloomsOnWrite() { - return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_BLOOMS_ON_WRITE); + return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_BLOOMS_ON_WRITE); } /** @@ -1098,12 +1095,12 @@ public class ColumnFamilyDescriptorBuilder { @Override public boolean isEvictBlocksOnClose() { - return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, DEFAULT_EVICT_BLOCKS_ON_CLOSE); + return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, + DEFAULT_EVICT_BLOCKS_ON_CLOSE); } /** - * @param value true if we should evict cached blocks from the blockcache on - * close + * @param value true if we should evict cached blocks from the blockcache on close * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { @@ -1112,12 +1109,12 @@ public class ColumnFamilyDescriptorBuilder { @Override public boolean isPrefetchBlocksOnOpen() { - return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, DEFAULT_PREFETCH_BLOCKS_ON_OPEN); + return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, + DEFAULT_PREFETCH_BLOCKS_ON_OPEN); } /** - * @param value true if we should prefetch blocks into the blockcache on - * open + * @param value true if we should prefetch blocks into the blockcache on open * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setPrefetchBlocksOnOpen(boolean value) { @@ -1137,7 +1134,6 @@ public class ColumnFamilyDescriptorBuilder { return s.toString(); } - @Override public String toStringCustomizedValues() { StringBuilder s = new StringBuilder(); @@ -1164,9 +1160,10 @@ public class ColumnFamilyDescriptorBuilder { } String key = Bytes.toString(entry.getKey().get()); String value = Bytes.toStringBinary(entry.getValue().get()); - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + if ( + printDefaults || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value) + ) { s.append(", "); s.append(key); s.append(" => "); @@ -1210,7 +1207,8 @@ public class ColumnFamilyDescriptorBuilder { printCommaForConfiguration = true; s.append('\'').append(e.getKey()).append('\''); s.append(" => "); - 
s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\''); + s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))) + .append('\''); } s.append("}"); } @@ -1223,7 +1221,8 @@ public class ColumnFamilyDescriptorBuilder { return true; } if (obj instanceof ModifyableColumnFamilyDescriptor) { - return ColumnFamilyDescriptor.COMPARATOR.compare(this, (ModifyableColumnFamilyDescriptor) obj) == 0; + return ColumnFamilyDescriptor.COMPARATOR.compare(this, + (ModifyableColumnFamilyDescriptor) obj) == 0; } return false; } @@ -1247,19 +1246,17 @@ public class ColumnFamilyDescriptorBuilder { * @see #parseFrom(byte[]) */ private byte[] toByteArray() { - return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this) - .toByteArray()); + return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this).toByteArray()); } /** - * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb - * magic prefix - * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from - * bytes - * @throws DeserializationException - * @see #toByteArray() + * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic + * prefix + * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from bytes + * n * @see #toByteArray() */ - private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException { + private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) + throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("No magic"); } @@ -1288,9 +1285,7 @@ public class ColumnFamilyDescriptorBuilder { /** * Setter for storing a configuration setting in {@link #configuration} map. - * - * @param key Config key. Same as XML config key e.g. - * hbase.something.or.other. + * @param key Config key. Same as XML config key e.g. hbase.something.or.other. * @param value String value. If null, removes the configuration. * @return this (for chained invocation) */ @@ -1304,11 +1299,8 @@ public class ColumnFamilyDescriptorBuilder { } /** - * Remove a configuration setting represented by the key from the - * {@link #configuration} map. - * - * @param key - * @return this (for chained invocation) + * Remove a configuration setting represented by the key from the {@link #configuration} map. 
n + * * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) { return setConfiguration(key, null); @@ -1320,10 +1312,8 @@ public class ColumnFamilyDescriptorBuilder { } /** - * Set the encryption algorithm for use with this family - * - * @param algorithm - * @return this (for chained invocation) + * Set the encryption algorithm for use with this family n * @return this (for chained + * invocation) */ public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) { return setValue(ENCRYPTION_BYTES, algorithm); @@ -1335,10 +1325,7 @@ public class ColumnFamilyDescriptorBuilder { } /** - * Set the raw crypto key attribute for the family - * - * @param keyBytes - * @return this (for chained invocation) + * Set the raw crypto key attribute for the family n * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) { return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes)); @@ -1351,7 +1338,6 @@ public class ColumnFamilyDescriptorBuilder { /** * Sets the mob threshold of the family. - * * @param threshold The mob threshold. * @return this (for chained invocation) */ @@ -1366,7 +1352,6 @@ public class ColumnFamilyDescriptorBuilder { /** * Enables the mob for the family. - * * @param isMobEnabled Whether to enable the mob for the family. * @return this (for chained invocation) */ @@ -1383,32 +1368,30 @@ public class ColumnFamilyDescriptorBuilder { /** * Set the mob compact partition policy for the family. - * * @param policy policy type * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { + public ModifyableColumnFamilyDescriptor + setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { return setValue(MOB_COMPACT_PARTITION_POLICY_BYTES, policy.name()); } @Override public short getDFSReplication() { - return getStringOrDefault(DFS_REPLICATION_BYTES, - Short::valueOf, DEFAULT_DFS_REPLICATION); + return getStringOrDefault(DFS_REPLICATION_BYTES, Short::valueOf, DEFAULT_DFS_REPLICATION); } /** * Set the replication factor to hfile(s) belonging to this family - * - * @param replication number of replicas the blocks(s) belonging to this CF - * should have, or {@link #DEFAULT_DFS_REPLICATION} for the default - * replication factor set in the filesystem + * @param replication number of replicas the blocks(s) belonging to this CF should have, or + * {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in + * the filesystem * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDFSReplication(short replication) { if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) { throw new IllegalArgumentException( - "DFS replication factor cannot be less than 1 if explicitly set."); + "DFS replication factor cannot be less than 1 if explicitly set."); } return setValue(DFS_REPLICATION_BYTES, Short.toString(replication)); } @@ -1420,11 +1403,8 @@ public class ColumnFamilyDescriptorBuilder { /** * Set the storage policy for use with this family - * - * @param policy the policy to set, valid setting includes: - * "LAZY_PERSIST", - * "ALL_SSD", "ONE_SSD", "HOT", "WARM", - * "COLD" + * @param policy the policy to set, valid setting includes: "LAZY_PERSIST", + * "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD" * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setStoragePolicy(String policy) { diff 
--git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java index 018cfef0260..225bb072db7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +16,19 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; /** - * Currently, there are only two compact types: - * {@code NORMAL} means do store files compaction; + * Currently, there are only two compact types: {@code NORMAL} means do store files compaction; * {@code MOB} means do mob files compaction. - * */ + */ @InterfaceAudience.Public public enum CompactType { - NORMAL (0), - MOB (1); + NORMAL(0), + MOB(1); - CompactType(int value) {} + CompactType(int value) { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java index 51f7d071e4a..e1f1dcd7f77 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,5 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Public public enum CompactionState { - NONE, MINOR, MAJOR, MAJOR_AND_MINOR + NONE, + MINOR, + MAJOR, + MAJOR_AND_MINOR } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java index 08afeb61b55..592a99b0584 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache that only returns complete result. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index 3ea13a44ed5..7b9a422094a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,23 +28,22 @@ import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; /** - * A cluster connection encapsulating lower level individual connections to actual servers and - * a connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} - * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} - * the connection to release the resources. - * - * <p>The connection object contains logic to find the master, locate regions out on the cluster, - * keeps a cache of locations and then knows how to re-calibrate after they move. The individual - * connections to servers, meta cache, zookeeper connection, etc are all shared by the - * {@link Table} and {@link Admin} instances obtained from this connection. - * - * <p>Connection creation is a heavy-weight operation. Connection implementations are thread-safe, - * so that the client can create a connection once, and share it with different threads. - * {@link Table} and {@link Admin} instances, on the other hand, are light-weight and are not - * thread-safe. Typically, a single connection per client application is instantiated and every - * thread will obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} - * is not recommended. - * + * A cluster connection encapsulating lower level individual connections to actual servers and a + * connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} + * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} the + * connection to release the resources. + * <p> + * The connection object contains logic to find the master, locate regions out on the cluster, keeps + * a cache of locations and then knows how to re-calibrate after they move. The individual + * connections to servers, meta cache, zookeeper connection, etc are all shared by the {@link Table} + * and {@link Admin} instances obtained from this connection. + * <p>
    + * Connection creation is a heavy-weight operation. Connection implementations are thread-safe, so + * that the client can create a connection once, and share it with different threads. {@link Table} + * and {@link Admin} instances, on the other hand, are light-weight and are not thread-safe. + * Typically, a single connection per client application is instantiated and every thread will + * obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} is not + * recommended. * @see ConnectionFactory * @since 0.99.0 */ @@ -52,13 +51,11 @@ import org.apache.yetus.audience.InterfaceAudience; public interface Connection extends Abortable, Closeable { /* - * Implementation notes: - * - Only allow new style of interfaces: - * -- All table names are passed as TableName. No more byte[] and string arguments - * -- Most of the classes with names H is deprecated in favor of non-H versions - * (Table, Connection, etc) - * -- Only real client-facing public methods are allowed - * - Connection should contain only getTable(), getAdmin() kind of general methods. + * Implementation notes: - Only allow new style of interfaces: -- All table names are passed as + * TableName. No more byte[] and string arguments -- Most of the classes with names H is + * deprecated in favor of non-H versions (Table, Connection, etc) -- Only real client-facing + * public methods are allowed - Connection should contain only getTable(), getAdmin() kind of + * general methods. */ /** @@ -67,17 +64,14 @@ public interface Connection extends Abortable, Closeable { Configuration getConfiguration(); /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *
<p>
    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *
<p>
    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table * @return a Table to use for interactions with this table */ @@ -86,20 +80,16 @@ public interface Connection extends Abortable, Closeable { } /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *
<p>
    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *
<p>
    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. - * + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table - * @param pool The thread pool to use for batch operations, null to use a default pool. + * @param pool The thread pool to use for batch operations, null to use a default pool. * @return a Table to use for interactions with this table */ default Table getTable(TableName tableName, ExecutorService pool) throws IOException { @@ -109,18 +99,16 @@ public interface Connection extends Abortable, Closeable { /** *
<p>
    * Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The - * {@link BufferedMutator} returned by this method is thread-safe. - * This accessor will create a new ThreadPoolExecutor and will be shutdown once we close the - * BufferedMutator. This object can be used for long lived operations. + * {@link BufferedMutator} returned by this method is thread-safe. This accessor will create a new + * ThreadPoolExecutor and will be shutdown once we close the BufferedMutator. This object can be + * used for long lived operations. *
<p>
    *
    - * The caller is responsible for calling {@link BufferedMutator#close()} on - * the returned {@link BufferedMutator} instance. + * The caller is responsible for calling {@link BufferedMutator#close()} on the returned + * {@link BufferedMutator} instance. *
<p>
    *
    - * * @param tableName the name of the table - * * @return a {@link BufferedMutator} for the supplied tableName. */ BufferedMutator getBufferedMutator(TableName tableName) throws IOException; @@ -133,7 +121,6 @@ public interface Connection extends Abortable, Closeable { * responsibility to shutdown. For ThreadPool created by us, we will shutdown when user calls * {@link BufferedMutator#close()}. The caller is responsible for calling * {@link BufferedMutator#close()} on the returned {@link BufferedMutator} instance. - * * @param params details on how to instantiate the {@code BufferedMutator}. * @return a {@link BufferedMutator} for the supplied tableName. */ @@ -142,15 +129,10 @@ public interface Connection extends Abortable, Closeable { /** * Retrieve a RegionLocator implementation to inspect region information on a table. The returned * RegionLocator is not thread-safe, so a new instance should be created for each using thread. - * - * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither - * required nor desired. - *
    + * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither + * required nor desired.
    * The caller is responsible for calling {@link RegionLocator#close()} on the returned - * RegionLocator instance. - * - * RegionLocator needs to be unmanaged - * + * RegionLocator instance. RegionLocator needs to be unmanaged * @param tableName Name of the table who's region is to be examined * @return A RegionLocator instance */ @@ -167,14 +149,10 @@ public interface Connection extends Abortable, Closeable { void clearRegionLocationCache(); /** - * Retrieve an Admin implementation to administer an HBase cluster. - * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for - * each using thread. This is a lightweight operation. Pooling or caching of the returned - * Admin is not recommended. - *
    - * The caller is responsible for calling {@link Admin#close()} on the returned - * Admin instance. - * + * Retrieve an Admin implementation to administer an HBase cluster. The returned Admin is not + * guaranteed to be thread-safe. A new instance should be created for each using thread. This is a + * lightweight operation. Pooling or caching of the returned Admin is not recommended.
    + * The caller is responsible for calling {@link Admin#close()} on the returned Admin instance. * @return an Admin instance for cluster administration */ Admin getAdmin() throws IOException; @@ -191,20 +169,16 @@ public interface Connection extends Abortable, Closeable { /** * Returns an {@link TableBuilder} for creating {@link Table}. * @param tableName the name of the table - * @param pool the thread pool to use for requests like batch and scan + * @param pool the thread pool to use for requests like batch and scan */ TableBuilder getTableBuilder(TableName tableName, ExecutorService pool); /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    * This will be used mostly by hbck tool. - * * @return an Hbck instance for active master. Active master is fetched from the zookeeper. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) @@ -213,18 +187,13 @@ public interface Connection extends Abortable, Closeable { } /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    - * This will be used mostly by hbck tool. This may only be used to by pass getting - * registered master from ZK. In situations where ZK is not available or active master is not - * registered with ZK and user can get master address by other means, master can be explicitly - * specified. - * + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    + * This will be used mostly by hbck tool. This may only be used to by pass getting registered + * master from ZK. In situations where ZK is not available or active master is not registered with + * ZK and user can get master address by other means, master can be explicitly specified. * @param masterServer explicit {@link ServerName} for master server * @return an Hbck instance for a specified master server */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java index eee2139796f..c8a283d0869 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java @@ -1,18 +1,25 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_PAUSE; import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_PAUSE; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; @@ -20,12 +27,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Configuration parameters for the connection. - * Configuration is a heavy weight registry that does a lot of string operations and regex matching. - * Method calls into Configuration account for high CPU usage and have huge performance impact. - * This class caches connection-related configuration values in the ConnectionConfiguration - * object so that expensive conf.getXXX() calls are avoided every time HTable, etc is instantiated. - * see HBASE-12128 + * Configuration parameters for the connection. Configuration is a heavy weight registry that does a + * lot of string operations and regex matching. 
Method calls into Configuration account for high CPU + * usage and have huge performance impact. This class caches connection-related configuration values + * in the ConnectionConfiguration object so that expensive conf.getXXX() calls are avoided every + * time HTable, etc is instantiated. see HBASE-12128 */ @InterfaceAudience.Private public class ConnectionConfiguration { @@ -34,9 +40,9 @@ public class ConnectionConfiguration { public static final String WRITE_BUFFER_SIZE_KEY = "hbase.client.write.buffer"; public static final long WRITE_BUFFER_SIZE_DEFAULT = 2097152; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS = - "hbase.client.write.buffer.periodicflush.timeout.ms"; + "hbase.client.write.buffer.periodicflush.timeout.ms"; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = - "hbase.client.write.buffer.periodicflush.timertick.ms"; + "hbase.client.write.buffer.periodicflush.timertick.ms"; public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT = 0; // 0 == Disabled public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT = 1000L; // 1 second public static final String MAX_KEYVALUE_SIZE_KEY = "hbase.client.keyvalue.maxsize"; @@ -49,9 +55,8 @@ public class ConnectionConfiguration { public static final int PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT = 1000000; // 1s /** - * Parameter name for client pause when server is overloaded, denoted by an exception - * where {@link org.apache.hadoop.hbase.HBaseServerException#isServerOverloaded(Throwable)} - * is true. + * Parameter name for client pause when server is overloaded, denoted by an exception where + * {@link org.apache.hadoop.hbase.HBaseServerException#isServerOverloaded(Throwable)} is true. */ public static final String HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED = "hbase.client.pause.server.overloaded"; @@ -62,8 +67,8 @@ public class ConnectionConfiguration { // to handle checking both properties in parsing below. The benefit of calling this is // that it should still cause Configuration to log a warning if we do end up falling // through to the old deprecated config. 
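/*
 * A minimal client-side sketch (not part of this patch) of the pause keys handled in the hunk
 * above; it assumes only the standard HBaseConfiguration/HConstants entry points.
 */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ClientPauseConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Base pause between client retries, in milliseconds.
    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 100);
    // Pause used when a server reports it is overloaded; ConnectionConfiguration falls back to
    // the base pause (with a warning) if this value is smaller, and the deprecated CQTBE key is
    // still honored through the Configuration.addDeprecation call shown below.
    conf.setLong("hbase.client.pause.server.overloaded", 500);
  }
}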
- Configuration.addDeprecation( - HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED); + Configuration.addDeprecation(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, + HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED); } private final long writeBufferSize; @@ -93,26 +98,23 @@ public class ConnectionConfiguration { ConnectionConfiguration(Configuration conf) { this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT); - this.writeBufferPeriodicFlushTimeoutMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); + this.writeBufferPeriodicFlushTimeoutMs = conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, + WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); this.writeBufferPeriodicFlushTimerTickMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); + WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); this.metaOperationTimeout = conf.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.operationTimeout = conf.getInt( - HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.scannerCaching = conf.getInt( - HConstants.HBASE_CLIENT_SCANNER_CACHING, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + this.scannerCaching = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - this.scannerMaxResultSize = - conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); + this.scannerMaxResultSize = conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.primaryCallTimeoutMicroSecond = conf.getInt(PRIMARY_CALL_TIMEOUT_MICROSECOND, PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT); @@ -124,23 +126,22 @@ public class ConnectionConfiguration { conf.getInt(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT); - this.retries = conf.getInt( - HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - this.clientScannerAsyncPrefetch = conf.getBoolean( - Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); + this.clientScannerAsyncPrefetch = conf.getBoolean(Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, + Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT); this.rpcTimeout = - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 
HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); - + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); long pauseMs = conf.getLong(HBASE_CLIENT_PAUSE, DEFAULT_HBASE_CLIENT_PAUSE); long pauseMsForServerOverloaded = conf.getLong(HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED, @@ -148,8 +149,8 @@ public class ConnectionConfiguration { if (pauseMsForServerOverloaded < pauseMs) { LOG.warn( "The {} setting: {} ms is less than the {} setting: {} ms, use the greater one instead", - HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED, pauseMsForServerOverloaded, - HBASE_CLIENT_PAUSE, pauseMs); + HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED, pauseMsForServerOverloaded, HBASE_CLIENT_PAUSE, + pauseMs); pauseMsForServerOverloaded = pauseMs; } @@ -158,9 +159,8 @@ public class ConnectionConfiguration { } /** - * Constructor - * This is for internal testing purpose (using the default value). - * In real usage, we should read the configuration from the Configuration object. + * Constructor This is for internal testing purpose (using the default value). In real usage, we + * should read the configuration from the Configuration object. */ protected ConnectionConfiguration() { this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT; @@ -173,7 +173,7 @@ public class ConnectionConfiguration { this.primaryCallTimeoutMicroSecond = 10000; this.replicaCallTimeoutMicroSecondScan = 1000000; this.metaReplicaCallTimeoutMicroSecondScan = - HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT; + HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT; this.retries = HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER; this.clientScannerAsyncPrefetch = Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH; this.maxKeyValueSize = MAX_KEYVALUE_SIZE_DEFAULT; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 7980532727c..1fb0c7fecaf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,13 +53,15 @@ import org.apache.yetus.audience.InterfaceAudience; * Since 2.2.0, Connection created by ConnectionFactory can contain user-specified kerberos * credentials if caller has following two configurations set: *

- * <li>hbase.client.keytab.file, points to a valid keytab on the local filesystem
- * <li>hbase.client.kerberos.principal, gives the Kerberos principal to use
+ * <li>hbase.client.keytab.file, points to a valid keytab on the local filesystem</li>
+ * <li>hbase.client.kerberos.principal, gives the Kerberos principal to use</li>
 * </ul>
    * By this way, caller can directly connect to kerberized cluster without caring login and * credentials renewal logic in application. + * *
      * 
    + * * Similarly, {@link Connection} also returns {@link Admin} and {@link RegionLocator} * implementations. * @see Connection @@ -69,7 +70,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public class ConnectionFactory { - public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = "hbase.client.async.connection.impl"; + public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = + "hbase.client.async.connection.impl"; /** No public c.tors */ protected ConnectionFactory() { @@ -154,7 +156,7 @@ public class ConnectionFactory { * @return Connection object for conf */ public static Connection createConnection(Configuration conf, ExecutorService pool) - throws IOException { + throws IOException { return createConnection(conf, pool, AuthUtil.loginClient(conf)); } @@ -227,8 +229,8 @@ public class ConnectionFactory { Constructor constructor = clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); constructor.setAccessible(true); - return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor - .newInstance(conf, pool, user)); + return user.runAs((PrivilegedExceptionAction< + Connection>) () -> (Connection) constructor.newInstance(conf, pool, user)); } catch (Exception e) { throw new IOException(e); } @@ -281,7 +283,7 @@ public class ConnectionFactory { * @return AsyncConnection object wrapped by CompletableFuture */ public static CompletableFuture createAsyncConnection(Configuration conf, - final User user) { + final User user) { return TraceUtil.tracedFuture(() -> { CompletableFuture future = new CompletableFuture<>(); ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf); @@ -299,8 +301,8 @@ public class ConnectionFactory { Class clazz = conf.getClass(HBASE_CLIENT_ASYNC_CONNECTION_IMPL, AsyncConnectionImpl.class, AsyncConnection.class); try { - future.complete(user.runAs( - (PrivilegedExceptionAction) () -> ReflectionUtils + future.complete( + user.runAs((PrivilegedExceptionAction) () -> ReflectionUtils .newInstance(clazz, conf, registry, clusterId, user))); } catch (Exception e) { registry.close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index b810cc56f59..8428a9becb9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -256,7 +256,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { * Constructor, for creating cluster connection with provided ConnectionRegistry. 
*/ ConnectionImplementation(Configuration conf, ExecutorService pool, User user, - ConnectionRegistry registry) throws IOException { + ConnectionRegistry registry) throws IOException { this.conf = conf; this.user = user; if (user != null && user.isLoginFromKeytab()) { @@ -266,12 +266,12 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { this.connectionConfig = new ConnectionConfiguration(conf); this.closed = false; this.metaReplicaCallTimeoutScanInMicroSecond = - connectionConfig.getMetaReplicaCallTimeoutMicroSecondScan(); + connectionConfig.getMetaReplicaCallTimeoutMicroSecondScan(); // how many times to try, one more than max *retry* time this.numTries = retries2Attempts(connectionConfig.getRetriesNumber()); this.rpcTimeout = - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); if (conf.getBoolean(NonceGenerator.CLIENT_NONCES_ENABLED_KEY, true)) { synchronized (nonceGeneratorCreateLock) { if (nonceGenerator == null) { @@ -290,10 +290,10 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { this.asyncProcess = new AsyncProcess(this, conf, rpcCallerFactory, rpcControllerFactory); boolean shouldListen = - conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT); - Class listenerClass = conf.getClass( - ClusterStatusListener.STATUS_LISTENER_CLASS, - ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS, ClusterStatusListener.Listener.class); + conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT); + Class listenerClass = + conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS, + ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS, ClusterStatusListener.Listener.class); // Is there an alternate BufferedMutator to use? this.alternateBufferedMutatorClassName = this.conf.get(BufferedMutator.CLASSNAME_KEY); @@ -308,8 +308,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) { String scope = MetricsConnection.getScope(conf, clusterId, this); - this.metrics = - new MetricsConnection(scope, this::getBatchPool, this::getMetaLookupPool); + this.metrics = new MetricsConnection(scope, this::getBatchPool, this::getMetaLookupPool); } else { this.metrics = null; } @@ -321,16 +320,16 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { if (shouldListen) { if (listenerClass == null) { LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " - + ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status"); + + ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status"); } else { clusterStatusListener = - new ClusterStatusListener(new ClusterStatusListener.DeadServerHandler() { - @Override - public void newDead(ServerName sn) { - clearCaches(sn); - rpcClient.cancelConnections(sn); - } - }, conf, listenerClass); + new ClusterStatusListener(new ClusterStatusListener.DeadServerHandler() { + @Override + public void newDead(ServerName sn) { + clearCaches(sn); + rpcClient.cancelConnections(sn); + } + }, conf, listenerClass); } } } catch (Throwable e) { @@ -342,26 +341,26 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { // Get the region locator's meta replica mode. 
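/*
 * Sketch (not part of this patch) of enabling the cluster status listener wired up in the
 * constructor above; it uses only the public Configuration/ConnectionFactory API and assumes the
 * server-side status publisher is enabled.
 */
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class StatusListenerSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // With this flag set, the connection registers a ClusterStatusListener whose newDead(...)
    // callback (see above) clears cached locations for a dead server and cancels its connections.
    conf.setBoolean(HConstants.STATUS_PUBLISHED, true);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // use the connection as usual; dead-server notifications are handled in the background
    }
  }
}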
this.metaReplicaMode = CatalogReplicaMode - .fromString(conf.get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString())); + .fromString(conf.get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString())); switch (this.metaReplicaMode) { case LOAD_BALANCE: String replicaSelectorClass = - conf.get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, - CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + conf.get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory - .createSelector(replicaSelectorClass, META_TABLE_NAME, getChoreService(), () -> { - int numOfReplicas = 1; - try { - RegionLocations metaLocations = this.registry.getMetaRegionLocations() - .get(connectionConfig.getReadRpcTimeout(), TimeUnit.MILLISECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); + .createSelector(replicaSelectorClass, META_TABLE_NAME, getChoreService(), () -> { + int numOfReplicas = 1; + try { + RegionLocations metaLocations = this.registry.getMetaRegionLocations() + .get(connectionConfig.getReadRpcTimeout(), TimeUnit.MILLISECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); break; case NONE: // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config. @@ -383,7 +382,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { /** * @param conn The connection for which to replace the generator. - * @param cnm Replaces the nonce generator used, for testing. + * @param cnm Replaces the nonce generator used, for testing. * @return old nonce generator. 
*/ static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn, NonceGenerator cnm) { @@ -406,7 +405,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public Table build() { return new HTable(ConnectionImplementation.this, this, rpcCallerFactory, - rpcControllerFactory, pool); + rpcControllerFactory, pool); } }; } @@ -476,16 +475,16 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { throw new RegionServerStoppedException(masterServer + " is dead."); } String key = - getStubKey(MasterProtos.HbckService.BlockingInterface.class.getName(), masterServer); + getStubKey(MasterProtos.HbckService.BlockingInterface.class.getName(), masterServer); return new HBaseHbck( - (MasterProtos.HbckService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { - BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout); - return MasterProtos.HbckService.newBlockingStub(channel); - }), rpcControllerFactory); + (MasterProtos.HbckService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { + BlockingRpcChannel channel = + this.rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout); + return MasterProtos.HbckService.newBlockingStub(channel); + }), rpcControllerFactory); }, () -> TraceUtil.createSpan(this.getClass().getSimpleName() + ".getHbck") - .setAttribute(HBaseSemanticAttributes.SERVER_NAME_KEY, masterServer.getServerName())); + .setAttribute(HBaseSemanticAttributes.SERVER_NAME_KEY, masterServer.getServerName())); } @Override @@ -517,7 +516,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { } private ThreadPoolExecutor getThreadPool(int maxThreads, int coreThreads, String nameHint, - BlockingQueue passedWorkQueue) { + BlockingQueue passedWorkQueue) { // shared HTable thread executor not yet initialized if (maxThreads == 0) { maxThreads = Runtime.getRuntime().availableProcessors() * 8; @@ -528,14 +527,15 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60); BlockingQueue workQueue = passedWorkQueue; if (workQueue == null) { - workQueue = new LinkedBlockingQueue<>(maxThreads * conf.getInt( - HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); + workQueue = + new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); coreThreads = maxThreads; } ThreadPoolExecutor tpe = - new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, - new ThreadFactoryBuilder().setNameFormat(toString() + nameHint + "-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, + new ThreadFactoryBuilder().setNameFormat(toString() + nameHint + "-pool-%d") + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); tpe.allowCoreThreadTimeOut(true); return tpe; } @@ -550,7 +550,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { // the queue is full, a new thread will be started int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128); this.metaLookupPool = - getThreadPool(threads, threads, "-metaLookup-shared-", new LinkedBlockingQueue<>()); + getThreadPool(threads, threads, "-metaLookup-shared-", 
new LinkedBlockingQueue<>()); } } } @@ -618,8 +618,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { } /** - * If choreService has not been created yet, create the ChoreService. - * @return ChoreService + * If choreService has not been created yet, create the ChoreService. n */ synchronized ChoreService getChoreService() { if (choreService == null) { @@ -673,7 +672,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public HRegionLocation getRegionLocation(final TableName tableName, final byte[] row, - boolean reload) throws IOException { + boolean reload) throws IOException { return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row); } @@ -689,7 +688,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) - throws IOException { + throws IOException { checkClosed(); try { if (!isTableEnabled(tableName)) { @@ -701,7 +700,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { return true; } List> locations = - MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true); + MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true); int notDeployed = 0; int regionCount = 0; @@ -768,7 +767,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public List locateRegions(TableName tableName, boolean useCache, - boolean offlined) throws IOException { + boolean offlined) throws IOException { List regions; if (TableName.isMetaTableName(tableName)) { regions = Collections.singletonList(RegionInfoBuilder.FIRST_META_REGIONINFO); @@ -794,23 +793,24 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public HRegionLocation locateRegion(final TableName tableName, final byte[] row) - throws IOException { + throws IOException { RegionLocations locations = locateRegion(tableName, row, true, true); return locations == null ? null : locations.getRegionLocation(); } @Override public HRegionLocation relocateRegion(final TableName tableName, final byte[] row) - throws IOException { + throws IOException { RegionLocations locations = - relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); - return locations == null ? null - : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); + relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); + return locations == null + ? null + : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); } @Override public RegionLocations relocateRegion(final TableName tableName, final byte[] row, int replicaId) - throws IOException { + throws IOException { // Since this is an explicit request not to use any caching, finding // disabled tables should not be desirable. This will ensure that an exception is thrown when // the first time a disabled table is interacted with. 
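/*
 * Client-facing counterpart of the location lookups above; a sketch (not part of this patch)
 * that uses only the public Connection/RegionLocator API. The table name and row key are
 * hypothetical.
 */
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocatorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
      // Lightweight and not thread-safe: create one per using thread and close it when done.
      RegionLocator locator = connection.getRegionLocator(TableName.valueOf("table1"))) {
      // reload=true bypasses the location cache, mirroring relocateRegion(...) above.
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("some-row"), true);
      System.out.println(location.getServerName());
    }
  }
}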
@@ -823,13 +823,13 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, - boolean retry) throws IOException { + boolean retry) throws IOException { return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID); } @Override public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, - boolean retry, int replicaId) throws IOException { + boolean retry, int replicaId) throws IOException { checkClosed(); if (tableName == null || tableName.getName().length == 0) { throw new IllegalArgumentException("table name cannot be null or zero length"); @@ -843,7 +843,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { } private RegionLocations locateMeta(final TableName tableName, boolean useCache, int replicaId) - throws IOException { + throws IOException { // HBASE-10785: We cache the location of the META itself, so that we are not overloading // zookeeper with one request for every region lookup. We cache the META with empty row // key in MetaCache. @@ -881,7 +881,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { * seeking. */ private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache, - boolean retry, int replicaId) throws IOException { + boolean retry, int replicaId) throws IOException { // If we are supposed to be using the cache, look in the cache to see if we already have the // region. if (useCache) { @@ -895,15 +895,15 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { // without knowing the precise region names. byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); byte[] metaStopKey = - RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); + RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); Scan s = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey, true) - .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(1) - .setReadType(ReadType.PREAD); + .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(1) + .setReadType(ReadType.PREAD); switch (this.metaReplicaMode) { case LOAD_BALANCE: int metaReplicaId = - this.metaReplicaSelector.select(tableName, row, RegionLocateType.CURRENT); + this.metaReplicaSelector.select(tableName, row, RegionLocateType.CURRENT); if (metaReplicaId != RegionInfo.DEFAULT_REPLICA_ID) { // If the selector gives a non-primary meta replica region, then go with it. // Otherwise, just go to primary in non-hedgedRead mode. 
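/*
 * Sketch (not part of this patch) of enabling the catalog-replica load-balance mode consumed by
 * the locateRegionInMeta(...) switch above. The key is the RegionLocator constant referenced in
 * the constructor hunk; the value "LoadBalance" is assumed here to be what
 * CatalogReplicaMode.fromString(...) expects for LOAD_BALANCE.
 */
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaReplicaModeSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Spread hbase:meta lookups across the catalog table's read replicas.
    conf.set(RegionLocator.LOCATOR_META_REPLICAS_MODE, "LoadBalance");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // region lookups made through this connection now consult the replica selector shown above
    }
  }
}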
@@ -922,7 +922,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { for (int tries = 0;; tries++) { if (tries >= maxAttempts) { throw new NoServerForRegionException("Unable to find region for " - + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries."); + + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries."); } if (useCache) { RegionLocations locations = getCachedLocation(tableName, row); @@ -953,11 +953,11 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { } s.resetMvccReadPoint(); final Span span = new TableOperationSpanBuilder(this) - .setTableName(TableName.META_TABLE_NAME).setOperation(s).build(); + .setTableName(TableName.META_TABLE_NAME).setOperation(s).build(); try (Scope ignored = span.makeCurrent(); - ReversedClientScanner rcs = new ReversedClientScanner(conf, s, - TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory, - getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) { + ReversedClientScanner rcs = + new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, + rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) { boolean tableNotFound = true; for (;;) { Result regionInfoRow = rcs.next(); @@ -966,7 +966,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { throw new TableNotFoundException(tableName); } else { throw new IOException( - "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); + "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); } } tableNotFound = false; @@ -978,7 +978,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { RegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegion(); if (regionInfo == null) { throw new IOException("RegionInfo null or empty in " + TableName.META_TABLE_NAME - + ", row=" + regionInfoRow); + + ", row=" + regionInfoRow); } // See HBASE-20182. It is possible that we locate to a split parent even after the // children are online, so here we need to skip this region and go to the next one. @@ -987,25 +987,25 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { } if (regionInfo.isOffline()) { throw new RegionOfflineException( - "Region offline; disable table call? " + regionInfo.getRegionNameAsString()); + "Region offline; disable table call? " + regionInfo.getRegionNameAsString()); } // It is possible that the split children have not been online yet and we have skipped // the parent in the above condition, so we may have already reached a region which does // not contains us. 
if (!regionInfo.containsRow(row)) { throw new IOException( - "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); + "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); } ServerName serverName = locations.getRegionLocation(replicaId).getServerName(); if (serverName == null) { throw new NoServerForRegionException("No server address listed in " - + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() - + " containing row " + Bytes.toStringBinary(row)); + + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() + + " containing row " + Bytes.toStringBinary(row)); } if (isDeadServer(serverName)) { throw new RegionServerStoppedException( - "hbase:meta says the region " + regionInfo.getRegionNameAsString() - + " is managed by the server " + serverName + ", but it is dead."); + "hbase:meta says the region " + regionInfo.getRegionNameAsString() + + " is managed by the server " + serverName + ", but it is dead."); } // Instantiate the location cacheLocation(tableName, locations); @@ -1032,16 +1032,14 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { pauseBase = connectionConfig.getPauseMillisForServerOverloaded(); } if (tries < maxAttempts - 1) { - LOG.debug( - "locateRegionInMeta parentTable='{}', attempt={} of {} failed; retrying " - + "after sleep of {}", - TableName.META_TABLE_NAME, tries, maxAttempts, maxAttempts, e); + LOG.debug("locateRegionInMeta parentTable='{}', attempt={} of {} failed; retrying " + + "after sleep of {}", TableName.META_TABLE_NAME, tries, maxAttempts, maxAttempts, e); } else { throw e; } // Only relocate the parent region if necessary relocateMeta = - !(e instanceof RegionOfflineException || e instanceof NoServerForRegionException); + !(e instanceof RegionOfflineException || e instanceof NoServerForRegionException); } finally { userRegionLock.unlock(); } @@ -1049,7 +1047,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries)); } catch (InterruptedException e) { throw new InterruptedIOException( - "Giving up trying to location region in " + "meta: thread is interrupted."); + "Giving up trying to location region in " + "meta: thread is interrupted."); } } } @@ -1059,7 +1057,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { long waitTime = connectionConfig.getMetaOperationTimeout(); if (!userRegionLock.tryLock(waitTime, TimeUnit.MILLISECONDS)) { throw new LockTimeoutException("Failed to get user region lock in" + waitTime + " ms. " - + " for accessing meta region server."); + + " for accessing meta region server."); } } catch (InterruptedException ie) { LOG.error("Interrupted while waiting for a lock", ie); @@ -1070,7 +1068,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { /** * Put a newly discovered HRegionLocation into the cache. * @param tableName The table name. - * @param location the new location + * @param location the new location */ @Override public void cacheLocation(final TableName tableName, final RegionLocations location) { @@ -1111,11 +1109,11 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { /** * Put a newly discovered HRegionLocation into the cache. * @param tableName The table name. 
- * @param source the source of the new location, if it's not coming from meta - * @param location the new location + * @param source the source of the new location, if it's not coming from meta + * @param location the new location */ private void cacheLocation(final TableName tableName, final ServerName source, - final HRegionLocation location) { + final HRegionLocation location) { metaCache.cacheLocation(tableName, source, location); } @@ -1166,14 +1164,14 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { static class ServerErrorTracker { // We need a concurrent map here, as we could have multiple threads updating it in parallel. private final ConcurrentMap errorsByServer = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private final long canRetryUntil; private final int maxTries;// max number to try private final long startTrackingTime; /** * Constructor - * @param timeout how long to wait before timeout, in unit of millisecond + * @param timeout how long to wait before timeout, in unit of millisecond * @param maxTries how many times to try */ public ServerErrorTracker(long timeout, int maxTries) { @@ -1189,12 +1187,12 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { boolean canTryMore(int numAttempt) { // If there is a single try we must not take into account the time. return numAttempt < maxTries - || (maxTries > 1 && EnvironmentEdgeManager.currentTime() < this.canRetryUntil); + || (maxTries > 1 && EnvironmentEdgeManager.currentTime() < this.canRetryUntil); } /** * Calculates the back-off time for a retrying request to a particular server. - * @param server The server in question. + * @param server The server in question. * @param basePause The default hci pause. * @return The time to wait before sending next request. */ @@ -1243,7 +1241,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { private final class MasterServiceStubMaker { private void isMasterRunning(MasterProtos.MasterService.BlockingInterface stub) - throws IOException { + throws IOException { try { stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); } catch (ServiceException e) { @@ -1257,7 +1255,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { * @return A stub for master services. 
*/ private MasterProtos.MasterService.BlockingInterface makeStubNoRetries() - throws IOException, KeeperException { + throws IOException, KeeperException { ServerName sn = get(registry.getActiveMaster()); if (sn == null) { String msg = "ZooKeeper available but no active master location found"; @@ -1270,10 +1268,10 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { // Use the security info interface name as our stub key String key = getStubKey(MasterProtos.MasterService.getDescriptor().getName(), sn); MasterProtos.MasterService.BlockingInterface stub = - (MasterProtos.MasterService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { - BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); - return MasterProtos.MasterService.newBlockingStub(channel); - }); + (MasterProtos.MasterService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); + return MasterProtos.MasterService.newBlockingStub(channel); + }); isMasterRunning(stub); return stub; } @@ -1311,7 +1309,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public AdminProtos.AdminService.BlockingInterface getAdmin(ServerName serverName) - throws IOException { + throws IOException { checkClosed(); if (isDeadServer(serverName)) { throw new RegionServerStoppedException(serverName + " is dead."); @@ -1319,7 +1317,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { String key = getStubKey(AdminProtos.AdminService.BlockingInterface.class.getName(), serverName); return (AdminProtos.AdminService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); + this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); return AdminProtos.AdminService.newBlockingStub(channel); }); } @@ -1331,10 +1329,10 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { throw new RegionServerStoppedException(serverName + " is dead."); } String key = - getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(), serverName); + getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(), serverName); return (ClientProtos.ClientService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); + this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); return ClientProtos.ClientService.newBlockingStub(channel); }); } @@ -1365,330 +1363,330 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public MasterProtos.AbortProcedureResponse abortProcedure(RpcController controller, - MasterProtos.AbortProcedureRequest request) throws ServiceException { + MasterProtos.AbortProcedureRequest request) throws ServiceException { return stub.abortProcedure(controller, request); } @Override public MasterProtos.GetProceduresResponse getProcedures(RpcController controller, - MasterProtos.GetProceduresRequest request) throws ServiceException { + MasterProtos.GetProceduresRequest request) throws ServiceException { return stub.getProcedures(controller, request); } @Override public MasterProtos.GetLocksResponse getLocks(RpcController controller, - MasterProtos.GetLocksRequest request) throws ServiceException { + MasterProtos.GetLocksRequest 
request) throws ServiceException { return stub.getLocks(controller, request); } @Override public MasterProtos.AddColumnResponse addColumn(RpcController controller, - MasterProtos.AddColumnRequest request) throws ServiceException { + MasterProtos.AddColumnRequest request) throws ServiceException { return stub.addColumn(controller, request); } @Override public MasterProtos.DeleteColumnResponse deleteColumn(RpcController controller, - MasterProtos.DeleteColumnRequest request) throws ServiceException { + MasterProtos.DeleteColumnRequest request) throws ServiceException { return stub.deleteColumn(controller, request); } @Override public MasterProtos.ModifyColumnResponse modifyColumn(RpcController controller, - MasterProtos.ModifyColumnRequest request) throws ServiceException { + MasterProtos.ModifyColumnRequest request) throws ServiceException { return stub.modifyColumn(controller, request); } @Override public MasterProtos.MoveRegionResponse moveRegion(RpcController controller, - MasterProtos.MoveRegionRequest request) throws ServiceException { + MasterProtos.MoveRegionRequest request) throws ServiceException { return stub.moveRegion(controller, request); } @Override public MasterProtos.MergeTableRegionsResponse mergeTableRegions(RpcController controller, - MasterProtos.MergeTableRegionsRequest request) throws ServiceException { + MasterProtos.MergeTableRegionsRequest request) throws ServiceException { return stub.mergeTableRegions(controller, request); } @Override public MasterProtos.AssignRegionResponse assignRegion(RpcController controller, - MasterProtos.AssignRegionRequest request) throws ServiceException { + MasterProtos.AssignRegionRequest request) throws ServiceException { return stub.assignRegion(controller, request); } @Override public MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller, - MasterProtos.UnassignRegionRequest request) throws ServiceException { + MasterProtos.UnassignRegionRequest request) throws ServiceException { return stub.unassignRegion(controller, request); } @Override public MasterProtos.OfflineRegionResponse offlineRegion(RpcController controller, - MasterProtos.OfflineRegionRequest request) throws ServiceException { + MasterProtos.OfflineRegionRequest request) throws ServiceException { return stub.offlineRegion(controller, request); } @Override public MasterProtos.SplitTableRegionResponse splitRegion(RpcController controller, - MasterProtos.SplitTableRegionRequest request) throws ServiceException { + MasterProtos.SplitTableRegionRequest request) throws ServiceException { return stub.splitRegion(controller, request); } @Override public MasterProtos.DeleteTableResponse deleteTable(RpcController controller, - MasterProtos.DeleteTableRequest request) throws ServiceException { + MasterProtos.DeleteTableRequest request) throws ServiceException { return stub.deleteTable(controller, request); } @Override public MasterProtos.TruncateTableResponse truncateTable(RpcController controller, - MasterProtos.TruncateTableRequest request) throws ServiceException { + MasterProtos.TruncateTableRequest request) throws ServiceException { return stub.truncateTable(controller, request); } @Override public MasterProtos.EnableTableResponse enableTable(RpcController controller, - MasterProtos.EnableTableRequest request) throws ServiceException { + MasterProtos.EnableTableRequest request) throws ServiceException { return stub.enableTable(controller, request); } @Override public MasterProtos.DisableTableResponse disableTable(RpcController controller, - 
MasterProtos.DisableTableRequest request) throws ServiceException { + MasterProtos.DisableTableRequest request) throws ServiceException { return stub.disableTable(controller, request); } @Override public MasterProtos.ModifyTableResponse modifyTable(RpcController controller, - MasterProtos.ModifyTableRequest request) throws ServiceException { + MasterProtos.ModifyTableRequest request) throws ServiceException { return stub.modifyTable(controller, request); } @Override public MasterProtos.CreateTableResponse createTable(RpcController controller, - MasterProtos.CreateTableRequest request) throws ServiceException { + MasterProtos.CreateTableRequest request) throws ServiceException { return stub.createTable(controller, request); } @Override public MasterProtos.ShutdownResponse shutdown(RpcController controller, - MasterProtos.ShutdownRequest request) throws ServiceException { + MasterProtos.ShutdownRequest request) throws ServiceException { return stub.shutdown(controller, request); } @Override public MasterProtos.StopMasterResponse stopMaster(RpcController controller, - MasterProtos.StopMasterRequest request) throws ServiceException { + MasterProtos.StopMasterRequest request) throws ServiceException { return stub.stopMaster(controller, request); } @Override public MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( - final RpcController controller, final MasterProtos.IsInMaintenanceModeRequest request) - throws ServiceException { + final RpcController controller, final MasterProtos.IsInMaintenanceModeRequest request) + throws ServiceException { return stub.isMasterInMaintenanceMode(controller, request); } @Override public MasterProtos.BalanceResponse balance(RpcController controller, - MasterProtos.BalanceRequest request) throws ServiceException { + MasterProtos.BalanceRequest request) throws ServiceException { return stub.balance(controller, request); } @Override public MasterProtos.SetBalancerRunningResponse setBalancerRunning(RpcController controller, - MasterProtos.SetBalancerRunningRequest request) throws ServiceException { + MasterProtos.SetBalancerRunningRequest request) throws ServiceException { return stub.setBalancerRunning(controller, request); } @Override public NormalizeResponse normalize(RpcController controller, NormalizeRequest request) - throws ServiceException { + throws ServiceException { return stub.normalize(controller, request); } @Override public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller, - SetNormalizerRunningRequest request) throws ServiceException { + SetNormalizerRunningRequest request) throws ServiceException { return stub.setNormalizerRunning(controller, request); } @Override public MasterProtos.RunCatalogScanResponse runCatalogScan(RpcController controller, - MasterProtos.RunCatalogScanRequest request) throws ServiceException { + MasterProtos.RunCatalogScanRequest request) throws ServiceException { return stub.runCatalogScan(controller, request); } @Override public MasterProtos.EnableCatalogJanitorResponse enableCatalogJanitor( - RpcController controller, MasterProtos.EnableCatalogJanitorRequest request) - throws ServiceException { + RpcController controller, MasterProtos.EnableCatalogJanitorRequest request) + throws ServiceException { return stub.enableCatalogJanitor(controller, request); } @Override public MasterProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( - RpcController controller, MasterProtos.IsCatalogJanitorEnabledRequest request) - throws ServiceException { + RpcController controller, 
MasterProtos.IsCatalogJanitorEnabledRequest request) + throws ServiceException { return stub.isCatalogJanitorEnabled(controller, request); } @Override public MasterProtos.RunCleanerChoreResponse runCleanerChore(RpcController controller, - MasterProtos.RunCleanerChoreRequest request) throws ServiceException { + MasterProtos.RunCleanerChoreRequest request) throws ServiceException { return stub.runCleanerChore(controller, request); } @Override public MasterProtos.SetCleanerChoreRunningResponse setCleanerChoreRunning( - RpcController controller, MasterProtos.SetCleanerChoreRunningRequest request) - throws ServiceException { + RpcController controller, MasterProtos.SetCleanerChoreRunningRequest request) + throws ServiceException { return stub.setCleanerChoreRunning(controller, request); } @Override public MasterProtos.IsCleanerChoreEnabledResponse isCleanerChoreEnabled( - RpcController controller, MasterProtos.IsCleanerChoreEnabledRequest request) - throws ServiceException { + RpcController controller, MasterProtos.IsCleanerChoreEnabledRequest request) + throws ServiceException { return stub.isCleanerChoreEnabled(controller, request); } @Override public ClientProtos.CoprocessorServiceResponse execMasterService(RpcController controller, - ClientProtos.CoprocessorServiceRequest request) throws ServiceException { + ClientProtos.CoprocessorServiceRequest request) throws ServiceException { return stub.execMasterService(controller, request); } @Override public MasterProtos.SnapshotResponse snapshot(RpcController controller, - MasterProtos.SnapshotRequest request) throws ServiceException { + MasterProtos.SnapshotRequest request) throws ServiceException { return stub.snapshot(controller, request); } @Override public MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots( - RpcController controller, MasterProtos.GetCompletedSnapshotsRequest request) - throws ServiceException { + RpcController controller, MasterProtos.GetCompletedSnapshotsRequest request) + throws ServiceException { return stub.getCompletedSnapshots(controller, request); } @Override public MasterProtos.DeleteSnapshotResponse deleteSnapshot(RpcController controller, - MasterProtos.DeleteSnapshotRequest request) throws ServiceException { + MasterProtos.DeleteSnapshotRequest request) throws ServiceException { return stub.deleteSnapshot(controller, request); } @Override public MasterProtos.IsSnapshotDoneResponse isSnapshotDone(RpcController controller, - MasterProtos.IsSnapshotDoneRequest request) throws ServiceException { + MasterProtos.IsSnapshotDoneRequest request) throws ServiceException { return stub.isSnapshotDone(controller, request); } @Override public MasterProtos.RestoreSnapshotResponse restoreSnapshot(RpcController controller, - MasterProtos.RestoreSnapshotRequest request) throws ServiceException { + MasterProtos.RestoreSnapshotRequest request) throws ServiceException { return stub.restoreSnapshot(controller, request); } @Override public MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller, - MasterProtos.SetSnapshotCleanupRequest request) throws ServiceException { + MasterProtos.SetSnapshotCleanupRequest request) throws ServiceException { return stub.switchSnapshotCleanup(controller, request); } @Override public MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled( - RpcController controller, MasterProtos.IsSnapshotCleanupEnabledRequest request) - throws ServiceException { + RpcController controller, MasterProtos.IsSnapshotCleanupEnabledRequest request) + throws 
ServiceException { return stub.isSnapshotCleanupEnabled(controller, request); } @Override public MasterProtos.ExecProcedureResponse execProcedure(RpcController controller, - MasterProtos.ExecProcedureRequest request) throws ServiceException { + MasterProtos.ExecProcedureRequest request) throws ServiceException { return stub.execProcedure(controller, request); } @Override public MasterProtos.ExecProcedureResponse execProcedureWithRet(RpcController controller, - MasterProtos.ExecProcedureRequest request) throws ServiceException { + MasterProtos.ExecProcedureRequest request) throws ServiceException { return stub.execProcedureWithRet(controller, request); } @Override public MasterProtos.IsProcedureDoneResponse isProcedureDone(RpcController controller, - MasterProtos.IsProcedureDoneRequest request) throws ServiceException { + MasterProtos.IsProcedureDoneRequest request) throws ServiceException { return stub.isProcedureDone(controller, request); } @Override public MasterProtos.GetProcedureResultResponse getProcedureResult(RpcController controller, - MasterProtos.GetProcedureResultRequest request) throws ServiceException { + MasterProtos.GetProcedureResultRequest request) throws ServiceException { return stub.getProcedureResult(controller, request); } @Override public MasterProtos.IsMasterRunningResponse isMasterRunning(RpcController controller, - MasterProtos.IsMasterRunningRequest request) throws ServiceException { + MasterProtos.IsMasterRunningRequest request) throws ServiceException { return stub.isMasterRunning(controller, request); } @Override public MasterProtos.ModifyNamespaceResponse modifyNamespace(RpcController controller, - MasterProtos.ModifyNamespaceRequest request) throws ServiceException { + MasterProtos.ModifyNamespaceRequest request) throws ServiceException { return stub.modifyNamespace(controller, request); } @Override public MasterProtos.CreateNamespaceResponse createNamespace(RpcController controller, - MasterProtos.CreateNamespaceRequest request) throws ServiceException { + MasterProtos.CreateNamespaceRequest request) throws ServiceException { return stub.createNamespace(controller, request); } @Override public MasterProtos.DeleteNamespaceResponse deleteNamespace(RpcController controller, - MasterProtos.DeleteNamespaceRequest request) throws ServiceException { + MasterProtos.DeleteNamespaceRequest request) throws ServiceException { return stub.deleteNamespace(controller, request); } @Override public MasterProtos.ListNamespacesResponse listNamespaces(RpcController controller, - MasterProtos.ListNamespacesRequest request) throws ServiceException { + MasterProtos.ListNamespacesRequest request) throws ServiceException { return stub.listNamespaces(controller, request); } @Override public MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( - RpcController controller, MasterProtos.GetNamespaceDescriptorRequest request) - throws ServiceException { + RpcController controller, MasterProtos.GetNamespaceDescriptorRequest request) + throws ServiceException { return stub.getNamespaceDescriptor(controller, request); } @Override public MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( - RpcController controller, MasterProtos.ListNamespaceDescriptorsRequest request) - throws ServiceException { + RpcController controller, MasterProtos.ListNamespaceDescriptorsRequest request) + throws ServiceException { return stub.listNamespaceDescriptors(controller, request); } @Override public MasterProtos.ListTableDescriptorsByNamespaceResponse 
listTableDescriptorsByNamespace( - RpcController controller, MasterProtos.ListTableDescriptorsByNamespaceRequest request) - throws ServiceException { + RpcController controller, MasterProtos.ListTableDescriptorsByNamespaceRequest request) + throws ServiceException { return stub.listTableDescriptorsByNamespace(controller, request); } @Override public MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace( - RpcController controller, MasterProtos.ListTableNamesByNamespaceRequest request) - throws ServiceException { + RpcController controller, MasterProtos.ListTableNamesByNamespaceRequest request) + throws ServiceException { return stub.listTableNamesByNamespace(controller, request); } @Override public MasterProtos.GetTableStateResponse getTableState(RpcController controller, - MasterProtos.GetTableStateRequest request) throws ServiceException { + MasterProtos.GetTableStateRequest request) throws ServiceException { return stub.getTableState(controller, request); } @@ -1699,220 +1697,220 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { @Override public MasterProtos.GetSchemaAlterStatusResponse getSchemaAlterStatus( - RpcController controller, MasterProtos.GetSchemaAlterStatusRequest request) - throws ServiceException { + RpcController controller, MasterProtos.GetSchemaAlterStatusRequest request) + throws ServiceException { return stub.getSchemaAlterStatus(controller, request); } @Override public MasterProtos.GetTableDescriptorsResponse getTableDescriptors(RpcController controller, - MasterProtos.GetTableDescriptorsRequest request) throws ServiceException { + MasterProtos.GetTableDescriptorsRequest request) throws ServiceException { return stub.getTableDescriptors(controller, request); } @Override public MasterProtos.GetTableNamesResponse getTableNames(RpcController controller, - MasterProtos.GetTableNamesRequest request) throws ServiceException { + MasterProtos.GetTableNamesRequest request) throws ServiceException { return stub.getTableNames(controller, request); } @Override public MasterProtos.GetClusterStatusResponse getClusterStatus(RpcController controller, - MasterProtos.GetClusterStatusRequest request) throws ServiceException { + MasterProtos.GetClusterStatusRequest request) throws ServiceException { return stub.getClusterStatus(controller, request); } @Override public MasterProtos.SetQuotaResponse setQuota(RpcController controller, - MasterProtos.SetQuotaRequest request) throws ServiceException { + MasterProtos.SetQuotaRequest request) throws ServiceException { return stub.setQuota(controller, request); } @Override public MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( - RpcController controller, MasterProtos.MajorCompactionTimestampRequest request) - throws ServiceException { + RpcController controller, MasterProtos.MajorCompactionTimestampRequest request) + throws ServiceException { return stub.getLastMajorCompactionTimestamp(controller, request); } @Override public MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( - RpcController controller, MasterProtos.MajorCompactionTimestampForRegionRequest request) - throws ServiceException { + RpcController controller, MasterProtos.MajorCompactionTimestampForRegionRequest request) + throws ServiceException { return stub.getLastMajorCompactionTimestampForRegion(controller, request); } @Override public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller, - IsBalancerEnabledRequest request) throws 
ServiceException { + IsBalancerEnabledRequest request) throws ServiceException { return stub.isBalancerEnabled(controller, request); } @Override public MasterProtos.SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled( - RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest request) - throws ServiceException { + RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest request) + throws ServiceException { return stub.setSplitOrMergeEnabled(controller, request); } @Override public MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled( - RpcController controller, MasterProtos.IsSplitOrMergeEnabledRequest request) - throws ServiceException { + RpcController controller, MasterProtos.IsSplitOrMergeEnabledRequest request) + throws ServiceException { return stub.isSplitOrMergeEnabled(controller, request); } @Override public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, - IsNormalizerEnabledRequest request) throws ServiceException { + IsNormalizerEnabledRequest request) throws ServiceException { return stub.isNormalizerEnabled(controller, request); } @Override public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller, - SecurityCapabilitiesRequest request) throws ServiceException { + SecurityCapabilitiesRequest request) throws ServiceException { return stub.getSecurityCapabilities(controller, request); } @Override public AddReplicationPeerResponse addReplicationPeer(RpcController controller, - AddReplicationPeerRequest request) throws ServiceException { + AddReplicationPeerRequest request) throws ServiceException { return stub.addReplicationPeer(controller, request); } @Override public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, - RemoveReplicationPeerRequest request) throws ServiceException { + RemoveReplicationPeerRequest request) throws ServiceException { return stub.removeReplicationPeer(controller, request); } @Override public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, - EnableReplicationPeerRequest request) throws ServiceException { + EnableReplicationPeerRequest request) throws ServiceException { return stub.enableReplicationPeer(controller, request); } @Override public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller, - DisableReplicationPeerRequest request) throws ServiceException { + DisableReplicationPeerRequest request) throws ServiceException { return stub.disableReplicationPeer(controller, request); } @Override public ListDecommissionedRegionServersResponse listDecommissionedRegionServers( - RpcController controller, ListDecommissionedRegionServersRequest request) - throws ServiceException { + RpcController controller, ListDecommissionedRegionServersRequest request) + throws ServiceException { return stub.listDecommissionedRegionServers(controller, request); } @Override public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller, - DecommissionRegionServersRequest request) throws ServiceException { + DecommissionRegionServersRequest request) throws ServiceException { return stub.decommissionRegionServers(controller, request); } @Override public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller, - RecommissionRegionServerRequest request) throws ServiceException { + RecommissionRegionServerRequest request) throws ServiceException { return stub.recommissionRegionServer(controller, request); } @Override public 
GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller, - GetReplicationPeerConfigRequest request) throws ServiceException { + GetReplicationPeerConfigRequest request) throws ServiceException { return stub.getReplicationPeerConfig(controller, request); } @Override public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig( - RpcController controller, UpdateReplicationPeerConfigRequest request) - throws ServiceException { + RpcController controller, UpdateReplicationPeerConfigRequest request) + throws ServiceException { return stub.updateReplicationPeerConfig(controller, request); } @Override public ListReplicationPeersResponse listReplicationPeers(RpcController controller, - ListReplicationPeersRequest request) throws ServiceException { + ListReplicationPeersRequest request) throws ServiceException { return stub.listReplicationPeers(controller, request); } @Override public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller, - GetSpaceQuotaRegionSizesRequest request) throws ServiceException { + GetSpaceQuotaRegionSizesRequest request) throws ServiceException { return stub.getSpaceQuotaRegionSizes(controller, request); } @Override public GetQuotaStatesResponse getQuotaStates(RpcController controller, - GetQuotaStatesRequest request) throws ServiceException { + GetQuotaStatesRequest request) throws ServiceException { return stub.getQuotaStates(controller, request); } @Override public MasterProtos.ClearDeadServersResponse clearDeadServers(RpcController controller, - MasterProtos.ClearDeadServersRequest request) throws ServiceException { + MasterProtos.ClearDeadServersRequest request) throws ServiceException { return stub.clearDeadServers(controller, request); } @Override public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller, - SwitchRpcThrottleRequest request) throws ServiceException { + SwitchRpcThrottleRequest request) throws ServiceException { return stub.switchRpcThrottle(controller, request); } @Override public IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller, - IsRpcThrottleEnabledRequest request) throws ServiceException { + IsRpcThrottleEnabledRequest request) throws ServiceException { return stub.isRpcThrottleEnabled(controller, request); } @Override public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller, - SwitchExceedThrottleQuotaRequest request) throws ServiceException { + SwitchExceedThrottleQuotaRequest request) throws ServiceException { return stub.switchExceedThrottleQuota(controller, request); } @Override public AccessControlProtos.GrantResponse grant(RpcController controller, - AccessControlProtos.GrantRequest request) throws ServiceException { + AccessControlProtos.GrantRequest request) throws ServiceException { return stub.grant(controller, request); } @Override public AccessControlProtos.RevokeResponse revoke(RpcController controller, - AccessControlProtos.RevokeRequest request) throws ServiceException { + AccessControlProtos.RevokeRequest request) throws ServiceException { return stub.revoke(controller, request); } @Override public GetUserPermissionsResponse getUserPermissions(RpcController controller, - GetUserPermissionsRequest request) throws ServiceException { + GetUserPermissionsRequest request) throws ServiceException { return stub.getUserPermissions(controller, request); } @Override public HasUserPermissionsResponse hasUserPermissions(RpcController controller, - HasUserPermissionsRequest request) throws 
ServiceException { + HasUserPermissionsRequest request) throws ServiceException { return stub.hasUserPermissions(controller, request); } @Override public HBaseProtos.LogEntry getLogEntries(RpcController controller, - HBaseProtos.LogRequest request) throws ServiceException { + HBaseProtos.LogRequest request) throws ServiceException { return stub.getLogEntries(controller, request); } @Override public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker( - RpcController controller, ModifyTableStoreFileTrackerRequest request) - throws ServiceException { + RpcController controller, ModifyTableStoreFileTrackerRequest request) + throws ServiceException { return stub.modifyTableStoreFileTracker(controller, request); } @Override public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker( - RpcController controller, ModifyColumnStoreFileTrackerRequest request) - throws ServiceException { + RpcController controller, ModifyColumnStoreFileTrackerRequest request) + throws ServiceException { return stub.modifyColumnStoreFileTracker(controller, request); } }; @@ -1983,15 +1981,15 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { * it from the cache. Does nothing if we can be sure from the exception that the location is still * accurate, or if the cache has already been updated. * @param exception an object (to simplify user code) on which we will try to find a nested or - * wrapped or both RegionMovedException - * @param source server that is the source of the location update. + * wrapped or both RegionMovedException + * @param source server that is the source of the location update. */ @Override public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey, - final Object exception, final ServerName source) { + final Object exception, final ServerName source) { if (rowkey == null || tableName == null) { LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) - + ", tableName=" + (tableName == null ? "null" : tableName)); + + ", tableName=" + (tableName == null ? "null" : tableName)); return; } @@ -2033,7 +2031,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { RegionMovedException rme = (RegionMovedException) cause; if (LOG.isTraceEnabled()) { LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to " - + rme.getHostname() + ":" + rme.getPort() + " according to " + source.getAddress()); + + rme.getHostname() + ":" + rme.getPort() + " according to " + source.getAddress()); } // We know that the region is not anymore on this region server, but we know // the new location. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java index 975d8df7180..2ace3959ffa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java @@ -25,8 +25,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Registry for meta information needed for connection setup to a HBase cluster. Implementations - * hold cluster information such as this cluster's id, location of hbase:meta, etc.. - * Internal use only. + * hold cluster information such as this cluster's id, location of hbase:meta, etc.. Internal use + * only. 
*/ @InterfaceAudience.Private public interface ConnectionRegistry extends Closeable { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index e9af7e7b2e0..faaf61a4f52 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 6a92bc2f0a4..4baa48ce55e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -98,22 +98,22 @@ public final class ConnectionUtils { /** * @param conn The connection for which to replace the generator. - * @param cnm Replaces the nonce generator used, for testing. + * @param cnm Replaces the nonce generator used, for testing. * @return old nonce generator. */ public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn, - NonceGenerator cnm) { + NonceGenerator cnm) { return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm); } /** * Changes the configuration to set the number of retries needed when using Connection internally, * e.g. for updating catalog tables, etc. Call this method before we create any Connections. - * @param c The Configuration instance to set the retries into. + * @param c The Configuration instance to set the retries into. * @param log Used to log what we set in here. */ public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn, - final Logger log) { + final Logger log) { // TODO: Fix this. Not all connections from server side should have 10 times the retries. int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); @@ -381,7 +381,7 @@ public final class ConnectionUtils { } static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs, - boolean isRegionServerRemote) { + boolean isRegionServerRemote) { if (scanMetrics == null || rrs == null || rrs.length == 0) { return; } @@ -424,7 +424,7 @@ public final class ConnectionUtils { * increase the hedge read related metrics. */ private static void connect(CompletableFuture srcFuture, CompletableFuture dstFuture, - Optional metrics) { + Optional metrics) { addListener(srcFuture, (r, e) -> { if (e != null) { dstFuture.completeExceptionally(e); @@ -443,8 +443,8 @@ public final class ConnectionUtils { } private static void sendRequestsToSecondaryReplicas( - Function> requestReplica, RegionLocations locs, - CompletableFuture future, Optional metrics) { + Function> requestReplica, RegionLocations locs, + CompletableFuture future, Optional metrics) { if (future.isDone()) { // do not send requests to secondary replicas if the future is done, i.e, the primary request // has already been finished. 
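The ConnectionRegistry and ConnectionRegistryFactory hunks above concern the pluggable registry that resolves the cluster id and the hbase:meta location during connection setup. As a rough illustration only, a client can select a registry implementation through the configuration key imported in ConnectionRegistryFactory; the class name org.example.MyRegistry below is a hypothetical placeholder, not a class shipped with HBase.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegistryConfigSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical implementation class; when the key is unset the default registry is used.
    conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, "org.example.MyRegistry");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Cluster id and hbase:meta location lookups now go through the configured registry.
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}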
@@ -458,9 +458,9 @@ public final class ConnectionUtils { } static CompletableFuture timelineConsistentRead(AsyncRegionLocator locator, - TableName tableName, Query query, byte[] row, RegionLocateType locateType, - Function> requestReplica, long rpcTimeoutNs, - long primaryCallTimeoutNs, Timer retryTimer, Optional metrics) { + TableName tableName, Query query, byte[] row, RegionLocateType locateType, + Function> requestReplica, long rpcTimeoutNs, + long primaryCallTimeoutNs, Timer retryTimer, Optional metrics) { if (query.getConsistency() != Consistency.TIMELINE) { return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID); } @@ -480,8 +480,8 @@ public final class ConnectionUtils { (locs, error) -> { if (error != null) { LOG.warn( - "Failed to locate all the replicas for table={}, row='{}', locateType={}" + - " give up timeline consistent read", + "Failed to locate all the replicas for table={}, row='{}', locateType={}" + + " give up timeline consistent read", tableName, Bytes.toStringBinary(row), locateType, error); return; } @@ -536,7 +536,7 @@ public final class ConnectionUtils { *
 * <li>For system table, use {@link HConstants#SYSTEMTABLE_QOS}.</li>
 * <li>For other tables, use {@link HConstants#NORMAL_QOS}.</li>
  • * - * @param priority the priority set by user, can be {@link HConstants#PRIORITY_UNSET}. + * @param priority the priority set by user, can be {@link HConstants#PRIORITY_UNSET}. * @param tableName the table we operate on */ static int calcPriority(int priority, TableName tableName) { @@ -556,8 +556,8 @@ public final class ConnectionUtils { } static CompletableFuture getOrFetch(AtomicReference cacheRef, - AtomicReference> futureRef, boolean reload, - Supplier> fetch, Predicate validator, String type) { + AtomicReference> futureRef, boolean reload, + Supplier> fetch, Predicate validator, String type) { for (;;) { if (!reload) { T value = cacheRef.get(); @@ -600,7 +600,7 @@ public final class ConnectionUtils { } static void updateStats(Optional optStats, - Optional optMetrics, ServerName serverName, MultiResponse resp) { + Optional optMetrics, ServerName serverName, MultiResponse resp) { if (!optStats.isPresent() && !optMetrics.isPresent()) { // ServerStatisticTracker and MetricsConnection are both not present, just return return; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java index 533bd0f41b6..45dec17a695 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -27,22 +26,18 @@ import org.apache.yetus.audience.InterfaceAudience; public enum Consistency { // developer note: Do not reorder. Client.proto#Consistency depends on this order /** - * Strong consistency is the default consistency model in HBase, - * where reads and writes go through a single server which serializes - * the updates, and returns all data that was written and ack'd. + * Strong consistency is the default consistency model in HBase, where reads and writes go through + * a single server which serializes the updates, and returns all data that was written and ack'd. */ STRONG, /** - * Timeline consistent reads might return values that may not see - * the most recent updates. Write transactions are always performed - * in strong consistency model in HBase which guarantees that transactions - * are ordered, and replayed in the same order by all copies of the data. - * In timeline consistency, the get and scan requests can be answered from data - * that may be stale. - *
    - * The client may still observe transactions out of order if the requests are - * responded from different servers. + * Timeline consistent reads might return values that may not see the most recent updates. Write + * transactions are always performed in strong consistency model in HBase which guarantees that + * transactions are ordered, and replayed in the same order by all copies of the data. In timeline + * consistency, the get and scan requests can be answered from data that may be stale.
    + * The client may still observe transactions out of order if the requests are responded from + * different servers. */ TIMELINE, } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java index 72d588bc976..3331c810700 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,9 +22,9 @@ import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; /** - * CoprocessorDescriptor contains the details about how to build a coprocessor. - * This class is a pojo so there are no checks for the details carried by this class. - * Use {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor + * CoprocessorDescriptor contains the details about how to build a coprocessor. This class is a pojo + * so there are no checks for the details carried by this class. Use + * {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor */ @InterfaceAudience.Public public interface CoprocessorDescriptor { @@ -45,7 +44,7 @@ public interface CoprocessorDescriptor { int getPriority(); /** - * @return Arbitrary key-value parameter pairs passed into the coprocessor. + * @return Arbitrary key-value parameter pairs passed into the coprocessor. */ Map getProperties(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java index 71d1264c074..cb0caca21b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -109,9 +108,7 @@ public final class CoprocessorDescriptorBuilder { @Override public String toString() { - return "class:" + className - + ", jarPath:" + jarPath - + ", priority:" + priority + return "class:" + className + ", jarPath:" + jarPath + ", priority:" + priority + ", properties:" + properties; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java index 837e72d109c..73e128dfd8f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
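The reflowed Consistency javadoc above contrasts STRONG reads, which go through the single serializing server, with TIMELINE reads, which a possibly stale secondary replica may answer. A minimal client-side sketch, assuming an existing Connection plus a placeholder table name "t1" and row key "row1":

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimelineReadSketch {
  static Result timelineGet(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      // Allow any replica to answer; the result may lag the primary.
      get.setConsistency(Consistency.TIMELINE);
      Result result = table.get(get);
      if (result.isStale()) {
        // Served by a secondary region replica rather than the primary.
      }
      return result;
    }
  }
}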
*/ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Scan cursor to tell client where server is scanning - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Result#isCursor()} - * {@link Result#getCursor()} + * Scan cursor to tell client where server is scanning {@link Scan#setNeedCursorResult(boolean)} + * {@link Result#isCursor()} {@link Result#getCursor()} */ @InterfaceAudience.Public public class Cursor { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java index 8ab5d850d2d..218e90c1440 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.client; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; - -import java.util.List; -import java.util.Map; /** * A wrapper for a runnable for a group of actions for a single regionserver. @@ -32,10 +31,10 @@ import java.util.Map; *

    *

    * This class exists to simulate using a ScheduledExecutorService with just a regular - * ExecutorService and Runnables. It is used for legacy reasons in the the client; this could - * only be removed if we change the expectations in HTable around the pool the client is able to - * pass in and even if we deprecate the current APIs would require keeping this class around - * for the interim to bridge between the legacy ExecutorServices and the scheduled pool. + * ExecutorService and Runnables. It is used for legacy reasons in the the client; this could only + * be removed if we change the expectations in HTable around the pool the client is able to pass in + * and even if we deprecate the current APIs would require keeping this class around for the interim + * to bridge between the legacy ExecutorServices and the scheduled pool. *

    */ @InterfaceAudience.Private @@ -60,10 +59,9 @@ public class DelayingRunner implements Runnable { @Override public void run() { if (!sleep()) { - LOG.warn( - "Interrupted while sleeping for expected sleep time " + sleepTime + " ms"); + LOG.warn("Interrupted while sleeping for expected sleep time " + sleepTime + " ms"); } - //TODO maybe we should consider switching to a listenableFuture for the actual callable and + // TODO maybe we should consider switching to a listenableFuture for the actual callable and // then handling the results/errors as callbacks. That way we can decrement outstanding tasks // even if we get interrupted here, but for now, we still need to run so we decrement the // outstanding tasks @@ -73,12 +71,11 @@ public class DelayingRunner implements Runnable { /** * Sleep for an expected amount of time. *

    - * This is nearly a copy of what the Sleeper does, but with the ability to know if you - * got interrupted while sleeping. + * This is nearly a copy of what the Sleeper does, but with the ability to know if you got + * interrupted while sleeping. *

    - * - * @return true if the sleep completely entirely successfully, - * but otherwise false if the sleep was interrupted. + * @return true if the sleep completely entirely successfully, but otherwise + * false if the sleep was interrupted. */ private boolean sleep() { long now = EnvironmentEdgeManager.currentTime(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index c2f8ee5f119..d2674254752 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -35,97 +33,84 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Used to perform Delete operations on a single row. *

    - * To delete an entire row, instantiate a Delete object with the row - * to delete. To further define the scope of what to delete, perform - * additional methods as outlined below. + * To delete an entire row, instantiate a Delete object with the row to delete. To further define + * the scope of what to delete, perform additional methods as outlined below. *

    - * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} - * for each family to delete. + * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} for each family to + * delete. *

    - * To delete multiple versions of specific columns, execute - * {@link #addColumns(byte[], byte[]) deleteColumns} - * for each column to delete. + * To delete multiple versions of specific columns, execute {@link #addColumns(byte[], byte[]) + * deleteColumns} for each column to delete. *

    - * To delete specific versions of specific columns, execute - * {@link #addColumn(byte[], byte[], long) deleteColumn} - * for each column version to delete. + * To delete specific versions of specific columns, execute {@link #addColumn(byte[], byte[], long) + * deleteColumn} for each column version to delete. *

    - * Specifying timestamps, deleteFamily and deleteColumns will delete all - * versions with a timestamp less than or equal to that passed. If no - * timestamp is specified, an entry is added with a timestamp of 'now' - * where 'now' is the servers's EnvironmentEdgeManager.currentTime(). - * Specifying a timestamp to the deleteColumn method will - * delete versions only with a timestamp equal to that specified. - * If no timestamp is passed to deleteColumn, internally, it figures the - * most recent cell's timestamp and adds a delete at that timestamp; i.e. - * it deletes the most recently added cell. - *

    The timestamp passed to the constructor is used ONLY for delete of - * rows. For anything less -- a deleteColumn, deleteColumns or - * deleteFamily -- then you need to use the method overrides that take a - * timestamp. The constructor timestamp is not referenced. + * Specifying timestamps, deleteFamily and deleteColumns will delete all versions with a timestamp + * less than or equal to that passed. If no timestamp is specified, an entry is added with a + * timestamp of 'now' where 'now' is the servers's EnvironmentEdgeManager.currentTime(). Specifying + * a timestamp to the deleteColumn method will delete versions only with a timestamp equal to that + * specified. If no timestamp is passed to deleteColumn, internally, it figures the most recent + * cell's timestamp and adds a delete at that timestamp; i.e. it deletes the most recently added + * cell. + *

    + * The timestamp passed to the constructor is used ONLY for delete of rows. For anything less -- a + * deleteColumn, deleteColumns or deleteFamily -- then you need to use the method overrides that + * take a timestamp. The constructor timestamp is not referenced. */ @InterfaceAudience.Public public class Delete extends Mutation { /** * Create a Delete operation for the specified row. *

    - * If no further operations are done, this will delete everything - * associated with the specified row (all versions of all columns in all - * families), with timestamp from current point in time to the past. - * Cells defining timestamp for a future point in time - * (timestamp > current time) will not be deleted. + * If no further operations are done, this will delete everything associated with the specified + * row (all versions of all columns in all families), with timestamp from current point in time to + * the past. Cells defining timestamp for a future point in time (timestamp > current time) will + * not be deleted. * @param row row key */ - public Delete(byte [] row) { + public Delete(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. - * @param row row key + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. + * @param row row key * @param timestamp maximum version timestamp (only for delete row) */ - public Delete(byte [] row, long timestamp) { + public Delete(byte[] row, long timestamp) { this(row, 0, row.length, timestamp); } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. - * @param row We make a local copy of this passed in row. - * @param rowOffset - * @param rowLength + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. + * @param row We make a local copy of this passed in row. nn */ public Delete(final byte[] row, final int rowOffset, final int rowLength) { this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP); } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. - * @param row We make a local copy of this passed in row. - * @param rowOffset - * @param rowLength - * @param timestamp maximum version timestamp (only for delete row) + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. + * @param row We make a local copy of this passed in row. nn * @param timestamp maximum version + * timestamp (only for delete row) */ public Delete(final byte[] row, final int rowOffset, final int rowLength, long timestamp) { checkRow(row, rowOffset, rowLength); @@ -141,25 +126,22 @@ public class Delete extends Mutation { } /** - * Construct the Delete with user defined data. NOTED: - * 1) all cells in the familyMap must have the delete type. - * see {@link org.apache.hadoop.hbase.Cell.Type} - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Delete with user defined data. NOTED: 1) all cells in the familyMap must have the + * delete type. see {@link org.apache.hadoop.hbase.Cell.Type} 2) the row of each cell must be same + * with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Delete(byte[] row, long ts, NavigableMap> familyMap) { + public Delete(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Advanced use only. Add an existing delete marker to this Delete object. * @param kv An existing KeyValue of type "delete". - * @return this for invocation chaining - * @throws IOException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)} - * instead + * @return this for invocation chaining n * @deprecated As of release 2.0.0, this will be removed + * in HBase 3.0.0. Use {@link #add(Cell)} instead */ @SuppressWarnings("unchecked") @Deprecated @@ -170,8 +152,7 @@ public class Delete extends Mutation { /** * Add an existing delete marker to this Delete object. * @param cell An existing cell of type "delete". - * @return this for invocation chaining - * @throws IOException + * @return this for invocation chaining n */ public Delete add(Cell cell) throws IOException { super.add(cell); @@ -181,32 +162,30 @@ public class Delete extends Mutation { /** * Delete all versions of all columns of the specified family. *

    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. * @param family family name * @return this for invocation chaining */ - public Delete addFamily(final byte [] family) { + public Delete addFamily(final byte[] family) { this.addFamily(family, this.ts); return this; } /** - * Delete all columns of the specified family with a timestamp less than - * or equal to the specified timestamp. + * Delete all columns of the specified family with a timestamp less than or equal to the specified + * timestamp. *

    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. - * @param family family name + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. + * @param family family name * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addFamily(final byte [] family, final long timestamp) { + public Delete addFamily(final byte[] family, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - if(!list.isEmpty()) { + if (!list.isEmpty()) { list.clear(); } KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily); @@ -215,70 +194,66 @@ public class Delete extends Mutation { } /** - * Delete all columns of the specified family with a timestamp equal to - * the specified timestamp. - * @param family family name + * Delete all columns of the specified family with a timestamp equal to the specified timestamp. + * @param family family name * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addFamilyVersion(final byte [] family, final long timestamp) { + public Delete addFamilyVersion(final byte[] family, final long timestamp) { List list = getCellList(family); - list.add(new KeyValue(row, family, null, timestamp, - KeyValue.Type.DeleteFamilyVersion)); + list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion)); return this; } /** * Delete all versions of the specified column. - * @param family family name + * @param family family name * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier) { + public Delete addColumns(final byte[] family, final byte[] qualifier) { addColumns(family, qualifier, this.ts); return this; } /** - * Delete all versions of the specified column with a timestamp less than - * or equal to the specified timestamp. - * @param family family name + * Delete all versions of the specified column with a timestamp less than or equal to the + * specified timestamp. + * @param family family name * @param qualifier column qualifier * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier, final long timestamp) { + public Delete addColumns(final byte[] family, final byte[] qualifier, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - list.add(new KeyValue(this.row, family, qualifier, timestamp, - KeyValue.Type.DeleteColumn)); + list.add(new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.DeleteColumn)); return this; } /** - * Delete the latest version of the specified column. - * This is an expensive call in that on the server-side, it first does a - * get to find the latest versions timestamp. Then it adds a delete using - * the fetched cells timestamp. - * @param family family name + * Delete the latest version of the specified column. This is an expensive call in that on the + * server-side, it first does a get to find the latest versions timestamp. Then it adds a delete + * using the fetched cells timestamp. 
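The reflowed Delete javadoc in this file spells out the scoping rules: a bare Delete removes the whole row, addFamily removes a family, addColumns removes all versions of a column, and addColumn removes a single version (the latest one, at extra server-side cost, when no timestamp is given). A short usage sketch, assuming an existing Connection and placeholder table, family, qualifier and timestamp values:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteSketch {
  static void deleteExamples(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      // Remove everything stored for the row: all families, columns and versions.
      table.delete(new Delete(Bytes.toBytes("row1")));

      // Remove one whole family, plus every version of one column in another family.
      Delete scoped = new Delete(Bytes.toBytes("row2")).addFamily(Bytes.toBytes("cf1"))
        .addColumns(Bytes.toBytes("cf2"), Bytes.toBytes("q1"));
      table.delete(scoped);

      // Remove only the cell written at an exact (placeholder) timestamp; without the
      // timestamp argument, addColumn deletes the most recent version instead.
      table.delete(new Delete(Bytes.toBytes("row3")).addColumn(Bytes.toBytes("cf1"),
        Bytes.toBytes("q1"), 1234L));
    }
  }
}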
+ * @param family family name * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumn(final byte [] family, final byte [] qualifier) { + public Delete addColumn(final byte[] family, final byte[] qualifier) { this.addColumn(family, qualifier, this.ts); return this; } /** * Delete the specified version of the specified column. - * @param family family name + * @param family family name * @param qualifier column qualifier * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addColumn(byte [] family, byte [] qualifier, long timestamp) { + public Delete addColumn(byte[] family, byte[] qualifier, long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } @@ -311,8 +286,8 @@ public class Delete extends Mutation { /** * Method for setting the Delete's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Delete#Delete(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Delete#Delete(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java index 9419137842f..4bc7a76514a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java index aaf0b5cc732..7ee451b982c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java @@ -15,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Enum describing the durability guarantees for tables and {@link Mutation}s - * Note that the items must be sorted in order of increasing durability + * Enum describing the durability guarantees for tables and {@link Mutation}s Note that the items + * must be sorted in order of increasing durability */ @InterfaceAudience.Public public enum Durability { /* Developer note: Do not rename the enum field names. They are serialized in HTableDescriptor */ /** - * If this is for tables durability, use HBase's global default value (SYNC_WAL). - * Otherwise, if this is for mutation, use the table's default setting to determine durability. - * This must remain the first option. + * If this is for tables durability, use HBase's global default value (SYNC_WAL). Otherwise, if + * this is for mutation, use the table's default setting to determine durability. This must remain + * the first option. 
*/ USE_DEFAULT, /** @@ -42,15 +41,15 @@ public enum Durability { */ ASYNC_WAL, /** - * Write the Mutation to the WAL synchronously. - * The data is flushed to the filesystem implementation, but not necessarily to disk. - * For HDFS this will flush the data to the designated number of DataNodes. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously. The data is flushed to the filesystem + * implementation, but not necessarily to disk. For HDFS this will flush the data to the + * designated number of DataNodes. See + * HADOOP-6313 */ SYNC_WAL, /** - * Write the Mutation to the WAL synchronously and force the entries to disk. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously and force the entries to disk. See + * HADOOP-6313 */ FSYNC_WAL } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java index 163203b48ab..5ab59232e51 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,15 +19,12 @@ package org.apache.hadoop.hbase.client; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; /** * Keeps track of repeated failures to any region server. Multiple threads manipulate the contents - * of this thread. - * - * Access to the members is guarded by the concurrent nature of the members inherently. - * + * of this thread. Access to the members is guarded by the concurrent nature of the members + * inherently. */ @InterfaceAudience.Private class FailureInfo { @@ -46,12 +43,10 @@ class FailureInfo { @Override public String toString() { - return "FailureInfo: numConsecutiveFailures = " - + numConsecutiveFailures + " timeOfFirstFailureMilliSec = " - + timeOfFirstFailureMilliSec + " timeOfLatestAttemptMilliSec = " - + timeOfLatestAttemptMilliSec - + " exclusivelyRetringInspiteOfFastFail = " - + exclusivelyRetringInspiteOfFastFail.get(); + return "FailureInfo: numConsecutiveFailures = " + numConsecutiveFailures + + " timeOfFirstFailureMilliSec = " + timeOfFirstFailureMilliSec + + " timeOfLatestAttemptMilliSec = " + timeOfLatestAttemptMilliSec + + " exclusivelyRetringInspiteOfFastFail = " + exclusivelyRetringInspiteOfFastFail.get(); } FailureInfo(long firstFailureTime) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java index 6b0e79096fc..aa701c848d7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
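The reflowed Durability javadoc above orders the levels from least to most durable, with USE_DEFAULT deferring to the table's own setting. A small sketch of picking a level per mutation, assuming an existing Connection and placeholder table, family and qualifier names:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DurabilitySketch {
  static void putWithDurability(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      // ASYNC_WAL writes the WAL entry asynchronously; SYNC_WAL or FSYNC_WAL trade
      // latency for stronger guarantees, and USE_DEFAULT follows the table setting.
      put.setDurability(Durability.ASYNC_WAL);
      table.put(put);
    }
  }
}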
See the NOTICE file * distributed with this work for additional information @@ -78,8 +78,7 @@ class FastFailInterceptorContext extends RetryingCallerInterceptorContext { return retryDespiteFastFailMode; } - public void setCouldNotCommunicateWithServer( - MutableBoolean couldNotCommunicateWithServer) { + public void setCouldNotCommunicateWithServer(MutableBoolean couldNotCommunicateWithServer) { this.couldNotCommunicateWithServer = couldNotCommunicateWithServer; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java index b0eea86a8bc..6ccceee7003 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; @@ -28,6 +26,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; @@ -43,16 +42,15 @@ public class FlushRegionCallable extends RegionAdminServiceCallable - * To get everything for a row, instantiate a Get object with the row to get. - * To further narrow the scope of what to Get, use the methods below. + * To get everything for a row, instantiate a Get object with the row to get. To further narrow the + * scope of what to Get, use the methods below. *

    - * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} - * for each family to retrieve. + * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} for each + * family to retrieve. *

    - * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} - * for each column to retrieve. + * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} for each column to + * retrieve. *

    - * To only retrieve columns within a specific range of version timestamps, - * execute {@link #setTimeRange(long, long) setTimeRange}. + * To only retrieve columns within a specific range of version timestamps, execute + * {@link #setTimeRange(long, long) setTimeRange}. *

    - * To only retrieve columns with a specific timestamp, execute - * {@link #setTimestamp(long) setTimestamp}. + * To only retrieve columns with a specific timestamp, execute {@link #setTimestamp(long) + * setTimestamp}. *

    * To limit the number of versions of each column to be returned, execute * {@link #setMaxVersions(int) setMaxVersions}. @@ -67,7 +64,7 @@ import org.apache.hadoop.hbase.util.Bytes; public class Get extends Query implements Row { private static final Logger LOG = LoggerFactory.getLogger(Get.class); - private byte [] row = null; + private byte[] row = null; private int maxVersions = 1; private boolean cacheBlocks = true; private int storeLimit = -1; @@ -75,24 +72,22 @@ public class Get extends Query implements Row { private TimeRange tr = TimeRange.allTime(); private boolean checkExistenceOnly = false; private boolean closestRowBefore = false; - private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Create a Get operation for the specified row. *

    - * If no further operations are done, this will get the latest version of - * all columns in all families of the specified row. + * If no further operations are done, this will get the latest version of all columns in all + * families of the specified row. * @param row row key */ - public Get(byte [] row) { + public Get(byte[] row) { Mutation.checkRow(row); this.row = row; } /** - * Copy-constructor - * - * @param get + * Copy-constructor n */ public Get(Get get) { this(get.getRow()); @@ -109,8 +104,8 @@ public class Get extends Query implements Row { this.checkExistenceOnly = get.isCheckExistenceOnly(); this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); Map> fams = get.getFamilyMap(); - for (Map.Entry> entry : fams.entrySet()) { - byte [] fam = entry.getKey(); + for (Map.Entry> entry : fams.entrySet()) { + byte[] fam = entry.getKey(); NavigableSet cols = entry.getValue(); if (cols != null && cols.size() > 0) { for (byte[] col : cols) { @@ -131,10 +126,7 @@ public class Get extends Query implements Row { } /** - * Create a Get operation for the specified row. - * @param row - * @param rowOffset - * @param rowLength + * Create a Get operation for the specified row. nnn */ public Get(byte[] row, int rowOffset, int rowLength) { Mutation.checkRow(row, rowOffset, rowLength); @@ -142,8 +134,7 @@ public class Get extends Query implements Row { } /** - * Create a Get operation for the specified row. - * @param row + * Create a Get operation for the specified row. n */ public Get(ByteBuffer row) { Mutation.checkRow(row); @@ -187,7 +178,7 @@ public class Get extends Query implements Row { * @param family family name * @return the Get object */ - public Get addFamily(byte [] family) { + public Get addFamily(byte[] family) { familyMap.remove(family); familyMap.put(family, null); return this; @@ -197,13 +188,13 @@ public class Get extends Query implements Row { * Get the column from the specific family with the specified qualifier. *

    * Overrides previous calls to addFamily for this family. - * @param family family name + * @param family family name * @param qualifier column qualifier * @return the Get objec */ - public Get addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { + public Get addColumn(byte[] family, byte[] qualifier) { + NavigableSet set = familyMap.get(family); + if (set == null) { set = new TreeSet<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, set); } @@ -215,12 +206,9 @@ public class Get extends Query implements Row { } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @throws IOException - * @return this for invocation chaining + * @param maxStamp maximum timestamp value, exclusive n * @return this for invocation chaining */ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { tr = new TimeRange(minStamp, maxStamp); @@ -231,8 +219,8 @@ public class Get extends Query implements Row { * Get versions of columns with the specified timestamp. * @param timestamp version timestamp * @return this for invocation chaining - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #setTimestamp(long)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #setTimestamp(long)} instead */ @Deprecated public Get setTimeStamp(long timestamp) throws IOException { @@ -247,7 +235,7 @@ public class Get extends Query implements Row { public Get setTimestamp(long timestamp) { try { tr = new TimeRange(timestamp, timestamp + 1); - } catch(Exception e) { + } catch (Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); throw e; @@ -256,7 +244,8 @@ public class Get extends Query implements Row { return this; } - @Override public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { + @Override + public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { return (Get) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); } @@ -344,12 +333,9 @@ public class Get extends Query implements Row { /** * Set whether blocks should be cached for this Get. *
    * <p>
    - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached + * This is true by default. When true, default settings of the table and family are used (this + * will never override caching blocks if the block cache is disabled for that family or entirely). + * @param cacheBlocks if false, default settings are overridden and blocks will not be cached */ public Get setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; @@ -358,19 +344,17 @@ public class Get extends Query implements Row { /** * Get whether blocks should be cached for this Get. - * @return true if default caching should be used, false if blocks should not - * be cached + * @return true if default caching should be used, false if blocks should not be cached */ public boolean getCacheBlocks() { return cacheBlocks; } /** - * Method for retrieving the get's row - * @return row + * Method for retrieving the get's row n */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } @@ -383,8 +367,7 @@ public class Get extends Query implements Row { } /** - * Method for retrieving the get's maximum number of values - * to return per Column Family + * Method for retrieving the get's maximum number of values to return per Column Family * @return the maximum number of values to fetch per CF */ public int getMaxResultsPerColumnFamily() { @@ -392,8 +375,7 @@ public class Get extends Query implements Row { } /** - * Method for retrieving the get's offset per row per column - * family (#kvs to be skipped) + * Method for retrieving the get's offset per row per column family (#kvs to be skipped) * @return the row offset */ public int getRowOffsetPerColumnFamily() { @@ -401,8 +383,7 @@ public class Get extends Query implements Row { } /** - * Method for retrieving the get's TimeRange - * @return timeRange + * Method for retrieving the get's TimeRange n */ public TimeRange getTimeRange() { return this.tr; @@ -433,37 +414,32 @@ public class Get extends Query implements Row { } /** - * Method for retrieving the get's familyMap - * @return familyMap + * Method for retrieving the get's familyMap n */ - public Map> getFamilyMap() { + public Map> getFamilyMap() { return this.familyMap; } /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map + * Compile the table and column family (i.e. schema) information into a String. Useful for parsing + * and aggregation by debugging, logging, and administration tools. n */ @Override public Map getFingerprint() { Map map = new HashMap<>(); List families = new ArrayList<>(this.familyMap.entrySet().size()); map.put("families", families); - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. 
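As a quick orientation to the Get API whose javadoc is reflowed above, a minimal client-side sketch follows; the table name "my_table", family "cf", qualifier "q" and the row key are placeholders chosen for illustration, not values taken from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("my_table"))) { // placeholder table
      Get get = new Get(Bytes.toBytes("row-1"));              // latest version of all columns by default
      get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")); // narrow to one family:qualifier
      get.setTimeRange(0L, Long.MAX_VALUE);                   // [minStamp, maxStamp), here effectively unbounded
      get.setCacheBlocks(true);                               // keep the table/family block-caching defaults
      Result result = table.get(get);
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      System.out.println(value == null ? "<no cell>" : Bytes.toString(value));
    }
  }
}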
- * @param maxCols a limit on the number of columns output prior to truncation - * @return Map + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. + * @param maxCols a limit on the number of columns output prior to truncation n */ @Override public Map toMap(int maxCols) { @@ -483,11 +459,10 @@ public class Get extends Query implements Row { map.put("timeRange", timeRange); int colCount = 0; // iterate through affected families and add details - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { List familyList = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), familyList); - if(entry.getValue() == null) { + if (entry.getValue() == null) { colCount++; --maxCols; familyList.add("ALL"); @@ -496,7 +471,7 @@ public class Get extends Query implements Row { if (maxCols <= 0) { continue; } - for (byte [] column : entry.getValue()) { + for (byte[] column : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -515,16 +490,16 @@ public class Get extends Query implements Row { return map; } - //Row + // Row @Override public int compareTo(Row other) { - // TODO: This is wrong. Can't have two gets the same just because on same row. + // TODO: This is wrong. Can't have two gets the same just because on same row. return Bytes.compareTo(this.getRow(), other.getRow()); } @Override public int hashCode() { - // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // TODO: This is wrong. Can't have two gets the same just because on same row. But it // matches how equals works currently and gets rid of the findbugs warning. return Bytes.hashCode(this.getRow()); } @@ -538,7 +513,7 @@ public class Get extends Query implements Row { return false; } Row other = (Row) obj; - // TODO: This is wrong. Can't have two gets the same just because on same row. + // TODO: This is wrong. Can't have two gets the same just because on same row. return compareTo(other) == 0; } @@ -579,7 +554,7 @@ public class Get extends Query implements Row { @Override public Get setIsolationLevel(IsolationLevel level) { - return (Get) super.setIsolationLevel(level); + return (Get) super.setIsolationLevel(level); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 6cf018468eb..c79aee717b8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -237,17 +237,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; /** - * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that - * this is an HBase-internal class as defined in + * HBaseAdmin is no longer a client API. 
It is marked InterfaceAudience.Private indicating that this + * is an HBase-internal class as defined in * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html * There are no guarantees for backwards source / binary compatibility and methods or class can - * change or go away without deprecation. - * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing - * an HBaseAdmin directly. - * - *
    <p>
    Connection should be an unmanaged connection obtained via + * change or go away without deprecation. Use {@link Connection#getAdmin()} to obtain an instance of + * {@link Admin} instead of constructing an HBaseAdmin directly. + *
    <p>
    + * Connection should be an unmanaged connection obtained via * {@link ConnectionFactory#createConnection(Configuration)} - * * @see ConnectionFactory * @see Connection * @see Admin @@ -287,18 +285,17 @@ public class HBaseAdmin implements Admin { this.connection = connection; // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time. - this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.pause = + this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - this.syncWaitTimeout = this.conf.getInt( - "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.rpcTimeout = + this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.syncWaitTimeout = this.conf.getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min this.getProcedureTimeout = - this.conf.getInt("hbase.client.procedure.future.get.timeout.msec", 10 * 60000); // 10min + this.conf.getInt("hbase.client.procedure.future.get.timeout.msec", 10 * 60000); // 10min this.rpcCallerFactory = connection.getRpcRetryingCallerFactory(); this.rpcControllerFactory = connection.getRpcControllerFactory(); @@ -320,52 +317,51 @@ public class HBaseAdmin implements Admin { @Override public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) - throws IOException { + throws IOException { return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout, TimeUnit.MILLISECONDS); } @Override public Future abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning) - throws IOException { - Boolean abortProcResponse = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - @Override - protected AbortProcedureResponse rpcCall() throws Exception { - AbortProcedureRequest abortProcRequest = + throws IOException { + Boolean abortProcResponse = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + @Override + protected AbortProcedureResponse rpcCall() throws Exception { + AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder().setProcId(procId).build(); - return master.abortProcedure(getRpcController(), abortProcRequest); - } - }).getIsProcedureAborted(); + return master.abortProcedure(getRpcController(), abortProcRequest); + } + }).getIsProcedureAborted(); return new AbortProcedureFuture(this, procId, abortProcResponse); } @Override public List listTableDescriptors() throws IOException { - return listTableDescriptors((Pattern)null, false); + return listTableDescriptors((Pattern) null, false); } @Override public List listTableDescriptors(Pattern pattern, boolean includeSysTables) - throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - GetTableDescriptorsRequest req = + throws IOException { + return executeCallable( + new 
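In line with the class comment above, callers obtain an Admin from an unmanaged Connection instead of constructing HBaseAdmin directly; a minimal sketch, assuming only default client configuration:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class AdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Unmanaged connection, as the class comment requires; closed by try-with-resources.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Backed by the listTableDescriptors() MasterCallable shown in this hunk.
      List<TableDescriptor> tables = admin.listTableDescriptors();
      for (TableDescriptor td : tables) {
        System.out.println(td.getTableName());
      }
    }
  }
}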
MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); - return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), - req)); - } - }); + return ProtobufUtil + .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)); + } + }); } @Override public TableDescriptor getDescriptor(TableName tableName) - throws TableNotFoundException, IOException { + throws TableNotFoundException, IOException { return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, - operationTimeout, rpcTimeout); + operationTimeout, rpcTimeout); } @Override @@ -386,7 +382,6 @@ public class HBaseAdmin implements Admin { return new ModifyTableFuture(this, td.getTableName(), response); } - @Override public Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT) throws IOException { @@ -422,33 +417,32 @@ public class HBaseAdmin implements Admin { @Override public List listTableDescriptorsByNamespace(byte[] name) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - return master.listTableDescriptorsByNamespace(getRpcController(), - ListTableDescriptorsByNamespaceRequest.newBuilder() - .setNamespaceName(Bytes.toString(name)).build()) - .getTableSchemaList() - .stream() - .map(ProtobufUtil::toTableDescriptor) - .collect(Collectors.toList()); - } - }); + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + return master + .listTableDescriptorsByNamespace(getRpcController(), + ListTableDescriptorsByNamespaceRequest.newBuilder() + .setNamespaceName(Bytes.toString(name)).build()) + .getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor) + .collect(Collectors.toList()); + } + }); } @Override public List listTableDescriptors(List tableNames) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - GetTableDescriptorsRequest req = + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(tableNames); - return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), - req)); - } - }); + return ProtobufUtil + .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)); + } + }); } @Override @@ -471,17 +465,15 @@ public class HBaseAdmin implements Admin { private static class AbortProcedureFuture extends ProcedureFuture { private boolean isAbortInProgress; - public AbortProcedureFuture( - final HBaseAdmin admin, - final Long procId, - final Boolean abortProcResponse) { + public AbortProcedureFuture(final HBaseAdmin admin, final Long procId, + final Boolean abortProcResponse) { super(admin, procId); this.isAbortInProgress = abortProcResponse; } @Override public Boolean get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { + throws InterruptedException, ExecutionException, TimeoutException { if (!this.isAbortInProgress) { return false; } @@ -508,7 +500,7 @@ 
public class HBaseAdmin implements Admin { @Override public HTableDescriptor[] listTables() throws IOException { - return listTables((Pattern)null, false); + return listTables((Pattern) null, false); } @Override @@ -523,28 +515,28 @@ public class HBaseAdmin implements Admin { @Override public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables) - throws IOException { - return executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - @Override - protected HTableDescriptor[] rpcCall() throws Exception { - GetTableDescriptorsRequest req = + throws IOException { + return executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + @Override + protected HTableDescriptor[] rpcCall() throws Exception { + GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); - return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), - req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new); - } - }); + return ProtobufUtil + .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream() + .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new); + } + }); } @Override - public HTableDescriptor[] listTables(String regex, boolean includeSysTables) - throws IOException { + public HTableDescriptor[] listTables(String regex, boolean includeSysTables) throws IOException { return listTables(Pattern.compile(regex), includeSysTables); } @Override public TableName[] listTableNames() throws IOException { - return listTableNames((Pattern)null, false); + return listTableNames((Pattern) null, false); } @Override @@ -554,48 +546,48 @@ public class HBaseAdmin implements Admin { @Override public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables) - throws IOException { - return executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - @Override - protected TableName[] rpcCall() throws Exception { - GetTableNamesRequest req = + throws IOException { + return executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + @Override + protected TableName[] rpcCall() throws Exception { + GetTableNamesRequest req = RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables); - return ProtobufUtil.getTableNameArray(master.getTableNames(getRpcController(), req) - .getTableNamesList()); - } - }); + return ProtobufUtil + .getTableNameArray(master.getTableNames(getRpcController(), req).getTableNamesList()); + } + }); } @Override public TableName[] listTableNames(final String regex, final boolean includeSysTables) - throws IOException { + throws IOException { return listTableNames(Pattern.compile(regex), includeSysTables); } @Override public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException { return getHTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, - operationTimeout, rpcTimeout); + operationTimeout, rpcTimeout); } static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection, - RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, - int operationTimeout, int rpcTimeout) throws IOException { + RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, + int operationTimeout, int rpcTimeout) throws IOException { if (tableName == null) return null; 
TableDescriptor td = - executeCallable(new MasterCallable(connection, rpcControllerFactory) { - @Override - protected TableDescriptor rpcCall() throws Exception { - GetTableDescriptorsRequest req = + executeCallable(new MasterCallable(connection, rpcControllerFactory) { + @Override + protected TableDescriptor rpcCall() throws Exception { + GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(tableName); - GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); - if (!htds.getTableSchemaList().isEmpty()) { - return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); + GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); + if (!htds.getTableSchemaList().isEmpty()) { + return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); + } + return null; } - return null; - } - }, rpcCallerFactory, operationTimeout, rpcTimeout); + }, rpcCallerFactory, operationTimeout, rpcTimeout); if (td != null) { return td; } @@ -603,31 +595,30 @@ public class HBaseAdmin implements Admin { } /** - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #getTableDescriptor(TableName, - * Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #getTableDescriptor(TableName, Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)} */ @Deprecated static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection, - RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, - int operationTimeout, int rpcTimeout) throws IOException { + RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, + int operationTimeout, int rpcTimeout) throws IOException { if (tableName == null) { return null; } HTableDescriptor htd = - executeCallable(new MasterCallable(connection, rpcControllerFactory) { - @Override - protected HTableDescriptor rpcCall() throws Exception { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); - GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); - if (!htds.getTableSchemaList().isEmpty()) { - return new ImmutableHTableDescriptor( - ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0))); - } - return null; + executeCallable(new MasterCallable(connection, rpcControllerFactory) { + @Override + protected HTableDescriptor rpcCall() throws Exception { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableName); + GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); + if (!htds.getTableSchemaList().isEmpty()) { + return new ImmutableHTableDescriptor( + ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0))); } - }, rpcCallerFactory, operationTimeout, rpcTimeout); + return null; + } + }, rpcCallerFactory, operationTimeout, rpcTimeout); if (htd != null) { return new ImmutableHTableDescriptor(htd); } @@ -644,7 +635,7 @@ public class HBaseAdmin implements Admin { @Override public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) - throws IOException { + throws IOException { if (numRegions < 3) { throw new IllegalArgumentException("Must create at least three regions"); } else if (Bytes.compareTo(startKey, endKey) >= 0) { @@ -663,7 +654,7 @@ public class HBaseAdmin 
implements Admin { @Override public Future createTableAsync(final TableDescriptor desc, final byte[][] splitKeys) - throws IOException { + throws IOException { if (desc.getTableName() == null) { throw new IllegalArgumentException("TableName cannot be null"); } @@ -674,12 +665,11 @@ public class HBaseAdmin implements Admin { for (byte[] splitKey : splitKeys) { if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { throw new IllegalArgumentException( - "Empty split key must not be passed in the split keys."); + "Empty split key must not be passed in the split keys."); } if (lastKey != null && Bytes.equals(splitKey, lastKey)) { - throw new IllegalArgumentException("All split keys must be unique, " + - "found duplicate: " + Bytes.toStringBinary(splitKey) + - ", " + Bytes.toStringBinary(lastKey)); + throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: " + + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); } lastKey = splitKey; } @@ -689,11 +679,12 @@ public class HBaseAdmin implements Admin { new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected CreateTableResponse rpcCall() throws Exception { setPriority(desc.getTableName()); - CreateTableRequest request = RequestConverter.buildCreateTableRequest( - desc, splitKeys, nonceGroup, nonce); + CreateTableRequest request = + RequestConverter.buildCreateTableRequest(desc, splitKeys, nonceGroup, nonce); return master.createTable(getRpcController(), request); } }); @@ -705,9 +696,9 @@ public class HBaseAdmin implements Admin { private final byte[][] splitKeys; public CreateTableFuture(final HBaseAdmin admin, final TableDescriptor desc, - final byte[][] splitKeys, final CreateTableResponse response) { + final byte[][] splitKeys, final CreateTableResponse response) { super(admin, desc.getTableName(), - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); this.splitKeys = splitKeys; this.desc = desc; } @@ -736,11 +727,12 @@ public class HBaseAdmin implements Admin { new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected DeleteTableResponse rpcCall() throws Exception { setPriority(tableName); DeleteTableRequest req = - RequestConverter.buildDeleteTableRequest(tableName, nonceGroup,nonce); + RequestConverter.buildDeleteTableRequest(tableName, nonceGroup, nonce); return master.deleteTable(getRpcController(), req); } }); @@ -749,9 +741,9 @@ public class HBaseAdmin implements Admin { private static class DeleteTableFuture extends TableFuture { public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName, - final DeleteTableResponse response) { + final DeleteTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? 
response.getProcId() : null); } @Override @@ -760,15 +752,14 @@ public class HBaseAdmin implements Admin { } @Override - protected Void waitOperationResult(final long deadlineTs) - throws IOException, TimeoutException { + protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { waitTableNotFound(deadlineTs); return null; } @Override protected Void postOperationResult(final Void result, final long deadlineTs) - throws IOException, TimeoutException { + throws IOException, TimeoutException { // Delete cached information to prevent clients from using old locations ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName()); return super.postOperationResult(result, deadlineTs); @@ -781,12 +772,9 @@ public class HBaseAdmin implements Admin { } /** - * Delete tables matching the passed in pattern and wait on completion. - * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and - * {@link #deleteTable(TableName)} - * + * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTables(java.util.regex.Pattern) } and {@link #deleteTable(TableName)} * @param pattern The pattern to match table names against * @return Table descriptors for tables that couldn't be deleted * @throws IOException if a remote or network exception occurs @@ -807,21 +795,21 @@ public class HBaseAdmin implements Admin { @Override public Future truncateTableAsync(final TableName tableName, final boolean preserveSplits) - throws IOException { - TruncateTableResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected TruncateTableResponse rpcCall() throws Exception { - setPriority(tableName); - LOG.info("Started truncating " + tableName); - TruncateTableRequest req = RequestConverter.buildTruncateTableRequest( - tableName, preserveSplits, nonceGroup, nonce); - return master.truncateTable(getRpcController(), req); - } - }); + throws IOException { + TruncateTableResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected TruncateTableResponse rpcCall() throws Exception { + setPriority(tableName); + LOG.info("Started truncating " + tableName); + TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(tableName, + preserveSplits, nonceGroup, nonce); + return master.truncateTable(getRpcController(), req); + } + }); return new TruncateTableFuture(this, tableName, preserveSplits, response); } @@ -829,9 +817,9 @@ public class HBaseAdmin implements Admin { private final boolean preserveSplits; public TruncateTableFuture(final HBaseAdmin admin, final TableName tableName, - final boolean preserveSplits, final TruncateTableResponse response) { + final boolean preserveSplits, final TruncateTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? 
response.getProcId() : null); this.preserveSplits = preserveSplits; } @@ -872,13 +860,14 @@ public class HBaseAdmin implements Admin { new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected EnableTableResponse rpcCall() throws Exception { setPriority(tableName); LOG.info("Started enable of " + tableName); EnableTableRequest req = - RequestConverter.buildEnableTableRequest(tableName, nonceGroup, nonce); - return master.enableTable(getRpcController(),req); + RequestConverter.buildEnableTableRequest(tableName, nonceGroup, nonce); + return master.enableTable(getRpcController(), req); } }); return new EnableTableFuture(this, tableName, response); @@ -886,9 +875,9 @@ public class HBaseAdmin implements Admin { private static class EnableTableFuture extends TableFuture { public EnableTableFuture(final HBaseAdmin admin, final TableName tableName, - final EnableTableResponse response) { + final EnableTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -931,13 +920,13 @@ public class HBaseAdmin implements Admin { new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected DisableTableResponse rpcCall() throws Exception { setPriority(tableName); LOG.info("Started disable of " + tableName); DisableTableRequest req = - RequestConverter.buildDisableTableRequest( - tableName, nonceGroup, nonce); + RequestConverter.buildDisableTableRequest(tableName, nonceGroup, nonce); return master.disableTable(getRpcController(), req); } }); @@ -946,9 +935,9 @@ public class HBaseAdmin implements Admin { private static class DisableTableFuture extends TableFuture { public DisableTableFuture(final HBaseAdmin admin, final TableName tableName, - final DisableTableResponse response) { + final DisableTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? 
response.getProcId() : null); } @Override @@ -1017,19 +1006,19 @@ public class HBaseAdmin implements Admin { @Override public Pair getAlterStatus(final TableName tableName) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected Pair rpcCall() throws Exception { - setPriority(tableName); - GetSchemaAlterStatusRequest req = RequestConverter - .buildGetSchemaAlterStatusRequest(tableName); - GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req); - Pair pair = new Pair<>(ret.getYetToUpdateRegions(), - ret.getTotalRegions()); - return pair; - } - }); + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected Pair rpcCall() throws Exception { + setPriority(tableName); + GetSchemaAlterStatusRequest req = + RequestConverter.buildGetSchemaAlterStatusRequest(tableName); + GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req); + Pair pair = + new Pair<>(ret.getYetToUpdateRegions(), ret.getTotalRegions()); + return pair; + } + }); } @Override @@ -1039,28 +1028,28 @@ public class HBaseAdmin implements Admin { @Override public Future addColumnFamilyAsync(final TableName tableName, - final ColumnFamilyDescriptor columnFamily) throws IOException { - AddColumnResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected AddColumnResponse rpcCall() throws Exception { - setPriority(tableName); - AddColumnRequest req = - RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce); - return master.addColumn(getRpcController(), req); - } - }); + final ColumnFamilyDescriptor columnFamily) throws IOException { + AddColumnResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected AddColumnResponse rpcCall() throws Exception { + setPriority(tableName); + AddColumnRequest req = + RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce); + return master.addColumn(getRpcController(), req); + } + }); return new AddColumnFamilyFuture(this, tableName, response); } private static class AddColumnFamilyFuture extends ModifyTableFuture { public AddColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, - final AddColumnResponse response) { - super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() - : null); + final AddColumnResponse response) { + super(admin, tableName, + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -1072,40 +1061,39 @@ public class HBaseAdmin implements Admin { /** * {@inheritDoc} * @deprecated Since 2.0. Will be removed in 3.0. Use - * {@link #deleteColumnFamily(TableName, byte[])} instead. + * {@link #deleteColumnFamily(TableName, byte[])} instead. 
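The enable/disable/truncate futures built above are usually just waited on by callers; a rough sketch of that call pattern, where the table name is supplied by the caller and the 10-minute wait merely echoes the hbase.client.sync.wait.timeout.msec default seen earlier in this class:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class TableLifecycle {
  /** Disable, truncate (keeping split points) and re-enable a table, waiting on each procedure. */
  static void rebuild(Admin admin, TableName tableName) throws Exception {
    admin.disableTableAsync(tableName).get(10, TimeUnit.MINUTES);
    admin.truncateTableAsync(tableName, true).get(10, TimeUnit.MINUTES); // preserveSplits = true
    admin.enableTableAsync(tableName).get(10, TimeUnit.MINUTES);
  }
}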
*/ @Override @Deprecated public void deleteColumn(final TableName tableName, final byte[] columnFamily) - throws IOException { + throws IOException { deleteColumnFamily(tableName, columnFamily); } @Override public Future deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily) - throws IOException { - DeleteColumnResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected DeleteColumnResponse rpcCall() throws Exception { - setPriority(tableName); - DeleteColumnRequest req = - RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, - nonceGroup, nonce); - return master.deleteColumn(getRpcController(), req); - } - }); + throws IOException { + DeleteColumnResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected DeleteColumnResponse rpcCall() throws Exception { + setPriority(tableName); + DeleteColumnRequest req = + RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, nonceGroup, nonce); + return master.deleteColumn(getRpcController(), req); + } + }); return new DeleteColumnFamilyFuture(this, tableName, response); } private static class DeleteColumnFamilyFuture extends ModifyTableFuture { public DeleteColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, - final DeleteColumnResponse response) { - super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() - : null); + final DeleteColumnResponse response) { + super(admin, tableName, + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -1135,9 +1123,9 @@ public class HBaseAdmin implements Admin { private static class ModifyColumnFamilyFuture extends ModifyTableFuture { public ModifyColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, - final ModifyColumnResponse response) { - super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() - : null); + final ModifyColumnResponse response) { + super(admin, tableName, + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -1187,14 +1175,14 @@ public class HBaseAdmin implements Admin { @Deprecated @Override - public void closeRegion(final byte [] regionName, final String unused) throws IOException { + public void closeRegion(final byte[] regionName, final String unused) throws IOException { unassign(regionName, true); } @Deprecated @Override public boolean closeRegionWithEncodedRegionName(final String encodedRegionName, - final String unused) throws IOException { + final String unused) throws IOException { unassign(Bytes.toBytes(encodedRegionName), true); return true; } @@ -1206,11 +1194,10 @@ public class HBaseAdmin implements Admin { } /** - * @param sn - * @return List of {@link HRegionInfo}. + * n * @return List of {@link HRegionInfo}. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegions(ServerName)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegions(ServerName)}. 
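The addColumnFamilyAsync/deleteColumnFamilyAsync futures above follow the same wait-on-procedure pattern; a short sketch, noting that ColumnFamilyDescriptorBuilder and the family names "new_cf"/"old_cf" are illustrative choices not shown in this patch:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnFamilyExample {
  /** Add a new column family, then drop an old one, waiting on each master procedure. */
  static void swapFamily(Admin admin, TableName tableName) throws Exception {
    admin.addColumnFamilyAsync(tableName,
      ColumnFamilyDescriptorBuilder.of("new_cf")).get(10, TimeUnit.MINUTES);
    admin.deleteColumnFamilyAsync(tableName, Bytes.toBytes("old_cf")).get(10, TimeUnit.MINUTES);
  }
}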
*/ @Deprecated @Override @@ -1257,8 +1244,7 @@ public class HBaseAdmin implements Admin { } private void flush(AdminService.BlockingInterface admin, final RegionInfo info, - byte[] columnFamily) - throws IOException { + byte[] columnFamily) throws IOException { ProtobufUtil.call(() -> { // TODO: There is no timeout on this controller. Set one! HBaseRpcController controller = rpcControllerFactory.newController(); @@ -1280,14 +1266,12 @@ public class HBaseAdmin implements Admin { * {@inheritDoc} */ @Override - public void compact(final TableName tableName) - throws IOException { + public void compact(final TableName tableName) throws IOException { compact(tableName, null, false, CompactType.NORMAL); } @Override - public void compactRegion(final byte[] regionName) - throws IOException { + public void compactRegion(final byte[] regionName) throws IOException { compactRegion(regionName, null, false); } @@ -1295,8 +1279,7 @@ public class HBaseAdmin implements Admin { * {@inheritDoc} */ @Override - public void compact(final TableName tableName, final byte[] columnFamily) - throws IOException { + public void compact(final TableName tableName, final byte[] columnFamily) throws IOException { compact(tableName, columnFamily, false, CompactType.NORMAL); } @@ -1304,36 +1287,35 @@ public class HBaseAdmin implements Admin { * {@inheritDoc} */ @Override - public void compactRegion(final byte[] regionName, final byte[] columnFamily) - throws IOException { + public void compactRegion(final byte[] regionName, final byte[] columnFamily) throws IOException { compactRegion(regionName, columnFamily, false); } @Override - public Map compactionSwitch(boolean switchState, List - serverNamesList) throws IOException { + public Map compactionSwitch(boolean switchState, + List serverNamesList) throws IOException { List serverList = new ArrayList<>(); if (serverNamesList.isEmpty()) { ClusterMetrics status = getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)); serverList.addAll(status.getLiveServerMetrics().keySet()); } else { - for (String regionServerName: serverNamesList) { + for (String regionServerName : serverNamesList) { ServerName serverName = null; try { serverName = ServerName.valueOf(regionServerName); } catch (Exception e) { - throw new IllegalArgumentException(String.format("Invalid ServerName format: %s", - regionServerName)); + throw new IllegalArgumentException( + String.format("Invalid ServerName format: %s", regionServerName)); } if (serverName == null) { - throw new IllegalArgumentException(String.format("Null ServerName: %s", - regionServerName)); + throw new IllegalArgumentException( + String.format("Null ServerName: %s", regionServerName)); } serverList.add(serverName); } } Map res = new HashMap<>(serverList.size()); - for (ServerName serverName: serverList) { + for (ServerName serverName : serverList) { boolean prev_state = switchCompact(this.connection.getAdmin(serverName), switchState); res.put(serverName, prev_state); } @@ -1341,14 +1323,15 @@ public class HBaseAdmin implements Admin { } private Boolean switchCompact(AdminService.BlockingInterface admin, boolean onOrOff) - throws IOException { + throws IOException { return executeCallable(new RpcRetryingCallable() { - @Override protected Boolean rpcCall(int callTimeout) throws Exception { + @Override + protected Boolean rpcCall(int callTimeout) throws Exception { HBaseRpcController controller = rpcControllerFactory.newController(); CompactionSwitchRequest request = - CompactionSwitchRequest.newBuilder().setEnabled(onOrOff).build(); + 
CompactionSwitchRequest.newBuilder().setEnabled(onOrOff).build(); CompactionSwitchResponse compactionSwitchResponse = - admin.compactionSwitch(controller, request); + admin.compactionSwitch(controller, request); return compactionSwitchResponse.getPrevState(); } }); @@ -1369,14 +1352,12 @@ public class HBaseAdmin implements Admin { } @Override - public void majorCompact(final TableName tableName) - throws IOException { + public void majorCompact(final TableName tableName) throws IOException { compact(tableName, null, true, CompactType.NORMAL); } @Override - public void majorCompactRegion(final byte[] regionName) - throws IOException { + public void majorCompactRegion(final byte[] regionName) throws IOException { compactRegion(regionName, null, true); } @@ -1385,36 +1366,34 @@ public class HBaseAdmin implements Admin { */ @Override public void majorCompact(final TableName tableName, final byte[] columnFamily) - throws IOException { + throws IOException { compact(tableName, columnFamily, true, CompactType.NORMAL); } @Override public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily) - throws IOException { + throws IOException { compactRegion(regionName, columnFamily, true); } /** - * Compact a table. - * Asynchronous operation. - * - * @param tableName table or region to compact + * Compact a table. Asynchronous operation. + * @param tableName table or region to compact * @param columnFamily column family within a table or region - * @param major True if we are to do a major compaction. - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * @param major True if we are to do a major compaction. + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @throws IOException if a remote or network exception occurs */ - private void compact(final TableName tableName, final byte[] columnFamily,final boolean major, - CompactType compactType) throws IOException { + private void compact(final TableName tableName, final byte[] columnFamily, final boolean major, + CompactType compactType) throws IOException { switch (compactType) { case MOB: compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName), - major, columnFamily); + major, columnFamily); break; case NORMAL: checkTableExists(tableName); - for (HRegionLocation loc :connection.locateRegions(tableName, false, false)) { + for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) { ServerName sn = loc.getServerName(); if (sn == null) { continue; @@ -1423,8 +1402,8 @@ public class HBaseAdmin implements Admin { compact(this.connection.getAdmin(sn), loc.getRegion(), major, columnFamily); } catch (NotServingRegionException e) { if (LOG.isDebugEnabled()) { - LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() + - ": " + StringUtils.stringifyException(e)); + LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() + ": " + + StringUtils.stringifyException(e)); } } } @@ -1435,17 +1414,14 @@ public class HBaseAdmin implements Admin { } /** - * Compact an individual region. - * Asynchronous operation. - * - * @param regionName region to compact + * Compact an individual region. Asynchronous operation. + * @param regionName region to compact * @param columnFamily column family within a table or region - * @param major True if we are to do a major compaction. 
- * @throws IOException if a remote or network exception occurs - * @throws InterruptedException + * @param major True if we are to do a major compaction. + * @throws IOException if a remote or network exception occurs n */ private void compactRegion(final byte[] regionName, final byte[] columnFamily, - final boolean major) throws IOException { + final boolean major) throws IOException { Pair regionServerPair = getRegion(regionName); if (regionServerPair == null) { throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); @@ -1458,14 +1434,14 @@ public class HBaseAdmin implements Admin { } private void compact(AdminService.BlockingInterface admin, RegionInfo hri, boolean major, - byte[] family) throws IOException { + byte[] family) throws IOException { Callable callable = new Callable() { @Override public Void call() throws Exception { // TODO: There is no timeout on this controller. Set one! HBaseRpcController controller = rpcControllerFactory.newController(); CompactRegionRequest request = - RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family); + RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family); admin.compactRegion(controller, request); return null; } @@ -1492,14 +1468,14 @@ public class HBaseAdmin implements Admin { } @Override - public void assign(final byte [] regionName) throws MasterNotRunningException, - ZooKeeperConnectionException, IOException { + public void assign(final byte[] regionName) + throws MasterNotRunningException, ZooKeeperConnectionException, IOException { executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Void rpcCall() throws Exception { setPriority(regionName); AssignRegionRequest request = - RequestConverter.buildAssignRegionRequest(getRegionName(regionName)); + RequestConverter.buildAssignRegionRequest(getRegionName(regionName)); master.assignRegion(getRpcController(), request); return null; } @@ -1507,14 +1483,13 @@ public class HBaseAdmin implements Admin { } @Override - public void unassign(final byte [] regionName) throws IOException { + public void unassign(final byte[] regionName) throws IOException { final byte[] toBeUnassigned = getRegionName(regionName); executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Void rpcCall() throws Exception { setPriority(regionName); - UnassignRegionRequest request = - RequestConverter.buildUnassignRegionRequest(toBeUnassigned); + UnassignRegionRequest request = RequestConverter.buildUnassignRegionRequest(toBeUnassigned); master.unassignRegion(getRpcController(), request); return null; } @@ -1522,36 +1497,36 @@ public class HBaseAdmin implements Admin { } @Override - public void offline(final byte [] regionName) - throws IOException { + public void offline(final byte[] regionName) throws IOException { executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Void rpcCall() throws Exception { setPriority(regionName); master.offlineRegion(getRpcController(), - RequestConverter.buildOfflineRegionRequest(regionName)); + RequestConverter.buildOfflineRegionRequest(regionName)); return null; } }); } @Override - public boolean balancerSwitch(final boolean on, final boolean synchronous) - throws IOException { + public boolean balancerSwitch(final boolean on, final boolean synchronous) throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override 
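For reference, the compaction entry points reworked above are typically driven like this; the table handle, the family name "cf", and the choice of an empty server list (meaning every live region server, as the compactionSwitch code above allows) are illustrative:

import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionExample {
  static void compactAll(Admin admin, TableName tableName) throws Exception {
    admin.compact(tableName);                            // asynchronous minor compaction, all families
    admin.majorCompact(tableName, Bytes.toBytes("cf"));  // asynchronous major compaction of one family
    // Disable compactions on every live region server; previous state is returned per server.
    Map<ServerName, Boolean> previous = admin.compactionSwitch(false, Collections.emptyList());
    previous.forEach((server, prev) -> System.out.println(server + " was " + prev));
  }
}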
protected Boolean rpcCall() throws Exception { SetBalancerRunningRequest req = - RequestConverter.buildSetBalancerRunningRequest(on, synchronous); + RequestConverter.buildSetBalancerRunningRequest(on, synchronous); return master.setBalancerRunning(getRpcController(), req).getPrevBalanceValue(); } }); } - @Override public BalanceResponse balance(BalanceRequest request) throws IOException { + @Override + public BalanceResponse balance(BalanceRequest request) throws IOException { return executeCallable( new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override protected BalanceResponse rpcCall() throws Exception { + @Override + protected BalanceResponse rpcCall() throws Exception { MasterProtos.BalanceRequest req = ProtobufUtil.toBalanceRequest(request); return ProtobufUtil.toBalanceResponse(master.balance(getRpcController(), req)); } @@ -1563,8 +1538,9 @@ public class HBaseAdmin implements Admin { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { - return master.isBalancerEnabled(getRpcController(), - RequestConverter.buildIsBalancerEnabledRequest()).getEnabled(); + return master + .isBalancerEnabled(getRpcController(), RequestConverter.buildIsBalancerEnabledRequest()) + .getEnabled(); } }); } @@ -1578,21 +1554,19 @@ public class HBaseAdmin implements Admin { CacheEvictionStatsBuilder cacheEvictionStats = CacheEvictionStats.builder(); List> pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName); - Map> regionInfoByServerName = - pairs.stream() - .filter(pair -> !(pair.getFirst().isOffline())) - .filter(pair -> pair.getSecond() != null) - .collect(Collectors.groupingBy(pair -> pair.getSecond(), - Collectors.mapping(pair -> pair.getFirst(), Collectors.toList()))); + Map> regionInfoByServerName = pairs.stream() + .filter(pair -> !(pair.getFirst().isOffline())).filter(pair -> pair.getSecond() != null) + .collect(Collectors.groupingBy(pair -> pair.getSecond(), + Collectors.mapping(pair -> pair.getFirst(), Collectors.toList()))); for (Map.Entry> entry : regionInfoByServerName.entrySet()) { CacheEvictionStats stats = clearBlockCache(entry.getKey(), entry.getValue()); cacheEvictionStats = cacheEvictionStats.append(stats); if (stats.getExceptionCount() > 0) { for (Map.Entry exception : stats.getExceptions().entrySet()) { - LOG.debug("Failed to clear block cache for " - + Bytes.toStringBinary(exception.getKey()) - + " on " + entry.getKey() + ": ", exception.getValue()); + LOG.debug("Failed to clear block cache for " + Bytes.toStringBinary(exception.getKey()) + + " on " + entry.getKey() + ": ", exception.getValue()); } } } @@ -1600,11 +1574,10 @@ public class HBaseAdmin implements Admin { } private CacheEvictionStats clearBlockCache(final ServerName sn, final List hris) - throws IOException { + throws IOException { HBaseRpcController controller = rpcControllerFactory.newController(); AdminService.BlockingInterface admin = this.connection.getAdmin(sn); - ClearRegionBlockCacheRequest request = - RequestConverter.buildClearRegionBlockCacheRequest(hris); + ClearRegionBlockCacheRequest request = RequestConverter.buildClearRegionBlockCacheRequest(hris); ClearRegionBlockCacheResponse response; try { response = admin.clearRegionBlockCache(controller, request); @@ -1619,8 +1592,8 @@ public class HBaseAdmin implements Admin { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { - return 
master.normalize(getRpcController(), - RequestConverter.buildNormalizeRequest(ntfp)).getNormalizerRan(); + return master.normalize(getRpcController(), RequestConverter.buildNormalizeRequest(ntfp)) + .getNormalizerRan(); } }); } @@ -1641,8 +1614,7 @@ public class HBaseAdmin implements Admin { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { - SetNormalizerRunningRequest req = - RequestConverter.buildSetNormalizerRunningRequest(on); + SetNormalizerRunningRequest req = RequestConverter.buildSetNormalizerRunningRequest(on); return master.setNormalizerRunning(getRpcController(), req).getPrevNormalizerValue(); } }); @@ -1664,8 +1636,8 @@ public class HBaseAdmin implements Admin { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Integer rpcCall() throws Exception { - return master.runCatalogScan(getRpcController(), - RequestConverter.buildCatalogScanRequest()).getScanResult(); + return master.runCatalogScan(getRpcController(), RequestConverter.buildCatalogScanRequest()) + .getScanResult(); } }); } @@ -1684,9 +1656,10 @@ public class HBaseAdmin implements Admin { @Override public boolean cleanerChoreSwitch(final boolean on) throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override public Boolean rpcCall() throws Exception { + @Override + public Boolean rpcCall() throws Exception { return master.setCleanerChoreRunning(getRpcController(), - RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue(); + RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue(); } }); } @@ -1694,9 +1667,11 @@ public class HBaseAdmin implements Admin { @Override public boolean runCleanerChore() throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override public Boolean rpcCall() throws Exception { - return master.runCleanerChore(getRpcController(), - RequestConverter.buildRunCleanerChoreRequest()).getCleanerChoreRan(); + @Override + public Boolean rpcCall() throws Exception { + return master + .runCleanerChore(getRpcController(), RequestConverter.buildRunCleanerChoreRequest()) + .getCleanerChoreRan(); } }); } @@ -1704,30 +1679,26 @@ public class HBaseAdmin implements Admin { @Override public boolean isCleanerChoreEnabled() throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override public Boolean rpcCall() throws Exception { + @Override + public Boolean rpcCall() throws Exception { return master.isCleanerChoreEnabled(getRpcController(), - RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue(); + RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue(); } }); } /** - * Merge two regions. Synchronous operation. - * Note: It is not feasible to predict the length of merge. - * Therefore, this is for internal testing only. + * Merge two regions. Synchronous operation. Note: It is not feasible to predict the length of + * merge. Therefore, this is for internal testing only. 
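The balancer, normalizer and cleaner-chore switches touched above share the same shape (set the new state, get the previous one back); a small sketch of pausing all three, assuming an Admin obtained as in the earlier sketch:

import org.apache.hadoop.hbase.client.Admin;

public class ChoreSwitches {
  static void pauseBackgroundWork(Admin admin) throws Exception {
    boolean balancerWasOn = admin.balancerSwitch(false, true); // turn the balancer off, synchronously
    boolean normalizerWasOn = admin.normalizerSwitch(false);   // turn the region normalizer off
    boolean cleanerWasOn = admin.cleanerChoreSwitch(false);    // stop the cleaner chore
    System.out.printf("balancer=%s normalizer=%s cleaner=%s%n",
      balancerWasOn, normalizerWasOn, cleanerWasOn);
  }
}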
* @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge - * two adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent + * regions * @throws IOException if a remote or network exception occurs */ - public void mergeRegionsSync( - final byte[] nameOfRegionA, - final byte[] nameOfRegionB, - final boolean forcible) throws IOException { - get( - mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible), - syncWaitTimeout, + public void mergeRegionsSync(final byte[] nameOfRegionA, final byte[] nameOfRegionB, + final boolean forcible) throws IOException { + get(mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible), syncWaitTimeout, TimeUnit.MILLISECONDS); } @@ -1735,93 +1706,83 @@ public class HBaseAdmin implements Admin { * Merge two regions. Asynchronous operation. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge - * two adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent + * regions * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0. Will be removed in 3.0. Use - * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. + * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. */ @Deprecated @Override - public void mergeRegions(final byte[] nameOfRegionA, - final byte[] nameOfRegionB, final boolean forcible) - throws IOException { + public void mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB, + final boolean forcible) throws IOException { mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible); } /** * Merge two regions. Asynchronous operation. * @param nameofRegionsToMerge encoded or full name of daughter regions - * @param forcible true if do a compulsory merge, otherwise we will only merge - * adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge + * adjacent regions */ @Override public Future mergeRegionsAsync(final byte[][] nameofRegionsToMerge, final boolean forcible) - throws IOException { + throws IOException { Preconditions.checkArgument(nameofRegionsToMerge.length >= 2, "Can not merge only %s region", nameofRegionsToMerge.length); byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][]; for (int i = 0; i < nameofRegionsToMerge.length; i++) { - encodedNameofRegionsToMerge[i] = - RegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ? nameofRegionsToMerge[i] - : Bytes.toBytes(RegionInfo.encodeRegionName(nameofRegionsToMerge[i])); + encodedNameofRegionsToMerge[i] = RegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) + ? 
nameofRegionsToMerge[i] + : Bytes.toBytes(RegionInfo.encodeRegionName(nameofRegionsToMerge[i])); } TableName tableName = null; Pair pair; - for(int i = 0; i < nameofRegionsToMerge.length; i++) { + for (int i = 0; i < nameofRegionsToMerge.length; i++) { pair = getRegion(nameofRegionsToMerge[i]); if (pair != null) { if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly"); + throw new IllegalArgumentException("Can't invoke merge on non-default regions directly"); } if (tableName == null) { tableName = pair.getFirst().getTable(); - } else if (!tableName.equals(pair.getFirst().getTable())) { - throw new IllegalArgumentException ("Cannot merge regions from two different tables " + - tableName + " and " + pair.getFirst().getTable()); + } else if (!tableName.equals(pair.getFirst().getTable())) { + throw new IllegalArgumentException("Cannot merge regions from two different tables " + + tableName + " and " + pair.getFirst().getTable()); } } else { - throw new UnknownRegionException ( - "Can't invoke merge on unknown region " + throw new UnknownRegionException("Can't invoke merge on unknown region " + Bytes.toStringBinary(encodedNameofRegionsToMerge[i])); } } - MergeTableRegionsResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected MergeTableRegionsResponse rpcCall() throws Exception { - MergeTableRegionsRequest request = RequestConverter - .buildMergeTableRegionsRequest( - encodedNameofRegionsToMerge, - forcible, - nonceGroup, - nonce); - return master.mergeTableRegions(getRpcController(), request); - } - }); + MergeTableRegionsResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected MergeTableRegionsResponse rpcCall() throws Exception { + MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest( + encodedNameofRegionsToMerge, forcible, nonceGroup, nonce); + return master.mergeTableRegions(getRpcController(), request); + } + }); return new MergeTableRegionsFuture(this, tableName, response); } private static class MergeTableRegionsFuture extends TableFuture { - public MergeTableRegionsFuture( - final HBaseAdmin admin, - final TableName tableName, - final MergeTableRegionsResponse response) { + public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName, + final MergeTableRegionsResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); } - public MergeTableRegionsFuture( - final HBaseAdmin admin, - final TableName tableName, - final Long procId) { + public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName, + final Long procId) { super(admin, tableName, procId); } @@ -1830,10 +1791,10 @@ public class HBaseAdmin implements Admin { return "MERGE_REGIONS"; } } + /** - * Split one region. Synchronous operation. - * Note: It is not feasible to predict the length of split. - * Therefore, this is for internal testing only. + * Split one region. Synchronous operation. Note: It is not feasible to predict the length of + * split. Therefore, this is for internal testing only. 
* @param regionName encoded or full name of region * @param splitPoint key where region splits * @throws IOException if a remote or network exception occurs @@ -1846,30 +1807,30 @@ public class HBaseAdmin implements Admin { * Split one region. Synchronous operation. * @param regionName region to be split * @param splitPoint split point - * @param timeout how long to wait on split - * @param units time units + * @param timeout how long to wait on split + * @param units time units * @throws IOException if a remote or network exception occurs */ public void splitRegionSync(byte[] regionName, byte[] splitPoint, final long timeout, - final TimeUnit units) throws IOException { + final TimeUnit units) throws IOException { get(splitRegionAsync(regionName, splitPoint), timeout, units); } @Override - public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) - throws IOException { - byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ? - regionName : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName)); + public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException { + byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) + ? regionName + : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName)); Pair pair = getRegion(regionName); if (pair != null) { - if (pair.getFirst() != null && - pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException ("Can't invoke split on non-default regions directly"); + if ( + pair.getFirst() != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID + ) { + throw new IllegalArgumentException("Can't invoke split on non-default regions directly"); } } else { - throw new UnknownRegionException ( - "Can't invoke merge on unknown region " - + Bytes.toStringBinary(encodedNameofRegionToSplit)); + throw new UnknownRegionException( + "Can't invoke merge on unknown region " + Bytes.toStringBinary(encodedNameofRegionToSplit)); } return splitRegionAsync(pair.getFirst(), splitPoint); @@ -1877,38 +1838,38 @@ public class HBaseAdmin implements Admin { Future splitRegionAsync(RegionInfo hri, byte[] splitPoint) throws IOException { TableName tableName = hri.getTable(); - if (hri.getStartKey() != null && splitPoint != null && - Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) { + if ( + hri.getStartKey() != null && splitPoint != null + && Bytes.compareTo(hri.getStartKey(), splitPoint) == 0 + ) { throw new IOException("should not give a splitkey which equals to startkey!"); } SplitTableRegionResponse response = executeCallable( - new MasterCallable(getConnection(), getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected SplitTableRegionResponse rpcCall() throws Exception { - setPriority(tableName); - SplitTableRegionRequest request = RequestConverter - .buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce); - return master.splitRegion(getRpcController(), request); - } - }); + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected SplitTableRegionResponse rpcCall() throws Exception { + setPriority(tableName); + SplitTableRegionRequest request = + RequestConverter.buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce); + return master.splitRegion(getRpcController(), request); + } + }); return new SplitTableRegionFuture(this, tableName, 
response); } private static class SplitTableRegionFuture extends TableFuture { - public SplitTableRegionFuture(final HBaseAdmin admin, - final TableName tableName, - final SplitTableRegionResponse response) { + public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName, + final SplitTableRegionResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); } - public SplitTableRegionFuture( - final HBaseAdmin admin, - final TableName tableName, - final Long procId) { + public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName, + final Long procId) { super(admin, tableName, procId); } @@ -1942,8 +1903,10 @@ public class HBaseAdmin implements Admin { continue; } // if a split point given, only split that particular region - if (r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID || - (splitPoint != null && !r.containsRow(splitPoint))) { + if ( + r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID + || (splitPoint != null && !r.containsRow(splitPoint)) + ) { continue; } // call out to master to do split now @@ -1952,15 +1915,17 @@ public class HBaseAdmin implements Admin { } @Override - public void splitRegion(final byte[] regionName, final byte [] splitPoint) throws IOException { + public void splitRegion(final byte[] regionName, final byte[] splitPoint) throws IOException { Pair regionServerPair = getRegion(regionName); if (regionServerPair == null) { throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); } - if (regionServerPair.getFirst() != null && - regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException("Can't split replicas directly. " - + "Replicas are auto-split when their primary is split."); + if ( + regionServerPair.getFirst() != null + && regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID + ) { + throw new IllegalArgumentException( + "Can't split replicas directly. " + "Replicas are auto-split when their primary is split."); } if (regionServerPair.getSecond() == null) { throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); @@ -1970,9 +1935,9 @@ public class HBaseAdmin implements Admin { private static class ModifyTableFuture extends TableFuture { public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, - final ModifyTableResponse response) { + final ModifyTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); } public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) { @@ -1986,7 +1951,7 @@ public class HBaseAdmin implements Admin { @Override protected Void postOperationResult(final Void result, final long deadlineTs) - throws IOException, TimeoutException { + throws IOException, TimeoutException { // The modify operation on the table is asynchronous on the server side irrespective // of whether Procedure V2 is supported or not. So, we wait in the client till // all regions get updated. @@ -1997,11 +1962,9 @@ public class HBaseAdmin implements Admin { /** * @param regionName Name of a region. 
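A similar sketch for the split path shown above, with placeholder table and split-key values; the timeout only bounds the client wait, it does not cancel the procedure.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRegionSketch {
  // Splits the first region of a hypothetical table at an explicit key and waits briefly.
  static void splitFirstRegion(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      RegionInfo first = admin.getRegions(TableName.valueOf("demo_table")).get(0);
      Future<Void> pending =
        admin.splitRegionAsync(first.getRegionName(), Bytes.toBytes("row-5000"));
      pending.get(2, TimeUnit.MINUTES);
    }
  }
}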
- * @return a pair of HRegionInfo and ServerName if regionName is - * a verified region name (we call {@link - * MetaTableAccessor#getRegionLocation(Connection, byte[])} - * else null. - * Throw IllegalArgumentException if regionName is null. + * @return a pair of HRegionInfo and ServerName if regionName is a verified region + * name (we call {@link MetaTableAccessor#getRegionLocation(Connection, byte[])} else + * null. Throw IllegalArgumentException if regionName is null. * @throws IOException if a remote or network exception occurs */ Pair getRegion(final byte[] regionName) throws IOException { @@ -2052,19 +2015,17 @@ public class HBaseAdmin implements Admin { } /** - * If the input is a region name, it is returned as is. If it's an - * encoded region name, the corresponding region is found from meta - * and its region name is returned. If we can't find any region in - * meta matching the input as either region name or encoded region - * name, the input is returned as is. We don't throw unknown - * region exception. + * If the input is a region name, it is returned as is. If it's an encoded region name, the + * corresponding region is found from meta and its region name is returned. If we can't find any + * region in meta matching the input as either region name or encoded region name, the input is + * returned as is. We don't throw unknown region exception. */ - private byte[] getRegionName( - final byte[] regionNameOrEncodedRegionName) throws IOException { - if (Bytes.equals(regionNameOrEncodedRegionName, - HRegionInfo.FIRST_META_REGIONINFO.getRegionName()) - || Bytes.equals(regionNameOrEncodedRegionName, - HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { + private byte[] getRegionName(final byte[] regionNameOrEncodedRegionName) throws IOException { + if ( + Bytes.equals(regionNameOrEncodedRegionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionNameOrEncodedRegionName, + HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()) + ) { return HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); } byte[] tmp = regionNameOrEncodedRegionName; @@ -2079,11 +2040,10 @@ public class HBaseAdmin implements Admin { * Check if table exists or not * @param tableName Name of a table. * @return tableName instance - * @throws IOException if a remote or network exception occurs. + * @throws IOException if a remote or network exception occurs. * @throws TableNotFoundException if table does not exist. */ - private TableName checkTableExists(final TableName tableName) - throws IOException { + private TableName checkTableExists(final TableName tableName) throws IOException { return executeCallable(new RpcRetryingCallable() { @Override protected TableName rpcCall(int callTimeout) throws Exception { @@ -2120,8 +2080,7 @@ public class HBaseAdmin implements Admin { } @Override - public synchronized void stopRegionServer(final String hostnamePort) - throws IOException { + public synchronized void stopRegionServer(final String hostnamePort) throws IOException { String hostname = Addressing.parseHostname(hostnamePort); int port = Addressing.parsePort(hostnamePort); final AdminService.BlockingInterface admin = @@ -2129,8 +2088,8 @@ public class HBaseAdmin implements Admin { // TODO: There is no timeout on this controller. Set one! 
HBaseRpcController controller = rpcControllerFactory.newController(); controller.setPriority(HConstants.HIGH_QOS); - StopServerRequest request = RequestConverter.buildStopServerRequest( - "Called by admin client " + this.connection.toString()); + StopServerRequest request = RequestConverter + .buildStopServerRequest("Called by admin client " + this.connection.toString()); try { admin.stopServer(controller, request); } catch (Exception e) { @@ -2140,32 +2099,32 @@ public class HBaseAdmin implements Admin { @Override public boolean isMasterInMaintenanceMode() throws IOException { - return executeCallable(new MasterCallable(getConnection(), - this.rpcControllerFactory) { - @Override - protected IsInMaintenanceModeResponse rpcCall() throws Exception { - return master.isMasterInMaintenanceMode(getRpcController(), + return executeCallable( + new MasterCallable(getConnection(), this.rpcControllerFactory) { + @Override + protected IsInMaintenanceModeResponse rpcCall() throws Exception { + return master.isMasterInMaintenanceMode(getRpcController(), IsInMaintenanceModeRequest.newBuilder().build()); - } - }).getInMaintenanceMode(); + } + }).getInMaintenanceMode(); } @Override public ClusterMetrics getClusterMetrics(EnumSet

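For the getClusterMetrics call above, a small usage sketch that requests only the options it needs, which keeps the response payload down on large clusters; names are illustrative.

import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class ClusterMetricsSketch {
  // Prints the active master and the live region servers, fetching only those two fields.
  static void printLiveServers(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics =
        admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
      System.out.println("Active master: " + metrics.getMasterName());
      for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
        System.out.println("Live region server: " + sn);
      }
    }
  }
}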
* <li>Throw an exception if the table exists on peer cluster but descriptors are not same.</li>
  • * * @param tableName name of the table to sync to the peer - * @param splits table split keys + * @param splits table split keys * @throws IOException if a remote or network exception occurs */ private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[][] splits) - throws IOException { + throws IOException { List peers = listReplicationPeers(); if (peers == null || peers.size() <= 0) { throw new IllegalArgumentException("Found no peer cluster for replication."); @@ -4128,9 +4064,9 @@ public class HBaseAdmin implements Admin { for (ReplicationPeerDescription peerDesc : peers) { if (peerDesc.getPeerConfig().needToReplicate(tableName)) { Configuration peerConf = - ReplicationPeerConfigUtil.getPeerClusterConfiguration(this.conf, peerDesc); + ReplicationPeerConfigUtil.getPeerClusterConfiguration(this.conf, peerDesc); try (Connection conn = ConnectionFactory.createConnection(peerConf); - Admin repHBaseAdmin = conn.getAdmin()) { + Admin repHBaseAdmin = conn.getAdmin()) { TableDescriptor tableDesc = getDescriptor(tableName); TableDescriptor peerTableDesc = null; if (!repHBaseAdmin.tableExists(tableName)) { @@ -4139,14 +4075,15 @@ public class HBaseAdmin implements Admin { peerTableDesc = repHBaseAdmin.getDescriptor(tableName); if (peerTableDesc == null) { throw new IllegalArgumentException("Failed to get table descriptor for table " - + tableName.getNameAsString() + " from peer cluster " + peerDesc.getPeerId()); + + tableName.getNameAsString() + " from peer cluster " + peerDesc.getPeerId()); } - if (TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc, - tableDesc) != 0) { + if ( + TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc, tableDesc) != 0 + ) { throw new IllegalArgumentException("Table " + tableName.getNameAsString() - + " exists in peer cluster " + peerDesc.getPeerId() - + ", but the table descriptors are not same when compared with source cluster." - + " Thus can not enable the table's replication switch."); + + " exists in peer cluster " + peerDesc.getPeerId() + + ", but the table descriptors are not same when compared with source cluster." + + " Thus can not enable the table's replication switch."); } } } @@ -4164,7 +4101,7 @@ public class HBaseAdmin implements Admin { TableDescriptor tableDesc = getDescriptor(tableName); if (!tableDesc.matchReplicationScope(enableRep)) { int scope = - enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL; + enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL; modifyTable(TableDescriptorBuilder.newBuilder(tableDesc).setReplicationScope(scope).build()); } } @@ -4182,7 +4119,7 @@ public class HBaseAdmin implements Admin { // TODO: There is no timeout on this controller. Set one! HBaseRpcController controller = rpcControllerFactory.newController(); ClearCompactionQueuesRequest request = - RequestConverter.buildClearCompactionQueuesRequest(queues); + RequestConverter.buildClearCompactionQueuesRequest(queues); admin.clearCompactionQueues(controller, request); return null; } @@ -4192,21 +4129,21 @@ public class HBaseAdmin implements Admin { @Override public List clearDeadServers(List servers) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - ClearDeadServersRequest req = RequestConverter. - buildClearDeadServersRequest(servers == null? 
Collections.EMPTY_LIST: servers); - return ProtobufUtil.toServerNameList( - master.clearDeadServers(getRpcController(), req).getServerNameList()); - } - }); + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + ClearDeadServersRequest req = RequestConverter + .buildClearDeadServersRequest(servers == null ? Collections.EMPTY_LIST : servers); + return ProtobufUtil + .toServerNameList(master.clearDeadServers(getRpcController(), req).getServerNameList()); + } + }); } @Override public void cloneTableSchema(final TableName tableName, final TableName newTableName, - final boolean preserveSplits) throws IOException { + final boolean preserveSplits) throws IOException { checkTableExists(tableName); if (tableExists(newTableName)) { throw new TableExistsException(newTableName); @@ -4224,10 +4161,9 @@ public class HBaseAdmin implements Admin { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { - return this.master - .switchRpcThrottle(getRpcController(), MasterProtos.SwitchRpcThrottleRequest - .newBuilder().setRpcThrottleEnabled(enable).build()) - .getPreviousRpcThrottleEnabled(); + return this.master.switchRpcThrottle(getRpcController(), + MasterProtos.SwitchRpcThrottleRequest.newBuilder().setRpcThrottleEnabled(enable).build()) + .getPreviousRpcThrottleEnabled(); } }); } @@ -4249,10 +4185,10 @@ public class HBaseAdmin implements Admin { @Override protected Boolean rpcCall() throws Exception { return this.master - .switchExceedThrottleQuota(getRpcController(), - MasterProtos.SwitchExceedThrottleQuotaRequest.newBuilder() - .setExceedThrottleQuotaEnabled(enable).build()) - .getPreviousExceedThrottleQuotaEnabled(); + .switchExceedThrottleQuota(getRpcController(), + MasterProtos.SwitchExceedThrottleQuotaRequest.newBuilder() + .setExceedThrottleQuotaEnabled(enable).build()) + .getPreviousExceedThrottleQuotaEnabled(); } }); } @@ -4276,8 +4212,8 @@ public class HBaseAdmin implements Admin { } @Override - public Map getRegionServerSpaceQuotaSnapshots( - ServerName serverName) throws IOException { + public Map + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException { final AdminService.BlockingInterface admin = this.connection.getAdmin(serverName); Callable callable = new Callable() { @@ -4337,12 +4273,12 @@ public class HBaseAdmin implements Admin { @Override public void grant(UserPermission userPermission, boolean mergeExistingPermissions) - throws IOException { + throws IOException { executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Void rpcCall() throws Exception { GrantRequest req = - ShadedAccessControlUtil.buildGrantRequest(userPermission, mergeExistingPermissions); + ShadedAccessControlUtil.buildGrantRequest(userPermission, mergeExistingPermissions); this.master.grant(getRpcController(), req); return null; } @@ -4363,18 +4299,18 @@ public class HBaseAdmin implements Admin { @Override public List - getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { return executeCallable( new MasterCallable>(getConnection(), getRpcControllerFactory()) { @Override protected List rpcCall() throws Exception { AccessControlProtos.GetUserPermissionsRequest req = - 
ShadedAccessControlUtil.buildGetUserPermissionsRequest(getUserPermissionsRequest); + ShadedAccessControlUtil.buildGetUserPermissionsRequest(getUserPermissionsRequest); AccessControlProtos.GetUserPermissionsResponse response = - this.master.getUserPermissions(getRpcController(), req); + this.master.getUserPermissions(getRpcController(), req); return response.getUserPermissionList().stream() - .map(userPermission -> ShadedAccessControlUtil.toUserPermission(userPermission)) - .collect(Collectors.toList()); + .map(userPermission -> ShadedAccessControlUtil.toUserPermission(userPermission)) + .collect(Collectors.toList()); } }); } @@ -4391,23 +4327,22 @@ public class HBaseAdmin implements Admin { @Override public List hasUserPermissions(String userName, List permissions) - throws IOException { + throws IOException { return executeCallable( new MasterCallable>(getConnection(), getRpcControllerFactory()) { @Override protected List rpcCall() throws Exception { HasUserPermissionsRequest request = - ShadedAccessControlUtil.buildHasUserPermissionsRequest(userName, permissions); + ShadedAccessControlUtil.buildHasUserPermissionsRequest(userName, permissions); return this.master.hasUserPermissions(getRpcController(), request) - .getHasUserPermissionList(); + .getHasUserPermissionList(); } }); } @Override public boolean snapshotCleanupSwitch(boolean on, boolean synchronous) throws IOException { - return executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { + return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { @@ -4421,8 +4356,7 @@ public class HBaseAdmin implements Admin { @Override public boolean isSnapshotCleanupEnabled() throws IOException { - return executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { + return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { @@ -4434,24 +4368,22 @@ public class HBaseAdmin implements Admin { } - private List getSlowLogResponses( - final Map filterParams, final Set serverNames, final int limit, - final String logType) { + private List getSlowLogResponses(final Map filterParams, + final Set serverNames, final int limit, final String logType) { if (CollectionUtils.isEmpty(serverNames)) { return Collections.emptyList(); } return serverNames.stream().map(serverName -> { - try { - return getSlowLogResponseFromServer(serverName, filterParams, limit, logType); - } catch (IOException e) { - throw new RuntimeException(e); - } + try { + return getSlowLogResponseFromServer(serverName, filterParams, limit, logType); + } catch (IOException e) { + throw new RuntimeException(e); } - ).flatMap(List::stream).collect(Collectors.toList()); + }).flatMap(List::stream).collect(Collectors.toList()); } private List getSlowLogResponseFromServer(ServerName serverName, - Map filterParams, int limit, String logType) throws IOException { + Map filterParams, int limit, String logType) throws IOException { AdminService.BlockingInterface admin = this.connection.getAdmin(serverName); return executeCallable(new RpcRetryingCallable>() { @Override @@ -4467,7 +4399,7 @@ public class HBaseAdmin implements Admin { @Override public List clearSlowLogResponses(@Nullable final Set serverNames) - throws IOException { + throws IOException { if (CollectionUtils.isEmpty(serverNames)) { return Collections.emptyList(); } @@ -4482,7 +4414,7 @@ public class HBaseAdmin implements 
Admin { @Override public List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) throws IOException { + ServerType serverType, int limit, Map filterParams) throws IOException { if (logType == null || serverType == null) { throw new IllegalArgumentException("logType and/or serverType cannot be empty"); } @@ -4508,27 +4440,27 @@ public class HBaseAdmin implements Admin { } private List getBalancerDecisions(final int limit) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - HBaseProtos.LogEntry logEntry = - master.getLogEntries(getRpcController(), ProtobufUtil.toBalancerDecisionRequest(limit)); - return ProtobufUtil.toBalancerDecisionResponse(logEntry); - } - }); + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + HBaseProtos.LogEntry logEntry = + master.getLogEntries(getRpcController(), ProtobufUtil.toBalancerDecisionRequest(limit)); + return ProtobufUtil.toBalancerDecisionResponse(logEntry); + } + }); } private List getBalancerRejections(final int limit) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - HBaseProtos.LogEntry logEntry = - master.getLogEntries(getRpcController(), ProtobufUtil.toBalancerRejectionRequest(limit)); - return ProtobufUtil.toBalancerRejectionResponse(logEntry); - } - }); + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + HBaseProtos.LogEntry logEntry = master.getLogEntries(getRpcController(), + ProtobufUtil.toBalancerRejectionRequest(limit)); + return ProtobufUtil.toBalancerRejectionResponse(logEntry); + } + }); } private Boolean clearSlowLogsResponses(final ServerName serverName) throws IOException { @@ -4537,9 +4469,8 @@ public class HBaseAdmin implements Admin { @Override protected Boolean rpcCall(int callTimeout) throws Exception { HBaseRpcController controller = rpcControllerFactory.newController(); - AdminProtos.ClearSlowLogResponses clearSlowLogResponses = - admin.clearSlowLogsResponses(controller, - RequestConverter.buildClearSlowLogResponseRequest()); + AdminProtos.ClearSlowLogResponses clearSlowLogResponses = admin + .clearSlowLogsResponses(controller, RequestConverter.buildClearSlowLogResponseRequest()); return ProtobufUtil.toClearSlowLogPayload(clearSlowLogResponses); } }); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java index 85920b66d11..8eaaa10addd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,18 +53,19 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignsR /** * Use {@link ClusterConnection#getHbck()} to obtain an instance of {@link Hbck} instead of * constructing an HBaseHbck directly. - * - *

<p>Connection should be an unmanaged connection obtained via
- * {@link ConnectionFactory#createConnection(Configuration)}.</p>
- *
- * <p>NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at
- * the wrong time. Use with caution. For experts only. These methods are only for the
- * extreme case where the cluster has been damaged or has achieved an inconsistent state because
- * of some unforeseen circumstance or bug and requires manual intervention.
- *
- * <p>An instance of this class is lightweight and not-thread safe. A new instance should be created
- * by each thread. Pooling or caching of the instance is not recommended.</p>
- *
+ * <p>
+ * Connection should be an unmanaged connection obtained via
+ * {@link ConnectionFactory#createConnection(Configuration)}.
+ * </p>
+ * <p>
+ * NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at the
+ * wrong time. Use with caution. For experts only. These methods are only for the extreme case where
+ * the cluster has been damaged or has achieved an inconsistent state because of some unforeseen
+ * circumstance or bug and requires manual intervention.
+ * <p>
+ * An instance of this class is lightweight and not-thread safe. A new instance should be created by
+ * each thread. Pooling or caching of the instance is not recommended.
+ * </p>

    * @see ConnectionFactory * @see ClusterConnection * @see Hbck @@ -103,8 +104,8 @@ public class HBaseHbck implements Hbck { @Override public TableState setTableStateInMeta(TableState state) throws IOException { try { - GetTableStateResponse response = hbck.setTableStateInMeta( - rpcControllerFactory.newController(), + GetTableStateResponse response = + hbck.setTableStateInMeta(rpcControllerFactory.newController(), RequestConverter.buildSetTableStateInMetaRequest(state)); return TableState.convert(state.getTableName(), response.getTableState()); } catch (ServiceException se) { @@ -135,11 +136,10 @@ public class HBaseHbck implements Hbck { } @Override - public List assigns(List encodedRegionNames, boolean override) - throws IOException { + public List assigns(List encodedRegionNames, boolean override) throws IOException { try { AssignsResponse response = this.hbck.assigns(rpcControllerFactory.newController(), - RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -149,10 +149,10 @@ public class HBaseHbck implements Hbck { @Override public List unassigns(List encodedRegionNames, boolean override) - throws IOException { + throws IOException { try { UnassignsResponse response = this.hbck.unassigns(rpcControllerFactory.newController(), - RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -166,38 +166,34 @@ public class HBaseHbck implements Hbck { @Override public List bypassProcedure(List pids, long waitTime, boolean override, - boolean recursive) - throws IOException { - BypassProcedureResponse response = ProtobufUtil.call( - new Callable() { - @Override - public BypassProcedureResponse call() throws Exception { - try { - return hbck.bypassProcedure(rpcControllerFactory.newController(), - BypassProcedureRequest.newBuilder().addAllProcId(pids). - setWaitTime(waitTime).setOverride(override).setRecursive(recursive).build()); - } catch (Throwable t) { - LOG.error(pids.stream().map(i -> i.toString()). 
- collect(Collectors.joining(", ")), t); - throw t; - } - } - }); + boolean recursive) throws IOException { + BypassProcedureResponse response = ProtobufUtil.call(new Callable() { + @Override + public BypassProcedureResponse call() throws Exception { + try { + return hbck.bypassProcedure(rpcControllerFactory.newController(), + BypassProcedureRequest.newBuilder().addAllProcId(pids).setWaitTime(waitTime) + .setOverride(override).setRecursive(recursive).build()); + } catch (Throwable t) { + LOG.error(pids.stream().map(i -> i.toString()).collect(Collectors.joining(", ")), t); + throw t; + } + } + }); return response.getBypassedList(); } @Override - public List scheduleServerCrashProcedures(List serverNames) - throws IOException { + public List scheduleServerCrashProcedures(List serverNames) throws IOException { try { ScheduleServerCrashProcedureResponse response = - this.hbck.scheduleServerCrashProcedure(rpcControllerFactory.newController(), - RequestConverter.toScheduleServerCrashProcedureRequest(serverNames)); + this.hbck.scheduleServerCrashProcedure(rpcControllerFactory.newController(), + RequestConverter.toScheduleServerCrashProcedureRequest(serverNames)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString( serverNames.stream().map(serverName -> ProtobufUtil.toServerName(serverName).toString()) - .collect(Collectors.toList())), + .collect(Collectors.toList())), se); throw new IOException(se); } @@ -207,8 +203,7 @@ public class HBaseHbck implements Hbck { public List scheduleSCPsForUnknownServers() throws IOException { try { ScheduleSCPsForUnknownServersResponse response = - this.hbck.scheduleSCPsForUnknownServers( - rpcControllerFactory.newController(), + this.hbck.scheduleSCPsForUnknownServers(rpcControllerFactory.newController(), ScheduleSCPsForUnknownServersRequest.newBuilder().build()); return response.getPidList(); } catch (ServiceException se) { @@ -221,7 +216,7 @@ public class HBaseHbck implements Hbck { public boolean runHbckChore() throws IOException { try { RunHbckChoreResponse response = this.hbck.runHbckChore(rpcControllerFactory.newController(), - RunHbckChoreRequest.newBuilder().build()); + RunHbckChoreRequest.newBuilder().build()); return response.getRan(); } catch (ServiceException se) { LOG.debug("Failed to run HBCK chore", se); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java index 11c9cc11b24..aee9f7d4e50 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java @@ -1,24 +1,24 @@ -/** -* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
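A usage sketch for the Hbck client reformatted above, assuming the Connection can be treated as a ClusterConnection as the class comment prescribes; the encoded region name is supplied by the caller, and override=true skips the usual state checks.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Hbck;

public class HbckAssignSketch {
  // Expert-only repair path: schedule an assign procedure for a stuck region.
  static void forceAssign(Connection conn, String encodedRegionName) throws Exception {
    try (Hbck hbck = ((ClusterConnection) conn).getHbck()) {
      List<Long> pids = hbck.assigns(Collections.singletonList(encodedRegionName), true);
      System.out.println("Scheduled assign procedure(s): " + pids);
    }
  }
}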
-*/ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.REGION_NAMES_KEY; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.context.Scope; @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.trace.TableSpanBuilder; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** @@ -73,23 +74,18 @@ public class HRegionLocator implements RegionLocator { public HRegionLocation getRegionLocation(byte[] row, int replicaId, boolean reload) throws IOException { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.getRegionLocation") - .setTableName(tableName); - return tracedLocationFuture( - () -> connection.locateRegion(tableName, row, !reload, true, replicaId) - .getRegionLocation(replicaId), - AsyncRegionLocator::getRegionNames, - supplier); + .setName("HRegionLocator.getRegionLocation").setTableName(tableName); + return tracedLocationFuture(() -> connection + .locateRegion(tableName, row, !reload, true, replicaId).getRegionLocation(replicaId), + AsyncRegionLocator::getRegionNames, supplier); } @Override public List getRegionLocations(byte[] row, boolean reload) throws IOException { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.getRegionLocations") - .setTableName(tableName); + .setName("HRegionLocator.getRegionLocations").setTableName(tableName); final RegionLocations locs = tracedLocationFuture( - () -> connection.locateRegion(tableName, row, !reload, true, - RegionInfo.DEFAULT_REPLICA_ID), + () -> connection.locateRegion(tableName, row, !reload, true, RegionInfo.DEFAULT_REPLICA_ID), AsyncRegionLocator::getRegionNames, supplier); return Arrays.asList(locs.getRegionLocations()); } @@ -97,8 +93,7 @@ public class HRegionLocator implements RegionLocator { @Override public List getAllRegionLocations() throws IOException { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.getAllRegionLocations") - .setTableName(tableName); + .setName("HRegionLocator.getAllRegionLocations").setTableName(tableName); return tracedLocationFuture(() -> { ArrayList regions = new ArrayList<>(); for (RegionLocations locations : listRegionLocations()) { @@ -115,19 +110,14 @@ public class HRegionLocator implements RegionLocator { if (CollectionUtils.isEmpty(locations)) { return Collections.emptyList(); } - return locations.stream() - .filter(Objects::nonNull) - .map(AsyncRegionLocator::getRegionNames) - .filter(Objects::nonNull) - 
.flatMap(List::stream) - .collect(Collectors.toList()); + return locations.stream().filter(Objects::nonNull).map(AsyncRegionLocator::getRegionNames) + .filter(Objects::nonNull).flatMap(List::stream).collect(Collectors.toList()); } @Override public void clearRegionLocationCache() { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.clearRegionLocationCache") - .setTableName(tableName); + .setName("HRegionLocator.clearRegionLocationCache").setTableName(tableName); TraceUtil.trace(() -> connection.clearRegionCache(tableName), supplier); } @@ -157,11 +147,8 @@ public class HRegionLocator implements RegionLocator { return regions; } - private R tracedLocationFuture( - TraceUtil.ThrowingCallable action, - Function> getRegionNames, - Supplier spanSupplier - ) throws T { + private R tracedLocationFuture(TraceUtil.ThrowingCallable action, + Function> getRegionNames, Supplier spanSupplier) throws T { final Span span = spanSupplier.get(); try (Scope ignored = span.makeCurrent()) { final R result = action.call(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index fe2ae0c82ce..945d735d770 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -144,9 +144,9 @@ public class HTable implements Table { // we only create as many Runnables as there are region servers. It means // it also scales when new region servers are added. ThreadPoolExecutor pool = - new ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("htable-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime, TimeUnit.SECONDS, + new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("htable-pool-%d") + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); pool.allowCoreThreadTimeOut(true); return pool; } @@ -155,16 +155,16 @@ public class HTable implements Table { * Creates an object to access a HBase table. Used by HBase internally. DO NOT USE. See * {@link ConnectionFactory} class comment for how to get a {@link Table} instance (use * {@link Table} instead of {@link HTable}). - * @param connection Connection to be used. - * @param builder The table builder - * @param rpcCallerFactory The RPC caller factory + * @param connection Connection to be used. + * @param builder The table builder + * @param rpcCallerFactory The RPC caller factory * @param rpcControllerFactory The RPC controller factory - * @param pool ExecutorService to be used. + * @param pool ExecutorService to be used. 
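For the HRegionLocator and HTable changes here, a sketch of the supported way to reach these types through ConnectionFactory rather than constructing them directly; table, row, and family names are placeholders.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LocatorAndTableSketch {
  // Lists region locations for a hypothetical table, then issues a single Get against it.
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("demo_table");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      RegionLocator locator = conn.getRegionLocator(tn);
      Table table = conn.getTable(tn)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println(tn + " has " + locations.size() + " region(s)");
      Result result = table.get(new Get(Bytes.toBytes("row-1")).addFamily(Bytes.toBytes("cf")));
      System.out.println("row-1 present: " + !result.isEmpty());
    }
  }
}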
*/ @InterfaceAudience.Private protected HTable(final ConnectionImplementation connection, final TableBuilderBase builder, - final RpcRetryingCallerFactory rpcCallerFactory, - final RpcControllerFactory rpcControllerFactory, final ExecutorService pool) { + final RpcRetryingCallerFactory rpcCallerFactory, + final RpcControllerFactory rpcControllerFactory, final ExecutorService pool) { this.connection = Preconditions.checkNotNull(connection, "connection is null"); this.configuration = connection.getConfiguration(); this.connConfiguration = connection.getConnectionConfiguration(); @@ -245,35 +245,35 @@ public class HTable implements Table { /** * Get the corresponding start keys and regions for an arbitrary range of keys. *

    - * @param startKey Starting row in range, inclusive - * @param endKey Ending row in range + * @param startKey Starting row in range, inclusive + * @param endKey Ending row in range * @param includeEndKey true if endRow is inclusive, false if exclusive * @return A pair of list of start keys and list of HRegionLocations that contain the specified * range * @throws IOException if a remote or network exception occurs */ private Pair, List> getKeysAndRegionsInRange(final byte[] startKey, - final byte[] endKey, final boolean includeEndKey) throws IOException { + final byte[] endKey, final boolean includeEndKey) throws IOException { return getKeysAndRegionsInRange(startKey, endKey, includeEndKey, false); } /** * Get the corresponding start keys and regions for an arbitrary range of keys. *

    - * @param startKey Starting row in range, inclusive - * @param endKey Ending row in range + * @param startKey Starting row in range, inclusive + * @param endKey Ending row in range * @param includeEndKey true if endRow is inclusive, false if exclusive - * @param reload true to reload information or false to use cached information + * @param reload true to reload information or false to use cached information * @return A pair of list of start keys and list of HRegionLocations that contain the specified * range * @throws IOException if a remote or network exception occurs */ private Pair, List> getKeysAndRegionsInRange(final byte[] startKey, - final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException { + final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException { final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW); if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) { - throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(startKey) + " > " - + Bytes.toStringBinary(endKey)); + throw new IllegalArgumentException( + "Invalid range: " + Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey)); } List keysInRange = new ArrayList<>(); List regionsInRange = new ArrayList<>(); @@ -283,9 +283,11 @@ public class HTable implements Table { keysInRange.add(currentKey); regionsInRange.add(regionLocation); currentKey = regionLocation.getRegionInfo().getEndKey(); - } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) + } while ( + !Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0 - || (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0))); + || (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0)) + ); return new Pair<>(keysInRange, regionsInRange); } @@ -295,8 +297,8 @@ public class HTable implements Table { */ @Override public ResultScanner getScanner(Scan scan) throws IOException { - final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName) - .setOperation(scan).build(); + final Span span = + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(scan).build(); try (Scope ignored = span.makeCurrent()) { if (scan.getCaching() <= 0) { scan.setCaching(scannerCaching); @@ -308,20 +310,21 @@ public class HTable implements Table { // it is not supposed to be set by user, clear scan.resetMvccReadPoint(); } - final boolean async = scan.isAsyncPrefetch() != null ? scan.isAsyncPrefetch() - : connConfiguration.isClientScannerAsyncPrefetch(); + final boolean async = scan.isAsyncPrefetch() != null + ? 
scan.isAsyncPrefetch() + : connConfiguration.isClientScannerAsyncPrefetch(); final int timeout = connConfiguration.getReplicaCallTimeoutMicroSecondScan(); if (scan.isReversed()) { return new ReversedClientScanner(getConfiguration(), scan, getName(), connection, - rpcCallerFactory, rpcControllerFactory, pool, timeout); + rpcCallerFactory, rpcControllerFactory, pool, timeout); } else { if (async) { return new ClientAsyncPrefetchScanner(getConfiguration(), scan, getName(), connection, - rpcCallerFactory, rpcControllerFactory, pool, timeout); + rpcCallerFactory, rpcControllerFactory, pool, timeout); } else { return new ClientSimpleScanner(getConfiguration(), scan, getName(), connection, - rpcCallerFactory, rpcControllerFactory, pool, timeout); + rpcCallerFactory, rpcControllerFactory, pool, timeout); } } } @@ -352,7 +355,7 @@ public class HTable implements Table { @Override public Result get(final Get get) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get); + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get); return TraceUtil.trace(() -> get(get, get.isCheckExistenceOnly()), supplier); } @@ -369,14 +372,15 @@ public class HTable implements Table { if (get.getConsistency() == Consistency.STRONG) { final Get configuredGet = get; ClientServiceCallable callable = new ClientServiceCallable(this.connection, - getName(), get.getRow(), this.rpcControllerFactory.newController(), get.getPriority()) { + getName(), get.getRow(), this.rpcControllerFactory.newController(), get.getPriority()) { @Override protected Result rpcCall() throws Exception { ClientProtos.GetRequest request = RequestConverter - .buildGetRequest(getLocation().getRegionInfo().getRegionName(), configuredGet); + .buildGetRequest(getLocation().getRegionInfo().getRegionName(), configuredGet); ClientProtos.GetResponse response = doGet(request); - return response == null ? null - : ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + return response == null + ? null + : ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); } }; return rpcCallerFactory. 
newCaller(readRpcTimeoutMs).callWithRetries(callable, @@ -385,17 +389,17 @@ public class HTable implements Table { // Call that takes into account the replica RpcRetryingCallerWithReadReplicas callable = - new RpcRetryingCallerWithReadReplicas(rpcControllerFactory, tableName, this.connection, get, - pool, connConfiguration.getRetriesNumber(), operationTimeoutMs, readRpcTimeoutMs, - connConfiguration.getPrimaryCallTimeoutMicroSecond()); + new RpcRetryingCallerWithReadReplicas(rpcControllerFactory, tableName, this.connection, get, + pool, connConfiguration.getRetriesNumber(), operationTimeoutMs, readRpcTimeoutMs, + connConfiguration.getPrimaryCallTimeoutMicroSecond()); return callable.call(operationTimeoutMs); } @Override public Result[] get(List gets) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets); + new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets); return TraceUtil.trace(() -> { if (gets.size() == 1) { return new Result[] { get(gets.get(0)) }; @@ -419,7 +423,7 @@ public class HTable implements Table { @Override public void batch(final List actions, final Object[] results) - throws InterruptedException, IOException { + throws InterruptedException, IOException { int rpcTimeout = writeRpcTimeoutMs; boolean hasRead = false; boolean hasWrite = false; @@ -444,14 +448,14 @@ public class HTable implements Table { } public void batch(final List actions, final Object[] results, int rpcTimeout) - throws InterruptedException, IOException { + throws InterruptedException, IOException { AsyncProcessTask task = - AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName).setRowAccess(actions) - .setResults(results).setRpcTimeout(rpcTimeout).setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName).setRowAccess(actions) + .setResults(results).setRpcTimeout(rpcTimeout).setOperationTimeout(operationTimeoutMs) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions) - .build(); + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions) + .build(); try (Scope ignored = span.makeCurrent()) { AsyncRequestFuture ars = multiAp.submit(task); ars.waitUntilDone(); @@ -467,24 +471,24 @@ public class HTable implements Table { @Override public void batchCallback(final List actions, final Object[] results, - final Batch.Callback callback) throws IOException, InterruptedException { + final Batch.Callback callback) throws IOException, InterruptedException { doBatchWithCallback(actions, results, callback, connection, pool, tableName); } public static void doBatchWithCallback(List actions, Object[] results, - Callback callback, ClusterConnection connection, ExecutorService pool, TableName tableName) - throws InterruptedIOException, RetriesExhaustedWithDetailsException { + Callback callback, ClusterConnection connection, ExecutorService pool, TableName tableName) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout(); int writeTimeout = 
connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); - AsyncProcessTask task = AsyncProcessTask.newBuilder(callback).setPool(pool) - .setTableName(tableName).setRowAccess(actions).setResults(results) - .setOperationTimeout(operationTimeout).setRpcTimeout(writeTimeout) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + AsyncProcessTask task = + AsyncProcessTask.newBuilder(callback).setPool(pool).setTableName(tableName) + .setRowAccess(actions).setResults(results).setOperationTimeout(operationTimeout) + .setRpcTimeout(writeTimeout).setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions) - .build(); + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions) + .build(); try (Scope ignored = span.makeCurrent()) { AsyncRequestFuture ars = connection.getAsyncProcess().submit(task); ars.waitUntilDone(); @@ -500,19 +504,19 @@ public class HTable implements Table { @Override public void delete(final Delete delete) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(delete); + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(delete); TraceUtil.trace(() -> { ClientServiceCallable callable = - new ClientServiceCallable(this.connection, getName(), delete.getRow(), - this.rpcControllerFactory.newController(), delete.getPriority()) { - @Override - protected Void rpcCall() throws Exception { - MutateRequest request = RequestConverter - .buildMutateRequest(getLocation().getRegionInfo().getRegionName(), delete); - doMutate(request); - return null; - } - }; + new ClientServiceCallable(this.connection, getName(), delete.getRow(), + this.rpcControllerFactory.newController(), delete.getPriority()) { + @Override + protected Void rpcCall() throws Exception { + MutateRequest request = RequestConverter + .buildMutateRequest(getLocation().getRegionInfo().getRegionName(), delete); + doMutate(request); + return null; + } + }; rpcCallerFactory. 
newCaller(this.writeRpcTimeoutMs).callWithRetries(callable, this.operationTimeoutMs); }, supplier); @@ -542,15 +546,15 @@ public class HTable implements Table { @Override public void put(final Put put) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(put); + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(put); TraceUtil.trace(() -> { validatePut(put); ClientServiceCallable callable = new ClientServiceCallable(this.connection, - getName(), put.getRow(), this.rpcControllerFactory.newController(), put.getPriority()) { + getName(), put.getRow(), this.rpcControllerFactory.newController(), put.getPriority()) { @Override protected Void rpcCall() throws Exception { - MutateRequest request = RequestConverter - .buildMutateRequest(getLocation().getRegionInfo().getRegionName(), put); + MutateRequest request = + RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), put); doMutate(request); return null; } @@ -576,37 +580,37 @@ public class HTable implements Table { @Override public Result mutateRow(final RowMutations rm) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(rm); + new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(rm); return TraceUtil.trace(() -> { long nonceGroup = getNonceGroup(); long nonce = getNonce(); CancellableRegionServerCallable callable = - new CancellableRegionServerCallable(this.connection, getName(), - rm.getRow(), rpcControllerFactory.newController(), writeRpcTimeoutMs, - new RetryingTimeTracker().start(), rm.getMaxPriority()) { - @Override - protected MultiResponse rpcCall() throws Exception { - MultiRequest request = RequestConverter.buildMultiRequest( - getLocation().getRegionInfo().getRegionName(), rm, nonceGroup, nonce); - ClientProtos.MultiResponse response = doMulti(request); - ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); - if (res.hasException()) { - Throwable ex = ProtobufUtil.toException(res.getException()); - if (ex instanceof IOException) { - throw (IOException) ex; - } - throw new IOException("Failed to mutate row: " + Bytes.toStringBinary(rm.getRow()), - ex); + new CancellableRegionServerCallable(this.connection, getName(), rm.getRow(), + rpcControllerFactory.newController(), writeRpcTimeoutMs, + new RetryingTimeTracker().start(), rm.getMaxPriority()) { + @Override + protected MultiResponse rpcCall() throws Exception { + MultiRequest request = RequestConverter.buildMultiRequest( + getLocation().getRegionInfo().getRegionName(), rm, nonceGroup, nonce); + ClientProtos.MultiResponse response = doMulti(request); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if (ex instanceof IOException) { + throw (IOException) ex; } - return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); + throw new IOException("Failed to mutate row: " + Bytes.toStringBinary(rm.getRow()), + ex); } - }; + return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); + } + }; Object[] results = new Object[rm.getMutations().size()]; AsyncProcessTask task = 
AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName) - .setRowAccess(rm.getMutations()).setCallable(callable).setRpcTimeout(writeRpcTimeoutMs) - .setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).setResults(results).build(); + .setRowAccess(rm.getMutations()).setCallable(callable).setRpcTimeout(writeRpcTimeoutMs) + .setOperationTimeout(operationTimeoutMs) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).setResults(results).build(); AsyncRequestFuture ars = multiAp.submit(task); ars.waitUntilDone(); if (ars.hasError()) { @@ -627,24 +631,24 @@ public class HTable implements Table { @Override public Result append(final Append append) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(append); + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(append); return TraceUtil.trace(() -> { checkHasFamilies(append); NoncedRegionServerCallable callable = - new NoncedRegionServerCallable(this.connection, getName(), append.getRow(), - this.rpcControllerFactory.newController(), append.getPriority()) { - @Override - protected Result rpcCall() throws Exception { - MutateRequest request = - RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), - append, super.getNonceGroup(), super.getNonce()); - MutateResponse response = doMutate(request); - if (!response.hasResult()) { - return null; - } - return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + new NoncedRegionServerCallable(this.connection, getName(), append.getRow(), + this.rpcControllerFactory.newController(), append.getPriority()) { + @Override + protected Result rpcCall() throws Exception { + MutateRequest request = + RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), + append, super.getNonceGroup(), super.getNonce()); + MutateResponse response = doMutate(request); + if (!response.hasResult()) { + return null; } - }; + return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + } + }; return rpcCallerFactory. newCaller(this.writeRpcTimeoutMs).callWithRetries(callable, this.operationTimeoutMs); }, supplier); @@ -653,22 +657,22 @@ public class HTable implements Table { @Override public Result increment(final Increment increment) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(increment); + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(increment); return TraceUtil.trace(() -> { checkHasFamilies(increment); NoncedRegionServerCallable callable = - new NoncedRegionServerCallable(this.connection, getName(), increment.getRow(), - this.rpcControllerFactory.newController(), increment.getPriority()) { - @Override - protected Result rpcCall() throws Exception { - MutateRequest request = - RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), - increment, super.getNonceGroup(), super.getNonce()); - MutateResponse response = doMutate(request); - // Should this check for null like append does? 
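For the increment paths reformatted here, a sketch of the two client-facing forms, with placeholder table, family, and qualifier names.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementSketch {
  // Bumps a counter twice: once via the single-column convenience method, once via a full
  // Increment, which can update several columns in one atomic call.
  static long bumpCounter(Connection conn) throws Exception {
    byte[] row = Bytes.toBytes("row-1");
    byte[] cf = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("hits");
    try (Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      long afterFirst = table.incrementColumnValue(row, cf, qualifier, 1L);
      System.out.println("counter after first bump: " + afterFirst);
      Result result = table.increment(new Increment(row).addColumn(cf, qualifier, 1L));
      return Bytes.toLong(result.getValue(cf, qualifier));
    }
  }
}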
- return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); - } - }; + new NoncedRegionServerCallable(this.connection, getName(), increment.getRow(), + this.rpcControllerFactory.newController(), increment.getPriority()) { + @Override + protected Result rpcCall() throws Exception { + MutateRequest request = + RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), + increment, super.getNonceGroup(), super.getNonce()); + MutateResponse response = doMutate(request); + // Should this check for null like append does? + return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + } + }; return rpcCallerFactory. newCaller(writeRpcTimeoutMs).callWithRetries(callable, this.operationTimeoutMs); }, supplier); @@ -676,15 +680,15 @@ public class HTable implements Table { @Override public long incrementColumnValue(final byte[] row, final byte[] family, final byte[] qualifier, - final long amount) throws IOException { + final long amount) throws IOException { return incrementColumnValue(row, family, qualifier, amount, Durability.SYNC_WAL); } @Override public long incrementColumnValue(final byte[] row, final byte[] family, final byte[] qualifier, - final long amount, final Durability durability) throws IOException { + final long amount, final Durability durability) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.INCREMENT); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.INCREMENT); return TraceUtil.trace(() -> { NullPointerException npe = null; if (row == null) { @@ -697,19 +701,19 @@ public class HTable implements Table { } NoncedRegionServerCallable callable = - new NoncedRegionServerCallable(this.connection, getName(), row, - this.rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Long rpcCall() throws Exception { - MutateRequest request = RequestConverter.buildIncrementRequest( - getLocation().getRegionInfo().getRegionName(), row, family, qualifier, amount, - durability, super.getNonceGroup(), super.getNonce()); - MutateResponse response = doMutate(request); - Result result = - ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); - return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier))); - } - }; + new NoncedRegionServerCallable(this.connection, getName(), row, + this.rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { + @Override + protected Long rpcCall() throws Exception { + MutateRequest request = RequestConverter.buildIncrementRequest( + getLocation().getRegionInfo().getRegionName(), row, family, qualifier, amount, + durability, super.getNonceGroup(), super.getNonce()); + MutateResponse response = doMutate(request); + Result result = + ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier))); + } + }; return rpcCallerFactory. 
newCaller(this.writeRpcTimeoutMs).callWithRetries(callable, this.operationTimeoutMs); }, supplier); @@ -718,25 +722,25 @@ public class HTable implements Table { @Override @Deprecated public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier, - final byte[] value, final Put put) throws IOException { + final byte[] value, final Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, - HBaseSemanticAttributes.Operation.PUT); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.PUT); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL, value, null, null, put) - .isSuccess(), + .isSuccess(), supplier); } @Override @Deprecated public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOp compareOp, final byte[] value, final Put put) throws IOException { + final CompareOp compareOp, final byte[] value, final Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, - HBaseSemanticAttributes.Operation.PUT); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.PUT); return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null, null, put).isSuccess(), supplier); } @@ -744,11 +748,11 @@ public class HTable implements Table { @Override @Deprecated public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final Put put) throws IOException { + final CompareOperator op, final byte[] value, final Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, - HBaseSemanticAttributes.Operation.PUT); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.PUT); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, put).isSuccess(), supplier); @@ -757,11 +761,11 @@ public class HTable implements Table { @Override @Deprecated public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier, - final byte[] value, final Delete delete) throws IOException { + final byte[] value, final Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, - HBaseSemanticAttributes.Operation.DELETE); + 
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.DELETE); return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL, value, null, null, delete).isSuccess(), supplier); } @@ -769,11 +773,11 @@ public class HTable implements Table { @Override @Deprecated public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOp compareOp, final byte[] value, final Delete delete) throws IOException { + final CompareOp compareOp, final byte[] value, final Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, - HBaseSemanticAttributes.Operation.DELETE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.DELETE); return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null, null, delete).isSuccess(), supplier); } @@ -781,11 +785,11 @@ public class HTable implements Table { @Override @Deprecated public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final Delete delete) throws IOException { + final CompareOperator op, final byte[] value, final Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, - HBaseSemanticAttributes.Operation.DELETE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.DELETE); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, delete).isSuccess(), supplier); @@ -804,32 +808,32 @@ public class HTable implements Table { } private CheckAndMutateResult doCheckAndMutate(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, - final TimeRange timeRange, final RowMutations rm) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, + final TimeRange timeRange, final RowMutations rm) throws IOException { long nonceGroup = getNonceGroup(); long nonce = getNonce(); CancellableRegionServerCallable callable = - new CancellableRegionServerCallable(connection, getName(), rm.getRow(), - rpcControllerFactory.newController(), writeRpcTimeoutMs, - new RetryingTimeTracker().start(), rm.getMaxPriority()) { - @Override - protected MultiResponse rpcCall() throws Exception { - MultiRequest request = - RequestConverter.buildMultiRequest(getLocation().getRegionInfo().getRegionName(), - row, family, qualifier, op, value, filter, timeRange, rm, nonceGroup, nonce); - ClientProtos.MultiResponse response = doMulti(request); - ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); - if (res.hasException()) { - 
Throwable ex = ProtobufUtil.toException(res.getException()); - if (ex instanceof IOException) { - throw (IOException) ex; - } - throw new IOException( - "Failed to checkAndMutate row: " + Bytes.toStringBinary(rm.getRow()), ex); + new CancellableRegionServerCallable(connection, getName(), rm.getRow(), + rpcControllerFactory.newController(), writeRpcTimeoutMs, new RetryingTimeTracker().start(), + rm.getMaxPriority()) { + @Override + protected MultiResponse rpcCall() throws Exception { + MultiRequest request = + RequestConverter.buildMultiRequest(getLocation().getRegionInfo().getRegionName(), row, + family, qualifier, op, value, filter, timeRange, rm, nonceGroup, nonce); + ClientProtos.MultiResponse response = doMulti(request); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if (ex instanceof IOException) { + throw (IOException) ex; } - return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); + throw new IOException( + "Failed to checkAndMutate row: " + Bytes.toStringBinary(rm.getRow()), ex); } - }; + return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); + } + }; /** * Currently, we use one array to store 'processed' flag which is returned by server. It is @@ -837,11 +841,11 @@ public class HTable implements Table { */ Object[] results = new Object[rm.getMutations().size()]; AsyncProcessTask task = AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName) - .setRowAccess(rm.getMutations()).setResults(results).setCallable(callable) - // TODO any better timeout? - .setRpcTimeout(Math.max(readRpcTimeoutMs, writeRpcTimeoutMs)) - .setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + .setRowAccess(rm.getMutations()).setResults(results).setCallable(callable) + // TODO any better timeout? 
+ .setRpcTimeout(Math.max(readRpcTimeoutMs, writeRpcTimeoutMs)) + .setOperationTimeout(operationTimeoutMs).setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) + .build(); AsyncRequestFuture ars = multiAp.submit(task); ars.waitUntilDone(); if (ars.hasError()) { @@ -854,10 +858,10 @@ public class HTable implements Table { @Override @Deprecated public boolean checkAndMutate(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOp compareOp, final byte[] value, final RowMutations rm) throws IOException { + final CompareOp compareOp, final byte[] value, final RowMutations rm) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(rm); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(rm); return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null, null, rm).isSuccess(), supplier); } @@ -865,10 +869,10 @@ public class HTable implements Table { @Override @Deprecated public boolean checkAndMutate(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final RowMutations rm) throws IOException { + final CompareOperator op, final byte[] value, final RowMutations rm) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(rm); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(rm); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, rm).isSuccess(), supplier); @@ -876,13 +880,14 @@ public class HTable implements Table { @Override public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException { - final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName) - .setOperation(checkAndMutate).setContainerOperations(checkAndMutate); + final Supplier supplier = new TableOperationSpanBuilder(connection) + .setTableName(tableName).setOperation(checkAndMutate).setContainerOperations(checkAndMutate); return TraceUtil.trace(() -> { Row action = checkAndMutate.getAction(); - if (action instanceof Put || action instanceof Delete || action instanceof Increment - || action instanceof Append) { + if ( + action instanceof Put || action instanceof Delete || action instanceof Increment + || action instanceof Append + ) { if (action instanceof Put) { validatePut((Put) action); } @@ -898,36 +903,36 @@ public class HTable implements Table { } private CheckAndMutateResult doCheckAndMutate(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, - final TimeRange timeRange, final Mutation mutation) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, + final TimeRange timeRange, final Mutation mutation) throws IOException { long nonceGroup = getNonceGroup(); long nonce = getNonce(); ClientServiceCallable callable = - new ClientServiceCallable(this.connection, getName(), row, - this.rpcControllerFactory.newController(), mutation.getPriority()) { - @Override - protected CheckAndMutateResult rpcCall() throws Exception { - 
MutateRequest request = RequestConverter.buildMutateRequest( - getLocation().getRegionInfo().getRegionName(), row, family, qualifier, op, value, - filter, timeRange, mutation, nonceGroup, nonce); - MutateResponse response = doMutate(request); - if (response.hasResult()) { - return new CheckAndMutateResult(response.getProcessed(), - ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner())); - } - return new CheckAndMutateResult(response.getProcessed(), null); + new ClientServiceCallable(this.connection, getName(), row, + this.rpcControllerFactory.newController(), mutation.getPriority()) { + @Override + protected CheckAndMutateResult rpcCall() throws Exception { + MutateRequest request = + RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), row, + family, qualifier, op, value, filter, timeRange, mutation, nonceGroup, nonce); + MutateResponse response = doMutate(request); + if (response.hasResult()) { + return new CheckAndMutateResult(response.getProcessed(), + ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner())); } - }; + return new CheckAndMutateResult(response.getProcessed(), null); + } + }; return rpcCallerFactory. newCaller(this.writeRpcTimeoutMs) - .callWithRetries(callable, this.operationTimeoutMs); + .callWithRetries(callable, this.operationTimeoutMs); } @Override public List checkAndMutate(List checkAndMutates) - throws IOException { + throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.BATCH) - .setContainerOperations(checkAndMutates); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.BATCH) + .setContainerOperations(checkAndMutates); return TraceUtil.trace(() -> { if (checkAndMutates.isEmpty()) { return Collections.emptyList(); @@ -984,7 +989,7 @@ public class HTable implements Table { @Override public boolean exists(final Get get) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get); + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get); return TraceUtil.trace(() -> { Result r = get(get, true); assert r.getExists() != null; @@ -995,8 +1000,8 @@ public class HTable implements Table { @Override public boolean[] exists(List gets) throws IOException { final Supplier supplier = - new TableOperationSpanBuilder(connection).setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets); + new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets); return TraceUtil.trace(() -> { if (gets.isEmpty()) { return new boolean[] {}; @@ -1034,21 +1039,21 @@ public class HTable implements Table { /** * Process a mixed batch of Get, Put and Delete actions. All actions for a RegionServer are * forwarded in one RPC call. Queries are executed in parallel. - * @param list The collection of actions. + * @param list The collection of actions. * @param results An empty array, same size as list. If an exception is thrown, you can test here - * for partial results, and to determine which actions processed successfully. + * for partial results, and to determine which actions processed successfully. * @throws IOException if there are problems talking to META. Per-item exceptions are stored in - * the results array. + * the results array. 
*/ public void processBatchCallback(final List list, final Object[] results, - final Batch.Callback callback) throws IOException, InterruptedException { + final Batch.Callback callback) throws IOException, InterruptedException { this.batchCallback(list, results, callback); } @Override public void close() throws IOException { final Supplier supplier = new TableSpanBuilder(connection).setName("HTable.close") - .setTableName(tableName).setSpanKind(SpanKind.INTERNAL); + .setTableName(tableName).setSpanKind(SpanKind.INTERNAL); TraceUtil.trace(() -> { if (this.closed) { return; @@ -1098,10 +1103,10 @@ public class HTable implements Table { @Override public Map coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable) - throws ServiceException, Throwable { + byte[] startKey, byte[] endKey, final Batch.Call callable) + throws ServiceException, Throwable { final Map results = - Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR)); + Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR)); coprocessorService(service, startKey, endKey, callable, (region, row, value) -> { if (region != null) { results.put(region, value); @@ -1112,10 +1117,10 @@ public class HTable implements Table { @Override public void coprocessorService(final Class service, byte[] startKey, - byte[] endKey, final Batch.Call callable, final Batch.Callback callback) - throws ServiceException, Throwable { + byte[] endKey, final Batch.Call callable, final Batch.Callback callback) + throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { final Context context = Context.current(); final ExecutorService wrappedPool = context.wrap(pool); @@ -1124,10 +1129,10 @@ public class HTable implements Table { Map> futures = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (final byte[] r : keys) { final RegionCoprocessorRpcChannel channel = - new RegionCoprocessorRpcChannel(connection, tableName, r); + new RegionCoprocessorRpcChannel(connection, tableName, r); Future future = wrappedPool.submit(() -> { T instance = - org.apache.hadoop.hbase.protobuf.ProtobufUtil.newServiceStub(service, channel); + org.apache.hadoop.hbase.protobuf.ProtobufUtil.newServiceStub(service, channel); R result = callable.call(instance); byte[] region = channel.getLastRegion(); if (callback != null) { @@ -1146,7 +1151,7 @@ public class HTable implements Table { throw ee.getCause(); } catch (InterruptedException ie) { throw new InterruptedIOException("Interrupted calling coprocessor service " - + service.getName() + " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie); + + service.getName() + " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie); } } }, supplier); @@ -1238,10 +1243,10 @@ public class HTable implements Table { @Override public Map batchCoprocessorService( - Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, - byte[] endKey, R responsePrototype) throws ServiceException, Throwable { + Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { final Map results = - Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR)); + Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR)); 
batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, (region, row, result) -> { if (region != null) { @@ -1253,20 +1258,20 @@ public class HTable implements Table { @Override public void batchCoprocessorService( - final Descriptors.MethodDescriptor methodDescriptor, final Message request, byte[] startKey, - byte[] endKey, final R responsePrototype, final Callback callback) - throws ServiceException, Throwable { + final Descriptors.MethodDescriptor methodDescriptor, final Message request, byte[] startKey, + byte[] endKey, final R responsePrototype, final Callback callback) + throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { final Context context = Context.current(); final byte[] sanitizedStartKey = - Optional.ofNullable(startKey).orElse(HConstants.EMPTY_START_ROW); + Optional.ofNullable(startKey).orElse(HConstants.EMPTY_START_ROW); final byte[] sanitizedEndKey = Optional.ofNullable(endKey).orElse(HConstants.EMPTY_END_ROW); // get regions covered by the row range Pair, List> keysAndRegions = - getKeysAndRegionsInRange(sanitizedStartKey, sanitizedEndKey, true); + getKeysAndRegionsInRange(sanitizedStartKey, sanitizedEndKey, true); List keys = keysAndRegions.getFirst(); List regions = keysAndRegions.getSecond(); @@ -1279,12 +1284,12 @@ public class HTable implements Table { List execs = new ArrayList<>(keys.size()); final Map execsByRow = - new TreeMap<>(Bytes.BYTES_COMPARATOR); + new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < keys.size(); i++) { final byte[] rowKey = keys.get(i); final byte[] region = regions.get(i).getRegionInfo().getRegionName(); RegionCoprocessorServiceExec exec = - new RegionCoprocessorServiceExec(region, rowKey, methodDescriptor, request); + new RegionCoprocessorServiceExec(region, rowKey, methodDescriptor, request); execs.add(exec); execsByRow.put(rowKey, exec); } @@ -1297,34 +1302,34 @@ public class HTable implements Table { Object[] results = new Object[execs.size()]; AsyncProcess asyncProcess = new AsyncProcess(connection, configuration, - RpcRetryingCallerFactory.instantiate(configuration, connection.getStatisticsTracker()), - RpcControllerFactory.instantiate(configuration)); + RpcRetryingCallerFactory.instantiate(configuration, connection.getStatisticsTracker()), + RpcControllerFactory.instantiate(configuration)); Callback resultsCallback = - (byte[] region, byte[] row, ClientProtos.CoprocessorServiceResult serviceResult) -> { - if (LOG.isTraceEnabled()) { - LOG.trace("Received result for endpoint {}: region={}, row={}, value={}", - methodDescriptor.getFullName(), Bytes.toStringBinary(region), - Bytes.toStringBinary(row), serviceResult.getValue().getValue()); - } - try { - Message.Builder builder = responsePrototype.newBuilderForType(); - org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builder, - serviceResult.getValue().getValue().toByteArray()); - callback.update(region, row, (R) builder.build()); - } catch (IOException e) { - LOG.error("Unexpected response type from endpoint {}", methodDescriptor.getFullName(), - e); - callbackErrorExceptions.add(e); - callbackErrorActions.add(execsByRow.get(row)); - callbackErrorServers.add("null"); - } - }; + (byte[] region, byte[] row, ClientProtos.CoprocessorServiceResult serviceResult) -> { + if (LOG.isTraceEnabled()) { 
+ LOG.trace("Received result for endpoint {}: region={}, row={}, value={}", + methodDescriptor.getFullName(), Bytes.toStringBinary(region), + Bytes.toStringBinary(row), serviceResult.getValue().getValue()); + } + try { + Message.Builder builder = responsePrototype.newBuilderForType(); + org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builder, + serviceResult.getValue().getValue().toByteArray()); + callback.update(region, row, (R) builder.build()); + } catch (IOException e) { + LOG.error("Unexpected response type from endpoint {}", methodDescriptor.getFullName(), + e); + callbackErrorExceptions.add(e); + callbackErrorActions.add(execsByRow.get(row)); + callbackErrorServers.add("null"); + } + }; AsyncProcessTask task = - AsyncProcessTask.newBuilder(resultsCallback).setPool(context.wrap(pool)) - .setTableName(tableName).setRowAccess(execs).setResults(results) - .setRpcTimeout(readRpcTimeoutMs).setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + AsyncProcessTask.newBuilder(resultsCallback).setPool(context.wrap(pool)) + .setTableName(tableName).setRowAccess(execs).setResults(results) + .setRpcTimeout(readRpcTimeoutMs).setOperationTimeout(operationTimeoutMs) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); AsyncRequestFuture future = asyncProcess.submit(task); future.waitUntilDone(); @@ -1332,7 +1337,7 @@ public class HTable implements Table { throw future.getErrors(); } else if (!callbackErrorExceptions.isEmpty()) { throw new RetriesExhaustedWithDetailsException(callbackErrorExceptions, - callbackErrorActions, callbackErrorServers); + callbackErrorActions, callbackErrorServers); } }, supplier); } @@ -1359,7 +1364,7 @@ public class HTable implements Table { @Override public CheckAndMutateBuilder qualifier(byte[] qualifier) { this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" - + " an empty byte array, or just do not call this method if you want a null qualifier"); + + " an empty byte array, or just do not call this method if you want a null qualifier"); return this; } @@ -1385,40 +1390,40 @@ public class HTable implements Table { private void preCheck() { Preconditions.checkNotNull(op, "condition is null. 
You need to specify the condition by" - + " calling ifNotExists/ifEquals/ifMatches before executing the request"); + + " calling ifNotExists/ifEquals/ifMatches before executing the request"); } @Override public boolean thenPut(Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { validatePut(put); preCheck(); return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, put) - .isSuccess(); + .isSuccess(); }, supplier); } @Override public boolean thenDelete(Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { preCheck(); return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, delete) - .isSuccess(); + .isSuccess(); }, supplier); } @Override public boolean thenMutate(RowMutations mutation) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { preCheck(); return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, mutation) - .isSuccess(); + .isSuccess(); }, supplier); } } @@ -1443,7 +1448,7 @@ public class HTable implements Table { @Override public boolean thenPut(Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { validatePut(put); return doCheckAndMutate(row, null, null, null, null, filter, timeRange, put).isSuccess(); @@ -1453,7 +1458,7 @@ public class HTable implements Table { @Override public boolean thenDelete(Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace( () -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, delete).isSuccess(), supplier); @@ -1462,11 +1467,10 @@ public class HTable implements Table { @Override public boolean thenMutate(RowMutations mutation) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil - .trace(() -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, mutation) - .isSuccess(), - supplier); + .trace(() -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, mutation) + .isSuccess(), supplier); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 3b0efba5f3b..fac13a831a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,11 +39,12 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * HTableMultiplexer provides a thread-safe non blocking PUT API across all the tables. Each put * will be sharded into different buffer queues based on its destination region server. So each @@ -67,14 +66,14 @@ public class HTableMultiplexer { private static final Logger LOG = LoggerFactory.getLogger(HTableMultiplexer.class.getName()); public static final String TABLE_MULTIPLEXER_FLUSH_PERIOD_MS = - "hbase.tablemultiplexer.flush.period.ms"; + "hbase.tablemultiplexer.flush.period.ms"; public static final String TABLE_MULTIPLEXER_INIT_THREADS = "hbase.tablemultiplexer.init.threads"; public static final String TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE = - "hbase.client.max.retries.in.queue"; + "hbase.client.max.retries.in.queue"; /** The map between each region server to its flush worker */ private final Map serverToFlushWorkerMap = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private final Configuration conf; private final ClusterConnection conn; @@ -86,41 +85,40 @@ public class HTableMultiplexer { private final long flushPeriod; /** - * @param conf The HBaseConfiguration + * @param conf The HBaseConfiguration * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for - * each region server before dropping the request. + * each region server before dropping the request. */ public HTableMultiplexer(Configuration conf, int perRegionServerBufferQueueSize) - throws IOException { + throws IOException { this(ConnectionFactory.createConnection(conf), conf, perRegionServerBufferQueueSize); } /** - * @param conn The HBase connection. - * @param conf The HBase configuration + * @param conn The HBase connection. + * @param conf The HBase configuration * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for - * each region server before dropping the request. + * each region server before dropping the request. 
*/ public HTableMultiplexer(Connection conn, Configuration conf, - int perRegionServerBufferQueueSize) { + int perRegionServerBufferQueueSize) { this.conn = (ClusterConnection) conn; this.pool = HTable.getDefaultExecutor(conf); // how many times we could try in total, one more than retry number this.maxAttempts = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1; + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1; this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize; this.maxKeyValueSize = HTable.getMaxKeyValueSize(conf); this.flushPeriod = conf.getLong(TABLE_MULTIPLEXER_FLUSH_PERIOD_MS, 100); int initThreads = conf.getInt(TABLE_MULTIPLEXER_INIT_THREADS, 10); - this.executor = - Executors.newScheduledThreadPool(initThreads, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build()); + this.executor = Executors.newScheduledThreadPool(initThreads, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build()); this.conf = conf; } /** - * Closes the internal {@link Connection}. Does nothing if the {@link Connection} has already - * been closed. + * Closes the internal {@link Connection}. Does nothing if the {@link Connection} has already been + * closed. * @throws IOException If there is an error closing the connection. */ public synchronized void close() throws IOException { @@ -131,27 +129,21 @@ public class HTableMultiplexer { /** * The put request will be buffered by its corresponding buffer queue. Return false if the queue - * is already full. - * @param tableName - * @param put - * @return true if the request can be accepted by its corresponding buffer queue. + * is already full. + * @return true if the request can be accepted by its corresponding buffer + * queue. */ public boolean put(TableName tableName, final Put put) { return put(tableName, put, this.maxAttempts); } /** - * The puts request will be buffered by their corresponding buffer queue. - * Return the list of puts which could not be queued. - * @param tableName - * @param puts - * @return the list of puts which could not be queued + * The puts request will be buffered by their corresponding buffer queue. Return the list of puts + * which could not be queued. + * @return the list of puts which could not be queued */ public List put(TableName tableName, final List puts) { - if (puts == null) - return null; + if (puts == null) return null; - List failedPuts = null; + List failedPuts = null; boolean result; for (Put put : puts) { result = put(tableName, put, this.maxAttempts); @@ -178,8 +170,7 @@ public class HTableMultiplexer { /** * The put request will be buffered by its corresponding buffer queue. And the put request will be - * retried before dropping the request. - * Return false if the queue is already full. + * retried before dropping the request. Return false if the queue is already full. * @return true if the request can be accepted by its corresponding buffer queue.
*/ public boolean put(final TableName tableName, final Put put, int maxAttempts) { @@ -240,8 +231,8 @@ public class HTableMultiplexer { worker = serverToFlushWorkerMap.get(addr); if (worker == null) { // Create the flush worker - worker = new FlushWorker(conf, this.conn, addr, this, - perRegionServerBufferQueueSize, pool, executor); + worker = new FlushWorker(conf, this.conn, addr, this, perRegionServerBufferQueueSize, + pool, executor); this.serverToFlushWorkerMap.put(addr, worker); executor.scheduleAtFixedRate(worker, flushPeriod, flushPeriod, TimeUnit.MILLISECONDS); } @@ -274,8 +265,7 @@ public class HTableMultiplexer { private Map serverToAverageLatencyMap; private Map serverToMaxLatencyMap; - public HTableMultiplexerStatus( - Map serverToFlushWorkerMap) { + public HTableMultiplexerStatus(Map serverToFlushWorkerMap) { this.totalBufferedPutCounter = 0; this.totalFailedPutCounter = 0; this.maxLatency = 0; @@ -287,16 +277,14 @@ public class HTableMultiplexer { this.initialize(serverToFlushWorkerMap); } - private void initialize( - Map serverToFlushWorkerMap) { + private void initialize(Map serverToFlushWorkerMap) { if (serverToFlushWorkerMap == null) { return; } long averageCalcSum = 0; int averageCalcCount = 0; - for (Map.Entry entry : serverToFlushWorkerMap - .entrySet()) { + for (Map.Entry entry : serverToFlushWorkerMap.entrySet()) { HRegionLocation addr = entry.getKey(); FlushWorker worker = entry.getValue(); @@ -305,8 +293,7 @@ public class HTableMultiplexer { long serverMaxLatency = worker.getMaxLatency(); AtomicAverageCounter averageCounter = worker.getAverageLatencyCounter(); // Get sum and count pieces separately to compute overall average - SimpleEntry averageComponents = averageCounter - .getComponents(); + SimpleEntry averageComponents = averageCounter.getComponents(); long serverAvgLatency = averageCounter.getAndReset(); this.totalBufferedPutCounter += bufferedCounter; @@ -317,19 +304,12 @@ public class HTableMultiplexer { averageCalcSum += averageComponents.getKey(); averageCalcCount += averageComponents.getValue(); - this.serverToBufferedCounterMap.put(addr.getHostnamePort(), - bufferedCounter); - this.serverToFailedCounterMap - .put(addr.getHostnamePort(), - failedCounter); - this.serverToAverageLatencyMap.put(addr.getHostnamePort(), - serverAvgLatency); - this.serverToMaxLatencyMap - .put(addr.getHostnamePort(), - serverMaxLatency); + this.serverToBufferedCounterMap.put(addr.getHostnamePort(), bufferedCounter); + this.serverToFailedCounterMap.put(addr.getHostnamePort(), failedCounter); + this.serverToAverageLatencyMap.put(addr.getHostnamePort(), serverAvgLatency); + this.serverToMaxLatencyMap.put(addr.getHostnamePort(), serverMaxLatency); } - this.overallAverageLatency = averageCalcCount != 0 ? averageCalcSum - / averageCalcCount : 0; + this.overallAverageLatency = averageCalcCount != 0 ? 
averageCalcSum / averageCalcCount : 0; } public long getTotalBufferedCounter() { @@ -436,19 +416,19 @@ public class HTableMultiplexer { private final int writeRpcTimeout; // needed to pass in through AsyncProcess constructor private final int operationTimeout; private final ExecutorService pool; + public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation addr, - HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize, - ExecutorService pool, ScheduledExecutorService executor) { + HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize, ExecutorService pool, + ScheduledExecutorService executor) { this.addr = addr; this.multiplexer = htableMultiplexer; this.queue = new LinkedBlockingQueue<>(perRegionServerBufferQueueSize); RpcRetryingCallerFactory rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf); RpcControllerFactory rpcControllerFactory = RpcControllerFactory.instantiate(conf); this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); // Specify 0 retries in AsyncProcess because we need to reassign puts to different queues // if regions are moved. this.ap = new AsyncProcess(conn, conf, rpcCallerFactory, rpcControllerFactory, 0); @@ -526,7 +506,7 @@ public class HTableMultiplexer { @InterfaceAudience.Private long getNextDelay(int retryCount) { return ConnectionUtils.getPauseTime(multiplexer.flushPeriod, - multiplexer.maxAttempts - retryCount - 1); + multiplexer.maxAttempts - retryCount - 1); } @InterfaceAudience.Private @@ -585,22 +565,17 @@ public class HTableMultiplexer { List failed = null; Object[] results = new Object[actions.size()]; ServerName server = addr.getServerName(); - Map actionsByServer = - Collections.singletonMap(server, actions); + Map actionsByServer = Collections.singletonMap(server, actions); try { - AsyncProcessTask task = AsyncProcessTask.newBuilder() - .setResults(results) - .setPool(pool) - .setRpcTimeout(writeRpcTimeout) - .setOperationTimeout(operationTimeout) - .build(); + AsyncProcessTask task = AsyncProcessTask.newBuilder().setResults(results).setPool(pool) + .setRpcTimeout(writeRpcTimeout).setOperationTimeout(operationTimeout).build(); AsyncRequestFuture arf = - ap.submitMultiActions(task, retainedActions, 0L, null, null, actionsByServer); + ap.submitMultiActions(task, retainedActions, 0L, null, null, actionsByServer); arf.waitUntilDone(); if (arf.hasError()) { // We just log and ignore the exception here since failed Puts will be resubmit again. 
LOG.debug("Caught some exceptions when flushing puts to region server " - + addr.getHostnamePort(), arf.getErrors()); + + addr.getHostnamePort(), arf.getErrors()); } } finally { for (int i = 0; i < results.length; i++) { @@ -633,9 +608,9 @@ public class HTableMultiplexer { // Log some basic info if (LOG.isDebugEnabled()) { - LOG.debug("Processed " + currentProcessingCount + " put requests for " - + addr.getHostnamePort() + " and " + failedCount + " failed" - + ", latency for this send: " + elapsed); + LOG.debug( + "Processed " + currentProcessingCount + " put requests for " + addr.getHostnamePort() + + " and " + failedCount + " failed" + ", latency for this send: " + elapsed); } // Reset the current processing put count @@ -643,17 +618,15 @@ public class HTableMultiplexer { } catch (RuntimeException e) { // To make findbugs happy // Log all the exceptions and move on - LOG.debug( - "Caught some exceptions " + e + " when flushing puts to region server " - + addr.getHostnamePort(), e); + LOG.debug("Caught some exceptions " + e + " when flushing puts to region server " + + addr.getHostnamePort(), e); } catch (Exception e) { if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); } // Log all the exceptions and move on - LOG.debug( - "Caught some exceptions " + e + " when flushing puts to region server " - + addr.getHostnamePort(), e); + LOG.debug("Caught some exceptions " + e + " when flushing puts to region server " + + addr.getHostnamePort(), e); } finally { // Update the totalFailedCount this.totalFailedPutCount.addAndGet(failedCount); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java index 99befa4aa11..81c104066f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java @@ -34,11 +34,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** * Hbck fixup tool APIs. Obtain an instance from {@link ClusterConnection#getHbck()} and call * {@link #close()} when done. - *

    WARNING: the below methods can damage the cluster. It may leave the cluster in an - * indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running - * any of the below, operators may have to do some clean up on hdfs or schedule some assign - * procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only. - * + *

    + * WARNING: the below methods can damage the cluster. It may leave the cluster in an indeterminate + * state, e.g. region not assigned, or some hdfs files left behind. After running any of the below, + * operators may have to do some clean up on hdfs or schedule some assign procedures to get regions + * back online. DO AT YOUR OWN RISK. For experienced users only. * @see ConnectionFactory * @see ClusterConnection * @since 2.0.2, 2.1.1 @@ -46,8 +46,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) public interface Hbck extends Abortable, Closeable { /** - * Update table state in Meta only. No procedures are submitted to open/assign or - * close/unassign regions of the table. + * Update table state in Meta only. No procedures are submitted to open/assign or close/unassign + * regions of the table. * @param state table state * @return previous state of the table in Meta */ @@ -63,17 +63,18 @@ public interface Hbck extends Abortable, Closeable { setRegionStateInMeta(Map nameOrEncodedName2State) throws IOException; /** - * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time - * -- good if many Regions to online -- and it will schedule the assigns even in the case where + * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time -- + * good if many Regions to online -- and it will schedule the assigns even in the case where * Master is initializing (as long as the ProcedureExecutor is up). Does NOT call Coprocessor * hooks. - * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * @param override You need to add the override for case where a region has previously + * been bypassed. When a Procedure has been bypassed, a Procedure will + * have completed but no other Procedure will be able to make progress + * on the target entity (intentionally). This override flag will + * override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example + * of what a random user-space encoded Region name looks like. */ List assigns(List encodedRegionNames, boolean override) throws IOException; @@ -86,13 +87,14 @@ public interface Hbck extends Abortable, Closeable { * at a time -- good if many Regions to offline -- and it will schedule the assigns even in the * case where Master is initializing (as long as the ProcedureExecutor is up). Does NOT call * Coprocessor hooks. - * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 
1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * @param override You need to add the override for case where a region has previously + * been bypassed. When a Procedure has been bypassed, a Procedure will + * have completed but no other Procedure will be able to make progress + * on the target entity (intentionally). This override flag will + * override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example + * of what a random user-space encoded Region name looks like. */ List unassigns(List encodedRegionNames, boolean override) throws IOException; @@ -101,21 +103,20 @@ public interface Hbck extends Abortable, Closeable { } /** - * Bypass specified procedure and move it to completion. Procedure is marked completed but - * no actual work is done from the current state/step onwards. Parents of the procedure are - * also marked for bypass. - * - * @param pids of procedures to complete. - * @param waitTime wait time in ms for acquiring lock for a procedure - * @param override if override set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during execution (bugs?). - * @param recursive If set, if a parent procedure, we will find and bypass children and then - * the parent procedure (Dangerous but useful in case where child procedure has been 'lost'). - * Does not always work. Experimental. + * Bypass specified procedure and move it to completion. Procedure is marked completed but no + * actual work is done from the current state/step onwards. Parents of the procedure are also + * marked for bypass. + * @param pids of procedures to complete. + * @param waitTime wait time in ms for acquiring lock for a procedure + * @param override if override set to true, we will bypass the procedure even if it is executing. + * This is for procedures which can't break out during execution (bugs?). + * @param recursive If set, if a parent procedure, we will find and bypass children and then the + * parent procedure (Dangerous but useful in case where child procedure has been + * 'lost'). Does not always work. Experimental. * @return true if procedure is marked for bypass successfully, false otherwise */ List bypassProcedure(List pids, long waitTime, boolean override, boolean recursive) - throws IOException; + throws IOException; /** * Use {@link #scheduleServerCrashProcedures(List)} instead. @@ -123,9 +124,9 @@ public interface Hbck extends Abortable, Closeable { */ @Deprecated default List scheduleServerCrashProcedure(List serverNames) - throws IOException { + throws IOException { return scheduleServerCrashProcedures( - serverNames.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList())); + serverNames.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList())); } List scheduleServerCrashProcedures(List serverNames) throws IOException; @@ -134,7 +135,6 @@ public interface Hbck extends Abortable, Closeable { /** * Request HBCK chore to run at master side. 
- * * @return true if HBCK chore ran, false if HBCK chore already running * @throws IOException if a remote or network exception occurs */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java index aa132afa7cc..5f758c7a5e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor; +import org.apache.yetus.audience.InterfaceAudience; /** * Read-only column descriptor. @@ -28,16 +28,16 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableCo @InterfaceAudience.Private public class ImmutableHColumnDescriptor extends HColumnDescriptor { /* - * Create an unmodifyable copy of an HColumnDescriptor - * @param desc + * Create an unmodifiable copy of an HColumnDescriptor */ ImmutableHColumnDescriptor(final HColumnDescriptor desc) { super(desc, false); } public ImmutableHColumnDescriptor(final ColumnFamilyDescriptor desc) { - super(desc instanceof ModifyableColumnFamilyDescriptor ? - (ModifyableColumnFamilyDescriptor) desc : new ModifyableColumnFamilyDescriptor(desc)); + super(desc instanceof ModifyableColumnFamilyDescriptor + ? (ModifyableColumnFamilyDescriptor) desc + : new ModifyableColumnFamilyDescriptor(desc)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java index ed00ceedce5..f95dbeb001e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,9 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience; public class ImmutableHRegionInfo extends HRegionInfo { /* - * Creates an immutable copy of an HRegionInfo. - * - * @param other + * Creates an immutable copy of an HRegionInfo. */ public ImmutableHRegionInfo(RegionInfo other) { super(other); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java index 8539cef0a8e..2cce334e359 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -40,17 +39,18 @@ public class ImmutableHTableDescriptor extends HTableDescriptor { return new ImmutableHColumnDescriptor(desc); } } + /* - * Create an unmodifyable copy of an HTableDescriptor - * @param desc + * Create an unmodifiable copy of an HTableDescriptor */ public ImmutableHTableDescriptor(final HTableDescriptor desc) { super(desc, false); } public ImmutableHTableDescriptor(final TableDescriptor desc) { - super(desc instanceof ModifyableTableDescriptor ? - (ModifyableTableDescriptor) desc : new ModifyableTableDescriptor(desc.getTableName(), desc)); + super(desc instanceof ModifyableTableDescriptor + ? (ModifyableTableDescriptor) desc + : new ModifyableTableDescriptor(desc.getTableName(), desc)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java index e0d4ad78120..b54541e60f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -41,7 +39,6 @@ public final class ImmutableScan extends Scan { /** * Create Immutable instance of Scan from given Scan object - * * @param scan Copy all values from Scan */ public ImmutableScan(Scan scan) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index d7d11160a78..2a9bec60329 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,12 +36,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Used to perform Increment operations on a single row. *

    - * This operation ensures atomicity to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. + * This operation ensures atomicity to readers. Increments are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. *

    - * To increment columns of a row, instantiate an Increment object with the row - * to increment. At least one column to increment must be specified using the + * To increment columns of a row, instantiate an Increment object with the row to increment. At + * least one column to increment must be specified using the * {@link #addColumn(byte[], byte[], long)} method. */ @InterfaceAudience.Public @@ -56,7 +55,7 @@ public class Increment extends Mutation { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(byte [] row) { + public Increment(byte[] row) { this(row, 0, row.length); } @@ -66,10 +65,11 @@ public class Increment extends Mutation { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(final byte [] row, final int offset, final int length) { + public Increment(final byte[] row, final int offset, final int length) { checkRow(row, offset, length); this.row = Bytes.copy(row, offset, length); } + /** * Copy constructor * @param incrementToCopy increment to copy @@ -80,39 +80,36 @@ public class Increment extends Mutation { } /** - * Construct the Increment with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Increment with user defined data. NOTED: 1) all cells in the familyMap must have + * the Type.Put 2) the row of each cell must be same with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Increment(byte[] row, long ts, NavigableMap> familyMap) { + public Increment(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Add the specified KeyValue to this operation. - * @param cell individual Cell - * @return this - * @throws java.io.IOException e + * @param cell individual Cell n * @throws java.io.IOException e */ - public Increment add(Cell cell) throws IOException{ + public Increment add(Cell cell) throws IOException { super.add(cell); return this; } /** - * Increment the column from the specific family with the specified qualifier - * by the specified amount. + * Increment the column from the specific family with the specified qualifier by the specified + * amount. *

    * Overrides previous calls to addColumn for this family and qualifier. - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param amount amount to increment by + * @param amount amount to increment by * @return the Increment object */ - public Increment addColumn(byte [] family, byte [] qualifier, long amount) { + public Increment addColumn(byte[] family, byte[] qualifier, long amount) { if (family == null) { throw new IllegalArgumentException("family cannot be null"); } @@ -123,8 +120,7 @@ public class Increment extends Mutation { } /** - * Gets the TimeRange used for this increment. - * @return TimeRange + * Gets the TimeRange used for this increment. n */ public TimeRange getTimeRange() { return this.tr; @@ -133,21 +129,18 @@ public class Increment extends Mutation { /** * Sets the TimeRange to be used on the Get for this increment. *

    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this increment, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this increment, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *

    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive - * @throws IOException if invalid time range - * @return this + * @throws IOException if invalid time range n */ - public Increment setTimeRange(long minStamp, long maxStamp) - throws IOException { + public Increment setTimeRange(long minStamp, long maxStamp) throws IOException { tr = new TimeRange(minStamp, maxStamp); return this; } @@ -160,8 +153,8 @@ public class Increment extends Mutation { /** * @param returnResults True (default) if the increment operation should return the results. A - * client that is not interested in the result can save network bandwidth setting this - * to false. + * client that is not interested in the result can save network bandwidth + * setting this to false. */ @Override public Increment setReturnResults(boolean returnResults) { @@ -196,21 +189,20 @@ public class Increment extends Mutation { } /** - * Before 0.95, when you called Increment#getFamilyMap(), you got back - * a map of families to a list of Longs. Now, {@link #getFamilyCellMap()} returns - * families by list of Cells. This method has been added so you can have the - * old behavior. + * Before 0.95, when you called Increment#getFamilyMap(), you got back a map of families to a list + * of Longs. Now, {@link #getFamilyCellMap()} returns families by list of Cells. This method has + * been added so you can have the old behavior. * @return Map of families to a Map of qualifiers and their Long increments. * @since 0.95.0 */ - public Map> getFamilyMapOfLongs() { + public Map> getFamilyMapOfLongs() { NavigableMap> map = super.getFamilyCellMap(); - Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Map.Entry> entry: map.entrySet()) { - NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Cell cell: entry.getValue()) { + Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Map.Entry> entry : map.entrySet()) { + NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Cell cell : entry.getValue()) { longs.put(CellUtil.cloneQualifier(cell), - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } results.put(entry.getKey(), longs); } @@ -218,21 +210,21 @@ public class Increment extends Mutation { } /** - * @return String + * n */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("row="); sb.append(Bytes.toStringBinary(this.row)); - if(this.familyMap.isEmpty()) { + if (this.familyMap.isEmpty()) { sb.append(", no columns set to be incremented"); return sb.toString(); } sb.append(", families="); boolean moreThanOne = false; - for(Map.Entry> entry: this.familyMap.entrySet()) { - if(moreThanOne) { + for (Map.Entry> entry : this.familyMap.entrySet()) { + if (moreThanOne) { sb.append("), "); } else { moreThanOne = true; @@ -241,19 +233,19 @@ public class Increment extends Mutation { sb.append("(family="); sb.append(Bytes.toString(entry.getKey())); sb.append(", columns="); - if(entry.getValue() == null) { + if (entry.getValue() == null) { sb.append("NONE"); } else { sb.append("{"); boolean moreThanOneB = false; - for(Cell cell : entry.getValue()) { - if(moreThanOneB) { + for (Cell cell : entry.getValue()) { + if (moreThanOneB) { sb.append(", "); } else { moreThanOneB = true; } - sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + - Bytes.toLong(cell.getValueArray(), 
cell.getValueOffset(), cell.getValueLength())); + sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } sb.append("}"); } @@ -263,25 +255,24 @@ public class Increment extends Mutation { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * No replacement. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. No replacement. */ @Deprecated @Override public int hashCode() { - // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // TODO: This is wrong. Can't have two gets the same just because on same row. But it // matches how equals works currently and gets rid of the findbugs warning. return Bytes.hashCode(this.getRow()); } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Row#COMPARATOR} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Row#COMPARATOR} instead */ @Deprecated @Override public boolean equals(Object obj) { - // TODO: This is wrong. Can't have two the same just because on same row. + // TODO: This is wrong. Can't have two the same just because on same row. if (this == obj) { return true; } @@ -293,7 +284,7 @@ public class Increment extends Mutation { } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } @@ -314,8 +305,8 @@ public class Increment extends Mutation { /** * Method for setting the Increment's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Increment#Increment(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Increment#Increment(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java index ba760908700..7804e48de9f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -25,11 +22,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Specify Isolation levels in Scan operations. *

    - * There are two isolation levels. A READ_COMMITTED isolation level - * indicates that only data that is committed be returned in a scan. - * An isolation level of READ_UNCOMMITTED indicates that a scan - * should return data that is being modified by transactions that might - * not have been committed yet. + * There are two isolation levels. A READ_COMMITTED isolation level indicates that only data that is + * committed be returned in a scan. An isolation level of READ_UNCOMMITTED indicates that a scan + * should return data that is being modified by transactions that might not have been committed yet. */ @InterfaceAudience.Public public enum IsolationLevel { @@ -37,17 +32,18 @@ public enum IsolationLevel { READ_COMMITTED(1), READ_UNCOMMITTED(2); - IsolationLevel(int value) {} + IsolationLevel(int value) { + } - public byte [] toBytes() { - return new byte [] { toByte() }; + public byte[] toBytes() { + return new byte[] { toByte() }; } public byte toByte() { - return (byte)this.ordinal(); + return (byte) this.ordinal(); } - public static IsolationLevel fromBytes(byte [] bytes) { + public static IsolationLevel fromBytes(byte[] bytes) { return IsolationLevel.fromByte(bytes[0]); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java index b949f0e2ecb..b3fa169e0ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java index 41f79cf8e81..807c7f1f435 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Abstract response class representing online logs response from ring-buffer use-cases - * e.g slow/large RPC logs, balancer decision logs + * Abstract response class representing online logs response from ring-buffer use-cases e.g + * slow/large RPC logs, balancer decision logs */ @InterfaceAudience.Public @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java index 506fc4f7652..b2d217da3de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -26,8 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Slow/Large Log Query Filter with all filter and limit parameters - * Extends generic LogRequest used by Admin API getLogEntries + * Slow/Large Log Query Filter with all filter and limit parameters Extends generic LogRequest used + * by Admin API getLogEntries * @deprecated as of 2.4.0. Will be removed in 4.0.0. */ @InterfaceAudience.Public @@ -121,41 +119,24 @@ public class LogQueryFilter { LogQueryFilter that = (LogQueryFilter) o; - return new EqualsBuilder() - .append(limit, that.limit) - .append(regionName, that.regionName) - .append(clientAddress, that.clientAddress) - .append(tableName, that.tableName) - .append(userName, that.userName) - .append(type, that.type) - .append(filterByOperator, that.filterByOperator) - .isEquals(); + return new EqualsBuilder().append(limit, that.limit).append(regionName, that.regionName) + .append(clientAddress, that.clientAddress).append(tableName, that.tableName) + .append(userName, that.userName).append(type, that.type) + .append(filterByOperator, that.filterByOperator).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(regionName) - .append(clientAddress) - .append(tableName) - .append(userName) - .append(limit) - .append(type) - .append(filterByOperator) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(regionName).append(clientAddress).append(tableName) + .append(userName).append(limit).append(type).append(filterByOperator).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this) - .append("regionName", regionName) - .append("clientAddress", clientAddress) - .append("tableName", tableName) - .append("userName", userName) - .append("limit", limit) - .append("type", type) - .append("filterByOperator", filterByOperator) - .toString(); + return new ToStringBuilder(this).append("regionName", regionName) + .append("clientAddress", clientAddress).append("tableName", tableName) + .append("userName", userName).append("limit", limit).append("type", type) + .append("filterByOperator", filterByOperator).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java index 7ae97314c48..7a919f5ad5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.Closeable; @@ -29,16 +28,17 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A RetryingCallable for Master RPC operations. - * Implement the #rpcCall method. It will be retried on error. 
See its javadoc and the javadoc of - * #call(int). See {@link HBaseAdmin} for examples of how this is used. To get at the - * rpcController that has been created and configured to make this rpc call, use getRpcController(). - * We are trying to contain all protobuf references including references to rpcController so we - * don't pollute codebase with protobuf references; keep the protobuf references contained and only - * present in a few classes rather than all about the code base. - *

    Like {@link RegionServerCallable} only in here, we can safely be PayloadCarryingRpcController - * all the time. This is not possible in the similar {@link RegionServerCallable} Callable because - * it has to deal with Coprocessor Endpoints. + * A RetryingCallable for Master RPC operations. Implement the #rpcCall method. It will be retried + * on error. See its javadoc and the javadoc of #call(int). See {@link HBaseAdmin} for examples of + * how this is used. To get at the rpcController that has been created and configured to make this + * rpc call, use getRpcController(). We are trying to contain all protobuf references including + * references to rpcController so we don't pollute codebase with protobuf references; keep the + * protobuf references contained and only present in a few classes rather than all about the code + * base. + *

    + * Like {@link RegionServerCallable} only in here, we can safely be PayloadCarryingRpcController all + * the time. This is not possible in the similar {@link RegionServerCallable} Callable because it + * has to deal with Coprocessor Endpoints. * @param return type */ @InterfaceAudience.Private @@ -81,10 +81,10 @@ abstract class MasterCallable implements RetryingCallable, Closeable { } /** - * Override that changes the {@link java.util.concurrent.Callable#call()} Exception from {@link Exception} to - * {@link IOException}. It also does setup of an rpcController and calls through to the rpcCall() - * method which callers are expected to implement. If rpcController is an instance of - * PayloadCarryingRpcController, we will set a timeout on it. + * Override that changes the {@link java.util.concurrent.Callable#call()} Exception from + * {@link Exception} to {@link IOException}. It also does setup of an rpcController and calls + * through to the rpcCall() method which callers are expected to implement. If rpcController is an + * instance of PayloadCarryingRpcController, we will set a timeout on it. */ @Override // Same trick as in RegionServerCallable so users don't have to copy/paste so much boilerplate @@ -103,12 +103,11 @@ abstract class MasterCallable implements RetryingCallable, Closeable { } /** - * Run the RPC call. Implement this method. To get at the rpcController that has been created - * and configured to make this rpc call, use getRpcController(). We are trying to contain + * Run the RPC call. Implement this method. To get at the rpcController that has been created and + * configured to make this rpc call, use getRpcController(). We are trying to contain * rpcController references so we don't pollute codebase with protobuf references; keep the - * protobuf references contained and only present in a few classes rather than all about the - * code base. - * @throws Exception + * protobuf references contained and only present in a few classes rather than all about the code + * base. n */ protected abstract V rpcCall() throws Exception; @@ -138,7 +137,7 @@ abstract class MasterCallable implements RetryingCallable, Closeable { } private static boolean isMetaRegion(final byte[] regionName) { - return Bytes.equals(regionName, RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) || - Bytes.equals(regionName, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); + return Bytes.equals(regionName, RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionName, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java index 9e68a16bc30..c6f0ceec550 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,14 +48,13 @@ class MasterCoprocessorRpcChannelImpl implements RpcChannel { } private CompletableFuture rpcCall(MethodDescriptor method, Message request, - Message responsePrototype, HBaseRpcController controller, MasterService.Interface stub) { + Message responsePrototype, HBaseRpcController controller, MasterService.Interface stub) { CompletableFuture future = new CompletableFuture<>(); CoprocessorServiceRequest csr = - CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); - stub.execMasterService( - controller, - csr, - new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback() { + CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); + stub.execMasterService(controller, csr, + new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback< + CoprocessorServiceResponse>() { @Override public void run(CoprocessorServiceResponse resp) { @@ -75,7 +74,7 @@ class MasterCoprocessorRpcChannelImpl implements RpcChannel { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { addListener( callerBuilder.action((c, s) -> rpcCall(method, request, responsePrototype, c, s)).call(), ((r, e) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java index b1c37776f9e..b8a957662b4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; + /** - * A KeepAlive connection is not physically closed immediately after the close, - * but rather kept alive for a few minutes. It makes sense only if it is shared. - * - *

    This interface is implemented on a stub. It allows to have a #close function in a master - * client. - * - *

    This class is intended to be used internally by HBase classes that need to make invocations - * against the master on the MasterProtos.MasterService.BlockingInterface; but not by - * final user code. Hence it's package protected. + * A KeepAlive connection is not physically closed immediately after the close, but rather kept + * alive for a few minutes. It makes sense only if it is shared. + *

    + * This interface is implemented on a stub. It allows to have a #close function in a master client. + *

    + * This class is intended to be used internally by HBase classes that need to make invocations + * against the master on the MasterProtos.MasterService.BlockingInterface; but not by final user + * code. Hence it's package protected. */ @InterfaceAudience.Private interface MasterKeepAliveConnection extends MasterProtos.MasterService.BlockingInterface { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 05773d0b419..a031d353097 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -135,7 +135,7 @@ public class MasterRegistry extends AbstractRpcBasedConnectionRegistry { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") + allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") CompletableFuture> getMasters() { return this . call( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java index 6d4b85cfc51..d56906d8ba5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +16,9 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; + /** * Represents the master switch type */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java index fde2838acc2..dd19588d307 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; @@ -26,7 +25,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.CopyOnWriteArraySet; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; @@ -49,8 +47,9 @@ public class MetaCache { /** * Map of table to table {@link HRegionLocation}s. */ - private final ConcurrentMap> - cachedRegionLocations = new CopyOnWriteArrayMap<>(); + private final ConcurrentMap> cachedRegionLocations = + new CopyOnWriteArrayMap<>(); // The presence of a server in the map implies it's likely that there is an // entry in cachedRegionLocations that map to this server; but the absence @@ -66,14 +65,12 @@ public class MetaCache { } /** - * Search the cache for a location that fits our table and row key. 
- * Return null if no suitable region is located. - * + * Search the cache for a location that fits our table and row key. Return null if no suitable + * region is located. * @return Null or region location found in cache. */ - public RegionLocations getCachedLocation(final TableName tableName, final byte [] row) { - ConcurrentNavigableMap tableLocations = - getTableLocations(tableName); + public RegionLocations getCachedLocation(final TableName tableName, final byte[] row) { + ConcurrentNavigableMap tableLocations = getTableLocations(tableName); Entry e = tableLocations.floorEntry(row); if (e == null) { @@ -96,8 +93,10 @@ public class MetaCache { // coming in here. // 2. Even if META region comes in, its end key will be empty byte[] and so Bytes.equals(endKey, // HConstants.EMPTY_END_ROW) check itself will pass. - if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) || - Bytes.compareTo(endKey, 0, endKey.length, row, 0, row.length) > 0) { + if ( + Bytes.equals(endKey, HConstants.EMPTY_END_ROW) + || Bytes.compareTo(endKey, 0, endKey.length, row, 0, row.length) > 0 + ) { if (metrics != null) metrics.incrMetaCacheHit(); return possibleRegion; } @@ -110,15 +109,15 @@ public class MetaCache { /** * Put a newly discovered HRegionLocation into the cache. * @param tableName The table name. - * @param source the source of the new location - * @param location the new location + * @param source the source of the new location + * @param location the new location */ public void cacheLocation(final TableName tableName, final ServerName source, - final HRegionLocation location) { + final HRegionLocation location) { assert source != null; - byte [] startKey = location.getRegion().getStartKey(); + byte[] startKey = location.getRegion().getStartKey(); ConcurrentMap tableLocations = getTableLocations(tableName); - RegionLocations locations = new RegionLocations(new HRegionLocation[] {location}) ; + RegionLocations locations = new RegionLocations(new HRegionLocation[] { location }); RegionLocations oldLocations = tableLocations.putIfAbsent(startKey, locations); boolean isNewCacheEntry = (oldLocations == null); if (isNewCacheEntry) { @@ -130,10 +129,10 @@ public class MetaCache { } // If the server in cache sends us a redirect, assume it's always valid. 
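The MetaCache hunks above only reflow the code; the lookup they describe is a floor-entry search on a map sorted by region start key, followed by an end-key check. A minimal standalone sketch of that idea (not the MetaCache class itself; the Entry type, its fields, and the serverName value are invented for illustration):

import java.util.Map;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionRangeLookupSketch {
  private static final byte[] EMPTY_END_ROW = new byte[0];

  private static final class Entry {
    final byte[] endKey;
    final String serverName;
    Entry(byte[] endKey, String serverName) {
      this.endKey = endKey;
      this.serverName = serverName;
    }
  }

  // Region start key -> cached entry, sorted with the same byte[] comparator the client code uses.
  private final ConcurrentNavigableMap<byte[], Entry> byStartKey =
      new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);

  void cache(byte[] startKey, byte[] endKey, String serverName) {
    byStartKey.put(startKey, new Entry(endKey, serverName));
  }

  /** Returns the cached server for the region covering row, or null on a cache miss. */
  String lookup(byte[] row) {
    // floorEntry picks the region whose start key is closest below (or equal to) the row...
    Map.Entry<byte[], Entry> e = byStartKey.floorEntry(row);
    if (e == null) {
      return null;
    }
    byte[] endKey = e.getValue().endKey;
    // ...and the end-key check confirms the row really falls inside that region
    // (an empty end key means the last region of the table).
    if (Bytes.equals(endKey, EMPTY_END_ROW) || Bytes.compareTo(endKey, row) > 0) {
      return e.getValue().serverName;
    }
    return null;
  }
}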
- HRegionLocation oldLocation = oldLocations.getRegionLocation( - location.getRegion().getReplicaId()); + HRegionLocation oldLocation = + oldLocations.getRegionLocation(location.getRegion().getReplicaId()); boolean force = oldLocation != null && oldLocation.getServerName() != null - && oldLocation.getServerName().equals(source); + && oldLocation.getServerName().equals(source); // For redirect if the number is equal to previous // record, the most common case is that first the region was closed with seqNum, and then @@ -156,7 +155,7 @@ public class MetaCache { * @param locations the new locations */ public void cacheLocation(final TableName tableName, final RegionLocations locations) { - byte [] startKey = locations.getRegionLocation().getRegion().getStartKey(); + byte[] startKey = locations.getRegionLocation().getRegion().getStartKey(); ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations oldLocation = tableLocations.putIfAbsent(startKey, locations); boolean isNewCacheEntry = (oldLocation == null); @@ -188,11 +187,10 @@ public class MetaCache { } /** - * @param tableName - * @return Map of cached locations for passed tableName + * n * @return Map of cached locations for passed tableName */ - private ConcurrentNavigableMap getTableLocations( - final TableName tableName) { + private ConcurrentNavigableMap + getTableLocations(final TableName tableName) { // find the map of cached locations for this table return computeIfAbsent(cachedRegionLocations, tableName, () -> new CopyOnWriteArrayMap<>(Bytes.BYTES_COMPARATOR)); @@ -201,7 +199,7 @@ public class MetaCache { /** * Check the region cache to see whether a region is cached yet or not. * @param tableName tableName - * @param row row + * @param row row * @return Region cached or not. */ public boolean isRegionCached(TableName tableName, final byte[] row) { @@ -210,8 +208,7 @@ public class MetaCache { } /** - * Return the number of cached region for a table. It will only be called - * from a unit test. + * Return the number of cached region for a table. It will only be called from a unit test. */ public int getNumberOfCachedRegionLocations(final TableName tableName) { Map tableLocs = this.cachedRegionLocations.get(tableName); @@ -244,13 +241,13 @@ public class MetaCache { boolean deletedSomething = false; synchronized (this.cachedServers) { // We block here, because if there is an error on a server, it's likely that multiple - // threads will get the error simultaneously. If there are hundreds of thousand of - // region location to check, it's better to do this only once. A better pattern would - // be to check if the server is dead when we get the region location. + // threads will get the error simultaneously. If there are hundreds of thousand of + // region location to check, it's better to do this only once. A better pattern would + // be to check if the server is dead when we get the region location. 
if (!this.cachedServers.contains(serverName)) { return; } - for (ConcurrentMap tableLocations : cachedRegionLocations.values()){ + for (ConcurrentMap tableLocations : cachedRegionLocations.values()) { for (Entry e : tableLocations.entrySet()) { RegionLocations regionLocations = e.getValue(); if (regionLocations != null) { @@ -259,8 +256,8 @@ public class MetaCache { if (updatedLocations.isEmpty()) { deletedSomething |= tableLocations.remove(e.getKey(), regionLocations); } else { - deletedSomething |= tableLocations.replace(e.getKey(), regionLocations, - updatedLocations); + deletedSomething |= + tableLocations.replace(e.getKey(), regionLocations, updatedLocations); } } } @@ -290,10 +287,9 @@ public class MetaCache { /** * Delete a cached location, no matter what it is. Called when we were told to not use cache. - * @param tableName tableName - * @param row + * @param tableName tableName n */ - public void clearCache(final TableName tableName, final byte [] row) { + public void clearCache(final TableName tableName, final byte[] row) { ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations regionLocations = getCachedLocation(tableName, row); @@ -314,10 +310,10 @@ public class MetaCache { /** * Delete a cached location with specific replicaId. * @param tableName tableName - * @param row row key + * @param row row key * @param replicaId region replica id */ - public void clearCache(final TableName tableName, final byte [] row, int replicaId) { + public void clearCache(final TableName tableName, final byte[] row, int replicaId) { ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations regionLocations = getCachedLocation(tableName, row); @@ -348,7 +344,7 @@ public class MetaCache { /** * Delete a cached location for a table, row and server */ - public void clearCache(final TableName tableName, final byte [] row, ServerName serverName) { + public void clearCache(final TableName tableName, final byte[] row, ServerName serverName) { ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations regionLocations = getCachedLocation(tableName, row); @@ -420,7 +416,7 @@ public class MetaCache { removed = tableLocations.remove(location.getRegion().getStartKey(), regionLocations); } else { removed = tableLocations.replace(location.getRegion().getStartKey(), regionLocations, - updatedLocations); + updatedLocations); } if (removed) { if (metrics != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index 76f4a9ab8cf..dc452bcd9d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,30 +26,28 @@ import com.codahale.metrics.JmxReporter; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.RatioGauge; import com.codahale.metrics.Timer; - import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.yetus.audience.InterfaceAudience; /** - * This class is for maintaining the various connection statistics and publishing them through - * the metrics interfaces. - * - * This class manages its own {@link MetricRegistry} and {@link JmxReporter} so as to not - * conflict with other uses of Yammer Metrics within the client application. Instantiating + * This class is for maintaining the various connection statistics and publishing them through the + * metrics interfaces. This class manages its own {@link MetricRegistry} and {@link JmxReporter} so + * as to not conflict with other uses of Yammer Metrics within the client application. Instantiating * this class implicitly creates and "starts" instances of these classes; be sure to call * {@link #shutdown()} to terminate the thread pools they allocate. */ @@ -60,27 +58,25 @@ public class MetricsConnection implements StatisticTrackable { public static final String CLIENT_SIDE_METRICS_ENABLED_KEY = "hbase.client.metrics.enable"; /** - * Set to specify a custom scope for the metrics published through {@link MetricsConnection}. - * The scope is added to JMX MBean objectName, and defaults to a combination of the Connection's - * clusterId and hashCode. For example, a default value for a connection to cluster "foo" might - * be "foo-7d9d0818", where "7d9d0818" is the hashCode of the underlying AsyncConnectionImpl. - * Users may set this key to give a more contextual name for this scope. For example, one might - * want to differentiate a read connection from a write connection by setting the scopes to - * "foo-read" and "foo-write" respectively. - * - * Scope is the only thing that lends any uniqueness to the metrics. Care should be taken to - * avoid using the same scope for multiple Connections, otherwise the metrics may aggregate in - * unforeseen ways. + * Set to specify a custom scope for the metrics published through {@link MetricsConnection}. The + * scope is added to JMX MBean objectName, and defaults to a combination of the Connection's + * clusterId and hashCode. For example, a default value for a connection to cluster "foo" might be + * "foo-7d9d0818", where "7d9d0818" is the hashCode of the underlying AsyncConnectionImpl. 
Users + * may set this key to give a more contextual name for this scope. For example, one might want to + * differentiate a read connection from a write connection by setting the scopes to "foo-read" and + * "foo-write" respectively. Scope is the only thing that lends any uniqueness to the metrics. + * Care should be taken to avoid using the same scope for multiple Connections, otherwise the + * metrics may aggregate in unforeseen ways. */ public static final String METRICS_SCOPE_KEY = "hbase.client.metrics.scope"; /** - * Returns the scope for a MetricsConnection based on the configured {@link #METRICS_SCOPE_KEY} - * or by generating a default from the passed clusterId and connectionObj's hashCode. + * Returns the scope for a MetricsConnection based on the configured {@link #METRICS_SCOPE_KEY} or + * by generating a default from the passed clusterId and connectionObj's hashCode. * @param conf configuration for the connection * @param clusterId clusterId for the connection - * @param connectionObj either a Connection or AsyncConnectionImpl, the instance - * creating this MetricsConnection. + * @param connectionObj either a Connection or AsyncConnectionImpl, the instance creating this + * MetricsConnection. */ static String getScope(Configuration conf, String clusterId, Object connectionObj) { return conf.get(METRICS_SCOPE_KEY, @@ -169,12 +165,10 @@ public class MetricsConnection implements StatisticTrackable { sb.append("(").append(subName).append(")"); } this.name = sb.toString(); - this.callTimer = registry.timer(name(MetricsConnection.class, - DRTN_BASE + this.name, scope)); - this.reqHist = registry.histogram(name(MetricsConnection.class, - REQ_BASE + this.name, scope)); - this.respHist = registry.histogram(name(MetricsConnection.class, - RESP_BASE + this.name, scope)); + this.callTimer = registry.timer(name(MetricsConnection.class, DRTN_BASE + this.name, scope)); + this.reqHist = registry.histogram(name(MetricsConnection.class, REQ_BASE + this.name, scope)); + this.respHist = + registry.histogram(name(MetricsConnection.class, RESP_BASE + this.name, scope)); } private CallTracker(MetricRegistry registry, String name, String scope) { @@ -200,10 +194,10 @@ public class MetricsConnection implements StatisticTrackable { public RegionStats(MetricRegistry registry, String name) { this.name = name; - this.memstoreLoadHist = registry.histogram(name(MetricsConnection.class, - MEMLOAD_BASE + this.name)); - this.heapOccupancyHist = registry.histogram(name(MetricsConnection.class, - HEAP_BASE + this.name)); + this.memstoreLoadHist = + registry.histogram(name(MetricsConnection.class, MEMLOAD_BASE + this.name)); + this.heapOccupancyHist = + registry.histogram(name(MetricsConnection.class, HEAP_BASE + this.name)); } public void update(RegionLoadStats regionStatistics) { @@ -218,12 +212,10 @@ public class MetricsConnection implements StatisticTrackable { final Histogram delayIntevalHist; public RunnerStats(MetricRegistry registry) { - this.normalRunners = registry.counter( - name(MetricsConnection.class, "normalRunnersCount")); - this.delayRunners = registry.counter( - name(MetricsConnection.class, "delayRunnersCount")); - this.delayIntevalHist = registry.histogram( - name(MetricsConnection.class, "delayIntervalHist")); + this.normalRunners = registry.counter(name(MetricsConnection.class, "normalRunnersCount")); + this.delayRunners = registry.counter(name(MetricsConnection.class, "delayRunnersCount")); + this.delayIntevalHist = + registry.histogram(name(MetricsConnection.class, "delayIntervalHist")); 
} public void incrNormalRunners() { @@ -239,11 +231,10 @@ public class MetricsConnection implements StatisticTrackable { } } - protected ConcurrentHashMap> serverStats - = new ConcurrentHashMap<>(); + protected ConcurrentHashMap> serverStats = + new ConcurrentHashMap<>(); - public void updateServerStats(ServerName serverName, byte[] regionName, - Object r) { + public void updateServerStats(ServerName serverName, byte[] regionName, Object r) { if (!(r instanceof Result)) { return; } @@ -261,7 +252,7 @@ public class MetricsConnection implements StatisticTrackable { ConcurrentMap rsStats = computeIfAbsent(serverStats, serverName, () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR)); RegionStats regionStats = - computeIfAbsent(rsStats, regionName, () -> new RegionStats(this.registry, name)); + computeIfAbsent(rsStats, regionName, () -> new RegionStats(this.registry, name)); regionStats.update(stats); } @@ -284,19 +275,22 @@ public class MetricsConnection implements StatisticTrackable { protected final String scope; private final NewMetric timerFactory = new NewMetric() { - @Override public Timer newMetric(Class clazz, String name, String scope) { + @Override + public Timer newMetric(Class clazz, String name, String scope) { return registry.timer(name(clazz, name, scope)); } }; private final NewMetric histogramFactory = new NewMetric() { - @Override public Histogram newMetric(Class clazz, String name, String scope) { + @Override + public Histogram newMetric(Class clazz, String name, String scope) { return registry.histogram(name(clazz, name, scope)); } }; private final NewMetric counterFactory = new NewMetric() { - @Override public Counter newMetric(Class clazz, String name, String scope) { + @Override + public Counter newMetric(Class clazz, String name, String scope) { return registry.counter(name(clazz, name, scope)); } }; @@ -328,47 +322,44 @@ public class MetricsConnection implements StatisticTrackable { // registry. I don't think their use perfectly removes redundant allocations, but it's // a big improvement over calling registry.newMetric each time. 
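The comment above motivates keeping per-method metric objects in pre-sized concurrent maps instead of asking the registry for them on every RPC. A minimal sketch of that memoization pattern with Dropwizard Metrics (a standalone example, not the MetricsConnection code; the class and method names are invented):

import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class MetricMemoSketch {
  private final MetricRegistry registry = new MetricRegistry();
  // Cache of already-created counters, keyed by RPC method name.
  private final ConcurrentMap<String, Counter> counters = new ConcurrentHashMap<>();

  Counter counterFor(String methodName) {
    // The factory only runs (and only touches the registry) the first time a name is seen;
    // later calls are a plain concurrent map lookup.
    return counters.computeIfAbsent(methodName,
        name -> registry.counter(MetricRegistry.name(MetricMemoSketch.class, name)));
  }
}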
protected final ConcurrentMap rpcTimers = - new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcHistograms = - new ConcurrentHashMap<>(CAPACITY * 2 /* tracking both request and response sizes */, - LOAD_FACTOR, CONCURRENCY_LEVEL); + new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcHistograms = new ConcurrentHashMap<>( + CAPACITY * 2 /* tracking both request and response sizes */, LOAD_FACTOR, CONCURRENCY_LEVEL); private final ConcurrentMap cacheDroppingExceptions = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcCounters = - new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcCounters = + new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); MetricsConnection(String scope, Supplier batchPool, - Supplier metaPool) { + Supplier metaPool) { this.scope = scope; this.registry = new MetricRegistry(); - this.registry.register(getExecutorPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = batchPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); - this.registry.register(getMetaPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = metaPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); + this.registry.register(getExecutorPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = batchPool.get(); + if (pool == null) { + return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); + this.registry.register(getMetaPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = metaPool.get(); + if (pool == null) { + return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); this.metaCacheHits = registry.counter(name(this.getClass(), "metaCacheHits", scope)); this.metaCacheMisses = registry.counter(name(this.getClass(), "metaCacheMisses", scope)); - this.metaCacheNumClearServer = registry.counter(name(this.getClass(), - "metaCacheNumClearServer", scope)); - this.metaCacheNumClearRegion = registry.counter(name(this.getClass(), - "metaCacheNumClearRegion", scope)); + this.metaCacheNumClearServer = + registry.counter(name(this.getClass(), "metaCacheNumClearServer", scope)); + this.metaCacheNumClearRegion = + registry.counter(name(this.getClass(), "metaCacheNumClearRegion", scope)); this.hedgedReadOps = registry.counter(name(this.getClass(), "hedgedReadOps", scope)); this.hedgedReadWin = registry.counter(name(this.getClass(), "hedgedReadWin", scope)); this.getTracker = new CallTracker(this.registry, "Get", scope); @@ -379,10 +370,10 @@ public class MetricsConnection implements StatisticTrackable { this.putTracker = new CallTracker(this.registry, "Mutate", "Put", scope); this.multiTracker = new CallTracker(this.registry, "Multi", scope); this.runnerStats = new RunnerStats(this.registry); - this.concurrentCallsPerServerHist = registry.histogram(name(MetricsConnection.class, - "concurrentCallsPerServer", scope)); - this.numActionsPerServerHist = registry.histogram(name(MetricsConnection.class, - "numActionsPerServer", scope)); + 
this.concurrentCallsPerServerHist = + registry.histogram(name(MetricsConnection.class, "concurrentCallsPerServer", scope)); + this.numActionsPerServerHist = + registry.histogram(name(MetricsConnection.class, "numActionsPerServer", scope)); this.nsLookups = registry.counter(name(this.getClass(), NS_LOOKUPS, scope)); this.nsLookupsFailed = registry.counter(name(this.getClass(), NS_LOOKUPS_FAILED, scope)); @@ -467,12 +458,12 @@ public class MetricsConnection implements StatisticTrackable { /** Update call stats for non-critical-path methods */ private void updateRpcGeneric(String methodName, CallStats stats) { - getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory) - .update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS); + getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory).update(stats.getCallTimeMs(), + TimeUnit.MILLISECONDS); getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory) - .update(stats.getRequestSizeBytes()); + .update(stats.getRequestSizeBytes()); getMetric(RESP_BASE + methodName, rpcHistograms, histogramFactory) - .update(stats.getResponseSizeBytes()); + .update(stats.getResponseSizeBytes()); } /** Report RPC context to metrics system. */ @@ -487,7 +478,7 @@ public class MetricsConnection implements StatisticTrackable { // this implementation is tied directly to protobuf implementation details. would be better // if we could dispatch based on something static, ie, request Message type. if (method.getService() == ClientService.getDescriptor()) { - switch(method.getIndex()) { + switch (method.getIndex()) { case 0: assert "Get".equals(method.getName()); getTracker.updateRpc(stats); @@ -495,7 +486,7 @@ public class MetricsConnection implements StatisticTrackable { case 1: assert "Mutate".equals(method.getName()); final MutationType mutationType = ((MutateRequest) param).getMutation().getMutateType(); - switch(mutationType) { + switch (mutationType) { case APPEND: appendTracker.updateRpc(stats); return; @@ -549,8 +540,8 @@ public class MetricsConnection implements StatisticTrackable { } public void incrCacheDroppingExceptions(Object exception) { - getMetric(CACHE_BASE + - (exception == null? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), + getMetric( + CACHE_BASE + (exception == null ? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), cacheDroppingExceptions, counterFactory).inc(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java index 6ad44f08a60..fc473bdbb70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java index 19340215515..cc3db106c4e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,8 +29,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Container for Actions (i.e. Get, Delete, or Put), which are grouped by - * regionName. Intended to be used with {@link AsyncProcess}. + * Container for Actions (i.e. Get, Delete, or Put), which are grouped by regionName. Intended to be + * used with {@link AsyncProcess}. */ @InterfaceAudience.Private public final class MultiAction { @@ -48,7 +47,6 @@ public final class MultiAction { /** * Get the total number of Actions - * * @return total number of Actions for all groups in this container. */ public int size() { @@ -60,26 +58,20 @@ public final class MultiAction { } /** - * Add an Action to this container based on it's regionName. If the regionName - * is wrong, the initial execution will fail, but will be automatically - * retried after looking up the correct region. - * - * @param regionName - * @param a + * Add an Action to this container based on it's regionName. If the regionName is wrong, the + * initial execution will fail, but will be automatically retried after looking up the correct + * region. nn */ public void add(byte[] regionName, Action a) { add(regionName, Collections.singletonList(a)); } /** - * Add an Action to this container based on it's regionName. If the regionName - * is wrong, the initial execution will fail, but will be automatically - * retried after looking up the correct region. - * - * @param regionName - * @param actionList list of actions to add for the region + * Add an Action to this container based on it's regionName. If the regionName is wrong, the + * initial execution will fail, but will be automatically retried after looking up the correct + * region. n * @param actionList list of actions to add for the region */ - public void add(byte[] regionName, List actionList){ + public void add(byte[] regionName, List actionList) { List rsActions = actions.get(regionName); if (rsActions == null) { rsActions = new ArrayList<>(actionList.size()); @@ -107,7 +99,7 @@ public final class MultiAction { // returns the max priority of all the actions public int getPriority() { Optional result = actions.values().stream().flatMap(List::stream) - .max((action1, action2) -> Math.max(action1.getPriority(), action2.getPriority())); + .max((action1, action2) -> Math.max(action1.getPriority(), action2.getPriority())); return result.isPresent() ? result.get().getPriority() : HConstants.PRIORITY_UNSET; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 03f168893a7..97a01e1bb6e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.util.HashMap; import java.util.Map; import java.util.TreeMap; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** * A container for Result objects, grouped by regionName. @@ -37,11 +35,10 @@ public class MultiResponse extends AbstractResponse { private Map results = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** - * The server can send us a failure for the region itself, instead of individual failure. - * It's a part of the protobuf definition. + * The server can send us a failure for the region itself, instead of individual failure. It's a + * part of the protobuf definition. */ - private Map exceptions = - new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map exceptions = new TreeMap<>(Bytes.BYTES_COMPARATOR); public MultiResponse() { super(); @@ -52,31 +49,29 @@ public class MultiResponse extends AbstractResponse { */ public int size() { int size = 0; - for (RegionResult result: results.values()) { + for (RegionResult result : results.values()) { size += result.size(); } return size; } /** - * Add the pair to the container, grouped by the regionName - * - * @param regionName - * @param originalIndex the original index of the Action (request). + * Add the pair to the container, grouped by the regionName n * @param originalIndex the original + * index of the Action (request). * @param resOrEx the result or error; will be empty for successful Put and Delete actions. */ public void add(byte[] regionName, int originalIndex, Object resOrEx) { getResult(regionName).addResult(originalIndex, resOrEx); } - public void addException(byte []regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } /** * @return the exception for the region, if any. Null otherwise. 
*/ - public Throwable getException(byte []regionName){ + public Throwable getException(byte[] regionName) { return exceptions.get(regionName); } @@ -88,7 +83,7 @@ public class MultiResponse extends AbstractResponse { getResult(regionName).setStat(stat); } - private RegionResult getResult(byte[] region){ + private RegionResult getResult(byte[] region) { RegionResult rs = results.get(region); if (rs == null) { rs = new RegionResult(); @@ -97,7 +92,7 @@ public class MultiResponse extends AbstractResponse { return rs; } - public Map getResults(){ + public Map getResults() { return this.results; } @@ -106,15 +101,15 @@ public class MultiResponse extends AbstractResponse { return ResponseType.MULTI; } - static class RegionResult{ + static class RegionResult { Map result = new HashMap<>(); ClientProtos.RegionLoadStats stat; - public void addResult(int index, Object result){ + public void addResult(int index, Object result) { this.result.put(index, result); } - public void setStat(ClientProtos.RegionLoadStats stat){ + public void setStat(ClientProtos.RegionLoadStats stat) { this.stat = stat; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index 4b4f08410a2..6484235b1eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -31,18 +30,20 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; /** - * Callable that handles the multi method call going against a single - * regionserver; i.e. A RegionServerCallable for the multi call (It is NOT a - * RegionServerCallable that goes against multiple regions). + * Callable that handles the multi method call going against a single regionserver; + * i.e. A RegionServerCallable for the multi call (It is NOT a RegionServerCallable that goes + * against multiple regions). 
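Editor's note: the MultiResponse changes above keep, per region, a map from the original action index to either a result or a Throwable, plus a separate map of whole-region failures. A simplified container with the same shape is sketched below; the names are hypothetical and not the HBase classes.

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class PerRegionResponses {
  // Per-region bucket: original action index -> result object or Throwable.
  static final class RegionBucket {
    final Map<Integer, Object> resultOrException = new HashMap<>();
  }

  private final Map<byte[], RegionBucket> results = new TreeMap<>(Arrays::compareUnsigned);
  // Whole-region failures, reported separately from per-action outcomes.
  private final Map<byte[], Throwable> regionExceptions = new TreeMap<>(Arrays::compareUnsigned);

  public void add(byte[] region, int originalIndex, Object resOrEx) {
    results.computeIfAbsent(region, k -> new RegionBucket()).resultOrException
      .put(originalIndex, resOrEx);
  }

  public void addRegionException(byte[] region, Throwable t) {
    regionExceptions.put(region, t);
  }

  public Throwable getRegionException(byte[] region) {
    return regionExceptions.get(region);
  }
}
```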
* @param */ @InterfaceAudience.Private @@ -51,8 +52,8 @@ class MultiServerCallable extends CancellableRegionServerCallable private boolean cellBlock; MultiServerCallable(final ClusterConnection connection, final TableName tableName, - final ServerName location, final MultiAction multi, RpcController rpcController, - int rpcTimeout, RetryingTimeTracker tracker, int priority) { + final ServerName location, final MultiAction multi, RpcController rpcController, int rpcTimeout, + RetryingTimeTracker tracker, int priority) { super(connection, tableName, null, rpcController, rpcTimeout, tracker, priority); this.multiAction = multi; // RegionServerCallable has HRegionLocation field, but this is a multi-region request. @@ -93,7 +94,7 @@ class MultiServerCallable extends CancellableRegionServerCallable // Pre-size. Presume at least a KV per Action. There are likely more. List cells = - (this.cellBlock ? new ArrayList(countOfActions) : null); + (this.cellBlock ? new ArrayList(countOfActions) : null); long nonceGroup = multiAction.getNonceGroup(); @@ -102,8 +103,8 @@ class MultiServerCallable extends CancellableRegionServerCallable // is RowMutations/CheckAndMutate in the action list. Map indexMap = new HashMap<>(); // The multi object is a list of Actions by region. Iterate by region. - for (Map.Entry> e: this.multiAction.actions.entrySet()) { - final byte [] regionName = e.getKey(); + for (Map.Entry> e : this.multiAction.actions.entrySet()) { + final byte[] regionName = e.getKey(); final List actions = e.getValue(); if (this.cellBlock) { // Send data in cellblocks. @@ -132,8 +133,8 @@ class MultiServerCallable extends CancellableRegionServerCallable } /** - * @return True if we should send data in cellblocks. This is an expensive call. Cache the - * result if you can rather than call each time. + * @return True if we should send data in cellblocks. This is an expensive call. Cache the result + * if you can rather than call each time. */ private boolean isCellBlock() { // This is not exact -- the configuration could have changed on us after connection was set up diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 0aa301c4c8c..22114f8f624 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import java.util.Arrays; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; @@ -36,26 +37,16 @@ class MutableRegionInfo implements RegionInfo { private static final int MAX_REPLICA_ID = 0xFFFF; /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. - * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *

    - * **NOTE** - * - * The first hbase:meta region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. + * The new format for a region name contains its encodedName at the end. The encoded name also + * serves as the directory name for the region in the filesystem. New region name format: + * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. where, <encodedName> + * is a hex version of the MD5 hash of <tablename>,<startkey>,<regionIdTimestamp> The old + * region name format: <tablename>,<startkey>,<regionIdTimestamp> For region names in the + * old format, the encoded name is a 32-bit JenkinsHash integer value (in its decimal notation, + * string form). + *

    + * **NOTE** The first hbase:meta region, and regions created by an older version of HBase (0.20 or + * prior) will continue to use the old region name format. */ // This flag is in the parent of a split while the parent is still referenced by daughter @@ -76,8 +67,8 @@ class MutableRegionInfo implements RegionInfo { private final TableName tableName; private static int generateHashCode(final TableName tableName, final byte[] startKey, - final byte[] endKey, final long regionId, - final int replicaId, boolean offLine, byte[] regionName) { + final byte[] endKey, final long regionId, final int replicaId, boolean offLine, + byte[] regionName) { int result = Arrays.hashCode(regionName); result = (int) (result ^ regionId); result ^= Arrays.hashCode(checkStartKey(startKey)); @@ -89,11 +80,11 @@ class MutableRegionInfo implements RegionInfo { } private static byte[] checkStartKey(byte[] startKey) { - return startKey == null? HConstants.EMPTY_START_ROW: startKey; + return startKey == null ? HConstants.EMPTY_START_ROW : startKey; } private static byte[] checkEndKey(byte[] endKey) { - return endKey == null? HConstants.EMPTY_END_ROW: endKey; + return endKey == null ? HConstants.EMPTY_END_ROW : endKey; } private static TableName checkTableName(TableName tableName) { @@ -119,7 +110,7 @@ class MutableRegionInfo implements RegionInfo { } MutableRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split, final long regionId, final int replicaId, boolean offLine) { + final boolean split, final long regionId, final int replicaId, boolean offLine) { this.tableName = checkTableName(tableName); this.startKey = checkStartKey(startKey); this.endKey = checkEndKey(endKey); @@ -145,11 +136,10 @@ class MutableRegionInfo implements RegionInfo { /** @return the regionId */ @Override - public long getRegionId(){ + public long getRegionId() { return regionId; } - /** * @return the regionName as an array of bytes. * @see #getRegionNameAsString() @@ -191,8 +181,7 @@ class MutableRegionInfo implements RegionInfo { } /** - * Get current table name of the region - * @return TableName + * Get current table name of the region n */ @Override public TableName getTable() { @@ -200,25 +189,22 @@ class MutableRegionInfo implements RegionInfo { } /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * Returns true if the given inclusive range of rows is fully contained by this region. For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return + * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. 
end < start) */ @Override public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); if (cellComparator.compareRows(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); } boolean firstKeyInRange = cellComparator.compareRows(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - cellComparator.compareRows(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + boolean lastKeyInRange = cellComparator.compareRows(rangeEndKey, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); return firstKeyInRange && lastKeyInRange; } @@ -228,9 +214,9 @@ class MutableRegionInfo implements RegionInfo { @Override public boolean containsRow(byte[] row) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); - return cellComparator.compareRows(row, startKey) >= 0 && - (cellComparator.compareRows(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + return cellComparator.compareRows(row, startKey) >= 0 + && (cellComparator.compareRows(row, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); } /** @return true if this region is a meta region */ @@ -248,8 +234,7 @@ class MutableRegionInfo implements RegionInfo { } /** - * @param split set split status - * @return MutableRegionInfo + * @param split set split status n */ public MutableRegionInfo setSplit(boolean split) { this.split = split; @@ -268,10 +253,9 @@ class MutableRegionInfo implements RegionInfo { } /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. - * @param offLine Set online/offline status. - * @return MutableRegionInfo + * The parent of a region split is offline while split daughters hold references to the parent. + * Offlined regions are closed. + * @param offLine Set online/offline status. n */ public MutableRegionInfo setOffline(boolean offLine) { this.offLine = offLine; @@ -309,14 +293,11 @@ class MutableRegionInfo implements RegionInfo { */ @Override public String toString() { - return "{ENCODED => " + getEncodedName() + ", " + - HConstants.NAME + " => '" + Bytes.toStringBinary(this.regionName) - + "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + "'" + - (isOffline()? ", OFFLINE => true": "") + - (isSplit()? ", SPLIT => true": "") + - ((replicaId > 0)? ", REPLICA_ID => " + replicaId : "") + "}"; + return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" + + Bytes.toStringBinary(this.regionName) + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + + "'" + (isOffline() ? ", OFFLINE => true" : "") + (isSplit() ? ", SPLIT => true" : "") + + ((replicaId > 0) ? 
", REPLICA_ID => " + replicaId : "") + "}"; } /** @@ -333,7 +314,7 @@ class MutableRegionInfo implements RegionInfo { if (!(o instanceof RegionInfo)) { return false; } - return compareTo((RegionInfo)o) == 0; + return compareTo((RegionInfo) o) == 0; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index d49b7257e50..3c22b053a0d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -62,12 +61,12 @@ import org.apache.hbase.thirdparty.com.google.common.io.ByteArrayDataOutput; import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; @InterfaceAudience.Public -public abstract class Mutation extends OperationWithAttributes implements Row, CellScannable, - HeapSize { +public abstract class Mutation extends OperationWithAttributes + implements Row, CellScannable, HeapSize { public static final long MUTATION_OVERHEAD = ClassSize.align( - // This - ClassSize.OBJECT + - // row + OperationWithAttributes.attributes + // This + ClassSize.OBJECT + + // row + OperationWithAttributes.attributes 2 * ClassSize.REFERENCE + // Timestamp 1 * Bytes.SIZEOF_LONG + @@ -78,8 +77,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C // familyMap ClassSize.TREEMAP + // priority - ClassSize.INTEGER - ); + ClassSize.INTEGER); /** * The attribute for storing the list of clusters that have consumed the change. @@ -94,17 +92,16 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C private static final String RETURN_RESULTS = "_rr_"; // TODO: row should be final - protected byte [] row = null; + protected byte[] row = null; protected long ts = HConstants.LATEST_TIMESTAMP; protected Durability durability = Durability.USE_DEFAULT; // TODO: familyMap should be final // A Map sorted by column family. - protected NavigableMap> familyMap; + protected NavigableMap> familyMap; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected Mutation() { this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -114,19 +111,19 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C super(clone); this.row = clone.getRow(); this.ts = clone.getTimestamp(); - this.familyMap = clone.getFamilyCellMap().entrySet().stream(). - collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { + this.familyMap = clone.getFamilyCellMap().entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { throw new RuntimeException("collisions!!!"); }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); } /** * Construct the mutation with user defined data. - * @param row row. CAN'T be null - * @param ts timestamp + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. 
CAN'T be null */ - protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { + protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { this.row = Preconditions.checkNotNull(row); if (row.length == 0) { throw new IllegalArgumentException("Row can't be empty"); @@ -141,9 +138,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * Creates an empty list if one doesn't exist for the given column family - * or else it returns the associated list of Cell objects. - * + * Creates an empty list if one doesn't exist for the given column family or else it returns the + * associated list of Cell objects. * @param family column family * @return a list of Cell objects, returns an empty list if one doesn't exist. */ @@ -158,7 +154,6 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C /* * Create a KeyValue with this objects row key and the Put identifier. - * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) { @@ -166,12 +161,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * Create a KeyValue with this objects row key and the Put identifier. - * @param family - * @param qualifier - * @param ts - * @param value - * @param tags - Specify the Tags as an Array + * Create a KeyValue with this objects row key and the Put identifier. nnnn * @param tags - + * Specify the Tags as an Array * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tags) { @@ -181,21 +172,18 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C /* * Create a KeyValue with this objects row key and the Put identifier. - * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value, - Tag[] tags) { - return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, - family, 0, family == null ? 0 : family.length, - qualifier, ts, KeyValue.Type.Put, value, tags != null ? Arrays.asList(tags) : null); + Tag[] tags) { + return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, family, 0, + family == null ? 0 : family.length, qualifier, ts, KeyValue.Type.Put, value, + tags != null ? Arrays.asList(tags) : null); } /** - * Compile the column family (i.e. schema) information - * into a Map. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map + * Compile the column family (i.e. schema) information into a Map. Useful for parsing and + * aggregation by debugging, logging, and administration tools. n */ @Override public Map getFingerprint() { @@ -204,18 +192,17 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C // ideally, we would also include table information, but that information // is not stored in each Operation instance. map.put("families", families); - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. 
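Editor's note: getFingerprint(), shown above, reduces a mutation to just its column family names so it can be logged cheaply, while toMap(maxCols) adds per-cell detail up to a limit. A small self-contained sketch of the fingerprint half, assuming a plain family-to-cells map; the helper names are illustrative only.

```java
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class MutationFingerprint {
  // Collapse a family -> cells map down to the family names only, as getFingerprint does.
  public static Map<String, Object> fingerprint(NavigableMap<byte[], List<String>> familyMap) {
    Map<String, Object> map = new HashMap<>();
    List<String> families = new ArrayList<>();
    map.put("families", families);
    for (byte[] family : familyMap.keySet()) {
      families.add(new String(family, StandardCharsets.UTF_8));
    }
    return map;
  }

  public static void main(String[] args) {
    NavigableMap<byte[], List<String>> familyMap = new TreeMap<>(Arrays::compareUnsigned);
    familyMap.put("cf1".getBytes(StandardCharsets.UTF_8), List.of("cell-a"));
    System.out.println(fingerprint(familyMap)); // {families=[cf1]}
  }
}
```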
- * @param maxCols a limit on the number of columns output prior to truncation - * @return Map + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. + * @param maxCols a limit on the number of columns output prior to truncation n */ @Override public Map toMap(int maxCols) { @@ -228,7 +215,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C map.put("row", Bytes.toStringBinary(this.row)); int colCount = 0; // iterate through all column families affected - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { // map from this family to details for each cell affected within the family List> qualifierDetails = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); @@ -237,7 +224,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C continue; } // add details for each cell - for (Cell cell: entry.getValue()) { + for (Cell cell : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -262,16 +249,15 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C private static Map cellToStringMap(Cell c) { Map stringMap = new HashMap<>(); - stringMap.put("qualifier", Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), - c.getQualifierLength())); + stringMap.put("qualifier", + Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength())); stringMap.put("timestamp", c.getTimestamp()); stringMap.put("vlen", c.getValueLength()); List tags = PrivateCellUtil.getTags(c); if (tags != null) { List tagsString = new ArrayList<>(tags.size()); for (Tag t : tags) { - tagsString - .add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); + tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); } stringMap.put("tag", tagsString); } @@ -279,8 +265,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * Set the durability for this mutation - * @param d + * Set the durability for this mutation n */ public Mutation setDurability(Durability d) { this.durability = d; @@ -293,21 +278,20 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * Method for retrieving the put's familyMap - * @return familyMap + * Method for retrieving the put's familyMap n */ - public NavigableMap> getFamilyCellMap() { + public NavigableMap> getFamilyCellMap() { return this.familyMap; } /** * Method for setting the mutation's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Mutation#Mutation(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Mutation#Mutation(byte[], long, NavigableMap)} instead */ @Deprecated - public Mutation setFamilyCellMap(NavigableMap> map) { - // TODO: Shut this down or move it up to be a Constructor. Get new object rather than change + public Mutation setFamilyCellMap(NavigableMap> map) { + // TODO: Shut this down or move it up to be a Constructor. Get new object rather than change // this internal data member. 
this.familyMap = map; return this; @@ -322,17 +306,16 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * Method for retrieving the delete's row - * @return row + * Method for retrieving the delete's row n */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Row#COMPARATOR} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Row#COMPARATOR} instead */ @Deprecated @Override @@ -341,10 +324,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * Method for retrieving the timestamp - * @return timestamp - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getTimestamp()} instead + * Method for retrieving the timestamp n * @deprecated As of release 2.0.0, this will be removed + * in HBase 3.0.0. Use {@link #getTimestamp()} instead */ @Deprecated public long getTimeStamp() { @@ -352,9 +333,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * Method for retrieving the timestamp. - * - * @return timestamp + * Method for retrieving the timestamp. n */ public long getTimestamp() { return this.ts; @@ -381,10 +360,10 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C public List getClusterIds() { List clusterIds = new ArrayList<>(); byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS); - if(bytes != null) { + if (bytes != null) { ByteArrayDataInput in = ByteStreams.newDataInput(bytes); int numClusters = in.readInt(); - for(int i=0; i> entry : getFamilyCellMap().entrySet()) { - //Adding key overhead - heapsize += - ClassSize.align(ClassSize.ARRAY + entry.getKey().length); + heapsize += ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY); + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + // Adding key overhead + heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length); - //This part is kinds tricky since the JVM can reuse references if you - //store the same value, but have a good match with SizeOf at the moment - //Adding value overhead + // This part is kinds tricky since the JVM can reuse references if you + // store the same value, but have a good match with SizeOf at the moment + // Adding value overhead heapsize += ClassSize.align(ClassSize.ARRAYLIST); int size = entry.getValue().size(); - heapsize += ClassSize.align(ClassSize.ARRAY + - size * ClassSize.REFERENCE); + heapsize += ClassSize.align(ClassSize.ARRAY + size * ClassSize.REFERENCE); for (Cell cell : entry.getValue()) { heapsize += cell.heapSize(); @@ -515,7 +483,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * @param user User short name + * @param user User short name * @param perms Permissions for the user */ public Mutation setACL(String user, Permission perms) { @@ -539,8 +507,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C /** * Return the TTL requested for the result of the mutation, in milliseconds. 
- * @return the TTL requested for the result of the mutation, in milliseconds, - * or Long.MAX_VALUE if unset + * @return the TTL requested for the result of the mutation, in milliseconds, or Long.MAX_VALUE if + * unset */ public long getTTL() { byte[] ttlBytes = getAttribute(OP_ATTRIBUTE_TTL); @@ -552,8 +520,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C /** * Set the TTL desired for the result of the mutation, in milliseconds. - * @param ttl the TTL desired for the result of the mutation, in milliseconds - * @return this + * @param ttl the TTL desired for the result of the mutation, in milliseconds n */ public Mutation setTTL(long ttl) { setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl)); @@ -581,7 +548,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C * Subclasses should override this method to add the heap size of their own fields. * @return the heap size to add (will be aligned). */ - protected long extraHeapSize(){ + protected long extraHeapSize() { return 0L; } @@ -597,76 +564,71 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family & qualifier. - * Both given arguments must match the KeyValue object to return true. - * - * @param family column family + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family & qualifier. Both given arguments must match the KeyValue object to return + * true. + * @param family column family * @param qualifier column qualifier - * @return returns true if the given family and qualifier already has an - * existing KeyValue object in the family map. + * @return returns true if the given family and qualifier already has an existing KeyValue object + * in the family map. */ - public boolean has(byte [] family, byte [] qualifier) { + public boolean has(byte[] family, byte[] qualifier) { return has(family, qualifier, this.ts, HConstants.EMPTY_BYTE_ARRAY, true, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * - * @param family column family + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. + * @param family column family * @param qualifier column qualifier - * @param ts timestamp - * @return returns true if the given family, qualifier and timestamp already has an - * existing KeyValue object in the family map. + * @param ts timestamp + * @return returns true if the given family, qualifier and timestamp already has an existing + * KeyValue object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, long ts) { + public boolean has(byte[] family, byte[] qualifier, long ts) { return has(family, qualifier, ts, HConstants.EMPTY_BYTE_ARRAY, false, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. 
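Editor's note: getTTL()/setTTL() above round-trip the TTL through a named operation attribute and report Long.MAX_VALUE when the attribute is absent. A minimal sketch of that encode/decode using java.nio.ByteBuffer in place of HBase's Bytes utility; the attribute key and class name here are placeholders, not the real constants.

```java
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

public class TtlAttribute {
  private static final String TTL_ATTRIBUTE_KEY = "_ttl"; // placeholder key
  private final Map<String, byte[]> attributes = new HashMap<>();

  public void setTTL(long ttlMs) {
    // Store the TTL as 8 big-endian bytes, the same idea as Bytes.toBytes(long).
    attributes.put(TTL_ATTRIBUTE_KEY, ByteBuffer.allocate(Long.BYTES).putLong(ttlMs).array());
  }

  public long getTTL() {
    byte[] ttlBytes = attributes.get(TTL_ATTRIBUTE_KEY);
    // Absent attribute means "no TTL requested": report Long.MAX_VALUE, as getTTL() does.
    return ttlBytes == null ? Long.MAX_VALUE : ByteBuffer.wrap(ttlBytes).getLong();
  }

  public static void main(String[] args) {
    TtlAttribute op = new TtlAttribute();
    System.out.println(op.getTTL()); // 9223372036854775807
    op.setTTL(60_000L);
    System.out.println(op.getTTL()); // 60000
  }
}
```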
- * - * @param family column family + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. + * @param family column family * @param qualifier column qualifier - * @param value value to check - * @return returns true if the given family, qualifier and value already has an - * existing KeyValue object in the family map. + * @param value value to check + * @return returns true if the given family, qualifier and value already has an existing KeyValue + * object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, byte[] value) { return has(family, qualifier, this.ts, value, true, false); } /** - * A convenience method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp. - * All 4 given arguments must match the KeyValue object to return true. - * - * @param family column family + * A convenience method to determine if this object's familyMap contains the given value assigned + * to the given family, qualifier and timestamp. All 4 given arguments must match the KeyValue + * object to return true. + * @param family column family * @param qualifier column qualifier - * @param ts timestamp - * @param value value to check - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. + * @param ts timestamp + * @param value value to check + * @return returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, long ts, byte[] value) { return has(family, qualifier, ts, value, false, false); } /** * Returns a list of all KeyValue objects with matching column family and qualifier. - * - * @param family column family + * @param family column family * @param qualifier column qualifier - * @return a list of KeyValue objects with the matching family and qualifier, - * returns an empty list if one doesn't exist for the given family. + * @return a list of KeyValue objects with the matching family and qualifier, returns an empty + * list if one doesn't exist for the given family. */ public List get(byte[] family, byte[] qualifier) { List filteredList = new ArrayList<>(); - for (Cell cell: getCellList(family)) { + for (Cell cell : getCellList(family)) { if (CellUtil.matchingQualifier(cell, qualifier)) { filteredList.add(cell); } @@ -675,21 +637,13 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /* - * Private method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp - * respecting the 2 boolean arguments - * - * @param family - * @param qualifier - * @param ts - * @param value - * @param ignoreTS - * @param ignoreValue - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. 
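Editor's note: the private has(...) implementation that follows spells out four branches for the ignoreTS/ignoreValue combinations. The same rule can be read as a single predicate: family and qualifier must always match, timestamp and value only when they are not ignored. A hypothetical sketch with simple value types standing in for Cells:

```java
public class CellMatch {
  // One predicate equivalent to the four ignoreTS/ignoreValue branches in Mutation.has():
  // family and qualifier always compared, ts and value only when not ignored.
  static boolean matches(String cellFamily, String cellQualifier, long cellTs, String cellValue,
      String family, String qualifier, long ts, String value, boolean ignoreTS,
      boolean ignoreValue) {
    return cellFamily.equals(family)
      && cellQualifier.equals(qualifier)
      && (ignoreTS || cellTs == ts)
      && (ignoreValue || cellValue.equals(value));
  }

  public static void main(String[] args) {
    // Same family/qualifier, different timestamp: matches only when the timestamp is ignored.
    System.out.println(matches("cf", "q", 1L, "v", "cf", "q", 2L, "v", true, false));  // true
    System.out.println(matches("cf", "q", 1L, "v", "cf", "q", 2L, "v", false, false)); // false
  }
}
```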
+ * Private method to determine if this object's familyMap contains the given value assigned to the + * given family, qualifier and timestamp respecting the 2 boolean arguments nnnnnn * @return + * returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. */ - protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, - boolean ignoreTS, boolean ignoreValue) { + protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS, + boolean ignoreValue) { List list = getCellList(family); if (list.isEmpty()) { return false; @@ -701,31 +655,34 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C // F F => 1 if (!ignoreTS && !ignoreValue) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier) && - CellUtil.matchingValue(cell, value) && - cell.getTimestamp() == ts) { + if ( + CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && CellUtil.matchingValue(cell, value) && cell.getTimestamp() == ts + ) { return true; } } } else if (ignoreValue && !ignoreTS) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) - && cell.getTimestamp() == ts) { + if ( + CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && cell.getTimestamp() == ts + ) { return true; } } } else if (!ignoreValue && ignoreTS) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) - && CellUtil.matchingValue(cell, value)) { + if ( + CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && CellUtil.matchingValue(cell, value) + ) { return true; } } } else { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier)) { + if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) { return true; } } @@ -735,23 +692,20 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C /** * @param row Row to check - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @throws IllegalArgumentException Thrown if row is empty or null or > + * {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row) { - return checkRow(row, 0, row == null? 0: row.length); + static byte[] checkRow(final byte[] row) { + return checkRow(row, 0, row == null ? 
0 : row.length); } /** - * @param row Row to check - * @param offset - * @param length - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @param row Row to check nn * @throws IllegalArgumentException Thrown if row is + * empty or null or > {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row, final int offset, final int length) { + static byte[] checkRow(final byte[] row, final int offset, final int length) { if (row == null) { throw new IllegalArgumentException("Row buffer is null"); } @@ -759,8 +713,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C throw new IllegalArgumentException("Row length is 0"); } if (length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + length + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + length + " is > " + HConstants.MAX_ROW_LENGTH); } return row; } @@ -773,18 +727,18 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C throw new IllegalArgumentException("Row length is 0"); } if (row.remaining() > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + row.remaining() + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + row.remaining() + " is > " + HConstants.MAX_ROW_LENGTH); } } Mutation add(Cell cell) throws IOException { - //Checking that the row of the kv is the same as the mutation + // Checking that the row of the kv is the same as the mutation // TODO: It is fraught with risk if user pass the wrong row. // Throwing the IllegalArgumentException is more suitable I'd say. if (!CellUtil.matchingRows(cell, this.row)) { - throw new WrongRowIOException("The row in " + cell.toString() + - " doesn't match the original one " + Bytes.toStringBinary(this.row)); + throw new WrongRowIOException("The row in " + cell.toString() + + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } byte[] family; @@ -808,11 +762,10 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } private static final class CellWrapper implements ExtendedCell { - private static final long FIXED_OVERHEAD = ClassSize.align( - ClassSize.OBJECT // object header - + KeyValue.TIMESTAMP_SIZE // timestamp - + Bytes.SIZEOF_LONG // sequence id - + 1 * ClassSize.REFERENCE); // references to cell + private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT // object header + + KeyValue.TIMESTAMP_SIZE // timestamp + + Bytes.SIZEOF_LONG // sequence id + + 1 * ClassSize.REFERENCE); // references to cell private final Cell cell; private long sequenceId; private long timestamp; @@ -945,21 +898,19 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } private long heapOverhead() { - return FIXED_OVERHEAD - + ClassSize.ARRAY // row - + getFamilyLength() == 0 ? 0 : ClassSize.ARRAY - + getQualifierLength() == 0 ? 0 : ClassSize.ARRAY - + getValueLength() == 0 ? 0 : ClassSize.ARRAY - + getTagsLength() == 0 ? 0 : ClassSize.ARRAY; + return FIXED_OVERHEAD + ClassSize.ARRAY // row + + getFamilyLength() == 0 + ? 0 + : ClassSize.ARRAY + getQualifierLength() == 0 ? 0 + : ClassSize.ARRAY + getValueLength() == 0 ? 0 + : ClassSize.ARRAY + getTagsLength() == 0 ? 
0 + : ClassSize.ARRAY; } @Override public long heapSize() { - return heapOverhead() - + ClassSize.align(getRowLength()) - + ClassSize.align(getFamilyLength()) - + ClassSize.align(getQualifierLength()) - + ClassSize.align(getValueLength()) + return heapOverhead() + ClassSize.align(getRowLength()) + ClassSize.align(getFamilyLength()) + + ClassSize.align(getQualifierLength()) + ClassSize.align(getValueLength()) + ClassSize.align(getTagsLength()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java index 2bae4436383..b0450f2e47e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +18,19 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.PreemptiveFastFailException; +import org.apache.yetus.audience.InterfaceAudience; /** - * Class that acts as a NoOpInterceptor. This class is used in case the - * RetryingCallerInterceptor was not configured correctly or an - * RetryingCallerInterceptor was never configured in the first place. - * + * Class that acts as a NoOpInterceptor. This class is used in case the RetryingCallerInterceptor + * was not configured correctly or an RetryingCallerInterceptor was never configured in the first + * place. */ @InterfaceAudience.Private class NoOpRetryableCallerInterceptor extends RetryingCallerInterceptor { private static final RetryingCallerInterceptorContext NO_OP_CONTEXT = - new NoOpRetryingInterceptorContext(); + new NoOpRetryingInterceptorContext(); public NoOpRetryableCallerInterceptor() { } @@ -42,14 +40,13 @@ class NoOpRetryableCallerInterceptor extends RetryingCallerInterceptor { } @Override - public void intercept( - RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) - throws PreemptiveFastFailException { + public void intercept(RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) + throws PreemptiveFastFailException { } @Override - public void handleFailure(RetryingCallerInterceptorContext context, - Throwable t) throws IOException { + public void handleFailure(RetryingCallerInterceptorContext context, Throwable t) + throws IOException { } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java index c726ccda4c7..80c2274223c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
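Editor's note: heapSize() above sums ClassSize.align(...) over each component length; align rounds a size up to the JVM's 8-byte object alignment before it is added. A tiny sketch of that rounding, written as my own helper rather than HBase's ClassSize:

```java
public class Align {
  // Round up to the nearest multiple of 8, the usual JVM object alignment,
  // which is the role ClassSize.align() plays in the heapSize() code above.
  static long align(long size) {
    return (size + 7L) & ~7L;
  }

  public static void main(String[] args) {
    System.out.println(align(1));  // 8
    System.out.println(align(8));  // 8
    System.out.println(align(13)); // 16
  }
}
```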
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java index 184f0c0bc0f..6d5d94802d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java index 70fa36a5afa..3020be22105 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * NonceGenerator interface. - * In general, nonce group is an ID (one per client, or region+client, or whatever) that - * could be used to reduce collision potential, or be used by compatible server nonce manager - * to optimize nonce storage and removal. See HBASE-3787. + * NonceGenerator interface. In general, nonce group is an ID (one per client, or region+client, or + * whatever) that could be used to reduce collision potential, or be used by compatible server nonce + * manager to optimize nonce storage and removal. See HBASE-3787. */ @InterfaceAudience.Private public interface NonceGenerator { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java index 2da8422f483..7d0e2979f77 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,26 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.yetus.audience.InterfaceAudience; /** - * Implementations make an rpc call against a RegionService via a protobuf Service. - * Implement #rpcCall(RpcController) and then call {@link #call(int)} to - * trigger the rpc. The {@link #call(int)} eventually invokes your - * #rpcCall(RpcController) meanwhile saving you having to write a bunch of - * boilerplate. The {@link #call(int)} implementation is from {@link RpcRetryingCaller} so rpcs are - * retried on fail. - * - *

    TODO: this class is actually tied to one region, because most of the paths make use of - * the regioninfo part of location when building requests. The only reason it works for - * multi-region requests (e.g. batch) is that they happen to not use the region parts. - * This could be done cleaner (e.g. having a generic parameter and 2 derived classes, - * RegionCallable and actual RegionServerCallable with ServerName. + * Implementations make an rpc call against a RegionService via a protobuf Service. Implement + * #rpcCall(RpcController) and then call {@link #call(int)} to trigger the rpc. The + * {@link #call(int)} eventually invokes your #rpcCall(RpcController) meanwhile saving you having to + * write a bunch of boilerplate. The {@link #call(int)} implementation is from + * {@link RpcRetryingCaller} so rpcs are retried on fail. + *

    + * TODO: this class is actually tied to one region, because most of the paths make use of the + * regioninfo part of location when building requests. The only reason it works for multi-region + * requests (e.g. batch) is that they happen to not use the region parts. This could be done cleaner + * (e.g. having a generic parameter and 2 derived classes, RegionCallable and actual + * RegionServerCallable with ServerName. * @param the class that the ServerCallable handles */ @InterfaceAudience.Private @@ -43,11 +41,11 @@ public abstract class NoncedRegionServerCallable extends ClientServiceCallabl /** * @param connection Connection to use. - * @param tableName Table name to which row belongs. - * @param row The row we want in tableName. + * @param tableName Table name to which row belongs. + * @param row The row we want in tableName. */ - public NoncedRegionServerCallable(Connection connection, TableName tableName, byte [] row, - HBaseRpcController rpcController, int priority) { + public NoncedRegionServerCallable(Connection connection, TableName tableName, byte[] row, + HBaseRpcController rpcController, int priority) { super(connection, tableName, row, rpcController, priority); this.nonce = getConnection().getNonceGenerator().newNonce(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java index 982ec5b0065..a720e1a7112 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java @@ -24,25 +24,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** * A collection of criteria used for table selection. The logic of table selection is as follows: *

<ul>
- *   <li>
- *     When no parameter values are provided, an unfiltered list of all user tables is returned.
- *   </li>
- *   <li>
- *     When a list of {@link TableName TableNames} are provided, the filter starts with any of
- *     these tables that exist.
- *   </li>
- *   <li>
- *     When a {@code namespace} name is provided, the filter starts with all the tables present in
- *     that namespace.
- *   </li>
- *   <li>
- *     If both a list of {@link TableName TableNames} and a {@code namespace} name are provided,
- *     the {@link TableName} list is honored and the {@code namespace} name is ignored.
- *   </li>
- *   <li>
- *     If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
- *     reduced to those that match the provided regular expression.
- *   </li>
+ * <li>When no parameter values are provided, an unfiltered list of all user tables is returned.
+ * </li>
+ * <li>When a list of {@link TableName TableNames} are provided, the filter starts with any of these
+ * tables that exist.</li>
+ * <li>When a {@code namespace} name is provided, the filter starts with all the tables present in
+ * that namespace.</li>
+ * <li>If both a list of {@link TableName TableNames} and a {@code namespace} name are provided, the
+ * {@link TableName} list is honored and the {@code namespace} name is ignored.</li>
+ * <li>If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
+ * reduced to those that match the provided regular expression.</li>
 * </ul>
    */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java index 115e55f336f..96182ca4b29 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -40,10 +38,9 @@ final public class OnlineLogRecord extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(OnlineLogRecord.class, (JsonSerializer) - (slowLogPayload, type, jsonSerializationContext) -> { + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(OnlineLogRecord.class, + (JsonSerializer) (slowLogPayload, type, jsonSerializationContext) -> { Gson gson = new Gson(); JsonObject jsonObj = (JsonObject) gson.toJsonTree(slowLogPayload); if (slowLogPayload.getMultiGetsCount() == 0) { @@ -132,10 +129,10 @@ final public class OnlineLogRecord extends LogEntry { } private OnlineLogRecord(final long startTime, final int processingTime, final int queueTime, - final long responseSize, final String clientAddress, final String serverClass, - final String methodName, final String callDetails, final String param, - final String regionName, final String userName, final int multiGetsCount, - final int multiMutationsCount, final int multiServiceCalls) { + final long responseSize, final String clientAddress, final String serverClass, + final String methodName, final String callDetails, final String param, final String regionName, + final String userName, final int multiGetsCount, final int multiMutationsCount, + final int multiServiceCalls) { this.startTime = startTime; this.processingTime = processingTime; this.queueTime = queueTime; @@ -239,9 +236,9 @@ final public class OnlineLogRecord extends LogEntry { } public OnlineLogRecord build() { - return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, - clientAddress, serverClass, methodName, callDetails, param, regionName, - userName, multiGetsCount, multiMutationsCount, multiServiceCalls); + return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, clientAddress, + serverClass, methodName, callDetails, param, regionName, userName, multiGetsCount, + multiMutationsCount, multiServiceCalls); } } @@ -257,42 +254,22 @@ final public class OnlineLogRecord extends LogEntry { OnlineLogRecord that = (OnlineLogRecord) o; - return new EqualsBuilder() - .append(startTime, that.startTime) - .append(processingTime, that.processingTime) - .append(queueTime, that.queueTime) - .append(responseSize, that.responseSize) - .append(multiGetsCount, that.multiGetsCount) + return new EqualsBuilder().append(startTime, that.startTime) + .append(processingTime, that.processingTime).append(queueTime, that.queueTime) + .append(responseSize, that.responseSize).append(multiGetsCount, that.multiGetsCount) 
.append(multiMutationsCount, that.multiMutationsCount) - .append(multiServiceCalls, that.multiServiceCalls) - .append(clientAddress, that.clientAddress) - .append(serverClass, that.serverClass) - .append(methodName, that.methodName) - .append(callDetails, that.callDetails) - .append(param, that.param) - .append(regionName, that.regionName) - .append(userName, that.userName) - .isEquals(); + .append(multiServiceCalls, that.multiServiceCalls).append(clientAddress, that.clientAddress) + .append(serverClass, that.serverClass).append(methodName, that.methodName) + .append(callDetails, that.callDetails).append(param, that.param) + .append(regionName, that.regionName).append(userName, that.userName).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(startTime) - .append(processingTime) - .append(queueTime) - .append(responseSize) - .append(clientAddress) - .append(serverClass) - .append(methodName) - .append(callDetails) - .append(param) - .append(regionName) - .append(userName) - .append(multiGetsCount) - .append(multiMutationsCount) - .append(multiServiceCalls) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(startTime).append(processingTime).append(queueTime) + .append(responseSize).append(clientAddress).append(serverClass).append(methodName) + .append(callDetails).append(param).append(regionName).append(userName).append(multiGetsCount) + .append(multiMutationsCount).append(multiServiceCalls).toHashCode(); } @Override @@ -302,22 +279,14 @@ final public class OnlineLogRecord extends LogEntry { @Override public String toString() { - return new ToStringBuilder(this) - .append("startTime", startTime) - .append("processingTime", processingTime) - .append("queueTime", queueTime) - .append("responseSize", responseSize) - .append("clientAddress", clientAddress) - .append("serverClass", serverClass) - .append("methodName", methodName) - .append("callDetails", callDetails) - .append("param", param) - .append("regionName", regionName) - .append("userName", userName) - .append("multiGetsCount", multiGetsCount) + return new ToStringBuilder(this).append("startTime", startTime) + .append("processingTime", processingTime).append("queueTime", queueTime) + .append("responseSize", responseSize).append("clientAddress", clientAddress) + .append("serverClass", serverClass).append("methodName", methodName) + .append("callDetails", callDetails).append("param", param).append("regionName", regionName) + .append("userName", userName).append("multiGetsCount", multiGetsCount) .append("multiMutationsCount", multiMutationsCount) - .append("multiServiceCalls", multiServiceCalls) - .toString(); + .append("multiServiceCalls", multiServiceCalls).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java index 3b6a6f5e51c..a517f0bb43a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
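Editor's note: OnlineLogRecord above builds equals, hashCode, and toString with the commons-lang3 builder classes; spotless only repacks the chained append calls. For reference, a minimal value class using the same builders (assumes commons-lang3 on the classpath; the class and its fields are invented for the example):

```java
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;

public class LogEntrySketch {
  private final long startTime;
  private final String userName;

  public LogEntrySketch(long startTime, String userName) {
    this.startTime = startTime;
    this.userName = userName;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof LogEntrySketch)) {
      return false;
    }
    LogEntrySketch that = (LogEntrySketch) o;
    // Append each significant field; isEquals() folds the comparisons together.
    return new EqualsBuilder().append(startTime, that.startTime).append(userName, that.userName)
      .isEquals();
  }

  @Override
  public int hashCode() {
    // Two odd seed numbers, following the (17, 37) convention used above.
    return new HashCodeBuilder(17, 37).append(startTime).append(userName).toHashCode();
  }

  @Override
  public String toString() {
    return new ToStringBuilder(this).append("startTime", startTime).append("userName", userName)
      .toString();
  }
}
```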
See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,12 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.JsonMapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Superclass for any type that maps to a potentially application-level query. - * (e.g. Put, Get, Delete, Scan, Next, etc.) - * Contains methods for exposure to logging and debugging tools. + * Superclass for any type that maps to a potentially application-level query. (e.g. Put, Get, + * Delete, Scan, Next, etc.) Contains methods for exposure to logging and debugging tools. */ @InterfaceAudience.Public public abstract class Operation { @@ -36,15 +33,15 @@ public abstract class Operation { private static final int DEFAULT_MAX_COLS = 5; /** - * Produces a Map containing a fingerprint which identifies the type and - * the static schema components of a query (i.e. column families) + * Produces a Map containing a fingerprint which identifies the type and the static schema + * components of a query (i.e. column families) * @return a map containing fingerprint information (i.e. column families) */ public abstract Map getFingerprint(); /** - * Produces a Map containing a summary of the details of a query - * beyond the scope of the fingerprint (i.e. columns, rows...) + * Produces a Map containing a summary of the details of a query beyond the scope of the + * fingerprint (i.e. columns, rows...) * @param maxCols a limit on the number of columns output prior to truncation * @return a map containing parameters of a query (i.e. rows, columns...) */ @@ -59,8 +56,7 @@ public abstract class Operation { } /** - * Produces a JSON object for fingerprint and details exposure in a - * parseable format. + * Produces a JSON object for fingerprint and details exposure in a parseable format. * @param maxCols a limit on the number of columns to include in the JSON * @return a JSONObject containing this Operation's information, as a string */ @@ -69,8 +65,7 @@ public abstract class Operation { } /** - * Produces a JSON object sufficient for description of a query - * in a debugging or logging context. + * Produces a JSON object sufficient for description of a query in a debugging or logging context. * @return the produced JSON object, as a string */ public String toJSON() throws IOException { @@ -78,17 +73,16 @@ public abstract class Operation { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. - * @param maxCols a limit on the number of columns output in the summary - * prior to truncation + * Produces a string representation of this Operation. It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. + * @param maxCols a limit on the number of columns output in the summary prior to truncation * @return a JSON-parseable String */ public String toString(int maxCols) { - /* for now this is merely a wrapper from producing a JSON string, but - * toJSON is kept separate in case this is changed to be a less parsable - * pretty printed representation. 
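
Context for the Operation javadoc being reflowed above: getFingerprint()/toMap() feed the JSON renderings that client code can use when logging a query. A minimal, self-contained sketch (the row key, family, and column limit are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class OperationLoggingSketch {
  public static void main(String[] args) throws IOException {
    Get get = new Get(Bytes.toBytes("row-1"));
    get.addFamily(Bytes.toBytes("cf"));
    // toJSON(maxCols) renders the fingerprint plus a truncated detail map;
    // toString(maxCols) falls back to a non-JSON form if JSON encoding fails.
    System.out.println(get.toJSON(5));
    System.out.println(get.toString(2));
  }
}
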
+ /* + * for now this is merely a wrapper from producing a JSON string, but toJSON is kept separate in + * case this is changed to be a less parsable pretty printed representation. */ try { return toJSON(maxCols); @@ -98,10 +92,9 @@ public abstract class Operation { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. - * @return String + * Produces a string representation of this Operation. It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. n */ @Override public String toString() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java index 7342e65bb31..e34c9d6eacb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Collections; @@ -39,15 +37,15 @@ public abstract class OperationWithAttributes extends Operation implements Attri private int priority = HConstants.PRIORITY_UNSET; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected OperationWithAttributes() { } protected OperationWithAttributes(OperationWithAttributes clone) { - this.attributes = clone.getAttributesMap() == null ? null : - clone.getAttributesMap().entrySet().stream() + this.attributes = clone.getAttributesMap() == null + ? null + : clone.getAttributesMap().entrySet().stream() .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue(), (k, v) -> { throw new RuntimeException("collisions!!!"); }, () -> new TreeMap<>())); @@ -96,7 +94,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri long size = 0; if (attributes != null) { size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY); - for(Map.Entry entry : this.attributes.entrySet()) { + for (Map.Entry entry : this.attributes.entrySet()) { size += ClassSize.align(ClassSize.STRING + entry.getKey().length()); size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length); } @@ -105,13 +103,10 @@ public abstract class OperationWithAttributes extends Operation implements Attri } /** - * This method allows you to set an identifier on an operation. The original - * motivation for this was to allow the identifier to be used in slow query - * logging, but this could obviously be useful in other places. One use of - * this could be to put a class.method identifier in here to see where the - * slow query is coming from. - * @param id - * id to set for the scan + * This method allows you to set an identifier on an operation. The original motivation for this + * was to allow the identifier to be used in slow query logging, but this could obviously be + * useful in other places. 
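
The setId() javadoc above describes tagging an operation so it can be recognized later in slow-query logs; a minimal sketch of that pattern, where the class#method label is purely illustrative:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class OperationIdSketch {
  public static void main(String[] args) {
    Scan scan = new Scan().addFamily(Bytes.toBytes("cf"));
    // Tag the scan with a caller identifier so slow-query logging can attribute it.
    scan.setId("ReportJob#dailyRollup");
    System.out.println(scan.getId()); // prints ReportJob#dailyRollup
  }
}
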
One use of this could be to put a class.method identifier in here to + * see where the slow query is coming from. n * id to set for the scan */ public OperationWithAttributes setId(String id) { setAttribute(ID_ATRIBUTE, Bytes.toBytes(id)); @@ -119,13 +114,12 @@ public abstract class OperationWithAttributes extends Operation implements Attri } /** - * This method allows you to retrieve the identifier for the operation if one - * was set. + * This method allows you to retrieve the identifier for the operation if one was set. * @return the id or null if not set */ public String getId() { byte[] attr = getAttribute(ID_ATRIBUTE); - return attr == null? null: Bytes.toString(attr); + return attr == null ? null : Bytes.toString(attr); } public OperationWithAttributes setPriority(int priority) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java index 1b1ded9953b..56a8dd19fcc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java index 8aedc4d2205..f264d9babd7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +35,8 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator { private PerClientRandomNonceGenerator() { byte[] clientIdBase = ClientIdGenerator.generateClientId(); - this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) + - ThreadLocalRandom.current().nextInt(); + this.clientId = + (((long) Arrays.hashCode(clientIdBase)) << 32) + ThreadLocalRandom.current().nextInt(); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java index e35902ca78f..0223109c4e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; @@ -64,8 +63,7 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { - private static final Logger LOG = LoggerFactory - .getLogger(PreemptiveFastFailInterceptor.class); + private static final Logger LOG = LoggerFactory.getLogger(PreemptiveFastFailInterceptor.class); // amount of time to wait before we consider a server to be in fast fail // mode @@ -73,7 +71,8 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { // Keeps track of failures when we cannot talk to a server. Helps in // fast failing clients if the server is down for a long time. - protected final ConcurrentMap repeatedFailuresMap = new ConcurrentHashMap<>(); + protected final ConcurrentMap repeatedFailuresMap = + new ConcurrentHashMap<>(); // We populate repeatedFailuresMap every time there is a failure. So, to // keep it from growing unbounded, we garbage collect the failure information @@ -90,62 +89,49 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { private final ThreadLocal threadRetryingInFastFailMode = new ThreadLocal<>(); public PreemptiveFastFailInterceptor(Configuration conf) { - this.fastFailThresholdMilliSec = conf.getLong( - HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, - HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS_DEFAULT); - this.failureMapCleanupIntervalMilliSec = conf.getLong( - HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS, - HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS_DEFAULT); - this.fastFailClearingTimeMilliSec = conf.getLong( - HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_MS_DURATION_MS, - HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_DURATION_MS_DEFAULT); + this.fastFailThresholdMilliSec = conf.getLong(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, + HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS_DEFAULT); + this.failureMapCleanupIntervalMilliSec = + conf.getLong(HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS, + HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS_DEFAULT); + this.fastFailClearingTimeMilliSec = + conf.getLong(HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_MS_DURATION_MS, + HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_DURATION_MS_DEFAULT); lastFailureMapCleanupTimeMilliSec = EnvironmentEdgeManager.currentTime(); } - public void intercept(FastFailInterceptorContext context) - throws PreemptiveFastFailException { + public void intercept(FastFailInterceptorContext context) throws PreemptiveFastFailException { context.setFailureInfo(repeatedFailuresMap.get(context.getServer())); if (inFastFailMode(context.getServer()) && !currentThreadInFastFailMode()) { // In Fast-fail mode, all but one thread will fast fail. Check // if we are that one chosen thread. 
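
The constructor above reads its thresholds from client configuration. A hedged tuning sketch using the constants visible in this hunk; the property assumed to switch the interceptor on is "hbase.client.fast.fail.mode.enabled", and the millisecond values are examples only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class FastFailConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed enabling switch; the interceptor itself only reads the thresholds below.
    conf.setBoolean("hbase.client.fast.fail.mode.enabled", true);
    // How long a server must be unreachable before clients enter fast-fail mode.
    conf.setLong(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, 60_000L);
    // How often the per-server failure map is garbage collected.
    conf.setLong(HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS, 600_000L);
    System.out.println(conf.get(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS));
  }
}
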
- context.setRetryDespiteFastFailMode(shouldRetryInspiteOfFastFail(context - .getFailureInfo())); + context.setRetryDespiteFastFailMode(shouldRetryInspiteOfFastFail(context.getFailureInfo())); if (!context.isRetryDespiteFastFailMode()) { // we don't have to retry - LOG.debug("Throwing PFFE : " + context.getFailureInfo() + " tries : " - + context.getTries()); - throw new PreemptiveFastFailException( - context.getFailureInfo().numConsecutiveFailures.get(), - context.getFailureInfo().timeOfFirstFailureMilliSec, - context.getFailureInfo().timeOfLatestAttemptMilliSec, context.getServer(), - context.getGuaranteedClientSideOnly().isTrue()); + LOG.debug("Throwing PFFE : " + context.getFailureInfo() + " tries : " + context.getTries()); + throw new PreemptiveFastFailException(context.getFailureInfo().numConsecutiveFailures.get(), + context.getFailureInfo().timeOfFirstFailureMilliSec, + context.getFailureInfo().timeOfLatestAttemptMilliSec, context.getServer(), + context.getGuaranteedClientSideOnly().isTrue()); } } context.setDidTry(true); } - public void handleFailure(FastFailInterceptorContext context, - Throwable t) throws IOException { - handleThrowable(t, context.getServer(), - context.getCouldNotCommunicateWithServer(), - context.getGuaranteedClientSideOnly()); + public void handleFailure(FastFailInterceptorContext context, Throwable t) throws IOException { + handleThrowable(t, context.getServer(), context.getCouldNotCommunicateWithServer(), + context.getGuaranteedClientSideOnly()); } public void updateFailureInfo(FastFailInterceptorContext context) { - updateFailureInfoForServer(context.getServer(), context.getFailureInfo(), - context.didTry(), context.getCouldNotCommunicateWithServer() - .booleanValue(), context.isRetryDespiteFastFailMode()); + updateFailureInfoForServer(context.getServer(), context.getFailureInfo(), context.didTry(), + context.getCouldNotCommunicateWithServer().booleanValue(), + context.isRetryDespiteFastFailMode()); } /** - * Handles failures encountered when communicating with a server. - * - * Updates the FailureInfo in repeatedFailuresMap to reflect the failure. - * Throws RepeatedConnectException if the client is in Fast fail mode. - * - * @param serverName - * @param t - * - the throwable to be handled. - * @throws PreemptiveFastFailException + * Handles failures encountered when communicating with a server. Updates the FailureInfo in + * repeatedFailuresMap to reflect the failure. Throws RepeatedConnectException if the client is in + * Fast fail mode. nn * - the throwable to be handled. n */ protected void handleFailureToServer(ServerName serverName, Throwable t) { if (serverName == null || t == null) { @@ -159,8 +145,8 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { } public void handleThrowable(Throwable t1, ServerName serverName, - MutableBoolean couldNotCommunicateWithServer, - MutableBoolean guaranteedClientSideOnly) throws IOException { + MutableBoolean couldNotCommunicateWithServer, MutableBoolean guaranteedClientSideOnly) + throws IOException { Throwable t2 = ClientExceptionsUtil.translatePFFE(t1); boolean isLocalException = !(t2 instanceof RemoteException); @@ -172,38 +158,38 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { } /** - * Occasionally cleans up unused information in repeatedFailuresMap. - * - * repeatedFailuresMap stores the failure information for all remote hosts - * that had failures. 
In order to avoid these from growing indefinitely, - * occassionallyCleanupFailureInformation() will clear these up once every + * Occasionally cleans up unused information in repeatedFailuresMap. repeatedFailuresMap stores + * the failure information for all remote hosts that had failures. In order to avoid these from + * growing indefinitely, occassionallyCleanupFailureInformation() will clear these up once every * cleanupInterval ms. */ protected void occasionallyCleanupFailureInformation() { long now = EnvironmentEdgeManager.currentTime(); - if (!(now > lastFailureMapCleanupTimeMilliSec - + failureMapCleanupIntervalMilliSec)) - return; + if (!(now > lastFailureMapCleanupTimeMilliSec + failureMapCleanupIntervalMilliSec)) return; // remove entries that haven't been attempted in a while // No synchronization needed. It is okay if multiple threads try to // remove the entry again and again from a concurrent hash map. StringBuilder sb = new StringBuilder(); for (Entry entry : repeatedFailuresMap.entrySet()) { - if (now > entry.getValue().timeOfLatestAttemptMilliSec - + failureMapCleanupIntervalMilliSec) { // no recent failures + if (now > entry.getValue().timeOfLatestAttemptMilliSec + failureMapCleanupIntervalMilliSec) { // no + // recent + // failures repeatedFailuresMap.remove(entry.getKey()); - } else if (now > entry.getValue().timeOfFirstFailureMilliSec - + this.fastFailClearingTimeMilliSec) { // been failing for a long - // time - LOG.error(entry.getKey() - + " been failing for a long time. clearing out." + } else + if (now > entry.getValue().timeOfFirstFailureMilliSec + this.fastFailClearingTimeMilliSec) { // been + // failing + // for + // a + // long + // time + LOG.error(entry.getKey() + " been failing for a long time. clearing out." + entry.getValue().toString()); - repeatedFailuresMap.remove(entry.getKey()); - } else { - sb.append(entry.getKey().toString()).append(" failing ") + repeatedFailuresMap.remove(entry.getKey()); + } else { + sb.append(entry.getKey().toString()).append(" failing ") .append(entry.getValue().toString()).append("\n"); - } + } } if (sb.length() > 0) { LOG.warn("Preemptive failure enabled for : " + sb.toString()); @@ -212,55 +198,41 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { } /** - * Checks to see if we are in the Fast fail mode for requests to the server. - * - * If a client is unable to contact a server for more than - * fastFailThresholdMilliSec the client will get into fast fail mode. - * - * @param server - * @return true if the client is in fast fail mode for the server. + * Checks to see if we are in the Fast fail mode for requests to the server. If a client is unable + * to contact a server for more than fastFailThresholdMilliSec the client will get into fast fail + * mode. n * @return true if the client is in fast fail mode for the server. */ private boolean inFastFailMode(ServerName server) { FailureInfo fInfo = repeatedFailuresMap.get(server); // if fInfo is null --> The server is considered good. // If the server is bad, wait long enough to believe that the server is // down. - return (fInfo != null && - EnvironmentEdgeManager.currentTime() > - (fInfo.timeOfFirstFailureMilliSec + this.fastFailThresholdMilliSec)); + return (fInfo != null && EnvironmentEdgeManager.currentTime() + > (fInfo.timeOfFirstFailureMilliSec + this.fastFailThresholdMilliSec)); } /** - * Checks to see if the current thread is already in FastFail mode for *some* - * server. 
- * + * Checks to see if the current thread is already in FastFail mode for *some* server. * @return true, if the thread is already in FF mode. */ private boolean currentThreadInFastFailMode() { - return (this.threadRetryingInFastFailMode.get() != null && (this.threadRetryingInFastFailMode - .get().booleanValue() == true)); + return (this.threadRetryingInFastFailMode.get() != null + && (this.threadRetryingInFastFailMode.get().booleanValue() == true)); } /** - * Check to see if the client should try to connnect to the server, inspite of - * knowing that it is in the fast fail mode. - * - * The idea here is that we want just one client thread to be actively trying - * to reconnect, while all the other threads trying to reach the server will - * short circuit. - * - * @param fInfo - * @return true if the client should try to connect to the server. + * Check to see if the client should try to connnect to the server, inspite of knowing that it is + * in the fast fail mode. The idea here is that we want just one client thread to be actively + * trying to reconnect, while all the other threads trying to reach the server will short circuit. + * n * @return true if the client should try to connect to the server. */ protected boolean shouldRetryInspiteOfFastFail(FailureInfo fInfo) { // We believe that the server is down, But, we want to have just one // client // actively trying to connect. If we are the chosen one, we will retry // and not throw an exception. - if (fInfo != null - && fInfo.exclusivelyRetringInspiteOfFastFail.compareAndSet(false, true)) { - MutableBoolean threadAlreadyInFF = this.threadRetryingInFastFailMode - .get(); + if (fInfo != null && fInfo.exclusivelyRetringInspiteOfFastFail.compareAndSet(false, true)) { + MutableBoolean threadAlreadyInFF = this.threadRetryingInFastFailMode.get(); if (threadAlreadyInFF == null) { threadAlreadyInFF = new MutableBoolean(); this.threadRetryingInFastFailMode.set(threadAlreadyInFF); @@ -273,20 +245,11 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { } /** - * - * This function updates the Failure info for a particular server after the - * attempt to - * - * @param server - * @param fInfo - * @param couldNotCommunicate - * @param retryDespiteFastFailMode + * This function updates the Failure info for a particular server after the attempt to nnnn */ - private void updateFailureInfoForServer(ServerName server, - FailureInfo fInfo, boolean didTry, boolean couldNotCommunicate, - boolean retryDespiteFastFailMode) { - if (server == null || fInfo == null || didTry == false) - return; + private void updateFailureInfoForServer(ServerName server, FailureInfo fInfo, boolean didTry, + boolean couldNotCommunicate, boolean retryDespiteFastFailMode) { + if (server == null || fInfo == null || didTry == false) return; // If we were able to connect to the server, reset the failure // information. 
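
The retry election described above (one thread keeps probing a suspect server while the rest fail fast) boils down to a compare-and-set on a shared flag. A standalone sketch of the idea, not the HBase implementation itself:

import java.util.concurrent.atomic.AtomicBoolean;

public class SingleRetrierSketch {
  // One flag per suspect server; the first thread to flip it becomes the sole prober.
  private final AtomicBoolean exclusiveRetrier = new AtomicBoolean(false);

  boolean shouldRetryDespiteFastFail() {
    // compareAndSet succeeds for exactly one caller; everyone else short-circuits.
    return exclusiveRetrier.compareAndSet(false, true);
  }

  void onAttemptFinished() {
    // Release the slot so a later thread can take over probing.
    exclusiveRetrier.set(false);
  }

  public static void main(String[] args) {
    SingleRetrierSketch s = new SingleRetrierSketch();
    System.out.println(s.shouldRetryDespiteFastFail()); // true  -> this thread probes
    System.out.println(s.shouldRetryDespiteFastFail()); // false -> fail fast
  }
}
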
@@ -310,15 +273,15 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { @Override public void intercept(RetryingCallerInterceptorContext context) - throws PreemptiveFastFailException { + throws PreemptiveFastFailException { if (context instanceof FastFailInterceptorContext) { intercept((FastFailInterceptorContext) context); } } @Override - public void handleFailure(RetryingCallerInterceptorContext context, - Throwable t) throws IOException { + public void handleFailure(RetryingCallerInterceptorContext context, Throwable t) + throws IOException { if (context instanceof FastFailInterceptorContext) { handleFailure((FastFailInterceptorContext) context, t); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index 702717038c3..a3502f29bd0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -38,10 +36,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Used to perform Put operations for a single row. *

<p>
    - * To perform a Put, instantiate a Put object with the row to insert to, and - * for each column to be inserted, execute {@link #addColumn(byte[], byte[], - * byte[]) add} or {@link #addColumn(byte[], byte[], long, byte[]) add} if - * setting the timestamp. + * To perform a Put, instantiate a Put object with the row to insert to, and for each column to be + * inserted, execute {@link #addColumn(byte[], byte[], byte[]) add} or + * {@link #addColumn(byte[], byte[], long, byte[]) add} if setting the timestamp. */ @InterfaceAudience.Public public class Put extends Mutation implements HeapSize { @@ -49,27 +46,23 @@ public class Put extends Mutation implements HeapSize { * Create a Put operation for the specified row. * @param row row key */ - public Put(byte [] row) { + public Put(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** * Create a Put operation for the specified row, using a given timestamp. - * * @param row row key; we make a copy of what we are passed to keep local. - * @param ts timestamp + * @param ts timestamp */ public Put(byte[] row, long ts) { this(row, 0, row.length, ts); } /** - * We make a copy of the passed in row key to keep local. - * @param rowArray - * @param rowOffset - * @param rowLength + * We make a copy of the passed in row key to keep local. nnn */ - public Put(byte [] rowArray, int rowOffset, int rowLength) { + public Put(byte[] rowArray, int rowOffset, int rowLength) { this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP); } @@ -95,13 +88,9 @@ public class Put extends Mutation implements HeapSize { } /** - * We make a copy of the passed in row key to keep local. - * @param rowArray - * @param rowOffset - * @param rowLength - * @param ts + * We make a copy of the passed in row key to keep local. nnnn */ - public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { + public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); this.ts = ts; @@ -112,24 +101,20 @@ public class Put extends Mutation implements HeapSize { /** * Create a Put operation for an immutable row key. - * - * @param row row key - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param row row key + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. */ - public Put(byte [] row, boolean rowIsImmutable) { + public Put(byte[] row, boolean rowIsImmutable) { this(row, HConstants.LATEST_TIMESTAMP, rowIsImmutable); } /** * Create a Put operation for an immutable row key, using a given timestamp. - * - * @param row row key - * @param ts timestamp - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param row row key + * @param ts timestamp + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. 
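
The class comment above describes the basic Put workflow. A minimal usage sketch, assuming an open cluster connection and an existing table "t1" with family "cf" (all names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Table table = conn.getTable(TableName.valueOf("t1"))) {
      Put put = new Put(Bytes.toBytes("row-1")); // the row key is copied locally
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
      // Explicit timestamp variant for a second column.
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 42L, Bytes.toBytes("v2"));
      table.put(put);
    }
  }
}
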
*/ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Check and set timestamp @@ -140,15 +125,15 @@ public class Put extends Mutation implements HeapSize { // Deal with row according to rowIsImmutable checkRow(row); - if (rowIsImmutable) { // Row is immutable - this.row = row; // Do not make a local copy, but point to the provided byte array directly - } else { // Row is not immutable - this.row = Bytes.copy(row, 0, row.length); // Make a local copy + if (rowIsImmutable) { // Row is immutable + this.row = row; // Do not make a local copy, but point to the provided byte array directly + } else { // Row is not immutable + this.row = Bytes.copy(row, 0, row.length); // Make a local copy } } /** - * Copy constructor. Creates a Put operation cloned from the specified Put. + * Copy constructor. Creates a Put operation cloned from the specified Put. * @param putToCopy put to copy */ public Put(Put putToCopy) { @@ -156,50 +141,46 @@ public class Put extends Mutation implements HeapSize { } /** - * Construct the Put with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Put with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Put(byte[] row, long ts, NavigableMap> familyMap) { + public Put(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Add the specified column and value to this Put operation. - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param value column value - * @return this + * @param value column value n */ - public Put addColumn(byte [] family, byte [] qualifier, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, byte[] value) { return addColumn(family, qualifier, this.ts, value); } /** - * See {@link #addColumn(byte[], byte[], byte[])}. This version expects - * that the underlying arrays won't change. It's intended - * for usage internal HBase to and for advanced client applications. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #add(Cell)} and {@link org.apache.hadoop.hbase.CellBuilder} instead + * See {@link #addColumn(byte[], byte[], byte[])}. This version expects that the underlying arrays + * won't change. It's intended for usage internal HBase to and for advanced client applications. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)} + * and {@link org.apache.hadoop.hbase.CellBuilder} instead */ @Deprecated - public Put addImmutable(byte [] family, byte [] qualifier, byte [] value) { + public Put addImmutable(byte[] family, byte[] qualifier, byte[] value) { return addImmutable(family, qualifier, this.ts, value); } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. - * @param family family name + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. 
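
The constructors above allow a zero-copy row key and a default timestamp for the whole Put. A small sketch of both, assuming the caller really does leave the row array untouched while the Put is in flight:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutConstructorSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-42");

    // rowIsImmutable=true: the Put points at 'row' directly instead of copying it,
    // so the caller must not mutate the array for the duration of the Put.
    Put zeroCopy = new Put(row, true);
    zeroCopy.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));

    // A Put created with an explicit timestamp uses it as the default version
    // for every addColumn call that does not pass its own ts.
    Put timestamped = new Put(Bytes.toBytes("row-43"), 1_000L);
    timestamped.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    System.out.println(timestamped.getTimestamp()); // 1000
  }
}
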
+ * @param family family name * @param qualifier column qualifier - * @param ts version timestamp - * @param value column value - * @return this + * @param ts version timestamp + * @param value column value n */ - public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) { if (ts < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); } @@ -210,15 +191,16 @@ public class Put extends Mutation implements HeapSize { } /** - * See {@link #addColumn(byte[], byte[], long, byte[])}. This version expects - * that the underlying arrays won't change. It's intended - * for usage internal HBase to and for advanced client applications. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #add(Cell)} and {@link org.apache.hadoop.hbase.CellBuilder} instead + * See {@link #addColumn(byte[], byte[], long, byte[])}. This version expects that the underlying + * arrays won't change. It's intended for usage internal HBase to and for advanced client + * applications. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)} + * and {@link org.apache.hadoop.hbase.CellBuilder} instead */ @Deprecated - public Put addImmutable(byte [] family, byte [] qualifier, long ts, byte [] value) { - // Family can not be null, otherwise NullPointerException is thrown when putting the cell into familyMap + public Put addImmutable(byte[] family, byte[] qualifier, long ts, byte[] value) { + // Family can not be null, otherwise NullPointerException is thrown when putting the cell into + // familyMap if (family == null) { throw new IllegalArgumentException("Family cannot be null"); } @@ -229,18 +211,18 @@ public class Put extends Mutation implements HeapSize { } List list = getCellList(family); - list.add(new IndividualBytesFieldCell(this.row, family, qualifier, ts, KeyValue.Type.Put, value)); + list + .add(new IndividualBytesFieldCell(this.row, family, qualifier, ts, KeyValue.Type.Put, value)); return this; } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. - * @param family family name + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. + * @param family family name * @param qualifier column qualifier - * @param ts version timestamp - * @param value column value - * @return this + * @param ts version timestamp + * @param value column value n */ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) { if (ts < 0) { @@ -253,11 +235,11 @@ public class Put extends Mutation implements HeapSize { } /** - * See {@link #addColumn(byte[], ByteBuffer, long, ByteBuffer)}. This version expects - * that the underlying arrays won't change. It's intended - * for usage internal HBase to and for advanced client applications. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #add(Cell)} and {@link org.apache.hadoop.hbase.CellBuilder} instead + * See {@link #addColumn(byte[], ByteBuffer, long, ByteBuffer)}. This version expects that the + * underlying arrays won't change. It's intended for usage internal HBase to and for advanced + * client applications. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 
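
Since the deprecation notes in this hunk point at add(Cell) plus CellBuilder as the replacement for addImmutable, here is a hedged sketch of that replacement path (row, family, qualifier, and value are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAddCellSketch {
  public static void main(String[] args) throws IOException {
    byte[] row = Bytes.toBytes("row-1");
    Put put = new Put(row);
    // SHALLOW_COPY keeps references to the passed arrays, mirroring addImmutable's contract.
    Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(row)
      .setFamily(Bytes.toBytes("cf"))
      .setQualifier(Bytes.toBytes("q"))
      .setTimestamp(1_000L)
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes("v"))
      .build();
    put.add(cell); // the cell's row must match the Put's row
  }
}
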
Use {@link #add(Cell)} + * and {@link org.apache.hadoop.hbase.CellBuilder} instead */ @Deprecated public Put addImmutable(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) { @@ -271,12 +253,9 @@ public class Put extends Mutation implements HeapSize { } /** - * Add the specified KeyValue to this Put operation. Operation assumes that - * the passed KeyValue is immutable and its backing array will not be modified - * for the duration of this Put. - * @param cell individual cell - * @return this - * @throws java.io.IOException e + * Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is + * immutable and its backing array will not be modified for the duration of this Put. + * @param cell individual cell n * @throws java.io.IOException e */ public Put add(Cell cell) throws IOException { super.add(cell); @@ -306,8 +285,8 @@ public class Put extends Mutation implements HeapSize { /** * Method for setting the put's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Put#Put(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Put#Put(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 1d990d1bc94..661065d0949 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Map; - -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; @@ -29,11 +26,14 @@ import org.apache.hadoop.hbase.security.access.AccessControlUtil; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.VisibilityConstants; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** * Base class for HBase read operations; e.g. Scan and Get. 
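
Because Query is the base class shared by Get and Scan, the attributes it manages (filters, authorizations, ACLs, consistency, isolation level) apply to either read type. A minimal sketch using a Scan, with illustrative label and user names:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;

public class QuerySettingsSketch {
  public static void main(String[] args) {
    Scan scan = new Scan().addFamily(Bytes.toBytes("cf"));
    // Visibility labels this read is allowed to see (labels are illustrative).
    scan.setAuthorizations(new Authorizations("secret", "confidential"));
    // Attach a per-operation ACL for a hypothetical user.
    scan.setACL("report_user", new Permission(Permission.Action.READ));
  }
}
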
@@ -46,8 +46,9 @@ public abstract class Query extends OperationWithAttributes { protected Consistency consistency = Consistency.STRONG; protected Map colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); protected Boolean loadColumnFamiliesOnDemand = null; + /** - * @return Filter + * n */ public Filter getFilter() { return filter; @@ -66,18 +67,16 @@ public abstract class Query extends OperationWithAttributes { } /** - * Sets the authorizations to be used by this Query - * @param authorizations + * Sets the authorizations to be used by this Query n */ public Query setAuthorizations(Authorizations authorizations) { - this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, ProtobufUtil - .toAuthorizations(authorizations).toByteArray()); + this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, + ProtobufUtil.toAuthorizations(authorizations).toByteArray()); return this; } /** - * @return The authorizations this Query is associated with. - * @throws DeserializationException + * @return The authorizations this Query is associated with. n */ public Authorizations getAuthorizations() throws DeserializationException { byte[] authorizationsBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY); @@ -93,7 +92,7 @@ public abstract class Query extends OperationWithAttributes { } /** - * @param user User short name + * @param user User short name * @param perms Permissions for the user */ public Query setACL(String user, Permission perms) { @@ -111,7 +110,7 @@ public abstract class Query extends OperationWithAttributes { permMap.put(entry.getKey(), entry.getValue()); } setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL, - AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); + AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); return this; } @@ -134,10 +133,9 @@ public abstract class Query extends OperationWithAttributes { /** * Specify region replica id where Query will fetch data from. Use this together with - * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from - * a specific replicaId. - *
    Expert: This is an advanced API exposed. Only use it if you know what you are doing - * @param Id + * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a + * specific replicaId.
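
A small sketch of the timeline-read pairing the javadoc above describes, i.e. setConsistency(TIMELINE) plus an explicit replica id on a Get (replica 1 is just an example):

import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class TimelineReadSketch {
  public static void main(String[] args) {
    Get get = new Get(Bytes.toBytes("row-1"));
    // TIMELINE allows possibly-stale reads served by secondary region replicas.
    get.setConsistency(Consistency.TIMELINE);
    // Expert setting: pin the read to a specific replica instead of letting the client choose.
    get.setReplicaId(1);
  }
}
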
    + * Expert: This is an advanced API exposed. Only use it if you know what you are doing n */ public Query setReplicaId(int Id) { this.targetReplicaId = Id; @@ -153,14 +151,10 @@ public abstract class Query extends OperationWithAttributes { } /** - * Set the isolation level for this query. If the - * isolation level is set to READ_UNCOMMITTED, then - * this query will return data from committed and - * uncommitted transactions. If the isolation level - * is set to READ_COMMITTED, then this query will return - * data from committed transactions only. If a isolation - * level is not explicitly set on a Query, then it - * is assumed to be READ_COMMITTED. + * Set the isolation level for this query. If the isolation level is set to READ_UNCOMMITTED, then + * this query will return data from committed and uncommitted transactions. If the isolation level + * is set to READ_COMMITTED, then this query will return data from committed transactions only. If + * a isolation level is not explicitly set on a Query, then it is assumed to be READ_COMMITTED. * @param level IsolationLevel for this query */ public Query setIsolationLevel(IsolationLevel level) { @@ -169,32 +163,28 @@ public abstract class Query extends OperationWithAttributes { } /** - * @return The isolation level of this query. - * If no isolation level was set for this query object, - * then it returns READ_COMMITTED. + * @return The isolation level of this query. If no isolation level was set for this query object, + * then it returns READ_COMMITTED. * @return The IsolationLevel for this query */ public IsolationLevel getIsolationLevel() { byte[] attr = getAttribute(ISOLATION_LEVEL); - return attr == null ? IsolationLevel.READ_COMMITTED : - IsolationLevel.fromBytes(attr); + return attr == null ? IsolationLevel.READ_COMMITTED : IsolationLevel.fromBytes(attr); } /** - * Set the value indicating whether loading CFs on demand should be allowed (cluster - * default is false). On-demand CF loading doesn't load column families until necessary, e.g. - * if you filter on one column, the other column family data will be loaded only for the rows - * that are included in result, not all rows like in normal case. - * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true, - * this can deliver huge perf gains when there's a cf with lots of data; however, it can - * also lead to some inconsistent results, as follows: - * - if someone does a concurrent update to both column families in question you may get a row - * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } - * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan - * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, - * { video => "my dog" } }. - * - if there's a concurrent split and you have more than 2 column families, some rows may be - * missing some column families. + * Set the value indicating whether loading CFs on demand should be allowed (cluster default is + * false). On-demand CF loading doesn't load column families until necessary, e.g. if you filter + * on one column, the other column family data will be loaded only for the rows that are included + * in result, not all rows like in normal case. 
With column-specific filters, like + * SingleColumnValueFilter w/filterIfMissing == true, this can deliver huge perf gains when + * there's a cf with lots of data; however, it can also lead to some inconsistent results, as + * follows: - if someone does a concurrent update to both column families in question you may get + * a row that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" + * } } someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent + * scan filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, { video => + * "my dog" } }. - if there's a concurrent split and you have more than 2 column families, some + * rows may be missing some column families. */ public Query setLoadColumnFamiliesOnDemand(boolean value) { this.loadColumnFamiliesOnDemand = value; @@ -212,21 +202,17 @@ public abstract class Query extends OperationWithAttributes { * Get the logical value indicating whether on-demand CF loading should be allowed. */ public boolean doLoadColumnFamiliesOnDemand() { - return (this.loadColumnFamiliesOnDemand != null) - && this.loadColumnFamiliesOnDemand; + return (this.loadColumnFamiliesOnDemand != null) && this.loadColumnFamiliesOnDemand; } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp) on a per CF bases. Note, default maximum versions to return is 1. If - * your time range spans more than one version and you want all versions - * returned, up the number of versions beyond the default. + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp) on a + * per CF bases. Note, default maximum versions to return is 1. If your time range spans more than + * one version and you want all versions returned, up the number of versions beyond the default. * Column Family time ranges take precedence over the global time range. 
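
A sketch of the two per-scan knobs documented above: on-demand column family loading and a column-family-scoped time range (family names and timestamps are illustrative):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanCfOptionsSketch {
  public static void main(String[] args) {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("video"));
    scan.addFamily(Bytes.toBytes("cat_videos"));
    // Only materialize the non-filtered family for rows that actually match the filter.
    scan.setLoadColumnFamiliesOnDemand(true);
    // Restrict just the "video" family to versions in [minStamp, maxStamp).
    scan.setColumnFamilyTimeRange(Bytes.toBytes("video"), 1_000L, 2_000L);
  }
}
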
- * * @param cf the column family for which you want to restrict * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @return this + * @param maxStamp maximum timestamp value, exclusive n */ public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 4468c18b179..3fa1e48a9c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -204,8 +204,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedur import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; @@ -260,8 +259,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormali import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .SetSnapshotCleanupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest; @@ -342,7 +340,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { private final NonceGenerator ng; RawAsyncHBaseAdmin(AsyncConnectionImpl connection, HashedWheelTimer retryTimer, - AsyncAdminBuilderBase builder) { + AsyncAdminBuilderBase builder) { this.connection = connection; this.retryTimer = retryTimer; this.metaTable = connection.getTable(META_TABLE_NAME); @@ -351,8 +349,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { this.pauseNs = builder.pauseNs; if (builder.pauseNsForServerOverloaded < builder.pauseNs) { LOG.warn( - "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + - " the normal pause value {} ms, use the greater one instead", + "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + + " the normal pause value {} ms, use the greater one instead", TimeUnit.NANOSECONDS.toMillis(builder.pauseNsForServerOverloaded), TimeUnit.NANOSECONDS.toMillis(builder.pauseNs)); this.pauseNsForServerOverloaded = builder.pauseNs; @@ -385,13 +383,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { 
@FunctionalInterface private interface MasterRpcCall { void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, - RpcCallback done); + RpcCallback done); } @FunctionalInterface private interface AdminRpcCall { void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, - RpcCallback done); + RpcCallback done); } @FunctionalInterface @@ -400,8 +398,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture call(HBaseRpcController controller, - MasterService.Interface stub, PREQ preq, MasterRpcCall rpcCall, - Converter respConverter) { + MasterService.Interface stub, PREQ preq, MasterRpcCall rpcCall, + Converter respConverter) { CompletableFuture future = new CompletableFuture<>(); rpcCall.call(stub, controller, preq, new RpcCallback() { @@ -422,8 +420,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture adminCall(HBaseRpcController controller, - AdminService.Interface stub, PREQ preq, AdminRpcCall rpcCall, - Converter respConverter) { + AdminService.Interface stub, PREQ preq, AdminRpcCall rpcCall, + Converter respConverter) { CompletableFuture future = new CompletableFuture<>(); rpcCall.call(stub, controller, preq, new RpcCallback() { @@ -444,24 +442,24 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture procedureCall(PREQ preq, - MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + MasterRpcCall rpcCall, Converter respConverter, + ProcedureBiConsumer consumer) { return procedureCall(b -> { }, preq, rpcCall, respConverter, consumer); } private CompletableFuture procedureCall(TableName tableName, PREQ preq, - MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + MasterRpcCall rpcCall, Converter respConverter, + ProcedureBiConsumer consumer) { return procedureCall(b -> b.priority(tableName), preq, rpcCall, respConverter, consumer); } private CompletableFuture procedureCall( - Consumer> prioritySetter, PREQ preq, - MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + Consumer> prioritySetter, PREQ preq, + MasterRpcCall rpcCall, Converter respConverter, + ProcedureBiConsumer consumer) { MasterRequestCallerBuilder builder = this. newMasterCaller().action((controller, - stub) -> this. call(controller, stub, preq, rpcCall, respConverter)); + stub) -> this. call(controller, stub, preq, rpcCall, respConverter)); prioritySetter.accept(builder); CompletableFuture procFuture = builder.call(); CompletableFuture future = waitProcedureResult(procFuture); @@ -484,8 +482,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture> listTableDescriptors(boolean includeSysTables) { - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(null, - includeSysTables)); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(null, includeSysTables)); } /** @@ -493,11 +491,11 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { */ @Override public CompletableFuture> listTableDescriptors(Pattern pattern, - boolean includeSysTables) { + boolean includeSysTables) { Preconditions.checkNotNull(pattern, "pattern is null. 
If you don't specify a pattern, use listTables(boolean) instead"); - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(pattern, - includeSysTables)); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables)); } @Override @@ -511,13 +509,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture> - getTableDescriptors(GetTableDescriptorsRequest request) { + getTableDescriptors(GetTableDescriptorsRequest request) { return this.> newMasterCaller() - .action((controller, stub) -> this - .> call( - controller, stub, request, (s, c, req, done) -> s.getTableDescriptors(c, req, done), - (resp) -> ProtobufUtil.toTableDescriptorList(resp))) - .call(); + .action((controller, stub) -> this.> call(controller, stub, request, + (s, c, req, done) -> s.getTableDescriptors(c, req, done), + (resp) -> ProtobufUtil.toTableDescriptorList(resp))) + .call(); } @Override @@ -526,54 +524,51 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture> - listTableNames(Pattern pattern, boolean includeSysTables) { + public CompletableFuture> listTableNames(Pattern pattern, + boolean includeSysTables) { Preconditions.checkNotNull(pattern, - "pattern is null. If you don't specify a pattern, use listTableNames(boolean) instead"); + "pattern is null. If you don't specify a pattern, use listTableNames(boolean) instead"); return getTableNames(RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables)); } private CompletableFuture> getTableNames(GetTableNamesRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call(controller, - stub, request, (s, c, req, done) -> s.getTableNames(c, req, done), - (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, request, + (s, c, req, done) -> s.getTableNames(c, req, done), + (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))) + .call(); } @Override public CompletableFuture> listTableDescriptorsByNamespace(String name) { - return this.> newMasterCaller().action((controller, stub) -> this - .> call( - controller, stub, + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableDescriptorsByNamespace(c, req, done), (resp) -> ProtobufUtil.toTableDescriptorList(resp))) - .call(); + .call(); } @Override public CompletableFuture> listTableNamesByNamespace(String name) { - return this.> newMasterCaller().action((controller, stub) -> this - .> call( - controller, stub, + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableNamesByNamespace(c, req, done), (resp) -> ProtobufUtil.toTableNameList(resp.getTableNameList()))) - .call(); + .call(); } @Override public CompletableFuture getDescriptor(TableName tableName) { CompletableFuture future = new CompletableFuture<>(); addListener(this.> newMasterCaller().priority(tableName) - .action((controller, stub) -> this - .> call( - controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), + .action((controller, stub) -> this.> call(controller, stub, + RequestConverter.buildGetTableDescriptorsRequest(tableName), (s, c, 
req, done) -> s.getTableDescriptors(c, req, done), (resp) -> resp.getTableSchemaList())) .call(), (tableSchemas, error) -> { @@ -598,7 +593,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, - int numRegions) { + int numRegions) { try { return createTable(desc, getSplitKeys(startKey, endKey, numRegions)); } catch (IllegalArgumentException e) { @@ -609,7 +604,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture createTable(TableDescriptor desc, byte[][] splitKeys) { Preconditions.checkNotNull(splitKeys, "splitKeys is null. If you don't specify splitKeys," - + " use createTable(TableDescriptor) instead"); + + " use createTable(TableDescriptor) instead"); try { verifySplitKeys(splitKeys); return createTable(desc.getTableName(), RequestConverter.buildCreateTableRequest(desc, @@ -630,15 +625,15 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { public CompletableFuture modifyTable(TableDescriptor desc) { return this. procedureCall(desc.getTableName(), RequestConverter.buildModifyTableRequest(desc.getTableName(), desc, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyTable(c, req, done), - (resp) -> resp.getProcId(), new ModifyTableProcedureBiConsumer(this, desc.getTableName())); + ng.newNonce()), + (s, c, req, done) -> s.modifyTable(c, req, done), (resp) -> resp.getProcId(), + new ModifyTableProcedureBiConsumer(this, desc.getTableName())); } @Override public CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT) { - return this - . procedureCall( - tableName, + return this. procedureCall(tableName, RequestConverter.buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, ng.getNonceGroup(), ng.newNonce()), (s, c, req, done) -> s.modifyTableStoreFileTracker(c, req, done), @@ -658,8 +653,9 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { public CompletableFuture truncateTable(TableName tableName, boolean preserveSplits) { return this. procedureCall(tableName, RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.truncateTable(c, req, done), - (resp) -> resp.getProcId(), new TruncateTableProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.truncateTable(c, req, done), (resp) -> resp.getProcId(), + new TruncateTableProcedureBiConsumer(tableName)); } @Override @@ -679,13 +675,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } /** - * Utility for completing passed TableState {@link CompletableFuture} future - * using passed parameters. Sets error or boolean result ('true' if table matches - * the passed-in targetState). + * Utility for completing passed TableState {@link CompletableFuture} future using + * passed parameters. Sets error or boolean result ('true' if table matches the passed-in + * targetState). 
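
The descriptor/name listing plumbing above ultimately backs the public AsyncAdmin API. A hedged usage sketch; the configuration is taken from the classpath and the table-name pattern is a placeholder:

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncAdminListSketch {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
      ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      // List user tables whose names start with "orders", excluding system tables.
      List<TableName> names =
        conn.getAdmin().listTableNames(Pattern.compile("orders.*"), false).get();
      names.forEach(n -> System.out.println(n.getNameAsString()));
    }
  }
}
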
*/ private static CompletableFuture completeCheckTableState( - CompletableFuture future, TableState tableState, Throwable error, - TableState.State targetState, TableName tableName) { + CompletableFuture future, TableState tableState, Throwable error, + TableState.State targetState, TableName tableName) { if (error != null) { future.completeExceptionally(error); } else { @@ -705,7 +701,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } CompletableFuture future = new CompletableFuture<>(); addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> { - completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error, + completeCheckTableState(future, tableState.isPresent() ? tableState.get() : null, error, TableState.State.ENABLED, tableName); }); return future; @@ -718,7 +714,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } CompletableFuture future = new CompletableFuture<>(); addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> { - completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error, + completeCheckTableState(future, tableState.isPresent() ? tableState.get() : null, error, TableState.State.DISABLED, tableName); }); return future; @@ -732,12 +728,12 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture isTableAvailable(TableName tableName, byte[][] splitKeys) { Preconditions.checkNotNull(splitKeys, "splitKeys is null. If you don't specify splitKeys," - + " use isTableAvailable(TableName) instead"); + + " use isTableAvailable(TableName) instead"); return isTableAvailable(tableName, Optional.of(splitKeys)); } private CompletableFuture isTableAvailable(TableName tableName, - Optional splitKeys) { + Optional splitKeys) { if (TableName.isMetaTableName(tableName)) { return connection.registry.getMetaRegionLocations().thenApply(locs -> Stream .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null)); @@ -755,8 +751,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { if (!enabled) { future.complete(false); } else { - addListener( - AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), + addListener(AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), (locations, error1) -> { if (error1 != null) { future.completeExceptionally(error1); @@ -801,11 +796,12 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture addColumnFamily( - TableName tableName, ColumnFamilyDescriptor columnFamily) { + public CompletableFuture addColumnFamily(TableName tableName, + ColumnFamilyDescriptor columnFamily) { return this. procedureCall(tableName, RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), + ng.newNonce()), + (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), new AddColumnFamilyProcedureBiConsumer(tableName)); } @@ -813,25 +809,26 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { public CompletableFuture deleteColumnFamily(TableName tableName, byte[] columnFamily) { return this. 
procedureCall(tableName, RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.deleteColumn(c, req, done), - (resp) -> resp.getProcId(), new DeleteColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.deleteColumn(c, req, done), (resp) -> resp.getProcId(), + new DeleteColumnFamilyProcedureBiConsumer(tableName)); } @Override public CompletableFuture modifyColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily) { + ColumnFamilyDescriptor columnFamily) { return this. procedureCall(tableName, RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done), - (resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.modifyColumn(c, req, done), (resp) -> resp.getProcId(), + new ModifyColumnFamilyProcedureBiConsumer(tableName)); } @Override public CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT) { - return this - . procedureCall( - tableName, + return this. procedureCall(tableName, RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, ng.getNonceGroup(), ng.newNonce()), (s, c, req, done) -> s.modifyColumnStoreFileTracker(c, req, done), @@ -865,48 +862,45 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture getNamespaceDescriptor(String name) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . - call(controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), - (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), (resp) - -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call(controller, stub, + RequestConverter.buildGetNamespaceDescriptorRequest(name), + (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))) + .call(); } @Override public CompletableFuture> listNamespaces() { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call( - controller, stub, ListNamespacesRequest.newBuilder().build(), (s, c, req, - done) -> s.listNamespaces(c, req, done), - (resp) -> resp.getNamespaceNameList())).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, ListNamespacesRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaces(c, req, done), + (resp) -> resp.getNamespaceNameList())) + .call(); } @Override public CompletableFuture> listNamespaceDescriptors() { - return this - .> newMasterCaller().action((controller, stub) -> this - .> call(controller, stub, - ListNamespaceDescriptorsRequest.newBuilder().build(), (s, c, req, done) -> - s.listNamespaceDescriptors(c, req, done), - (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, + ListNamespaceDescriptorsRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaceDescriptors(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))) + .call(); } @Override public CompletableFuture> getRegions(ServerName serverName) { return this.> newAdminCaller() - .action((controller, stub) -> this - .> adminCall( - controller, stub, RequestConverter.buildGetOnlineRegionRequest(), - (s, c, req, done) -> s.getOnlineRegion(c, req, done), - resp -> ProtobufUtil.getRegionInfos(resp))) - .serverName(serverName).call(); + .action((controller, stub) -> this.> adminCall(controller, stub, + RequestConverter.buildGetOnlineRegionRequest(), + (s, c, req, done) -> s.getOnlineRegion(c, req, done), + resp -> ProtobufUtil.getRegionInfos(resp))) + .serverName(serverName).call(); } @Override @@ -916,11 +910,11 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) .collect(Collectors.toList())); } else { - return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName) - .thenApply( - locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); + return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).thenApply( + locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); } } + @Override public CompletableFuture flush(TableName tableName) { return flush(tableName, null); @@ -945,8 +939,9 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { if (columnFamily != null) { props.put(HConstants.FAMILY_KEY_STR, Bytes.toString(columnFamily)); } - addListener(execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), - props), (ret, err3) -> { + addListener( + execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), props), + (ret, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { @@ -991,15 +986,14 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture flush(final ServerName serverName, final RegionInfo regionInfo, - byte[] columnFamily) { - return this. newAdminCaller() - .serverName(serverName) - .action( - (controller, stub) -> this. 
adminCall( - controller, stub, RequestConverter.buildFlushRegionRequest(regionInfo - .getRegionName(), columnFamily, false), - (s, c, req, done) -> s.flushRegion(c, req, done), resp -> null)) - .call(); + byte[] columnFamily) { + return this. newAdminCaller().serverName(serverName) + .action( + (controller, stub) -> this. adminCall(controller, stub, RequestConverter + .buildFlushRegionRequest(regionInfo.getRegionName(), columnFamily, false), + (s, c, req, done) -> s.flushRegion(c, req, done), resp -> null)) + .call(); } @Override @@ -1033,9 +1027,9 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture compact(TableName tableName, byte[] columnFamily, - CompactType compactType) { + CompactType compactType) { Preconditions.checkNotNull(columnFamily, "columnFamily is null. " - + "If you don't specify a columnFamily, use compact(TableName) instead"); + + "If you don't specify a columnFamily, use compact(TableName) instead"); return compact(tableName, columnFamily, false, compactType); } @@ -1047,7 +1041,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture compactRegion(byte[] regionName, byte[] columnFamily) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." - + " If you don't specify a columnFamily, use compactRegion(regionName) instead"); + + " If you don't specify a columnFamily, use compactRegion(regionName) instead"); return compactRegion(regionName, columnFamily, false); } @@ -1058,9 +1052,9 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture majorCompact(TableName tableName, byte[] columnFamily, - CompactType compactType) { + CompactType compactType) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." - + "If you don't specify a columnFamily, use compact(TableName) instead"); + + "If you don't specify a columnFamily, use compact(TableName) instead"); return compact(tableName, columnFamily, true, compactType); } @@ -1072,7 +1066,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture majorCompactRegion(byte[] regionName, byte[] columnFamily) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." 
- + " If you don't specify a columnFamily, use majorCompactRegion(regionName) instead"); + + " If you don't specify a columnFamily, use majorCompactRegion(regionName) instead"); return compactRegion(regionName, columnFamily, true); } @@ -1110,7 +1104,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture compactRegion(byte[] regionName, byte[] columnFamily, - boolean major) { + boolean major) { CompletableFuture future = new CompletableFuture<>(); addListener(getRegionLocation(regionName), (location, err) -> { if (err != null) { @@ -1144,8 +1138,10 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> { if (err != null) { future.completeExceptionally(err); - } else if (metaRegions == null || metaRegions.isEmpty() || - metaRegions.getDefaultRegionLocation() == null) { + } else if ( + metaRegions == null || metaRegions.isEmpty() + || metaRegions.getDefaultRegionLocation() == null + ) { future.completeExceptionally(new IOException("meta region does not found")); } else { future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation())); @@ -1162,7 +1158,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { * Compact column family of a table, Asynchronous operation even if CompletableFuture.get() */ private CompletableFuture compact(TableName tableName, byte[] columnFamily, boolean major, - CompactType compactType) { + CompactType compactType) { CompletableFuture future = new CompletableFuture<>(); switch (compactType) { @@ -1216,24 +1212,23 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { * Compact the region at specific region server. */ private CompletableFuture compact(final ServerName sn, final RegionInfo hri, - final boolean major, byte[] columnFamily) { - return this - . newAdminCaller() - .serverName(sn) - .action( - (controller, stub) -> this. adminCall( - controller, stub, RequestConverter.buildCompactRegionRequest(hri.getRegionName(), - major, columnFamily), (s, c, req, done) -> s.compactRegion(c, req, done), - resp -> null)).call(); + final boolean major, byte[] columnFamily) { + return this. newAdminCaller().serverName(sn) + .action((controller, stub) -> this. adminCall(controller, stub, + RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, columnFamily), + (s, c, req, done) -> s.compactRegion(c, req, done), resp -> null)) + .call(); } private byte[] toEncodeRegionName(byte[] regionName) { - return RegionInfo.isEncodedRegionName(regionName) ? regionName : - Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); + return RegionInfo.isEncodedRegionName(regionName) + ? regionName + : Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); } private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference tableName, - CompletableFuture result) { + CompletableFuture result) { addListener(getRegionLocation(encodeRegionName), (location, err) -> { if (err != null) { result.completeExceptionally(err); @@ -1249,8 +1244,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { if (!tableName.get().equals(regionInfo.getTable())) { // tables of this two region should be same. 
result.completeExceptionally( - new IllegalArgumentException("Cannot merge regions from two different tables " + - tableName.get() + " and " + regionInfo.getTable())); + new IllegalArgumentException("Cannot merge regions from two different tables " + + tableName.get() + " and " + regionInfo.getTable())); } else { result.complete(tableName.get()); } @@ -1288,28 +1283,25 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture setSplitOrMergeOn(boolean enabled, boolean synchronous, - MasterSwitchType switchType) { + MasterSwitchType switchType) { SetSplitOrMergeEnabledRequest request = RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType); return this. newMasterCaller() - .action((controller, stub) -> this - . call(controller, - stub, request, (s, c, req, done) -> s.setSplitOrMergeEnabled(c, req, done), + .action((controller, stub) -> this. call(controller, stub, request, + (s, c, req, done) -> s.setSplitOrMergeEnabled(c, req, done), (resp) -> resp.getPrevValueList().get(0))) .call(); } private CompletableFuture isSplitOrMergeOn(MasterSwitchType switchType) { IsSplitOrMergeEnabledRequest request = - RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType); - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, request, - (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), - (resp) -> resp.getEnabled())).call(); + RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType); + return this. newMasterCaller() + .action((controller, stub) -> this. call(controller, stub, request, + (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), (resp) -> resp.getEnabled())) + .call(); } @Override @@ -1338,9 +1330,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } addListener( - this.procedureCall(tableName, request, - MasterService.Interface::mergeTableRegions, MergeTableRegionsResponse::getProcId, - new MergeTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::mergeTableRegions, + MergeTableRegionsResponse::getProcId, new MergeTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1385,8 +1376,10 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { for (HRegionLocation h : rl.getRegionLocations()) { if (h != null && h.getServerName() != null) { RegionInfo hri = h.getRegion(); - if (hri == null || hri.isSplitParent() || - hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { + if ( + hri == null || hri.isSplitParent() + || hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID + ) { continue; } splitFutures.add(split(hri, null)); @@ -1449,9 +1442,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } RegionInfo regionInfo = location.getRegion(); if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); @@ -1483,9 +1475,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } RegionInfo regionInfo = location.getRegion(); if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. 
" + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); @@ -1494,8 +1485,10 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } - if (regionInfo.getStartKey() != null && - Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) { + if ( + regionInfo.getStartKey() != null + && Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0 + ) { future.completeExceptionally( new IllegalArgumentException("should not give a splitkey which equals to startkey!")); return; @@ -1524,9 +1517,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } addListener( - this.procedureCall(tableName, - request, MasterService.Interface::splitRegion, SplitTableRegionResponse::getProcId, - new SplitTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::splitRegion, + SplitTableRegionResponse::getProcId, new SplitTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1570,8 +1562,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . call(controller, stub, + .action(((controller, stub) -> this. call(controller, stub, RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()), (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))) .call(), @@ -1596,8 +1588,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . call(controller, stub, + .action(((controller, stub) -> this. call(controller, stub, RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))) .call(), @@ -1668,50 +1660,49 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture setQuota(QuotaSettings quota) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call(controller, - stub, QuotaSettings.buildSetQuotaRequestProto(quota), - (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call(controller, + stub, QuotaSettings.buildSetQuotaRequestProto(quota), + (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)) + .call(); } @Override public CompletableFuture> getQuota(QuotaFilter filter) { CompletableFuture> future = new CompletableFuture<>(); Scan scan = QuotaTableUtil.makeScan(filter); - this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build() - .scan(scan, new AdvancedScanResultConsumer() { - List settings = new ArrayList<>(); + this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build().scan(scan, + new AdvancedScanResultConsumer() { + List settings = new ArrayList<>(); - @Override - public void onNext(Result[] results, ScanController controller) { - for (Result result : results) { - try { - QuotaTableUtil.parseResultToCollection(result, settings); - } catch (IOException e) { - controller.terminate(); - future.completeExceptionally(e); - } + @Override + public void onNext(Result[] results, ScanController controller) { + for (Result result : results) { + try { + QuotaTableUtil.parseResultToCollection(result, settings); + } catch (IOException e) { + controller.terminate(); + future.completeExceptionally(e); } } + } - @Override - public void onError(Throwable error) { - future.completeExceptionally(error); - } + @Override + public void onError(Throwable error) { + future.completeExceptionally(error); + } - @Override - public void onComplete() { - future.complete(settings); - } - }); + @Override + public void onComplete() { + future.complete(settings); + } + }); return future; } @Override - public CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled) { + public CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) { return this. procedureCall( RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled), (s, c, req, done) -> s.addReplicationPeer(c, req, done), (resp) -> resp.getProcId(), @@ -1744,30 +1735,29 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture getReplicationPeerConfig(String peerId) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), ( - s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), - (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. call(controller, stub, + RequestConverter.buildGetReplicationPeerConfigRequest(peerId), + (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), + (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))) + .call(); } @Override public CompletableFuture updateReplicationPeerConfig(String peerId, - ReplicationPeerConfig peerConfig) { - return this - . procedureCall( - RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig), - (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done), - (resp) -> resp.getProcId(), - new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG")); + ReplicationPeerConfig peerConfig) { + return this. 
procedureCall( + RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig), + (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done), + (resp) -> resp.getProcId(), + new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG")); } @Override public CompletableFuture appendReplicationPeerTableCFs(String id, - Map> tableCfs) { + Map> tableCfs) { if (tableCfs == null) { return failedFuture(new ReplicationException("tableCfs is null")); } @@ -1789,7 +1779,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture removeReplicationPeerTableCFs(String id, - Map> tableCfs) { + Map> tableCfs) { if (tableCfs == null) { return failedFuture(new ReplicationException("tableCfs is null")); } @@ -1827,17 +1817,16 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(pattern)); } - private CompletableFuture> listReplicationPeers( - ListReplicationPeersRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this.> call(controller, stub, request, - (s, c, req, done) -> s.listReplicationPeers(c, req, done), - (resp) -> resp.getPeerDescList().stream() - .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) - .collect(Collectors.toList()))).call(); + private CompletableFuture> + listReplicationPeers(ListReplicationPeersRequest request) { + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, request, + (s, c, req, done) -> s.listReplicationPeers(c, req, done), + (resp) -> resp.getPeerDescList().stream() + .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) + .collect(Collectors.toList()))) + .call(); } @Override @@ -1908,8 +1897,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { }); } else { future.completeExceptionally( - new SnapshotCreationException("Snapshot '" + snapshot.getName() + - "' wasn't completed in expectedTime:" + expectedTimeout + " ms", snapshotDesc)); + new SnapshotCreationException("Snapshot '" + snapshot.getName() + + "' wasn't completed in expectedTime:" + expectedTimeout + " ms", snapshotDesc)); } } }; @@ -1920,15 +1909,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture isSnapshotFinished(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - IsSnapshotDoneRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call(controller, stub, + IsSnapshotDoneRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())) + .call(); } @Override @@ -1941,7 +1928,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl) { + boolean restoreAcl) { CompletableFuture future = new CompletableFuture<>(); addListener(listSnapshots(Pattern.compile(snapshotName)), (snapshotDescriptions, err) -> { if (err != null) { @@ -1988,7 +1975,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture restoreSnapshot(String snapshotName, TableName tableName, - boolean takeFailSafeSnapshot, boolean restoreAcl) { + boolean takeFailSafeSnapshot, boolean restoreAcl) { if (takeFailSafeSnapshot) { CompletableFuture future = new CompletableFuture<>(); // Step.1 Take a snapshot of the current state @@ -2009,16 +1996,14 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { (void2, err2) -> { if (err2 != null) { // Step.3.a Something went wrong during the restore and try to rollback. - addListener( - internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl, - null), - (void3, err3) -> { + addListener(internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, + restoreAcl, null), (void3, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { String msg = - "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" + - failSafeSnapshotSnapshotName + " succeeded."; + "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" + + failSafeSnapshotSnapshotName + " succeeded."; future.completeExceptionally(new RestoreSnapshotException(msg, err2)); } }); @@ -2046,7 +2031,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private void completeConditionalOnFuture(CompletableFuture dependentFuture, - CompletableFuture parentFuture) { + CompletableFuture parentFuture) { addListener(parentFuture, (res, err) -> { if (err != null) { dependentFuture.completeExceptionally(err); @@ -2058,7 +2043,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT) { + boolean restoreAcl, String customSFT) { CompletableFuture future = new CompletableFuture<>(); addListener(tableExists(tableName), (exists, err) -> { if (err != null) { @@ -2074,7 +2059,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture internalRestoreSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT) { + boolean restoreAcl, String customSFT) { SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder() .setName(snapshotName).setTable(tableName.getNameAsString()).build(); try { @@ -2085,13 +2070,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { RestoreSnapshotRequest.Builder builder = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) .setNonce(ng.newNonce()).setRestoreACL(restoreAcl); - if(customSFT != null){ + if (customSFT != null) { builder.setCustomSFT(customSFT); } - return waitProcedureResult(this. newMasterCaller().action((controller, stub) -> this - . 
call(controller, stub, - builder.build(), - (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) + return waitProcedureResult(this. newMasterCaller() + .action((controller, stub) -> this. call(controller, stub, builder.build(), + (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) .call()); } @@ -2108,33 +2093,34 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture> getCompletedSnapshots(Pattern pattern) { - return this.> newMasterCaller().action((controller, stub) -> this - .> - call(controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(), + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, + GetCompletedSnapshotsRequest.newBuilder().build(), (s, c, req, done) -> s.getCompletedSnapshots(c, req, done), resp -> ProtobufUtil.toSnapshotDescriptionList(resp, pattern))) - .call(); + .call(); } @Override public CompletableFuture> listTableSnapshots(Pattern tableNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." - + " If you don't specify a tableNamePattern, use listSnapshots() instead"); + + " If you don't specify a tableNamePattern, use listSnapshots() instead"); return getCompletedSnapshots(tableNamePattern, null); } @Override public CompletableFuture> listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." - + " If you don't specify a tableNamePattern, use listSnapshots(Pattern) instead"); + + " If you don't specify a tableNamePattern, use listSnapshots(Pattern) instead"); Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null." - + " If you don't specify a snapshotNamePattern, use listTableSnapshots(Pattern) instead"); + + " If you don't specify a snapshotNamePattern, use listTableSnapshots(Pattern) instead"); return getCompletedSnapshots(tableNamePattern, snapshotNamePattern); } - private CompletableFuture> getCompletedSnapshots( - Pattern tableNamePattern, Pattern snapshotNamePattern) { + private CompletableFuture> + getCompletedSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) { CompletableFuture> future = new CompletableFuture<>(); addListener(listTableNames(tableNamePattern, false), (tableNames, err) -> { if (err != null) { @@ -2175,29 +2161,29 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture deleteSnapshots(Pattern snapshotNamePattern) { Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null." - + " If you don't specify a snapshotNamePattern, use deleteSnapshots() instead"); + + " If you don't specify a snapshotNamePattern, use deleteSnapshots() instead"); return internalDeleteSnapshots(null, snapshotNamePattern); } @Override public CompletableFuture deleteTableSnapshots(Pattern tableNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." - + " If you don't specify a tableNamePattern, use deleteSnapshots() instead"); + + " If you don't specify a tableNamePattern, use deleteSnapshots() instead"); return internalDeleteSnapshots(tableNamePattern, null); } @Override public CompletableFuture deleteTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." 
- + " If you don't specify a tableNamePattern, use deleteSnapshots(Pattern) instead"); + + " If you don't specify a tableNamePattern, use deleteSnapshots(Pattern) instead"); Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null." - + " If you don't specify a snapshotNamePattern, use deleteSnapshots(Pattern) instead"); + + " If you don't specify a snapshotNamePattern, use deleteSnapshots(Pattern) instead"); return internalDeleteSnapshots(tableNamePattern, snapshotNamePattern); } private CompletableFuture internalDeleteSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { CompletableFuture> listSnapshotsFuture; if (tableNamePattern == null) { listSnapshotsFuture = getCompletedSnapshots(snapshotNamePattern); @@ -2227,20 +2213,18 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture internalDeleteSnapshot(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - DeleteSnapshotRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.deleteSnapshot(c, req, done), resp -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. call( + controller, stub, + DeleteSnapshotRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.deleteSnapshot(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture execProcedure(String signature, String instance, - Map props) { + Map props) { CompletableFuture future = new CompletableFuture<>(); ProcedureDescription procDesc = ProtobufUtil.buildProcedureDescription(signature, instance, props); @@ -2279,8 +2263,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } }); } else { - future.completeExceptionally(new IOException("Procedure '" + signature + " : " + - instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); + future.completeExceptionally(new IOException("Procedure '" + signature + " : " + + instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); } } }; @@ -2292,29 +2276,28 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture execProcedureWithReturn(String signature, String instance, - Map props) { + Map props) { ProcedureDescription proDesc = - ProtobufUtil.buildProcedureDescription(signature, instance, props); + ProtobufUtil.buildProcedureDescription(signature, instance, props); return this. newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, stub, ExecProcedureRequest.newBuilder().setProcedure(proDesc).build(), - (s, c, req, done) -> s.execProcedureWithRet(c, req, done), - resp -> resp.hasReturnData() ? resp.getReturnData().toByteArray() : null)) - .call(); + .action((controller, stub) -> this. call( + controller, stub, ExecProcedureRequest.newBuilder().setProcedure(proDesc).build(), + (s, c, req, done) -> s.execProcedureWithRet(c, req, done), + resp -> resp.hasReturnData() ? resp.getReturnData().toByteArray() : null)) + .call(); } @Override public CompletableFuture isProcedureFinished(String signature, String instance, - Map props) { + Map props) { ProcedureDescription proDesc = - ProtobufUtil.buildProcedureDescription(signature, instance, props); + ProtobufUtil.buildProcedureDescription(signature, instance, props); return this. newMasterCaller() - .action((controller, stub) -> this - . 
call(controller, stub, - IsProcedureDoneRequest.newBuilder().setProcedure(proDesc).build(), - (s, c, req, done) -> s.isProcedureDone(c, req, done), resp -> resp.getDone())) - .call(); + .action( + (controller, stub) -> this. call( + controller, stub, IsProcedureDoneRequest.newBuilder().setProcedure(proDesc).build(), + (s, c, req, done) -> s.isProcedureDone(c, req, done), resp -> resp.getDone())) + .call(); } @Override @@ -2323,66 +2306,61 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { (controller, stub) -> this. call( controller, stub, AbortProcedureRequest.newBuilder().setProcId(procId).build(), (s, c, req, done) -> s.abortProcedure(c, req, done), resp -> resp.getIsProcedureAborted())) - .call(); + .call(); } @Override public CompletableFuture getProcedures() { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, GetProceduresRequest.newBuilder().build(), - (s, c, req, done) -> s.getProcedures(c, req, done), - resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. call( + controller, stub, GetProceduresRequest.newBuilder().build(), + (s, c, req, done) -> s.getProcedures(c, req, done), + resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))) + .call(); } @Override public CompletableFuture getLocks() { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, stub, GetLocksRequest.newBuilder().build(), - (s, c, req, done) -> s.getLocks(c, req, done), - resp -> ProtobufUtil.toLockJson(resp.getLockList()))).call(); + return this. newMasterCaller() + .action( + (controller, stub) -> this. call(controller, + stub, GetLocksRequest.newBuilder().build(), (s, c, req, done) -> s.getLocks(c, req, done), + resp -> ProtobufUtil.toLockJson(resp.getLockList()))) + .call(); } @Override - public CompletableFuture decommissionRegionServers( - List servers, boolean offload) { + public CompletableFuture decommissionRegionServers(List servers, + boolean offload) { return this. newMasterCaller() - .action((controller, stub) -> this - . call( - controller, stub, - RequestConverter.buildDecommissionRegionServersRequest(servers, offload), - (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) - .call(); + .action((controller, stub) -> this. call(controller, stub, + RequestConverter.buildDecommissionRegionServersRequest(servers, offload), + (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture> listDecommissionedRegionServers() { return this.> newMasterCaller() - .action((controller, stub) -> this - .> call( - controller, stub, ListDecommissionedRegionServersRequest.newBuilder().build(), - (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), - resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()))) - .call(); + .action((controller, stub) -> this.> call(controller, stub, + ListDecommissionedRegionServersRequest.newBuilder().build(), + (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), + resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList()))) + .call(); } @Override public CompletableFuture recommissionRegionServer(ServerName server, - List encodedRegionNames) { + List encodedRegionNames) { return this. newMasterCaller() - .action((controller, stub) -> - this. 
call( - controller, stub, RequestConverter.buildRecommissionRegionServerRequest( - server, encodedRegionNames), (s, c, req, done) -> s.recommissionRegionServer( - c, req, done), resp -> null)).call(); + .action((controller, stub) -> this. call(controller, stub, + RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames), + (s, c, req, done) -> s.recommissionRegionServer(c, req, done), resp -> null)) + .call(); } /** @@ -2425,8 +2403,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) .findFirst()); } else { - future = - AsyncMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + future = AsyncMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); } } @@ -2438,8 +2415,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } if (!location.isPresent() || location.get().getRegion() == null) { returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); } else { returnedFuture.complete(location.get()); } @@ -2458,10 +2435,12 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - if (Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) || + if ( Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { + RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionNameOrEncodedRegionName, + RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()) + ) { return CompletableFuture.completedFuture(RegionInfoBuilder.FIRST_META_REGIONINFO); } @@ -2502,7 +2481,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } if (lastKey != null && Bytes.equals(splitKey, lastKey)) { throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: " - + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); + + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); } lastKey = splitKey; } @@ -2535,7 +2514,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { String getDescription() { return "Operation: " + getOperationType() + ", " + "Table Name: " - + tableName.getNameWithNamespaceInclAsString(); + + tableName.getNameWithNamespaceInclAsString(); } @Override @@ -2812,9 +2791,9 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { private void getProcedureResult(long procId, CompletableFuture future, int retries) { addListener( this. newMasterCaller() - .action((controller, stub) -> this - . call( - controller, stub, GetProcedureResultRequest.newBuilder().setProcId(procId).build(), + .action((controller, stub) -> this. call(controller, stub, + GetProcedureResultRequest.newBuilder().setProcId(procId).build(), (s, c, req, done) -> s.getProcedureResult(c, req, done), (resp) -> resp)) .call(), (response, error) -> { @@ -2860,14 +2839,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture getClusterMetrics(EnumSet

   * <li>Throw an exception if the table exists on peer cluster but descriptors are not same.</li>
  • * * @param tableName name of the table to sync to the peer - * @param splits table split keys + * @param splits table split keys */ private CompletableFuture checkAndSyncTableToPeerClusters(TableName tableName, - byte[][] splits) { + byte[][] splits) { CompletableFuture future = new CompletableFuture<>(); addListener(listReplicationPeers(), (peers, err) -> { if (err != null) { @@ -3597,7 +3536,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture trySyncTableToPeerCluster(TableName tableName, byte[][] splits, - ReplicationPeerDescription peer) { + ReplicationPeerDescription peer) { Configuration peerConf = null; try { peerConf = @@ -3653,7 +3592,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture compareTableWithPeerCluster(TableName tableName, - TableDescriptor tableDesc, ReplicationPeerDescription peer, AsyncAdmin peerAdmin) { + TableDescriptor tableDesc, ReplicationPeerDescription peer, AsyncAdmin peerAdmin) { CompletableFuture future = new CompletableFuture<>(); addListener(peerAdmin.getDescriptor(tableName), (peerTableDesc, err) -> { if (err != null) { @@ -3662,15 +3601,15 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } if (peerTableDesc == null) { future.completeExceptionally( - new IllegalArgumentException("Failed to get table descriptor for table " + - tableName.getNameAsString() + " from peer cluster " + peer.getPeerId())); + new IllegalArgumentException("Failed to get table descriptor for table " + + tableName.getNameAsString() + " from peer cluster " + peer.getPeerId())); return; } if (TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc, tableDesc) != 0) { future.completeExceptionally(new IllegalArgumentException( - "Table " + tableName.getNameAsString() + " exists in peer cluster " + peer.getPeerId() + - ", but the table descriptors are not same when compared with source cluster." + - " Thus can not enable the table's replication switch.")); + "Table " + tableName.getNameAsString() + " exists in peer cluster " + peer.getPeerId() + + ", but the table descriptors are not same when compared with source cluster." + + " Thus can not enable the table's replication switch.")); return; } future.complete(null); @@ -3748,7 +3687,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture cloneTableSchema(TableName tableName, TableName newTableName, - boolean preserveSplits) { + boolean preserveSplits) { CompletableFuture future = new CompletableFuture<>(); addListener(tableExists(tableName), (exist, err) -> { if (err != null) { @@ -3806,73 +3745,71 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } private CompletableFuture clearBlockCache(ServerName serverName, - List hris) { - return this. newAdminCaller().action((controller, stub) -> this - . adminCall( - controller, stub, RequestConverter.buildClearRegionBlockCacheRequest(hris), - (s, c, req, done) -> s.clearRegionBlockCache(controller, req, done), - resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats()))) + List hris) { + return this. newAdminCaller() + .action((controller, stub) -> this. adminCall(controller, stub, + RequestConverter.buildClearRegionBlockCacheRequest(hris), + (s, c, req, done) -> s.clearRegionBlockCache(controller, req, done), + resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats()))) .serverName(serverName).call(); } @Override public CompletableFuture switchRpcThrottle(boolean enable) { CompletableFuture future = this. newMasterCaller() - .action((controller, stub) -> this - . 
call(controller, stub, - SwitchRpcThrottleRequest.newBuilder().setRpcThrottleEnabled(enable).build(), - (s, c, req, done) -> s.switchRpcThrottle(c, req, done), - resp -> resp.getPreviousRpcThrottleEnabled())) - .call(); + .action((controller, stub) -> this. call(controller, stub, + SwitchRpcThrottleRequest.newBuilder().setRpcThrottleEnabled(enable).build(), + (s, c, req, done) -> s.switchRpcThrottle(c, req, done), + resp -> resp.getPreviousRpcThrottleEnabled())) + .call(); return future; } @Override public CompletableFuture isRpcThrottleEnabled() { CompletableFuture future = this. newMasterCaller() - .action((controller, stub) -> this - . call(controller, - stub, IsRpcThrottleEnabledRequest.newBuilder().build(), - (s, c, req, done) -> s.isRpcThrottleEnabled(c, req, done), - resp -> resp.getRpcThrottleEnabled())) - .call(); + .action((controller, stub) -> this. call(controller, stub, IsRpcThrottleEnabledRequest.newBuilder().build(), + (s, c, req, done) -> s.isRpcThrottleEnabled(c, req, done), + resp -> resp.getRpcThrottleEnabled())) + .call(); return future; } @Override public CompletableFuture exceedThrottleQuotaSwitch(boolean enable) { CompletableFuture future = this. newMasterCaller() - .action((controller, stub) -> this - . call( - controller, stub, - SwitchExceedThrottleQuotaRequest.newBuilder().setExceedThrottleQuotaEnabled(enable) - .build(), - (s, c, req, done) -> s.switchExceedThrottleQuota(c, req, done), - resp -> resp.getPreviousExceedThrottleQuotaEnabled())) - .call(); + .action((controller, stub) -> this. call(controller, stub, + SwitchExceedThrottleQuotaRequest.newBuilder().setExceedThrottleQuotaEnabled(enable) + .build(), + (s, c, req, done) -> s.switchExceedThrottleQuota(c, req, done), + resp -> resp.getPreviousExceedThrottleQuotaEnabled())) + .call(); return future; } @Override public CompletableFuture> getSpaceQuotaTableSizes() { - return this.> newMasterCaller().action((controller, stub) -> this - .> call(controller, stub, - RequestConverter.buildGetSpaceQuotaRegionSizesRequest(), - (s, c, req, done) -> s.getSpaceQuotaRegionSizes(c, req, done), - resp -> resp.getSizesList().stream().collect(Collectors - .toMap(sizes -> ProtobufUtil.toTableName(sizes.getTableName()), RegionSizes::getSize)))) + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, + RequestConverter.buildGetSpaceQuotaRegionSizesRequest(), + (s, c, req, done) -> s.getSpaceQuotaRegionSizes(c, req, done), + resp -> resp.getSizesList().stream().collect(Collectors + .toMap(sizes -> ProtobufUtil.toTableName(sizes.getTableName()), RegionSizes::getSize)))) .call(); } @Override - public CompletableFuture> getRegionServerSpaceQuotaSnapshots( - ServerName serverName) { + public CompletableFuture> + getRegionServerSpaceQuotaSnapshots(ServerName serverName) { return this.> newAdminCaller() - .action((controller, stub) -> this - .> adminCall(controller, stub, - RequestConverter.buildGetSpaceQuotaSnapshotsRequest(), + .action((controller, stub) -> this.> adminCall(controller, + stub, RequestConverter.buildGetSpaceQuotaSnapshotsRequest(), (s, c, req, done) -> s.getSpaceQuotaSnapshots(controller, req, done), resp -> resp.getSnapshotsList().stream() .collect(Collectors.toMap(snapshot -> ProtobufUtil.toTableName(snapshot.getTableName()), @@ -3880,12 +3817,11 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { .serverName(serverName).call(); } - private CompletableFuture getCurrentSpaceQuotaSnapshot( - Converter converter) { + private CompletableFuture + 
getCurrentSpaceQuotaSnapshot(Converter converter) { return this. newMasterCaller() - .action((controller, stub) -> this - . call(controller, stub, - RequestConverter.buildGetQuotaStatesRequest(), + .action((controller, stub) -> this. call(controller, stub, RequestConverter.buildGetQuotaStatesRequest(), (s, c, req, done) -> s.getQuotaStates(c, req, done), converter)) .call(); } @@ -3907,154 +3843,140 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture grant(UserPermission userPermission, - boolean mergeExistingPermissions) { + boolean mergeExistingPermissions) { return this. newMasterCaller() - .action((controller, stub) -> this. call(controller, - stub, ShadedAccessControlUtil.buildGrantRequest(userPermission, mergeExistingPermissions), - (s, c, req, done) -> s.grant(c, req, done), resp -> null)) - .call(); + .action((controller, stub) -> this. call(controller, stub, + ShadedAccessControlUtil.buildGrantRequest(userPermission, mergeExistingPermissions), + (s, c, req, done) -> s.grant(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture revoke(UserPermission userPermission) { return this. newMasterCaller() - .action((controller, stub) -> this. call(controller, - stub, ShadedAccessControlUtil.buildRevokeRequest(userPermission), - (s, c, req, done) -> s.revoke(c, req, done), resp -> null)) - .call(); + .action((controller, stub) -> this. call(controller, + stub, ShadedAccessControlUtil.buildRevokeRequest(userPermission), + (s, c, req, done) -> s.revoke(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture> - getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) { - return this.> newMasterCaller().action((controller, - stub) -> this.> call(controller, stub, - ShadedAccessControlUtil.buildGetUserPermissionsRequest(getUserPermissionsRequest), - (s, c, req, done) -> s.getUserPermissions(c, req, done), - resp -> resp.getUserPermissionList().stream() - .map(uPerm -> ShadedAccessControlUtil.toUserPermission(uPerm)) - .collect(Collectors.toList()))) - .call(); + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) { + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, + ShadedAccessControlUtil.buildGetUserPermissionsRequest(getUserPermissionsRequest), + (s, c, req, done) -> s.getUserPermissions(c, req, done), + resp -> resp.getUserPermissionList().stream() + .map(uPerm -> ShadedAccessControlUtil.toUserPermission(uPerm)) + .collect(Collectors.toList()))) + .call(); } @Override public CompletableFuture> hasUserPermissions(String userName, - List permissions) { + List permissions) { return this.> newMasterCaller() - .action((controller, stub) -> this - .> call(controller, - stub, ShadedAccessControlUtil.buildHasUserPermissionsRequest(userName, permissions), - (s, c, req, done) -> s.hasUserPermissions(c, req, done), - resp -> resp.getHasUserPermissionList())) - .call(); + .action((controller, stub) -> this.> call(controller, stub, + ShadedAccessControlUtil.buildHasUserPermissionsRequest(userName, permissions), + (s, c, req, done) -> s.hasUserPermissions(c, req, done), + resp -> resp.getHasUserPermissionList())) + .call(); } @Override - public CompletableFuture snapshotCleanupSwitch(final boolean on, - final boolean sync) { - return this.newMasterCaller() - .action((controller, stub) -> this - .call(controller, stub, - RequestConverter.buildSetSnapshotCleanupRequest(on, sync), - MasterService.Interface::switchSnapshotCleanup, - 
SetSnapshotCleanupResponse::getPrevSnapshotCleanup)) - .call(); + public CompletableFuture snapshotCleanupSwitch(final boolean on, final boolean sync) { + return this. newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, + RequestConverter.buildSetSnapshotCleanupRequest(on, sync), + MasterService.Interface::switchSnapshotCleanup, + SetSnapshotCleanupResponse::getPrevSnapshotCleanup)) + .call(); } @Override public CompletableFuture isSnapshotCleanupEnabled() { - return this.newMasterCaller() - .action((controller, stub) -> this - .call(controller, stub, - RequestConverter.buildIsSnapshotCleanupEnabledRequest(), - MasterService.Interface::isSnapshotCleanupEnabled, - IsSnapshotCleanupEnabledResponse::getEnabled)) - .call(); + return this. newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, + RequestConverter.buildIsSnapshotCleanupEnabledRequest(), + MasterService.Interface::isSnapshotCleanupEnabled, + IsSnapshotCleanupEnabledResponse::getEnabled)) + .call(); } private CompletableFuture> getSlowLogResponses( - final Map filterParams, final Set serverNames, final int limit, - final String logType) { + final Map filterParams, final Set serverNames, final int limit, + final String logType) { if (CollectionUtils.isEmpty(serverNames)) { return CompletableFuture.completedFuture(Collections.emptyList()); } - return CompletableFuture.supplyAsync(() -> serverNames.stream() - .map((ServerName serverName) -> - getSlowLogResponseFromServer(serverName, filterParams, limit, logType)) - .map(CompletableFuture::join) - .flatMap(List::stream) - .collect(Collectors.toList())); + return CompletableFuture.supplyAsync(() -> serverNames + .stream().map((ServerName serverName) -> getSlowLogResponseFromServer(serverName, + filterParams, limit, logType)) + .map(CompletableFuture::join).flatMap(List::stream).collect(Collectors.toList())); } private CompletableFuture> getSlowLogResponseFromServer(ServerName serverName, - Map filterParams, int limit, String logType) { - return this.>newAdminCaller().action((controller, stub) -> this - .adminCall(controller, stub, + Map filterParams, int limit, String logType) { + return this.> newAdminCaller() + .action((controller, stub) -> this.adminCall(controller, stub, RequestConverter.buildSlowLogResponseRequest(filterParams, limit, logType), AdminService.Interface::getLogEntries, ProtobufUtil::toSlowLogPayloads)) .serverName(serverName).call(); } @Override - public CompletableFuture> clearSlowLogResponses( - @Nullable Set serverNames) { + public CompletableFuture> + clearSlowLogResponses(@Nullable Set serverNames) { if (CollectionUtils.isEmpty(serverNames)) { return CompletableFuture.completedFuture(Collections.emptyList()); } - List> clearSlowLogResponseList = serverNames.stream() - .map(this::clearSlowLogsResponses) - .collect(Collectors.toList()); + List> clearSlowLogResponseList = + serverNames.stream().map(this::clearSlowLogsResponses).collect(Collectors.toList()); return convertToFutureOfList(clearSlowLogResponseList); } private CompletableFuture clearSlowLogsResponses(final ServerName serverName) { - return this.newAdminCaller() - .action(((controller, stub) -> this - .adminCall( - controller, stub, RequestConverter.buildClearSlowLogResponseRequest(), - AdminService.Interface::clearSlowLogsResponses, - ProtobufUtil::toClearSlowLogPayload)) - ).serverName(serverName).call(); + return this. 
newAdminCaller() + .action(((controller, stub) -> this.adminCall(controller, stub, + RequestConverter.buildClearSlowLogResponseRequest(), + AdminService.Interface::clearSlowLogsResponses, ProtobufUtil::toClearSlowLogPayload))) + .serverName(serverName).call(); } - private static CompletableFuture> convertToFutureOfList( - List> futures) { + private static CompletableFuture> + convertToFutureOfList(List> futures) { CompletableFuture allDoneFuture = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); - return allDoneFuture.thenApply(v -> - futures.stream() - .map(CompletableFuture::join) - .collect(Collectors.toList()) - ); + return allDoneFuture + .thenApply(v -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList())); } private CompletableFuture> getBalancerDecisions(final int limit) { - return this.>newMasterCaller() - .action((controller, stub) -> - this.call(controller, stub, - ProtobufUtil.toBalancerDecisionRequest(limit), - MasterService.Interface::getLogEntries, ProtobufUtil::toBalancerDecisionResponse)) + return this.> newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, + ProtobufUtil.toBalancerDecisionRequest(limit), MasterService.Interface::getLogEntries, + ProtobufUtil::toBalancerDecisionResponse)) .call(); } private CompletableFuture> getBalancerRejections(final int limit) { - return this.>newMasterCaller() - .action((controller, stub) -> - this.call(controller, stub, - ProtobufUtil.toBalancerRejectionRequest(limit), - MasterService.Interface::getLogEntries, ProtobufUtil::toBalancerRejectionResponse)) + return this.> newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, + ProtobufUtil.toBalancerRejectionRequest(limit), MasterService.Interface::getLogEntries, + ProtobufUtil::toBalancerRejectionResponse)) .call(); } @Override public CompletableFuture> getLogEntries(Set serverNames, - String logType, ServerType serverType, int limit, - Map filterParams) { + String logType, ServerType serverType, int limit, Map filterParams) { if (logType == null || serverType == null) { throw new IllegalArgumentException("logType and/or serverType cannot be empty"); } - switch (logType){ + switch (logType) { case "SLOW_LOG": case "LARGE_LOG": if (ServerType.MASTER.equals(serverType)) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java index 26d4da81a4f..ccf879f1684 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java @@ -26,6 +26,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.validatePutsInRowMu import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture; import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFutures; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import com.google.protobuf.RpcChannel; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.StatusCode; @@ -58,9 +59,11 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.util.Timer; + import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; @@ -127,8 +130,8 @@ class RawAsyncTableImpl implements AsyncTable { this.pauseNs = builder.pauseNs; if (builder.pauseNsForServerOverloaded < builder.pauseNs) { LOG.warn( - "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + - " the normal pause value {} ms, use the greater one instead", + "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + + " the normal pause value {} ms, use the greater one instead", TimeUnit.NANOSECONDS.toMillis(builder.pauseNsForServerOverloaded), TimeUnit.NANOSECONDS.toMillis(builder.pauseNs)); this.pauseNsForServerOverloaded = builder.pauseNs; @@ -137,8 +140,9 @@ class RawAsyncTableImpl implements AsyncTable { } this.maxAttempts = builder.maxAttempts; this.startLogErrorsCnt = builder.startLogErrorsCnt; - this.defaultScannerCaching = tableName.isSystemTable() ? conn.connConf.getMetaScannerCaching() - : conn.connConf.getScannerCaching(); + this.defaultScannerCaching = tableName.isSystemTable() + ? conn.connConf.getMetaScannerCaching() + : conn.connConf.getScannerCaching(); this.defaultScannerMaxResultSize = conn.connConf.getScannerMaxResultSize(); } @@ -170,13 +174,13 @@ class RawAsyncTableImpl implements AsyncTable { @FunctionalInterface private interface RpcCall { void call(ClientService.Interface stub, HBaseRpcController controller, REQ req, - RpcCallback done); + RpcCallback done); } private static CompletableFuture call( - HBaseRpcController controller, HRegionLocation loc, ClientService.Interface stub, REQ req, - Converter reqConvert, RpcCall rpcCall, - Converter respConverter) { + HBaseRpcController controller, HRegionLocation loc, ClientService.Interface stub, REQ req, + Converter reqConvert, RpcCall rpcCall, + Converter respConverter) { CompletableFuture future = new CompletableFuture<>(); try { rpcCall.call(stub, controller, reqConvert.convert(loc.getRegion().getRegionName(), req), @@ -202,23 +206,23 @@ class RawAsyncTableImpl implements AsyncTable { } private static CompletableFuture mutate(HBaseRpcController controller, - HRegionLocation loc, ClientService.Interface stub, REQ req, - Converter reqConvert, - Converter respConverter) { + HRegionLocation loc, ClientService.Interface stub, REQ req, + Converter reqConvert, + Converter respConverter) { return call(controller, loc, stub, req, reqConvert, (s, c, r, done) -> s.mutate(c, r, done), respConverter); } private static CompletableFuture voidMutate(HBaseRpcController controller, - HRegionLocation loc, ClientService.Interface stub, REQ req, - Converter reqConvert) { + HRegionLocation loc, ClientService.Interface stub, REQ req, + Converter reqConvert) { return mutate(controller, loc, stub, req, reqConvert, (c, resp) -> { return null; }); } private static Result toResult(HBaseRpcController controller, MutateResponse resp) - throws IOException { + throws IOException { if (!resp.hasResult()) { return null; } @@ -231,15 +235,15 @@ class RawAsyncTableImpl implements AsyncTable { } private CompletableFuture noncedMutate(long nonceGroup, long nonce, - HBaseRpcController controller, HRegionLocation loc, ClientService.Interface stub, REQ req, - NoncedConverter reqConvert, - Converter respConverter) { + HBaseRpcController controller, HRegionLocation loc, ClientService.Interface stub, REQ req, + NoncedConverter reqConvert, + Converter respConverter) { return mutate(controller, loc, 
stub, req, (info, src) -> reqConvert.convert(info, src, nonceGroup, nonce), respConverter); } private SingleRequestCallerBuilder newCaller(byte[] row, int priority, long rpcTimeoutNs) { - return conn.callerFactory.single().table(tableName).row(row).priority(priority) + return conn.callerFactory. single().table(tableName).row(row).priority(priority) .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) .pause(pauseNs, TimeUnit.NANOSECONDS) @@ -248,17 +252,17 @@ class RawAsyncTableImpl implements AsyncTable { } private SingleRequestCallerBuilder - newCaller(R row, long rpcTimeoutNs) { + newCaller(R row, long rpcTimeoutNs) { return newCaller(row.getRow(), row.getPriority(), rpcTimeoutNs); } private CompletableFuture get(Get get, int replicaId) { return this. newCaller(get, readRpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl - . call(controller, loc, stub, get, - RequestConverter::buildGetRequest, (s, c, req, done) -> s.get(c, req, done), - (c, resp) -> ProtobufUtil.toResult(resp.getResult(), c.cellScanner()))) - .replicaId(replicaId).call(); + .action((controller, loc, stub) -> RawAsyncTableImpl. call(controller, loc, stub, get, RequestConverter::buildGetRequest, + (s, c, req, done) -> s.get(c, req, done), + (c, resp) -> ProtobufUtil.toResult(resp.getResult(), c.cellScanner()))) + .replicaId(replicaId).call(); } private TableOperationSpanBuilder newTableOperationSpanBuilder() { @@ -280,20 +284,18 @@ class RawAsyncTableImpl implements AsyncTable { validatePut(put, conn.connConf.getMaxKeyValueSize()); final Supplier supplier = newTableOperationSpanBuilder().setOperation(put); return tracedFuture(() -> this. newCaller(put, writeRpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub, - put, RequestConverter::buildMutateRequest)) - .call(), - supplier); + .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub, + put, RequestConverter::buildMutateRequest)) + .call(), supplier); } @Override public CompletableFuture delete(Delete delete) { final Supplier supplier = newTableOperationSpanBuilder().setOperation(delete); return tracedFuture(() -> this. newCaller(delete, writeRpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, - stub, delete, RequestConverter::buildMutateRequest)) - .call(), - supplier); + .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, + stub, delete, RequestConverter::buildMutateRequest)) + .call(), supplier); } @Override @@ -304,10 +306,10 @@ class RawAsyncTableImpl implements AsyncTable { long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); return this. newCaller(append, rpcTimeoutNs) - .action((controller, loc, stub) -> this. noncedMutate(nonceGroup, nonce, - controller, loc, stub, append, RequestConverter::buildMutateRequest, - RawAsyncTableImpl::toResult)) - .call(); + .action((controller, loc, stub) -> this. noncedMutate(nonceGroup, nonce, + controller, loc, stub, append, RequestConverter::buildMutateRequest, + RawAsyncTableImpl::toResult)) + .call(); }, supplier); } @@ -319,10 +321,10 @@ class RawAsyncTableImpl implements AsyncTable { long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); return this. newCaller(increment, rpcTimeoutNs) - .action((controller, loc, stub) -> this. 
noncedMutate(nonceGroup, - nonce, controller, loc, stub, increment, RequestConverter::buildMutateRequest, - RawAsyncTableImpl::toResult)) - .call(); + .action((controller, loc, stub) -> this. noncedMutate(nonceGroup, nonce, + controller, loc, stub, increment, RequestConverter::buildMutateRequest, + RawAsyncTableImpl::toResult)) + .call(); }, supplier); } @@ -348,7 +350,7 @@ class RawAsyncTableImpl implements AsyncTable { @Override public CheckAndMutateBuilder qualifier(byte[] qualifier) { this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" - + " an empty byte array, or just do not call this method if you want a null qualifier"); + + " an empty byte array, or just do not call this method if you want a null qualifier"); return this; } @@ -374,7 +376,7 @@ class RawAsyncTableImpl implements AsyncTable { private void preCheck() { Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" - + " calling ifNotExists/ifEquals/ifMatches before executing the request"); + + " calling ifNotExists/ifEquals/ifMatches before executing the request"); } @Override @@ -382,15 +384,15 @@ class RawAsyncTableImpl implements AsyncTable { validatePut(put, conn.connConf.getMaxKeyValueSize()); preCheck(); final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(put); + .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(put); return tracedFuture( () -> RawAsyncTableImpl.this. newCaller(row, put.getPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, put, - (rn, p) -> RequestConverter.buildMutateRequest(rn, row, family, qualifier, op, value, - null, timeRange, p, HConstants.NO_NONCE, HConstants.NO_NONCE), - (c, r) -> r.getProcessed())) - .call(), + .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, put, + (rn, p) -> RequestConverter.buildMutateRequest(rn, row, family, qualifier, op, value, + null, timeRange, p, HConstants.NO_NONCE, HConstants.NO_NONCE), + (c, r) -> r.getProcessed())) + .call(), supplier); } @@ -398,10 +400,10 @@ class RawAsyncTableImpl implements AsyncTable { public CompletableFuture thenDelete(Delete delete) { preCheck(); final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(delete); - return tracedFuture(() -> RawAsyncTableImpl.this - . newCaller(row, delete.getPriority(), rpcTimeoutNs) + .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(delete); + return tracedFuture( + () -> RawAsyncTableImpl.this. newCaller(row, delete.getPriority(), rpcTimeoutNs) .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, delete, (rn, d) -> RequestConverter.buildMutateRequest(rn, row, family, qualifier, op, value, null, timeRange, d, HConstants.NO_NONCE, HConstants.NO_NONCE), @@ -415,17 +417,16 @@ class RawAsyncTableImpl implements AsyncTable { preCheck(); validatePutsInRowMutations(mutations, conn.connConf.getMaxKeyValueSize()); final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(mutations); + .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(mutations); return tracedFuture(() -> RawAsyncTableImpl.this - . 
newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, - mutations, - (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, family, qualifier, op, value, - null, timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), - CheckAndMutateResult::isSuccess)) - .call(), - supplier); + . newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) + .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, + mutations, + (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, family, qualifier, op, value, + null, timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), + CheckAndMutateResult::isSuccess)) + .call(), supplier); } } @@ -435,7 +436,7 @@ class RawAsyncTableImpl implements AsyncTable { } private final class CheckAndMutateWithFilterBuilderImpl - implements CheckAndMutateWithFilterBuilder { + implements CheckAndMutateWithFilterBuilder { private final byte[] row; @@ -458,10 +459,10 @@ class RawAsyncTableImpl implements AsyncTable { public CompletableFuture thenPut(Put put) { validatePut(put, conn.connConf.getMaxKeyValueSize()); final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(put); - return tracedFuture(() -> RawAsyncTableImpl.this - . newCaller(row, put.getPriority(), rpcTimeoutNs) + .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(put); + return tracedFuture( + () -> RawAsyncTableImpl.this. newCaller(row, put.getPriority(), rpcTimeoutNs) .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, put, (rn, p) -> RequestConverter.buildMutateRequest(rn, row, null, null, null, null, filter, timeRange, p, HConstants.NO_NONCE, HConstants.NO_NONCE), @@ -473,10 +474,10 @@ class RawAsyncTableImpl implements AsyncTable { @Override public CompletableFuture thenDelete(Delete delete) { final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(delete); - return tracedFuture(() -> RawAsyncTableImpl.this - . newCaller(row, delete.getPriority(), rpcTimeoutNs) + .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(delete); + return tracedFuture( + () -> RawAsyncTableImpl.this. newCaller(row, delete.getPriority(), rpcTimeoutNs) .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, delete, (rn, d) -> RequestConverter.buildMutateRequest(rn, row, null, null, null, null, filter, timeRange, d, HConstants.NO_NONCE, HConstants.NO_NONCE), @@ -489,17 +490,16 @@ class RawAsyncTableImpl implements AsyncTable { public CompletableFuture thenMutate(RowMutations mutations) { validatePutsInRowMutations(mutations, conn.connConf.getMaxKeyValueSize()); final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(mutations); + .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(mutations); return tracedFuture(() -> RawAsyncTableImpl.this - . 
newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, - mutations, - (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, null, null, null, null, filter, - timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), - CheckAndMutateResult::isSuccess)) - .call(), - supplier); + . newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) + .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, + mutations, + (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, null, null, null, null, filter, + timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), + CheckAndMutateResult::isSuccess)) + .call(), supplier); } } @@ -511,11 +511,13 @@ class RawAsyncTableImpl implements AsyncTable { @Override public CompletableFuture checkAndMutate(CheckAndMutate checkAndMutate) { final Supplier supplier = newTableOperationSpanBuilder().setOperation(checkAndMutate) - .setContainerOperations(checkAndMutate.getAction()); + .setContainerOperations(checkAndMutate.getAction()); return tracedFuture(() -> { - if (checkAndMutate.getAction() instanceof Put || checkAndMutate.getAction() instanceof Delete + if ( + checkAndMutate.getAction() instanceof Put || checkAndMutate.getAction() instanceof Delete || checkAndMutate.getAction() instanceof Increment - || checkAndMutate.getAction() instanceof Append) { + || checkAndMutate.getAction() instanceof Append + ) { Mutation mutation = (Mutation) checkAndMutate.getAction(); if (mutation instanceof Put) { validatePut((Put) mutation, conn.connConf.getMaxKeyValueSize()); @@ -523,38 +525,36 @@ class RawAsyncTableImpl implements AsyncTable { long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); return RawAsyncTableImpl.this - . newCaller(checkAndMutate.getRow(), mutation.getPriority(), - rpcTimeoutNs) - .action( - (controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, mutation, - (rn, m) -> RequestConverter.buildMutateRequest(rn, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), checkAndMutate.getValue(), - checkAndMutate.getFilter(), checkAndMutate.getTimeRange(), m, nonceGroup, nonce), - (c, r) -> ResponseConverter.getCheckAndMutateResult(r, c.cellScanner()))) - .call(); + . newCaller(checkAndMutate.getRow(), mutation.getPriority(), + rpcTimeoutNs) + .action( + (controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, mutation, + (rn, m) -> RequestConverter.buildMutateRequest(rn, checkAndMutate.getRow(), + checkAndMutate.getFamily(), checkAndMutate.getQualifier(), + checkAndMutate.getCompareOp(), checkAndMutate.getValue(), + checkAndMutate.getFilter(), checkAndMutate.getTimeRange(), m, nonceGroup, nonce), + (c, r) -> ResponseConverter.getCheckAndMutateResult(r, c.cellScanner()))) + .call(); } else if (checkAndMutate.getAction() instanceof RowMutations) { RowMutations rowMutations = (RowMutations) checkAndMutate.getAction(); validatePutsInRowMutations(rowMutations, conn.connConf.getMaxKeyValueSize()); long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); return RawAsyncTableImpl.this - . newCaller(checkAndMutate.getRow(), - rowMutations.getMaxPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.this - . 
mutateRow(controller, loc, stub, - rowMutations, - (rn, rm) -> RequestConverter.buildMultiRequest(rn, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), checkAndMutate.getValue(), - checkAndMutate.getFilter(), checkAndMutate.getTimeRange(), rm, nonceGroup, - nonce), - resp -> resp)) - .call(); + . newCaller(checkAndMutate.getRow(), rowMutations.getMaxPriority(), + rpcTimeoutNs) + .action((controller, loc, stub) -> RawAsyncTableImpl.this. mutateRow(controller, loc, stub, rowMutations, + (rn, rm) -> RequestConverter.buildMultiRequest(rn, checkAndMutate.getRow(), + checkAndMutate.getFamily(), checkAndMutate.getQualifier(), + checkAndMutate.getCompareOp(), checkAndMutate.getValue(), + checkAndMutate.getFilter(), checkAndMutate.getTimeRange(), rm, nonceGroup, nonce), + resp -> resp)) + .call(); } else { CompletableFuture future = new CompletableFuture<>(); future.completeExceptionally(new DoNotRetryIOException( - "CheckAndMutate doesn't support " + checkAndMutate.getAction().getClass().getName())); + "CheckAndMutate doesn't support " + checkAndMutate.getAction().getClass().getName())); return future; } }, supplier); @@ -562,20 +562,19 @@ class RawAsyncTableImpl implements AsyncTable { @Override public List> - checkAndMutate(List checkAndMutates) { + checkAndMutate(List checkAndMutates) { final Supplier supplier = newTableOperationSpanBuilder().setOperation(checkAndMutates) - .setContainerOperations(checkAndMutates); + .setContainerOperations(checkAndMutates); return tracedFutures(() -> batch(checkAndMutates, rpcTimeoutNs).stream() - .map(f -> f.thenApply(r -> (CheckAndMutateResult) r)).collect(toList()), - supplier); + .map(f -> f.thenApply(r -> (CheckAndMutateResult) r)).collect(toList()), supplier); } // We need the MultiRequest when constructing the org.apache.hadoop.hbase.client.MultiResponse, // so here I write a new method as I do not want to change the abstraction of call method. @SuppressWarnings("unchecked") private CompletableFuture mutateRow(HBaseRpcController controller, - HRegionLocation loc, ClientService.Interface stub, RowMutations mutation, - Converter reqConvert, Function respConverter) { + HRegionLocation loc, ClientService.Interface stub, RowMutations mutation, + Converter reqConvert, Function respConverter) { CompletableFuture future = new CompletableFuture<>(); try { byte[] regionName = loc.getRegion().getRegionName(); @@ -589,16 +588,15 @@ class RawAsyncTableImpl implements AsyncTable { } else { try { org.apache.hadoop.hbase.client.MultiResponse multiResp = - ResponseConverter.getResults(req, resp, controller.cellScanner()); + ResponseConverter.getResults(req, resp, controller.cellScanner()); ConnectionUtils.updateStats(conn.getStatisticsTracker(), conn.getConnectionMetrics(), loc.getServerName(), multiResp); Throwable ex = multiResp.getException(regionName); if (ex != null) { - future - .completeExceptionally(ex instanceof IOException ? ex - : new IOException( - "Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()), - ex)); + future.completeExceptionally(ex instanceof IOException + ? 
ex + : new IOException( + "Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()), ex)); } else { future.complete( respConverter.apply((RES) multiResp.getResults().get(regionName).result.get(0))); @@ -621,8 +619,9 @@ class RawAsyncTableImpl implements AsyncTable { long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); final Supplier supplier = - newTableOperationSpanBuilder().setOperation(mutations).setContainerOperations(mutations); - return tracedFuture(() -> this + newTableOperationSpanBuilder().setOperation(mutations).setContainerOperations(mutations); + return tracedFuture( + () -> this . newCaller(mutations.getRow(), mutations.getMaxPriority(), writeRpcTimeoutNs) .action((controller, loc, stub) -> this. mutateRow(controller, loc, stub, mutations, (rn, rm) -> RequestConverter.buildMultiRequest(rn, rm, nonceGroup, nonce), @@ -661,7 +660,7 @@ class RawAsyncTableImpl implements AsyncTable { scan.getMaxResultSize() > 0 ? scan.getMaxResultSize() : defaultScannerMaxResultSize); final Scan scanCopy = ReflectionUtils.newInstance(scan.getClass(), scan); final AsyncTableResultScanner scanner = - new AsyncTableResultScanner(tableName, scanCopy, maxCacheSize); + new AsyncTableResultScanner(tableName, scanCopy, maxCacheSize); scan(scan, scanner); return scanner; } @@ -693,34 +692,34 @@ class RawAsyncTableImpl implements AsyncTable { @Override public List> get(List gets) { final Supplier supplier = newTableOperationSpanBuilder().setOperation(gets) - .setContainerOperations(HBaseSemanticAttributes.Operation.GET); + .setContainerOperations(HBaseSemanticAttributes.Operation.GET); return tracedFutures(() -> batch(gets, readRpcTimeoutNs), supplier); } @Override public List> put(List puts) { final Supplier supplier = newTableOperationSpanBuilder().setOperation(puts) - .setContainerOperations(HBaseSemanticAttributes.Operation.PUT); + .setContainerOperations(HBaseSemanticAttributes.Operation.PUT); return tracedFutures(() -> voidMutate(puts), supplier); } @Override public List> delete(List deletes) { final Supplier supplier = newTableOperationSpanBuilder().setOperation(deletes) - .setContainerOperations(HBaseSemanticAttributes.Operation.DELETE); + .setContainerOperations(HBaseSemanticAttributes.Operation.DELETE); return tracedFutures(() -> voidMutate(deletes), supplier); } @Override public List> batch(List actions) { final Supplier supplier = - newTableOperationSpanBuilder().setOperation(actions).setContainerOperations(actions); + newTableOperationSpanBuilder().setOperation(actions).setContainerOperations(actions); return tracedFutures(() -> batch(actions, rpcTimeoutNs), supplier); } private List> voidMutate(List actions) { return this. batch(actions, writeRpcTimeoutNs).stream() - .map(f -> f. thenApply(r -> null)).collect(toList()); + .map(f -> f. 
thenApply(r -> null)).collect(toList()); } private List> batch(List actions, long rpcTimeoutNs) { @@ -743,8 +742,7 @@ class RawAsyncTableImpl implements AsyncTable { .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts) - .startLogErrorsCnt(startLogErrorsCnt).call(); + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).call(); } @Override @@ -773,9 +771,9 @@ class RawAsyncTableImpl implements AsyncTable { } private CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, RegionInfo region, byte[] row) { + ServiceCaller callable, RegionInfo region, byte[] row) { RegionCoprocessorRpcChannelImpl channel = new RegionCoprocessorRpcChannelImpl(conn, tableName, - region, row, rpcTimeoutNs, operationTimeoutNs); + region, row, rpcTimeoutNs, operationTimeoutNs); final Span span = Span.current(); S stub = stubMaker.apply(channel); CompletableFuture future = new CompletableFuture<>(); @@ -799,7 +797,7 @@ class RawAsyncTableImpl implements AsyncTable { @Override public CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, byte[] row) { + ServiceCaller callable, byte[] row) { return coprocessorService(stubMaker, callable, null, row); } @@ -821,9 +819,9 @@ class RawAsyncTableImpl implements AsyncTable { } private void onLocateComplete(Function stubMaker, - ServiceCaller callable, CoprocessorCallback callback, List locs, - byte[] endKey, boolean endKeyInclusive, AtomicBoolean locateFinished, - AtomicInteger unfinishedRequest, HRegionLocation loc, Throwable error) { + ServiceCaller callable, CoprocessorCallback callback, List locs, + byte[] endKey, boolean endKeyInclusive, AtomicBoolean locateFinished, + AtomicInteger unfinishedRequest, HRegionLocation loc, Throwable error) { final Span span = Span.current(); if (error != null) { callback.onError(error); @@ -859,7 +857,7 @@ class RawAsyncTableImpl implements AsyncTable { } private final class CoprocessorServiceBuilderImpl - implements CoprocessorServiceBuilder { + implements CoprocessorServiceBuilder { private final Function stubMaker; @@ -876,7 +874,7 @@ class RawAsyncTableImpl implements AsyncTable { private boolean endKeyInclusive; public CoprocessorServiceBuilderImpl(Function stubMaker, - ServiceCaller callable, CoprocessorCallback callback) { + ServiceCaller callable, CoprocessorCallback callback) { this.stubMaker = Preconditions.checkNotNull(stubMaker, "stubMaker is null"); this.callable = Preconditions.checkNotNull(callable, "callable is null"); this.callback = Preconditions.checkNotNull(callback, "callback is null"); @@ -886,8 +884,8 @@ class RawAsyncTableImpl implements AsyncTable { public CoprocessorServiceBuilderImpl fromRow(byte[] startKey, boolean inclusive) { this.startKey = Preconditions.checkNotNull(startKey, "startKey is null. Consider using" - + " an empty byte array, or just do not call this method if you want to start selection" - + " from the first region"); + + " an empty byte array, or just do not call this method if you want to start selection" + + " from the first region"); this.startKeyInclusive = inclusive; return this; } @@ -896,8 +894,8 @@ class RawAsyncTableImpl implements AsyncTable { public CoprocessorServiceBuilderImpl toRow(byte[] endKey, boolean inclusive) { this.endKey = Preconditions.checkNotNull(endKey, "endKey is null. 
Consider using" - + " an empty byte array, or just do not call this method if you want to continue" - + " selection to the last region"); + + " an empty byte array, or just do not call this method if you want to continue" + + " selection to the last region"); this.endKeyInclusive = inclusive; return this; } @@ -905,12 +903,12 @@ class RawAsyncTableImpl implements AsyncTable { @Override public void execute() { final Span span = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC).build(); + .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC).build(); try (Scope ignored = span.makeCurrent()) { final RegionLocateType regionLocateType = - startKeyInclusive ? RegionLocateType.CURRENT : RegionLocateType.AFTER; + startKeyInclusive ? RegionLocateType.CURRENT : RegionLocateType.AFTER; final CompletableFuture future = conn.getLocator() - .getRegionLocation(tableName, startKey, regionLocateType, operationTimeoutNs); + .getRegionLocation(tableName, startKey, regionLocateType, operationTimeoutNs); addListener(future, (loc, error) -> { try (Scope ignored1 = span.makeCurrent()) { onLocateComplete(stubMaker, callable, callback, new ArrayList<>(), endKey, @@ -923,8 +921,8 @@ class RawAsyncTableImpl implements AsyncTable { @Override public CoprocessorServiceBuilder coprocessorService( - Function stubMaker, ServiceCaller callable, - CoprocessorCallback callback) { + Function stubMaker, ServiceCaller callable, + CoprocessorCallback callback) { return new CoprocessorServiceBuilderImpl<>(stubMaker, callable, callback); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java index ece91bd1e0e..301c4315d25 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,31 +15,30 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; import java.io.InterruptedIOException; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.util.Bytes; /** - * Similar to RegionServerCallable but for the AdminService interface. This service callable - * assumes a Table and row and thus does region locating similar to RegionServerCallable. - * Works against Admin stub rather than Client stub. + * Similar to RegionServerCallable but for the AdminService interface. 
This service callable assumes + * a Table and row and thus does region locating similar to RegionServerCallable. Works against + * Admin stub rather than Client stub. */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", - justification="stub used by ipc") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", + justification = "stub used by ipc") @InterfaceAudience.Private public abstract class RegionAdminServiceCallable implements RetryingCallable { protected AdminService.BlockingInterface stub; @@ -53,20 +52,20 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< protected final int replicaId; public RegionAdminServiceCallable(ClusterConnection connection, - RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] row) { + RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] row) { this(connection, rpcControllerFactory, null, tableName, row); } public RegionAdminServiceCallable(ClusterConnection connection, - RpcControllerFactory rpcControllerFactory, HRegionLocation location, - TableName tableName, byte[] row) { - this(connection, rpcControllerFactory, location, - tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); + RpcControllerFactory rpcControllerFactory, HRegionLocation location, TableName tableName, + byte[] row) { + this(connection, rpcControllerFactory, location, tableName, row, + RegionReplicaUtil.DEFAULT_REPLICA_ID); } public RegionAdminServiceCallable(ClusterConnection connection, - RpcControllerFactory rpcControllerFactory, HRegionLocation location, - TableName tableName, byte[] row, int replicaId) { + RpcControllerFactory rpcControllerFactory, HRegionLocation location, TableName tableName, + byte[] row, int replicaId) { this.connection = connection; this.rpcControllerFactory = rpcControllerFactory; this.location = location; @@ -110,8 +109,8 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< @Override public void throwable(Throwable t, boolean retrying) { if (location != null) { - connection.updateCachedLocations(tableName, location.getRegionInfo().getRegionName(), row, - t, location.getServerName()); + connection.updateCachedLocations(tableName, location.getRegionInfo().getRegionName(), row, t, + location.getServerName()); } } @@ -122,10 +121,10 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< return this.connection; } - //subclasses can override this. + // subclasses can override this. 
protected String getExceptionMessage() { - return "There is no location" + " table=" + tableName - + " ,replica=" + replicaId + ", row=" + Bytes.toStringBinary(row); + return "There is no location" + " table=" + tableName + " ,replica=" + replicaId + ", row=" + + Bytes.toStringBinary(row); } @Override @@ -138,10 +137,9 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< return ConnectionUtils.getPauseTime(pause, tries); } - public static RegionLocations getRegionLocations( - ClusterConnection connection, TableName tableName, byte[] row, - boolean useCache, int replicaId) - throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { + public static RegionLocations getRegionLocations(ClusterConnection connection, + TableName tableName, byte[] row, boolean useCache, int replicaId) + throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { RegionLocations rl; try { rl = connection.locateRegion(tableName, row, useCache, true, replicaId); @@ -186,10 +184,9 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< /** * Run RPC call. - * @param rpcController PayloadCarryingRpcController is a mouthful but it at a minimum is a - * facade on protobuf so we don't have to put protobuf everywhere; we can keep it behind this - * class. - * @throws Exception + * @param rpcController PayloadCarryingRpcController is a mouthful but it at a minimum is a facade + * on protobuf so we don't have to put protobuf everywhere; we can keep it + * behind this class. n */ protected abstract T call(HBaseRpcController rpcController) throws Exception; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java index de8926b17d4..0f541c55944 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,28 +17,26 @@ */ package org.apache.hadoop.hbase.client; -import io.opentelemetry.context.Context; -import io.opentelemetry.context.Scope; -import java.io.IOException; - -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse; - import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.RpcController; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.Scope; +import java.io.IOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse; /** * Provides clients with an RPC connection to call Coprocessor Endpoint - * {@link com.google.protobuf.Service}s - * against a given table region. An instance of this class may be obtained - * by calling {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}, + * {@link com.google.protobuf.Service}s against a given table region. An instance of this class may + * be obtained by calling {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}, * but should normally only be used in creating a new {@link com.google.protobuf.Service} stub to * call the endpoint methods. 
* @see org.apache.hadoop.hbase.client.Table#coprocessorService(byte[]) @@ -47,7 +45,7 @@ import com.google.protobuf.RpcController; class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { private static final Logger LOG = LoggerFactory.getLogger(RegionCoprocessorRpcChannel.class); private final TableName table; - private final byte [] row; + private final byte[] row; private final ClusterConnection conn; private byte[] lastRegion; private final int operationTimeout; @@ -55,9 +53,9 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { /** * Constructor - * @param conn connection to use + * @param conn connection to use * @param table to connect to - * @param row to locate region with + * @param row to locate region with */ RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] row) { this.table = table; @@ -69,9 +67,8 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { @Override protected Message callExecService(final RpcController controller, - final Descriptors.MethodDescriptor method, final Message request, - final Message responsePrototype) - throws IOException { + final Descriptors.MethodDescriptor method, final Message request, + final Message responsePrototype) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Call: " + method.getName() + ", " + request.toString()); } @@ -82,19 +79,18 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { ClientServiceCallable callable = new ClientServiceCallable(this.conn, this.table, this.row, this.conn.getRpcControllerFactory().newController(), HConstants.PRIORITY_UNSET) { - @Override - protected CoprocessorServiceResponse rpcCall() throws Exception { - try (Scope ignored = context.makeCurrent()) { - byte[] regionName = getLocation().getRegionInfo().getRegionName(); - CoprocessorServiceRequest csr = - CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request, row, regionName); - return getStub().execService(getRpcController(), csr); + @Override + protected CoprocessorServiceResponse rpcCall() throws Exception { + try (Scope ignored = context.makeCurrent()) { + byte[] regionName = getLocation().getRegionInfo().getRegionName(); + CoprocessorServiceRequest csr = + CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request, row, regionName); + return getStub().execService(getRpcController(), csr); + } } - } - }; - CoprocessorServiceResponse result = - this.rpcCallerFactory. newCaller().callWithRetries(callable, - operationTimeout); + }; + CoprocessorServiceResponse result = this.rpcCallerFactory + . 
newCaller().callWithRetries(callable, operationTimeout); this.lastRegion = result.getRegion().getValue().toByteArray(); return CoprocessorRpcUtils.getResponse(result, responsePrototype); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java index 009936ac86b..8acadafbb06 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; @@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; @@ -57,7 +59,7 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel { private final long operationTimeoutNs; RegionCoprocessorRpcChannelImpl(AsyncConnectionImpl conn, TableName tableName, RegionInfo region, - byte[] row, long rpcTimeoutNs, long operationTimeoutNs) { + byte[] row, long rpcTimeoutNs, long operationTimeoutNs) { this.conn = conn; this.tableName = tableName; this.region = region; @@ -67,15 +69,16 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel { } private CompletableFuture rpcCall(MethodDescriptor method, Message request, - Message responsePrototype, HBaseRpcController controller, HRegionLocation loc, - ClientService.Interface stub) { + Message responsePrototype, HBaseRpcController controller, HRegionLocation loc, + ClientService.Interface stub) { final Context context = Context.current(); CompletableFuture future = new CompletableFuture<>(); - if (region != null - && !Bytes.equals(loc.getRegionInfo().getRegionName(), region.getRegionName())) { + if ( + region != null && !Bytes.equals(loc.getRegionInfo().getRegionName(), region.getRegionName()) + ) { future.completeExceptionally(new DoNotRetryIOException( - "Region name is changed, expected " + region.getRegionNameAsString() + ", actual " - + loc.getRegionInfo().getRegionNameAsString())); + "Region name is changed, expected " + region.getRegionNameAsString() + ", actual " + + loc.getRegionInfo().getRegionNameAsString())); return future; } CoprocessorServiceRequest csr = CoprocessorRpcUtils.getCoprocessorServiceRequest(method, @@ -98,18 +101,15 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { final Context context = Context.current(); - addListener( - conn.callerFactory. 
single().table(tableName).row(row) - .locateType(RegionLocateType.CURRENT).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) - .action((c, l, s) -> { - try (Scope ignored = context.makeCurrent()) { - return rpcCall(method, request, responsePrototype, c, l, s); - } - }).call(), - (r, e) -> { + addListener(conn.callerFactory. single().table(tableName).row(row) + .locateType(RegionLocateType.CURRENT).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS).action((c, l, s) -> { + try (Scope ignored = context.makeCurrent()) { + return rpcCall(method, request, responsePrototype, c, l, s); + } + }).call(), (r, e) -> { try (Scope ignored = context.makeCurrent()) { if (e != null) { ((ClientCoprocessorRpcController) controller).setFailed(e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java index 68a6927c569..720e97df636 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.util.Bytes; - -import org.apache.hbase.thirdparty.com.google.common.base.Objects; import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Objects; /** - * Represents a coprocessor service method execution against a single region. While coprocessor - * service calls are performed against a region, this class implements {@link Row} in order to - * make use of the AsyncProcess framework for batching multi-region calls per region server. - * - *

Note: This class should not be instantiated directly. Use
- * HTable#batchCoprocessorService instead.
+ * Represents a coprocessor service method execution against a single region. While coprocessor
+ * service calls are performed against a region, this class implements {@link Row} in order to make
+ * use of the AsyncProcess framework for batching multi-region calls per region server.
+ *
+ * Note: This class should not be instantiated directly. Use HTable#batchCoprocessorService
+ * instead.
+ *
    */ @InterfaceAudience.Private public class RegionCoprocessorServiceExec implements Row { @@ -47,8 +45,8 @@ public class RegionCoprocessorServiceExec implements Row { private final MethodDescriptor method; private final Message request; - public RegionCoprocessorServiceExec(byte[] region, byte[] startKey, - MethodDescriptor method, Message request) { + public RegionCoprocessorServiceExec(byte[] region, byte[] startKey, MethodDescriptor method, + Message request) { this.region = region; this.startKey = startKey; this.method = method; @@ -104,14 +102,9 @@ public class RegionCoprocessorServiceExec implements Row { @Override public String toString() { StringBuilder builder = new StringBuilder(); - builder.append("region:") - .append(Bytes.toStringBinary(region)) - .append(", startKey:") - .append(Bytes.toStringBinary(startKey)) - .append(", method:") - .append(method.getFullName()) - .append(", request:") - .append(request); + builder.append("region:").append(Bytes.toStringBinary(region)).append(", startKey:") + .append(Bytes.toStringBinary(startKey)).append(", method:").append(method.getFullName()) + .append(", request:").append(request); return builder.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 81dc63346c4..b9a5969b4d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,33 +36,30 @@ import org.apache.hadoop.hbase.util.MD5Hash; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.IOUtils; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Information about a region. A region is a range of keys in the whole keyspace - * of a table, an identifier (a timestamp) for differentiating between subset - * ranges (after region split) and a replicaId for differentiating the instance - * for the same range and some status information about the region. - * - * The region has a unique name which consists of the following fields: + * Information about a region. A region is a range of keys in the whole keyspace of a table, an + * identifier (a timestamp) for differentiating between subset ranges (after region split) and a + * replicaId for differentiating the instance for the same range and some status information about + * the region. The region has a unique name which consists of the following fields: *
- * tableName : The name of the table
- * startKey : The startKey for the region.
- * regionId : A timestamp when the region is created.
- * replicaId : An id starting from 0 to differentiate replicas of the
- * same region range but hosted in separated servers. The same region range can
- * be hosted in multiple locations.
- * encodedName : An MD5 encoded string for the region name.
+ * tableName : The name of the table
+ * startKey : The startKey for the region.
+ * regionId : A timestamp when the region is created.
+ * replicaId : An id starting from 0 to differentiate replicas of the same region range but
+ * hosted in separated servers. The same region range can be hosted in multiple locations.
+ * encodedName : An MD5 encoded string for the region name.
+ *
- *
- * Other than the fields in the region name, region info contains:
+ *
+ * Other than the fields in the region name, region info contains:
 *
- * endKey : the endKey for the region (exclusive)
- * split : Whether the region is split
- * offline : Whether the region is offline
+ * endKey : the endKey for the region (exclusive)
+ * split : Whether the region is split
+ * offline : Whether the region is offline
- *
    - * */ @InterfaceAudience.Public public interface RegionInfo extends Comparable { @@ -73,12 +69,12 @@ public interface RegionInfo extends Comparable { @Deprecated @InterfaceAudience.Private // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24896 - RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), - RegionInfo.DEFAULT_REPLICA_ID); + RegionInfo UNDEFINED = + new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), RegionInfo.DEFAULT_REPLICA_ID); /** - * Separator used to demarcate the encodedName in a region name - * in the new format. See description on new format above. + * Separator used to demarcate the encodedName in a region name in the new format. See description + * on new format above. */ @InterfaceAudience.Private int ENC_SEPARATOR = '.'; @@ -90,79 +86,73 @@ public interface RegionInfo extends Comparable { int DEFAULT_REPLICA_ID = 0; /** - * to keep appended int's sorted in string format. Only allows 2 bytes - * to be sorted for replicaId. + * to keep appended int's sorted in string format. Only allows 2 bytes to be sorted for replicaId. */ @InterfaceAudience.Private String REPLICA_ID_FORMAT = "%04X"; @InterfaceAudience.Private - byte REPLICA_ID_DELIMITER = (byte)'_'; + byte REPLICA_ID_DELIMITER = (byte) '_'; @InterfaceAudience.Private String INVALID_REGION_NAME_FORMAT_MESSAGE = "Invalid regionName format"; @InterfaceAudience.Private - Comparator COMPARATOR - = (RegionInfo lhs, RegionInfo rhs) -> { - if (rhs == null) { - return 1; - } - - // Are regions of same table? - int result = lhs.getTable().compareTo(rhs.getTable()); - if (result != 0) { - return result; - } - - // Compare start keys. - result = Bytes.compareTo(lhs.getStartKey(), rhs.getStartKey()); - if (result != 0) { - return result; - } - - // Compare end keys. - result = Bytes.compareTo(lhs.getEndKey(), rhs.getEndKey()); - - if (result != 0) { - if (lhs.getStartKey().length != 0 - && lhs.getEndKey().length == 0) { - return 1; // this is last region - } - if (rhs.getStartKey().length != 0 - && rhs.getEndKey().length == 0) { - return -1; // o is the last region - } - return result; - } - - // regionId is usually milli timestamp -- this defines older stamps - // to be "smaller" than newer stamps in sort order. - if (lhs.getRegionId() > rhs.getRegionId()) { - return 1; - } else if (lhs.getRegionId() < rhs.getRegionId()) { - return -1; - } - - int replicaDiff = lhs.getReplicaId() - rhs.getReplicaId(); - if (replicaDiff != 0) { - return replicaDiff; - } - - if (lhs.isOffline() == rhs.isOffline()) { - return 0; - } - if (lhs.isOffline()) { - return -1; - } - + Comparator COMPARATOR = (RegionInfo lhs, RegionInfo rhs) -> { + if (rhs == null) { return 1; + } + + // Are regions of same table? + int result = lhs.getTable().compareTo(rhs.getTable()); + if (result != 0) { + return result; + } + + // Compare start keys. + result = Bytes.compareTo(lhs.getStartKey(), rhs.getStartKey()); + if (result != 0) { + return result; + } + + // Compare end keys. + result = Bytes.compareTo(lhs.getEndKey(), rhs.getEndKey()); + + if (result != 0) { + if (lhs.getStartKey().length != 0 && lhs.getEndKey().length == 0) { + return 1; // this is last region + } + if (rhs.getStartKey().length != 0 && rhs.getEndKey().length == 0) { + return -1; // o is the last region + } + return result; + } + + // regionId is usually milli timestamp -- this defines older stamps + // to be "smaller" than newer stamps in sort order. 
+ if (lhs.getRegionId() > rhs.getRegionId()) { + return 1; + } else if (lhs.getRegionId() < rhs.getRegionId()) { + return -1; + } + + int replicaDiff = lhs.getReplicaId() - rhs.getReplicaId(); + if (replicaDiff != 0) { + return replicaDiff; + } + + if (lhs.isOffline() == rhs.isOffline()) { + return 0; + } + if (lhs.isOffline()) { + return -1; + } + + return 1; }; - /** - * @return Return a short, printable name for this region - * (usually encoded name) for us logging. + * @return Return a short, printable name for this region (usually encoded name) for us logging. */ String getShortNameToLog(); @@ -175,7 +165,7 @@ public interface RegionInfo extends Comparable { * @return the regionName as an array of bytes. * @see #getRegionNameAsString() */ - byte [] getRegionName(); + byte[] getRegionName(); /** * @return Region name as a String for use in logging, etc. @@ -190,17 +180,17 @@ public interface RegionInfo extends Comparable { /** * @return the encoded region name as an array of bytes. */ - byte [] getEncodedNameAsBytes(); + byte[] getEncodedNameAsBytes(); /** * @return the startKey. */ - byte [] getStartKey(); + byte[] getStartKey(); /** * @return the endKey. */ - byte [] getEndKey(); + byte[] getEndKey(); /** * @return current table name of the region @@ -239,10 +229,9 @@ public interface RegionInfo extends Comparable { boolean isMetaRegion(); /** - * @return true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * @return true if the given inclusive range of rows is fully contained by this region. For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will + * return true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. end < start) */ boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey); @@ -255,28 +244,26 @@ public interface RegionInfo extends Comparable { /** * Does region name contain its encoded name? * @param regionName region name - * @return boolean indicating if this a new format region - * name which contains its encoded name. + * @return boolean indicating if this a new format region name which contains its encoded name. */ @InterfaceAudience.Private static boolean hasEncodedName(final byte[] regionName) { // check if region name ends in ENC_SEPARATOR - return (regionName.length >= 1) && - (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); + return (regionName.length >= 1) + && (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); } /** * @return the encodedName */ @InterfaceAudience.Private - static String encodeRegionName(final byte [] regionName) { + static String encodeRegionName(final byte[] regionName) { String encodedName; if (hasEncodedName(regionName)) { // region is in new format: // ,,/encodedName/ - encodedName = Bytes.toString(regionName, - regionName.length - MD5_HEX_LENGTH - 1, - MD5_HEX_LENGTH); + encodedName = + Bytes.toString(regionName, regionName.length - MD5_HEX_LENGTH - 1, MD5_HEX_LENGTH); } else { // old format region name. First hbase:meta region also // use this format.EncodedName is the JenkinsHash value. @@ -308,16 +295,16 @@ public interface RegionInfo extends Comparable { } /** - * @return Return a String of short, printable names for hris - * (usually encoded name) for us logging. 
+ * @return Return a String of short, printable names for hris (usually encoded name) + * for us logging. */ - static String getShortNameToLog(RegionInfo...hris) { + static String getShortNameToLog(RegionInfo... hris) { return getShortNameToLog(Arrays.asList(hris)); } /** * @return Return a String of short, printable names for hris (usually encoded name) - * for us logging. + * for us logging. */ static String getShortNameToLog(final List ris) { return ris.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()).toString(); @@ -332,7 +319,7 @@ public interface RegionInfo extends Comparable { // This method should never be used. Its awful doing parse from bytes. // It is fallback in case we can't get the tablename any other way. Could try removing it. // Keeping it Audience Private so can remove at later date. - static TableName getTable(final byte [] regionName) { + static TableName getTable(final byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == HConstants.DELIMITER) { @@ -343,7 +330,7 @@ public interface RegionInfo extends Comparable { if (offset <= 0) { throw new IllegalArgumentException("offset=" + offset); } - byte[] buff = new byte[offset]; + byte[] buff = new byte[offset]; System.arraycopy(regionName, 0, buff, 0, offset); return TableName.valueOf(buff); } @@ -375,7 +362,7 @@ public interface RegionInfo extends Comparable { Integer.parseInt(encodedName); // If this is a valid integer, it could be hbase:meta's encoded region name. return true; - } catch(NumberFormatException er) { + } catch (NumberFormatException er) { return false; } } @@ -384,21 +371,19 @@ public interface RegionInfo extends Comparable { } /** - * @return A deserialized {@link RegionInfo} - * or null if we failed deserialize or passed bytes null + * @return A deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null */ @InterfaceAudience.Private - static RegionInfo parseFromOrNull(final byte [] bytes) { + static RegionInfo parseFromOrNull(final byte[] bytes) { if (bytes == null) return null; return parseFromOrNull(bytes, 0, bytes.length); } /** - * @return A deserialized {@link RegionInfo} or null - * if we failed deserialize or passed bytes null + * @return A deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null */ @InterfaceAudience.Private - static RegionInfo parseFromOrNull(final byte [] bytes, int offset, int len) { + static RegionInfo parseFromOrNull(final byte[] bytes, int offset, int len) { if (bytes == null || len <= 0) return null; try { return parseFrom(bytes, offset, len); @@ -412,20 +397,20 @@ public interface RegionInfo extends Comparable { * @return A deserialized {@link RegionInfo} */ @InterfaceAudience.Private - static RegionInfo parseFrom(final byte [] bytes) throws DeserializationException { + static RegionInfo parseFrom(final byte[] bytes) throws DeserializationException { if (bytes == null) return null; return parseFrom(bytes, 0, bytes.length); } /** - * @param bytes A pb RegionInfo serialized with a pb magic prefix. + * @param bytes A pb RegionInfo serialized with a pb magic prefix. 
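As an illustration of the RegionInfo helpers being reformatted in this hunk, here is a minimal sketch that builds two regions, sorts them with RegionInfo.COMPARATOR, and round-trips one through its protobuf form. The table name and the sketch class name are invented, and it only assumes the hbase-client jar on the classpath.

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionInfoSortSketch {
      public static void main(String[] args) {
        TableName tn = TableName.valueOf("demo"); // hypothetical table name
        RegionInfo a = RegionInfoBuilder.newBuilder(tn)
            .setStartKey(Bytes.toBytes("m")).setEndKey(Bytes.toBytes("z")).build();
        RegionInfo b = RegionInfoBuilder.newBuilder(tn)
            .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("m")).build();
        List<RegionInfo> regions = new ArrayList<>();
        regions.add(a);
        regions.add(b);
        // COMPARATOR orders by table, then start key, end key, regionId, replicaId, offline flag.
        regions.sort(RegionInfo.COMPARATOR);
        // Round-trip through the pb form; parseFromOrNull returns null instead of throwing.
        byte[] serialized = RegionInfo.toByteArray(regions.get(0));
        RegionInfo parsed = RegionInfo.parseFromOrNull(serialized);
        // Short printable names (usually the encoded names) for logging.
        System.out.println(RegionInfo.getShortNameToLog(parsed, regions.get(1)));
      }
    }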
* @param offset starting point in the byte array - * @param len length to read on the byte array + * @param len length to read on the byte array * @return A deserialized {@link RegionInfo} */ @InterfaceAudience.Private - static RegionInfo parseFrom(final byte [] bytes, int offset, int len) - throws DeserializationException { + static RegionInfo parseFrom(final byte[] bytes, int offset, int len) + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes, offset, len)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -456,14 +441,12 @@ public interface RegionInfo extends Comparable { } /** - * Check whether two regions are adjacent; i.e. lies just before or just - * after in a table. + * Check whether two regions are adjacent; i.e. lies just before or just after in a table. * @return true if two regions are adjacent */ static boolean areAdjacent(RegionInfo regionA, RegionInfo regionB) { if (regionA == null || regionB == null) { - throw new IllegalArgumentException( - "Can't check whether adjacent for null region"); + throw new IllegalArgumentException("Can't check whether adjacent for null region"); } if (!regionA.getTable().equals(regionB.getTable())) { return false; @@ -481,8 +464,8 @@ public interface RegionInfo extends Comparable { * @return This instance serialized as protobuf w/ a magic pb prefix. * @see #parseFrom(byte[]) */ - static byte [] toByteArray(RegionInfo ri) { - byte [] bytes = ProtobufUtil.toRegionInfo(ri).toByteArray(); + static byte[] toByteArray(RegionInfo ri) { + byte[] bytes = ProtobufUtil.toRegionInfo(ri).toByteArray(); return ProtobufUtil.prependPBMagic(bytes); } @@ -490,7 +473,7 @@ public interface RegionInfo extends Comparable { * Use logging. * @param encodedRegionName The encoded regionname. * @return hbase:meta if passed 1028785192 else returns - * encodedRegionName + * encodedRegionName */ static String prettyPrint(final String encodedRegionName) { if (encodedRegionName.equals("1028785192")) { @@ -501,67 +484,67 @@ public interface RegionInfo extends Comparable { /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id */ - static byte [] createRegionName(final TableName tableName, final byte[] startKey, - final long regionid, boolean newFormat) { + static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, boolean newFormat) { return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). 
* @return Region name made of passed tableName, startKey and id */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final String id, boolean newFormat) { + static byte[] createRegionName(final TableName tableName, final byte[] startKey, final String id, + boolean newFormat) { return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey, id and replicaId */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final long regionid, int replicaId, boolean newFormat) { - return createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), - replicaId, newFormat); + static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, int replicaId, boolean newFormat) { + return createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), replicaId, + newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final byte[] id, boolean newFormat) { + static byte[] createRegionName(final TableName tableName, final byte[] startKey, final byte[] id, + boolean newFormat) { return createRegionName(tableName, startKey, id, DEFAULT_REPLICA_ID, newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format * @return Region name made of passed tableName, startKey, id and replicaId */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final byte[] id, final int replicaId, boolean newFormat) { - int len = tableName.getName().length + 2 + id.length + (startKey == null? 0: startKey.length); + static byte[] createRegionName(final TableName tableName, final byte[] startKey, final byte[] id, + final int replicaId, boolean newFormat) { + int len = tableName.getName().length + 2 + id.length + (startKey == null ? 
0 : startKey.length); if (newFormat) { len += MD5_HEX_LENGTH + 2; } @@ -575,7 +558,7 @@ public interface RegionInfo extends Comparable { len += 1 + replicaIdBytes.length; } - byte [] b = new byte [len]; + byte[] b = new byte[len]; int offset = tableName.getName().length; System.arraycopy(tableName.getName(), 0, b, 0, offset); @@ -603,11 +586,11 @@ public interface RegionInfo extends Comparable { // it to the byte buffer. // String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); - byte [] md5HashBytes = Bytes.toBytes(md5Hash); + byte[] md5HashBytes = Bytes.toBytes(md5Hash); if (md5HashBytes.length != MD5_HEX_LENGTH) { - System.out.println("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + - "; Got=" + md5HashBytes.length); + System.out.println( + "MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + "; Got=" + md5HashBytes.length); } // now append the bytes '..' to the end @@ -622,40 +605,38 @@ public interface RegionInfo extends Comparable { /** * Creates a RegionInfo object for MOB data. - * * @param tableName the name of the table * @return the MOB {@link RegionInfo}. */ static RegionInfo createMobRegionInfo(TableName tableName) { // Skipping reference to RegionInfoBuilder in this class. - return new MutableRegionInfo(tableName, Bytes.toBytes(".mob"), - HConstants.EMPTY_END_ROW, false, 0, DEFAULT_REPLICA_ID, false); + return new MutableRegionInfo(tableName, Bytes.toBytes(".mob"), HConstants.EMPTY_END_ROW, false, + 0, DEFAULT_REPLICA_ID, false); } /** * Separate elements of a regionName. - * @return Array of byte[] containing tableName, startKey and id OR null if - * not parseable as a region name. + * @return Array of byte[] containing tableName, startKey and id OR null if not parseable as a + * region name. * @throws IOException if not parseable as regionName. */ - static byte [][] parseRegionName(final byte[] regionName) throws IOException { - byte [][] result = parseRegionNameOrReturnNull(regionName); + static byte[][] parseRegionName(final byte[] regionName) throws IOException { + byte[][] result = parseRegionNameOrReturnNull(regionName); if (result == null) { - throw new IOException(INVALID_REGION_NAME_FORMAT_MESSAGE + ": " + Bytes.toStringBinary(regionName)); + throw new IOException( + INVALID_REGION_NAME_FORMAT_MESSAGE + ": " + Bytes.toStringBinary(regionName)); } return result; } /** - * Separate elements of a regionName. - * Region name is of the format: - * tablename,startkey,regionIdTimestamp[_replicaId][.encodedName.]. - * Startkey can contain the delimiter (',') so we parse from the start and then parse from - * the end. - * @return Array of byte[] containing tableName, startKey and id OR null if not parseable - * as a region name. + * Separate elements of a regionName. Region name is of the format: + * tablename,startkey,regionIdTimestamp[_replicaId][.encodedName.]. Startkey can + * contain the delimiter (',') so we parse from the start and then parse from the end. + * @return Array of byte[] containing tableName, startKey and id OR null if not parseable as a + * region name. 
*/ - static byte [][] parseRegionNameOrReturnNull(final byte[] regionName) { + static byte[][] parseRegionNameOrReturnNull(final byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == HConstants.DELIMITER) { @@ -672,9 +653,10 @@ public interface RegionInfo extends Comparable { int endOffset = regionName.length; // check whether regionName contains encodedName - if (regionName.length > MD5_HEX_LENGTH + 2 && - regionName[regionName.length-1] == ENC_SEPARATOR && - regionName[regionName.length-MD5_HEX_LENGTH-2] == ENC_SEPARATOR) { + if ( + regionName.length > MD5_HEX_LENGTH + 2 && regionName[regionName.length - 1] == ENC_SEPARATOR + && regionName[regionName.length - MD5_HEX_LENGTH - 2] == ENC_SEPARATOR + ) { endOffset = endOffset - MD5_HEX_LENGTH - 2; } @@ -682,10 +664,9 @@ public interface RegionInfo extends Comparable { byte[] replicaId = null; int idEndOffset = endOffset; for (int i = endOffset - 1; i > 0; i--) { - if (regionName[i] == REPLICA_ID_DELIMITER) { //replicaId may or may not be present + if (regionName[i] == REPLICA_ID_DELIMITER) { // replicaId may or may not be present replicaId = new byte[endOffset - i - 1]; - System.arraycopy(regionName, i + 1, replicaId, 0, - endOffset - i - 1); + System.arraycopy(regionName, i + 1, replicaId, 0, endOffset - i - 1); idEndOffset = i; // do not break, continue to search for id } @@ -697,16 +678,15 @@ public interface RegionInfo extends Comparable { if (offset == -1) { return null; } - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - if(offset != tableName.length + 1) { + byte[] startKey = HConstants.EMPTY_BYTE_ARRAY; + if (offset != tableName.length + 1) { startKey = new byte[offset - tableName.length - 1]; System.arraycopy(regionName, tableName.length + 1, startKey, 0, - offset - tableName.length - 1); + offset - tableName.length - 1); } - byte [] id = new byte[idEndOffset - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, - idEndOffset - offset - 1); - byte [][] elements = new byte[replicaId == null ? 3 : 4][]; + byte[] id = new byte[idEndOffset - offset - 1]; + System.arraycopy(regionName, offset + 1, id, 0, idEndOffset - offset - 1); + byte[][] elements = new byte[replicaId == null ? 3 : 4][]; elements[0] = tableName; elements[1] = startKey; elements[2] = id; @@ -718,10 +698,9 @@ public interface RegionInfo extends Comparable { /** * Serializes given RegionInfo's as a byte array. Use this instead of - * {@link RegionInfo#toByteArray(RegionInfo)} when - * writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads - * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can - * be used to read back the instances. + * {@link RegionInfo#toByteArray(RegionInfo)} when writing to a stream and you want to use the pb + * mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). + * {@link #parseDelimitedFrom(byte[], int, int)} can be used to read back the instances. * @param infos RegionInfo objects to serialize * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. */ @@ -743,30 +722,30 @@ public interface RegionInfo extends Comparable { } /** - * Use this instead of {@link RegionInfo#toByteArray(RegionInfo)} when writing to a stream and you want to use - * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). 
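A small, hedged sketch of the region-name helpers above (createRegionName, parseRegionName, encodeRegionName); the table name and start key are invented for illustration, and only the hbase-client classpath is assumed.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionNameSketch {
      public static void main(String[] args) throws IOException {
        TableName tn = TableName.valueOf("demo"); // hypothetical table
        byte[] startKey = Bytes.toBytes("row-0100");
        // New-format name: table,startKey,regionId[_replicaId].encodedName.
        byte[] name = RegionInfo.createRegionName(tn, startKey, System.currentTimeMillis(), true);
        // parseRegionName returns {tableName, startKey, id[, replicaId]} or throws IOException.
        byte[][] parts = RegionInfo.parseRegionName(name);
        System.out.println(Bytes.toString(parts[0]) + " / " + Bytes.toStringBinary(parts[1]));
        // The MD5-based encoded name is the trailing component of a new-format name.
        System.out.println(RegionInfo.encodeRegionName(name));
      }
    }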
+ * Use this instead of {@link RegionInfo#toByteArray(RegionInfo)} when writing to a stream and you + * want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what + * you want). * @return This instance serialized as a delimied protobuf w/ a magic pb prefix. */ - static byte [] toDelimitedByteArray(RegionInfo ri) throws IOException { + static byte[] toDelimitedByteArray(RegionInfo ri) throws IOException { return ProtobufUtil.toDelimitedByteArray(ProtobufUtil.toRegionInfo(ri)); } /** - * Parses an RegionInfo instance from the passed in stream. - * Presumes the RegionInfo was serialized to the stream with - * {@link #toDelimitedByteArray(RegionInfo)}. + * Parses an RegionInfo instance from the passed in stream. Presumes the RegionInfo was serialized + * to the stream with {@link #toDelimitedByteArray(RegionInfo)}. * @return An instance of RegionInfo. */ static RegionInfo parseFrom(final DataInputStream in) throws IOException { // I need to be able to move back in the stream if this is not a pb // serialization so I can do the Writable decoding instead. int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; - if (in.markSupported()) { //read it with mark() + byte[] pbuf = new byte[pblen]; + if (in.markSupported()) { // read it with mark() in.mark(pblen); } - //assumption: if Writable serialization, it should be longer than pblen. + // assumption: if Writable serialization, it should be longer than pblen. IOUtils.readFully(in, pbuf, 0, pblen); if (ProtobufUtil.isPBMagicPrefix(pbuf)) { return ProtobufUtil.toRegionInfo(HBaseProtos.RegionInfo.parseDelimitedFrom(in)); @@ -778,13 +757,13 @@ public interface RegionInfo extends Comparable { /** * Parses all the RegionInfo instances from the passed in stream until EOF. Presumes the * RegionInfo's were serialized to the stream with oDelimitedByteArray() - * @param bytes serialized bytes + * @param bytes serialized bytes * @param offset the start offset into the byte[] buffer * @param length how far we should read into the byte[] buffer * @return All the RegionInfos that are in the byte array. Keeps reading till we hit the end. */ - static List parseDelimitedFrom(final byte[] bytes, final int offset, - final int length) throws IOException { + static List parseDelimitedFrom(final byte[] bytes, final int offset, final int length) + throws IOException { if (bytes == null) { throw new IllegalArgumentException("Can't build an object with empty bytes array"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index cc42b96fb16..ef927fd3a55 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -28,18 +28,17 @@ public class RegionInfoBuilder { /** A non-capture group so that this can be embedded. */ public static final String ENCODED_REGION_NAME_REGEX = "(?:[a-f0-9]+)"; - //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. + // TODO: Move NO_HASH to HStoreFile which is really the only place it is used. public static final String NO_HASH = null; public static final RegionInfo UNDEFINED = RegionInfoBuilder.newBuilder(TableName.valueOf("__UNDEFINED__")).build(); /** - * RegionInfo for first meta region - * You cannot use this builder to make an instance of the {@link #FIRST_META_REGIONINFO}. - * Just refer to this instance. 
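For the delimited serialization helpers in the RegionInfo hunk above, a minimal round-trip sketch; the table name is made up and only the hbase-client classpath is assumed.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;

    public class DelimitedRegionInfoSketch {
      public static void main(String[] args) throws IOException {
        RegionInfo r1 = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
        RegionInfo r2 =
            RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).setReplicaId(1).build();
        // The delimited form keeps per-instance boundaries, unlike plain toByteArray(RegionInfo).
        byte[] blob = RegionInfo.toDelimitedByteArray(r1, r2);
        List<RegionInfo> back = RegionInfo.parseDelimitedFrom(blob, 0, blob.length);
        System.out.println(back.size()); // expected: 2
      }
    }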
Also, while the instance is actually a MutableRI, its type is - * just RI so the mutable methods are not available (unless you go casting); it appears - * as immutable (I tried adding Immutable type but it just makes a mess). + * RegionInfo for first meta region You cannot use this builder to make an instance of the + * {@link #FIRST_META_REGIONINFO}. Just refer to this instance. Also, while the instance is + * actually a MutableRI, its type is just RI so the mutable methods are not available (unless you + * go casting); it appears as immutable (I tried adding Immutable type but it just makes a mess). */ // TODO: How come Meta regions still do not have encoded region names? Fix. // hbase:meta,,1.1588230740 should be the hbase:meta first region name. @@ -108,8 +107,7 @@ public class RegionInfoBuilder { } public RegionInfo build() { - return new MutableRegionInfo(tableName, startKey, endKey, split, - regionId, replicaId, offLine); + return new MutableRegionInfo(tableName, startKey, endKey, split, regionId, replicaId, offLine); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java index 7ced1b3072a..58163a2d74a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,17 +17,14 @@ */ package org.apache.hadoop.hbase.client; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; - /** * Utility used composing RegionInfo for 'display'; e.g. on the web UI */ @@ -39,17 +35,16 @@ public class RegionInfoDisplay { public final static byte[] HIDDEN_START_KEY = Bytes.toBytes("hidden-start-key"); /** - * Get the descriptive name as {@link RegionState} does it but with hidden - * startkey optionally + * Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally * @return descriptive string */ public static String getDescriptiveNameFromRegionStateForDisplay(RegionState state, - Configuration conf) { + Configuration conf) { if (conf.getBoolean(DISPLAY_KEYS_KEY, true)) return state.toDescriptiveString(); String descriptiveStringFromState = state.toDescriptiveString(); int idx = descriptiveStringFromState.lastIndexOf(" state="); String regionName = getRegionNameAsStringForDisplay( - RegionInfoBuilder.newBuilder(state.getRegion()).build(), conf); + RegionInfoBuilder.newBuilder(state.getRegion()).build(), conf); return regionName + descriptiveStringFromState.substring(idx); } @@ -64,10 +59,7 @@ public class RegionInfoDisplay { } /** - * Get the start key for display. Optionally hide the real start key. - * @param ri - * @param conf - * @return the startkey + * Get the start key for display. Optionally hide the real start key. 
nn * @return the startkey */ public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) { boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true); @@ -76,20 +68,15 @@ public class RegionInfoDisplay { } /** - * Get the region name for display. Optionally hide the start key. - * @param ri - * @param conf - * @return region name as String + * Get the region name for display. Optionally hide the start key. nn * @return region name as + * String */ public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) { return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf)); } /** - * Get the region name for display. Optionally hide the start key. - * @param ri - * @param conf - * @return region name bytes + * Get the region name for display. Optionally hide the start key. nn * @return region name bytes */ public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) { boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true); @@ -99,17 +86,16 @@ public class RegionInfoDisplay { // create a modified regionname with the startkey replaced but preserving // the other parts including the encodedname. try { - byte[][]regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); - regionNameParts[1] = HIDDEN_START_KEY; //replace the real startkey + byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); + regionNameParts[1] = HIDDEN_START_KEY; // replace the real startkey int len = 0; // get the total length for (byte[] b : regionNameParts) { len += b.length; } - byte[] encodedRegionName = - Bytes.toBytes(RegionInfo.encodeRegionName(ri.getRegionName())); + byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(ri.getRegionName())); len += encodedRegionName.length; - //allocate some extra bytes for the delimiters and the last '.' + // allocate some extra bytes for the delimiters and the last '.' byte[] modifiedName = new byte[len + regionNameParts.length + 1]; int lengthSoFar = 0; int loopCount = 0; @@ -117,17 +103,16 @@ public class RegionInfoDisplay { System.arraycopy(b, 0, modifiedName, lengthSoFar, b.length); lengthSoFar += b.length; if (loopCount++ == 2) modifiedName[lengthSoFar++] = RegionInfo.REPLICA_ID_DELIMITER; - else modifiedName[lengthSoFar++] = HConstants.DELIMITER; + else modifiedName[lengthSoFar++] = HConstants.DELIMITER; } // replace the last comma with '.' modifiedName[lengthSoFar - 1] = RegionInfo.ENC_SEPARATOR; - System.arraycopy(encodedRegionName, 0, modifiedName, lengthSoFar, - encodedRegionName.length); + System.arraycopy(encodedRegionName, 0, modifiedName, lengthSoFar, encodedRegionName.length); lengthSoFar += encodedRegionName.length; modifiedName[lengthSoFar] = RegionInfo.ENC_SEPARATOR; return modifiedName; } catch (IOException e) { - //LOG.warn("Encountered exception " + e); + // LOG.warn("Encountered exception " + e); throw new RuntimeException(e); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java index 052ac536ab8..02e7f733b97 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
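A short sketch of how the RegionInfoDisplay helpers above might be used to hide start keys in UI output; the flag is the DISPLAY_KEYS_KEY constant shown in this hunk, while the table name and start key are invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.client.RegionInfoDisplay;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionInfoDisplaySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // When DISPLAY_KEYS_KEY is false, the real start key is replaced with the hidden marker.
        conf.setBoolean(RegionInfoDisplay.DISPLAY_KEYS_KEY, false);
        RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demo"))
            .setStartKey(Bytes.toBytes("secret-row")).build();
        System.out.println(Bytes.toStringBinary(RegionInfoDisplay.getStartKeyForDisplay(ri, conf)));
        System.out.println(RegionInfoDisplay.getRegionNameAsStringForDisplay(ri, conf));
      }
    }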
See the NOTICE file * distributed with this work for additional information @@ -35,8 +35,8 @@ public class RegionLoadStats { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getMemStoreLoad()} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getMemStoreLoad()} instead */ @Deprecated public int getMemstoreLoad() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java index 950123cd0c6..253836455f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,5 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private enum RegionLocateType { - BEFORE, CURRENT, AFTER + BEFORE, + CURRENT, + AFTER } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java index 7ea6e4ada36..40f31b06f25 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,8 +23,8 @@ import java.util.List; import java.util.stream.Collectors; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Used to view region location information for a single HBase table. Obtain an instance from an @@ -38,12 +37,14 @@ import org.apache.hadoop.hbase.util.Pair; @InterfaceAudience.Public public interface RegionLocator extends Closeable { - /** Configuration for Region Locator's mode when meta replica is configured. - * Valid values are: HedgedRead, LoadBalance, None + /** + * Configuration for Region Locator's mode when meta replica is configured. Valid values are: + * HedgedRead, LoadBalance, None */ String LOCATOR_META_REPLICAS_MODE = "hbase.locator.meta.replicas.mode"; - /** Configuration for meta replica selector when Region Locator's LoadBalance mode is configured. + /** + * Configuration for meta replica selector when Region Locator's LoadBalance mode is configured. * The default value is org.apache.hadoop.hbase.client.CatalogReplicaLoadBalanceSimpleSelector. */ String LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR = @@ -61,7 +62,7 @@ public interface RegionLocator extends Closeable { /** * Finds the region on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Location of the row. * @throws IOException if a remote or network exception occurs @@ -72,7 +73,7 @@ public interface RegionLocator extends Closeable { /** * Finds the region with the given replica id on which the given row is being served. 
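A hedged usage sketch for the RegionLocator API touched here; it assumes a reachable cluster and a table named "demo", both of which are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocatorSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            RegionLocator locator = conn.getRegionLocator(TableName.valueOf("demo"))) {
          // Cached lookup first; pass reload=true to force a fresh meta lookup.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-1"), false);
          System.out.println(
            loc.getServerName() + " hosts " + loc.getRegion().getRegionNameAsString());
        }
      }
    }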
- * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id * @return Location of the row. * @throws IOException if a remote or network exception occurs @@ -83,9 +84,9 @@ public interface RegionLocator extends Closeable { /** * Finds the region with the given replica id on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id - * @param reload true to reload information or false to use cached information + * @param reload true to reload information or false to use cached information * @return Location of the row. * @throws IOException if a remote or network exception occurs */ @@ -103,7 +104,7 @@ public interface RegionLocator extends Closeable { /** * Find all the replicas for the region on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Locations for all the replicas of the row. * @throws IOException if a remote or network exception occurs diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java index 4d4731a9e8a..cdb596a4b13 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,6 +24,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public class RegionOfflineException extends RegionException { private static final long serialVersionUID = 466008402L; + /** default constructor */ public RegionOfflineException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java index 09150f123fe..ea022820950 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -33,16 +31,16 @@ import org.apache.yetus.audience.InterfaceAudience; public class RegionReplicaUtil { /** - * Whether or not the secondary region will wait for observing a flush / region open event - * from the primary region via async wal replication before enabling read requests. Since replayed + * Whether or not the secondary region will wait for observing a flush / region open event from + * the primary region via async wal replication before enabling read requests. 
Since replayed * edits from async wal replication from primary is not persisted in WAL, the memstore of the * secondary region might be non-empty at the time of close or crash. For ensuring seqId's not * "going back in time" in the secondary region replica, this should be enabled. However, in some - * cases the above semantics might be ok for some application classes. - * See HBASE-11580 for more context. + * cases the above semantics might be ok for some application classes. See HBASE-11580 for more + * context. */ - public static final String REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY - = "hbase.region.replica.wait.for.primary.flush"; + public static final String REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY = + "hbase.region.replica.wait.for.primary.flush"; protected static final boolean DEFAULT_REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH = true; /** @@ -51,14 +49,11 @@ public class RegionReplicaUtil { static final int DEFAULT_REPLICA_ID = 0; /** - * Returns the RegionInfo for the given replicaId. - * RegionInfo's correspond to a range of a table, but more than one - * "instance" of the same range can be deployed which are differentiated by - * the replicaId. - * @param regionInfo - * @param replicaId the replicaId to use - * @return an RegionInfo object corresponding to the same range (table, start and - * end key), but for the given replicaId. + * Returns the RegionInfo for the given replicaId. RegionInfo's correspond to a range of a table, + * but more than one "instance" of the same range can be deployed which are differentiated by the + * replicaId. n * @param replicaId the replicaId to use + * @return an RegionInfo object corresponding to the same range (table, start and end key), but + * for the given replicaId. */ public static RegionInfo getRegionInfoForReplica(RegionInfo regionInfo, int replicaId) { if (regionInfo.getReplicaId() == replicaId) { @@ -68,11 +63,11 @@ public class RegionReplicaUtil { } /** - * Returns the RegionInfo for the default replicaId (0). RegionInfo's correspond to - * a range of a table, but more than one "instance" of the same range can be - * deployed which are differentiated by the replicaId. - * @return an RegionInfo object corresponding to the same range (table, start and - * end key), but for the default replicaId. + * Returns the RegionInfo for the default replicaId (0). RegionInfo's correspond to a range of a + * table, but more than one "instance" of the same range can be deployed which are differentiated + * by the replicaId. + * @return an RegionInfo object corresponding to the same range (table, start and end key), but + * for the default replicaId. 
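A minimal sketch of the RegionReplicaUtil helpers above, mapping a region to a secondary replica and back to the default replica; the table name is illustrative and only the hbase-client classpath is assumed.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public class RegionReplicaSketch {
      public static void main(String[] args) {
        RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
        // Same table, start key and end key, but replicaId = 2.
        RegionInfo secondary = RegionReplicaUtil.getRegionInfoForReplica(primary, 2);
        // Map any replica back to the default (replicaId 0) instance.
        RegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(secondary);
        System.out.println(secondary.getReplicaId() + " -> " + defaultReplica.getReplicaId());
      }
    }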
*/ public static RegionInfo getRegionInfoForDefaultReplica(RegionInfo regionInfo) { return getRegionInfoForReplica(regionInfo, DEFAULT_REPLICA_ID); @@ -85,12 +80,11 @@ public class RegionReplicaUtil { /** @return true if this region is a default replica for the region */ public static boolean isDefaultReplica(RegionInfo hri) { - return hri.getReplicaId() == DEFAULT_REPLICA_ID; + return hri.getReplicaId() == DEFAULT_REPLICA_ID; } /** - * Removes the non-default replicas from the passed regions collection - * @param regions + * Removes the non-default replicas from the passed regions collection n */ public static void removeNonDefaultRegions(Collection regions) { Iterator iterator = regions.iterator(); @@ -107,7 +101,7 @@ public class RegionReplicaUtil { } private static int compareRegionInfosWithoutReplicaId(RegionInfo regionInfoA, - RegionInfo regionInfoB) { + RegionInfo regionInfoB) { int result = regionInfoA.getTable().compareTo(regionInfoB.getTable()); if (result != 0) { return result; @@ -123,13 +117,11 @@ public class RegionReplicaUtil { result = Bytes.compareTo(regionInfoA.getEndKey(), regionInfoB.getEndKey()); if (result != 0) { - if (regionInfoA.getStartKey().length != 0 - && regionInfoA.getEndKey().length == 0) { - return 1; // this is last region + if (regionInfoA.getStartKey().length != 0 && regionInfoA.getEndKey().length == 0) { + return 1; // this is last region } - if (regionInfoB.getStartKey().length != 0 - && regionInfoB.getEndKey().length == 0) { - return -1; // o is the last region + if (regionInfoB.getStartKey().length != 0 && regionInfoB.getEndKey().length == 0) { + return -1; // o is the last region } return result; } @@ -147,7 +139,7 @@ public class RegionReplicaUtil { /** * Create any replicas for the regions (the default replicas that was already created is passed to * the method) - * @param regions existing regions + * @param regions existing regions * @param oldReplicaCount existing replica count * @param newReplicaCount updated replica count due to modify table * @return the combined list of default and non-default replicas @@ -159,9 +151,12 @@ public class RegionReplicaUtil { } List hRegionInfos = new ArrayList<>((newReplicaCount) * regions.size()); for (RegionInfo ri : regions) { - if (RegionReplicaUtil.isDefaultReplica(ri) && - (!ri.isOffline() || (!ri.isSplit() && !ri.isSplitParent()))) { - // region level replica index starts from 0. So if oldReplicaCount was 2 then the max replicaId for + if ( + RegionReplicaUtil.isDefaultReplica(ri) + && (!ri.isOffline() || (!ri.isSplit() && !ri.isSplitParent())) + ) { + // region level replica index starts from 0. So if oldReplicaCount was 2 then the max + // replicaId for // the existing regions would be 1 for (int j = oldReplicaCount; j < newReplicaCount; j++) { hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(ri, j)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java index e5183916ea5..8bdc1543d74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -27,24 +25,24 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; /** - * Implementations make a RPC call against a RegionService via a protobuf Service. - * Implement rpcCall() and the parent class setClientByServiceName; this latter is where the - * RPC stub gets set (the appropriate protobuf 'Service'/Client). Be sure to make use of the - * RpcController that this instance is carrying via #getRpcController(). - * - *

    TODO: this class is actually tied to one region, because most of the paths make use of - * the regioninfo part of location when building requests. The only reason it works for - * multi-region requests (e.g. batch) is that they happen to not use the region parts. - * This could be done cleaner (e.g. having a generic parameter and 2 derived classes, - * RegionCallable and actual RegionServerCallable with ServerName. - * + * Implementations make a RPC call against a RegionService via a protobuf Service. Implement + * rpcCall() and the parent class setClientByServiceName; this latter is where the RPC stub gets set + * (the appropriate protobuf 'Service'/Client). Be sure to make use of the RpcController that this + * instance is carrying via #getRpcController(). + *

    + * TODO: this class is actually tied to one region, because most of the paths make use of the + * regioninfo part of location when building requests. The only reason it works for multi-region + * requests (e.g. batch) is that they happen to not use the region parts. This could be done cleaner + * (e.g. having a generic parameter and 2 derived classes, RegionCallable and actual + * RegionServerCallable with ServerName. * @param The class that the ServerCallable handles. * @param The protocol to use (Admin or Client or even an Endpoint over in MetaTableAccessor). */ @@ -63,25 +61,24 @@ public abstract class RegionServerCallable implements RetryingCallable /** * This is 99% of the time a HBaseRpcController but also used doing Coprocessor Endpoints and in - * this case, it is a ServerRpcControllable which is not a HBaseRpcController. - * Can be null! + * this case, it is a ServerRpcControllable which is not a HBaseRpcController. Can be null! */ protected final RpcController rpcController; private int priority = HConstants.NORMAL_QOS; /** - * @param connection Connection to use. + * @param connection Connection to use. * @param rpcController Controller to use; can be shaded or non-shaded. - * @param tableName Table name to which row belongs. - * @param row The row we want in tableName. + * @param tableName Table name to which row belongs. + * @param row The row we want in tableName. */ - public RegionServerCallable(Connection connection, TableName tableName, byte [] row, - RpcController rpcController) { + public RegionServerCallable(Connection connection, TableName tableName, byte[] row, + RpcController rpcController) { this(connection, tableName, row, rpcController, HConstants.NORMAL_QOS); } - public RegionServerCallable(Connection connection, TableName tableName, byte [] row, - RpcController rpcController, int priority) { + public RegionServerCallable(Connection connection, TableName tableName, byte[] row, + RpcController rpcController, int priority) { super(); this.connection = connection; this.tableName = tableName; @@ -103,8 +100,8 @@ public abstract class RegionServerCallable implements RetryingCallable } /** - * Override that changes call Exception from {@link Exception} to {@link IOException}. - * Also does set up of the rpcController. + * Override that changes call Exception from {@link Exception} to {@link IOException}. Also does + * set up of the rpcController. */ @Override public T call(int callTimeout) throws IOException { @@ -112,11 +109,11 @@ public abstract class RegionServerCallable implements RetryingCallable // Iff non-null and an instance of a SHADED rpcController, do config! Unshaded -- i.e. // com.google.protobuf.RpcController or null -- will just skip over this config. if (getRpcController() != null) { - RpcController shadedRpcController = (RpcController)getRpcController(); + RpcController shadedRpcController = (RpcController) getRpcController(); // Do a reset to clear previous states, such as CellScanner. shadedRpcController.reset(); if (shadedRpcController instanceof HBaseRpcController) { - HBaseRpcController hrc = (HBaseRpcController)getRpcController(); + HBaseRpcController hrc = (HBaseRpcController) getRpcController(); // If it is an instance of HBaseRpcController, we can set priority on the controller based // off the tableName. Set call timeout too. hrc.setPriority(tableName); @@ -131,31 +128,30 @@ public abstract class RegionServerCallable implements RetryingCallable } /** - * Run the RPC call. Implement this method. 
To get at the rpcController that has been created - * and configured to make this rpc call, use getRpcController(). We are trying to contain + * Run the RPC call. Implement this method. To get at the rpcController that has been created and + * configured to make this rpc call, use getRpcController(). We are trying to contain * rpcController references so we don't pollute codebase with protobuf references; keep the - * protobuf references contained and only present in a few classes rather than all about the - * code base. - * @throws Exception + * protobuf references contained and only present in a few classes rather than all about the code + * base. n */ protected abstract T rpcCall() throws Exception; /** - * Get the RpcController CellScanner. - * If the RpcController is a HBaseRpcController, which it is in all cases except - * when we are processing Coprocessor Endpoint, then this method returns a reference to the - * CellScanner that the HBaseRpcController is carrying. Do it up here in this Callable - * so we don't have to scatter ugly instanceof tests around the codebase. Will return null - * if called in a Coproccessor Endpoint context. Should never happen. + * Get the RpcController CellScanner. If the RpcController is a HBaseRpcController, which it is in + * all cases except when we are processing Coprocessor Endpoint, then this method returns a + * reference to the CellScanner that the HBaseRpcController is carrying. Do it up here in this + * Callable so we don't have to scatter ugly instanceof tests around the codebase. Will return + * null if called in a Coproccessor Endpoint context. Should never happen. */ protected CellScanner getRpcControllerCellScanner() { - return (getRpcController() != null && getRpcController() instanceof HBaseRpcController)? - ((HBaseRpcController)getRpcController()).cellScanner(): null; + return (getRpcController() != null && getRpcController() instanceof HBaseRpcController) + ? 
((HBaseRpcController) getRpcController()).cellScanner() + : null; } protected void setRpcControllerCellScanner(CellScanner cellScanner) { if (getRpcController() != null && getRpcController() instanceof HBaseRpcController) { - ((HBaseRpcController)this.rpcController).setCellScanner(cellScanner); + ((HBaseRpcController) this.rpcController).setCellScanner(cellScanner); } } @@ -178,17 +174,19 @@ public abstract class RegionServerCallable implements RetryingCallable return this.tableName; } - public byte [] getRow() { + public byte[] getRow() { return this.row; } - protected int getPriority() { return this.priority;} + protected int getPriority() { + return this.priority; + } @Override public void throwable(Throwable t, boolean retrying) { if (location != null) { getConnection().updateCachedLocations(tableName, location.getRegionInfo().getRegionName(), - row, t, location.getServerName()); + row, t, location.getServerName()); } } @@ -215,16 +213,18 @@ public abstract class RegionServerCallable implements RetryingCallable @Override public void prepare(final boolean reload) throws IOException { // check table state if this is a retry - if (reload && tableName != null && !tableName.equals(TableName.META_TABLE_NAME) - && getConnection().isTableDisabled(tableName)) { + if ( + reload && tableName != null && !tableName.equals(TableName.META_TABLE_NAME) + && getConnection().isTableDisabled(tableName) + ) { throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled."); } try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { this.location = regionLocator.getRegionLocation(row); } if (this.location == null) { - throw new IOException("Failed to find location, tableName=" + tableName + - ", row=" + Bytes.toString(row) + ", reload=" + reload); + throw new IOException("Failed to find location, tableName=" + tableName + ", row=" + + Bytes.toString(row) + ", reload=" + reload); } setStubByServiceName(this.location.getServerName()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java index 38512d55baf..76bca140ef1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
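A condensed sketch of the controller-handling pattern in RegionServerCallable#call above: reset the controller, then set priority and timeout only when it is the shaded HBaseRpcController. The ControllerSetupSketch class and its configure method are hypothetical names introduced purely to isolate the pattern.

    import org.apache.hadoop.hbase.CellScanner;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.ipc.HBaseRpcController;
    import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;

    public final class ControllerSetupSketch {
      private ControllerSetupSketch() {
      }

      // Hypothetical helper mirroring the pattern: reset, then configure priority/timeout and
      // expose the CellScanner only for the shaded HBaseRpcController; otherwise do nothing.
      static CellScanner configure(RpcController controller, TableName table, int timeoutMs) {
        if (controller == null) {
          return null;
        }
        if (controller instanceof HBaseRpcController) {
          HBaseRpcController hrc = (HBaseRpcController) controller;
          hrc.reset();
          hrc.setPriority(table);
          hrc.setCallTimeout(timeoutMs);
          return hrc.cellScanner();
        }
        return null;
      }
    }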
See the NOTICE file * distributed with this work for additional information @@ -48,14 +48,13 @@ public class RegionServerCoprocessorRpcChannelImpl implements RpcChannel { } private CompletableFuture rpcCall(MethodDescriptor method, Message request, - Message responsePrototype, HBaseRpcController controller, ClientService.Interface stub) { + Message responsePrototype, HBaseRpcController controller, ClientService.Interface stub) { CompletableFuture future = new CompletableFuture<>(); CoprocessorServiceRequest csr = - CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); - stub.execRegionServerService( - controller, - csr, - new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback() { + CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); + stub.execRegionServerService(controller, csr, + new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback< + CoprocessorServiceResponse>() { @Override public void run(CoprocessorServiceResponse resp) { @@ -75,7 +74,7 @@ public class RegionServerCoprocessorRpcChannelImpl implements RpcChannel { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { addListener( callerBuilder.action((c, s) -> rpcCall(method, request, responsePrototype, c, s)).call(), ((r, e) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java index 1e1ce95113b..582f492d3de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -106,7 +104,7 @@ public final class RegionStatesCount { } public RegionStatesCount build() { - RegionStatesCount regionStatesCount=new RegionStatesCount(); + RegionStatesCount regionStatesCount = new RegionStatesCount(); regionStatesCount.setOpenRegions(openRegions); regionStatesCount.setClosedRegions(closedRegions); regionStatesCount.setRegionsInTransition(regionsInTransition); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java index 2064021f714..ac7cad27581 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java @@ -80,8 +80,8 @@ final class RegistryEndpointsRefresher { } // if refreshNow is true, then we will wait until minTimeBetweenRefreshesMs elapsed, // otherwise wait until periodicRefreshMs elapsed - long waitTime = getRefreshIntervalMs(firstRefresh) - - (EnvironmentEdgeManager.currentTime() - lastRefreshTime); + long waitTime = getRefreshIntervalMs(firstRefresh) + - (EnvironmentEdgeManager.currentTime() - lastRefreshTime); if (waitTime <= 0) { // we are going to refresh, reset this flag firstRefresh = false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java index 4c63e4d0881..66d864be7d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.InterruptedIOException; @@ -63,10 +61,10 @@ public interface RequestController { /** * Reset the state of the scheduler when completing the iteration of rows. - * @throws InterruptedIOException some controller may wait - * for some busy region or RS to complete the undealt request. + * @throws InterruptedIOException some controller may wait for some busy region or RS to + * complete the undealt request. */ - void reset() throws InterruptedIOException ; + void reset() throws InterruptedIOException; } /** @@ -77,14 +75,14 @@ public interface RequestController { /** * Increment the counter if we build a valid task. * @param regions The destination of task - * @param sn The target server + * @param sn The target server */ void incTaskCounters(Collection regions, ServerName sn); /** * Decrement the counter if a task is accomplished. * @param regions The destination of task - * @param sn The target server + * @param sn The target server */ void decTaskCounters(Collection regions, ServerName sn); @@ -94,28 +92,27 @@ public interface RequestController { long getNumberOfTasksInProgress(); /** - * Waits for the running tasks to complete. - * If there are specified threshold and trigger, the implementation should - * wake up once in a while for checking the threshold and calling trigger. 
- * @param max This method will return if the number of running tasks is - * less than or equal to max. - * @param id the caller's id - * @param periodToTrigger The period to invoke the trigger. This value is a - * hint. The real period depends on the implementation. - * @param trigger The object to call periodically. + * Waits for the running tasks to complete. If there are specified threshold and trigger, the + * implementation should wake up once in a while for checking the threshold and calling trigger. + * @param max This method will return if the number of running tasks is less than or + * equal to max. + * @param id the caller's id + * @param periodToTrigger The period to invoke the trigger. This value is a hint. The real period + * depends on the implementation. + * @param trigger The object to call periodically. * @throws java.io.InterruptedIOException If the waiting is interrupted */ - void waitForMaximumCurrentTasks(long max, long id, - int periodToTrigger, Consumer trigger) throws InterruptedIOException; + void waitForMaximumCurrentTasks(long max, long id, int periodToTrigger, Consumer trigger) + throws InterruptedIOException; /** * Wait until there is at least one slot for a new task. - * @param id the caller's id - * @param periodToTrigger The period to invoke the trigger. This value is a - * hint. The real period depends on the implementation. - * @param trigger The object to call periodically. + * @param id the caller's id + * @param periodToTrigger The period to invoke the trigger. This value is a hint. The real period + * depends on the implementation. + * @param trigger The object to call periodically. * @throws java.io.InterruptedIOException If the waiting is interrupted */ - void waitForFreeSlot(long id, int periodToTrigger, - Consumer trigger) throws InterruptedIOException; + void waitForFreeSlot(long id, int periodToTrigger, Consumer trigger) + throws InterruptedIOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java index f03da448750..706db916aef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * A factory class that constructs an {@link org.apache.hadoop.hbase.client.RequestController}. */ @InterfaceAudience.Public public final class RequestControllerFactory { - public static final String REQUEST_CONTROLLER_IMPL_CONF_KEY = "hbase.client.request.controller.impl"; + public static final String REQUEST_CONTROLLER_IMPL_CONF_KEY = + "hbase.client.request.controller.impl"; + /** * Constructs a {@link org.apache.hadoop.hbase.client.RequestController}. * @param conf The {@link Configuration} to use. * @return A RequestController which is built according to the configuration. 
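A minimal usage sketch of this factory (assuming hbase-client on the classpath; the commented custom controller class name is hypothetical, the default SimpleRequestController is what the key falls back to):
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.RequestController;
  import org.apache.hadoop.hbase.client.RequestControllerFactory;

  public final class RequestControllerConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // To plug in a custom policy, name any RequestController implementation, e.g. the
      // hypothetical: conf.set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
      //   "org.example.ThrottlingRequestController");
      RequestController controller = RequestControllerFactory.create(conf); // SimpleRequestController by default
      System.out.println("request controller: " + controller.getClass().getName());
    }
  }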
*/ public static RequestController create(Configuration conf) { - Class clazz= conf.getClass(REQUEST_CONTROLLER_IMPL_CONF_KEY, + Class clazz = conf.getClass(REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class, RequestController.class); return ReflectionUtils.newInstance(clazz, conf); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index 138432aada3..a66cbaaf512 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -32,52 +30,44 @@ import java.util.Map; import java.util.NavigableMap; import java.util.NoSuchElementException; import java.util.TreeMap; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Single row result of a {@link Get} or {@link Scan} query.

- * - * This class is NOT THREAD SAFE. - * - * Convenience methods are available that return various {@link Map} - * structures and values directly. - * - * To get a complete mapping of all cells in the Result, which can include - * multiple families and multiple versions, use {@link #getMap()}. - * - * To get a mapping of each family to its columns (qualifiers and values), - * including only the latest version of each, use {@link #getNoVersionMap()}. - * - * To get a mapping of qualifiers to latest values for an individual family use - * {@link #getFamilyMap(byte[])}. - *
+ * Single row result of a {@link Get} or {@link Scan} query. + * + * This class is NOT THREAD SAFE. + * + * Convenience methods are available that return various {@link Map} structures and values directly. + * + * To get a complete mapping of all cells in the Result, which can include multiple families and + * multiple versions, use {@link #getMap()}. + * + * To get a mapping of each family to its columns (qualifiers and values), including only the latest + * version of each, use {@link #getNoVersionMap()}. To get a mapping of qualifiers to latest values + * for an individual family use {@link #getFamilyMap(byte[])}. + *
* To get the latest value for a specific family and qualifier use - * {@link #getValue(byte[], byte[])}. - * - * A Result is backed by an array of {@link Cell} objects, each representing - * an HBase cell defined by the row, family, qualifier, timestamp, and value. - * - * The underlying {@link Cell} objects can be accessed through the method {@link #listCells()}. - * This will create a List from the internal Cell []. Better is to exploit the fact that - * a new Result instance is a primed {@link CellScanner}; just call {@link #advance()} and - * {@link #current()} to iterate over Cells as you would any {@link CellScanner}. - * Call {@link #cellScanner()} to reset should you need to iterate the same Result over again - * ({@link CellScanner}s are one-shot). - * - * If you need to overwrite a Result with another Result instance -- as in the old 'mapred' - * RecordReader next invocations -- then create an empty Result with the null constructor and - * in then use {@link #copyFrom(Result)} + * {@link #getValue(byte[], byte[])}. A Result is backed by an array of {@link Cell} objects, each + * representing an HBase cell defined by the row, family, qualifier, timestamp, and value. + *
    + * The underlying {@link Cell} objects can be accessed through the method {@link #listCells()}. This + * will create a List from the internal Cell []. Better is to exploit the fact that a new Result + * instance is a primed {@link CellScanner}; just call {@link #advance()} and {@link #current()} to + * iterate over Cells as you would any {@link CellScanner}. Call {@link #cellScanner()} to reset + * should you need to iterate the same Result over again ({@link CellScanner}s are one-shot). If you + * need to overwrite a Result with another Result instance -- as in the old 'mapred' RecordReader + * next invocations -- then create an empty Result with the null constructor and in then use + * {@link #copyFrom(Result)} */ @InterfaceAudience.Public public class Result implements CellScannable, CellScanner { @@ -89,12 +79,12 @@ public class Result implements CellScannable, CellScanner { * See {@link #mayHaveMoreCellsInRow()}. */ private boolean mayHaveMoreCellsInRow = false; - // We're not using java serialization. Transient here is just a marker to say + // We're not using java serialization. Transient here is just a marker to say // that this is where we cache row if we're ever asked for it. - private transient byte [] row = null; - // Ditto for familyMap. It can be composed on fly from passed in kvs. - private transient NavigableMap>> - familyMap = null; + private transient byte[] row = null; + // Ditto for familyMap. It can be composed on fly from passed in kvs. + private transient NavigableMap>> familyMap = null; private static ThreadLocal localBuffer = new ThreadLocal<>(); private static final int PAD_WIDTH = 128; @@ -114,8 +104,8 @@ public class Result implements CellScannable, CellScanner { /** * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #rawCells()}. - * Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed - * to 'mapreduce' package MapReduce where you need to overwrite a Result instance with a + * Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed to + * 'mapreduce' package MapReduce where you need to overwrite a Result instance with a * {@link #copyFrom(Result)} call. */ public Result() { @@ -123,8 +113,7 @@ public class Result implements CellScannable, CellScanner { } /** - * Allows to construct special purpose immutable Result objects, - * such as EMPTY_RESULT. + * Allows to construct special purpose immutable Result objects, such as EMPTY_RESULT. * @param readonly whether this Result instance is readonly */ private Result(boolean readonly) { @@ -132,8 +121,8 @@ public class Result implements CellScannable, CellScanner { } /** - * Instantiate a Result with the specified List of KeyValues. - *
    Note: You must ensure that the keyvalues are already sorted. + * Instantiate a Result with the specified List of KeyValues.
    + * Note: You must ensure that the keyvalues are already sorted. * @param cells List of cells */ public static Result create(List cells) { @@ -149,16 +138,16 @@ public class Result implements CellScannable, CellScanner { } public static Result create(List cells, Boolean exists, boolean stale, - boolean mayHaveMoreCellsInRow) { - if (exists != null){ + boolean mayHaveMoreCellsInRow) { + if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); } return new Result(cells.toArray(new Cell[cells.size()]), null, stale, mayHaveMoreCellsInRow); } /** - * Instantiate a Result with the specified array of KeyValues. - *
    Note: You must ensure that the keyvalues are already sorted. + * Instantiate a Result with the specified array of KeyValues.
    + * Note: You must ensure that the keyvalues are already sorted. * @param cells array of cells */ public static Result create(Cell[] cells) { @@ -170,7 +159,7 @@ public class Result implements CellScannable, CellScanner { } public static Result create(Cell[] cells, Boolean exists, boolean stale, - boolean mayHaveMoreCellsInRow) { + boolean mayHaveMoreCellsInRow) { if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); } @@ -196,37 +185,27 @@ public class Result implements CellScannable, CellScanner { } /** - * Method for retrieving the row key that corresponds to - * the row from which this Result was created. - * @return row + * Method for retrieving the row key that corresponds to the row from which this Result was + * created. n */ - public byte [] getRow() { + public byte[] getRow() { if (this.row == null) { - this.row = (this.cells == null || this.cells.length == 0) ? - null : - CellUtil.cloneRow(this.cells[0]); + this.row = + (this.cells == null || this.cells.length == 0) ? null : CellUtil.cloneRow(this.cells[0]); } return this.row; } /** - * Return the array of Cells backing this Result instance. - * - * The array is sorted from smallest -> largest using the - * {@link CellComparator}. - * - * The array only contains what your Get or Scan specifies and no more. - * For example if you request column "A" 1 version you will have at most 1 - * Cell in the array. If you request column "A" with 2 version you will - * have at most 2 Cells, with the first one being the newer timestamp and - * the second being the older timestamp (this is the sort order defined by - * {@link CellComparator}). If columns don't exist, they won't be - * present in the result. Therefore if you ask for 1 version all columns, - * it is safe to iterate over this array and expect to see 1 Cell for - * each column and no more. - * - * This API is faster than using getFamilyMap() and getMap() - * + * Return the array of Cells backing this Result instance. The array is sorted from smallest -> + * largest using the {@link CellComparator}. The array only contains what your Get or Scan + * specifies and no more. For example if you request column "A" 1 version you will have at most 1 + * Cell in the array. If you request column "A" with 2 version you will have at most 2 Cells, with + * the first one being the newer timestamp and the second being the older timestamp (this is the + * sort order defined by {@link CellComparator}). If columns don't exist, they won't be present in + * the result. Therefore if you ask for 1 version all columns, it is safe to iterate over this + * array and expect to see 1 Cell for each column and no more. This API is faster than using + * getFamilyMap() and getMap() * @return array of Cells; can be null if nothing in the result */ public Cell[] rawCells() { @@ -234,35 +213,27 @@ public class Result implements CellScannable, CellScanner { } /** - * Create a sorted list of the Cell's in this result. - * - * Since HBase 0.20.5 this is equivalent to raw(). - * + * Create a sorted list of the Cell's in this result. Since HBase 0.20.5 this is equivalent to + * raw(). * @return sorted List of Cells; can be null if no cells in the result */ public List listCells() { - return isEmpty()? null: Arrays.asList(rawCells()); + return isEmpty() ? null : Arrays.asList(rawCells()); } /** - * Return the Cells for the specific column. The Cells are sorted in - * the {@link CellComparator} order. That implies the first entry in - * the list is the most recent column. 
If the query (Scan or Get) only - * requested 1 version the list will contain at most 1 entry. If the column - * did not exist in the result set (either the column does not exist - * or the column was not selected in the query) the list will be empty. - * - * Also see getColumnLatest which returns just a Cell - * - * @param family the family - * @param qualifier - * @return a list of Cells for this column or empty list if the column - * did not exist in the result set + * Return the Cells for the specific column. The Cells are sorted in the {@link CellComparator} + * order. That implies the first entry in the list is the most recent column. If the query (Scan + * or Get) only requested 1 version the list will contain at most 1 entry. If the column did not + * exist in the result set (either the column does not exist or the column was not selected in the + * query) the list will be empty. Also see getColumnLatest which returns just a Cell + * @param family the family n * @return a list of Cells for this column or empty list if the + * column did not exist in the result set */ - public List getColumnCells(byte [] family, byte [] qualifier) { + public List getColumnCells(byte[] family, byte[] qualifier) { List result = new ArrayList<>(); - Cell [] kvs = rawCells(); + Cell[] kvs = rawCells(); if (kvs == null || kvs.length == 0) { return result; @@ -273,7 +244,7 @@ public class Result implements CellScannable, CellScanner { } for (int i = pos; i < kvs.length; i++) { - if (CellUtil.matchingColumn(kvs[i], family,qualifier)) { + if (CellUtil.matchingColumn(kvs[i], family, qualifier)) { result.add(kvs[i]); } else { break; @@ -291,22 +262,18 @@ public class Result implements CellScannable, CellScanner { } } - protected int binarySearch(final Cell [] kvs, - final byte [] family, - final byte [] qualifier) { + protected int binarySearch(final Cell[] kvs, final byte[] family, final byte[] qualifier) { byte[] familyNotNull = notNullBytes(family); byte[] qualifierNotNull = notNullBytes(qualifier); - Cell searchTerm = - PrivateCellUtil.createFirstOnRow(kvs[0].getRowArray(), - kvs[0].getRowOffset(), kvs[0].getRowLength(), - familyNotNull, 0, (byte)familyNotNull.length, - qualifierNotNull, 0, qualifierNotNull.length); + Cell searchTerm = PrivateCellUtil.createFirstOnRow(kvs[0].getRowArray(), kvs[0].getRowOffset(), + kvs[0].getRowLength(), familyNotNull, 0, (byte) familyNotNull.length, qualifierNotNull, 0, + qualifierNotNull.length); // pos === ( -(insertion point) - 1) int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.getInstance()); // never will exact match if (pos < 0) { - pos = (pos+1) * -1; + pos = (pos + 1) * -1; // pos is now insertion point } if (pos == kvs.length) { @@ -317,23 +284,20 @@ public class Result implements CellScannable, CellScanner { /** * Searches for the latest value for the specified column. 
- * - * @param kvs the array to search - * @param family family name - * @param foffset family offset - * @param flength family length + * @param kvs the array to search + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return the index where the value was found, or -1 otherwise */ - protected int binarySearch(final Cell [] kvs, - final byte [] family, final int foffset, final int flength, - final byte [] qualifier, final int qoffset, final int qlength) { + protected int binarySearch(final Cell[] kvs, final byte[] family, final int foffset, + final int flength, final byte[] qualifier, final int qoffset, final int qlength) { - double keyValueSize = (double) - KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0); + double keyValueSize = + (double) KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0); byte[] buffer = localBuffer.get(); if (buffer == null || keyValueSize > buffer.length) { @@ -342,16 +306,15 @@ public class Result implements CellScannable, CellScanner { localBuffer.set(buffer); } - Cell searchTerm = KeyValueUtil.createFirstOnRow(buffer, 0, - kvs[0].getRowArray(), kvs[0].getRowOffset(), kvs[0].getRowLength(), - family, foffset, flength, - qualifier, qoffset, qlength); + Cell searchTerm = + KeyValueUtil.createFirstOnRow(buffer, 0, kvs[0].getRowArray(), kvs[0].getRowOffset(), + kvs[0].getRowLength(), family, foffset, flength, qualifier, qoffset, qlength); // pos === ( -(insertion point) - 1) int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.getInstance()); // never will exact match if (pos < 0) { - pos = (pos+1) * -1; + pos = (pos + 1) * -1; // pos is now insertion point } if (pos == kvs.length) { @@ -361,16 +324,12 @@ public class Result implements CellScannable, CellScanner { } /** - * The Cell for the most recent timestamp for a given column. - * - * @param family - * @param qualifier - * + * The Cell for the most recent timestamp for a given column. nn * * @return the Cell for the column, or null if no value exists in the row or none have been - * selected in the query (Get/Scan) + * selected in the query (Get/Scan) */ - public Cell getColumnLatestCell(byte [] family, byte [] qualifier) { - Cell [] kvs = rawCells(); // side effect possibly. + public Cell getColumnLatestCell(byte[] family, byte[] qualifier) { + Cell[] kvs = rawCells(); // side effect possibly. if (kvs == null || kvs.length == 0) { return null; } @@ -386,21 +345,19 @@ public class Result implements CellScannable, CellScanner { /** * The Cell for the most recent timestamp for a given column. 
- * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return the Cell for the column, or null if no value exists in the row or none have been - * selected in the query (Get/Scan) + * selected in the query (Get/Scan) */ - public Cell getColumnLatestCell(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public Cell getColumnLatestCell(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { - Cell [] kvs = rawCells(); // side effect possibly. + Cell[] kvs = rawCells(); // side effect possibly. if (kvs == null || kvs.length == 0) { return null; } @@ -408,23 +365,24 @@ public class Result implements CellScannable, CellScanner { if (pos == -1) { return null; } - if (PrivateCellUtil.matchingColumn(kvs[pos], family, foffset, flength, qualifier, qoffset, - qlength)) { + if ( + PrivateCellUtil.matchingColumn(kvs[pos], family, foffset, flength, qualifier, qoffset, + qlength) + ) { return kvs[pos]; } return null; } /** - * Get the latest version of the specified column. - * Note: this call clones the value content of the hosting Cell. See - * {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()} if you would - * avoid the cloning. - * @param family family name + * Get the latest version of the specified column. Note: this call clones the value content of the + * hosting Cell. See {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()} + * if you would avoid the cloning. + * @param family family name * @param qualifier column qualifier * @return value of latest version of column, null if none found */ - public byte[] getValue(byte [] family, byte [] qualifier) { + public byte[] getValue(byte[] family, byte[] qualifier) { Cell kv = getColumnLatestCell(family, qualifier); if (kv == null) { return null; @@ -434,62 +392,55 @@ public class Result implements CellScannable, CellScanner { /** * Returns the value wrapped in a new ByteBuffer. - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return the latest version of the column, or null if none found */ - public ByteBuffer getValueAsByteBuffer(byte [] family, byte [] qualifier) { + public ByteBuffer getValueAsByteBuffer(byte[] family, byte[] qualifier) { Cell kv = getColumnLatestCell(family, 0, family.length, qualifier, 0, qualifier.length); if (kv == null) { return null; } - return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()). - asReadOnlyBuffer(); + return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()) + .asReadOnlyBuffer(); } /** * Returns the value wrapped in a new ByteBuffer. 
- * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return the latest version of the column, or null if none found */ - public ByteBuffer getValueAsByteBuffer(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public ByteBuffer getValueAsByteBuffer(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); if (kv == null) { return null; } - return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()). - asReadOnlyBuffer(); + return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()) + .asReadOnlyBuffer(); } /** * Loads the latest version of the specified column into the provided ByteBuffer. *

    * Does not clear or flip the buffer. - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param dst the buffer where to write the value - * + * @param dst the buffer where to write the value * @return true if a value was found, false otherwise - * * @throws BufferOverflowException there is insufficient space remaining in the buffer */ - public boolean loadValue(byte [] family, byte [] qualifier, ByteBuffer dst) - throws BufferOverflowException { + public boolean loadValue(byte[] family, byte[] qualifier, ByteBuffer dst) + throws BufferOverflowException { return loadValue(family, 0, family.length, qualifier, 0, qualifier.length, dst); } @@ -497,22 +448,18 @@ public class Result implements CellScannable, CellScanner { * Loads the latest version of the specified column into the provided ByteBuffer. *

    * Does not clear or flip the buffer. - * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * @param dst the buffer where to write the value - * + * @param qoffset qualifier offset + * @param qlength qualifier length + * @param dst the buffer where to write the value * @return true if a value was found, false otherwise - * * @throws BufferOverflowException there is insufficient space remaining in the buffer */ - public boolean loadValue(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength, ByteBuffer dst) - throws BufferOverflowException { + public boolean loadValue(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, + int qlength, ByteBuffer dst) throws BufferOverflowException { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); if (kv == null) { @@ -524,31 +471,27 @@ public class Result implements CellScannable, CellScanner { /** * Checks if the specified column contains a non-empty value (not a zero-length byte array). - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return whether or not a latest value exists and is not empty */ - public boolean containsNonEmptyColumn(byte [] family, byte [] qualifier) { + public boolean containsNonEmptyColumn(byte[] family, byte[] qualifier) { return containsNonEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); } /** * Checks if the specified column contains a non-empty value (not a zero-length byte array). - * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return whether or not a latest value exists and is not empty */ - public boolean containsNonEmptyColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public boolean containsNonEmptyColumn(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); @@ -557,31 +500,27 @@ public class Result implements CellScannable, CellScanner { /** * Checks if the specified column contains an empty value (a zero-length byte array). - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return whether or not a latest value exists and is empty */ - public boolean containsEmptyColumn(byte [] family, byte [] qualifier) { + public boolean containsEmptyColumn(byte[] family, byte[] qualifier) { return containsEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); } /** * Checks if the specified column contains an empty value (a zero-length byte array). 
- * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return whether or not a latest value exists and is empty */ - public boolean containsEmptyColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public boolean containsEmptyColumn(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); return (kv != null) && (kv.getValueLength() == 0); @@ -589,31 +528,27 @@ public class Result implements CellScannable, CellScanner { /** * Checks for existence of a value for the specified column (empty or not). - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return true if at least one value exists in the result, false if not */ - public boolean containsColumn(byte [] family, byte [] qualifier) { + public boolean containsColumn(byte[] family, byte[] qualifier) { Cell kv = getColumnLatestCell(family, qualifier); return kv != null; } /** * Checks for existence of a value for the specified column (empty or not). - * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return true if at least one value exists in the result, false if not */ - public boolean containsColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public boolean containsColumn(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { return getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength) != null; } @@ -631,20 +566,20 @@ public class Result implements CellScannable, CellScanner { if (this.familyMap != null) { return this.familyMap; } - if(isEmpty()) { + if (isEmpty()) { return null; } this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(Cell kv : this.cells) { - byte [] family = CellUtil.cloneFamily(kv); + for (Cell kv : this.cells) { + byte[] family = CellUtil.cloneFamily(kv); NavigableMap> columnMap = familyMap.get(family); - if(columnMap == null) { + if (columnMap == null) { columnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, columnMap); } - byte [] qualifier = CellUtil.cloneQualifier(kv); + byte[] qualifier = CellUtil.cloneQualifier(kv); NavigableMap versionMap = columnMap.get(qualifier); - if(versionMap == null) { + if (versionMap == null) { versionMap = new TreeMap<>(new Comparator() { @Override public int compare(Long l1, Long l2) { @@ -654,7 +589,7 @@ public class Result implements CellScannable, CellScanner { columnMap.put(qualifier, versionMap); } Long timestamp = kv.getTimestamp(); - byte [] value = CellUtil.cloneValue(kv); + byte[] value = CellUtil.cloneValue(kv); versionMap.put(timestamp, value); } @@ -670,20 +605,20 @@ public class Result implements CellScannable, CellScanner { * @return map from families to qualifiers and value */ 
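A minimal sketch of consuming these map views from a Result (family and qualifier names are assumed, not taken from this code):
  import java.util.NavigableMap;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class ResultReadSketch {
    /** Prints the latest value of cf:q1 and the latest version of every column in family "cf". */
    static void dump(Result result) {
      byte[] family = Bytes.toBytes("cf");                            // assumed family name
      byte[] latest = result.getValue(family, Bytes.toBytes("q1"));   // null if the column is absent
      System.out.println("cf:q1 = " + (latest == null ? "<none>" : Bytes.toStringBinary(latest)));
      // Latest version of each qualifier in the family; null only when the Result is empty.
      NavigableMap<byte[], byte[]> byQualifier = result.getFamilyMap(family);
      if (byQualifier != null) {
        byQualifier.forEach((q, v) ->
          System.out.println(Bytes.toStringBinary(q) + " = " + Bytes.toStringBinary(v)));
      }
    }
  }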
public NavigableMap> getNoVersionMap() { - if(this.familyMap == null) { + if (this.familyMap == null) { getMap(); } - if(isEmpty()) { + if (isEmpty()) { return null; } - NavigableMap> returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(Map.Entry>> - familyEntry : familyMap.entrySet()) { + NavigableMap> returnMap = + new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Map.Entry>> familyEntry : familyMap + .entrySet()) { NavigableMap qualifierMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(Map.Entry> qualifierEntry : - familyEntry.getValue().entrySet()) { - byte [] value = - qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); + for (Map.Entry> qualifierEntry : familyEntry.getValue() + .entrySet()) { + byte[] value = qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); qualifierMap.put(qualifierEntry.getKey(), value); } returnMap.put(familyEntry.getKey(), qualifierMap); @@ -698,23 +633,20 @@ public class Result implements CellScannable, CellScanner { * @param family column family to get * @return map of qualifiers to values */ - public NavigableMap getFamilyMap(byte [] family) { - if(this.familyMap == null) { + public NavigableMap getFamilyMap(byte[] family) { + if (this.familyMap == null) { getMap(); } - if(isEmpty()) { + if (isEmpty()) { return null; } NavigableMap returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - NavigableMap> qualifierMap = - familyMap.get(family); - if(qualifierMap == null) { + NavigableMap> qualifierMap = familyMap.get(family); + if (qualifierMap == null) { return returnMap; } - for(Map.Entry> entry : - qualifierMap.entrySet()) { - byte [] value = - entry.getValue().get(entry.getValue().firstKey()); + for (Map.Entry> entry : qualifierMap.entrySet()) { + byte[] value = entry.getValue().get(entry.getValue().firstKey()); returnMap.put(entry.getKey(), value); } return returnMap; @@ -724,7 +656,7 @@ public class Result implements CellScannable, CellScanner { * Returns the value of the first column in the Result. * @return value of the first column */ - public byte [] value() { + public byte[] value() { if (isEmpty()) { return null; } @@ -743,24 +675,24 @@ public class Result implements CellScannable, CellScanner { * @return the size of the underlying Cell [] */ public int size() { - return this.cells == null? 0: this.cells.length; + return this.cells == null ? 0 : this.cells.length; } /** - * @return String + * n */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("keyvalues="); - if(isEmpty()) { + if (isEmpty()) { sb.append("NONE"); return sb.toString(); } sb.append("{"); boolean moreThanOne = false; - for(Cell kv : this.cells) { - if(moreThanOne) { + for (Cell kv : this.cells) { + if (moreThanOne) { sb.append(", "); } else { moreThanOne = true; @@ -777,49 +709,45 @@ public class Result implements CellScannable, CellScanner { * @param res2 second result to compare * @throws Exception Every difference is throwing an exception */ - public static void compareResults(Result res1, Result res2) - throws Exception{ + public static void compareResults(Result res1, Result res2) throws Exception { compareResults(res1, res2, true); } /** * Does a deep comparison of two Results, down to the byte arrays. 
- * @param res1 first result to compare - * @param res2 second result to compare - * @param verbose includes string representation for all cells in the exception if true; - * otherwise include rowkey only + * @param res1 first result to compare + * @param res2 second result to compare + * @param verbose includes string representation for all cells in the exception if true; otherwise + * include rowkey only * @throws Exception Every difference is throwing an exception */ - public static void compareResults(Result res1, Result res2, boolean verbose) - throws Exception { + public static void compareResults(Result res1, Result res2, boolean verbose) throws Exception { if (res2 == null) { - throw new Exception("There wasn't enough rows, we stopped at " - + Bytes.toStringBinary(res1.getRow())); + throw new Exception( + "There wasn't enough rows, we stopped at " + Bytes.toStringBinary(res1.getRow())); } if (res1.size() != res2.size()) { if (verbose) { throw new Exception( - "This row doesn't have the same number of KVs: " - + res1 + " compared to " + res2); + "This row doesn't have the same number of KVs: " + res1 + " compared to " + res2); } else { throw new Exception( - "This row doesn't have the same number of KVs: row=" - + Bytes.toStringBinary(res1.getRow()) + "This row doesn't have the same number of KVs: row=" + Bytes.toStringBinary(res1.getRow()) + ", " + res1.size() + " cells are compared to " + res2.size() + " cells"); } } Cell[] ourKVs = res1.rawCells(); Cell[] replicatedKVs = res2.rawCells(); for (int i = 0; i < res1.size(); i++) { - if (!ourKVs[i].equals(replicatedKVs[i]) || - !CellUtil.matchingValue(ourKVs[i], replicatedKVs[i]) || - !CellUtil.matchingTags(ourKVs[i], replicatedKVs[i])) { + if ( + !ourKVs[i].equals(replicatedKVs[i]) || !CellUtil.matchingValue(ourKVs[i], replicatedKVs[i]) + || !CellUtil.matchingTags(ourKVs[i], replicatedKVs[i]) + ) { if (verbose) { - throw new Exception("This result was different: " - + res1 + " compared to " + res2); + throw new Exception("This result was different: " + res1 + " compared to " + res2); } else { - throw new Exception("This result was different: row=" - + Bytes.toStringBinary(res1.getRow())); + throw new Exception( + "This result was different: row=" + Bytes.toStringBinary(res1.getRow())); } } } @@ -831,10 +759,9 @@ public class Result implements CellScannable, CellScanner { * @param partialResults list of partial results * @return The complete result that is formed by combining all of the partial results together * @throws IOException A complete result cannot be formed because the results in the partial list - * come from different rows + * come from different rows */ - public static Result createCompleteResult(Iterable partialResults) - throws IOException { + public static Result createCompleteResult(Iterable partialResults) throws IOException { if (partialResults == null) { return Result.create(Collections.emptyList(), null, false); } @@ -846,9 +773,8 @@ public class Result implements CellScannable, CellScanner { Result r = iter.next(); currentRow = r.getRow(); if (prevRow != null && !Bytes.equals(prevRow, currentRow)) { - throw new IOException( - "Cannot form complete result. Rows of partial results do not match." + - " Partial Results: " + partialResults); + throw new IOException("Cannot form complete result. Rows of partial results do not match." + + " Partial Results: " + partialResults); } // Ensure that all Results except the last one are marked as partials. 
The last result // may not be marked as a partial because Results are only marked as partials when @@ -862,8 +788,8 @@ public class Result implements CellScannable, CellScanner { // Result2: -3- -4- (2 cells, size limit reached, mark as partial) // Result3: -5- (1 cell, size limit NOT reached, NOT marked as partial) if (iter.hasNext() && !r.mayHaveMoreCellsInRow()) { - throw new IOException("Cannot form complete result. Result is missing partial flag. " + - "Partial Results: " + partialResults); + throw new IOException("Cannot form complete result. Result is missing partial flag. " + + "Partial Results: " + partialResults); } prevRow = currentRow; stale = stale || r.isStale(); @@ -876,9 +802,7 @@ public class Result implements CellScannable, CellScanner { } /** - * Get total size of raw cells - * @param result - * @return Total size. + * Get total size of raw cells n * @return Total size. */ public static long getTotalSizeOfCells(Result result) { long size = 0; @@ -893,9 +817,8 @@ public class Result implements CellScannable, CellScanner { /** * Copy another Result into this one. Needed for the old Mapred framework - * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT - * (which is supposed to be immutable). - * @param other + * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed + * to be immutable). n */ public void copyFrom(Result other) { checkReadonly(); @@ -913,9 +836,9 @@ public class Result implements CellScannable, CellScanner { @Override public Cell current() { - if (isEmpty() - || cellScannerIndex == INITIAL_CELLSCANNER_INDEX - || cellScannerIndex >= cells.length) { + if ( + isEmpty() || cellScannerIndex == INITIAL_CELLSCANNER_INDEX || cellScannerIndex >= cells.length + ) { return null; } return this.cells[cellScannerIndex]; @@ -945,8 +868,8 @@ public class Result implements CellScannable, CellScanner { } /** - * Whether or not the results are coming from possibly stale data. Stale results - * might be returned if {@link Consistency} is not STRONG for the query. + * Whether or not the results are coming from possibly stale data. Stale results might be returned + * if {@link Consistency} is not STRONG for the query. * @return Whether or not the results are coming from possibly stale data. */ public boolean isStale() { @@ -964,12 +887,12 @@ public class Result implements CellScannable, CellScanner { } /** - * For scanning large rows, the RS may choose to return the cells chunk by chunk to prevent OOM - * or timeout. This flag is used to tell you if the current Result is the last one of the current + * For scanning large rows, the RS may choose to return the cells chunk by chunk to prevent OOM or + * timeout. This flag is used to tell you if the current Result is the last one of the current * row. False means this Result is the last one. True means there MAY be more cells belonging to - * the current row. - * If you don't use {@link Scan#setAllowPartialResults(boolean)} or {@link Scan#setBatch(int)}, - * this method will always return false because the Result must contains all cells in one Row. + * the current row. If you don't use {@link Scan#setAllowPartialResults(boolean)} or + * {@link Scan#setBatch(int)}, this method will always return false because the Result must + * contains all cells in one Row. 
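A minimal sketch of the partial-result contract described above (table name and scan setup are assumed):
  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;

  public final class PartialResultScanSketch {
    /** Accumulates partial Results until mayHaveMoreCellsInRow() reports the row is complete. */
    static void scan(Connection connection) throws IOException {
      Scan scan = new Scan().setAllowPartialResults(true);
      try (Table table = connection.getTable(TableName.valueOf("t")); // assumed table name
        ResultScanner scanner = table.getScanner(scan)) {
        List<Cell> rowCells = new ArrayList<>();
        for (Result r : scanner) {
          List<Cell> cells = r.listCells();
          if (cells != null) {
            rowCells.addAll(cells);
          }
          if (!r.mayHaveMoreCellsInRow()) {
            // The row is complete here: process rowCells, then start accumulating the next row.
            rowCells.clear();
          }
        }
      }
    }
  }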
*/ public boolean mayHaveMoreCellsInRow() { return mayHaveMoreCellsInRow; @@ -986,15 +909,15 @@ public class Result implements CellScannable, CellScanner { /** * @return the associated statistics about the region from which this was returned. Can be - * null if stats are disabled. + * null if stats are disabled. */ public RegionLoadStats getStats() { return stats; } /** - * All methods modifying state of Result object must call this method - * to ensure that special purpose immutable Results can't be accidentally modified. + * All methods modifying state of Result object must call this method to ensure that special + * purpose immutable Results can't be accidentally modified. */ private void checkReadonly() { if (readonly == true) { @@ -1003,36 +926,23 @@ public class Result implements CellScannable, CellScanner { } /** - * Return true if this Result is a cursor to tell users where the server has scanned. - * In this Result the only meaningful method is {@link #getCursor()}. - * - * {@code + * Return true if this Result is a cursor to tell users where the server has scanned. In this + * Result the only meaningful method is {@link #getCursor()}. {@code * while (r = scanner.next() && r != null) { * if(r.isCursor()){ * // scanning is not end, it is a cursor, save its row key and close scanner if you want, or - * // just continue the loop to call next(). - * } else { - * // just like before - * } - * } - * // scanning is end - * - * } - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Cursor} - * {@link #getCursor()} + * // just continue the loop to call next(). } else { // just like before } } // scanning is end } + * {@link Scan#setNeedCursorResult(boolean)} {@link Cursor} {@link #getCursor()} */ public boolean isCursor() { - return cursor != null ; + return cursor != null; } /** - * Return the cursor if this Result is a cursor result. - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Cursor} - * {@link #isCursor()} + * Return the cursor if this Result is a cursor result. {@link Scan#setNeedCursorResult(boolean)} + * {@link Cursor} {@link #isCursor()} */ - public Cursor getCursor(){ + public Cursor getCursor() { return cursor; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java index 4a96954b21a..7a2c4b160c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,24 +24,20 @@ import java.util.concurrent.Executor; import java.util.concurrent.RunnableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** - * A completion service for the RpcRetryingCallerFactory. - * Keeps the list of the futures, and allows to cancel them all. - * This means as well that it can be used for a small set of tasks only. - *
    Implementation is not Thread safe. - * - * CompletedTasks is implemented as a queue, the entry is added based on the time order. I.e, - * when the first task completes (whether it is a success or failure), it is added as a first - * entry in the queue, the next completed task is added as a second entry in the queue, ... - * When iterating through the queue, we know it is based on time order. If the first - * completed task succeeds, it is returned. If it is failure, the iteration goes on until it - * finds a success. + * A completion service for the RpcRetryingCallerFactory. Keeps the list of the futures, and allows + * to cancel them all. This means as well that it can be used for a small set of tasks only.
    + * Implementation is not Thread safe. CompletedTasks is implemented as a queue, the entry is added + * based on the time order. I.e, when the first task completes (whether it is a success or failure), + * it is added as a first entry in the queue, the next completed task is added as a second entry in + * the queue, ... When iterating through the queue, we know it is based on time order. If the first + * completed task succeeds, it is returned. If it is failure, the iteration goes on until it finds a + * success. */ @InterfaceAudience.Private public class ResultBoundedCompletionService { @@ -52,7 +47,7 @@ public class ResultBoundedCompletionService { private final QueueingFuture[] tasks; // all the tasks private final ArrayList completedTasks; // completed tasks private volatile boolean cancelled = false; - + class QueueingFuture implements RunnableFuture { private final RetryingCallable future; private T result = null; @@ -61,13 +56,12 @@ public class ResultBoundedCompletionService { private final int callTimeout; private final RpcRetryingCaller retryingCaller; private boolean resultObtained = false; - private final int replicaId; // replica id - + private final int replicaId; // replica id public QueueingFuture(RetryingCallable future, int callTimeout, int id) { this.future = future; this.callTimeout = callTimeout; - this.retryingCaller = retryingCallerFactory.newCaller(); + this.retryingCaller = retryingCallerFactory. newCaller(); this.replicaId = id; } @@ -99,7 +93,7 @@ public class ResultBoundedCompletionService { public boolean cancel(boolean mayInterruptIfRunning) { if (resultObtained || exeEx != null) return false; retryingCaller.cancel(); - if (future instanceof Cancellable) ((Cancellable)future).cancel(); + if (future instanceof Cancellable) ((Cancellable) future).cancel(); cancelled = true; return true; } @@ -125,7 +119,7 @@ public class ResultBoundedCompletionService { @Override public T get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { + throws InterruptedException, ExecutionException, TimeoutException { synchronized (tasks) { if (resultObtained) { return result; @@ -155,16 +149,14 @@ public class ResultBoundedCompletionService { } @SuppressWarnings("unchecked") - public ResultBoundedCompletionService( - RpcRetryingCallerFactory retryingCallerFactory, Executor executor, - int maxTasks) { + public ResultBoundedCompletionService(RpcRetryingCallerFactory retryingCallerFactory, + Executor executor, int maxTasks) { this.retryingCallerFactory = retryingCallerFactory; this.executor = executor; this.tasks = new QueueingFuture[maxTasks]; this.completedTasks = new ArrayList<>(maxTasks); } - public void submit(RetryingCallable task, int callTimeout, int id) { QueueingFuture newFuture = new QueueingFuture<>(task, callTimeout, id); // remove trace for runnable because HBASE-25373 and OpenTelemetry do not cover TraceRunnable @@ -174,16 +166,16 @@ public class ResultBoundedCompletionService { public QueueingFuture take() throws InterruptedException { synchronized (tasks) { - while (!cancelled && (completedTasks.size() < 1)) tasks.wait(); + while (!cancelled && (completedTasks.size() < 1)) + tasks.wait(); } return completedTasks.get(0); } /** * Poll for the first completed task whether it is a success or execution exception. 
- * - * @param timeout - time to wait before it times out - * @param unit - time unit for timeout + * @param timeout - time to wait before it times out + * @param unit - time unit for timeout */ public QueueingFuture poll(long timeout, TimeUnit unit) throws InterruptedException { return pollForSpecificCompletedTask(timeout, unit, 0); @@ -192,23 +184,21 @@ public class ResultBoundedCompletionService { /** * Poll for the first successfully completed task whose completed order is in startIndex, * endIndex(exclusive) range - * - * @param timeout - time to wait before it times out - * @param unit - time unit for timeout + * @param timeout - time to wait before it times out + * @param unit - time unit for timeout * @param startIndex - start index, starting from 0, inclusive - * @param endIndex - end index, exclusive - * + * @param endIndex - end index, exclusive * @return If within timeout time, there is no successfully completed task, return null; If all * tasks get execution exception, it will throw out the last execution exception, * otherwise return the first successfully completed task's result. */ public QueueingFuture pollForFirstSuccessfullyCompletedTask(long timeout, TimeUnit unit, - int startIndex, int endIndex) - throws InterruptedException, CancellationException, ExecutionException { + int startIndex, int endIndex) + throws InterruptedException, CancellationException, ExecutionException { - QueueingFuture f; + QueueingFuture f; long start, duration; - for (int i = startIndex; i < endIndex; i ++) { + for (int i = startIndex; i < endIndex; i++) { start = EnvironmentEdgeManager.currentTime(); f = pollForSpecificCompletedTask(timeout, unit, i); @@ -223,8 +213,8 @@ public class ResultBoundedCompletionService { } else if (f.getExeEx() != null) { // we continue here as we need to loop through all the results. if (LOG.isDebugEnabled()) { - LOG.debug("Replica " + ((f == null) ? 0 : f.getReplicaId()) + " returns " + - f.getExeEx().getCause()); + LOG.debug("Replica " + ((f == null) ? 0 : f.getReplicaId()) + " returns " + + f.getExeEx().getCause()); } if (i == (endIndex - 1)) { @@ -242,13 +232,12 @@ public class ResultBoundedCompletionService { /** * Poll for the Nth completed task (index starts from 0 (the 1st), 1 (the second)...) - * - * @param timeout - time to wait before it times out - * @param unit - time unit for timeout - * @param index - the index(th) completed task, index starting from 0 + * @param timeout - time to wait before it times out + * @param unit - time unit for timeout + * @param index - the index(th) completed task, index starting from 0 */ private QueueingFuture pollForSpecificCompletedTask(long timeout, TimeUnit unit, int index) - throws InterruptedException { + throws InterruptedException { if (index < 0) { return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java index 793d10875cb..b0bb97476ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,16 +26,15 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class ResultStatsUtil { private ResultStatsUtil() { - //private ctor for util class + // private ctor for util class } /** * Update the statistics for the specified region. - * - * @param tracker tracker to update - * @param server server from which the result was obtained + * @param tracker tracker to update + * @param server server from which the result was obtained * @param regionName full region name for the statistics - * @param stats statistics to update for the specified region + * @param stats statistics to update for the specified region */ public static void updateStats(StatisticTrackable tracker, ServerName server, byte[] regionName, RegionLoadStats stats) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index f0df7e09cf9..a8493b979c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; @@ -23,13 +22,12 @@ import java.time.Instant; import java.time.format.DateTimeFormatter; import java.util.List; import java.util.StringJoiner; - import org.apache.commons.lang3.StringUtils; import org.apache.yetus.audience.InterfaceAudience; /** - * Exception thrown by HTable methods when an attempt to do something (like - * commit changes) fails after a bunch of retries. + * Exception thrown by HTable methods when an attempt to do something (like commit changes) fails + * after a bunch of retries. */ @InterfaceAudience.Public public class RetriesExhaustedException extends IOException { @@ -53,7 +51,7 @@ public class RetriesExhaustedException extends IOException { private final String extras; public ThrowableWithExtraContext(final Throwable throwable, final long whenAsEpochMilli, - final String extras) { + final String extras) { this.throwable = throwable; this.whenAsEpochMilli = whenAsEpochMilli; this.extras = extras; @@ -77,13 +75,12 @@ public class RetriesExhaustedException extends IOException { /** * Create a new RetriesExhaustedException from the list of prior failures. - * @param callableVitals Details from the Callable we were using - * when we got this exception. 
- * @param numTries The number of tries we made - * @param exceptions List of exceptions that failed before giving up + * @param callableVitals Details from the Callable we were using when we got this exception. + * @param numTries The number of tries we made + * @param exceptions List of exceptions that failed before giving up */ public RetriesExhaustedException(final String callableVitals, int numTries, - List exceptions) { + List exceptions) { super(getMessage(callableVitals, numTries, exceptions)); } @@ -94,13 +91,13 @@ public class RetriesExhaustedException extends IOException { */ @InterfaceAudience.Private public RetriesExhaustedException(final int numRetries, - final List exceptions) { + final List exceptions) { super(getMessage(numRetries, exceptions), - exceptions.isEmpty()? null: exceptions.get(exceptions.size() - 1).throwable); + exceptions.isEmpty() ? null : exceptions.get(exceptions.size() - 1).throwable); } private static String getMessage(String callableVitals, int numTries, - List exceptions) { + List exceptions) { StringBuilder buffer = new StringBuilder("Failed contacting "); buffer.append(callableVitals); buffer.append(" after "); @@ -114,7 +111,7 @@ public class RetriesExhaustedException extends IOException { } private static String getMessage(final int numRetries, - final List exceptions) { + final List exceptions) { StringBuilder buffer = new StringBuilder("Failed after attempts="); buffer.append(numRetries + 1); buffer.append(", exceptions:\n"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java index ecbada95c2a..8850cbe6940 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -27,25 +26,22 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * This subclass of {@link org.apache.hadoop.hbase.client.RetriesExhaustedException} - * is thrown when we have more information about which rows were causing which - * exceptions on what servers. You can call {@link #mayHaveClusterIssues()} - * and if the result is false, you have input error problems, otherwise you - * may have cluster issues. You can iterate over the causes, rows and last - * known server addresses via {@link #getNumExceptions()} and - * {@link #getCause(int)}, {@link #getRow(int)} and {@link #getHostnamePort(int)}. + * This subclass of {@link org.apache.hadoop.hbase.client.RetriesExhaustedException} is thrown when + * we have more information about which rows were causing which exceptions on what servers. You can + * call {@link #mayHaveClusterIssues()} and if the result is false, you have input error problems, + * otherwise you may have cluster issues. 
You can iterate over the causes, rows and last known + * server addresses via {@link #getNumExceptions()} and {@link #getCause(int)}, {@link #getRow(int)} + * and {@link #getHostnamePort(int)}. */ @SuppressWarnings("serial") @InterfaceAudience.Public -public class RetriesExhaustedWithDetailsException -extends RetriesExhaustedException { +public class RetriesExhaustedWithDetailsException extends RetriesExhaustedException { List exceptions; List actions; List hostnameAndPort; @@ -58,12 +54,10 @@ extends RetriesExhaustedException { super(msg, e); } - public RetriesExhaustedWithDetailsException(List exceptions, - List actions, - List hostnameAndPort) { - super("Failed " + exceptions.size() + " action" + - pluralize(exceptions) + ": " + - getDesc(exceptions, actions, hostnameAndPort)); + public RetriesExhaustedWithDetailsException(List exceptions, List actions, + List hostnameAndPort) { + super("Failed " + exceptions.size() + " action" + pluralize(exceptions) + ": " + + getDesc(exceptions, actions, hostnameAndPort)); this.exceptions = exceptions; this.actions = actions; @@ -102,7 +96,6 @@ extends RetriesExhaustedException { return res; } - public static String pluralize(Collection c) { return pluralize(c.size()); } @@ -111,9 +104,8 @@ extends RetriesExhaustedException { return c > 1 ? "s" : ""; } - public static String getDesc(List exceptions, - List actions, - List hostnamePort) { + public static String getDesc(List exceptions, List actions, + List hostnamePort) { String s = getDesc(classifyExs(exceptions)); StringBuilder addrs = new StringBuilder(s); addrs.append("servers with issues: "); @@ -147,14 +139,12 @@ extends RetriesExhaustedException { return errorWriter.toString(); } - public static Map classifyExs(List ths) { Map cls = new HashMap<>(); for (Throwable t : ths) { if (t == null) continue; String name = ""; - if (t instanceof DoNotRetryIOException || - t instanceof RegionTooBusyException) { + if (t instanceof DoNotRetryIOException || t instanceof RegionTooBusyException) { // If RegionTooBusyException, print message since it has Region name in it. // RegionTooBusyException message was edited to remove variance. Has regionname, server, // and why the exception; no longer has duration it waited on lock nor current memsize. @@ -172,8 +162,8 @@ extends RetriesExhaustedException { return cls; } - public static String getDesc(Map classificaton) { - StringBuilder classificatons =new StringBuilder(11); + public static String getDesc(Map classificaton) { + StringBuilder classificatons = new StringBuilder(11); for (Map.Entry e : classificaton.entrySet()) { classificatons.append(e.getKey()); classificatons.append(": "); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java index 601f8b882a5..8885069b41a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,35 +15,33 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * A Callable<T> that will be retried. 
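A hedged sketch of how client code typically inspects the per-row detail this exception carries after a failed batch; the Table handle and the list of Puts are assumed to exist elsewhere:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchFailureSketch {
  static void putAll(Table table, List<Put> puts) throws IOException {
    try {
      table.put(puts);
    } catch (RetriesExhaustedWithDetailsException e) {
      // false suggests an input problem (e.g. bad column family) rather than a cluster issue.
      boolean clusterIssue = e.mayHaveClusterIssues();
      for (int i = 0; i < e.getNumExceptions(); i++) {
        System.out.println("row=" + Bytes.toStringBinary(e.getRow(i).getRow())
          + " server=" + e.getHostnamePort(i)
          + " cause=" + e.getCause(i)
          + " clusterIssue=" + clusterIssue);
      }
      throw e;
    }
  }
}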
If {@link #call(int)} invocation throws exceptions, - * we will call {@link #throwable(Throwable, boolean)} with whatever the exception was. + * A Callable<T> that will be retried. If {@link #call(int)} invocation throws exceptions, we + * will call {@link #throwable(Throwable, boolean)} with whatever the exception was. * @param result class from executing this */ @InterfaceAudience.Private public interface RetryingCallable { /** - * Prepare by setting up any connections to servers, etc., ahead of call invocation. - * TODO: We call prepare before EVERY call. Seems wrong. FIX!!!! + * Prepare by setting up any connections to servers, etc., ahead of call invocation. TODO: We call + * prepare before EVERY call. Seems wrong. FIX!!!! * @param reload Set this to true if need to requery locations * @throws IOException e */ void prepare(final boolean reload) throws IOException; /** - * Called when call throws an exception and we are going to retry; take action to - * make it so we succeed on next call (clear caches, do relookup of locations, etc.). + * Called when call throws an exception and we are going to retry; take action to make it so we + * succeed on next call (clear caches, do relookup of locations, etc.). * @param t throwable which was thrown - * @param retrying True if we are in retrying mode (we are not in retrying mode when max - * retries == 1; we ARE in retrying mode if retries > 1 even when we are the - * last attempt) + * @param retrying True if we are in retrying mode (we are not in retrying mode when max retries + * == 1; we ARE in retrying mode if retries > 1 even when we are the last + * attempt) */ void throwable(final Throwable t, boolean retrying); @@ -64,7 +61,6 @@ public interface RetryingCallable { /** * Computes a result, or throws an exception if unable to do so. - * * @param callTimeout - the time available for this call. 0 for infinite. * @return computed result * @throws Exception if unable to compute a result diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java index 42490f0ab99..cb3b2fd3cd6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,34 +18,18 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * This class is designed to fit into the RetryingCaller class which forms the - * central piece of intelligence for the client side retries for most calls. - * - * One can extend this class and intercept the RetryingCaller and add additional - * logic into the execution of a simple HTable operations like get, delete etc. - * - * Concrete implementations of this calls are supposed to the thread safe. The - * object is used across threads to identify the fast failing threads. 
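RetryingCallable is internal (IA.Private), but a toy implementation makes the prepare / call / throwable / sleep contract described above concrete; the failure logic is illustrative only:

import java.io.IOException;
import org.apache.hadoop.hbase.client.RetryingCallable;

// Succeeds on the third attempt; everything here is a stand-in for real RPC work.
public class CountingCallable implements RetryingCallable<Integer> {
  private int attempts;

  @Override
  public void prepare(boolean reload) throws IOException {
    // A real callable would (re)resolve the region location here when reload is true.
  }

  @Override
  public void throwable(Throwable t, boolean retrying) {
    // A real callable would clear cached locations so the next attempt re-looks them up.
  }

  @Override
  public Integer call(int callTimeout) throws Exception {
    attempts++;
    if (attempts < 3) {
      throw new IOException("transient failure on attempt " + attempts);
    }
    return attempts;
  }

  @Override
  public String getExceptionMessageAdditionalDetail() {
    return "CountingCallable attempts=" + attempts;
  }

  @Override
  public long sleep(long pause, int tries) {
    return pause * (tries + 1); // simplistic backoff, for illustration only
  }
}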
- * - * For a concrete use case see {@link PreemptiveFastFailInterceptor} - * - * Example use case : - * try { - * interceptor.intercept - * doAction() - * } catch (Exception e) { - * interceptor.handleFailure - * } finally { - * interceptor.updateFaulireInfo - * } - * - * The {@link RetryingCallerInterceptor} also acts as a factory - * for getting a new {@link RetryingCallerInterceptorContext}. - * + * This class is designed to fit into the RetryingCaller class which forms the central piece of + * intelligence for the client side retries for most calls. One can extend this class and intercept + * the RetryingCaller and add additional logic into the execution of a simple HTable operations like + * get, delete etc. Concrete implementations of this calls are supposed to the thread safe. The + * object is used across threads to identify the fast failing threads. For a concrete use case see + * {@link PreemptiveFastFailInterceptor} Example use case : try { interceptor.intercept doAction() } + * catch (Exception e) { interceptor.handleFailure } finally { interceptor.updateFaulireInfo } The + * {@link RetryingCallerInterceptor} also acts as a factory for getting a new + * {@link RetryingCallerInterceptorContext}. */ @InterfaceAudience.Private @@ -57,41 +41,27 @@ abstract class RetryingCallerInterceptor { /** * This returns the context object for the current call. - * * @return context : the context that needs to be used during this call. */ public abstract RetryingCallerInterceptorContext createEmptyContext(); /** - * Call this function in case we caught a failure during retries. - * - * @param context - * : The context object that we obtained previously. - * @param t - * : The exception that we caught in this particular try - * @throws IOException + * Call this function in case we caught a failure during retries. n * : The context object that we + * obtained previously. n * : The exception that we caught in this particular try n */ - public abstract void handleFailure(RetryingCallerInterceptorContext context, - Throwable t) throws IOException; + public abstract void handleFailure(RetryingCallerInterceptorContext context, Throwable t) + throws IOException; /** - * Call this function alongside the actual call done on the callable. - * - * @param abstractRetryingCallerInterceptorContext - * @throws IOException + * Call this function alongside the actual call done on the callable. nn */ public abstract void intercept( - RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) - throws IOException; + RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) throws IOException; /** - * Call this function to update at the end of the retry. This is not necessary - * to happen. - * - * @param context + * Call this function to update at the end of the retry. This is not necessary to happen. 
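The reflowed javadoc above compresses the intended try/catch/finally usage pattern onto one line. Spelled out as a sketch (only compilable from inside org.apache.hadoop.hbase.client, since these interceptor types are package-private), the caller drives an interceptor roughly like this:

package org.apache.hadoop.hbase.client;

import java.io.IOException;

final class InterceptorFlowSketch {
  // Illustrative single attempt: intercept may fail fast, handleFailure records the
  // failure, and updateFailureInfo always runs, mirroring the pattern in the javadoc.
  static <T> T attemptOnce(RetryingCallerInterceptor interceptor, RetryingCallable<T> callable,
    int tries, int callTimeout) throws IOException {
    RetryingCallerInterceptorContext context = interceptor.createEmptyContext();
    try {
      interceptor.intercept(context.prepare(callable, tries));
      return callable.call(callTimeout);
    } catch (Throwable t) {
      interceptor.handleFailure(context, t);
      throw t instanceof IOException ? (IOException) t : new IOException(t);
    } finally {
      interceptor.updateFailureInfo(context);
    }
  }
}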
n */ - public abstract void updateFailureInfo( - RetryingCallerInterceptorContext context); + public abstract void updateFailureInfo(RetryingCallerInterceptorContext context); @Override public abstract String toString(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java index 70ef56d6d10..b810de46c44 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** * The context object used in the {@link RpcRetryingCaller} to enable - * {@link RetryingCallerInterceptor} to intercept calls. - * {@link RetryingCallerInterceptorContext} is the piece of information unique - * to a retrying call that transfers information from the call into the - * {@link RetryingCallerInterceptor} so that {@link RetryingCallerInterceptor} - * can take appropriate action according to the specific logic - * + * {@link RetryingCallerInterceptor} to intercept calls. {@link RetryingCallerInterceptorContext} is + * the piece of information unique to a retrying call that transfers information from the call into + * the {@link RetryingCallerInterceptor} so that {@link RetryingCallerInterceptor} can take + * appropriate action according to the specific logic */ @InterfaceAudience.Private abstract class RetryingCallerInterceptorContext { @@ -39,29 +37,19 @@ abstract class RetryingCallerInterceptorContext { public abstract void clear(); /** - * This prepares the context object by populating it with information specific - * to the implementation of the {@link RetryingCallerInterceptor} along with - * which this will be used. - * - * @param callable - * : The {@link RetryingCallable} that contains the information about - * the call that is being made. - * @return A new {@link RetryingCallerInterceptorContext} object that can be - * used for use in the current retrying call + * This prepares the context object by populating it with information specific to the + * implementation of the {@link RetryingCallerInterceptor} along with which this will be used. n * + * : The {@link RetryingCallable} that contains the information about the call that is being made. + * @return A new {@link RetryingCallerInterceptorContext} object that can be used for use in the + * current retrying call */ public abstract RetryingCallerInterceptorContext prepare(RetryingCallable callable); /** - * Telescopic extension that takes which of the many retries we are currently - * in. - * - * @param callable - * : The {@link RetryingCallable} that contains the information about - * the call that is being made. - * @param tries - * : The retry number that we are currently in. - * @return A new context object that can be used for use in the current - * retrying call + * Telescopic extension that takes which of the many retries we are currently in. n * : The + * {@link RetryingCallable} that contains the information about the call that is being made. n * : + * The retry number that we are currently in. 
+ * @return A new context object that can be used for use in the current retrying call */ public abstract RetryingCallerInterceptorContext prepare(RetryingCallable callable, int tries); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java index 838e8fc695f..06d557b195e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.lang.reflect.Constructor; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; @@ -27,49 +25,42 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Factory implementation to provide the {@link ConnectionImplementation} with - * the implementation of the {@link RetryingCallerInterceptor} that we would use - * to intercept the {@link RpcRetryingCaller} during the course of their calls. - * + * Factory implementation to provide the {@link ConnectionImplementation} with the implementation of + * the {@link RetryingCallerInterceptor} that we would use to intercept the + * {@link RpcRetryingCaller} during the course of their calls. */ @InterfaceAudience.Private class RetryingCallerInterceptorFactory { - private static final Logger LOG = LoggerFactory - .getLogger(RetryingCallerInterceptorFactory.class); + private static final Logger LOG = LoggerFactory.getLogger(RetryingCallerInterceptorFactory.class); private Configuration conf; private final boolean failFast; public static final RetryingCallerInterceptor NO_OP_INTERCEPTOR = - new NoOpRetryableCallerInterceptor(null); + new NoOpRetryableCallerInterceptor(null); public RetryingCallerInterceptorFactory(Configuration conf) { this.conf = conf; failFast = conf.getBoolean(HConstants.HBASE_CLIENT_FAST_FAIL_MODE_ENABLED, - HConstants.HBASE_CLIENT_ENABLE_FAST_FAIL_MODE_DEFAULT); + HConstants.HBASE_CLIENT_ENABLE_FAST_FAIL_MODE_DEFAULT); } /** - * This builds the implementation of {@link RetryingCallerInterceptor} that we - * specify in the conf and returns the same. - * - * To use {@link PreemptiveFastFailInterceptor}, set HBASE_CLIENT_ENABLE_FAST_FAIL_MODE to true. - * HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL is defaulted to {@link PreemptiveFastFailInterceptor} - * - * @return The factory build method which creates the - * {@link RetryingCallerInterceptor} object according to the - * configuration. + * This builds the implementation of {@link RetryingCallerInterceptor} that we specify in the conf + * and returns the same. To use {@link PreemptiveFastFailInterceptor}, set + * HBASE_CLIENT_ENABLE_FAST_FAIL_MODE to true. HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL is + * defaulted to {@link PreemptiveFastFailInterceptor} + * @return The factory build method which creates the {@link RetryingCallerInterceptor} object + * according to the configuration. 
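As the build() javadoc just above describes, the interceptor is chosen from configuration. A sketch of enabling fast-fail mode for a client connection, using only the HConstants keys referenced in this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FastFailConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Enables the fast-fail mode; HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL only needs to be
    // set when plugging in a custom interceptor, since it defaults to
    // PreemptiveFastFailInterceptor.
    conf.setBoolean(HConstants.HBASE_CLIENT_FAST_FAIL_MODE_ENABLED, true);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Retrying callers built from this connection go through the configured interceptor.
    }
  }
}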
*/ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Convert thrown exception to unchecked") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Convert thrown exception to unchecked") public RetryingCallerInterceptor build() { RetryingCallerInterceptor ret = NO_OP_INTERCEPTOR; if (failFast) { try { - Class c = conf.getClass( - HConstants.HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL, - PreemptiveFastFailInterceptor.class); - Constructor constructor = c - .getDeclaredConstructor(Configuration.class); + Class c = conf.getClass(HConstants.HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL, + PreemptiveFastFailInterceptor.class); + Constructor constructor = c.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); ret = (RetryingCallerInterceptor) constructor.newInstance(conf); } catch (Exception e) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java index e804e925460..59335d0ac99 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,10 +48,10 @@ class RetryingTimeTracker { remainingTime = 1; } if (remainingTime > Integer.MAX_VALUE) { - throw new RuntimeException("remainingTime=" + remainingTime + - " which is > Integer.MAX_VALUE"); + throw new RuntimeException( + "remainingTime=" + remainingTime + " which is > Integer.MAX_VALUE"); } - return (int)remainingTime; + return (int) remainingTime; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java index c0bf4b8d263..44ad82a38cd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java @@ -1,20 +1,19 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; @@ -22,11 +21,10 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForRev import java.io.IOException; import java.util.concurrent.ExecutorService; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.yetus.audience.InterfaceAudience; /** * A reversed client scanner which support backward scanning @@ -39,11 +37,11 @@ public class ReversedClientScanner extends ClientScanner { * {@link Scan}'s start row maybe changed. */ public ReversedClientScanner(Configuration conf, Scan scan, TableName tableName, - ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, - RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) - throws IOException { + ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) + throws IOException { super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, - primaryOperationTimeout); + primaryOperationTimeout); } @Override @@ -58,6 +56,6 @@ public class ReversedClientScanner extends ClientScanner { @Override protected ReversedScannerCallable createScannerCallable() { return new ReversedScannerCallable(getConnection(), getTable(), scan, this.scanMetrics, - this.rpcControllerFactory, getScanReplicaId()); + this.rpcControllerFactory, getScanReplicaId()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java index 727b1409c57..59dcf79bbfc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java @@ -1,20 +1,19 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; @@ -24,19 +23,17 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow; import java.io.IOException; import java.io.InterruptedIOException; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.Bytes; - +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * A reversed ScannerCallable which supports backward scanning. @@ -47,14 +44,14 @@ public class ReversedScannerCallable extends ScannerCallable { private byte[] locationSearchKey; /** - * @param connection which connection - * @param tableName table callable is on - * @param scan the scan to execute + * @param connection which connection + * @param tableName table callable is on + * @param scan the scan to execute * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect - * metrics - * @param rpcFactory to create an {@link com.google.protobuf.RpcController} to talk to the - * regionserver - * @param replicaId the replica id + * metrics + * @param rpcFactory to create an {@link com.google.protobuf.RpcController} to talk to the + * regionserver + * @param replicaId the replica id */ public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) { @@ -68,8 +65,7 @@ public class ReversedScannerCallable extends ScannerCallable { // when trying to clear cache for an empty row. 
if (location != null && locationSearchKey != null) { getConnection().updateCachedLocations(getTableName(), - location.getRegionInfo().getRegionName(), - locationSearchKey, t, location.getServerName()); + location.getRegionInfo().getRegionName(), locationSearchKey, t, location.getServerName()); } } @@ -82,8 +78,10 @@ public class ReversedScannerCallable extends ScannerCallable { throw new InterruptedIOException(); } - if (reload && getTableName() != null && !getTableName().equals(TableName.META_TABLE_NAME) - && getConnection().isTableDisabled(getTableName())) { + if ( + reload && getTableName() != null && !getTableName().equals(TableName.META_TABLE_NAME) + && getConnection().isTableDisabled(getTableName()) + ) { throw new TableNotEnabledException(getTableName().getNameAsString() + " is disabled."); } @@ -100,16 +98,15 @@ public class ReversedScannerCallable extends ScannerCallable { // The locateStart row is an approximation. So we need to search between // that and the actual row in order to really find the last region byte[] locateStartRow = createCloseRowBefore(getRow()); - Pair lastRegionAndKey = locateLastRegionInRange( - locateStartRow, getRow()); + Pair lastRegionAndKey = + locateLastRegionInRange(locateStartRow, getRow()); this.location = lastRegionAndKey.getFirst(); this.locationSearchKey = lastRegionAndKey.getSecond(); } if (location == null || location.getServerName() == null) { - throw new IOException("Failed to find location, tableName=" - + getTableName() + ", row=" + Bytes.toStringBinary(getRow()) + ", reload=" - + reload); + throw new IOException("Failed to find location, tableName=" + getTableName() + ", row=" + + Bytes.toStringBinary(getRow()) + ", reload=" + reload); } setStub(getConnection().getClient(getLocation().getServerName())); @@ -126,18 +123,16 @@ public class ReversedScannerCallable extends ScannerCallable { /** * Get the last region before the endkey, which will be used to execute the reverse scan * @param startKey Starting row in range, inclusive - * @param endKey Ending row in range, exclusive - * @return The last location, and the rowKey used to find it. May be null, - * if a region could not be found. + * @param endKey Ending row in range, exclusive + * @return The last location, and the rowKey used to find it. May be null, if a region could not + * be found. */ private Pair locateLastRegionInRange(byte[] startKey, byte[] endKey) - throws IOException { - final boolean endKeyIsEndOfTable = Bytes.equals(endKey, - HConstants.EMPTY_END_ROW); + throws IOException { + final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW); if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) { - throw new IllegalArgumentException("Invalid range: " - + Bytes.toStringBinary(startKey) + " > " - + Bytes.toStringBinary(endKey)); + throw new IllegalArgumentException( + "Invalid range: " + Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey)); } HRegionLocation lastRegion = null; @@ -152,12 +147,14 @@ public class ReversedScannerCallable extends ScannerCallable { lastRegion = regionLocation; } else { throw new DoNotRetryIOException( - "Does hbase:meta exist hole? Locating row " + Bytes.toStringBinary(currentKey) + - " returns incorrect region " + regionLocation.getRegionInfo()); + "Does hbase:meta exist hole? 
Locating row " + Bytes.toStringBinary(currentKey) + + " returns incorrect region " + regionLocation.getRegionInfo()); } currentKey = regionLocation.getRegionInfo().getEndKey(); - } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) - && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0)); + } while ( + !Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) + && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0) + ); return new Pair<>(lastRegion, lastFoundKey); } @@ -165,7 +162,7 @@ public class ReversedScannerCallable extends ScannerCallable { @Override public ScannerCallable getScannerCallableForReplica(int id) { ReversedScannerCallable r = new ReversedScannerCallable(getConnection(), getTableName(), - this.getScan(), this.scanMetrics, rpcControllerFactory, id); + this.getScan(), this.scanMetrics, rpcControllerFactory, id); r.setCaching(this.getCaching()); return r; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java index 3152f9e6b6e..ad1089f2aad 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,14 +27,15 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public interface Row extends Comparable { Comparator COMPARATOR = (v1, v2) -> Bytes.compareTo(v1.getRow(), v2.getRow()); + /** * @return The row. */ - byte [] getRow(); + byte[] getRow(); /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Row#COMPARATOR} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Row#COMPARATOR} instead */ @Deprecated int compareTo(Row var1); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java index 9f92c66d317..ba613bb1773 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,9 +20,8 @@ package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Provide a way to access the inner buffer. - * The purpose is to reduce the elapsed time to move a large number - * of elements between collections. + * Provide a way to access the inner buffer. The purpose is to reduce the elapsed time to move a + * large number of elements between collections. 
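The ReversedClientScanner / ReversedScannerCallable changes above are internal; from the public API a backward scan is requested on the Scan itself. A brief, hedged usage sketch (the Table handle and row keys are assumed):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReverseScanSketch {
  static void scanBackwards(Table table) throws IOException {
    Scan scan = new Scan()
      .withStartRow(Bytes.toBytes("row-9999")) // reversed scans start at the "largest" row
      .setReversed(true);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        System.out.println(Bytes.toStringBinary(result.getRow()));
      }
    }
  }
}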
* @param */ @InterfaceAudience.Private diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java index 084e259a8f7..6b2535989bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java @@ -22,50 +22,46 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** - * Performs multiple mutations atomically on a single row. - * - * The mutations are performed in the order in which they - * were added. - * - *
<p>
    We compare and equate mutations based off their row so be careful putting RowMutations - * into Sets or using them as keys in Maps. + * Performs multiple mutations atomically on a single row. The mutations are performed in the order + * in which they were added. + *
<p>
    + * We compare and equate mutations based off their row so be careful putting RowMutations into Sets + * or using them as keys in Maps. */ @InterfaceAudience.Public public class RowMutations implements Row { /** * Create a {@link RowMutations} with the specified mutations. - * @param mutations the mutations to send - * @return RowMutations - * @throws IOException if any row in mutations is different to another + * @param mutations the mutations to send n * @throws IOException if any row in mutations is + * different to another */ public static RowMutations of(List mutations) throws IOException { if (CollectionUtils.isEmpty(mutations)) { throw new IllegalArgumentException("Cannot instantiate a RowMutations by empty list"); } - return new RowMutations(mutations.get(0).getRow(), mutations.size()) - .add(mutations); + return new RowMutations(mutations.get(0).getRow(), mutations.size()).add(mutations); } private final List mutations; - private final byte [] row; + private final byte[] row; - public RowMutations(byte [] row) { + public RowMutations(byte[] row) { this(row, -1); } + /** * Create an atomic mutation for the specified row. - * @param row row key + * @param row row key * @param initialCapacity the initial capacity of the RowMutations */ - public RowMutations(byte [] row, int initialCapacity) { + public RowMutations(byte[] row, int initialCapacity) { this.row = Bytes.copy(Mutation.checkRow(row)); if (initialCapacity <= 0) { this.mutations = new ArrayList<>(); @@ -78,8 +74,7 @@ public class RowMutations implements Row { * Add a {@link Put} operation to the list of mutations * @param p The {@link Put} to add * @throws IOException if the row of added mutation doesn't match the original row - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #add(Mutation)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use {@link #add(Mutation)} */ @Deprecated public void add(Put p) throws IOException { @@ -90,8 +85,7 @@ public class RowMutations implements Row { * Add a {@link Delete} operation to the list of mutations * @param d The {@link Delete} to add * @throws IOException if the row of added mutation doesn't match the original row - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #add(Mutation)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use {@link #add(Mutation)} */ @Deprecated public void add(Delete d) throws IOException { @@ -100,7 +94,6 @@ public class RowMutations implements Row { /** * Currently only supports {@link Put} and {@link Delete} mutations. - * * @param mutation The data to send. * @throws IOException if the row of added mutation doesn't match the original row */ @@ -115,9 +108,9 @@ public class RowMutations implements Row { public RowMutations add(List mutations) throws IOException { for (Mutation mutation : mutations) { if (!Bytes.equals(row, mutation.getRow())) { - throw new WrongRowIOException("The row in the recently added Mutation <" + - Bytes.toStringBinary(mutation.getRow()) + "> doesn't match the original one <" + - Bytes.toStringBinary(this.row) + ">"); + throw new WrongRowIOException( + "The row in the recently added Mutation <" + Bytes.toStringBinary(mutation.getRow()) + + "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">"); } } this.mutations.addAll(mutations); @@ -125,8 +118,8 @@ public class RowMutations implements Row { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 
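Since the javadoc above documents the same-row contract, a short usage sketch of the public RowMutations API follows; the family and qualifier names are made up:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsSketch {
  static void mutateAtomically(Table table) throws IOException {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("cf");
    Put put = new Put(row).addColumn(cf, Bytes.toBytes("a"), Bytes.toBytes("value"));
    Delete delete = new Delete(row).addColumns(cf, Bytes.toBytes("b"));
    // of() verifies every mutation targets the same row; a mismatch raises WrongRowIOException.
    table.mutateRow(RowMutations.of(Arrays.asList(put, delete)));
  }
}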
- * Use {@link Row#COMPARATOR} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Row#COMPARATOR} instead */ @Deprecated @Override @@ -135,27 +128,25 @@ public class RowMutations implements Row { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * No replacement + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. No replacement */ @Deprecated @Override public boolean equals(Object obj) { if (obj == this) return true; if (obj instanceof RowMutations) { - RowMutations other = (RowMutations)obj; + RowMutations other = (RowMutations) obj; return compareTo(other) == 0; } return false; } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * No replacement + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. No replacement */ @Deprecated @Override - public int hashCode(){ + public int hashCode() { return Arrays.hashCode(row); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java index ef5ea05e113..02268451c7b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +17,11 @@ */ package org.apache.hadoop.hbase.client; - import org.apache.yetus.audience.InterfaceAudience; /** - * Gets or Scans throw this exception if running without in-row scan flag - * set and row size appears to exceed max configured size (configurable via - * hbase.table.max.rowsize). + * Gets or Scans throw this exception if running without in-row scan flag set and row size appears + * to exceed max configured size (configurable via hbase.table.max.rowsize). 
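The limit mentioned above is a configuration knob checked while serving Gets and Scans; a minimal sketch of raising it (the 256 MB value is arbitrary, for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MaxRowSizeConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Rows whose cells exceed this many bytes make Gets/Scans fail with RowTooBigException.
    conf.setLong("hbase.table.max.rowsize", 256L * 1024 * 1024);
  }
}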
*/ @InterfaceAudience.Public public class RowTooBigException extends DoNotRetryRegionException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java index 660d74e74c2..2c320d3a9d1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java @@ -86,9 +86,7 @@ public class RpcConnectionRegistry extends AbstractRpcBasedConnectionRegistry { if (StringUtils.isBlank(configuredBootstrapNodes)) { return MasterRegistry.getConnectionString(conf); } - return Splitter.on(ADDRS_CONF_SEPARATOR) - .trimResults() - .splitToStream(configuredBootstrapNodes) + return Splitter.on(ADDRS_CONF_SEPARATOR).trimResults().splitToStream(configuredBootstrapNodes) .collect(Collectors.joining(String.valueOf(ADDRS_CONF_SEPARATOR))); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallable.java index 4f93bd9a26e..c69ac5560b1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.Closeable; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java index 9d30221cd55..a5e917010cb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public interface RpcRetryingCaller { @@ -28,23 +27,22 @@ public interface RpcRetryingCaller { /** * Retries if invocation fails. * @param callTimeout Timeout for this call - * @param callable The {@link RetryingCallable} to run. + * @param callable The {@link RetryingCallable} to run. * @return an object of type T - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RuntimeException other unspecified error */ T callWithRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException; + throws IOException, RuntimeException; /** - * Call the server once only. - * {@link RetryingCallable} has a strange shape so we can do retries. Use this invocation if you - * want to do a single call only (A call to {@link RetryingCallable#call(int)} will not likely - * succeed). + * Call the server once only. 
{@link RetryingCallable} has a strange shape so we can do retries. + * Use this invocation if you want to do a single call only (A call to + * {@link RetryingCallable#call(int)} will not likely succeed). * @return an object of type T - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RuntimeException other unspecified error */ T callWithoutRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException; + throws IOException, RuntimeException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java index 7425f8837f6..19daacd77a7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class RpcRetryingCallerFactory { private final ConnectionConfiguration connectionConf; private final RetryingCallerInterceptor interceptor; private final int startLogErrorsCnt; - /* These below data members are UNUSED!!!*/ + /* These below data members are UNUSED!!! */ private final boolean enableBackPressure; private ServerStatisticTracker stats; @@ -46,10 +46,10 @@ public class RpcRetryingCallerFactory { this.conf = conf; this.connectionConf = new ConnectionConfiguration(conf); startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY, - AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT); + AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT); this.interceptor = interceptor; enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, - HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE); + HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE); } /** @@ -64,11 +64,9 @@ public class RpcRetryingCallerFactory { */ public RpcRetryingCaller newCaller(int rpcTimeout) { // We store the values in the factory instance. This way, constructing new objects - // is cheap as it does not require parsing a complex structure. - return new RpcRetryingCallerImpl<>( - connectionConf.getPauseMillis(), - connectionConf.getPauseMillisForServerOverloaded(), - connectionConf.getRetriesNumber(), + // is cheap as it does not require parsing a complex structure. + return new RpcRetryingCallerImpl<>(connectionConf.getPauseMillis(), + connectionConf.getPauseMillisForServerOverloaded(), connectionConf.getRetriesNumber(), interceptor, startLogErrorsCnt, rpcTimeout); } @@ -77,13 +75,10 @@ public class RpcRetryingCallerFactory { */ public RpcRetryingCaller newCaller() { // We store the values in the factory instance. This way, constructing new objects - // is cheap as it does not require parsing a complex structure. - return new RpcRetryingCallerImpl<>( - connectionConf.getPauseMillis(), - connectionConf.getPauseMillisForServerOverloaded(), - connectionConf.getRetriesNumber(), - interceptor, startLogErrorsCnt, - connectionConf.getRpcTimeout()); + // is cheap as it does not require parsing a complex structure. 
+ return new RpcRetryingCallerImpl<>(connectionConf.getPauseMillis(), + connectionConf.getPauseMillisForServerOverloaded(), connectionConf.getRetriesNumber(), + interceptor, startLogErrorsCnt, connectionConf.getRpcTimeout()); } public static RpcRetryingCallerFactory instantiate(Configuration configuration) { @@ -91,22 +86,21 @@ public class RpcRetryingCallerFactory { } public static RpcRetryingCallerFactory instantiate(Configuration configuration, - ServerStatisticTracker stats) { + ServerStatisticTracker stats) { return instantiate(configuration, RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, stats); } public static RpcRetryingCallerFactory instantiate(Configuration configuration, - RetryingCallerInterceptor interceptor, ServerStatisticTracker stats) { + RetryingCallerInterceptor interceptor, ServerStatisticTracker stats) { String clazzName = RpcRetryingCallerFactory.class.getName(); String rpcCallerFactoryClazz = - configuration.get(RpcRetryingCallerFactory.CUSTOM_CALLER_CONF_KEY, clazzName); + configuration.get(RpcRetryingCallerFactory.CUSTOM_CALLER_CONF_KEY, clazzName); RpcRetryingCallerFactory factory; if (rpcCallerFactoryClazz.equals(clazzName)) { factory = new RpcRetryingCallerFactory(configuration, interceptor); } else { - factory = ReflectionUtils.instantiateWithCustomCtor( - rpcCallerFactoryClazz, new Class[] { Configuration.class }, - new Object[] { configuration }); + factory = ReflectionUtils.instantiateWithCustomCtor(rpcCallerFactoryClazz, + new Class[] { Configuration.class }, new Object[] { configuration }); } // setting for backwards compat with existing caller factories, rather than in the ctor diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java index 57a86417443..c36a02a885d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; @@ -29,7 +27,6 @@ import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseServerException; import org.apache.hadoop.hbase.exceptions.PreemptiveFastFailException; @@ -40,16 +37,15 @@ import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; /** - * Runs an rpc'ing {@link RetryingCallable}. Sets into rpc client - * threadlocal outstanding timeouts as so we don't persist too much. - * Dynamic rather than static so can set the generic appropriately. - * - * This object has a state. It should not be used by in parallel by different threads. - * Reusing it is possible however, even between multiple threads. 
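A sketch tying the factory and caller interfaces above together (internal, IA.Private API; CountingCallable is the toy callable sketched earlier in this section):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RpcRetryingCaller;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;

public class RetryingCallerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf);
    RpcRetryingCaller<Integer> caller = factory.newCaller();
    // Runs CountingCallable.call() until it succeeds or retries are exhausted,
    // within an overall 60 second call timeout.
    Integer attempts = caller.callWithRetries(new CountingCallable(), 60_000);
    System.out.println("succeeded after " + attempts + " attempts");
  }
}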
However, the user will - * have to manage the synchronization on its side: there is no synchronization inside the class. + * Runs an rpc'ing {@link RetryingCallable}. Sets into rpc client threadlocal outstanding timeouts + * as so we don't persist too much. Dynamic rather than static so can set the generic appropriately. + * This object has a state. It should not be used by in parallel by different threads. Reusing it is + * possible however, even between multiple threads. However, the user will have to manage the + * synchronization on its side: there is no synchronization inside the class. */ @InterfaceAudience.Private public class RpcRetryingCallerImpl implements RpcRetryingCaller { @@ -75,7 +71,7 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { } public RpcRetryingCallerImpl(long pause, long pauseForServerOverloaded, int retries, - RetryingCallerInterceptor interceptor, int startLogErrorsCnt, int rpcTimeout) { + RetryingCallerInterceptor interceptor, int startLogErrorsCnt, int rpcTimeout) { this.pause = pause; this.pauseForServerOverloaded = pauseForServerOverloaded; this.maxAttempts = retries2Attempts(retries); @@ -87,16 +83,16 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { } @Override - public void cancel(){ + public void cancel() { cancelled.set(true); - synchronized (cancelled){ + synchronized (cancelled) { cancelled.notifyAll(); } } @Override public T callWithRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException { + throws IOException, RuntimeException { List exceptions = new ArrayList<>(); tracker.start(); context.clear(); @@ -123,12 +119,12 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { if (tries > startLogErrorsCnt) { if (LOG.isInfoEnabled()) { StringBuilder builder = new StringBuilder("Call exception, tries=").append(tries) - .append(", retries=").append(maxAttempts).append(", started=") - .append((EnvironmentEdgeManager.currentTime() - tracker.getStartTime())) - .append(" ms ago, ").append("cancelled=").append(cancelled.get()) - .append(", msg=").append(t.getMessage()) - .append(", details=").append(callable.getExceptionMessageAdditionalDetail()) - .append(", see https://s.apache.org/timeout"); + .append(", retries=").append(maxAttempts).append(", started=") + .append((EnvironmentEdgeManager.currentTime() - tracker.getStartTime())) + .append(" ms ago, ").append("cancelled=").append(cancelled.get()).append(", msg=") + .append(t.getMessage()).append(", details=") + .append(callable.getExceptionMessageAdditionalDetail()) + .append(", see https://s.apache.org/timeout"); if (LOG.isDebugEnabled()) { builder.append(", exception=").append(StringUtils.stringifyException(t)); LOG.debug(builder.toString()); @@ -140,8 +136,8 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { callable.throwable(t, maxAttempts != 1); RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(t, - EnvironmentEdgeManager.currentTime(), toString()); + new RetriesExhaustedException.ThrowableWithExtraContext(t, + EnvironmentEdgeManager.currentTime(), toString()); exceptions.add(qt); if (tries >= maxAttempts - 1) { throw new RetriesExhaustedException(tries, exceptions); @@ -151,16 +147,16 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { // get right pause time, start by RETRY_BACKOFF[0] * pauseBase, where pauseBase might be // special when encountering an exception indicating the server is overloaded. 
// see #HBASE-17114 and HBASE-26807 - long pauseBase = HBaseServerException.isServerOverloaded(t) - ? pauseForServerOverloaded : pause; + long pauseBase = + HBaseServerException.isServerOverloaded(t) ? pauseForServerOverloaded : pause; expectedSleep = callable.sleep(pauseBase, tries); // If, after the planned sleep, there won't be enough time left, we stop now. long duration = singleCallDuration(expectedSleep); if (duration > callTimeout) { - String msg = "callTimeout=" + callTimeout + ", callDuration=" + duration + - ": " + t.getMessage() + " " + callable.getExceptionMessageAdditionalDetail(); - throw (SocketTimeoutException)(new SocketTimeoutException(msg).initCause(t)); + String msg = "callTimeout=" + callTimeout + ", callDuration=" + duration + ": " + + t.getMessage() + " " + callable.getExceptionMessageAdditionalDetail(); + throw (SocketTimeoutException) (new SocketTimeoutException(msg).initCause(t)); } } finally { interceptor.updateFailureInfo(context); @@ -174,8 +170,8 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { } if (cancelled.get()) return null; } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted after " + tries - + " tries while maxAttempts=" + maxAttempts); + throw new InterruptedIOException( + "Interrupted after " + tries + " tries while maxAttempts=" + maxAttempts); } } } @@ -189,7 +185,7 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { @Override public T callWithoutRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException { + throws IOException, RuntimeException { // The code of this method should be shared with withRetries. try { callable.prepare(false); @@ -199,7 +195,7 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { ExceptionUtil.rethrowIfInterrupt(t2); // It would be nice to clear the location cache here. if (t2 instanceof IOException) { - throw (IOException)t2; + throw (IOException) t2; } else { throw new RuntimeException(t2); } @@ -219,29 +215,29 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { } } if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); + t = ((RemoteException) t).unwrapRemoteException(); } if (t instanceof LinkageError) { throw new DoNotRetryIOException(t); } if (t instanceof ServiceException) { - ServiceException se = (ServiceException)t; + ServiceException se = (ServiceException) t; Throwable cause = se.getCause(); if (cause instanceof DoNotRetryIOException) { - throw (DoNotRetryIOException)cause; + throw (DoNotRetryIOException) cause; } // Don't let ServiceException out; its rpc specific. // It also could be a RemoteException, so go around again. 
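The pause base chosen above is then scaled by the retry backoff table before sleeping. A small worked illustration of the resulting pre-jitter sleep schedule, assuming a 100 ms base pause; the real client also applies random jitter on top of these values:

import org.apache.hadoop.hbase.HConstants;

public class BackoffScheduleSketch {
  public static void main(String[] args) {
    // hbase.client.pause in ms; replaced by the server-overloaded pause when the
    // exception indicates the server is overloaded.
    long pauseBase = 100;
    for (int tries = 0; tries < 8; tries++) {
      int idx = Math.min(tries, HConstants.RETRY_BACKOFF.length - 1);
      System.out.println("attempt " + tries + ": sleep ~"
        + pauseBase * HConstants.RETRY_BACKOFF[idx] + " ms");
    }
  }
}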
t = translateException(cause); } else if (t instanceof DoNotRetryIOException) { - throw (DoNotRetryIOException)t; + throw (DoNotRetryIOException) t; } return t; } - private int getTimeout(int callTimeout){ + private int getTimeout(int callTimeout) { int timeout = tracker.getRemainingTime(callTimeout); - if (timeout <= 0 || rpcTimeout > 0 && rpcTimeout < timeout){ + if (timeout <= 0 || rpcTimeout > 0 && rpcTimeout < timeout) { timeout = rpcTimeout; } return timeout; @@ -249,8 +245,7 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { @Override public String toString() { - return "RpcRetryingCaller{" + "globalStartTime=" + - Instant.ofEpochMilli(tracker.getStartTime()) + - ", pause=" + pause + ", maxAttempts=" + maxAttempts + '}'; + return "RpcRetryingCaller{" + "globalStartTime=" + Instant.ofEpochMilli(tracker.getStartTime()) + + ", pause=" + pause + ", maxAttempts=" + maxAttempts + '}'; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 260cb8c26f2..902993c2bd8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; - import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET; import java.io.IOException; @@ -29,7 +28,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; @@ -49,15 +47,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** - * Caller that goes to replica if the primary region does no answer within a configurable - * timeout. If the timeout is reached, it calls all the secondary replicas, and returns - * the first answer. If the answer comes from one of the secondary replica, it will - * be marked as stale. + * Caller that goes to replica if the primary region does no answer within a configurable timeout. + * If the timeout is reached, it calls all the secondary replicas, and returns the first answer. If + * the answer comes from one of the secondary replica, it will be marked as stale. 
*/ @InterfaceAudience.Private public class RpcRetryingCallerWithReadReplicas { private static final Logger LOG = - LoggerFactory.getLogger(RpcRetryingCallerWithReadReplicas.class); + LoggerFactory.getLogger(RpcRetryingCallerWithReadReplicas.class); protected final ExecutorService pool; protected final ClusterConnection cConnection; @@ -71,11 +68,9 @@ public class RpcRetryingCallerWithReadReplicas { private final RpcControllerFactory rpcControllerFactory; private final RpcRetryingCallerFactory rpcRetryingCallerFactory; - public RpcRetryingCallerWithReadReplicas( - RpcControllerFactory rpcControllerFactory, TableName tableName, - ClusterConnection cConnection, final Get get, - ExecutorService pool, int retries, int operationTimeout, int rpcTimeout, - int timeBeforeReplicas) { + public RpcRetryingCallerWithReadReplicas(RpcControllerFactory rpcControllerFactory, + TableName tableName, ClusterConnection cConnection, final Get get, ExecutorService pool, + int retries, int operationTimeout, int rpcTimeout, int timeBeforeReplicas) { this.rpcControllerFactory = rpcControllerFactory; this.tableName = tableName; this.cConnection = cConnection; @@ -90,25 +85,24 @@ public class RpcRetryingCallerWithReadReplicas { } /** - * A RegionServerCallable that takes into account the replicas, i.e. - * - the call can be on any replica - * - we need to stop retrying when the call is completed - * - we can be interrupted + * A RegionServerCallable that takes into account the replicas, i.e. - the call can be on any + * replica - we need to stop retrying when the call is completed - we can be interrupted */ class ReplicaRegionServerCallable extends CancellableRegionServerCallable { final int id; + public ReplicaRegionServerCallable(int id, HRegionLocation location) { super(RpcRetryingCallerWithReadReplicas.this.cConnection, - RpcRetryingCallerWithReadReplicas.this.tableName, get.getRow(), - rpcControllerFactory.newController(), rpcTimeout, new RetryingTimeTracker(), PRIORITY_UNSET); + RpcRetryingCallerWithReadReplicas.this.tableName, get.getRow(), + rpcControllerFactory.newController(), rpcTimeout, new RetryingTimeTracker(), + PRIORITY_UNSET); this.id = id; this.location = location; } /** - * Two responsibilities - * - if the call is already completed (by another replica) stops the retries. - * - set the location to the right region, depending on the replica. + * Two responsibilities - if the call is already completed (by another replica) stops the + * retries. - set the location to the right region, depending on the replica. */ @Override // TODO: Very like the super class implemenation. Can we shrink this down? @@ -124,7 +118,7 @@ public class RpcRetryingCallerWithReadReplicas { if (location == null || location.getServerName() == null) { // With this exception, there will be a retry. The location can be null for a replica - // when the table is created or after a split. + // when the table is created or after a split. throw new HBaseIOException("There is no location for replica id #" + id); } @@ -140,7 +134,7 @@ public class RpcRetryingCallerWithReadReplicas { } byte[] reg = location.getRegionInfo().getRegionName(); ClientProtos.GetRequest request = RequestConverter.buildGetRequest(reg, get); - HBaseRpcController hrc = (HBaseRpcController)getRpcController(); + HBaseRpcController hrc = (HBaseRpcController) getRpcController(); hrc.reset(); hrc.setCallTimeout(rpcTimeout); hrc.setPriority(tableName); @@ -154,24 +148,19 @@ public class RpcRetryingCallerWithReadReplicas { /** *

    - * Algo: - * - we put the query into the execution pool. - * - after x ms, if we don't have a result, we add the queries for the secondary replicas - * - we take the first answer - * - when done, we cancel what's left. Cancelling means: - * - removing from the pool if the actual call was not started - * - interrupting the call if it has started - * Client side, we need to take into account - * - a call is not executed immediately after being put into the pool - * - a call is a thread. Let's not multiply the number of thread by the number of replicas. - * Server side, if we can cancel when it's still in the handler pool, it's much better, as a call - * can take some i/o. + * Algo: - we put the query into the execution pool. - after x ms, if we don't have a result, we + * add the queries for the secondary replicas - we take the first answer - when done, we cancel + * what's left. Cancelling means: - removing from the pool if the actual call was not started - + * interrupting the call if it has started Client side, we need to take into account - a call is + * not executed immediately after being put into the pool - a call is a thread. Let's not multiply + * the number of thread by the number of replicas. Server side, if we can cancel when it's still + * in the handler pool, it's much better, as a call can take some i/o. *
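The paragraph above (submit the primary call, wait briefly, fan out to the secondaries, keep the first answer, cancel the rest) is a general hedging pattern. A minimal, generic sketch of that pattern with plain java.util.concurrent; it is not the HBase implementation, and all names in it are illustrative:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class HedgedCallSketch {
      // First answer wins: the primary gets a head start, then the secondaries are submitted too.
      static <T> T hedge(ExecutorService pool, Callable<T> primary, List<Callable<T>> secondaries,
          long primaryWaitMillis, long overallTimeoutMillis) throws Exception {
        CompletionService<T> cs = new ExecutorCompletionService<>(pool);
        List<Future<T>> submitted = new ArrayList<>();
        submitted.add(cs.submit(primary));
        Future<T> first = cs.poll(primaryWaitMillis, TimeUnit.MILLISECONDS);
        if (first == null) {
          for (Callable<T> secondary : secondaries) {
            submitted.add(cs.submit(secondary));
          }
          first = cs.poll(overallTimeoutMillis, TimeUnit.MILLISECONDS);
        }
        try {
          if (first == null) {
            throw new TimeoutException("no replica answered in time");
          }
          return first.get();
        } finally {
          // "Cancel what's left": drop queued calls, interrupt running ones.
          for (Future<T> f : submitted) {
            f.cancel(true);
          }
        }
      }
    }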

    - * Globally, the number of retries, timeout and so on still applies, but it's per replica, - * not global. We continue until all retries are done, or all timeouts are exceeded. + * Globally, the number of retries, timeout and so on still applies, but it's per replica, not + * global. We continue until all retries are done, or all timeouts are exceeded. */ public Result call(int operationTimeout) - throws DoNotRetryIOException, InterruptedIOException, RetriesExhaustedException { + throws DoNotRetryIOException, InterruptedIOException, RetriesExhaustedException { boolean isTargetReplicaSpecified = (get.getReplicaId() >= 0); RegionLocations rl = null; @@ -188,7 +177,7 @@ public class RpcRetryingCallerWithReadReplicas { // We cannot get the primary replica location, it is possible that the region // server hosting meta is down, it needs to proceed to try cached replicas. if (cConnection instanceof ConnectionImplementation) { - rl = ((ConnectionImplementation)cConnection).getCachedLocation(tableName, get.getRow()); + rl = ((ConnectionImplementation) cConnection).getCachedLocation(tableName, get.getRow()); if (rl == null) { // No cached locations throw e; @@ -204,11 +193,11 @@ public class RpcRetryingCallerWithReadReplicas { } final ResultBoundedCompletionService cs = - new ResultBoundedCompletionService<>(this.rpcRetryingCallerFactory, pool, rl.size()); + new ResultBoundedCompletionService<>(this.rpcRetryingCallerFactory, pool, rl.size()); int startIndex = 0; int endIndex = rl.size(); - if(isTargetReplicaSpecified) { + if (isTargetReplicaSpecified) { addCallsForReplica(cs, rl, get.getReplicaId(), get.getReplicaId()); endIndex = 1; } else { @@ -216,9 +205,10 @@ public class RpcRetryingCallerWithReadReplicas { addCallsForReplica(cs, rl, 0, 0); try { // wait for the timeout to see whether the primary responds back - Future f = cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, microseconds + Future f = cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, + // microseconds if (f != null) { - return f.get(); //great we got a response + return f.get(); // great we got a response } if (cConnection.getConnectionMetrics() != null) { cConnection.getConnectionMetrics().incrHedgedReadOps(); @@ -238,7 +228,7 @@ public class RpcRetryingCallerWithReadReplicas { } } else { // Since primary replica is skipped, the endIndex needs to be adjusted accordingly - endIndex --; + endIndex--; } // submit call for the all of the secondaries at once @@ -246,14 +236,17 @@ public class RpcRetryingCallerWithReadReplicas { } try { ResultBoundedCompletionService.QueueingFuture f = - cs.pollForFirstSuccessfullyCompletedTask(operationTimeout, TimeUnit.MILLISECONDS, startIndex, endIndex); + cs.pollForFirstSuccessfullyCompletedTask(operationTimeout, TimeUnit.MILLISECONDS, + startIndex, endIndex); if (f == null) { - throw new RetriesExhaustedException("Timed out after " + operationTimeout + - "ms. Get is sent to replicas with startIndex: " + startIndex + - ", endIndex: " + endIndex + ", Locations: " + rl); + throw new RetriesExhaustedException( + "Timed out after " + operationTimeout + "ms. 
Get is sent to replicas with startIndex: " + + startIndex + ", endIndex: " + endIndex + ", Locations: " + rl); } - if (cConnection.getConnectionMetrics() != null && !isTargetReplicaSpecified && - !skipPrimary && f.getReplicaId() != RegionReplicaUtil.DEFAULT_REPLICA_ID) { + if ( + cConnection.getConnectionMetrics() != null && !isTargetReplicaSpecified && !skipPrimary + && f.getReplicaId() != RegionReplicaUtil.DEFAULT_REPLICA_ID + ) { cConnection.getConnectionMetrics().incrHedgedReadWin(); } return f.get(); @@ -274,11 +267,10 @@ public class RpcRetryingCallerWithReadReplicas { } /** - * Extract the real exception from the ExecutionException, and throws what makes more - * sense. + * Extract the real exception from the ExecutionException, and throws what makes more sense. */ static void throwEnrichedException(ExecutionException e, int retries) - throws RetriesExhaustedException, DoNotRetryIOException { + throws RetriesExhaustedException, DoNotRetryIOException { Throwable t = e.getCause(); assert t != null; // That's what ExecutionException is about: holding an exception t.printStackTrace(); @@ -292,25 +284,24 @@ public class RpcRetryingCallerWithReadReplicas { } RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(t, - EnvironmentEdgeManager.currentTime(), null); + new RetriesExhaustedException.ThrowableWithExtraContext(t, + EnvironmentEdgeManager.currentTime(), null); List exceptions = - Collections.singletonList(qt); + Collections.singletonList(qt); throw new RetriesExhaustedException(retries, exceptions); } /** * Creates the calls and submit them - * * @param cs - the completion service to use for submitting * @param rl - the region locations * @param min - the id of the first replica, inclusive * @param max - the id of the last replica, inclusive. 
*/ - private void addCallsForReplica(ResultBoundedCompletionService cs, - RegionLocations rl, int min, int max) { + private void addCallsForReplica(ResultBoundedCompletionService cs, RegionLocations rl, + int min, int max) { for (int id = min; id <= max; id++) { HRegionLocation hrl = rl.getRegionLocation(id); ReplicaRegionServerCallable callOnReplica = new ReplicaRegionServerCallable(id, hrl); @@ -319,8 +310,8 @@ public class RpcRetryingCallerWithReadReplicas { } static RegionLocations getRegionLocations(boolean useCache, int replicaId, - ClusterConnection cConnection, TableName tableName, byte[] row) - throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { + ClusterConnection cConnection, TableName tableName, byte[] row) + throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { RegionLocations rl; try { @@ -333,11 +324,11 @@ public class RpcRetryingCallerWithReadReplicas { throw e; } catch (IOException e) { throw new RetriesExhaustedException("Cannot get the location for replica" + replicaId - + " of region for " + Bytes.toStringBinary(row) + " in " + tableName, e); + + " of region for " + Bytes.toStringBinary(row) + " in " + tableName, e); } if (rl == null) { throw new RetriesExhaustedException("Cannot get the location for replica" + replicaId - + " of region for " + Bytes.toStringBinary(row) + " in " + tableName); + + " of region for " + Bytes.toStringBinary(row) + " in " + tableName); } return rl; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index c8931e1497b..677ddc0bb3e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -27,19 +25,19 @@ import java.util.Map; import java.util.NavigableSet; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.IncompatibleFilterException; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** * Used to perform Scan operations. 
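The Scan.java hunks that follow mostly reflow the builder-style methods. For orientation, a small sketch of how those methods are chained by client code; the row keys, family and qualifier below are illustrative:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanBuilderSketch {
      static Scan buildRangeScan() {
        return new Scan()
          // withStartRow/withStopRow replace the deprecated Scan(startRow, stopRow) constructors.
          .withStartRow(Bytes.toBytes("row-0001"), true)
          .withStopRow(Bytes.toBytes("row-1000"), false)
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"))
          // readVersions replaces the deprecated setMaxVersions(int).
          .readVersions(3)
          .setCaching(100);
      }
    }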
@@ -92,7 +90,7 @@ public class Scan extends Query { private byte[] startRow = HConstants.EMPTY_START_ROW; private boolean includeStartRow = true; - private byte[] stopRow = HConstants.EMPTY_END_ROW; + private byte[] stopRow = HConstants.EMPTY_END_ROW; private boolean includeStopRow = false; private int maxVersions = 1; private int batch = -1; @@ -141,18 +139,17 @@ public class Scan extends Query { private boolean cacheBlocks = true; private boolean reversed = false; private TimeRange tr = TimeRange.allTime(); - private Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); private Boolean asyncPrefetch = null; /** - * Parameter name for client scanner sync/async prefetch toggle. - * When using async scanner, prefetching data from the server is done at the background. - * The parameter currently won't have any effect in the case that the user has set - * Scan#setSmall or Scan#setReversed + * Parameter name for client scanner sync/async prefetch toggle. When using async scanner, + * prefetching data from the server is done at the background. The parameter currently won't have + * any effect in the case that the user has set Scan#setSmall or Scan#setReversed */ public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = - "hbase.client.scanner.async.prefetch"; + "hbase.client.scanner.async.prefetch"; /** * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}. @@ -192,11 +189,12 @@ public class Scan extends Query { /** * Create a Scan operation across all rows. */ - public Scan() {} + public Scan() { + } /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Use - * {@code new Scan().withStartRow(startRow).setFilter(filter)} instead. + * {@code new Scan().withStartRow(startRow).setFilter(filter)} instead. * @see HBASE-17320 */ @Deprecated @@ -212,7 +210,7 @@ public class Scan extends Query { * specified row. * @param startRow row to start scanner at or after * @deprecated since 2.0.0 and will be removed in 3.0.0. Use - * {@code new Scan().withStartRow(startRow)} instead. + * {@code new Scan().withStartRow(startRow)} instead. * @see HBASE-17320 */ @Deprecated @@ -223,9 +221,9 @@ public class Scan extends Query { /** * Create a Scan operation for the range of rows specified. * @param startRow row to start scanner at or after (inclusive) - * @param stopRow row to stop scanner before (exclusive) + * @param stopRow row to stop scanner before (exclusive) * @deprecated since 2.0.0 and will be removed in 3.0.0. Use - * {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead. + * {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead. * @see HBASE-17320 */ @Deprecated @@ -236,14 +234,13 @@ public class Scan extends Query { /** * Creates a new instance of this class while copying all values. - * - * @param scan The scan instance to copy from. + * @param scan The scan instance to copy from. * @throws IOException When copying the values fails. 
*/ public Scan(Scan scan) throws IOException { startRow = scan.getStartRow(); includeStartRow = scan.includeStartRow(); - stopRow = scan.getStopRow(); + stopRow = scan.getStopRow(); includeStopRow = scan.includeStopRow(); maxVersions = scan.getMaxVersions(); batch = scan.getBatch(); @@ -262,8 +259,8 @@ public class Scan extends Query { allowPartialResults = scan.getAllowPartialResults(); tr = scan.getTimeRange(); // TimeRange is immutable Map> fams = scan.getFamilyMap(); - for (Map.Entry> entry : fams.entrySet()) { - byte [] fam = entry.getKey(); + for (Map.Entry> entry : fams.entrySet()) { + byte[] fam = entry.getKey(); NavigableSet cols = entry.getValue(); if (cols != null && cols.size() > 0) { for (byte[] col : cols) { @@ -322,17 +319,16 @@ public class Scan extends Query { public boolean isGetScan() { return includeStartRow && includeStopRow - && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow); + && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow); } /** * Get all columns from the specified family. *

    * Overrides previous calls to addColumn for this family. - * @param family family name - * @return this + * @param family family name n */ - public Scan addFamily(byte [] family) { + public Scan addFamily(byte[] family) { familyMap.remove(family); familyMap.put(family, null); return this; @@ -342,13 +338,12 @@ public class Scan extends Query { * Get the column from the specified family with the specified qualifier. *

    * Overrides previous calls to addFamily for this family. - * @param family family name - * @param qualifier column qualifier - * @return this + * @param family family name + * @param qualifier column qualifier n */ - public Scan addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { + public Scan addColumn(byte[] family, byte[] qualifier) { + NavigableSet set = familyMap.get(family); + if (set == null) { set = new TreeSet<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, set); } @@ -360,15 +355,13 @@ public class Scan extends Query { } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). Note, default maximum versions to return is 1. If - * your time range spans more than one version and you want all versions - * returned, up the number of versions beyond the default. + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note, + * default maximum versions to return is 1. If your time range spans more than one version and you + * want all versions returned, up the number of versions beyond the default. * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive * @see #setMaxVersions() - * @see #setMaxVersions(int) - * @return this + * @see #setMaxVersions(int) n */ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException { tr = new TimeRange(minStamp, maxStamp); @@ -376,37 +369,31 @@ public class Scan extends Query { } /** - * Get versions of columns with the specified timestamp. Note, default maximum - * versions to return is 1. If your time range spans more than one version - * and you want all versions returned, up the number of versions beyond the - * defaut. + * Get versions of columns with the specified timestamp. Note, default maximum versions to return + * is 1. If your time range spans more than one version and you want all versions returned, up the + * number of versions beyond the defaut. * @param timestamp version timestamp * @see #setMaxVersions() - * @see #setMaxVersions(int) - * @return this - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #setTimestamp(long)} instead + * @see #setMaxVersions(int) n * @deprecated As of release 2.0.0, this will be removed in HBase + * 3.0.0. Use {@link #setTimestamp(long)} instead */ @Deprecated - public Scan setTimeStamp(long timestamp) - throws IOException { + public Scan setTimeStamp(long timestamp) throws IOException { return this.setTimestamp(timestamp); } /** - * Get versions of columns with the specified timestamp. Note, default maximum - * versions to return is 1. If your time range spans more than one version - * and you want all versions returned, up the number of versions beyond the - * defaut. + * Get versions of columns with the specified timestamp. Note, default maximum versions to return + * is 1. If your time range spans more than one version and you want all versions returned, up the + * number of versions beyond the defaut. * @param timestamp version timestamp * @see #setMaxVersions() - * @see #setMaxVersions(int) - * @return this + * @see #setMaxVersions(int) n */ public Scan setTimestamp(long timestamp) { try { tr = new TimeRange(timestamp, timestamp + 1); - } catch(Exception e) { + } catch (Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. 
", e); throw e; @@ -415,7 +402,8 @@ public class Scan extends Query { return this; } - @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { + @Override + public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); } @@ -425,17 +413,16 @@ public class Scan extends Query { * If the specified row does not exist, the Scanner will start from the next closest row after the * specified row. *

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param startRow row to start scanner at or after - * @return this - * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if + * startRow does not meet criteria for a row key (when length exceeds + * {@link HConstants#MAX_ROW_LENGTH}) * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStartRow(byte[])} - * instead. This method may change the inclusive of the stop row to keep compatible with the old - * behavior. + * instead. This method may change the inclusive of the stop row to keep compatible + * with the old behavior. * @see #withStartRow(byte[]) * @see HBASE-17320 */ @@ -454,10 +441,9 @@ public class Scan extends Query { *

    * If the specified row does not exist, the Scanner will start from the next closest row after the * specified row. - * @param startRow row to start scanner at or after - * @return this - * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if + * startRow does not meet criteria for a row key (when length exceeds + * {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStartRow(byte[] startRow) { return withStartRow(startRow, true); @@ -469,20 +455,19 @@ public class Scan extends Query { * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner * will start from the next closest row after the specified row. *

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param startRow row to start scanner at or after - * @param inclusive whether we should include the start row when scan - * @return this - * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param startRow row to start scanner at or after + * @param inclusive whether we should include the start row when scan n * @throws + * IllegalArgumentException if startRow does not meet criteria for a row key + * (when length exceeds {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStartRow(byte[] startRow, boolean inclusive) { if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) { throw new IllegalArgumentException("startRow's length must be less than or equal to " - + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); + + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); } this.startRow = startRow; this.includeStartRow = inclusive; @@ -494,17 +479,16 @@ public class Scan extends Query { *

    * The scan will include rows that are lexicographically less than the provided stopRow. *

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param stopRow row to end at (exclusive) - * @return this - * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does + * not meet criteria for a row key (when length exceeds + * {@link HConstants#MAX_ROW_LENGTH}) * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStopRow(byte[])} instead. - * This method may change the inclusive of the stop row to keep compatible with the old - * behavior. + * This method may change the inclusive of the stop row to keep compatible with the + * old behavior. * @see #withStopRow(byte[]) * @see HBASE-17320 */ @@ -526,10 +510,9 @@ public class Scan extends Query { * Note: When doing a filter for a rowKey Prefix use * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result. *

    - * @param stopRow row to end at (exclusive) - * @return this - * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does + * not meet criteria for a row key (when length exceeds + * {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStopRow(byte[] stopRow) { return withStopRow(stopRow, false); @@ -541,20 +524,19 @@ public class Scan extends Query { * The scan will include rows that are lexicographically less than (or equal to if * {@code inclusive} is {@code true}) the provided stopRow. *

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param stopRow row to end at - * @param inclusive whether we should include the stop row when scan - * @return this - * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param stopRow row to end at + * @param inclusive whether we should include the stop row when scan n * @throws + * IllegalArgumentException if stopRow does not meet criteria for a row key (when + * length exceeds {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStopRow(byte[] stopRow, boolean inclusive) { if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) { throw new IllegalArgumentException("stopRow's length must be less than or equal to " - + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); + + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); } this.stopRow = stopRow; this.includeStopRow = inclusive; @@ -562,34 +544,48 @@ public class Scan extends Query { } /** - *

    Set a filter (using stopRow and startRow) so the result set only contains rows where the - * rowKey starts with the specified prefix.

    - *

    This is a utility method that converts the desired rowPrefix into the appropriate values - * for the startRow and stopRow to achieve the desired result.

    - *

    This can safely be used in combination with setFilter.

    - *

    This CANNOT be used in combination with withStartRow and/or withStopRow. - * Such a combination will yield unexpected and even undefined results.

    - * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) - * @return this - * @deprecated since 2.5.0, will be removed in 4.0.0. - * The name of this method is considered to be confusing as it does not - * use a {@link Filter} but uses setting the startRow and stopRow instead. - * Use {@link #setStartStopRowForPrefixScan(byte[])} instead. + *

    + * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey + * starts with the specified prefix. + *

    + *

    + * This is a utility method that converts the desired rowPrefix into the appropriate values for + * the startRow and stopRow to achieve the desired result. + *

    + *

    + * This can safely be used in combination with setFilter. + *

    + *

    + * This CANNOT be used in combination with withStartRow and/or withStopRow. Such + * a combination will yield unexpected and even undefined results. + *

    + * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n + * * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method + * is considered to be confusing as it does not use a {@link Filter} but uses + * setting the startRow and stopRow instead. Use + * {@link #setStartStopRowForPrefixScan(byte[])} instead. */ public Scan setRowPrefixFilter(byte[] rowPrefix) { return setStartStopRowForPrefixScan(rowPrefix); } /** - *

    Set a filter (using stopRow and startRow) so the result set only contains rows where the - * rowKey starts with the specified prefix.

    - *

    This is a utility method that converts the desired rowPrefix into the appropriate values - * for the startRow and stopRow to achieve the desired result.

    - *

    This can safely be used in combination with setFilter.

    - *

    This CANNOT be used in combination with withStartRow and/or withStopRow. - * Such a combination will yield unexpected and even undefined results.

    - * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) - * @return this + *

    + * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey + * starts with the specified prefix. + *

    + *

    + * This is a utility method that converts the desired rowPrefix into the appropriate values for + * the startRow and stopRow to achieve the desired result. + *

    + *

    + * This can safely be used in combination with setFilter. + *

    + *

    + * This CANNOT be used in combination with withStartRow and/or withStopRow. Such + * a combination will yield unexpected and even undefined results. + *

    + * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n */ public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) { if (rowPrefix == null) { @@ -603,10 +599,9 @@ public class Scan extends Query { } /** - * Get all available versions. - * @return this - * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column - * family's max versions, so use {@link #readAllVersions()} instead. + * Get all available versions. n * @deprecated since 2.0.0 and will be removed in 3.0.0. It is + * easy to misunderstand with column family's max versions, so use {@link #readAllVersions()} + * instead. * @see #readAllVersions() * @see HBASE-17125 */ @@ -617,10 +612,9 @@ public class Scan extends Query { /** * Get up to the specified number of versions of each column. - * @param maxVersions maximum versions for each column - * @return this - * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column - * family's max versions, so use {@link #readVersions(int)} instead. + * @param maxVersions maximum versions for each column n * @deprecated since 2.0.0 and will be + * removed in 3.0.0. It is easy to misunderstand with column family's max + * versions, so use {@link #readVersions(int)} instead. * @see #readVersions(int) * @see HBASE-17125 */ @@ -630,8 +624,7 @@ public class Scan extends Query { } /** - * Get all available versions. - * @return this + * Get all available versions. n */ public Scan readAllVersions() { this.maxVersions = Integer.MAX_VALUE; @@ -640,8 +633,7 @@ public class Scan extends Query { /** * Get up to the specified number of versions of each column. - * @param versions specified number of versions for each column - * @return this + * @param versions specified number of versions for each column n */ public Scan readVersions(int versions) { this.maxVersions = versions; @@ -649,19 +641,18 @@ public class Scan extends Query { } /** - * Set the maximum number of cells to return for each call to next(). Callers should be aware - * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. - * If you don't allow partial results, the number of cells in each Result must equal to your - * batch setting unless it is the last Result for current row. So this method is helpful in paging - * queries. If you just want to prevent OOM at client, use setAllowPartialResults(true) is better. + * Set the maximum number of cells to return for each call to next(). Callers should be aware that + * this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow + * partial results, the number of cells in each Result must equal to your batch setting unless it + * is the last Result for current row. So this method is helpful in paging queries. If you just + * want to prevent OOM at client, use setAllowPartialResults(true) is better. * @param batch the maximum number of values * @see Result#mayHaveMoreCellsInRow() */ public Scan setBatch(int batch) { if (this.hasFilter() && this.filter.hasFilterRow()) { throw new IncompatibleFilterException( - "Cannot set batch on a scan using a filter" + - " that returns true for filter.hasFilterRow"); + "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow"); } this.batch = batch; return this; @@ -686,10 +677,9 @@ public class Scan extends Query { } /** - * Set the number of rows for caching that will be passed to scanners. 
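The setStartStopRowForPrefixScan method reformatted just above computes the start and stop row for a prefix scan, and the surrounding hunks cover readAllVersions, setBatch and setCaching. A small combined usage sketch; the prefix and the numeric values are illustrative:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixScanSketch {
      static Scan buildPrefixScan() {
        Scan scan = new Scan();
        // Derives startRow/stopRow for all rows beginning with "user-42|"; per the Javadoc
        // above, do not combine this with withStartRow/withStopRow.
        scan.setStartStopRowForPrefixScan(Bytes.toBytes("user-42|"));
        // Ask for every stored version and ship 50 rows per RPC to the client.
        scan.readAllVersions();
        scan.setCaching(50);
        return scan;
      }
    }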
- * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will - * apply. - * Higher caching values will enable faster scanners but will use more memory. + * Set the number of rows for caching that will be passed to scanners. If not set, the + * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher + * caching values will enable faster scanners but will use more memory. * @param caching the number of rows for caching */ public Scan setCaching(int caching) { @@ -705,10 +695,9 @@ public class Scan extends Query { } /** - * Set the maximum result size. The default is -1; this means that no specific - * maximum result size will be set for this scan, and the global configured - * value will be used instead. (Defaults to unlimited). - * + * Set the maximum result size. The default is -1; this means that no specific maximum result size + * will be set for this scan, and the global configured value will be used instead. (Defaults to + * unlimited). * @param maxResultSize The maximum result size in bytes. */ public Scan setMaxResultSize(long maxResultSize) { @@ -724,19 +713,17 @@ public class Scan extends Query { /** * Setting the familyMap - * @param familyMap map of family to qualifier - * @return this + * @param familyMap map of family to qualifier n */ - public Scan setFamilyMap(Map> familyMap) { + public Scan setFamilyMap(Map> familyMap) { this.familyMap = familyMap; return this; } /** - * Getting the familyMap - * @return familyMap + * Getting the familyMap n */ - public Map> getFamilyMap() { + public Map> getFamilyMap() { return this.familyMap; } @@ -744,7 +731,7 @@ public class Scan extends Query { * @return the number of families in familyMap */ public int numFamilies() { - if(hasFamilies()) { + if (hasFamilies()) { return this.familyMap.size(); } return 0; @@ -761,7 +748,7 @@ public class Scan extends Query { * @return the keys of the familyMap */ public byte[][] getFamilies() { - if(hasFamilies()) { + if (hasFamilies()) { return this.familyMap.keySet().toArray(new byte[0][0]); } return null; @@ -770,7 +757,7 @@ public class Scan extends Query { /** * @return the startrow */ - public byte [] getStartRow() { + public byte[] getStartRow() { return this.startRow; } @@ -817,8 +804,7 @@ public class Scan extends Query { } /** - * Method for retrieving the scan's offset per row per column - * family (#kvs to be skipped) + * Method for retrieving the scan's offset per row per column family (#kvs to be skipped) * @return row offset */ public int getRowOffsetPerColumnFamily() { @@ -833,14 +819,14 @@ public class Scan extends Query { } /** - * @return TimeRange + * n */ public TimeRange getTimeRange() { return this.tr; } /** - * @return RowFilter + * n */ @Override public Filter getFilter() { @@ -857,12 +843,9 @@ public class Scan extends Query { /** * Set whether blocks should be cached for this Scan. *

    - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached + * This is true by default. When true, default settings of the table and family are used (this + * will never override caching blocks if the block cache is disabled for that family or entirely). + * @param cacheBlocks if false, default settings are overridden and blocks will not be cached */ public Scan setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; @@ -871,8 +854,7 @@ public class Scan extends Query { /** * Get whether blocks should be cached for this Scan. - * @return true if default caching should be used, false if blocks should not - * be cached + * @return true if default caching should be used, false if blocks should not be cached */ public boolean getCacheBlocks() { return cacheBlocks; @@ -882,9 +864,7 @@ public class Scan extends Query { * Set whether this scan is a reversed one *

    * This is false by default which means forward(normal) scan. - * - * @param reversed if true, scan will be backward order - * @return this + * @param reversed if true, scan will be backward order n */ public Scan setReversed(boolean reversed) { this.reversed = reversed; @@ -901,12 +881,9 @@ public class Scan extends Query { /** * Setting whether the caller wants to see the partial results when server returns - * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. - * By default this value is false and the complete results will be assembled client side - * before being delivered to the caller. - * @param allowPartialResults - * @return this - * @see Result#mayHaveMoreCellsInRow() + * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By + * default this value is false and the complete results will be assembled client side before being + * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow() * @see #setBatch(int) */ public Scan setAllowPartialResults(final boolean allowPartialResults) { @@ -929,34 +906,30 @@ public class Scan extends Query { } /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map + * Compile the table and column family (i.e. schema) information into a String. Useful for parsing + * and aggregation by debugging, logging, and administration tools. n */ @Override public Map getFingerprint() { Map map = new HashMap<>(); List families = new ArrayList<>(); - if(this.familyMap.isEmpty()) { + if (this.familyMap.isEmpty()) { map.put("families", "ALL"); return map; } else { map.put("families", families); } - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. - * @param maxCols a limit on the number of columns output prior to truncation - * @return Map + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. + * @param maxCols a limit on the number of columns output prior to truncation n */ @Override public Map toMap(int maxCols) { @@ -980,11 +953,10 @@ public class Scan extends Query { map.put("timeRange", timeRange); int colCount = 0; // iterate through affected families and list out up to maxCols columns - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { List columns = new ArrayList<>(); familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns); - if(entry.getValue() == null) { + if (entry.getValue() == null) { colCount++; --maxCols; columns.add("ALL"); @@ -993,7 +965,7 @@ public class Scan extends Query { if (maxCols <= 0) { continue; } - for (byte [] column : entry.getValue()) { + for (byte[] column : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -1013,13 +985,10 @@ public class Scan extends Query { } /** - * Enable/disable "raw" mode for this scan. - * If "raw" is enabled the scan will return all - * delete marker and deleted rows that have not - * been collected, yet. 
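The hunks above also reflow setReversed and setAllowPartialResults. A brief sketch of using the two together when rows may be too wide to hold in one Result; the column family name is illustrative:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WideRowScanSketch {
      static Scan buildWideRowScan() {
        Scan scan = new Scan().addFamily(Bytes.toBytes("cf"));
        // Let the server return a row in several Result chunks to avoid client-side OOM;
        // Result.mayHaveMoreCellsInRow() tells the caller whether more chunks follow.
        scan.setAllowPartialResults(true);
        // Walk the table in reverse row-key order.
        scan.setReversed(true);
        return scan;
      }
    }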
- * This is mostly useful for Scan on column families - * that have KEEP_DELETED_ROWS enabled. - * It is an error to specify any column when "raw" is set. + * Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete + * marker and deleted rows that have not been collected, yet. This is mostly useful for Scan on + * column families that have KEEP_DELETED_ROWS enabled. It is an error to specify any column when + * "raw" is set. * @param raw True/False to enable/disable "raw" mode. */ public Scan setRaw(boolean raw) { @@ -1046,9 +1015,10 @@ public class Scan extends Query { * data block(64KB), it could be considered as a small scan. * @param small set if that should use read type of PREAD * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and - * {@link #setReadType(ReadType)} instead. And for the one rpc optimization, now we will also - * fetch data when openScanner, and if the number of rows reaches the limit then we will close - * the scanner automatically which means we will fall back to one rpc. + * {@link #setReadType(ReadType)} instead. And for the one rpc optimization, now we + * will also fetch data when openScanner, and if the number of rows reaches the limit + * then we will close the scanner automatically which means we will fall back to one + * rpc. * @see #setLimit(int) * @see #setReadType(ReadType) * @see HBASE-17045 @@ -1066,7 +1036,7 @@ public class Scan extends Query { * Get whether this scan is a small scan * @return true if small scan * @deprecated since 2.0.0 and will be removed in 3.0.0. See the comment of - * {@link #setSmall(boolean)} + * {@link #setSmall(boolean)} * @see HBASE-17045 */ @Deprecated @@ -1171,8 +1141,7 @@ public class Scan extends Query { * reaches this value. *

    * This condition will be tested at last, after all other conditions such as stopRow, filter, etc. - * @param limit the limit of rows for this scan - * @return this + * @param limit the limit of rows for this scan n */ public Scan setLimit(int limit) { this.limit = limit; @@ -1181,8 +1150,7 @@ public class Scan extends Query { /** * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also - * set {@code readType} to {@link ReadType#PREAD}. - * @return this + * set {@code readType} to {@link ReadType#PREAD}. n */ public Scan setOneRowLimit() { return setLimit(1).setReadType(ReadType.PREAD); @@ -1190,7 +1158,9 @@ public class Scan extends Query { @InterfaceAudience.Public public enum ReadType { - DEFAULT, STREAM, PREAD + DEFAULT, + STREAM, + PREAD } /** @@ -1204,8 +1174,7 @@ public class Scan extends Query { * Set the read type for this scan. *

    * Notice that we may choose to use pread even if you specific {@link ReadType#STREAM} here. For - * example, we will always use pread if this is a get scan. - * @return this + * example, we will always use pread if this is a get scan. n */ public Scan setReadType(ReadType readType) { this.readType = readType; @@ -1238,20 +1207,14 @@ public class Scan extends Query { * When the server is slow or we scan a table with many deleted data or we use a sparse filter, * the server will response heartbeat to prevent timeout. However the scanner will return a Result * only when client can do it. So if there are many heartbeats, the blocking time on - * ResultScanner#next() may be very long, which is not friendly to online services. - * - * Set this to true then you can get a special Result whose #isCursor() returns true and is not - * contains any real data. It only tells you where the server has scanned. You can call next - * to continue scanning or open a new scanner with this row key as start row whenever you want. - * - * Users can get a cursor when and only when there is a response from the server but we can not - * return a Result to users, for example, this response is a heartbeat or there are partial cells - * but users do not allow partial result. - * - * Now the cursor is in row level which means the special Result will only contains a row key. - * {@link Result#isCursor()} - * {@link Result#getCursor()} - * {@link Cursor} + * ResultScanner#next() may be very long, which is not friendly to online services. Set this to + * true then you can get a special Result whose #isCursor() returns true and is not contains any + * real data. It only tells you where the server has scanned. You can call next to continue + * scanning or open a new scanner with this row key as start row whenever you want. Users can get + * a cursor when and only when there is a response from the server but we can not return a Result + * to users, for example, this response is a heartbeat or there are partial cells but users do not + * allow partial result. Now the cursor is in row level which means the special Result will only + * contains a row key. {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor} */ public Scan setNeedCursorResult(boolean needCursorResult) { this.needCursorResult = needCursorResult; @@ -1263,11 +1226,9 @@ public class Scan extends Query { } /** - * Create a new Scan with a cursor. It only set the position information like start row key. - * The others (like cfs, stop row, limit) should still be filled in by the user. - * {@link Result#isCursor()} - * {@link Result#getCursor()} - * {@link Cursor} + * Create a new Scan with a cursor. It only set the position information like start row key. The + * others (like cfs, stop row, limit) should still be filled in by the user. + * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor} */ public static Scan createScanFromCursor(Cursor cursor) { return new Scan().withStartRow(cursor.getRow()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java index 583c4559312..7c6a9f27ad3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -39,7 +38,7 @@ interface ScanResultCache { /** * Add the given results to cache and get valid results back. - * @param results the results of a scan next. Must not be null. + * @param results the results of a scan next. Must not be null. * @param isHeartbeatMessage indicate whether the results is gotten from a heartbeat response. * @return valid results, never null. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java index be3108bd34c..b574b2c2bd5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java index 538cf9d9638..e16348b0dc9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 794fcb52c58..06130f5a889 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.incRPCCallsMetrics; @@ -55,14 +54,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanReques import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; /** - * Scanner operations such as create, next, etc. - * Used by {@link ResultScanner}s made by {@link Table}. Passed to a retrying caller such as - * {@link RpcRetryingCaller} so fails are retried. + * Scanner operations such as create, next, etc. Used by {@link ResultScanner}s made by + * {@link Table}. Passed to a retrying caller such as {@link RpcRetryingCaller} so fails are + * retried. 
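The ScannerCallable Javadoc just above notes that it is driven by the ResultScanner instances handed out by Table. The usual client-side loop looks roughly like this; connection and table setup are omitted and the names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class ScanLoopSketch {
      static long countRows(Table table, Scan scan) throws IOException {
        long rows = 0;
        // Each next() on the scanner may translate into one or more scan RPCs underneath.
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result unused : scanner) {
            rows++;
          }
        }
        return rows;
      }
    }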
*/ @InterfaceAudience.Private public class ScannerCallable extends ClientServiceCallable { - public static final String LOG_SCANNER_LATENCY_CUTOFF - = "hbase.client.log.scanner.latency.cutoff"; + public static final String LOG_SCANNER_LATENCY_CUTOFF = "hbase.client.log.scanner.latency.cutoff"; public static final String LOG_SCANNER_ACTIVITY = "hbase.client.log.scanner.activity"; // Keeping LOG public as it is being used in TestScannerHeartbeatMessages @@ -79,7 +77,9 @@ public class ScannerCallable extends ClientServiceCallable { protected final int id; enum MoreResults { - YES, NO, UNKNOWN + YES, + NO, + UNKNOWN } private MoreResults moreResultsInRegion; @@ -99,17 +99,18 @@ public class ScannerCallable extends ClientServiceCallable { protected final RpcControllerFactory rpcControllerFactory; /** - * @param connection which connection - * @param tableName table callable is on - * @param scan the scan to execute - * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect - * metrics + * @param connection which connection + * @param tableName table callable is on + * @param scan the scan to execute + * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't + * collect metrics * @param rpcControllerFactory factory to use when creating - * {@link com.google.protobuf.RpcController} + * {@link com.google.protobuf.RpcController} */ public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, - ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory, int id) { - super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), scan.getPriority()); + ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory, int id) { + super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), + scan.getPriority()); this.id = id; this.scan = scan; this.scanMetrics = scanMetrics; @@ -131,14 +132,12 @@ public class ScannerCallable extends ClientServiceCallable { } /** - * Fetch region locations for the row. Since this is for prepare, we always useCache. - * This is because we can be sure that RpcRetryingCaller will have cleared the cache - * in error handling if this is a retry. - * + * Fetch region locations for the row. Since this is for prepare, we always useCache. This is + * because we can be sure that RpcRetryingCaller will have cleared the cache in error handling if + * this is a retry. 
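The constants in the hunk above expose two client-side logging switches, hbase.client.log.scanner.activity and hbase.client.log.scanner.latency.cutoff. A sketch of enabling them in the client configuration; the cutoff value is illustrative and the default of roughly one second is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ScannerLoggingSketch {
      static Configuration enableScannerLogging() {
        Configuration conf = HBaseConfiguration.create();
        // Log scanner open/expiry/reset activity (LOG_SCANNER_ACTIVITY above).
        conf.setBoolean("hbase.client.log.scanner.activity", true);
        // Log any fetch slower than this many milliseconds (LOG_SCANNER_LATENCY_CUTOFF above).
        conf.setInt("hbase.client.log.scanner.latency.cutoff", 500);
        return conf;
      }
    }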
* @param row the row to look up region location for */ - protected final RegionLocations getRegionLocationsForPrepare(byte[] row) - throws IOException { + protected final RegionLocations getRegionLocationsForPrepare(byte[] row) throws IOException { // always use cache, because cache will have been cleared if necessary // in the try/catch before retrying return RpcRetryingCallerWithReadReplicas.getRegionLocations(true, id, getConnection(), @@ -154,8 +153,10 @@ public class ScannerCallable extends ClientServiceCallable { throw new InterruptedIOException(); } - if (reload && getTableName() != null && !getTableName().equals(TableName.META_TABLE_NAME) - && getConnection().isTableDisabled(getTableName())) { + if ( + reload && getTableName() != null && !getTableName().equals(TableName.META_TABLE_NAME) + && getConnection().isTableDisabled(getTableName()) + ) { throw new TableNotEnabledException(getTableName().getNameAsString() + " is disabled."); } @@ -203,16 +204,15 @@ public class ScannerCallable extends ClientServiceCallable { if (ioe instanceof UnknownScannerException) { try { HRegionLocation location = - getConnection().relocateRegion(getTableName(), scan.getStartRow()); + getConnection().relocateRegion(getTableName(), scan.getStartRow()); LOG.info("Scanner=" + scannerId + " expired, current region location is " - + location.toString()); + + location.toString()); } catch (Throwable t) { LOG.info("Failed to relocate region", t); } } else if (ioe instanceof ScannerResetException) { LOG.info("Scanner=" + scannerId + " has received an exception, and the server " - + "asked us to reset the scanner state.", - ioe); + + "asked us to reset the scanner state.", ioe); } } // The below convertion of exceptions into DoNotRetryExceptions is a little strange. @@ -271,8 +271,8 @@ public class ScannerCallable extends ClientServiceCallable { long now = EnvironmentEdgeManager.currentTime(); if (now - timestamp > logCutOffLatency) { int rows = rrs == null ? 0 : rrs.length; - LOG.info("Took " + (now - timestamp) + "ms to fetch " + rows + " rows from scanner=" - + scannerId); + LOG.info( + "Took " + (now - timestamp) + "ms to fetch " + rows + " rows from scanner=" + scannerId); } } updateServerSideMetrics(scanMetrics, response); @@ -326,7 +326,7 @@ public class ScannerCallable extends ClientServiceCallable { try { incRPCCallsMetrics(scanMetrics, isRegionServerRemote); ScanRequest request = - RequestConverter.buildScanRequest(this.scannerId, 0, true, this.scanMetrics != null); + RequestConverter.buildScanRequest(this.scannerId, 0, true, this.scanMetrics != null); HBaseRpcController controller = rpcControllerFactory.newController(); // Set fields from the original controller onto the close-specific controller @@ -349,8 +349,8 @@ public class ScannerCallable extends ClientServiceCallable { } catch (IOException e) { TableName table = getTableName(); String tableDetails = (table == null) ? "" : (" on table: " + table.getNameAsString()); - LOG.warn("Ignore, probably already closed. Current scan: " + getScan().toString() - + tableDetails, e); + LOG.warn( + "Ignore, probably already closed. 
Current scan: " + getScan().toString() + tableDetails, e); } this.scannerId = -1L; } @@ -363,8 +363,8 @@ public class ScannerCallable extends ClientServiceCallable { ScanResponse response = getStub().scan(getRpcController(), request); long id = response.getScannerId(); if (logScannerActivity) { - LOG.info("Open scanner=" + id + " for scan=" + scan.toString() - + " on region " + getLocation().toString()); + LOG.info("Open scanner=" + id + " for scan=" + scan.toString() + " on region " + + getLocation().toString()); } if (response.hasMvccReadPoint()) { this.scan.setMvccReadPoint(response.getMvccReadPoint()); @@ -424,8 +424,8 @@ public class ScannerCallable extends ClientServiceCallable { } public ScannerCallable getScannerCallableForReplica(int id) { - ScannerCallable s = new ScannerCallable(this.getConnection(), getTableName(), - this.getScan(), this.scanMetrics, this.rpcControllerFactory, id); + ScannerCallable s = new ScannerCallable(this.getConnection(), getTableName(), this.getScan(), + this.scanMetrics, this.rpcControllerFactory, id); s.setCaching(this.caching); return s; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java index 636ca374d3b..05cb850e337 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -29,7 +28,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; @@ -42,17 +40,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This class has the logic for handling scanners for regions with and without replicas. - * 1. A scan is attempted on the default (primary) region, or a specific region. - * 2. The scanner sends all the RPCs to the default/specific region until it is done, or, there - * is a timeout on the default/specific region (a timeout of zero is disallowed). - * 3. If there is a timeout in (2) above, scanner(s) is opened on the non-default replica(s) only - * for Consistency.TIMELINE without specific replica id specified. - * 4. The results from the first successful scanner are taken, and it is stored which server - * returned the results. - * 5. The next RPCs are done on the above stored server until it is done or there is a timeout, - * in which case, the other replicas are queried (as in (3) above). - * + * This class has the logic for handling scanners for regions with and without replicas. 1. A scan + * is attempted on the default (primary) region, or a specific region. 2. The scanner sends all the + * RPCs to the default/specific region until it is done, or, there is a timeout on the + * default/specific region (a timeout of zero is disallowed). 3. 
If there is a timeout in (2) above, + * scanner(s) is opened on the non-default replica(s) only for Consistency.TIMELINE without specific + * replica id specified. 4. The results from the first successful scanner are taken, and it is + * stored which server returned the results. 5. The next RPCs are done on the above stored server + * until it is done or there is a timeout, in which case, the other replicas are queried (as in (3) + * above). */ @InterfaceAudience.Private class ScannerCallableWithReplicas implements RetryingCallable { @@ -70,13 +66,13 @@ class ScannerCallableWithReplicas implements RetryingCallable { private Configuration conf; private int scannerTimeout; private Set outstandingCallables = new HashSet<>(); - private boolean someRPCcancelled = false; //required for testing purposes only + private boolean someRPCcancelled = false; // required for testing purposes only private int regionReplication = 0; public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConnection, - ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan, - int retries, int scannerTimeout, int caching, Configuration conf, - RpcRetryingCaller caller) { + ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan, + int retries, int scannerTimeout, int caching, Configuration conf, + RpcRetryingCaller caller) { this.currentScannerCallable = baseCallable; this.cConnection = cConnection; this.pool = pool; @@ -93,7 +89,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { } public void setClose() { - if(currentScannerCallable != null) { + if (currentScannerCallable != null) { currentScannerCallable.setClose(); } else { LOG.warn("Calling close on ScannerCallable reference that is already null, " @@ -140,17 +136,17 @@ class ScannerCallableWithReplicas implements RetryingCallable { Result[] r = currentScannerCallable.call(timeout); currentScannerCallable = null; return r; - } else if(currentScannerCallable == null) { + } else if (currentScannerCallable == null) { LOG.warn("Another call received, but our ScannerCallable is already null. " + "This shouldn't happen, but there's not much to do, so logging and returning null."); return null; } // We need to do the following: - //1. When a scan goes out to a certain replica (default or not), we need to - // continue to hit that until there is a failure. So store the last successfully invoked - // replica - //2. We should close the "losing" scanners (scanners other than the ones we hear back - // from first) + // 1. When a scan goes out to a certain replica (default or not), we need to + // continue to hit that until there is a failure. So store the last successfully invoked + // replica + // 2. We should close the "losing" scanners (scanners other than the ones we hear back + // from first) // // Since RegionReplication is a table attribute, it wont change as long as table is enabled, // it just needs to be set once. @@ -160,13 +156,13 @@ class ScannerCallableWithReplicas implements RetryingCallable { try { rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(true, RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName, - currentScannerCallable.getRow()); + currentScannerCallable.getRow()); } catch (RetriesExhaustedException | DoNotRetryIOException e) { // We cannot get the primary replica region location, it is possible that the region server // hosting meta table is down, it needs to proceed to try cached replicas directly. 
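To make the numbered steps in the class comment above concrete, here is a heavily simplified, self-contained sketch of the same submit-primary-then-fan-out pattern built on a plain ExecutorCompletionService. The real code goes through ResultBoundedCompletionService and the retrying caller, so the names below (primaryCall, replicaCalls, the two timeouts) are placeholders, not the actual fields.

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class ReplicaScanSketch {
  // Try the primary first; fan out to the replicas only if it is slow (steps 2-4 above).
  static <R> R firstResult(ExecutorService pool, Callable<R> primaryCall,
      List<Callable<R>> replicaCalls, long primaryTimeoutMicros, long overallTimeoutMillis)
      throws Exception {
    ExecutorCompletionService<R> cs = new ExecutorCompletionService<>(pool);
    cs.submit(primaryCall);                                     // steps 1-2: primary region only
    Future<R> f = cs.poll(primaryTimeoutMicros, TimeUnit.MICROSECONDS);
    if (f == null) {                                            // step 3: primary is slow
      for (Callable<R> replica : replicaCalls) {
        cs.submit(replica);                                     // open scanners on the replicas
      }
      f = cs.poll(overallTimeoutMillis, TimeUnit.MILLISECONDS); // step 4: first responder wins
    }
    return f == null ? null : f.get();
  }
}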
if (cConnection instanceof ConnectionImplementation) { - rl = ((ConnectionImplementation) cConnection) - .getCachedLocation(tableName, currentScannerCallable.getRow()); + rl = ((ConnectionImplementation) cConnection).getCachedLocation(tableName, + currentScannerCallable.getRow()); if (rl == null) { throw e; } @@ -180,9 +176,9 @@ class ScannerCallableWithReplicas implements RetryingCallable { // allocate a boundedcompletion pool of some multiple of number of replicas. // We want to accomodate some RPCs for redundant replica scans (but are still in progress) ResultBoundedCompletionService> cs = - new ResultBoundedCompletionService<>( - RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool, - regionReplication * 5); + new ResultBoundedCompletionService<>( + RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool, + regionReplication * 5); AtomicBoolean done = new AtomicBoolean(false); replicaSwitched.set(false); @@ -192,15 +188,15 @@ class ScannerCallableWithReplicas implements RetryingCallable { try { // wait for the timeout to see whether the primary responds back - Future> f = cs.poll(timeBeforeReplicas, - TimeUnit.MICROSECONDS); // Yes, microseconds + Future> f = + cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, microseconds if (f != null) { // After poll, if f is not null, there must be a completed task Pair r = f.get(); if (r != null && r.getSecond() != null) { updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool); } - return r == null ? null : r.getFirst(); //great we got a response + return r == null ? null : r.getFirst(); // great we got a response } } catch (ExecutionException e) { // We ignore the ExecutionException and continue with the replicas @@ -210,8 +206,10 @@ class ScannerCallableWithReplicas implements RetryingCallable { // If rl's size is 1 or scan's consitency is strong, or scan is over specific replica, // it needs to throw out the exception from the primary replica - if (regionReplication == 1 || scan.getConsistency() == Consistency.STRONG || - scan.getReplicaId() >= 0) { + if ( + regionReplication == 1 || scan.getConsistency() == Consistency.STRONG + || scan.getReplicaId() >= 0 + ) { // Rethrow the first exception RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries); } @@ -235,7 +233,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { try { Future> f = cs.pollForFirstSuccessfullyCompletedTask(timeout, - TimeUnit.MILLISECONDS, startIndex, endIndex); + TimeUnit.MILLISECONDS, startIndex, endIndex); if (f == null) { throw new IOException("Failed to get result within timeout, timeout=" + timeout + "ms"); @@ -263,31 +261,30 @@ class ScannerCallableWithReplicas implements RetryingCallable { } private void updateCurrentlyServingReplica(ScannerCallable scanner, Result[] result, - AtomicBoolean done, ExecutorService pool) { + AtomicBoolean done, ExecutorService pool) { if (done.compareAndSet(false, true)) { if (currentScannerCallable != scanner) replicaSwitched.set(true); currentScannerCallable = scanner; // store where to start the replica scanner from if we need to. 
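From the application side this path is exercised only for timeline reads against a table created with region replication greater than one; a minimal usage sketch, in which the Connection and the table name are assumed to exist:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class TimelineScanExample {
  static void scanWithReplicas(Connection connection) throws IOException {
    Scan scan = new Scan().setConsistency(Consistency.TIMELINE); // allow secondary replicas
    try (Table table = connection.getTable(TableName.valueOf("t1"));
        ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        if (r.isStale()) {
          // Row was served by a secondary replica and may lag the primary.
        }
      }
    }
  }
}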
if (result != null && result.length != 0) this.lastResult = result[result.length - 1]; if (LOG.isTraceEnabled()) { - LOG.trace("Setting current scanner as id=" + currentScannerCallable.scannerId + - " associated with replica=" + currentScannerCallable.getHRegionInfo().getReplicaId()); + LOG.trace("Setting current scanner as id=" + currentScannerCallable.scannerId + + " associated with replica=" + currentScannerCallable.getHRegionInfo().getReplicaId()); } // close all outstanding replica scanners but the one we heard back from outstandingCallables.remove(scanner); for (ScannerCallable s : outstandingCallables) { if (LOG.isTraceEnabled()) { - LOG.trace("Closing scanner id=" + s.scannerId + - ", replica=" + s.getHRegionInfo().getRegionId() + - " because slow and replica=" + - this.currentScannerCallable.getHRegionInfo().getReplicaId() + " succeeded"); + LOG.trace("Closing scanner id=" + s.scannerId + ", replica=" + + s.getHRegionInfo().getRegionId() + " because slow and replica=" + + this.currentScannerCallable.getHRegionInfo().getReplicaId() + " succeeded"); } // Submit the "close" to the pool since this might take time, and we don't // want to wait for the "close" to happen yet. The "wait" will happen when // the table is closed (when the awaitTermination of the underlying pool is called) s.setClose(); final RetryingRPC r = new RetryingRPC(s); - pool.submit(new Callable(){ + pool.submit(new Callable() { @Override public Void call() throws Exception { r.call(scannerTimeout); @@ -301,8 +298,8 @@ class ScannerCallableWithReplicas implements RetryingCallable { } /** - * When a scanner switches in the middle of scanning (the 'next' call fails - * for example), the upper layer {@link ClientScanner} needs to know + * When a scanner switches in the middle of scanning (the 'next' call fails for example), the + * upper layer {@link ClientScanner} needs to know */ public boolean switchedToADifferentReplica() { return replicaSwitched.get(); @@ -322,19 +319,19 @@ class ScannerCallableWithReplicas implements RetryingCallable { return currentScannerCallable != null ? currentScannerCallable.getCursor() : null; } - private void addCallsForCurrentReplica( - ResultBoundedCompletionService> cs) { + private void + addCallsForCurrentReplica(ResultBoundedCompletionService> cs) { RetryingRPC retryingOnReplica = new RetryingRPC(currentScannerCallable); outstandingCallables.add(currentScannerCallable); cs.submit(retryingOnReplica, scannerTimeout, currentScannerCallable.id); } private void addCallsForOtherReplicas( - ResultBoundedCompletionService> cs, int min, int max) { + ResultBoundedCompletionService> cs, int min, int max) { for (int id = min; id <= max; id++) { if (currentScannerCallable.id == id) { - continue; //this was already scheduled earlier + continue; // this was already scheduled earlier } ScannerCallable s = currentScannerCallable.getScannerCallableForReplica(id); setStartRowForReplicaCallable(s); @@ -359,7 +356,8 @@ class ScannerCallableWithReplicas implements RetryingCallable { // 2. The last result was not a partial result which means it contained all of the cells for // that row (we no longer need any information from it). Set the start row to the next // closest row that could be seen. 
- callable.getScan().withStartRow(this.lastResult.getRow(), this.lastResult.mayHaveMoreCellsInRow()); + callable.getScan().withStartRow(this.lastResult.getRow(), + this.lastResult.mayHaveMoreCellsInRow()); } boolean isAnyRPCcancelled() { @@ -380,8 +378,8 @@ class ScannerCallableWithReplicas implements RetryingCallable { // and we can't invoke it multiple times at the same time) this.caller = ScannerCallableWithReplicas.this.caller; if (scan.getConsistency() == Consistency.TIMELINE) { - this.caller = RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf) - .newCaller(); + this.caller = RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf).< + Result[]> newCaller(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java index f02f61ebdfe..aeca91e5bc9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java @@ -15,17 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; +import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET; + import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.security.token.Token; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest; @@ -36,9 +38,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBul import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -import org.apache.hadoop.security.token.Token; - -import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET; /** * Client proxy for SecureBulkLoadProtocol @@ -55,23 +54,23 @@ public class SecureBulkLoadClient { public String prepareBulkLoad(final Connection conn) throws IOException { try { - ClientServiceCallable callable = new ClientServiceCallable(conn, - table.getName(), HConstants.EMPTY_START_ROW, + ClientServiceCallable callable = + new ClientServiceCallable(conn, table.getName(), HConstants.EMPTY_START_ROW, this.rpcControllerFactory.newController(), PRIORITY_UNSET) { - @Override - protected String rpcCall() throws Exception { - byte[] regionName = getLocation().getRegionInfo().getRegionName(); - RegionSpecifier region = + @Override + protected String rpcCall() throws Exception { + byte[] regionName = getLocation().getRegionInfo().getRegionName(); + RegionSpecifier region = RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); - PrepareBulkLoadRequest request = PrepareBulkLoadRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(table.getName())) - .setRegion(region).build(); - PrepareBulkLoadResponse response = 
getStub().prepareBulkLoad(null, request); - return response.getBulkToken(); - } - }; + PrepareBulkLoadRequest request = PrepareBulkLoadRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(table.getName())).setRegion(region) + .build(); + PrepareBulkLoadResponse response = getStub().prepareBulkLoad(null, request); + return response.getBulkToken(); + } + }; return RpcRetryingCallerFactory.instantiate(conn.getConfiguration(), null) - . newCaller().callWithRetries(callable, Integer.MAX_VALUE); + . newCaller().callWithRetries(callable, Integer.MAX_VALUE); } catch (Throwable throwable) { throw new IOException(throwable); } @@ -79,76 +78,54 @@ public class SecureBulkLoadClient { public void cleanupBulkLoad(final Connection conn, final String bulkToken) throws IOException { try { - ClientServiceCallable callable = new ClientServiceCallable(conn, - table.getName(), HConstants.EMPTY_START_ROW, this.rpcControllerFactory.newController(), PRIORITY_UNSET) { + ClientServiceCallable callable = new ClientServiceCallable(conn, table.getName(), + HConstants.EMPTY_START_ROW, this.rpcControllerFactory.newController(), PRIORITY_UNSET) { @Override protected Void rpcCall() throws Exception { byte[] regionName = getLocation().getRegionInfo().getRegionName(); - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); CleanupBulkLoadRequest request = - CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bulkToken).build(); + CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bulkToken).build(); getStub().cleanupBulkLoad(null, request); return null; } }; - RpcRetryingCallerFactory.instantiate(conn.getConfiguration(), null) - . newCaller().callWithRetries(callable, Integer.MAX_VALUE); + RpcRetryingCallerFactory.instantiate(conn.getConfiguration(), null). newCaller() + .callWithRetries(callable, Integer.MAX_VALUE); } catch (Throwable throwable) { throw new IOException(throwable); } } /** - * Securely bulk load a list of HFiles using client protocol. - * - * @param client - * @param familyPaths - * @param regionName - * @param assignSeqNum - * @param userToken - * @param bulkToken - * @return true if all are loaded - * @throws IOException + * Securely bulk load a list of HFiles using client protocol. + * @return true if all are loaded */ public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client, - final List> familyPaths, - final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken) throws IOException { - return secureBulkLoadHFiles(client, familyPaths, regionName, assignSeqNum, userToken, - bulkToken, false, null, true); + final List> familyPaths, final byte[] regionName, boolean assignSeqNum, + final Token userToken, final String bulkToken) throws IOException { + return secureBulkLoadHFiles(client, familyPaths, regionName, assignSeqNum, userToken, bulkToken, + false, null, true); } /** - * Securely bulk load a list of HFiles using client protocol. - * - * @param client - * @param familyPaths - * @param regionName - * @param assignSeqNum - * @param userToken - * @param bulkToken - * @param copyFiles - * @return true if all are loaded - * @throws IOException + * Securely bulk load a list of HFiles using client protocol.
+ * @return true if all are loaded */ public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client, - final List> familyPaths, - final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken, - boolean copyFiles) throws IOException { - return secureBulkLoadHFiles(client, familyPaths, regionName, assignSeqNum, userToken, - bulkToken, copyFiles, null, true); + final List> familyPaths, final byte[] regionName, boolean assignSeqNum, + final Token userToken, final String bulkToken, boolean copyFiles) throws IOException { + return secureBulkLoadHFiles(client, familyPaths, regionName, assignSeqNum, userToken, bulkToken, + copyFiles, null, true); } public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client, - final List> familyPaths, - final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken, - boolean copyFiles, List clusterIds, boolean replicate) throws IOException { - BulkLoadHFileRequest request = - RequestConverter.buildBulkLoadHFileRequest(familyPaths, regionName, assignSeqNum, - userToken, bulkToken, copyFiles, clusterIds, replicate); + final List> familyPaths, final byte[] regionName, boolean assignSeqNum, + final Token userToken, final String bulkToken, boolean copyFiles, List clusterIds, + boolean replicate) throws IOException { + BulkLoadHFileRequest request = RequestConverter.buildBulkLoadHFileRequest(familyPaths, + regionName, assignSeqNum, userToken, bulkToken, copyFiles, clusterIds, replicate); try { BulkLoadHFileResponse response = client.bulkLoadHFile(null, request); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java index 12e3e3bd990..8eeb2d55ef6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,10 @@ public class ServerStatisticTracker implements StatisticTrackable { } public static ServerStatisticTracker create(Configuration conf) { - if (!conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, - HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE)) { + if ( + !conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, + HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE) + ) { return null; } return new ServerStatisticTracker(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java index 1d1bf6e3c6d..0f73e06b95a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Select server type i.e destination for RPC request associated with ring buffer.
- * e.g slow/large log records are maintained by HRegionServer, whereas balancer decisions - * are maintained by HMaster. + * Select server type i.e destination for RPC request associated with ring buffer. e.g slow/large + * log records are maintained by HRegionServer, whereas balancer decisions are maintained by + * HMaster. */ @InterfaceAudience.Public public enum ServerType { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java index 292405f6d17..fc550c880a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -53,8 +52,8 @@ public interface ServiceCaller { /** * Represent the actual protobuf rpc call. - * @param stub the asynchronous stub - * @param controller the rpc controller, has already been prepared for you + * @param stub the asynchronous stub + * @param controller the rpc controller, has already been prepared for you * @param rpcCallback the rpc callback, has already been prepared for you */ void call(S stub, RpcController controller, RpcCallback rpcCallback); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java index 97127e0e3eb..0362bf1f704 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; @@ -100,10 +101,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedur import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; @@ -212,418 +211,418 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection { @Override public UnassignRegionResponse unassignRegion(RpcController controller, - UnassignRegionRequest request) throws ServiceException { + UnassignRegionRequest request) throws ServiceException { return stub.unassignRegion(controller, request); } @Override public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request) - throws ServiceException { + throws ServiceException { return stub.truncateTable(controller, request); } @Override public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request) - throws ServiceException { + throws ServiceException { return stub.stopMaster(controller, request); } @Override public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request) - throws ServiceException { + throws ServiceException { return stub.snapshot(controller, request); } @Override public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request) - throws ServiceException { + throws ServiceException { return stub.shutdown(controller, request); } @Override public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller, - SetSplitOrMergeEnabledRequest request) throws ServiceException { + SetSplitOrMergeEnabledRequest request) throws ServiceException { return stub.setSplitOrMergeEnabled(controller, request); } @Override public SetQuotaResponse setQuota(RpcController controller, SetQuotaRequest request) - throws ServiceException { + throws ServiceException { return stub.setQuota(controller, request); } @Override public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller, - SetNormalizerRunningRequest request) throws ServiceException { + SetNormalizerRunningRequest request) throws ServiceException { return stub.setNormalizerRunning(controller, request); } @Override public SetBalancerRunningResponse setBalancerRunning(RpcController controller, - SetBalancerRunningRequest request) throws ServiceException { + SetBalancerRunningRequest request) 
throws ServiceException { return stub.setBalancerRunning(controller, request); } @Override public RunCatalogScanResponse runCatalogScan(RpcController controller, - RunCatalogScanRequest request) throws ServiceException { + RunCatalogScanRequest request) throws ServiceException { return stub.runCatalogScan(controller, request); } @Override public RestoreSnapshotResponse restoreSnapshot(RpcController controller, - RestoreSnapshotRequest request) throws ServiceException { + RestoreSnapshotRequest request) throws ServiceException { return stub.restoreSnapshot(controller, request); } @Override public SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller, - SetSnapshotCleanupRequest request) throws ServiceException { + SetSnapshotCleanupRequest request) throws ServiceException { return stub.switchSnapshotCleanup(controller, request); } @Override - public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled( - RpcController controller, IsSnapshotCleanupEnabledRequest request) - throws ServiceException { + public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(RpcController controller, + IsSnapshotCleanupEnabledRequest request) throws ServiceException { return stub.isSnapshotCleanupEnabled(controller, request); } @Override public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, - RemoveReplicationPeerRequest request) throws ServiceException { + RemoveReplicationPeerRequest request) throws ServiceException { return stub.removeReplicationPeer(controller, request); } @Override public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller, - RecommissionRegionServerRequest request) throws ServiceException { + RecommissionRegionServerRequest request) throws ServiceException { return stub.recommissionRegionServer(controller, request); } @Override public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) - throws ServiceException { + throws ServiceException { return stub.offlineRegion(controller, request); } @Override public NormalizeResponse normalize(RpcController controller, NormalizeRequest request) - throws ServiceException { + throws ServiceException { return stub.normalize(controller, request); } @Override public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest request) - throws ServiceException { + throws ServiceException { return stub.moveRegion(controller, request); } @Override public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest request) - throws ServiceException { + throws ServiceException { return stub.modifyTable(controller, request); } @Override public ModifyNamespaceResponse modifyNamespace(RpcController controller, - ModifyNamespaceRequest request) throws ServiceException { + ModifyNamespaceRequest request) throws ServiceException { return stub.modifyNamespace(controller, request); } @Override public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest request) - throws ServiceException { + throws ServiceException { return stub.modifyColumn(controller, request); } @Override public MergeTableRegionsResponse mergeTableRegions(RpcController controller, - MergeTableRegionsRequest request) throws ServiceException { + MergeTableRegionsRequest request) throws ServiceException { return stub.mergeTableRegions(controller, request); } @Override public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController controller, - ListTableNamesByNamespaceRequest request) throws 
ServiceException { + ListTableNamesByNamespaceRequest request) throws ServiceException { return stub.listTableNamesByNamespace(controller, request); } @Override public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( - RpcController controller, ListTableDescriptorsByNamespaceRequest request) - throws ServiceException { + RpcController controller, ListTableDescriptorsByNamespaceRequest request) + throws ServiceException { return stub.listTableDescriptorsByNamespace(controller, request); } @Override - public GetProceduresResponse getProcedures(RpcController controller, - GetProceduresRequest request) throws ServiceException { + public GetProceduresResponse getProcedures(RpcController controller, GetProceduresRequest request) + throws ServiceException { return stub.getProcedures(controller, request); } @Override - public GetLocksResponse getLocks(RpcController controller, - GetLocksRequest request) throws ServiceException { + public GetLocksResponse getLocks(RpcController controller, GetLocksRequest request) + throws ServiceException { return stub.getLocks(controller, request); } @Override public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller, - ListNamespaceDescriptorsRequest request) throws ServiceException { + ListNamespaceDescriptorsRequest request) throws ServiceException { return stub.listNamespaceDescriptors(controller, request); } @Override - public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(RpcController controller, - ListDecommissionedRegionServersRequest request) throws ServiceException { + public ListDecommissionedRegionServersResponse listDecommissionedRegionServers( + RpcController controller, ListDecommissionedRegionServersRequest request) + throws ServiceException { return stub.listDecommissionedRegionServers(controller, request); } @Override public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller, - IsSplitOrMergeEnabledRequest request) throws ServiceException { + IsSplitOrMergeEnabledRequest request) throws ServiceException { return stub.isSplitOrMergeEnabled(controller, request); } @Override public IsSnapshotDoneResponse isSnapshotDone(RpcController controller, - IsSnapshotDoneRequest request) throws ServiceException { + IsSnapshotDoneRequest request) throws ServiceException { return stub.isSnapshotDone(controller, request); } @Override public IsProcedureDoneResponse isProcedureDone(RpcController controller, - IsProcedureDoneRequest request) throws ServiceException { + IsProcedureDoneRequest request) throws ServiceException { return stub.isProcedureDone(controller, request); } @Override public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, - IsNormalizerEnabledRequest request) throws ServiceException { + IsNormalizerEnabledRequest request) throws ServiceException { return stub.isNormalizerEnabled(controller, request); } @Override public IsMasterRunningResponse isMasterRunning(RpcController controller, - IsMasterRunningRequest request) throws ServiceException { + IsMasterRunningRequest request) throws ServiceException { return stub.isMasterRunning(controller, request); } @Override public IsInMaintenanceModeResponse isMasterInMaintenanceMode(RpcController controller, - IsInMaintenanceModeRequest request) throws ServiceException { + IsInMaintenanceModeRequest request) throws ServiceException { return stub.isMasterInMaintenanceMode(controller, request); } @Override public IsCatalogJanitorEnabledResponse 
isCatalogJanitorEnabled(RpcController controller, - IsCatalogJanitorEnabledRequest request) throws ServiceException { + IsCatalogJanitorEnabledRequest request) throws ServiceException { return stub.isCatalogJanitorEnabled(controller, request); } @Override public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller, - IsBalancerEnabledRequest request) throws ServiceException { + IsBalancerEnabledRequest request) throws ServiceException { return stub.isBalancerEnabled(controller, request); } @Override public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request) - throws ServiceException { + throws ServiceException { return stub.getTableState(controller, request); } @Override public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest request) - throws ServiceException { + throws ServiceException { return stub.getTableNames(controller, request); } @Override public GetTableDescriptorsResponse getTableDescriptors(RpcController controller, - GetTableDescriptorsRequest request) throws ServiceException { + GetTableDescriptorsRequest request) throws ServiceException { return stub.getTableDescriptors(controller, request); } @Override public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller, - SecurityCapabilitiesRequest request) throws ServiceException { + SecurityCapabilitiesRequest request) throws ServiceException { return stub.getSecurityCapabilities(controller, request); } @Override public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller, - GetSchemaAlterStatusRequest request) throws ServiceException { + GetSchemaAlterStatusRequest request) throws ServiceException { return stub.getSchemaAlterStatus(controller, request); } @Override public GetProcedureResultResponse getProcedureResult(RpcController controller, - GetProcedureResultRequest request) throws ServiceException { + GetProcedureResultRequest request) throws ServiceException { return stub.getProcedureResult(controller, request); } @Override public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, - GetNamespaceDescriptorRequest request) throws ServiceException { + GetNamespaceDescriptorRequest request) throws ServiceException { return stub.getNamespaceDescriptor(controller, request); } @Override public ListNamespacesResponse listNamespaces(RpcController controller, - ListNamespacesRequest request) throws ServiceException { + ListNamespacesRequest request) throws ServiceException { return stub.listNamespaces(controller, request); } @Override public HBaseProtos.LogEntry getLogEntries(RpcController controller, - HBaseProtos.LogRequest request) throws ServiceException { + HBaseProtos.LogRequest request) throws ServiceException { return stub.getLogEntries(controller, request); } @Override public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( - RpcController controller, MajorCompactionTimestampForRegionRequest request) - throws ServiceException { + RpcController controller, MajorCompactionTimestampForRegionRequest request) + throws ServiceException { return stub.getLastMajorCompactionTimestampForRegion(controller, request); } @Override public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller, - MajorCompactionTimestampRequest request) throws ServiceException { + MajorCompactionTimestampRequest request) throws ServiceException { return stub.getLastMajorCompactionTimestamp(controller, request); } @Override public 
GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller, - GetCompletedSnapshotsRequest request) throws ServiceException { + GetCompletedSnapshotsRequest request) throws ServiceException { return stub.getCompletedSnapshots(controller, request); } @Override public GetClusterStatusResponse getClusterStatus(RpcController controller, - GetClusterStatusRequest request) throws ServiceException { + GetClusterStatusRequest request) throws ServiceException { return stub.getClusterStatus(controller, request); } @Override public ExecProcedureResponse execProcedureWithRet(RpcController controller, - ExecProcedureRequest request) throws ServiceException { + ExecProcedureRequest request) throws ServiceException { return stub.execProcedureWithRet(controller, request); } @Override public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request) - throws ServiceException { + throws ServiceException { return stub.execProcedure(controller, request); } @Override public CoprocessorServiceResponse execMasterService(RpcController controller, - CoprocessorServiceRequest request) throws ServiceException { + CoprocessorServiceRequest request) throws ServiceException { return stub.execMasterService(controller, request); } @Override public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request) - throws ServiceException { + throws ServiceException { return stub.enableTable(controller, request); } @Override public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, - EnableReplicationPeerRequest request) throws ServiceException { + EnableReplicationPeerRequest request) throws ServiceException { return stub.enableReplicationPeer(controller, request); } @Override public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController controller, - EnableCatalogJanitorRequest request) throws ServiceException { + EnableCatalogJanitorRequest request) throws ServiceException { return stub.enableCatalogJanitor(controller, request); } @Override public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller, - DecommissionRegionServersRequest request) throws ServiceException { + DecommissionRegionServersRequest request) throws ServiceException { return stub.decommissionRegionServers(controller, request); } @Override public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request) - throws ServiceException { + throws ServiceException { return stub.disableTable(controller, request); } @Override public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller, - DisableReplicationPeerRequest request) throws ServiceException { + DisableReplicationPeerRequest request) throws ServiceException { return stub.disableReplicationPeer(controller, request); } @Override public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request) - throws ServiceException { + throws ServiceException { return stub.deleteTable(controller, request); } @Override public DeleteSnapshotResponse deleteSnapshot(RpcController controller, - DeleteSnapshotRequest request) throws ServiceException { + DeleteSnapshotRequest request) throws ServiceException { return stub.deleteSnapshot(controller, request); } @Override public DeleteNamespaceResponse deleteNamespace(RpcController controller, - DeleteNamespaceRequest request) throws ServiceException { + DeleteNamespaceRequest request) throws ServiceException { return stub.deleteNamespace(controller, 
request); } @Override public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest request) - throws ServiceException { + throws ServiceException { return stub.deleteColumn(controller, request); } @Override public CreateTableResponse createTable(RpcController controller, CreateTableRequest request) - throws ServiceException { + throws ServiceException { return stub.createTable(controller, request); } @Override public CreateNamespaceResponse createNamespace(RpcController controller, - CreateNamespaceRequest request) throws ServiceException { + CreateNamespaceRequest request) throws ServiceException { return stub.createNamespace(controller, request); } @Override public BalanceResponse balance(RpcController controller, BalanceRequest request) - throws ServiceException { + throws ServiceException { return stub.balance(controller, request); } @Override public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest request) - throws ServiceException { + throws ServiceException { return stub.assignRegion(controller, request); } @Override public AddReplicationPeerResponse addReplicationPeer(RpcController controller, - AddReplicationPeerRequest request) throws ServiceException { + AddReplicationPeerRequest request) throws ServiceException { return stub.addReplicationPeer(controller, request); } @Override public AddColumnResponse addColumn(RpcController controller, AddColumnRequest request) - throws ServiceException { + throws ServiceException { return stub.addColumn(controller, request); } @Override public AbortProcedureResponse abortProcedure(RpcController controller, - AbortProcedureRequest request) throws ServiceException { + AbortProcedureRequest request) throws ServiceException { return stub.abortProcedure(controller, request); } @@ -634,103 +633,103 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection { @Override public RunCleanerChoreResponse runCleanerChore(RpcController controller, - RunCleanerChoreRequest request) throws ServiceException { + RunCleanerChoreRequest request) throws ServiceException { return stub.runCleanerChore(controller, request); } @Override public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController controller, - SetCleanerChoreRunningRequest request) throws ServiceException { + SetCleanerChoreRunningRequest request) throws ServiceException { return stub.setCleanerChoreRunning(controller, request); } @Override public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController controller, - IsCleanerChoreEnabledRequest request) throws ServiceException { + IsCleanerChoreEnabledRequest request) throws ServiceException { return stub.isCleanerChoreEnabled(controller, request); } @Override public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller, - GetReplicationPeerConfigRequest request) throws ServiceException { + GetReplicationPeerConfigRequest request) throws ServiceException { return stub.getReplicationPeerConfig(controller, request); } @Override public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller, - UpdateReplicationPeerConfigRequest request) throws ServiceException { + UpdateReplicationPeerConfigRequest request) throws ServiceException { return stub.updateReplicationPeerConfig(controller, request); } @Override public ListReplicationPeersResponse listReplicationPeers(RpcController controller, - ListReplicationPeersRequest request) throws ServiceException { + ListReplicationPeersRequest request) 
throws ServiceException { return stub.listReplicationPeers(controller, request); } @Override public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller, - GetSpaceQuotaRegionSizesRequest request) throws ServiceException { + GetSpaceQuotaRegionSizesRequest request) throws ServiceException { return stub.getSpaceQuotaRegionSizes(controller, request); } @Override public GetQuotaStatesResponse getQuotaStates(RpcController controller, - GetQuotaStatesRequest request) throws ServiceException { + GetQuotaStatesRequest request) throws ServiceException { return stub.getQuotaStates(controller, request); } @Override public ClearDeadServersResponse clearDeadServers(RpcController controller, - ClearDeadServersRequest request) throws ServiceException { + ClearDeadServersRequest request) throws ServiceException { return stub.clearDeadServers(controller, request); } @Override - public SplitTableRegionResponse splitRegion(RpcController controller, SplitTableRegionRequest request) - throws ServiceException { + public SplitTableRegionResponse splitRegion(RpcController controller, + SplitTableRegionRequest request) throws ServiceException { return stub.splitRegion(controller, request); } @Override public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller, - SwitchRpcThrottleRequest request) throws ServiceException { + SwitchRpcThrottleRequest request) throws ServiceException { return stub.switchRpcThrottle(controller, request); } @Override public IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller, - IsRpcThrottleEnabledRequest request) throws ServiceException { + IsRpcThrottleEnabledRequest request) throws ServiceException { return stub.isRpcThrottleEnabled(controller, request); } @Override public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller, - SwitchExceedThrottleQuotaRequest request) throws ServiceException { + SwitchExceedThrottleQuotaRequest request) throws ServiceException { return stub.switchExceedThrottleQuota(controller, request); } @Override public GrantResponse grant(RpcController controller, GrantRequest request) - throws ServiceException { + throws ServiceException { return stub.grant(controller, request); } @Override public RevokeResponse revoke(RpcController controller, RevokeRequest request) - throws ServiceException { + throws ServiceException { return stub.revoke(controller, request); } @Override public GetUserPermissionsResponse getUserPermissions(RpcController controller, - GetUserPermissionsRequest request) throws ServiceException { + GetUserPermissionsRequest request) throws ServiceException { return stub.getUserPermissions(controller, request); } @Override public HasUserPermissionsResponse hasUserPermissions(RpcController controller, - HasUserPermissionsRequest request) throws ServiceException { + HasUserPermissionsRequest request) throws ServiceException { return stub.hasUserPermissions(controller, request); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java index 54abc452a30..b5fc3eb91a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +34,6 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -59,7 +57,8 @@ class SimpleRequestController implements RequestController { /** * The maximum heap size for each request. */ - public static final String HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = "hbase.client.max.perrequest.heapsize"; + public static final String HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = + "hbase.client.max.perrequest.heapsize"; /** * Default value of {@link #HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE}. @@ -82,10 +81,11 @@ class SimpleRequestController implements RequestController { /** * Default value of {@link #HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE}. */ - static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE; + static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = + DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE; final AtomicLong tasksInProgress = new AtomicLong(0); - final ConcurrentMap taskCounterPerRegion - = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); + final ConcurrentMap taskCounterPerRegion = + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); final ConcurrentMap taskCounterPerServer = new ConcurrentHashMap<>(); /** * The number of tasks simultaneously executed on the cluster. @@ -102,10 +102,9 @@ class SimpleRequestController implements RequestController { private final long maxRowsPerRequest; private final long maxHeapSizeSubmit; /** - * The number of tasks we run in parallel on a single region. With 1 (the - * default) , we ensure that the ordering of the queries is respected: we - * don't start a set of operations on a region before the previous one is - * done. As well, this limits the pressure we put on the region server. + * The number of tasks we run in parallel on a single region. With 1 (the default) , we ensure + * that the ordering of the queries is respected: we don't start a set of operations on a region + * before the previous one is done. As well, this limits the pressure we put on the region server. 
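For completeness, a hedged example of tuning the limits this controller enforces through the client configuration. The three HConstants keys are the ones read in the constructor below, the heap-size key is the constant defined above, and the concrete values are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class RequestControllerTuningExample {
  static Configuration tunedClientConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, 200);   // cap across the whole cluster
    conf.setInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, 4); // cap per region server
    conf.setInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, 1); // keep per-region ordering
    conf.setLong("hbase.client.max.perrequest.heapsize", 4L * 1024 * 1024);
    return conf;
  }
}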
*/ final int maxConcurrentTasksPerRegion; @@ -115,37 +114,32 @@ class SimpleRequestController implements RequestController { final int maxConcurrentTasksPerServer; private final int thresholdToLogUndoneTaskDetails; public static final String THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = - "hbase.client.threshold.log.details"; + "hbase.client.threshold.log.details"; private static final int DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = 10; public static final String THRESHOLD_TO_LOG_REGION_DETAILS = - "hbase.client.threshold.log.region.details"; + "hbase.client.threshold.log.region.details"; private static final int DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS = 2; private final int thresholdToLogRegionDetails; + SimpleRequestController(final Configuration conf) { - this.maxTotalConcurrentTasks = checkAndGet(conf, - HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); - this.maxConcurrentTasksPerServer = checkAndGet(conf, - HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS); - this.maxConcurrentTasksPerRegion = checkAndGet(conf, - HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS); - this.maxHeapSizePerRequest = checkAndGet(conf, - HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, - DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE); - this.maxRowsPerRequest = checkAndGet(conf, - HBASE_CLIENT_MAX_PERREQUEST_ROWS, - DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_ROWS); - this.maxHeapSizeSubmit = checkAndGet(conf, - HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, - DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE); - this.thresholdToLogUndoneTaskDetails = conf.getInt( - THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS, - DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS); - this.thresholdToLogRegionDetails = conf.getInt( - THRESHOLD_TO_LOG_REGION_DETAILS, - DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS); + this.maxTotalConcurrentTasks = checkAndGet(conf, HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); + this.maxConcurrentTasksPerServer = + checkAndGet(conf, HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS); + this.maxConcurrentTasksPerRegion = + checkAndGet(conf, HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS); + this.maxHeapSizePerRequest = checkAndGet(conf, HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, + DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE); + this.maxRowsPerRequest = + checkAndGet(conf, HBASE_CLIENT_MAX_PERREQUEST_ROWS, DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_ROWS); + this.maxHeapSizeSubmit = + checkAndGet(conf, HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE); + this.thresholdToLogUndoneTaskDetails = conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS, + DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS); + this.thresholdToLogRegionDetails = + conf.getInt(THRESHOLD_TO_LOG_REGION_DETAILS, DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS); } private static int checkAndGet(Configuration conf, String key, int defaultValue) { @@ -219,12 +213,8 @@ class SimpleRequestController implements RequestController { @Override public Checker newChecker() { List checkers = new ArrayList<>(4); - checkers.add(new TaskCountChecker(maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, - taskCounterPerServer, - taskCounterPerRegion)); + checkers.add(new TaskCountChecker(maxTotalConcurrentTasks, maxConcurrentTasksPerServer, + maxConcurrentTasksPerRegion, 
tasksInProgress, taskCounterPerServer, taskCounterPerRegion)); checkers.add(new RequestHeapSizeChecker(maxHeapSizePerRequest)); checkers.add(new SubmittedSizeChecker(maxHeapSizeSubmit)); checkers.add(new RequestRowsChecker(maxRowsPerRequest)); @@ -237,9 +227,9 @@ class SimpleRequestController implements RequestController { computeIfAbsent(taskCounterPerServer, sn, AtomicInteger::new).incrementAndGet(); - regions.forEach((regBytes) - -> computeIfAbsent(taskCounterPerRegion, regBytes, AtomicInteger::new).incrementAndGet() - ); + regions + .forEach((regBytes) -> computeIfAbsent(taskCounterPerRegion, regBytes, AtomicInteger::new) + .incrementAndGet()); } @Override @@ -262,8 +252,8 @@ class SimpleRequestController implements RequestController { } @Override - public void waitForMaximumCurrentTasks(long max, long id, - int periodToTrigger, Consumer trigger) throws InterruptedIOException { + public void waitForMaximumCurrentTasks(long max, long id, int periodToTrigger, + Consumer trigger) throws InterruptedIOException { assert max >= 0; long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress, oldInProgress = Long.MAX_VALUE; @@ -286,8 +276,8 @@ class SimpleRequestController implements RequestController { } } } catch (InterruptedException e) { - throw new InterruptedIOException("#" + id + ", interrupted." + - " currentNumberOfTask=" + currentInProgress); + throw new InterruptedIOException( + "#" + id + ", interrupted." + " currentNumberOfTask=" + currentInProgress); } } } @@ -315,13 +305,14 @@ class SimpleRequestController implements RequestController { } @Override - public void waitForFreeSlot(long id, int periodToTrigger, Consumer trigger) throws InterruptedIOException { + public void waitForFreeSlot(long id, int periodToTrigger, Consumer trigger) + throws InterruptedIOException { waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1, id, periodToTrigger, trigger); } /** - * limit the heapsize of total submitted data. Reduce the limit of heapsize - * for submitting quickly if there is no running task. + * limit the heapsize of total submitted data. Reduce the limit of heapsize for submitting quickly + * if there is no running task. 
*/ static class SubmittedSizeChecker implements RowChecker { @@ -358,7 +349,7 @@ class SimpleRequestController implements RequestController { */ static class TaskCountChecker implements RowChecker { - private static final long MAX_WAITING_TIME = 1000; //ms + private static final long MAX_WAITING_TIME = 1000; // ms private final Set regionsIncluded = new HashSet<>(); private final Set serversIncluded = new HashSet<>(); private final int maxConcurrentTasksPerRegion; @@ -369,12 +360,10 @@ class SimpleRequestController implements RequestController { private final Set busyRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR); private final AtomicLong tasksInProgress; - TaskCountChecker(final int maxTotalConcurrentTasks, - final int maxConcurrentTasksPerServer, - final int maxConcurrentTasksPerRegion, - final AtomicLong tasksInProgress, - final Map taskCounterPerServer, - final Map taskCounterPerRegion) { + TaskCountChecker(final int maxTotalConcurrentTasks, final int maxConcurrentTasksPerServer, + final int maxConcurrentTasksPerRegion, final AtomicLong tasksInProgress, + final Map taskCounterPerServer, + final Map taskCounterPerRegion) { this.maxTotalConcurrentTasks = maxTotalConcurrentTasks; this.maxConcurrentTasksPerRegion = maxConcurrentTasksPerRegion; this.maxConcurrentTasksPerServer = maxConcurrentTasksPerServer; @@ -410,18 +399,15 @@ class SimpleRequestController implements RequestController { tasksInProgress.wait(10); } } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted." - + " tasksInProgress=" + tasksInProgress); + throw new InterruptedIOException("Interrupted." + " tasksInProgress=" + tasksInProgress); } } } /** - * 1) check the regions is allowed. 2) check the concurrent tasks for - * regions. 3) check the total concurrent tasks. 4) check the concurrent - * tasks for server. - * - * @param loc the destination of data + * 1) check the regions is allowed. 2) check the concurrent tasks for regions. 3) check the + * total concurrent tasks. 4) check the concurrent tasks for server. + * @param loc the destination of data * @param heapSizeOfRow the data size * @return either Include {@link RequestController.ReturnCode} or skip * {@link RequestController.ReturnCode} @@ -439,8 +425,8 @@ class SimpleRequestController implements RequestController { // Too many tasks on this region already. return ReturnCode.SKIP; } - int newServers = serversIncluded.size() - + (serversIncluded.contains(loc.getServerName()) ? 0 : 1); + int newServers = + serversIncluded.size() + (serversIncluded.contains(loc.getServerName()) ? 0 : 1); if ((newServers + tasksInProgress.get()) > maxTotalConcurrentTasks) { // Too many tasks. return ReturnCode.SKIP; @@ -482,8 +468,8 @@ class SimpleRequestController implements RequestController { @Override public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { - long currentRows = serverRows.containsKey(loc.getServerName()) - ? serverRows.get(loc.getServerName()) : 0L; + long currentRows = + serverRows.containsKey(loc.getServerName()) ? serverRows.get(loc.getServerName()) : 0L; // accept at least one row if (currentRows == 0 || currentRows < maxRowsPerRequest) { return ReturnCode.INCLUDE; @@ -494,8 +480,8 @@ class SimpleRequestController implements RequestController { @Override public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow) { if (code == ReturnCode.INCLUDE) { - long currentRows = serverRows.containsKey(loc.getServerName()) - ? 
serverRows.get(loc.getServerName()) : 0L; + long currentRows = + serverRows.containsKey(loc.getServerName()) ? serverRows.get(loc.getServerName()) : 0L; serverRows.put(loc.getServerName(), currentRows + 1); } } @@ -522,7 +508,8 @@ class SimpleRequestController implements RequestController { public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { // Is it ok for limit of request size? long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName()) - ? serverRequestSizes.get(loc.getServerName()) : 0L; + ? serverRequestSizes.get(loc.getServerName()) + : 0L; // accept at least one request if (currentRequestSize == 0 || currentRequestSize + heapSizeOfRow <= maxHeapSizePerRequest) { return ReturnCode.INCLUDE; @@ -534,7 +521,8 @@ class SimpleRequestController implements RequestController { public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow) { if (code == ReturnCode.INCLUDE) { long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName()) - ? serverRequestSizes.get(loc.getServerName()) : 0L; + ? serverRequestSizes.get(loc.getServerName()) + : 0L; serverRequestSizes.put(loc.getServerName(), currentRequestSize + heapSizeOfRow); } } @@ -548,11 +536,10 @@ class SimpleRequestController implements RequestController { ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow); /** - * Add the final ReturnCode to the checker. The ReturnCode may be reversed, - * so the checker need the final decision to update the inner state. - * - * @param code The final decision - * @param loc the destination of data + * Add the final ReturnCode to the checker. The ReturnCode may be reversed, so the checker need + * the final decision to update the inner state. + * @param code The final decision + * @param loc the destination of data * @param heapSizeOfRow the data size */ void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java index 252142ac80d..17c4b38b355 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -58,6 +56,7 @@ public class SingleResponse extends AbstractResponse { public void setEntry(Entry entry) { this.entry = entry; } + @Override public ResponseType type() { return ResponseType.SINGLE; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java index 86df9fda207..3311539c261 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.StringUtils; @@ -26,8 +24,8 @@ import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.yetus.audience.InterfaceAudience; /** - * SlowLog params object that contains detailed info as params and region name : to be used - * for filter purpose + * SlowLog params object that contains detailed info as params and region name : to be used for + * filter purpose */ @InterfaceAudience.Private public class SlowLogParams { @@ -55,9 +53,7 @@ public class SlowLogParams { @Override public String toString() { - return new ToStringBuilder(this) - .append("regionName", regionName) - .append("params", params) + return new ToStringBuilder(this).append("regionName", regionName).append("params", params) .toString(); } @@ -73,17 +69,12 @@ public class SlowLogParams { SlowLogParams that = (SlowLogParams) o; - return new EqualsBuilder() - .append(regionName, that.regionName) - .append(params, that.params) + return new EqualsBuilder().append(regionName, that.regionName).append(params, that.params) .isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(regionName) - .append(params) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(regionName).append(params).toHashCode(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java index 154b99460f1..4cf7a876137 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Map; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -46,7 +45,7 @@ public class SnapshotDescription { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. + * instance instead. * @see #SnapshotDescription(String, TableName) * @see HBASE-16892 */ @@ -61,7 +60,7 @@ public class SnapshotDescription { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. + * instance instead. * @see #SnapshotDescription(String, TableName, SnapshotType) * @see HBASE-16892 */ @@ -78,7 +77,7 @@ public class SnapshotDescription { * @see #SnapshotDescription(String, TableName, SnapshotType, String) * @see HBASE-16892 * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. + * instance instead. */ @Deprecated public SnapshotDescription(String name, String table, SnapshotType type, String owner) { @@ -93,35 +92,33 @@ public class SnapshotDescription { * @see #SnapshotDescription(String, TableName, SnapshotType, String, long, int, Map) * @see HBASE-16892 * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. + * instance instead. 
*/ @Deprecated public SnapshotDescription(String name, String table, SnapshotType type, String owner, - long creationTime, int version) { + long creationTime, int version) { this(name, TableName.valueOf(table), type, owner, creationTime, version, null); } /** * SnapshotDescription Parameterized Constructor - * - * @param name Name of the snapshot - * @param table TableName associated with the snapshot - * @param type Type of the snapshot - enum SnapshotType - * @param owner Snapshot Owner + * @param name Name of the snapshot + * @param table TableName associated with the snapshot + * @param type Type of the snapshot - enum SnapshotType + * @param owner Snapshot Owner * @param creationTime Creation time for Snapshot - * @param version Snapshot Version + * @param version Snapshot Version * @deprecated since 2.3.0 and will be removed in 4.0.0. Use - * {@link #SnapshotDescription(String, TableName, SnapshotType, String, long, int, Map)} + * {@link #SnapshotDescription(String, TableName, SnapshotType, String, long, int, Map)} */ @Deprecated public SnapshotDescription(String name, TableName table, SnapshotType type, String owner, - long creationTime, int version) { + long creationTime, int version) { this(name, table, type, owner, creationTime, version, null); } /** * SnapshotDescription Parameterized Constructor - * * @param name Name of the snapshot * @param table TableName associated with the snapshot * @param type Type of the snapshot - enum SnapshotType @@ -131,7 +128,7 @@ public class SnapshotDescription { * @param snapshotProps Additional properties for snapshot e.g. TTL */ public SnapshotDescription(String name, TableName table, SnapshotType type, String owner, - long creationTime, int version, Map snapshotProps) { + long creationTime, int version, Map snapshotProps) { this.name = name; this.table = table; this.snapShotType = type; @@ -146,18 +143,15 @@ public class SnapshotDescription { return MapUtils.getLongValue(snapshotProps, property, -1); } - - /** * SnapshotDescription Parameterized Constructor - * * @param snapshotName Name of the snapshot * @param tableName TableName associated with the snapshot * @param type Type of the snapshot - enum SnapshotType * @param snapshotProps Additional properties for snapshot e.g. TTL */ public SnapshotDescription(String snapshotName, TableName tableName, SnapshotType type, - Map snapshotProps) { + Map snapshotProps) { this(snapshotName, tableName, type, null, -1, -1, snapshotProps); } @@ -167,7 +161,7 @@ public class SnapshotDescription { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getTableName()} or - * {@link #getTableNameAsString()} instead. + * {@link #getTableNameAsString()} instead. 
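For context on the constructor being reflowed here: the snapshotProps map is how per-snapshot settings such as TTL reach the description. A minimal sketch of building one; the "TTL" property key and all names/values are assumptions for illustration only:

    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotDescriptionSketch {
      public static void main(String[] args) {
        // "TTL" is assumed here to be the property key consulted for the snapshot TTL (in seconds).
        Map<String, Object> props = Collections.singletonMap("TTL", 86400L);
        SnapshotDescription desc = new SnapshotDescription("snap_demo",
          TableName.valueOf("demo_table"), SnapshotType.FLUSH, props);
        System.out.println(desc); // uses the ToStringBuilder output shown in the hunk above
      }
    }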
* @see #getTableName() * @see #getTableNameAsString() * @see HBASE-16892 @@ -206,19 +200,15 @@ public class SnapshotDescription { return this.version; } - public long getMaxFileSize() { return maxFileSize; } + public long getMaxFileSize() { + return maxFileSize; + } @Override public String toString() { - return new ToStringBuilder(this) - .append("name", name) - .append("table", table) - .append("snapShotType", snapShotType) - .append("owner", owner) - .append("creationTime", creationTime) - .append("ttl", ttl) - .append("version", version) - .append("maxFileSize", maxFileSize) - .toString(); + return new ToStringBuilder(this).append("name", name).append("table", table) + .append("snapShotType", snapShotType).append("owner", owner) + .append("creationTime", creationTime).append("ttl", ttl).append("version", version) + .append("maxFileSize", maxFileSize).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java index a4e4cc08be7..1c453aa24be 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,5 +24,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Public public enum SnapshotType { - DISABLED, FLUSH, SKIPFLUSH + DISABLED, + FLUSH, + SKIPFLUSH } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java index b73dee1e44f..625271177a7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface StatisticTrackable { /** - * Update stats per region. - * */ + * Update stats per region. 
+ */ void updateRegionStats(ServerName server, byte[] region, RegionLoadStats stats); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java index 6b4419d5385..3473ee07949 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,20 +22,17 @@ import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; - import java.io.IOException; - +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; /** - * Base class which provides clients with an RPC connection to - * call coprocessor endpoint {@link com.google.protobuf.Service}s. - * Note that clients should not use this class directly, except through - * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}. + * Base class which provides clients with an RPC connection to call coprocessor endpoint + * {@link com.google.protobuf.Service}s. Note that clients should not use this class directly, + * except through {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}. 
*/ @InterfaceAudience.Public abstract class SyncCoprocessorRpcChannel implements CoprocessorRpcChannel { @@ -43,10 +40,8 @@ abstract class SyncCoprocessorRpcChannel implements CoprocessorRpcChannel { @Override @InterfaceAudience.Private - public void callMethod(Descriptors.MethodDescriptor method, - RpcController controller, - Message request, Message responsePrototype, - RpcCallback callback) { + public void callMethod(Descriptors.MethodDescriptor method, RpcController controller, + Message request, Message responsePrototype, RpcCallback callback) { Message response = null; try { response = callExecService(controller, method, request, responsePrototype); @@ -61,18 +56,16 @@ abstract class SyncCoprocessorRpcChannel implements CoprocessorRpcChannel { @Override @InterfaceAudience.Private - public Message callBlockingMethod(Descriptors.MethodDescriptor method, - RpcController controller, - Message request, Message responsePrototype) - throws ServiceException { + public Message callBlockingMethod(Descriptors.MethodDescriptor method, RpcController controller, + Message request, Message responsePrototype) throws ServiceException { try { return callExecService(controller, method, request, responsePrototype); } catch (IOException ioe) { - throw new ServiceException("Error calling method "+method.getFullName(), ioe); + throw new ServiceException("Error calling method " + method.getFullName(), ioe); } } protected abstract Message callExecService(RpcController controller, - Descriptors.MethodDescriptor method, Message request, Message responsePrototype) - throws IOException; + Descriptors.MethodDescriptor method, Message request, Message responsePrototype) + throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index f8a28d5597e..894b4678908 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,10 +42,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Used to communicate with a single HBase table. - * Obtain an instance from a {@link Connection} and call {@link #close()} afterwards. - * - *

    Table can be used to get, put, delete or scan data from a table. + * Used to communicate with a single HBase table. Obtain an instance from a {@link Connection} and + * call {@link #close()} afterwards. + *

    + * Table can be used to get, put, delete or scan data from a table. * @see ConnectionFactory * @see Connection * @see Admin @@ -63,30 +62,29 @@ public interface Table extends Closeable { /** * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance. *
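As an aside on the class-level javadoc reflowed above: the intended usage is to obtain the Table from a Connection and close it afterwards. A minimal sketch of that lifecycle; the table, family, and qualifier names are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(TableName.valueOf("demo_table"))) {
          // Put a cell, then read it back through the same Table instance.
          table.put(new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
          Result result = table.get(new Get(Bytes.toBytes("row1")));
          System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
        }
      }
    }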

    - * The reference returned is not a copy, so any change made to it will - * affect this instance. + * The reference returned is not a copy, so any change made to it will affect this instance. */ Configuration getConfiguration(); /** * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table. * @throws java.io.IOException if a remote or network exception occurs. - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #getDescriptor()} + * @deprecated since 2.0 version and will be removed in 3.0 version. use {@link #getDescriptor()} */ @Deprecated default HTableDescriptor getTableDescriptor() throws IOException { TableDescriptor descriptor = getDescriptor(); if (descriptor instanceof HTableDescriptor) { - return (HTableDescriptor)descriptor; + return (HTableDescriptor) descriptor; } else { return new HTableDescriptor(descriptor); } } /** - * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table. + * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this + * table. * @throws java.io.IOException if a remote or network exception occurs. */ TableDescriptor getDescriptor() throws IOException; @@ -99,13 +97,9 @@ public interface Table extends Closeable { /** * Test for the existence of columns in the table, as specified by the Get. *

    - * * This will return true if the Get matches one or more keys, false if not. *

    - * - * This is a server-side call so it prevents any data from being transfered to - * the client. - * + * This is a server-side call so it prevents any data from being transfered to the client. * @param get the Get * @return true if the specified Get matches one or more keys, false if not * @throws IOException e @@ -117,16 +111,12 @@ public interface Table extends Closeable { /** * Test for the existence of columns in the table, as specified by the Gets. *

    - * - * This will return an array of booleans. Each value will be true if the related Get matches - * one or more keys, false if not. + * This will return an array of booleans. Each value will be true if the related Get matches one + * or more keys, false if not. *

    - * - * This is a server-side call so it prevents any data from being transferred to - * the client. - * + * This is a server-side call so it prevents any data from being transferred to the client. * @param gets the Gets - * @return Array of boolean. True if the specified Get matches one or more keys, false if not. + * @return Array of boolean. True if the specified Get matches one or more keys, false if not. * @throws IOException e */ default boolean[] exists(List gets) throws IOException { @@ -134,17 +124,14 @@ public interface Table extends Closeable { } /** - * Test for the existence of columns in the table, as specified by the Gets. - * This will return an array of booleans. Each value will be true if the related Get matches - * one or more keys, false if not. - * This is a server-side call so it prevents any data from being transferred to - * the client. - * + * Test for the existence of columns in the table, as specified by the Gets. This will return an + * array of booleans. Each value will be true if the related Get matches one or more keys, false + * if not. This is a server-side call so it prevents any data from being transferred to the + * client. * @param gets the Gets - * @return Array of boolean. True if the specified Get matches one or more keys, false if not. + * @return Array of boolean. True if the specified Get matches one or more keys, false if not. * @throws IOException e - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #exists(List)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use {@link #exists(List)} */ @Deprecated default boolean[] existsAll(List gets) throws IOException { @@ -152,21 +139,18 @@ public interface Table extends Closeable { } /** - * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. - * The ordering of execution of the actions is not defined. Meaning if you do a Put and a - * Get in the same {@link #batch} call, you will not necessarily be - * guaranteed that the Get returns what the Put had put. - * + * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The + * ordering of execution of the actions is not defined. Meaning if you do a Put and a Get in the + * same {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the + * Put had put. * @param actions list of Get, Put, Delete, Increment, Append, RowMutations. - * @param results Empty Object[], same size as actions. Provides access to partial - * results, in case an exception is thrown. A null in the result array means that - * the call for that action failed, even after retries. The order of the objects - * in the results array corresponds to the order of actions in the request list. - * @throws IOException - * @since 0.90.0 + * @param results Empty Object[], same size as actions. Provides access to partial results, in + * case an exception is thrown. A null in the result array means that the call for + * that action failed, even after retries. The order of the objects in the results + * array corresponds to the order of actions in the request list. 
n * @since 0.90.0 */ - default void batch(final List actions, final Object[] results) throws IOException, - InterruptedException { + default void batch(final List actions, final Object[] results) + throws IOException, InterruptedException { throw new NotImplementedException("Add an implementation!"); } @@ -174,19 +158,17 @@ public interface Table extends Closeable { * Same as {@link #batch(List, Object[])}, but with a callback. * @since 0.96.0 */ - default void batchCallback( - final List actions, final Object[] results, final Batch.Callback callback) - throws IOException, InterruptedException { + default void batchCallback(final List actions, final Object[] results, + final Batch.Callback callback) throws IOException, InterruptedException { throw new NotImplementedException("Add an implementation!"); } /** * Extracts certain cells from a given row. * @param get The object that specifies what data to fetch and from which row. - * @return The data coming from the specified row, if it exists. If the row - * specified doesn't exist, the {@link Result} instance returned won't - * contain any {@link org.apache.hadoop.hbase.KeyValue}, as indicated by - * {@link Result#isEmpty()}. + * @return The data coming from the specified row, if it exists. If the row specified doesn't + * exist, the {@link Result} instance returned won't contain any + * {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 */ @@ -196,31 +178,27 @@ public interface Table extends Closeable { /** * Extracts specified cells from the given rows, as a batch. - * * @param gets The objects that specify what data to fetch and from which rows. - * @return The data coming from the specified rows, if it exists. If the row specified doesn't - * exist, the {@link Result} instance returned won't contain any - * {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If there - * are any failures even after retries, there will be a null in the results' array - * for those Gets, AND an exception will be thrown. The ordering of the Result array - * corresponds to the order of the list of passed in Gets. + * @return The data coming from the specified rows, if it exists. If the row specified doesn't + * exist, the {@link Result} instance returned won't contain any + * {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If + * there are any failures even after retries, there will be a null in the + * results' array for those Gets, AND an exception will be thrown. The ordering of the + * Result array corresponds to the order of the list of passed in Gets. * @throws IOException if a remote or network exception occurs. * @since 0.90.0 - * @apiNote {@link #put(List)} runs pre-flight validations on the input list on client. - * Currently {@link #get(List)} doesn't run any validations on the client-side, - * currently there is no need, but this may change in the future. An - * {@link IllegalArgumentException} will be thrown in this case. + * @apiNote {@link #put(List)} runs pre-flight validations on the input list on client. Currently + * {@link #get(List)} doesn't run any validations on the client-side, currently there is + * no need, but this may change in the future. An {@link IllegalArgumentException} will + * be thrown in this case. 
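For readers of the batch(List, Object[]) contract described above: results is a pre-sized array whose slots line up with the action list, and a null slot marks an action that failed even after retries. A minimal sketch against an already opened Table; row, family, and qualifier names are placeholders:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchSketch {
      static void mixedBatch(Table table) throws Exception {
        List<Row> actions = new ArrayList<>();
        actions.add(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
        actions.add(new Get(Bytes.toBytes("row2")));
        Object[] results = new Object[actions.size()]; // same size as actions
        table.batch(actions, results);
        for (int i = 0; i < results.length; i++) {
          // A null slot means the corresponding action failed even after retries.
          System.out.println("action " + i + " -> " + results[i]);
        }
      }
    }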
*/ default Result[] get(List gets) throws IOException { throw new NotImplementedException("Add an implementation!"); } /** - * Returns a scanner on the current table as specified by the {@link Scan} - * object. - * Note that the passed {@link Scan}'s start row and caching properties - * maybe changed. - * + * Returns a scanner on the current table as specified by the {@link Scan} object. Note that the + * passed {@link Scan}'s start row and caching properties maybe changed. * @param scan A configured {@link Scan} object. * @return A scanner. * @throws IOException if a remote or network exception occurs. @@ -232,7 +210,6 @@ public interface Table extends Closeable { /** * Gets a scanner on the current table for the given family. - * * @param family The column family to scan. * @return A scanner. * @throws IOException if a remote or network exception occurs. @@ -244,8 +221,7 @@ public interface Table extends Closeable { /** * Gets a scanner on the current table for the given family and qualifier. - * - * @param family The column family to scan. + * @param family The column family to scan. * @param qualifier The column qualifier to scan. * @return A scanner. * @throws IOException if a remote or network exception occurs. @@ -257,7 +233,6 @@ public interface Table extends Closeable { /** * Puts some data in the table. - * * @param put The data to put. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 @@ -269,15 +244,14 @@ public interface Table extends Closeable { /** * Batch puts the specified data into the table. *
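The getScanner variants reflowed above return a ResultScanner that must be closed, and the javadoc warns that the passed Scan's start row and caching may be modified. A minimal sketch of a bounded scan; family, qualifier, and caching value are placeholders:

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanSketch {
      static void scanFamily(Table table) throws Exception {
        Scan scan = new Scan().addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
          .setCaching(100); // rows fetched per RPC; illustrative value
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result result : scanner) {
            System.out.println(Bytes.toString(result.getRow()));
          }
        }
      }
    }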

    - * This can be used for group commit, or for submitting user defined batches. Before sending - * a batch of mutations to the server, the client runs a few validations on the input list. If an + * This can be used for group commit, or for submitting user defined batches. Before sending a + * batch of mutations to the server, the client runs a few validations on the input list. If an * error is found, for example, a mutation was supplied but was missing it's column an - * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there - * are any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be - * thrown. RetriesExhaustedWithDetailsException contains lists of failed mutations and - * corresponding remote exceptions. The ordering of mutations and exceptions in the - * encapsulating exception corresponds to the order of the input list of Put requests. - * + * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there are + * any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be thrown. + * RetriesExhaustedWithDetailsException contains lists of failed mutations and corresponding + * remote exceptions. The ordering of mutations and exceptions in the encapsulating exception + * corresponds to the order of the input list of Put requests. * @param puts The list of mutations to apply. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 @@ -287,48 +261,43 @@ public interface Table extends Closeable { } /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the put. If the passed value is null, the check - * is for the lack of column (ie: non-existance) - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the put. If the passed value is null, the check is for the lack of column (ie: + * non-existance) + * @param row to check + * @param family column family to check * @param qualifier column qualifier to check - * @param value the expected value - * @param put data to put if check succeeds + * @param value the expected value + * @param put data to put if check succeeds * @throws IOException e * @return true if the new put was executed, false otherwise * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])} */ @Deprecated default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) - throws IOException { + throws IOException { return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, put); } /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the put. If the passed value is null, the check - * is for the lack of column (ie: non-existence) - * - * The expected value argument of this call is on the left and the current - * value of the cell is on the right side of the comparison operator. - * - * Ie. eg. GREATER operator means expected value > existing <=> add the put. - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the put. 
If the passed value is null, the check is for the lack of column (ie: + * non-existence) The expected value argument of this call is on the left and the current value of + * the cell is on the right side of the comparison operator. Ie. eg. GREATER operator means + * expected value > existing <=> add the put. + * @param row to check + * @param family column family to check * @param qualifier column qualifier to check * @param compareOp comparison operator to use - * @param value the expected value - * @param put data to put if check succeeds + * @param value the expected value + * @param put data to put if check succeeds * @throws IOException e * @return true if the new put was executed, false otherwise * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])} */ @Deprecated default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException { + CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException { RowMutations mutations = new RowMutations(put.getRow(), 1); mutations.add(put); @@ -336,28 +305,24 @@ public interface Table extends Closeable { } /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the put. If the passed value is null, the check - * is for the lack of column (ie: non-existence) - * - * The expected value argument of this call is on the left and the current - * value of the cell is on the right side of the comparison operator. - * - * Ie. eg. GREATER operator means expected value > existing <=> add the put. - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the put. If the passed value is null, the check is for the lack of column (ie: + * non-existence) The expected value argument of this call is on the left and the current value of + * the cell is on the right side of the comparison operator. Ie. eg. GREATER operator means + * expected value > existing <=> add the put. + * @param row to check + * @param family column family to check * @param qualifier column qualifier to check - * @param op comparison operator to use - * @param value the expected value - * @param put data to put if check succeeds + * @param op comparison operator to use + * @param value the expected value + * @param put data to put if check succeeds * @throws IOException e * @return true if the new put was executed, false otherwise * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])} */ @Deprecated default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - byte[] value, Put put) throws IOException { + byte[] value, Put put) throws IOException { RowMutations mutations = new RowMutations(put.getRow(), 1); mutations.add(put); @@ -366,7 +331,6 @@ public interface Table extends Closeable { /** * Deletes the specified cells/row. - * * @param delete The object that specifies what to delete. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 @@ -378,19 +342,18 @@ public interface Table extends Closeable { /** * Batch Deletes the specified cells/rows from the table. *

    - * If a specified row does not exist, {@link Delete} will report as though sucessful - * delete; no exception will be thrown. If there are any failures even after retries, - * a {@link RetriesExhaustedWithDetailsException} will be thrown. - * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and - * corresponding remote exceptions. - * - * @param deletes List of things to delete. The input list gets modified by this - * method. All successfully applied {@link Delete}s in the list are removed (in particular it - * gets re-ordered, so the order in which the elements are inserted in the list gives no - * guarantee as to the order in which the {@link Delete}s are executed). - * @throws IOException if a remote or network exception occurs. In that case - * the {@code deletes} argument will contain the {@link Delete} instances - * that have not be successfully applied. + * If a specified row does not exist, {@link Delete} will report as though sucessful delete; no + * exception will be thrown. If there are any failures even after retries, a + * {@link RetriesExhaustedWithDetailsException} will be thrown. + * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and corresponding + * remote exceptions. + * @param deletes List of things to delete. The input list gets modified by this method. All + * successfully applied {@link Delete}s in the list are removed (in particular it + * gets re-ordered, so the order in which the elements are inserted in the list + * gives no guarantee as to the order in which the {@link Delete}s are executed). + * @throws IOException if a remote or network exception occurs. In that case the {@code deletes} + * argument will contain the {@link Delete} instances that have not be + * successfully applied. * @since 0.20.1 * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also, * {@link #put(List)} runs pre-flight validations on the input list on client. Currently @@ -403,41 +366,36 @@ public interface Table extends Closeable { } /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the delete. If the passed value is null, the - * check is for the lack of column (ie: non-existance) - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the delete. If the passed value is null, the check is for the lack of column (ie: + * non-existance) + * @param row to check + * @param family column family to check * @param qualifier column qualifier to check - * @param value the expected value - * @param delete data to delete if check succeeds + * @param value the expected value + * @param delete data to delete if check succeeds * @throws IOException e * @return true if the new delete was executed, false otherwise * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])} */ @Deprecated - default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException { + default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, + Delete delete) throws IOException { return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, value, delete); } /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the delete. 
If the passed value is null, the - * check is for the lack of column (ie: non-existence) - * - * The expected value argument of this call is on the left and the current - * value of the cell is on the right side of the comparison operator. - * - * Ie. eg. GREATER operator means expected value > existing <=> add the delete. - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the delete. If the passed value is null, the check is for the lack of column (ie: + * non-existence) The expected value argument of this call is on the left and the current value of + * the cell is on the right side of the comparison operator. Ie. eg. GREATER operator means + * expected value > existing <=> add the delete. + * @param row to check + * @param family column family to check * @param qualifier column qualifier to check * @param compareOp comparison operator to use - * @param value the expected value - * @param delete data to delete if check succeeds + * @param value the expected value + * @param delete data to delete if check succeeds * @throws IOException e * @return true if the new delete was executed, false otherwise * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])} @@ -452,28 +410,24 @@ public interface Table extends Closeable { } /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the delete. If the passed value is null, the - * check is for the lack of column (ie: non-existence) - * - * The expected value argument of this call is on the left and the current - * value of the cell is on the right side of the comparison operator. - * - * Ie. eg. GREATER operator means expected value > existing <=> add the delete. - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the delete. If the passed value is null, the check is for the lack of column (ie: + * non-existence) The expected value argument of this call is on the left and the current value of + * the cell is on the right side of the comparison operator. Ie. eg. GREATER operator means + * expected value > existing <=> add the delete. + * @param row to check + * @param family column family to check * @param qualifier column qualifier to check - * @param op comparison operator to use - * @param value the expected value - * @param delete data to delete if check succeeds + * @param op comparison operator to use + * @param value the expected value + * @param delete data to delete if check succeeds * @throws IOException e * @return true if the new delete was executed, false otherwise * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])} */ @Deprecated - default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOperator op, byte[] value, Delete delete) throws IOException { + default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + byte[] value, Delete delete) throws IOException { RowMutations mutations = new RowMutations(delete.getRow(), 1); mutations.add(delete); @@ -494,7 +448,7 @@ public interface Table extends Closeable { * * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. 
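The deprecation notes above steer callers away from the old checkAndPut/checkAndDelete overloads and the builder toward the CheckAndMutate object added in 2.4. A minimal sketch of that replacement call path; row, family, qualifier, and values are placeholders:

    import org.apache.hadoop.hbase.client.CheckAndMutate;
    import org.apache.hadoop.hbase.client.CheckAndMutateResult;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateSketch {
      static boolean putIfMatches(Table table) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("cf");
        byte[] q = Bytes.toBytes("q");
        Put put = new Put(row).addColumn(cf, q, Bytes.toBytes("new-value"));
        CheckAndMutate cam = CheckAndMutate.newBuilder(row)
          .ifEquals(cf, q, Bytes.toBytes("expected")) // condition is checked atomically with the Put
          .build(put);
        CheckAndMutateResult result = table.checkAndMutate(cam);
        return result.isSuccess();
      }
    }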
*/ @Deprecated default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { @@ -503,9 +457,8 @@ public interface Table extends Closeable { /** * A helper class for sending checkAndMutate request. - * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated interface CheckAndMutateBuilder { @@ -535,7 +488,7 @@ public interface Table extends Closeable { /** * @param compareOp comparison operator to use - * @param value the expected value + * @param value the expected value */ CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value); @@ -572,7 +525,7 @@ public interface Table extends Closeable { * * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) { @@ -581,9 +534,8 @@ public interface Table extends Closeable { /** * A helper class for sending checkAndMutate request with a filter. - * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated interface CheckAndMutateWithFilterBuilder { @@ -613,9 +565,8 @@ public interface Table extends Closeable { } /** - * checkAndMutate that atomically checks if a row matches the specified condition. If it does, - * it performs the specified action. - * + * checkAndMutate that atomically checks if a row matches the specified condition. If it does, it + * performs the specified action. * @param checkAndMutate The CheckAndMutate object. * @return A CheckAndMutateResult object that represents the result for the CheckAndMutate. * @throws IOException if a remote or network exception occurs. @@ -628,10 +579,9 @@ public interface Table extends Closeable { * Batch version of checkAndMutate. The specified CheckAndMutates are batched only in the sense * that they are sent to a RS in one RPC, but each CheckAndMutate operation is still executed * atomically (and thus, each may fail independently of others). - * * @param checkAndMutates The list of CheckAndMutate. * @return A list of CheckAndMutateResult objects that represents the result for each - * CheckAndMutate. + * CheckAndMutate. * @throws IOException if a remote or network exception occurs. */ default List checkAndMutate(List checkAndMutates) @@ -640,9 +590,8 @@ public interface Table extends Closeable { } /** - * Performs multiple mutations atomically on a single row. Currently - * {@link Put} and {@link Delete} are supported. - * + * Performs multiple mutations atomically on a single row. Currently {@link Put} and + * {@link Delete} are supported. * @param rm object that specifies the set of mutations to perform atomically * @return results of Increment/Append operations * @throws IOException if a remote or network exception occurs. @@ -654,10 +603,9 @@ public interface Table extends Closeable { /** * Appends values to one or more columns within a single row. *

    - * This operation guaranteed atomicity to readers. Appends are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. - * + * This operation guaranteed atomicity to readers. Appends are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. * @param append object that specifies the columns and values to be appended * @throws IOException e * @return values of columns after the append operation (maybe null) @@ -669,12 +617,11 @@ public interface Table extends Closeable { /** * Increments one or more columns within a single row. *
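The append/increment javadoc being reflowed here stresses that both operations run under a single row lock and are atomic with respect to readers. A minimal sketch of both calls; row, family, and qualifier names are placeholders:

    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendIncrementSketch {
      static void appendAndIncrement(Table table) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("cf");
        // Append bytes to an existing cell under the row lock; the returned Result may be null.
        table.append(new Append(row).addColumn(cf, Bytes.toBytes("log"), Bytes.toBytes(",next")));
        // Increment a counter cell by 1; the post-increment value comes back in the Result.
        Result incremented =
          table.increment(new Increment(row).addColumn(cf, Bytes.toBytes("counter"), 1L));
        System.out.println(Bytes.toLong(incremented.getValue(cf, Bytes.toBytes("counter"))));
      }
    }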

    - * This operation ensures atomicity to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. - * - * @param increment object that specifies the columns and amounts to be used - * for the increment operations + * This operation ensures atomicity to readers. Increments are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. + * @param increment object that specifies the columns and amounts to be used for the increment + * operations * @throws IOException e * @return values of columns after the increment */ @@ -686,50 +633,47 @@ public interface Table extends Closeable { * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} *

    * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the - * amount is negative). + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) - throws IOException { + throws IOException { Increment increment = new Increment(row).addColumn(family, qualifier, amount); Cell cell = increment(increment).getColumnLatestCell(family, qualifier); return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } /** - * Atomically increments a column value. If the column value already exists - * and is not a big-endian long, this could throw an exception. If the column - * value does not yet exist it is initialized to amount and - * written to the specified column. - * - *

    Setting durability to {@link Durability#SKIP_WAL} means that in a fail - * scenario you will lose any increments that have not been flushed. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. - * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the - * amount is negative). + * Atomically increments a column value. If the column value already exists and is not a + * big-endian long, this could throw an exception. If the column value does not yet exist it is + * initialized to amount and written to the specified column. + *

    + * Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose + * any increments that have not been flushed. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @param durability The persistence guarantee for this increment. * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ - default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, Durability durability) throws IOException { - Increment increment = new Increment(row) - .addColumn(family, qualifier, amount) - .setDurability(durability); + default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, + Durability durability) throws IOException { + Increment increment = + new Increment(row).addColumn(family, qualifier, amount).setDurability(durability); Cell cell = increment(increment).getColumnLatestCell(family, qualifier); return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } /** * Releases any resources held or pending changes in internal buffers. - * * @throws IOException if a remote or network exception occurs. */ @Override @@ -738,29 +682,28 @@ public interface Table extends Closeable { } /** - * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the - * table region containing the specified row. The row given does not actually have - * to exist. Whichever region would contain the row based on start and end keys will - * be used. Note that the {@code row} parameter is also not passed to the - * coprocessor handler registered for this protocol, unless the {@code row} - * is separately passed as an argument in the service request. The parameter - * here is only used to locate the region used to handle the call. - * + * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the table + * region containing the specified row. The row given does not actually have to exist. Whichever + * region would contain the row based on start and end keys will be used. Note that the + * {@code row} parameter is also not passed to the coprocessor handler registered for this + * protocol, unless the {@code row} is separately passed as an argument in the service request. + * The parameter here is only used to locate the region used to handle the call. *

    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: *

        * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
        * MyCallRequest request = MyCallRequest.newBuilder()
        *     ...
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
    * @param row The row key used to identify the remote region location * @return A CoprocessorRpcChannel instance */ @@ -773,23 +716,22 @@ public interface Table extends Closeable { * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method * with each {@link com.google.protobuf.Service} instance. - * - * @param service the protocol buffer {@code Service} implementation to call - * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. If - * {@code null}, selection will continue through the last table region. + * @param service the protocol buffer {@code Service} implementation to call + * @param startKey start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. If + * {@code null}, selection will continue through the last table region. * @param callable this instance's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} - * method will be invoked once per table region, using the {@link com.google.protobuf.Service} - * instance connected to that region. - * @param the {@link com.google.protobuf.Service} subclass to connect to - * @param Return type for the {@code callable} parameter's {@link - * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will + * be invoked once per table region, using the {@link com.google.protobuf.Service} + * instance connected to that region. + * @param the {@link com.google.protobuf.Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method * @return a map of result values keyed by region name */ - default Map coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable) + default Map coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable) throws ServiceException, Throwable { throw new NotImplementedException("Add an implementation!"); } @@ -799,28 +741,28 @@ public interface Table extends Closeable { * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method * with each {@link Service} instance. - * - *

    The given + *

    + * The given * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)} * method will be called with the return value from each region's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.

    - * - * @param service the protocol buffer {@code Service} implementation to call - * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. If - * {@code null}, selection will continue through the last table region. + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. + *

    + * @param service the protocol buffer {@code Service} implementation to call + * @param startKey start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. If + * {@code null}, selection will continue through the last table region. * @param callable this instance's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} - * method will be invoked once per table region, using the {@link Service} instance connected to - * that region. - * @param the {@link Service} subclass to connect to - * @param Return type for the {@code callable} parameter's {@link - * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will + * be invoked once per table region, using the {@link Service} instance connected + * to that region. + * @param the {@link Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method */ - default void coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable, - final Batch.Callback callback) throws ServiceException, Throwable { + default void coprocessorService(final Class service, byte[] startKey, + byte[] endKey, final Batch.Call callable, final Batch.Callback callback) + throws ServiceException, Throwable { throw new NotImplementedException("Add an implementation!"); } @@ -828,27 +770,18 @@ public interface Table extends Closeable { * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all * the invocations to the same region server will be batched into one call. The coprocessor - * service is invoked according to the service instance, method name and parameters. - * - * @param methodDescriptor - * the descriptor for the protobuf service method to call. - * @param request - * the method call parameters - * @param startKey - * start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey - * select regions up to and including the region containing this row. If {@code null}, - * selection will continue through the last table region. - * @param responsePrototype - * the proto type of the response of the method in Service. - * @param - * the response type for the coprocessor Service method + * service is invoked according to the service instance, method name and parameters. n * the + * descriptor for the protobuf service method to call. n * the method call parameters n * start + * region selection with region containing this row. If {@code null}, the selection will start + * with the first table region. n * select regions up to and including the region containing this + * row. If {@code null}, selection will continue through the last table region. n * the proto type + * of the response of the method in Service. 
+ * @param the response type for the coprocessor Service method * @return a map of result values keyed by region name */ default Map batchCoprocessorService( - Descriptors.MethodDescriptor methodDescriptor, Message request, - byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable { + Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { throw new NotImplementedException("Add an implementation!"); } @@ -857,46 +790,38 @@ public interface Table extends Closeable { * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all * the invocations to the same region server will be batched into one call. The coprocessor * service is invoked according to the service instance, method name and parameters. - * *
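The batched variant above differs from coprocessorService in that all regions hosted by the same region server share one RPC. A minimal sketch, again assuming hypothetical protobuf-generated MyService, MyCallRequest and MyCallResponse classes:

    import java.util.Map;
    import org.apache.hadoop.hbase.client.Table;

    public class BatchedCoprocessorCall {
      static Map<byte[], MyCallResponse> batchedCall(Table table) throws Throwable {
        MyCallRequest request = MyCallRequest.newBuilder().build();
        return table.batchCoprocessorService(
          MyService.getDescriptor().findMethodByName("myCall"), // protobuf method descriptor
          request,                                              // request sent to every region
          null, null,                                           // null keys select all regions
          MyCallResponse.getDefaultInstance());                 // prototype used to parse replies
      }
    }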

    * The given
    * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
    * method will be called with the return value from each region's invocation.
    *

    - * - * @param methodDescriptor the descriptor for the protobuf service method to call. - * @param request the method call parameters - * @param startKey start region selection with region containing this row. - * If {@code null}, the selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. - * If {@code null}, selection will continue through the last table region. + * @param methodDescriptor the descriptor for the protobuf service method to call. + * @param request the method call parameters + * @param startKey start region selection with region containing this row. If + * {@code null}, the selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. If + * {@code null}, selection will continue through the last table region. * @param responsePrototype the proto type of the response of the method in Service. - * @param callback callback to invoke with the response for each region - * @param - * the response type for the coprocessor Service method + * @param callback callback to invoke with the response for each region + * @param the response type for the coprocessor Service method */ default void batchCoprocessorService( - Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, - byte[] endKey, R responsePrototype, Batch.Callback callback) - throws ServiceException, Throwable { + Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype, Batch.Callback callback) throws ServiceException, Throwable { throw new NotImplementedException("Add an implementation!"); } /** - * Atomically checks if a row/family/qualifier value matches the expected value. - * If it does, it performs the row mutations. If the passed value is null, the check - * is for the lack of column (ie: non-existence) - * - * The expected value argument of this call is on the left and the current - * value of the cell is on the right side of the comparison operator. - * - * Ie. eg. GREATER operator means expected value > existing <=> perform row mutations. - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * performs the row mutations. If the passed value is null, the check is for the lack of column + * (ie: non-existence) The expected value argument of this call is on the left and the current + * value of the cell is on the right side of the comparison operator. Ie. eg. GREATER operator + * means expected value > existing <=> perform row mutations. 
+ * @param row to check + * @param family column family to check * @param qualifier column qualifier to check * @param compareOp the comparison operator - * @param value the expected value + * @param value the expected value * @param mutation mutations to perform if check succeeds * @throws IOException e * @return true if the new put was executed, false otherwise @@ -904,25 +829,21 @@ public interface Table extends Closeable { */ @Deprecated default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException { + CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException { throw new NotImplementedException("Add an implementation!"); } /** - * Atomically checks if a row/family/qualifier value matches the expected value. - * If it does, it performs the row mutations. If the passed value is null, the check - * is for the lack of column (ie: non-existence) - * - * The expected value argument of this call is on the left and the current - * value of the cell is on the right side of the comparison operator. - * - * Ie. eg. GREATER operator means expected value > existing <=> perform row mutations. - * - * @param row to check - * @param family column family to check + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * performs the row mutations. If the passed value is null, the check is for the lack of column + * (ie: non-existence) The expected value argument of this call is on the left and the current + * value of the cell is on the right side of the comparison operator. Ie. eg. GREATER operator + * means expected value > existing <=> perform row mutations. + * @param row to check + * @param family column family to check * @param qualifier column qualifier to check - * @param op the comparison operator - * @param value the expected value + * @param op the comparison operator + * @param value the expected value * @param mutation mutations to perform if check succeeds * @throws IOException e * @return true if the new put was executed, false otherwise @@ -930,7 +851,7 @@ public interface Table extends Closeable { */ @Deprecated default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - byte[] value, RowMutations mutation) throws IOException { + byte[] value, RowMutations mutation) throws IOException { throw new NotImplementedException("Add an implementation!"); } @@ -948,26 +869,23 @@ public interface Table extends Closeable { /** * Get timeout (millisecond) of each rpc request in this Table instance. - * * @return Currently configured read timeout - * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or - * {@link #getWriteRpcTimeout(TimeUnit)} instead + * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or {@link #getWriteRpcTimeout(TimeUnit)} + * instead */ @Deprecated default int getRpcTimeout() { - return (int)getRpcTimeout(TimeUnit.MILLISECONDS); + return (int) getRpcTimeout(TimeUnit.MILLISECONDS); } /** * Set timeout (millisecond) of each rpc request in operations of this Table instance, will - * override the value of hbase.rpc.timeout in configuration. - * If a rpc request waiting too long, it will stop waiting and send a new request to retry until - * retries exhausted or operation timeout reached. + * override the value of hbase.rpc.timeout in configuration. 
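To make the comparison direction concrete (expected value on the left, current cell value on the right), here is a sketch of the deprecated CompareOperator overload documented above. The table layout and values are invented for illustration, and new code would normally prefer the newer CheckAndMutate builder API.

    import java.io.IOException;
    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateSketch {
      // Applies a Put and a Delete to row "r1" only if f:state currently equals "pending".
      static boolean promoteIfPending(Table table) throws IOException {
        byte[] row = Bytes.toBytes("r1");
        byte[] family = Bytes.toBytes("f");
        RowMutations mutations = new RowMutations(row);
        mutations.add(new Put(row).addColumn(family, Bytes.toBytes("state"), Bytes.toBytes("active")));
        mutations.add(new Delete(row).addColumns(family, Bytes.toBytes("pending_since")));
        // EQUAL here means: expected value ("pending") EQUAL current value => apply mutations.
        return table.checkAndMutate(row, family, Bytes.toBytes("state"), CompareOperator.EQUAL,
          Bytes.toBytes("pending"), mutations);
      }
    }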
If a rpc request waiting too long, it + * will stop waiting and send a new request to retry until retries exhausted or operation timeout + * reached. *

    * NOTE: This will set both the read and write timeout settings to the provided value. - * * @param rpcTimeout the timeout of each rpc request in millisecond. - * * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead */ @Deprecated @@ -987,20 +905,19 @@ public interface Table extends Closeable { /** * Get timeout (millisecond) of each rpc read request in this Table instance. - * @deprecated since 2.0 and will be removed in 3.0 version - * use {@link #getReadRpcTimeout(TimeUnit)} instead + * @deprecated since 2.0 and will be removed in 3.0 version use + * {@link #getReadRpcTimeout(TimeUnit)} instead */ @Deprecated default int getReadRpcTimeout() { - return (int)getReadRpcTimeout(TimeUnit.MILLISECONDS); + return (int) getReadRpcTimeout(TimeUnit.MILLISECONDS); } /** * Set timeout (millisecond) of each rpc read request in operations of this Table instance, will - * override the value of hbase.rpc.read.timeout in configuration. - * If a rpc read request waiting too long, it will stop waiting and send a new request to retry - * until retries exhausted or operation timeout reached. - * + * override the value of hbase.rpc.read.timeout in configuration. If a rpc read request waiting + * too long, it will stop waiting and send a new request to retry until retries exhausted or + * operation timeout reached. * @param readRpcTimeout the timeout for read rpc request in milliseconds * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead */ @@ -1020,20 +937,19 @@ public interface Table extends Closeable { /** * Get timeout (millisecond) of each rpc write request in this Table instance. - * @deprecated since 2.0 and will be removed in 3.0 version - * use {@link #getWriteRpcTimeout(TimeUnit)} instead + * @deprecated since 2.0 and will be removed in 3.0 version use + * {@link #getWriteRpcTimeout(TimeUnit)} instead */ @Deprecated default int getWriteRpcTimeout() { - return (int)getWriteRpcTimeout(TimeUnit.MILLISECONDS); + return (int) getWriteRpcTimeout(TimeUnit.MILLISECONDS); } /** * Set timeout (millisecond) of each rpc write request in operations of this Table instance, will - * override the value of hbase.rpc.write.timeout in configuration. - * If a rpc write request waiting too long, it will stop waiting and send a new request to retry - * until retries exhausted or operation timeout reached. - * + * override the value of hbase.rpc.write.timeout in configuration. If a rpc write request waiting + * too long, it will stop waiting and send a new request to retry until retries exhausted or + * operation timeout reached. * @param writeRpcTimeout the timeout for write rpc request in milliseconds * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead */ @@ -1053,22 +969,21 @@ public interface Table extends Closeable { /** * Get timeout (millisecond) of each operation for in Table instance. - * @deprecated since 2.0 and will be removed in 3.0 version - * use {@link #getOperationTimeout(TimeUnit)} instead + * @deprecated since 2.0 and will be removed in 3.0 version use + * {@link #getOperationTimeout(TimeUnit)} instead */ @Deprecated default int getOperationTimeout() { - return (int)getOperationTimeout(TimeUnit.MILLISECONDS); + return (int) getOperationTimeout(TimeUnit.MILLISECONDS); } /** - * Set timeout (millisecond) of each operation in this Table instance, will override the value - * of hbase.client.operation.timeout in configuration. 
- * Operation timeout is a top-level restriction that makes sure a blocking method will not be - * blocked more than this. In each operation, if rpc request fails because of timeout or - * other reason, it will retry until success or throw a RetriesExhaustedException. But if the - * total time being blocking reach the operation timeout before retries exhausted, it will break - * early and throw SocketTimeoutException. + * Set timeout (millisecond) of each operation in this Table instance, will override the value of + * hbase.client.operation.timeout in configuration. Operation timeout is a top-level restriction + * that makes sure a blocking method will not be blocked more than this. In each operation, if rpc + * request fails because of timeout or other reason, it will retry until success or throw a + * RetriesExhaustedException. But if the total time being blocking reach the operation timeout + * before retries exhausted, it will break early and throw SocketTimeoutException. * @param operationTimeout the total timeout of each operation in millisecond. * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java index d71a07e80eb..75e16e89a5d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +23,7 @@ import org.apache.yetus.audience.InterfaceAudience; * For creating {@link Table} instance. *
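Since the per-instance timeout setters above are deprecated, the replacement the Javadoc points at is TableBuilder, obtained from Connection.getTableBuilder. A sketch with illustrative values (the table name and the numbers are assumptions):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    public class TableBuilderTimeouts {
      static Table buildWithTimeouts(Connection connection) {
        Table table = connection.getTableBuilder(TableName.valueOf("t1"), null)
          .setOperationTimeout(60_000) // total budget of one blocking call, in milliseconds
          .setReadRpcTimeout(10_000)   // per read RPC attempt
          .setWriteRpcTimeout(10_000)  // per write RPC attempt
          .build();
        // The TimeUnit getters are the non-deprecated way to read the effective values back.
        System.out.println(table.getOperationTimeout(TimeUnit.MILLISECONDS) + " ms");
        return table;
      }
    }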

    * The implementation should have default configurations set before returning the builder to user. - * So users are free to only set the configurations they care about to create a new - * Table instance. + * So users are free to only set the configurations they care about to create a new Table instance. */ @InterfaceAudience.Public public interface TableBuilder { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java index fa543c06244..796fd6496f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,8 +41,9 @@ abstract class TableBuilderBase implements TableBuilder { throw new IllegalArgumentException("Given table name is null"); } this.tableName = tableName; - this.operationTimeout = tableName.isSystemTable() ? connConf.getMetaOperationTimeout() - : connConf.getOperationTimeout(); + this.operationTimeout = tableName.isSystemTable() + ? connConf.getMetaOperationTimeout() + : connConf.getOperationTimeout(); this.rpcTimeout = connConf.getRpcTimeout(); this.readRpcTimeout = connConf.getReadRpcTimeout(); this.writeRpcTimeout = connConf.getWriteRpcTimeout(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java index 789c044a3ee..85f412e2df2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,10 +32,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * TableDescriptor contains the details about an HBase table such as the descriptors of - * all the column families, is the table a catalog table, hbase:meta , - * if the table is read only, the maximum size of the memstore, - * when the region split should occur, coprocessors associated with it etc... + * TableDescriptor contains the details about an HBase table such as the descriptors of all the + * column families, is the table a catalog table, hbase:meta , if the table is read + * only, the maximum size of the memstore, when the region split should occur, coprocessors + * associated with it etc... 
*/ @InterfaceAudience.Public public interface TableDescriptor { @@ -46,10 +45,10 @@ public interface TableDescriptor { @InterfaceAudience.Private Comparator COMPARATOR_IGNORE_REPLICATION = - getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION); + getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION); static Comparator - getComparator(Comparator cfComparator) { + getComparator(Comparator cfComparator) { return (TableDescriptor lhs, TableDescriptor rhs) -> { int result = lhs.getTableName().compareTo(rhs.getTableName()); if (result != 0) { @@ -62,8 +61,8 @@ public interface TableDescriptor { return result; } - for (Iterator it = lhsFamilies.iterator(), it2 = - rhsFamilies.iterator(); it.hasNext();) { + for (Iterator it = lhsFamilies.iterator(), + it2 = rhsFamilies.iterator(); it.hasNext();) { result = cfComparator.compare(it.next(), it2.next()); if (result != 0) { return result; @@ -76,91 +75,75 @@ public interface TableDescriptor { /** * Returns the count of the column families of the table. - * * @return Count of column families of the table */ int getColumnFamilyCount(); /** * Return the list of attached co-processor represented - * * @return The list of CoprocessorDescriptor */ Collection getCoprocessorDescriptors(); /** - * Return the list of attached co-processor represented by their name - * className + * Return the list of attached co-processor represented by their name className * @return The list of co-processors classNames - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getCoprocessorDescriptors()} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getCoprocessorDescriptors()} instead */ @Deprecated default Collection getCoprocessors() { - return getCoprocessorDescriptors().stream() - .map(CoprocessorDescriptor::getClassName) + return getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) .collect(Collectors.toList()); } /** * Returns the durability setting for the table. - * * @return durability setting for the table. */ Durability getDurability(); /** - * Returns an unmodifiable collection of all the {@link ColumnFamilyDescriptor} of - * all the column families of the table. - * - * @return An array of {@link ColumnFamilyDescriptor} of all the column - * families. + * Returns an unmodifiable collection of all the {@link ColumnFamilyDescriptor} of all the column + * families of the table. + * @return An array of {@link ColumnFamilyDescriptor} of all the column families. */ ColumnFamilyDescriptor[] getColumnFamilies(); /** - * Returns all the column family names of the current table. The map of - * TableDescriptor contains mapping of family name to ColumnDescriptor. - * This returns all the keys of the family map which represents the column - * family names of the table. - * + * Returns all the column family names of the current table. The map of TableDescriptor contains + * mapping of family name to ColumnDescriptor. This returns all the keys of the family map which + * represents the column family names of the table. * @return Immutable sorted set of the keys of the families. */ Set getColumnFamilyNames(); /** - * Returns the ColumnDescriptor for a specific column family with name as - * specified by the parameter column. - * + * Returns the ColumnDescriptor for a specific column family with name as specified by the + * parameter column. 
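A small read-only walk over the getters introduced above; it only assumes a TableDescriptor obtained elsewhere (for example from Admin.getDescriptor, not shown):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.CoprocessorDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class DescribeTable {
      static void describe(TableDescriptor descriptor) {
        System.out.println("table " + descriptor.getTableName()
          + ", durability " + descriptor.getDurability());
        for (ColumnFamilyDescriptor family : descriptor.getColumnFamilies()) {
          System.out.println("  family " + family.getNameAsString()
            + " maxVersions=" + family.getMaxVersions());
        }
        // Preferred over the deprecated getCoprocessors(): the descriptor also carries
        // the jar path, priority and properties of each coprocessor.
        for (CoprocessorDescriptor cp : descriptor.getCoprocessorDescriptors()) {
          System.out.println("  coprocessor " + cp.getClassName() + " priority=" + cp.getPriority());
        }
      }
    }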
* @param name Column family name - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. */ ColumnFamilyDescriptor getColumnFamily(final byte[] name); /** - * This gets the class associated with the flush policy which determines the - * stores need to be flushed when flushing a region. The class used by default - * is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy. - * - * @return the class name of the flush policy for this table. If this returns - * null, the default flush policy is used. + * This gets the class associated with the flush policy which determines the stores need to be + * flushed when flushing a region. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.FlushPolicy. + * @return the class name of the flush policy for this table. If this returns null, the default + * flush policy is used. */ String getFlushPolicyClassName(); /** - * Returns the maximum size upto which a region can grow to after which a - * region split is triggered. The region size is represented by the size of - * the biggest store file in that region. - * + * Returns the maximum size upto which a region can grow to after which a region split is + * triggered. The region size is represented by the size of the biggest store file in that region. * @return max hregion size for table, -1 if not set. */ long getMaxFileSize(); /** - * Returns the size of the memstore after which a flush to filesystem is - * triggered. - * + * Returns the size of the memstore after which a flush to filesystem is triggered. * @return memory cache flush size for each hregion, -1 if not set. */ long getMemStoreFlushSize(); @@ -176,19 +159,16 @@ public interface TableDescriptor { int getRegionReplication(); /** - * This gets the class associated with the region split policy which - * determines when a region split should occur. The class used by default is - * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy - * - * @return the class name of the region split policy for this table. If this - * returns null, the default split policy is used. + * This gets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy + * @return the class name of the region split policy for this table. If this returns null, the + * default split policy is used. */ String getRegionSplitPolicyClassName(); /** - * Get the name of the table - * - * @return TableName + * Get the name of the table n */ TableName getTableName(); @@ -201,7 +181,6 @@ public interface TableDescriptor { /** * Getter for accessing the metadata associated with the key. - * * @param key The key. * @return A clone value. Null if no mapping for the key */ @@ -209,7 +188,6 @@ public interface TableDescriptor { /** * Getter for accessing the metadata associated with the key. - * * @param key The key. * @return A clone value. Null if no mapping for the key */ @@ -217,7 +195,6 @@ public interface TableDescriptor { /** * Getter for accessing the metadata associated with the key. - * * @param key The key. 
* @return Null if no mapping for the key */ @@ -229,9 +206,7 @@ public interface TableDescriptor { Map getValues(); /** - * Check if the table has an attached co-processor represented by the name - * className - * + * Check if the table has an attached co-processor represented by the name className * @param classNameToMatch - Class name of the co-processor * @return true of the table has a co-processor className */ @@ -239,7 +214,6 @@ public interface TableDescriptor { /** * Checks to see if this table contains the given column family - * * @param name Family name or column name. * @return true if the table contains the specified family name */ @@ -251,71 +225,62 @@ public interface TableDescriptor { boolean hasRegionMemStoreReplication(); /** - * Check if the compaction enable flag of the table is true. If flag is false - * then no minor/major compactions will be done in real. - * + * Check if the compaction enable flag of the table is true. If flag is false then no minor/major + * compactions will be done in real. * @return true if table compaction enabled */ boolean isCompactionEnabled(); /** - * Check if the split enable flag of the table is true. If flag is false - * then no region split will be done. - * + * Check if the split enable flag of the table is true. If flag is false then no region split will + * be done. * @return true if table region split enabled */ boolean isSplitEnabled(); /** - * Check if the merge enable flag of the table is true. If flag is false - * then no region merge will be done. - * + * Check if the merge enable flag of the table is true. If flag is false then no region merge will + * be done. * @return true if table region merge enabled */ boolean isMergeEnabled(); /** * Checks if this table is hbase:meta region. - * * @return true if this table is hbase:meta region */ boolean isMetaRegion(); /** * Checks if the table is a hbase:meta table - * * @return true if table is hbase:meta region. */ boolean isMetaTable(); /** - * Check if normalization enable flag of the table is true. If flag is false - * then region normalizer won't attempt to normalize this table. - * + * Check if normalization enable flag of the table is true. If flag is false then region + * normalizer won't attempt to normalize this table. * @return true if region normalization is enabled for this table */ boolean isNormalizationEnabled(); /** - * Check if there is the target region count. If so, the normalize plan will - * be calculated based on the target region count. - * + * Check if there is the target region count. If so, the normalize plan will be calculated based + * on the target region count. * @return target region count after normalize done */ int getNormalizerTargetRegionCount(); /** - * Check if there is the target region size. If so, the normalize plan will - * be calculated based on the target region size. - * + * Check if there is the target region size. If so, the normalize plan will be calculated based on + * the target region size. * @return target region size after normalize done */ long getNormalizerTargetRegionSize(); /** - * Check if the readOnly flag of the table is set. If the readOnly flag is set - * then the contents of the table can only be read from but not modified. - * + * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents + * of the table can only be read from but not modified. 
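The boolean flags documented above are typically consulted before deciding whether a table may be written to or reshaped; a tiny sketch, with the policy itself made up for illustration:

    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class TableFlags {
      // Example policy built on the flag getters: refuse to target read-only or catalog
      // tables, and warn when compactions are disabled, since store files will pile up.
      static boolean safeWriteTarget(TableDescriptor descriptor) {
        if (descriptor.isReadOnly() || descriptor.isMetaTable()) {
          return false;
        }
        if (!descriptor.isCompactionEnabled()) {
          System.err.println("compactions disabled on " + descriptor.getTableName());
        }
        return true;
      }
    }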
* @return true if all columns in the table should be read only */ boolean isReadOnly(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 813232c4e82..ec91f100add 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,145 +62,131 @@ public class TableDescriptorBuilder { public static final String SPLIT_POLICY = "SPLIT_POLICY"; private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes the maximum size of the store file after which a - * region split occurs. + * Used by HBase Shell interface to access this metadata attribute which denotes the maximum size + * of the store file after which a region split occurs. */ @InterfaceAudience.Private public static final String MAX_FILESIZE = "MAX_FILESIZE"; - private static final Bytes MAX_FILESIZE_KEY - = new Bytes(Bytes.toBytes(MAX_FILESIZE)); + private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE)); @InterfaceAudience.Private public static final String OWNER = "OWNER"; @InterfaceAudience.Private - public static final Bytes OWNER_KEY - = new Bytes(Bytes.toBytes(OWNER)); + public static final Bytes OWNER_KEY = new Bytes(Bytes.toBytes(OWNER)); /** - * Used by rest interface to access this metadata attribute - * which denotes if the table is Read Only. + * Used by rest interface to access this metadata attribute which denotes if the table is Read + * Only. */ @InterfaceAudience.Private public static final String READONLY = "READONLY"; - private static final Bytes READONLY_KEY - = new Bytes(Bytes.toBytes(READONLY)); + private static final Bytes READONLY_KEY = new Bytes(Bytes.toBytes(READONLY)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes if the table is compaction enabled. + * Used by HBase Shell interface to access this metadata attribute which denotes if the table is + * compaction enabled. */ @InterfaceAudience.Private public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED"; - private static final Bytes COMPACTION_ENABLED_KEY - = new Bytes(Bytes.toBytes(COMPACTION_ENABLED)); + private static final Bytes COMPACTION_ENABLED_KEY = new Bytes(Bytes.toBytes(COMPACTION_ENABLED)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes if the table is split enabled. + * Used by HBase Shell interface to access this metadata attribute which denotes if the table is + * split enabled. */ @InterfaceAudience.Private public static final String SPLIT_ENABLED = "SPLIT_ENABLED"; private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes if the table is merge enabled. + * Used by HBase Shell interface to access this metadata attribute which denotes if the table is + * merge enabled. 
*/ @InterfaceAudience.Private public static final String MERGE_ENABLED = "MERGE_ENABLED"; private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED)); /** - * Used by HBase Shell interface to access this metadata - * attribute which represents the maximum size of the memstore after which its - * contents are flushed onto the disk. + * Used by HBase Shell interface to access this metadata attribute which represents the maximum + * size of the memstore after which its contents are flushed onto the disk. */ @InterfaceAudience.Private public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE"; - private static final Bytes MEMSTORE_FLUSHSIZE_KEY - = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); + private static final Bytes MEMSTORE_FLUSHSIZE_KEY = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); @InterfaceAudience.Private public static final String FLUSH_POLICY = "FLUSH_POLICY"; private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY)); /** - * Used by rest interface to access this metadata attribute - * which denotes if it is a catalog table, either hbase:meta . + * Used by rest interface to access this metadata attribute which denotes if it is a catalog + * table, either hbase:meta . */ @InterfaceAudience.Private public static final String IS_META = "IS_META"; - private static final Bytes IS_META_KEY - = new Bytes(Bytes.toBytes(IS_META)); + private static final Bytes IS_META_KEY = new Bytes(Bytes.toBytes(IS_META)); /** * {@link Durability} setting for the table. */ @InterfaceAudience.Private public static final String DURABILITY = "DURABILITY"; - private static final Bytes DURABILITY_KEY - = new Bytes(Bytes.toBytes("DURABILITY")); + private static final Bytes DURABILITY_KEY = new Bytes(Bytes.toBytes("DURABILITY")); /** * The number of region replicas for the table. */ @InterfaceAudience.Private public static final String REGION_REPLICATION = "REGION_REPLICATION"; - private static final Bytes REGION_REPLICATION_KEY - = new Bytes(Bytes.toBytes(REGION_REPLICATION)); + private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION)); /** - * The flag to indicate whether or not the memstore should be - * replicated for read-replicas (CONSISTENCY => TIMELINE). + * The flag to indicate whether or not the memstore should be replicated for read-replicas + * (CONSISTENCY => TIMELINE). */ @InterfaceAudience.Private public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION"; - private static final Bytes REGION_MEMSTORE_REPLICATION_KEY - = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); + private static final Bytes REGION_MEMSTORE_REPLICATION_KEY = + new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); - private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY - = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); + private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY = + new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); /** - * Used by shell/rest interface to access this metadata - * attribute which denotes if the table should be treated by region - * normalizer. + * Used by shell/rest interface to access this metadata attribute which denotes if the table + * should be treated by region normalizer. 
*/ @InterfaceAudience.Private public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED"; - private static final Bytes NORMALIZATION_ENABLED_KEY - = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED)); + private static final Bytes NORMALIZATION_ENABLED_KEY = + new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED)); @InterfaceAudience.Private - public static final String NORMALIZER_TARGET_REGION_COUNT = - "NORMALIZER_TARGET_REGION_COUNT"; + public static final String NORMALIZER_TARGET_REGION_COUNT = "NORMALIZER_TARGET_REGION_COUNT"; private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY = - new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT)); + new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT)); @InterfaceAudience.Private public static final String NORMALIZER_TARGET_REGION_SIZE_MB = "NORMALIZER_TARGET_REGION_SIZE_MB"; private static final Bytes NORMALIZER_TARGET_REGION_SIZE_MB_KEY = - new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB)); + new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB)); // TODO: Keeping backward compatability with HBASE-25651 change. Can be removed in later version @InterfaceAudience.Private @Deprecated public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE"; @Deprecated private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY = - new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE)); + new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE)); /** - * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global - * default value + * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */ private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT; @InterfaceAudience.Private public static final String PRIORITY = "PRIORITY"; - private static final Bytes PRIORITY_KEY - = new Bytes(Bytes.toBytes(PRIORITY)); + private static final Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY)); private static final Bytes RSGROUP_KEY = - new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP)); + new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP)); /** * Relative priority of the table used for rpc scheduling @@ -229,8 +214,8 @@ public class TableDescriptorBuilder { public static final boolean DEFAULT_MERGE_ENABLED = true; /** - * Constant that denotes the maximum default size of the memstore in bytes after which - * the contents are flushed to the store files. + * Constant that denotes the maximum default size of the memstore in bytes after which the + * contents are flushed to the store files. 
*/ public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L; @@ -242,16 +227,14 @@ public class TableDescriptorBuilder { private final static Set RESERVED_KEYWORDS = new HashSet<>(); static { - DEFAULT_VALUES.put(MAX_FILESIZE, - String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); + DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY)); - DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, - String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); - DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name + DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); + DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); // use the enum name DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); - DEFAULT_VALUES.keySet().stream() - .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add); + DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s))) + .forEach(RESERVED_KEYWORDS::add); RESERVED_KEYWORDS.add(IS_META_KEY); } @@ -285,9 +268,8 @@ public class TableDescriptorBuilder { private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+"; private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+"; - private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile( - "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + - CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); + private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile("(" + + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); private static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE); @@ -299,15 +281,12 @@ public class TableDescriptorBuilder { // rethink about adding back the setCacheDataInL1 for NS table. // Note: namespace schema is hard-coded. In hbase3, namespace goes away; it is integrated into // hbase:meta. - public static final TableDescriptor NAMESPACE_TABLEDESC - = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME) + public static final TableDescriptor NAMESPACE_TABLEDESC = + TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .build()) + // Ten is arbitrary number. Keep versions to help debugging. + .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build()) .build(); private final ModifyableTableDescriptor desc; @@ -363,8 +342,8 @@ public class TableDescriptorBuilder { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #setCoprocessor(String)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #setCoprocessor(String)} instead */ @Deprecated public TableDescriptorBuilder addCoprocessor(String className) throws IOException { @@ -372,24 +351,21 @@ public class TableDescriptorBuilder { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 
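The NAMESPACE_TABLEDESC constant just above is itself a worked example of the builder; the following sketch applies the same pattern to an ordinary user table (all names and sizes are illustrative assumptions):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BuildTableDescriptor {
      static TableDescriptor eventsTable() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("events"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("d"))
            .setMaxVersions(1).setBlocksize(64 * 1024).build())
          .setDurability(Durability.SYNC_WAL)       // DURABILITY
          .setMaxFileSize(10L * 1024 * 1024 * 1024) // MAX_FILESIZE: split around 10 GB
          .setMemStoreFlushSize(128L * 1024 * 1024) // MEMSTORE_FLUSHSIZE: flush at 128 MB
          .setCompactionEnabled(true)               // COMPACTION_ENABLED
          .build();
      }
    }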
- * Use {@link #setCoprocessor(CoprocessorDescriptor)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #setCoprocessor(CoprocessorDescriptor)} instead */ @Deprecated - public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath, - int priority, final Map kvs) throws IOException { - desc.setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(className) - .setJarPath(jarFilePath == null ? null : jarFilePath.toString()) - .setPriority(priority) - .setProperties(kvs == null ? Collections.emptyMap() : kvs) - .build()); + public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath, int priority, + final Map kvs) throws IOException { + desc.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className) + .setJarPath(jarFilePath == null ? null : jarFilePath.toString()).setPriority(priority) + .setProperties(kvs == null ? Collections.emptyMap() : kvs).build()); return this; } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #setCoprocessor(CoprocessorDescriptor)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #setCoprocessor(CoprocessorDescriptor)} instead */ @Deprecated public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException { @@ -398,8 +374,8 @@ public class TableDescriptorBuilder { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #setColumnFamily(ColumnFamilyDescriptor)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #setColumnFamily(ColumnFamilyDescriptor)} instead */ @Deprecated public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) { @@ -429,8 +405,8 @@ public class TableDescriptorBuilder { return this; } - public TableDescriptorBuilder setColumnFamilies( - final Collection families) { + public TableDescriptorBuilder + setColumnFamilies(final Collection families) { families.forEach(desc::setColumnFamily); return this; } @@ -599,18 +575,16 @@ public class TableDescriptorBuilder { public TableDescriptorBuilder setReplicationScope(int scope) { Map newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); newFamilies.putAll(desc.families); - newFamilies - .forEach((cf, cfDesc) -> { - desc.removeColumnFamily(cf); - desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope) - .build()); - }); + newFamilies.forEach((cf, cfDesc) -> { + desc.removeColumnFamily(cf); + desc + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build()); + }); return this; } /** * Set the RSGroup for this table, specified RSGroup must exist before create or modify table. - * * @param group rsgroup name * @return a TableDescriptorBuilder */ @@ -628,28 +602,25 @@ public class TableDescriptorBuilder { */ @InterfaceAudience.Private public static class ModifyableTableDescriptor - implements TableDescriptor, Comparable { + implements TableDescriptor, Comparable { private final TableName name; /** - * A map which holds the metadata information of the table. This metadata - * includes values like IS_META, SPLIT_POLICY, MAX_FILE_SIZE, - * READONLY, MEMSTORE_FLUSHSIZE etc... + * A map which holds the metadata information of the table. This metadata includes values like + * IS_META, SPLIT_POLICY, MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc... 
*/ private final Map values = new HashMap<>(); /** * Maps column family name to the respective FamilyDescriptors */ - private final Map families - = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); + private final Map families = + new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); /** * Construct a table descriptor specifying a TableName object - * - * @param name Table name. - * TODO: make this private after removing the HTableDescriptor + * @param name Table name. TODO: make this private after removing the HTableDescriptor */ @InterfaceAudience.Private public ModifyableTableDescriptor(final TableName name) { @@ -661,13 +632,11 @@ public class TableDescriptorBuilder { } /** - * Construct a table descriptor by cloning the descriptor passed as a - * parameter. + * Construct a table descriptor by cloning the descriptor passed as a parameter. *

    * Makes a deep copy of the supplied descriptor. * @param name The new name - * @param desc The descriptor. - * TODO: make this private after removing the HTableDescriptor + * @param desc The descriptor. TODO: make this private after removing the HTableDescriptor */ @InterfaceAudience.Private @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed @@ -675,8 +644,8 @@ public class TableDescriptorBuilder { this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues()); } - private ModifyableTableDescriptor(final TableName name, final Collection families, - Map values) { + private ModifyableTableDescriptor(final TableName name, + final Collection families, Map values) { this.name = name; families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c))); this.values.putAll(values); @@ -686,7 +655,6 @@ public class TableDescriptorBuilder { /** * Checks if this table is hbase:meta region. - * * @return true if this table is hbase:meta region */ @Override @@ -696,7 +664,6 @@ public class TableDescriptorBuilder { /** * Checks if the table is a hbase:meta table - * * @return true if table is hbase:meta region. */ @Override @@ -733,7 +700,6 @@ public class TableDescriptorBuilder { /** * Getter for fetching an unmodifiable {@link #values} map. - * * @return unmodifiable map {@link #values}. * @see #values */ @@ -745,35 +711,30 @@ public class TableDescriptorBuilder { /** * Setter for storing metadata as a (key, value) pair in {@link #values} map - * - * @param key The key. + * @param key The key. * @param value The value. If null, removes the setting. * @return the modifyable TD * @see #values */ public ModifyableTableDescriptor setValue(byte[] key, byte[] value) { - return setValue(toBytesOrNull(key, v -> v), - toBytesOrNull(value, v -> v)); + return setValue(toBytesOrNull(key, v -> v), toBytesOrNull(value, v -> v)); } public ModifyableTableDescriptor setValue(String key, String value) { - return setValue(toBytesOrNull(key, Bytes::toBytes), - toBytesOrNull(value, Bytes::toBytes)); + return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes)); } /** - * @param key The key. + * @param key The key. * @param value The value. If null, removes the setting. */ - private ModifyableTableDescriptor setValue(final Bytes key, - final String value) { + private ModifyableTableDescriptor setValue(final Bytes key, final String value) { return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } /** * Setter for storing metadata as a (key, value) pair in {@link #values} map - * - * @param key The key. + * @param key The key. * @param value The value. If null, removes the setting. */ public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { @@ -795,9 +756,7 @@ public class TableDescriptorBuilder { /** * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from TableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from TableDescriptor parameters. * @return the modifyable TD */ public ModifyableTableDescriptor removeValue(final String key) { @@ -806,9 +765,7 @@ public class TableDescriptorBuilder { /** * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from TableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from TableDescriptor parameters. 
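The setValue/getValue machinery above stores free-form table metadata; a short sketch of round-tripping a custom attribute through the public builder (the attribute name and value are invented for illustration):

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DescriptorMetadata {
      static TableDescriptor tagOwner(TableDescriptor current) {
        // Copy the existing descriptor and attach an application-defined (key, value) pair.
        TableDescriptor tagged = TableDescriptorBuilder.newBuilder(current)
          .setValue("OWNER_TEAM", "storage-infra").build();
        // The value is readable back through TableDescriptor.getValue.
        System.out.println("OWNER_TEAM = " + tagged.getValue("OWNER_TEAM"));
        return tagged;
      }
    }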
* @return the modifyable TD */ public ModifyableTableDescriptor removeValue(Bytes key) { @@ -817,7 +774,6 @@ public class TableDescriptorBuilder { /** * Remove metadata represented by the key from the {@link #values} map - * * @param key Key whose key and value we're to remove from TableDescriptor parameters * @return the modifiable TD */ @@ -826,10 +782,8 @@ public class TableDescriptorBuilder { } /** - * Check if the readOnly flag of the table is set. If the readOnly flag is - * set then the contents of the table can only be read from but not - * modified. - * + * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents + * of the table can only be read from but not modified. * @return true if all columns in the table should be read only */ @Override @@ -838,13 +792,10 @@ public class TableDescriptorBuilder { } /** - * Setting the table as read only sets all the columns in the table as read - * only. By default all tables are modifiable, but if the readOnly flag is - * set to true then the contents of the table can only be read but not - * modified. - * - * @param readOnly True if all of the columns in the table should be read - * only. + * Setting the table as read only sets all the columns in the table as read only. By default all + * tables are modifiable, but if the readOnly flag is set to true then the contents of the table + * can only be read but not modified. + * @param readOnly True if all of the columns in the table should be read only. * @return the modifyable TD */ public ModifyableTableDescriptor setReadOnly(final boolean readOnly) { @@ -852,9 +803,8 @@ public class TableDescriptorBuilder { } /** - * Check if the compaction enable flag of the table is true. If flag is - * false then no minor/major compactions will be done in real. - * + * Check if the compaction enable flag of the table is true. If flag is false then no + * minor/major compactions will be done in real. * @return true if table compaction enabled */ @Override @@ -864,7 +814,6 @@ public class TableDescriptorBuilder { /** * Setting the table compaction enable flag. - * * @param isEnable True if enable compaction. * @return the modifyable TD */ @@ -875,7 +824,6 @@ public class TableDescriptorBuilder { /** * Check if the split enable flag of the table is true. If flag is false then no split will be * done. - * * @return true if table region split enabled */ @Override @@ -886,7 +834,6 @@ public class TableDescriptorBuilder { /** * Setting the table region split enable flag. * @param isEnable True if enable region split. - * * @return the modifyable TD */ public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) { @@ -896,7 +843,6 @@ public class TableDescriptorBuilder { /** * Check if the region merge enable flag of the table is true. If flag is false then no merge * will be done. - * * @return true if table region merge enabled */ @Override @@ -907,7 +853,6 @@ public class TableDescriptorBuilder { /** * Setting the table region merge enable flag. * @param isEnable True if enable region merge. - * * @return the modifyable TD */ public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) { @@ -915,8 +860,8 @@ public class TableDescriptorBuilder { } /** - * Check if normalization enable flag of the table is true. If flag is false - * then no region normalizer won't attempt to normalize this table. + * Check if normalization enable flag of the table is true. If flag is false then no region + * normalizer won't attempt to normalize this table. 
* @return true if region normalization is enabled for this table **/ @Override @@ -944,13 +889,13 @@ public class TableDescriptorBuilder { public long getNormalizerTargetRegionSize() { long target_region_size = getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1)); - return target_region_size == Long.valueOf(-1) ? getOrDefault( - NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1)) : target_region_size; + return target_region_size == Long.valueOf(-1) + ? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1)) + : target_region_size; } /** * Setting the table normalization enable flag. - * * @param isEnable True if enable normalization. * @return the modifyable TD */ @@ -977,9 +922,7 @@ public class TableDescriptorBuilder { } /** - * Sets the {@link Durability} setting for the table. This defaults to - * Durability.USE_DEFAULT. - * + * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT. * @param durability enum value * @return the modifyable TD */ @@ -989,7 +932,6 @@ public class TableDescriptorBuilder { /** * Returns the durability setting for the table. - * * @return durability setting for the table. */ @Override @@ -998,9 +940,7 @@ public class TableDescriptorBuilder { } /** - * Get the name of the table - * - * @return TableName + * Get the name of the table n */ @Override public TableName getTableName() { @@ -1008,10 +948,9 @@ public class TableDescriptorBuilder { } /** - * This sets the class associated with the region split policy which - * determines when a region split should occur. The class used by default is - * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy - * + * This sets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy * @param clazz the class name * @return the modifyable TD */ @@ -1020,12 +959,11 @@ public class TableDescriptorBuilder { } /** - * This gets the class associated with the region split policy which - * determines when a region split should occur. The class used by default is - * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy - * - * @return the class name of the region split policy for this table. If this - * returns null, the default split policy is used. + * This gets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy + * @return the class name of the region split policy for this table. If this returns null, the + * default split policy is used. */ @Override public String getRegionSplitPolicyClassName() { @@ -1033,12 +971,10 @@ public class TableDescriptorBuilder { } /** - * Returns the maximum size upto which a region can grow to after which a - * region split is triggered. The region size is represented by the size of - * the biggest store file in that region. - * + * Returns the maximum size upto which a region can grow to after which a region split is + * triggered. The region size is represented by the size of the biggest store file in that + * region. * @return max hregion size for table, -1 if not set. 
- * * @see #setMaxFileSize(long) */ @Override @@ -1047,19 +983,17 @@ public class TableDescriptorBuilder { } /** - * Sets the maximum size upto which a region can grow to after which a - * region split is triggered. The region size is represented by the size of - * the biggest store file in that region, i.e. If the biggest store file - * grows beyond the maxFileSize, then the region split is triggered. This - * defaults to a value of 256 MB. + * Sets the maximum size upto which a region can grow to after which a region split is + * triggered. The region size is represented by the size of the biggest store file in that + * region, i.e. If the biggest store file grows beyond the maxFileSize, then the region split is + * triggered. This defaults to a value of 256 MB. *

-     * This is not an absolute value and might vary. Assume that a single row
-     * exceeds the maxFileSize then the storeFileSize will be greater than
-     * maxFileSize since a single row cannot
-     * be split across multiple regions
+     * This is not an absolute value and might vary. Assume that a single row exceeds the
+     * maxFileSize then the storeFileSize will be greater than maxFileSize since a single row cannot
+     * be split across multiple regions
      *

    - * - * @param maxFileSize The maximum file size that a store file can grow to - * before a split is triggered. + * @param maxFileSize The maximum file size that a store file can grow to before a split is + * triggered. * @return the modifyable TD */ public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { @@ -1067,16 +1001,13 @@ public class TableDescriptorBuilder { } public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException { - return setMaxFileSize(Long.parseLong(PrettyPrinter. - valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); + return setMaxFileSize( + Long.parseLong(PrettyPrinter.valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); } /** - * Returns the size of the memstore after which a flush to filesystem is - * triggered. - * + * Returns the size of the memstore after which a flush to filesystem is triggered. * @return memory cache flush size for each hregion, -1 if not set. - * * @see #setMemStoreFlushSize(long) */ @Override @@ -1085,10 +1016,8 @@ public class TableDescriptorBuilder { } /** - * Represents the maximum size of the memstore after which the contents of - * the memstore are flushed to the filesystem. This defaults to a size of 64 - * MB. - * + * Represents the maximum size of the memstore after which the contents of the memstore are + * flushed to the filesystem. This defaults to a size of 64 MB. * @param memstoreFlushSize memory cache flush size for each hregion * @return the modifyable TD */ @@ -1098,16 +1027,14 @@ public class TableDescriptorBuilder { public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize) throws HBaseException { - return setMemStoreFlushSize(Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, - PrettyPrinter.Unit.BYTE))); + return setMemStoreFlushSize( + Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, PrettyPrinter.Unit.BYTE))); } /** - * This sets the class associated with the flush policy which determines - * determines the stores need to be flushed when flushing a region. The - * class used by default is defined in + * This sets the class associated with the flush policy which determines determines the stores + * need to be flushed when flushing a region. The class used by default is defined in * org.apache.hadoop.hbase.regionserver.FlushPolicy. - * * @param clazz the class name * @return the modifyable TD */ @@ -1116,12 +1043,11 @@ public class TableDescriptorBuilder { } /** - * This gets the class associated with the flush policy which determines the - * stores need to be flushed when flushing a region. The class used by - * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy. - * - * @return the class name of the flush policy for this table. If this - * returns null, the default flush policy is used. + * This gets the class associated with the flush policy which determines the stores need to be + * flushed when flushing a region. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.FlushPolicy. + * @return the class name of the flush policy for this table. If this returns null, the default + * flush policy is used. */ @Override public String getFlushPolicyClassName() { @@ -1131,7 +1057,6 @@ public class TableDescriptorBuilder { /** * Adds a column family. For the updating purpose please use * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead. - * * @param family to add. * @return the modifyable TD */ @@ -1141,18 +1066,18 @@ public class TableDescriptorBuilder { } int flength = family.getName() == null ? 
0 : family.getName().length; if (flength > Byte.MAX_VALUE) { - throw new IllegalArgumentException("The length of family name is bigger than " + Byte.MAX_VALUE); + throw new IllegalArgumentException( + "The length of family name is bigger than " + Byte.MAX_VALUE); } if (hasColumnFamily(family.getName())) { - throw new IllegalArgumentException("Family '" - + family.getNameAsString() + "' already exists so cannot be added"); + throw new IllegalArgumentException( + "Family '" + family.getNameAsString() + "' already exists so cannot be added"); } return putColumnFamily(family); } /** * Modifies the existing column family. - * * @param family to update * @return this (for chained invocation) */ @@ -1161,8 +1086,8 @@ public class TableDescriptorBuilder { throw new IllegalArgumentException("Family name cannot be null or empty"); } if (!hasColumnFamily(family.getName())) { - throw new IllegalArgumentException("Column family '" + family.getNameAsString() - + "' does not exist"); + throw new IllegalArgumentException( + "Column family '" + family.getNameAsString() + "' does not exist"); } return putColumnFamily(family); } @@ -1174,7 +1099,6 @@ public class TableDescriptorBuilder { /** * Checks to see if this table contains the given column family - * * @param familyName Family name or column name. * @return true if the table contains the specified family name */ @@ -1196,8 +1120,8 @@ public class TableDescriptorBuilder { } /** - * @return Name of this table and then a map of all of the column family - * descriptors (with only the non-default column family attributes) + * @return Name of this table and then a map of all of the column family descriptors (with only + * the non-default column family attributes) */ @Override public String toStringCustomizedValues() { @@ -1239,9 +1163,10 @@ public class TableDescriptorBuilder { } } // see if a reserved key is a default value. may not want to print it out - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + if ( + printDefaults || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value) + ) { reservedKeys.add(entry.getKey()); } } @@ -1301,13 +1226,11 @@ public class TableDescriptorBuilder { } /** - * Compare the contents of the descriptor with another one passed as a - * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor, - * if yes then the contents of the descriptors are compared. - * + * Compare the contents of the descriptor with another one passed as a parameter. Checks if the + * obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the + * descriptors are compared. * @param obj The object to compare * @return true if the contents of the the two descriptors exactly match - * * @see java.lang.Object#equals(java.lang.Object) */ @Override @@ -1338,13 +1261,11 @@ public class TableDescriptorBuilder { // Comparable /** - * Compares the descriptor with another descriptor which is passed as a - * parameter. This compares the content of the two descriptors and not the - * reference. - * + * Compares the descriptor with another descriptor which is passed as a parameter. This compares + * the content of the two descriptors and not the reference. 
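The setColumnFamily/modifyColumnFamily hunks above keep their validation behaviour (null or empty family names rejected, unknown families rejected on modify). A small sketch of the intended call pattern through the public builder; the family name "cf" and the version count are made up.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyEditSketch {
  static TableDescriptor bumpVersions(TableDescriptor current) {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
      .newBuilder(current.getColumnFamily(Bytes.toBytes("cf"))) // copy the existing family
      .setMaxVersions(5)
      .build();
    // modifyColumnFamily throws IllegalArgumentException if "cf" does not exist yet
    return TableDescriptorBuilder.newBuilder(current).modifyColumnFamily(cf).build();
  }
}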
* @param other The MTD to compare - * @return 0 if the contents of the descriptors are exactly matching, 1 if - * there is a mismatch in the contents + * @return 0 if the contents of the descriptors are exactly matching, 1 if there is a mismatch + * in the contents */ @Override public int compareTo(final ModifyableTableDescriptor other) { @@ -1366,7 +1287,6 @@ public class TableDescriptorBuilder { /** * Sets the number of replicas per region. - * * @param regionReplication the replication factor per region * @return the modifyable TD */ @@ -1379,17 +1299,16 @@ public class TableDescriptorBuilder { */ @Override public boolean hasRegionMemStoreReplication() { - return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION); + return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, + DEFAULT_REGION_MEMSTORE_REPLICATION); } /** - * Enable or Disable the memstore replication from the primary region to the - * replicas. The replication will be used only for meta operations (e.g. - * flush, compaction, ...) - * - * @param memstoreReplication true if the new data written to the primary - * region should be replicated. false if the secondaries can tollerate to - * have new data only when the primary flushes the memstore. + * Enable or Disable the memstore replication from the primary region to the replicas. The + * replication will be used only for meta operations (e.g. flush, compaction, ...) + * @param memstoreReplication true if the new data written to the primary region should be + * replicated. false if the secondaries can tollerate to have new + * data only when the primary flushes the memstore. * @return the modifyable TD */ public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { @@ -1406,11 +1325,9 @@ public class TableDescriptorBuilder { } /** - * Returns all the column family names of the current table. The map of - * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor. - * This returns all the keys of the family map which represents the column - * family names of the table. - * + * Returns all the column family names of the current table. The map of TableDescriptor contains + * mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the family map + * which represents the column family names of the table. * @return Immutable sorted set of the keys of the families. */ @Override @@ -1419,12 +1336,10 @@ public class TableDescriptorBuilder { } /** - * Returns the ColumnFamilyDescriptor for a specific column family with name as - * specified by the parameter column. - * + * Returns the ColumnFamilyDescriptor for a specific column family with name as specified by the + * parameter column. * @param column Column family name - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. */ @Override public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { @@ -1432,12 +1347,10 @@ public class TableDescriptorBuilder { } /** - * Removes the ColumnFamilyDescriptor with name specified by the parameter column - * from the table descriptor - * + * Removes the ColumnFamilyDescriptor with name specified by the parameter column from the table + * descriptor * @param column Name of the column family to be removed. - * @return Column descriptor for the passed family name or the family on - * passed in column. 
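The region replica settings touched just above (setRegionReplication / setRegionMemStoreReplication) are usually combined as in this sketch; the replica count of three is only an example value.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class ReplicaSketch {
  static TableDescriptor threeReplicas(TableName name) {
    return TableDescriptorBuilder.newBuilder(name)
      .setRegionReplication(3)             // one primary region plus two read replicas
      .setRegionMemStoreReplication(true)  // push primary writes to replica memstores rather than waiting for flushes
      .build();
  }
}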
+ * @return Column descriptor for the passed family name or the family on passed in column. */ public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { return this.families.remove(column); @@ -1445,35 +1358,29 @@ public class TableDescriptorBuilder { /** * Add a table coprocessor to this table. The coprocessor type must be - * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't - * check if the class can be loaded or not. Whether a coprocessor is - * loadable or not will be determined when a region is opened. - * - * @param className Full class name. - * @throws IOException - * @return the modifyable TD + * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class + * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a + * region is opened. + * @param className Full class name. n * @return the modifyable TD */ public ModifyableTableDescriptor setCoprocessor(String className) throws IOException { - return setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER) - .build()); + return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className) + .setPriority(Coprocessor.PRIORITY_USER).build()); } /** * Add a table coprocessor to this table. The coprocessor type must be - * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't - * check if the class can be loaded or not. Whether a coprocessor is - * loadable or not will be determined when a region is opened. - * + * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class + * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a + * region is opened. * @throws IOException any illegal parameter key/value * @return the modifyable TD */ - public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) - throws IOException { + public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException { checkHasCoprocessor(cp.getClassName()); if (cp.getPriority() < 0) { - throw new IOException("Priority must be bigger than or equal with zero, current:" - + cp.getPriority()); + throw new IOException( + "Priority must be bigger than or equal with zero, current:" + cp.getPriority()); } // Validate parameter kvs and then add key/values to kvString. StringBuilder kvString = new StringBuilder(); @@ -1482,8 +1389,7 @@ public class TableDescriptorBuilder { throw new IOException("Illegal parameter key = " + e.getKey()); } if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { - throw new IOException("Illegal parameter (" + e.getKey() - + ") value = " + e.getValue()); + throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue()); } if (kvString.length() != 0) { kvString.append(','); @@ -1493,29 +1399,26 @@ public class TableDescriptorBuilder { kvString.append(e.getValue()); } - String value = cp.getJarPath().orElse("") - + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|" - + kvString.toString(); + String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|" + + Integer.toString(cp.getPriority()) + "|" + kvString.toString(); return setCoprocessorToMap(value); } /** * Add a table coprocessor to this table. The coprocessor type must be - * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't - * check if the class can be loaded or not. 
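A sketch of attaching a coprocessor through CoprocessorDescriptorBuilder, matching the setCoprocessor overloads reformatted here. The observer class name and jar path are placeholders; as the javadoc says, loadability is only checked when a region opens.

import java.io.IOException;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorSketch {
  static TableDescriptor withObserver(TableName name) throws IOException {
    return TableDescriptorBuilder.newBuilder(name)
      .setCoprocessor(CoprocessorDescriptorBuilder
        .newBuilder("com.example.MyRegionObserver")   // placeholder class name
        .setJarPath("hdfs:///cp/my-observer.jar")     // placeholder path; empty means "load from the classpath"
        .setPriority(Coprocessor.PRIORITY_USER)       // must be >= 0, as enforced above
        .build())
      .build();
  }
}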
Whether a coprocessor is - * loadable or not will be determined when a region is opened. - * - * @param specStr The Coprocessor specification all in in one String - * @throws IOException - * @return the modifyable TD - * @deprecated used by HTableDescriptor and admin.rb. - * As of release 2.0.0, this will be removed in HBase 3.0.0. + * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class + * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a + * region is opened. + * @param specStr The Coprocessor specification all in in one String n * @return the modifyable + * TD + * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed + * in HBase 3.0.0. */ @Deprecated public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr) throws IOException { - CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow( - () -> new IllegalArgumentException( + CoprocessorDescriptor cpDesc = + toCoprocessorDescriptor(specStr).orElseThrow(() -> new IllegalArgumentException( "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr)); checkHasCoprocessor(cpDesc.getClassName()); return setCoprocessorToMap(specStr); @@ -1544,7 +1447,8 @@ public class TableDescriptorBuilder { if (!keyMatcher.matches()) { continue; } - maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber); + maxCoprocessorNumber = + Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber); } maxCoprocessorNumber++; String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber); @@ -1552,32 +1456,27 @@ public class TableDescriptorBuilder { } /** - * Check if the table has an attached co-processor represented by the name - * className - * + * Check if the table has an attached co-processor represented by the name className * @param classNameToMatch - Class name of the co-processor * @return true of the table has a co-processor className */ @Override public boolean hasCoprocessor(String classNameToMatch) { - return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName() - .equals(classNameToMatch)); + return getCoprocessorDescriptors().stream() + .anyMatch(cp -> cp.getClassName().equals(classNameToMatch)); } /** - * Return the list of attached co-processor represented by their name - * className - * + * Return the list of attached co-processor represented by their name className * @return The list of co-processors classNames */ @Override public List getCoprocessorDescriptors() { List result = new ArrayList<>(); - for (Map.Entry e: getValues().entrySet()) { + for (Map.Entry e : getValues().entrySet()) { String key = Bytes.toString(e.getKey().get()).trim(); if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) { - toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()) - .ifPresent(result::add); + toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()).ifPresent(result::add); } } return result; @@ -1585,22 +1484,18 @@ public class TableDescriptorBuilder { /** * Remove a coprocessor from those set on the table - * * @param className Class name of the co-processor */ public void removeCoprocessor(String className) { Bytes match = null; Matcher keyMatcher; Matcher valueMatcher; - for (Map.Entry e : this.values - .entrySet()) { - keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e - .getKey().get())); + for (Map.Entry e : this.values.entrySet()) { + keyMatcher = 
CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get())); if (!keyMatcher.matches()) { continue; } - valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes - .toString(e.getValue().get())); + valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get())); if (!valueMatcher.matches()) { continue; } @@ -1660,15 +1555,11 @@ public class TableDescriptorBuilder { } /** - * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance - * with pb magic prefix - * @return An instance of {@link ModifyableTableDescriptor} made from - * bytes - * @throws DeserializationException - * @see #toByteArray() + * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix + * @return An instance of {@link ModifyableTableDescriptor} made from bytes n + * * @see #toByteArray() */ - private static TableDescriptor parseFrom(final byte[] bytes) - throws DeserializationException { + private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor"); } @@ -1703,15 +1594,14 @@ public class TableDescriptorBuilder { if (matcher.matches()) { // jar file path can be empty if the cp class can be loaded // from class loader. - String path = matcher.group(1).trim().isEmpty() ? - null : matcher.group(1).trim(); + String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim(); String className = matcher.group(2).trim(); if (className.isEmpty()) { return Optional.empty(); } String priorityStr = matcher.group(3).trim(); - int priority = priorityStr.isEmpty() ? - Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr); + int priority = + priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr); String cfgSpec = null; try { cfgSpec = matcher.group(4); @@ -1726,11 +1616,8 @@ public class TableDescriptorBuilder { ourConf.put(m.group(1), m.group(2)); } } - return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className) - .setJarPath(path) - .setPriority(priority) - .setProperties(ourConf) - .build()); + return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperties(ourConf).build()); } return Optional.empty(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java index 90e05a9a13c..0b08dd8e7d6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -86,11 +84,12 @@ public final class TableDescriptorUtils { } } - private TableDescriptorUtils() { } + private TableDescriptorUtils() { + } /** - * Compares two {@link TableDescriptor} and indicate which columns were added, deleted, - * or modified from oldTD to newTD + * Compares two {@link TableDescriptor} and indicate which columns were added, deleted, or + * modified from oldTD to newTD * @return a TableDescriptorDelta that contains the added/deleted/modified column names */ public static TableDescriptorDelta computeDelta(TableDescriptor oldTD, TableDescriptor newTD) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java index 40612e9b202..ffd8cf8409d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.exceptions.DeserializationException; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** @@ -40,53 +42,49 @@ public class TableState { /** * Covert from PB version of State - * - * @param state convert from - * @return POJO + * @param state convert from n */ public static State convert(HBaseProtos.TableState.State state) { State ret; switch (state) { - case ENABLED: - ret = State.ENABLED; - break; - case DISABLED: - ret = State.DISABLED; - break; - case DISABLING: - ret = State.DISABLING; - break; - case ENABLING: - ret = State.ENABLING; - break; - default: - throw new IllegalStateException(state.toString()); + case ENABLED: + ret = State.ENABLED; + break; + case DISABLED: + ret = State.DISABLED; + break; + case DISABLING: + ret = State.DISABLING; + break; + case ENABLING: + ret = State.ENABLING; + break; + default: + throw new IllegalStateException(state.toString()); } return ret; } /** - * Covert to PB version of State - * - * @return PB + * Covert to PB version of State n */ public HBaseProtos.TableState.State convert() { HBaseProtos.TableState.State state; switch (this) { - case ENABLED: - state = HBaseProtos.TableState.State.ENABLED; - break; - case DISABLED: - state = HBaseProtos.TableState.State.DISABLED; - break; - case DISABLING: - state = HBaseProtos.TableState.State.DISABLING; - break; - case ENABLING: - state = HBaseProtos.TableState.State.ENABLING; - break; - default: - throw new IllegalStateException(this.toString()); + case ENABLED: + state = HBaseProtos.TableState.State.ENABLED; + break; + case DISABLED: + state = HBaseProtos.TableState.State.DISABLED; + break; + 
case DISABLING: + state = HBaseProtos.TableState.State.DISABLING; + break; + case ENABLING: + state = HBaseProtos.TableState.State.ENABLING; + break; + default: + throw new IllegalStateException(this.toString()); } return state; } @@ -141,7 +139,7 @@ public class TableState { /** * Create instance of TableState. * @param tableName name of the table - * @param state table state + * @param state table state */ public TableState(TableName tableName, State state) { this.tableName = tableName; @@ -156,9 +154,7 @@ public class TableState { } /** - * Table name for state - * - * @return milliseconds + * Table name for state n */ public TableName getTableName() { return tableName; @@ -180,28 +176,22 @@ public class TableState { */ public boolean inStates(State... states) { for (State s : states) { - if (s.equals(this.state)) - return true; + if (s.equals(this.state)) return true; } return false; } - /** - * Covert to PB version of TableState - * @return PB + * Covert to PB version of TableState n */ public HBaseProtos.TableState convert() { - return HBaseProtos.TableState.newBuilder() - .setState(this.state.convert()).build(); + return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build(); } /** * Covert from PB version of TableState - * - * @param tableName table this state of - * @param tableState convert from - * @return POJO + * @param tableName table this state of + * @param tableState convert from n */ public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) { TableState.State state = State.convert(tableState.getState()); @@ -209,7 +199,7 @@ public class TableState { } public static TableState parseFrom(TableName tableName, byte[] bytes) - throws DeserializationException { + throws DeserializationException { try { return convert(tableName, HBaseProtos.TableState.parseFrom(bytes)); } catch (InvalidProtocolBufferException e) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java index 128a77de196..a2a53114ac7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
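The switch blocks above only re-indent the POJO/protobuf state mapping. For reference, a small sketch of the inStates helper from the same class; the "enabled or enabling" interpretation is just an example of how callers use it.

import org.apache.hadoop.hbase.client.TableState;

public class TableStateSketch {
  static boolean enabledOrEnabling(TableState ts) {
    return ts.inStates(TableState.State.ENABLED, TableState.State.ENABLING);
  }
}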
*/ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HRegionInfo; @@ -29,9 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; @Deprecated class UnmodifyableHRegionInfo extends HRegionInfo { /* - * Creates an unmodifyable copy of an HRegionInfo - * - * @param info + * Creates an unmodifyable copy of an HRegionInfo n */ UnmodifyableHRegionInfo(HRegionInfo info) { super(info); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index abb98569bfd..fd9fb33bba0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -44,8 +44,6 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -120,7 +118,7 @@ class ZKConnectionRegistry implements ConnectionRegistry { } private static void tryComplete(MutableInt remaining, Collection locs, - CompletableFuture future) { + CompletableFuture future) { remaining.decrement(); if (remaining.intValue() > 0) { return; @@ -128,8 +126,8 @@ class ZKConnectionRegistry implements ConnectionRegistry { future.complete(new RegionLocations(locs)); } - private Pair getStateAndServerName( - ZooKeeperProtos.MetaRegionServer proto) { + private Pair + getStateAndServerName(ZooKeeperProtos.MetaRegionServer proto) { RegionState.State state; if (proto.hasState()) { state = RegionState.State.convert(proto.getState()); @@ -142,7 +140,7 @@ class ZKConnectionRegistry implements ConnectionRegistry { } private void getMetaRegionLocation(CompletableFuture future, - List metaReplicaZNodes) { + List metaReplicaZNodes) { if (metaReplicaZNodes.isEmpty()) { future.completeExceptionally(new IOException("No meta znode available")); } @@ -190,12 +188,12 @@ class ZKConnectionRegistry implements ConnectionRegistry { } else { Pair stateAndServerName = getStateAndServerName(proto); if (stateAndServerName.getFirst() != RegionState.State.OPEN) { - LOG.warn("Meta region for replica " + replicaId + " is in state " + - stateAndServerName.getFirst()); + LOG.warn("Meta region for replica " + replicaId + " is in state " + + stateAndServerName.getFirst()); locs.put(replicaId, null); } else { - locs.put(replicaId, new HRegionLocation( - getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), + locs.put(replicaId, + new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), stateAndServerName.getSecond())); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java index 3957d1e1d0e..ee66bcada1b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; /** - * Configurable policy for the amount of time a client should wait for a new request to the - * server when given the server load statistics. + * Configurable policy for the amount of time a client should wait for a new request to the server + * when given the server load statistics. *

    * Must have a single-argument constructor that takes a {@link org.apache.hadoop.conf.Configuration} *

    @@ -30,8 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public interface ClientBackoffPolicy { - public static final String BACKOFF_POLICY_CLASS = - "hbase.client.statistics.backoff-policy"; + public static final String BACKOFF_POLICY_CLASS = "hbase.client.statistics.backoff-policy"; /** * @return the number of ms to wait on the client based on the diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java index 03589472374..a786702b169 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.client.backoff; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.ReflectionUtils; @InterfaceAudience.Private @InterfaceStability.Evolving @@ -37,17 +37,16 @@ public final class ClientBackoffPolicyFactory { public static ClientBackoffPolicy create(Configuration conf) { // create the backoff policy String className = - conf.get(ClientBackoffPolicy.BACKOFF_POLICY_CLASS, NoBackoffPolicy.class - .getName()); - return ReflectionUtils.instantiateWithCustomCtor(className, - new Class[] { Configuration.class }, new Object[] { conf }); + conf.get(ClientBackoffPolicy.BACKOFF_POLICY_CLASS, NoBackoffPolicy.class.getName()); + return ReflectionUtils.instantiateWithCustomCtor(className, + new Class[] { Configuration.class }, new Object[] { conf }); } /** * Default backoff policy that doesn't create any backoff for the client, regardless of load */ public static class NoBackoffPolicy implements ClientBackoffPolicy { - public NoBackoffPolicy(Configuration conf){ + public NoBackoffPolicy(Configuration conf) { // necessary to meet contract } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java index f769e2ed611..aa84207e1ed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,11 +23,12 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Simple exponential backoff policy on for the client that uses a percent^4 times the - * max backoff to generate the backoff time. 
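Because ClientBackoffPolicyFactory instantiates the configured class reflectively, any custom policy must expose the single-argument Configuration constructor mentioned in the interface javadoc. A sketch under those assumptions; the class name and the flat 100 ms delay are made up, and the getBackoffTime signature is taken from the interface as I understand it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
import org.apache.hadoop.hbase.client.backoff.ServerStatistics;

public class FlatBackoffPolicy implements ClientBackoffPolicy {
  public FlatBackoffPolicy(Configuration conf) {
    // required by ClientBackoffPolicyFactory, which instantiates the policy via ReflectionUtils
  }

  @Override
  public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats) {
    return stats == null ? 0L : 100L; // back off a flat 100 ms once any statistics have been reported
  }
}

Such a policy would be selected with conf.set(ClientBackoffPolicy.BACKOFF_POLICY_CLASS, FlatBackoffPolicy.class.getName()).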
+ * Simple exponential backoff policy on for the client that uses a percent^4 times the max backoff + * to generate the backoff time. */ @InterfaceAudience.Public public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy { @@ -77,8 +78,7 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy { heapOccupancy = heapOccupancyHighWatermark; } percent = Math.max(percent, - scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, - 0.1, 1.0)); + scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, 0.1, 1.0)); } percent = Math.max(percent, compactionPressure); // square the percent as a value less than 1. Closer we move to 100 percent, @@ -92,13 +92,13 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy { /** Scale valueIn in the range [baseMin,baseMax] to the range [limitMin,limitMax] */ private static double scale(double valueIn, double baseMin, double baseMax, double limitMin, - double limitMax) { - Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", - baseMin, baseMax); - Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", - limitMin, limitMax); + double limitMax) { + Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", baseMin, + baseMax); + Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", limitMin, + limitMax); Preconditions.checkArgument(valueIn >= baseMin && valueIn <= baseMax, - "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax); + "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax); return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java index e18309d9bbe..ab5915ec975 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.client.backoff; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.client.RegionLoadStats; -import org.apache.hadoop.hbase.util.Bytes; - import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.hbase.client.RegionLoadStats; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Track the statistics for a single region @@ -33,14 +32,12 @@ public class ServerStatistics { private Map stats = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** - * Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, - * as something gets set - * @param region - * @param currentStats + * Good enough attempt. Last writer wins. 
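The scale() helper above is a plain linear map from [baseMin, baseMax] onto [limitMin, limitMax]. A tiny standalone check of the arithmetic, with arbitrary sample values:

public class ScaleSketch {
  // Same formula as ExponentialClientBackoffPolicy#scale, without the Preconditions checks.
  static double scale(double v, double baseMin, double baseMax, double limitMin, double limitMax) {
    return ((limitMax - limitMin) * (v - baseMin) / (baseMax - baseMin)) + limitMin;
  }

  public static void main(String[] args) {
    // Heap occupancy of 0.5 between watermarks 0.0 and 1.0 lands at ~0.55 in the [0.1, 1.0] band.
    System.out.println(scale(0.5, 0.0, 1.0, 0.1, 1.0)); // ~0.55
  }
}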
It doesn't really matter which one gets to update, as + * something gets set nn */ public void update(byte[] region, RegionLoadStats currentStats) { RegionStatistics regionStat = this.stats.get(region); - if(regionStat == null){ + if (regionStat == null) { regionStat = new RegionStatistics(); this.stats.put(region, regionStat); } @@ -49,7 +46,7 @@ public class ServerStatistics { } @InterfaceAudience.Private - public RegionStatistics getStatsForRegion(byte[] regionName){ + public RegionStatistics getStatsForRegion(byte[] regionName) { return stats.get(regionName); } @@ -64,11 +61,11 @@ public class ServerStatistics { this.compactionPressure = currentStats.getCompactionPressure(); } - public int getMemStoreLoadPercent(){ + public int getMemStoreLoadPercent() { return this.memstoreLoad; } - public int getHeapOccupancyPercent(){ + public int getHeapOccupancyPercent() { return this.heapOccupancy; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java index c3defda5b4d..0d119db37c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,57 +15,49 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.coprocessor; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * A collection of interfaces and utilities used for interacting with custom RPC - * interfaces exposed by Coprocessors. + * A collection of interfaces and utilities used for interacting with custom RPC interfaces exposed + * by Coprocessors. */ @InterfaceAudience.Public public abstract class Batch { /** * Defines a unit of work to be executed. - * *

    * When used with - * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], - * org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} - * the implementations {@link Batch.Call#call(Object)} method will be invoked - * with a proxy to each region's coprocessor {@link com.google.protobuf.Service} implementation. + * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} + * the implementations {@link Batch.Call#call(Object)} method will be invoked with a proxy to each + * region's coprocessor {@link com.google.protobuf.Service} implementation. *

    * @see org.apache.hadoop.hbase.client.coprocessor.Batch * @see org.apache.hadoop.hbase.client.Table#coprocessorService(byte[]) * @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], - * org.apache.hadoop.hbase.client.coprocessor.Batch.Call) - * @param the instance type to be passed to - * {@link Batch.Call#call(Object)} + * org.apache.hadoop.hbase.client.coprocessor.Batch.Call) + * @param the instance type to be passed to {@link Batch.Call#call(Object)} * @param the return type from {@link Batch.Call#call(Object)} */ @InterfaceAudience.Public - public interface Call { + public interface Call { R call(T instance) throws IOException; } /** - * Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)} - * result. - * + * Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)} result. *

    * When used with - * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], - * org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} - * the implementation's {@link Batch.Callback#update(byte[], byte[], Object)} - * method will be called with the {@link Batch.Call#call(Object)} return value - * from each region in the selected range. + * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} + * the implementation's {@link Batch.Callback#update(byte[], byte[], Object)} method will be + * called with the {@link Batch.Call#call(Object)} return value from each region in the selected + * range. *
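A sketch of the Batch.Call pattern this javadoc describes, assuming the RowCountEndpoint example coprocessor from hbase-examples is deployed on the table; error handling is omitted and the result is simply summed across regions.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;

public class RowCountSketch {
  static long countRows(Table table) throws Throwable {
    final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
    // The Call is invoked once per region between the (null, null) start/stop keys, i.e. every region.
    Map<byte[], Long> perRegion = table.coprocessorService(ExampleProtos.RowCountService.class,
      null, null, new Batch.Call<ExampleProtos.RowCountService, Long>() {
        @Override
        public Long call(ExampleProtos.RowCountService counter) throws IOException {
          CoprocessorRpcUtils.BlockingRpcCallback<ExampleProtos.CountResponse> done =
            new CoprocessorRpcUtils.BlockingRpcCallback<>();
          counter.getRowCount(null, request, done);
          ExampleProtos.CountResponse response = done.get();
          return response != null ? response.getCount() : 0L;
        }
      });
    return perRegion.values().stream().mapToLong(Long::longValue).sum();
  }
}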

    * @param the return type from the associated {@link Batch.Call#call(Object)} * @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], - * org.apache.hadoop.hbase.client.coprocessor.Batch.Call) + * org.apache.hadoop.hbase.client.coprocessor.Batch.Call) */ @InterfaceAudience.Public public interface Callback { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java index fb7dcd1dc70..b7af4a9eff9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,32 +20,29 @@ package org.apache.hadoop.hbase.client.coprocessor; import java.io.IOException; import java.math.BigDecimal; import java.math.RoundingMode; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * ColumnInterpreter for doing Aggregation's with BigDecimal columns. This class - * is required at the RegionServer also. - * + * ColumnInterpreter for doing Aggregation's with BigDecimal columns. This class is required at the + * RegionServer also. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public class BigDecimalColumnInterpreter extends ColumnInterpreter { +public class BigDecimalColumnInterpreter + extends ColumnInterpreter { @Override - public BigDecimal getValue(byte[] colFamily, byte[] colQualifier, Cell kv) - throws IOException { + public BigDecimal getValue(byte[] colFamily, byte[] colQualifier, Cell kv) throws IOException { if (kv == null || CellUtil.cloneValue(kv) == null) { return null; } @@ -87,8 +83,9 @@ public class BigDecimalColumnInterpreter extends ColumnInterpreter{ - +public class DoubleColumnInterpreter + extends ColumnInterpreter { + @Override - public Double getValue(byte[] colFamily, byte[] colQualifier, Cell c) - throws IOException { - if (c == null || c.getValueLength() != Bytes.SIZEOF_DOUBLE) - return null; + public Double getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException { + if (c == null || c.getValueLength() != Bytes.SIZEOF_DOUBLE) return null; return PrivateCellUtil.getValueAsDouble(c); } @Override public Double add(Double d1, Double d2) { if (d1 == null || d2 == null) { - return (d1 == null) ? d2 : d1; + return (d1 == null) ? 
d2 : d1; } return d1 + d2; } @@ -63,8 +58,7 @@ public class DoubleColumnInterpreter extends ColumnInterpreter { +public class LongColumnInterpreter + extends ColumnInterpreter { @Override - public Long getValue(byte[] colFamily, byte[] colQualifier, Cell kv) - throws IOException { - if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG) - return null; + public Long getValue(byte[] colFamily, byte[] colQualifier, Cell kv) throws IOException { + if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG) return null; return PrivateCellUtil.getValueAsLong(kv); } @@ -64,8 +59,7 @@ public class LongColumnInterpreter extends ColumnInterpreter - * The data can be passed to mapreduce framework or other systems. - * We use atomic longs so that one thread can increment, - * while another atomically resets to zero after the values are reported - * to hadoop's counters. + * The data can be passed to mapreduce framework or other systems. We use atomic longs so that one + * thread can increment, while another atomically resets to zero after the values are reported to + * hadoop's counters. *
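For the ScanMetrics counters described in the surrounding javadoc, a sketch of how a client opts in and reads them back; the table handle comes from elsewhere and the RPC counter is just one example field.

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

public class ScanMetricsSketch {
  static void scanAndReport(Table table) throws Exception {
    Scan scan = new Scan().setScanMetricsEnabled(true); // metrics are only collected when enabled
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // consume rows
      }
      ScanMetrics metrics = scanner.getScanMetrics();
      System.out.println("RPC calls: " + metrics.countOfRPCcalls.get());
    }
  }
}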

    - * Some of these metrics are general for any client operation such as put - * However, there is no need for this. So they are defined under scan operation - * for now. + * Some of these metrics are general for any client operation such as put However, there is no need + * for this. So they are defined under scan operation for now. */ @InterfaceAudience.Public public class ScanMetrics extends ServerSideScanMetrics { @@ -45,13 +40,14 @@ public class ScanMetrics extends ServerSideScanMetrics { public static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS"; public static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS"; public static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS"; - public static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION"; + public static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = + "NOT_SERVING_REGION_EXCEPTION"; public static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS"; public static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS"; public static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED"; public static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES"; public static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES"; - + /** * number of RPC calls */ @@ -65,7 +61,8 @@ public class ScanMetrics extends ServerSideScanMetrics { /** * sum of milliseconds between sequential next calls */ - public final AtomicLong sumOfMillisSecBetweenNexts = createCounter(MILLIS_BETWEEN_NEXTS_METRIC_NAME); + public final AtomicLong sumOfMillisSecBetweenNexts = + createCounter(MILLIS_BETWEEN_NEXTS_METRIC_NAME); /** * number of NotServingRegionException caught @@ -80,7 +77,8 @@ public class ScanMetrics extends ServerSideScanMetrics { /** * number of bytes in Result objects from remote region servers */ - public final AtomicLong countOfBytesInRemoteResults = createCounter(BYTES_IN_REMOTE_RESULTS_METRIC_NAME); + public final AtomicLong countOfBytesInRemoteResults = + createCounter(BYTES_IN_REMOTE_RESULTS_METRIC_NAME); /** * number of regions diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java index 97722bcbdcf..519109934eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client.metrics; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; @@ -36,9 +35,8 @@ public class ServerSideScanMetrics { private final Map counters = new HashMap<>(); /** - * Create a new counter with the specified name - * @param counterName - * @return {@link AtomicLong} instance for the counter with counterName + * Create a new counter with the specified name n * @return {@link AtomicLong} instance for the + * counter with counterName */ protected AtomicLong createCounter(String counterName) { AtomicLong c = new AtomicLong(0); @@ -51,16 +49,16 @@ public class ServerSideScanMetrics { /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-17886). - * Use {@link #COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME}. + * (HBASE-17886). Use + * {@link #COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME}. */ @Deprecated public static final String COUNT_OF_ROWS_SCANNED_KEY = COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME; /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-17886). - * Use {@link #COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME}. + * (HBASE-17886). Use + * {@link #COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME}. */ @Deprecated public static final String COUNT_OF_ROWS_FILTERED_KEY = COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME; @@ -68,7 +66,8 @@ public class ServerSideScanMetrics { /** * number of rows filtered during scan RPC */ - public final AtomicLong countOfRowsFiltered = createCounter(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME); + public final AtomicLong countOfRowsFiltered = + createCounter(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME); /** * number of rows scanned during scan RPC. Not every row scanned will be returned to the client @@ -77,8 +76,7 @@ public class ServerSideScanMetrics { public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME); /** - * @param counterName - * @param value + * nn */ public void setCounter(String counterName, long value) { AtomicLong c = this.counters.get(counterName); @@ -88,24 +86,21 @@ public class ServerSideScanMetrics { } /** - * @param counterName - * @return true if a counter exists with the counterName + * n * @return true if a counter exists with the counterName */ public boolean hasCounter(String counterName) { return this.counters.containsKey(counterName); } /** - * @param counterName - * @return {@link AtomicLong} instance for this counter name, null if counter does not exist. + * n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist. 
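The counter accessors above are name-based; a small sketch reading one of the public metric names defined in ScanMetrics:

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

public class CounterReadSketch {
  static long rpcRetries(ScanMetrics metrics) {
    if (!metrics.hasCounter(ScanMetrics.RPC_RETRIES_METRIC_NAME)) {
      return 0L; // hasCounter guards against names that were never registered
    }
    AtomicLong counter = metrics.getCounter(ScanMetrics.RPC_RETRIES_METRIC_NAME);
    return counter.get();
  }
}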
*/ public AtomicLong getCounter(String counterName) { return this.counters.get(counterName); } /** - * @param counterName - * @param delta + * nn */ public void addToCounter(String counterName, long delta) { AtomicLong c = this.counters.get(counterName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 16d2f51f7bf..4f94dc67a88 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,32 +36,30 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + /** *

    - * This class provides the administrative interface to HBase cluster - * replication. + * This class provides the administrative interface to HBase cluster replication. *

    *

    - * Adding a new peer results in creating new outbound connections from every - * region server to a subset of region servers on the slave cluster. Each - * new stream of replication will start replicating from the beginning of the - * current WAL, meaning that edits from that past will be replicated. + * Adding a new peer results in creating new outbound connections from every region server to a + * subset of region servers on the slave cluster. Each new stream of replication will start + * replicating from the beginning of the current WAL, meaning that edits from that past will be + * replicated. *

    *

    - * Removing a peer is a destructive and irreversible operation that stops - * all the replication streams for the given cluster and deletes the metadata - * used to keep track of the replication state. + * Removing a peer is a destructive and irreversible operation that stops all the replication + * streams for the given cluster and deletes the metadata used to keep track of the replication + * state. *

    *

    - * To see which commands are available in the shell, type - * replication. + * To see which commands are available in the shell, type replication. *

    - * * @deprecated use {@link org.apache.hadoop.hbase.client.Admin} instead. */ @InterfaceAudience.Public @@ -76,8 +73,8 @@ public class ReplicationAdmin implements Closeable { // only Global for now, can add other type // such as, 1) no global replication, or 2) the table is replicated to this cluster, etc. public static final String REPLICATIONTYPE = "replicationType"; - public static final String REPLICATIONGLOBAL = Integer - .toString(HConstants.REPLICATION_SCOPE_GLOBAL); + public static final String REPLICATIONGLOBAL = + Integer.toString(HConstants.REPLICATION_SCOPE_GLOBAL); private final Connection connection; private Admin admin; @@ -85,7 +82,7 @@ public class ReplicationAdmin implements Closeable { /** * Constructor that creates a connection to the local ZooKeeper ensemble. * @param conf Configuration to use - * @throws IOException if an internal replication error occurs + * @throws IOException if an internal replication error occurs * @throws RuntimeException if replication isn't enabled. */ public ReplicationAdmin(Configuration conf) throws IOException { @@ -95,19 +92,19 @@ public class ReplicationAdmin implements Closeable { /** * Add a new remote slave cluster for replication. - * @param id a short name that identifies the cluster + * @param id a short name that identifies the cluster * @param peerConfig configuration for the replication slave cluster - * @param tableCfs the table and column-family list which will be replicated for this peer. - * A map from tableName to column family names. An empty collection can be passed - * to indicate replicating all column families. Pass null for replicating all table and column - * families - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, - * use {@link #addPeer(String, ReplicationPeerConfig)} instead. + * @param tableCfs the table and column-family list which will be replicated for this peer. A + * map from tableName to column family names. An empty collection can be passed + * to indicate replicating all column families. Pass null for replicating all + * table and column families + * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, use + * {@link #addPeer(String, ReplicationPeerConfig)} instead. */ @Deprecated public void addPeer(String id, ReplicationPeerConfig peerConfig, - Map> tableCfs) throws ReplicationException, - IOException { + Map> tableCfs) + throws ReplicationException, IOException { if (tableCfs != null) { peerConfig.setTableCFsMap(tableCfs); } @@ -116,21 +113,21 @@ public class ReplicationAdmin implements Closeable { /** * Add a new remote slave cluster for replication. 
- * @param id a short name that identifies the cluster + * @param id a short name that identifies the cluster * @param peerConfig configuration for the replication slave cluster * @deprecated use * {@link org.apache.hadoop.hbase.client.Admin#addReplicationPeer(String, ReplicationPeerConfig)} * instead */ @Deprecated - public void addPeer(String id, ReplicationPeerConfig peerConfig) throws ReplicationException, - IOException { + public void addPeer(String id, ReplicationPeerConfig peerConfig) + throws ReplicationException, IOException { this.admin.addReplicationPeer(id, peerConfig); } /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0 - * */ + * @deprecated as release of 2.0.0, and it will be removed in 3.0.0 + */ @Deprecated public static Map> parseTableCFsFromConfig(String tableCFsConfig) { return ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCFsConfig); @@ -149,7 +146,8 @@ public class ReplicationAdmin implements Closeable { /** * Removes a peer cluster and stops the replication to it. * @param id a short name that identifies the cluster - * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#removeReplicationPeer(String)} instead + * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#removeReplicationPeer(String)} + * instead */ @Deprecated public void removePeer(String id) throws IOException { @@ -180,9 +178,7 @@ public class ReplicationAdmin implements Closeable { /** * Get the number of slave clusters the local cluster has. - * @return number of slave clusters - * @throws IOException - * @deprecated + * @return number of slave clusters n * @deprecated */ @Deprecated public int getPeersCount() throws IOException { @@ -214,9 +210,9 @@ public class ReplicationAdmin implements Closeable { /** * Get the replicable table-cf config of the specified peer. * @param id a short name that identifies the cluster - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, - * use {@link #getPeerConfig(String)} instead. - * */ + * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, use + * {@link #getPeerConfig(String)} instead. + */ @Deprecated public String getPeerTableCFs(String id) throws IOException { ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(id); @@ -225,62 +221,52 @@ public class ReplicationAdmin implements Closeable { /** * Append the replicable table-cf config of the specified peer - * @param id a short that identifies the cluster - * @param tableCfs table-cfs config str - * @throws ReplicationException - * @throws IOException - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, - * use {@link #appendPeerTableCFs(String, Map)} instead. + * @param id a short that identifies the cluster + * @param tableCfs table-cfs config str nn * @deprecated as release of 2.0.0, and it will be + * removed in 3.0.0, use {@link #appendPeerTableCFs(String, Map)} instead. 
*/ @Deprecated - public void appendPeerTableCFs(String id, String tableCfs) throws ReplicationException, - IOException { + public void appendPeerTableCFs(String id, String tableCfs) + throws ReplicationException, IOException { appendPeerTableCFs(id, ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs)); } /** * Append the replicable table-cf config of the specified peer - * @param id a short that identifies the cluster - * @param tableCfs A map from tableName to column family names - * @throws ReplicationException - * @throws IOException + * @param id a short that identifies the cluster + * @param tableCfs A map from tableName to column family names nn */ @Deprecated public void appendPeerTableCFs(String id, Map> tableCfs) - throws ReplicationException, IOException { + throws ReplicationException, IOException { this.admin.appendReplicationPeerTableCFs(id, copyTableCFs(tableCfs)); } /** * Remove some table-cfs from table-cfs config of the specified peer - * @param id a short name that identifies the cluster - * @param tableCf table-cfs config str - * @throws ReplicationException - * @throws IOException - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, - * use {@link #removePeerTableCFs(String, Map)} instead. + * @param id a short name that identifies the cluster + * @param tableCf table-cfs config str nn * @deprecated as release of 2.0.0, and it will be + * removed in 3.0.0, use {@link #removePeerTableCFs(String, Map)} instead. */ @Deprecated - public void removePeerTableCFs(String id, String tableCf) throws ReplicationException, - IOException { + public void removePeerTableCFs(String id, String tableCf) + throws ReplicationException, IOException { removePeerTableCFs(id, ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCf)); } /** * Remove some table-cfs from config of the specified peer - * @param id a short name that identifies the cluster - * @param tableCfs A map from tableName to column family names - * @throws ReplicationException - * @throws IOException + * @param id a short name that identifies the cluster + * @param tableCfs A map from tableName to column family names nn */ @Deprecated public void removePeerTableCFs(String id, Map> tableCfs) - throws ReplicationException, IOException { + throws ReplicationException, IOException { this.admin.removeReplicationPeerTableCFs(id, copyTableCFs(tableCfs)); } private Map> - copyTableCFs(Map> tableCfs) { + copyTableCFs(Map> tableCfs) { Map> newTableCfs = new HashMap<>(); if (tableCfs != null) { tableCfs.forEach( @@ -291,15 +277,15 @@ public class ReplicationAdmin implements Closeable { /** * Set the replicable table-cf config of the specified peer - * @param id a short name that identifies the cluster - * @param tableCfs the table and column-family list which will be replicated for this peer. - * A map from tableName to column family names. An empty collection can be passed - * to indicate replicating all column families. Pass null for replicating all table and column - * families + * @param id a short name that identifies the cluster + * @param tableCfs the table and column-family list which will be replicated for this peer. A map + * from tableName to column family names. An empty collection can be passed to + * indicate replicating all column families. 
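As a companion to the string-based appendPeerTableCFs/removePeerTableCFs overloads reflowed above, here is a hedged sketch of the map-based Admin replacements they delegate to; the peer id and table/CF names are placeholders.

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class PeerTableCfsSketch {
  static void adjustPeerTableCfs(Admin admin) throws Exception {
    Map<TableName, List<String>> delta = new HashMap<>();
    delta.put(TableName.valueOf("ns1:table1"), Collections.singletonList("cf3"));

    // Start replicating ns1:table1#cf3 to peer "1" ...
    admin.appendReplicationPeerTableCFs("1", delta);
    // ... and later stop replicating exactly that table/CF pair again.
    admin.removeReplicationPeerTableCFs("1", delta);
  }
}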
Pass null for replicating all table + * and column families */ @Deprecated public void setPeerTableCFs(String id, Map> tableCfs) - throws IOException { + throws IOException { ReplicationPeerConfig peerConfig = getPeerConfig(id); peerConfig.setTableCFsMap(tableCfs); updatePeerConfig(id, peerConfig); @@ -307,8 +293,8 @@ public class ReplicationAdmin implements Closeable { /** * Get the state of the specified peer cluster - * @param id String format of the Short name that identifies the peer, - * an IllegalArgumentException is thrown if it doesn't exist + * @param id String format of the Short name that identifies the peer, an IllegalArgumentException + * is thrown if it doesn't exist * @return true if replication is enabled to that peer, false if it isn't */ @Deprecated @@ -330,32 +316,26 @@ public class ReplicationAdmin implements Closeable { /** * Find all column families that are replicated from this cluster - * @return the full list of the replicated column families of this cluster as: - * tableName, family name, replicationType - * - * Currently replicationType is Global. In the future, more replication - * types may be extended here. For example - * 1) the replication may only apply to selected peers instead of all peers - * 2) the replicationType may indicate the host Cluster servers as Slave - * for the table:columnFam. + * @return the full list of the replicated column families of this cluster as: tableName, family + * name, replicationType Currently replicationType is Global. In the future, more + * replication types may be extended here. For example 1) the replication may only apply + * to selected peers instead of all peers 2) the replicationType may indicate the host + * Cluster servers as Slave for the table:columnFam. * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicatedTableCFs()} instead */ @Deprecated public List> listReplicated() throws IOException { List> replicationColFams = new ArrayList<>(); - admin.listReplicatedTableCFs().forEach( - (tableCFs) -> { - String table = tableCFs.getTable().getNameAsString(); - tableCFs.getColumnFamilyMap() - .forEach( - (cf, scope) -> { - HashMap replicationEntry = new HashMap<>(); - replicationEntry.put(TNAME, table); - replicationEntry.put(CFNAME, cf); - replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL); - replicationColFams.add(replicationEntry); - }); + admin.listReplicatedTableCFs().forEach((tableCFs) -> { + String table = tableCFs.getTable().getNameAsString(); + tableCFs.getColumnFamilyMap().forEach((cf, scope) -> { + HashMap replicationEntry = new HashMap<>(); + replicationEntry.put(TNAME, table); + replicationEntry.put(CFNAME, cf); + replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL); + replicationColFams.add(replicationEntry); }); + }); return replicationColFams; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index 0c015e06934..7942d9904de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
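For the listReplicated() return shape described above (tableName, family name, replicationType per entry), the replacement named in its @deprecated tag exposes the same information as TableCFs objects. A brief sketch, assuming an open Admin handle:

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.replication.TableCFs;

public final class ListReplicatedSketch {
  static void printReplicatedCfs(Admin admin) throws Exception {
    // Each TableCFs entry is a table plus its <column family, replication scope> map.
    for (TableCFs tableCFs : admin.listReplicatedTableCFs()) {
      System.out.println(tableCFs.getTable().getNameAsString() + " -> "
        + tableCFs.getColumnFamilyMap().keySet());
    }
  }
}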
See the NOTICE file * distributed with this work for additional information @@ -42,11 +41,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; @@ -62,7 +63,8 @@ public final class ReplicationPeerConfigUtil { public static final String HBASE_REPLICATION_PEER_BASE_CONFIG = "hbase.replication.peer.base.config"; - private ReplicationPeerConfigUtil() {} + private ReplicationPeerConfigUtil() { + } public static String convertToString(Set namespaces) { if (namespaces == null) { @@ -72,13 +74,13 @@ public final class ReplicationPeerConfigUtil { } /** convert map to TableCFs Object */ - public static ReplicationProtos.TableCF[] convert( - Map> tableCfs) { + public static ReplicationProtos.TableCF[] + convert(Map> tableCfs) { if (tableCfs == null) { return null; } List tableCFList = new ArrayList<>(tableCfs.entrySet().size()); - ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder(); + ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder(); for (Map.Entry> entry : tableCfs.entrySet()) { tableCFBuilder.clear(); tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(entry.getKey())); @@ -101,10 +103,9 @@ public final class ReplicationPeerConfigUtil { } /** - * Convert string to TableCFs Object. - * This is only for read TableCFs information from TableCF node. - * Input String Format: ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;ns3.table3. - * */ + * Convert string to TableCFs Object. This is only for read TableCFs information from TableCF + * node. Input String Format: ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;ns3.table3. + */ public static ReplicationProtos.TableCF[] convert(String tableCFsConfig) { if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) { return null; @@ -121,7 +122,7 @@ public final class ReplicationPeerConfigUtil { continue; } // 2 split to "table" and "cf1,cf2" - // for each table: "table#cf1,cf2" or "table" + // for each table: "table#cf1,cf2" or "table" String[] pair = tab.split(":"); String tabName = pair[0].trim(); if (pair.length > 2 || tabName.length() == 0) { @@ -138,8 +139,7 @@ public final class ReplicationPeerConfigUtil { ns = dbs[0]; tName = dbs[1]; } - tableCFBuilder.setTableName( - ProtobufUtil.toProtoTableName(TableName.valueOf(ns, tName))); + tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf(ns, tName))); // 3 parse "cf1,cf2" part to List if (pair.length == 2) { @@ -157,18 +157,17 @@ public final class ReplicationPeerConfigUtil { } /** - * Convert TableCFs Object to String. - * Output String Format: ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3 - * */ + * Convert TableCFs Object to String. 
Output String Format: + * ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3 + */ public static String convert(ReplicationProtos.TableCF[] tableCFs) { StringBuilder sb = new StringBuilder(); for (int i = 0, n = tableCFs.length; i < n; i++) { ReplicationProtos.TableCF tableCF = tableCFs[i]; String namespace = tableCF.getTableName().getNamespace().toStringUtf8(); if (StringUtils.isNotEmpty(namespace)) { - sb.append(namespace).append("."). - append(tableCF.getTableName().getQualifier().toStringUtf8()) - .append(":"); + sb.append(namespace).append(".") + .append(tableCF.getTableName().getQualifier().toStringUtf8()).append(":"); } else { sb.append(tableCF.getTableName().toString()).append(":"); } @@ -184,10 +183,10 @@ public final class ReplicationPeerConfigUtil { } /** - * Get TableCF in TableCFs, if not exist, return null. - * */ + * Get TableCF in TableCFs, if not exist, return null. + */ public static ReplicationProtos.TableCF getTableCF(ReplicationProtos.TableCF[] tableCFs, - String table) { + String table) { for (int i = 0, n = tableCFs.length; i < n; i++) { ReplicationProtos.TableCF tableCF = tableCFs[i]; if (tableCF.getTableName().getQualifier().toStringUtf8().equals(table)) { @@ -198,10 +197,9 @@ public final class ReplicationPeerConfigUtil { } /** - * Parse bytes into TableCFs. - * It is used for backward compatibility. - * Old format bytes have no PB_MAGIC Header - * */ + * Parse bytes into TableCFs. It is used for backward compatibility. Old format bytes have no + * PB_MAGIC Header + */ public static ReplicationProtos.TableCF[] parseTableCFs(byte[] bytes) throws IOException { if (bytes == null) { return null; @@ -210,16 +208,16 @@ public final class ReplicationPeerConfigUtil { } /** - * Convert tableCFs string into Map. - * */ + * Convert tableCFs string into Map. + */ public static Map> parseTableCFsFromConfig(String tableCFsConfig) { ReplicationProtos.TableCF[] tableCFs = convert(tableCFsConfig); return convert2Map(tableCFs); } /** - * Convert tableCFs Object to Map. - * */ + * Convert tableCFs Object to Map. 
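The table-CFs string format documented in the convert/parse javadoc above can be exercised directly through the public parse helper; the concrete string below is just the documented example.

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;

public final class TableCfsFormatSketch {
  public static void main(String[] args) {
    // ns1.table1 -> [cf1, cf2]; ns2.table2 -> [cfA, cfB]; table3 -> null (all column families)
    String config = "ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3";
    Map<TableName, List<String>> tableCfs =
      ReplicationPeerConfigUtil.parseTableCFsFromConfig(config);
    tableCfs.forEach((table, cfs) -> System.out.println(table + " -> " + cfs));
  }
}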
+ */ public static Map> convert2Map(ReplicationProtos.TableCF[] tableCFs) { if (tableCFs == null || tableCFs.length == 0) { return null; @@ -247,11 +245,11 @@ public final class ReplicationPeerConfigUtil { * @throws DeserializationException deserialization exception */ public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) - throws DeserializationException { + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { int pbLen = ProtobufUtil.lengthOfPBMagic(); ReplicationProtos.ReplicationPeer.Builder builder = - ReplicationProtos.ReplicationPeer.newBuilder(); + ReplicationProtos.ReplicationPeer.newBuilder(); ReplicationProtos.ReplicationPeer peer; try { ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen); @@ -310,7 +308,7 @@ public final class ReplicationPeerConfigUtil { } Map> excludeTableCFsMap = convert2Map(peer.getExcludeTableCfsList() - .toArray(new ReplicationProtos.TableCF[peer.getExcludeTableCfsCount()])); + .toArray(new ReplicationProtos.TableCF[peer.getExcludeTableCfsCount()])); if (excludeTableCFsMap != null) { builder.setExcludeTableCFsMap(excludeTableCFsMap); } @@ -326,7 +324,7 @@ public final class ReplicationPeerConfigUtil { public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig peerConfig) { ReplicationProtos.ReplicationPeer.Builder builder = - ReplicationProtos.ReplicationPeer.newBuilder(); + ReplicationProtos.ReplicationPeer.newBuilder(); // we used to set cluster key as required so here we must always set it, until we can make sure // that no one uses the old proto file. builder.setClusterkey(peerConfig.getClusterKey() != null ? peerConfig.getClusterKey() : ""); @@ -336,16 +334,13 @@ public final class ReplicationPeerConfigUtil { for (Map.Entry entry : peerConfig.getPeerData().entrySet()) { builder.addData(HBaseProtos.BytesBytesPair.newBuilder() - .setFirst(UnsafeByteOperations.unsafeWrap(entry.getKey())) - .setSecond(UnsafeByteOperations.unsafeWrap(entry.getValue())) - .build()); + .setFirst(UnsafeByteOperations.unsafeWrap(entry.getKey())) + .setSecond(UnsafeByteOperations.unsafeWrap(entry.getValue())).build()); } for (Map.Entry entry : peerConfig.getConfiguration().entrySet()) { - builder.addConfiguration(HBaseProtos.NameStringPair.newBuilder() - .setName(entry.getKey()) - .setValue(entry.getValue()) - .build()); + builder.addConfiguration(HBaseProtos.NameStringPair.newBuilder().setName(entry.getKey()) + .setValue(entry.getValue()).build()); } ReplicationProtos.TableCF[] tableCFs = convert(peerConfig.getTableCFsMap()); @@ -392,30 +387,31 @@ public final class ReplicationPeerConfigUtil { return ProtobufUtil.prependPBMagic(bytes); } - public static ReplicationPeerDescription toReplicationPeerDescription( - ReplicationProtos.ReplicationPeerDescription desc) { - boolean enabled = ReplicationProtos.ReplicationState.State.ENABLED == desc.getState() - .getState(); + public static ReplicationPeerDescription + toReplicationPeerDescription(ReplicationProtos.ReplicationPeerDescription desc) { + boolean enabled = + ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState(); ReplicationPeerConfig config = convert(desc.getConfig()); return new ReplicationPeerDescription(desc.getId(), enabled, config); } - public static ReplicationProtos.ReplicationPeerDescription toProtoReplicationPeerDescription( - ReplicationPeerDescription desc) { + public static ReplicationProtos.ReplicationPeerDescription + toProtoReplicationPeerDescription(ReplicationPeerDescription desc) { 
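parsePeerFrom above accepts both PB_MAGIC-prefixed bytes and the old non-prefixed format. A hedged round-trip sketch follows; it assumes the companion toByteArray serializer in this class (the prependPBMagic call visible in the hunk is its tail), which is not itself part of this excerpt.

import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public final class PeerConfigRoundTripSketch {
  static ReplicationPeerConfig roundTrip(ReplicationPeerConfig peerConfig)
    throws DeserializationException {
    // The serialized form carries the PB_MAGIC prefix, so parsePeerFrom takes the protobuf path.
    byte[] bytes = ReplicationPeerConfigUtil.toByteArray(peerConfig);
    return ReplicationPeerConfigUtil.parsePeerFrom(bytes);
  }
}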
ReplicationProtos.ReplicationPeerDescription.Builder builder = - ReplicationProtos.ReplicationPeerDescription.newBuilder(); + ReplicationProtos.ReplicationPeerDescription.newBuilder(); builder.setId(desc.getPeerId()); - ReplicationProtos.ReplicationState.Builder stateBuilder = ReplicationProtos.ReplicationState - .newBuilder(); - stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED - : ReplicationProtos.ReplicationState.State.DISABLED); + ReplicationProtos.ReplicationState.Builder stateBuilder = + ReplicationProtos.ReplicationState.newBuilder(); + stateBuilder.setState(desc.isEnabled() + ? ReplicationProtos.ReplicationState.State.ENABLED + : ReplicationProtos.ReplicationState.State.DISABLED); builder.setState(stateBuilder.build()); builder.setConfig(convert(desc.getPeerConfig())); return builder.build(); } public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig( - Map> tableCfs, ReplicationPeerConfig peerConfig) { + Map> tableCfs, ReplicationPeerConfig peerConfig) { ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig); Map> preTableCfs = peerConfig.getTableCFsMap(); if (preTableCfs == null) { @@ -427,21 +423,19 @@ public final class ReplicationPeerConfigUtil { } /** - * Helper method to add/removev base peer configs from Configuration to ReplicationPeerConfig - * - * This merges the user supplied peer configuration - * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs - * provided as property hbase.replication.peer.base.configs in hbase configuration. - * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". - * If value is empty, it will remove the existing key-value from peer config. - * + * Helper method to add/removev base peer configs from Configuration to ReplicationPeerConfig This + * merges the user supplied peer configuration + * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs provided as + * property hbase.replication.peer.base.configs in hbase configuration. Expected format for this + * hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". If value is empty, it will remove the existing + * key-value from peer config. * @param conf Configuration * @return ReplicationPeerConfig containing updated configs. */ public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configuration conf, ReplicationPeerConfig receivedPeerConfig) { - ReplicationPeerConfigBuilder copiedPeerConfigBuilder = ReplicationPeerConfig. 
- newBuilder(receivedPeerConfig); + ReplicationPeerConfigBuilder copiedPeerConfigBuilder = + ReplicationPeerConfig.newBuilder(receivedPeerConfig); Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); @@ -467,8 +461,8 @@ public final class ReplicationPeerConfigUtil { } public static ReplicationPeerConfig appendExcludeTableCFsToReplicationPeerConfig( - Map> excludeTableCfs, ReplicationPeerConfig peerConfig) - throws ReplicationException { + Map> excludeTableCfs, ReplicationPeerConfig peerConfig) + throws ReplicationException { if (excludeTableCfs == null) { throw new ReplicationException("exclude tableCfs is null"); } @@ -482,8 +476,8 @@ public final class ReplicationPeerConfigUtil { return builder.build(); } - private static Map> mergeTableCFs( - Map> preTableCfs, Map> tableCfs) { + private static Map> + mergeTableCFs(Map> preTableCfs, Map> tableCfs) { Map> newTableCfs = copyTableCFsMap(preTableCfs); for (Map.Entry> entry : tableCfs.entrySet()) { TableName table = entry.getKey(); @@ -509,7 +503,7 @@ public final class ReplicationPeerConfigUtil { } private static Map> - copyTableCFsMap(Map> preTableCfs) { + copyTableCFsMap(Map> preTableCfs) { Map> newTableCfs = new HashMap<>(); preTableCfs.forEach( (table, cfs) -> newTableCfs.put(table, cfs != null ? Lists.newArrayList(cfs) : null)); @@ -517,8 +511,8 @@ public final class ReplicationPeerConfigUtil { } public static ReplicationPeerConfig removeTableCFsFromReplicationPeerConfig( - Map> tableCfs, ReplicationPeerConfig peerConfig, - String id) throws ReplicationException { + Map> tableCfs, ReplicationPeerConfig peerConfig, String id) + throws ReplicationException { Map> preTableCfs = peerConfig.getTableCFsMap(); if (preTableCfs == null) { throw new ReplicationException("Table-Cfs for peer: " + id + " is null"); @@ -541,14 +535,14 @@ public final class ReplicationPeerConfigUtil { } } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove cf of table: " + table - + " which doesn't specify cfs from table-cfs config in peer: " + id); + + " which doesn't specify cfs from table-cfs config in peer: " + id); } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove table: " + table - + " which has specified cfs from table-cfs config in peer: " + id); + + " which has specified cfs from table-cfs config in peer: " + id); } } else { throw new ReplicationException( - "No table: " + table + " in table-cfs config of peer: " + id); + "No table: " + table + " in table-cfs config of peer: " + id); } } ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig); @@ -557,8 +551,8 @@ public final class ReplicationPeerConfigUtil { } public static ReplicationPeerConfig removeExcludeTableCFsFromReplicationPeerConfig( - Map> excludeTableCfs, ReplicationPeerConfig peerConfig, String id) - throws ReplicationException { + Map> excludeTableCfs, ReplicationPeerConfig peerConfig, String id) + throws ReplicationException { if (excludeTableCfs == null) { throw new ReplicationException("exclude tableCfs is null"); } @@ -584,14 +578,14 @@ public final class ReplicationPeerConfigUtil { } } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove cf of table: " + table - + " which doesn't specify cfs from exclude-table-cfs config in peer: " + id); + + " which doesn't specify cfs from 
exclude-table-cfs config in peer: " + id); } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove table: " + table - + " which has specified cfs from exclude-table-cfs config in peer: " + id); + + " which has specified cfs from exclude-table-cfs config in peer: " + id); } } else { throw new ReplicationException( - "No table: " + table + " in exclude-table-cfs config of peer: " + id); + "No table: " + table + " in exclude-table-cfs config of peer: " + id); } } ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig); @@ -607,7 +601,7 @@ public final class ReplicationPeerConfigUtil { * @throws IOException when create peer cluster configuration failed */ public static Configuration getPeerClusterConfiguration(Configuration conf, - ReplicationPeerDescription peer) throws IOException { + ReplicationPeerDescription peer) throws IOException { ReplicationPeerConfig peerConfig = peer.getPeerConfig(); Configuration otherConf; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java index aea354beac0..b4b703991e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,14 +18,13 @@ package org.apache.hadoop.hbase.client.replication; import java.util.Map; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; /** - * Used by {@link org.apache.hadoop.hbase.client.Admin#listReplicatedTableCFs()}. - * The cfs is a map of <ColumnFamily, ReplicationScope>. + * Used by {@link org.apache.hadoop.hbase.client.Admin#listReplicatedTableCFs()}. The cfs is a map + * of <ColumnFamily, ReplicationScope>. */ @InterfaceAudience.Public public class TableCFs { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java index c410e2d6054..7244124fc00 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
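Looking back at updateReplicationBasePeerConfigs in the ReplicationPeerConfigUtil hunks above: the hbase.replication.peer.base.config property uses the documented k1=v1;k2=v2,v2_1;k3="" format, where an empty value removes that key from the peer's configuration. A hedged sketch of feeding it in; the keys and values are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public final class BasePeerConfigSketch {
  static ReplicationPeerConfig applyBaseConfigs(ReplicationPeerConfig receivedPeerConfig) {
    Configuration conf = HBaseConfiguration.create();
    // k1 and k2 are merged into every peer config; the empty value asks for k3 to be removed.
    conf.set("hbase.replication.peer.base.config", "k1=v1;k2=v2,v2_1;k3=\"\"");
    return ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, receivedPeerConfig);
  }
}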
See the NOTICE file * distributed with this work for additional information @@ -48,11 +47,16 @@ public enum SecurityCapability { public static SecurityCapability valueOf(int value) { switch (value) { - case 0: return SIMPLE_AUTHENTICATION; - case 1: return SECURE_AUTHENTICATION; - case 2: return AUTHORIZATION; - case 3: return CELL_AUTHORIZATION; - case 4: return CELL_VISIBILITY; + case 0: + return SIMPLE_AUTHENTICATION; + case 1: + return SECURE_AUTHENTICATION; + case 2: + return AUTHORIZATION; + case 3: + return CELL_AUTHORIZATION; + case 4: + return CELL_VISIBILITY; default: throw new IllegalArgumentException("Unknown SecurityCapability value " + value); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java index 27081a750fb..bdede7e3d74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_CONNECTION_STRING; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_SYSTEM; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_SYSTEM_VALUE; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_USER; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -66,8 +66,7 @@ public class ConnectionSpanBuilder implements Supplier { @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(SpanKind.CLIENT); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); @@ -77,48 +76,38 @@ public class ConnectionSpanBuilder implements Supplier { /** * @see #populateConnectionAttributes(Map, AsyncConnectionImpl) */ - static void populateConnectionAttributes( - final Map, Object> attributes, - final ClusterConnection conn - ) { + static void populateConnectionAttributes(final Map, Object> attributes, + final ClusterConnection conn) { attributes.put(DB_SYSTEM, DB_SYSTEM_VALUE); attributes.put(DB_CONNECTION_STRING, conn.getConnectionRegistry().getConnectionString()); - attributes.put(DB_USER, Optional.ofNullable(conn.getUser()) - .map(Object::toString) - .orElse(null)); + attributes.put(DB_USER, Optional.ofNullable(conn.getUser()).map(Object::toString).orElse(null)); } /** * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. * @param attributes the attributes map to be populated. - * @param conn the source of connection attribute values. + * @param conn the source of connection attribute values. 
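The SecurityCapability.valueOf(int) switch reformatted above maps the proto integer codes the server returns into the public enum; on the client side that surfaces through Admin#getSecurityCapabilities. A short sketch, assuming an open Admin handle:

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.security.SecurityCapability;

public final class SecurityCapabilitySketch {
  static boolean supportsCellVisibility(Admin admin) throws Exception {
    List<SecurityCapability> caps = admin.getSecurityCapabilities();
    // CELL_VISIBILITY corresponds to proto value 4 in the switch above.
    return caps.contains(SecurityCapability.CELL_VISIBILITY);
  }
}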
*/ - static void populateConnectionAttributes( - final Map, Object> attributes, - final AsyncConnectionImpl conn - ) { - final Supplier connStringSupplier = () -> conn.getConnectionRegistry() - .getConnectionString(); + static void populateConnectionAttributes(final Map, Object> attributes, + final AsyncConnectionImpl conn) { + final Supplier connStringSupplier = + () -> conn.getConnectionRegistry().getConnectionString(); populateConnectionAttributes(attributes, connStringSupplier, conn::getUser); } /** * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. - * @param attributes the attributes map to be populated. + * @param attributes the attributes map to be populated. * @param connectionStringSupplier the source of the {@code db.connection_string} attribute value. - * @param userSupplier the source of the {@code db.user} attribute value. + * @param userSupplier the source of the {@code db.user} attribute value. */ - static void populateConnectionAttributes( - final Map, Object> attributes, - final Supplier connectionStringSupplier, - final Supplier userSupplier - ) { + static void populateConnectionAttributes(final Map, Object> attributes, + final Supplier connectionStringSupplier, final Supplier userSupplier) { attributes.put(DB_SYSTEM, DB_SYSTEM_VALUE); attributes.put(DB_CONNECTION_STRING, connectionStringSupplier.get()); - attributes.put(DB_USER, Optional.ofNullable(userSupplier.get()) - .map(Object::toString) - .orElse(null)); + attributes.put(DB_USER, + Optional.ofNullable(userSupplier.get()).map(Object::toString).orElse(null)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java index 07edbcb2807..546ce428fc4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.NET_PEER_NAME; @@ -23,6 +22,7 @@ import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.NET_PEER_POR import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_METHOD; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SERVICE; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SYSTEM; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -34,12 +34,14 @@ import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RpcSystem; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; /** * Construct {@link Span} instances originating from the client side of an IPC. 
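The span builders in these hunks all follow the same OpenTelemetry pattern: name the span, mark it CLIENT, then copy a prepared attribute map onto the SpanBuilder. A generic hedged sketch of that pattern using the plain OpenTelemetry API rather than HBase's internal TraceUtil; the tracer name and attribute values are placeholders.

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.SpanBuilder;
import io.opentelemetry.api.trace.SpanKind;

public final class ClientSpanSketch {
  static void tracedCall(Runnable clientCall) {
    SpanBuilder builder = GlobalOpenTelemetry.getTracer("example")
      .spanBuilder("Get default:my_table") // "<operation> <namespace:table>" style name
      .setSpanKind(SpanKind.CLIENT);
    builder.setAttribute(AttributeKey.stringKey("db.system"), "hbase");
    builder.setAttribute(AttributeKey.stringKey("db.name"), "default");
    Span span = builder.startSpan();
    try {
      clientCall.run(); // the actual RPC / table operation being traced
    } finally {
      span.end();
    }
  }
}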
- * - * @see Semantic conventions for RPC spans + * @see Semantic + * conventions for RPC spans */ @InterfaceAudience.Private public class IpcClientSpanBuilder implements Supplier { @@ -68,8 +70,7 @@ public class IpcClientSpanBuilder implements Supplier { @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(SpanKind.CLIENT); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); @@ -80,12 +81,10 @@ public class IpcClientSpanBuilder implements Supplier { * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. * @param attributes the attributes map to be populated. - * @param md the source of the RPC attribute values. + * @param md the source of the RPC attribute values. */ - static void populateMethodDescriptorAttributes( - final Map, Object> attributes, - final Descriptors.MethodDescriptor md - ) { + static void populateMethodDescriptorAttributes(final Map, Object> attributes, + final Descriptors.MethodDescriptor md) { final String packageAndService = getRpcPackageAndService(md.getService()); final String method = getRpcName(md); attributes.put(RPC_SYSTEM, RpcSystem.HBASE_RPC.name()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java index 1e8f992c7a4..95fca449f01 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.CONTAINER_DB_OPERATIONS_KEY; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_OPERATION; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -101,8 +101,7 @@ public class TableOperationSpanBuilder implements Supplier { // contained within the provided "batch" object. 
public TableOperationSpanBuilder setContainerOperations(final RowMutations mutations) { - final Operation[] ops = mutations.getMutations() - .stream() + final Operation[] ops = mutations.getMutations().stream() .flatMap(row -> Stream.concat(Stream.of(valueFrom(row)), unpackRowOperations(row).stream())) .toArray(Operation[]::new); return setContainerOperations(ops); @@ -111,13 +110,12 @@ public class TableOperationSpanBuilder implements Supplier { public TableOperationSpanBuilder setContainerOperations(final Row row) { final Operation[] ops = Stream.concat(Stream.of(valueFrom(row)), unpackRowOperations(row).stream()) - .toArray(Operation[]::new); + .toArray(Operation[]::new); return setContainerOperations(ops); } - public TableOperationSpanBuilder setContainerOperations( - final Collection operations - ) { + public TableOperationSpanBuilder + setContainerOperations(final Collection operations) { final Operation[] ops = operations.stream() .flatMap(row -> Stream.concat(Stream.of(valueFrom(row)), unpackRowOperations(row).stream())) .toArray(Operation[]::new); @@ -132,10 +130,8 @@ public class TableOperationSpanBuilder implements Supplier { } if (row instanceof RowMutations) { final RowMutations mutations = (RowMutations) row; - final List operations = mutations.getMutations() - .stream() - .map(TableOperationSpanBuilder::valueFrom) - .collect(Collectors.toList()); + final List operations = mutations.getMutations().stream() + .map(TableOperationSpanBuilder::valueFrom).collect(Collectors.toList()); ops.addAll(operations); } return ops; @@ -155,14 +151,9 @@ public class TableOperationSpanBuilder implements Supplier { return ops; } - public TableOperationSpanBuilder setContainerOperations( - final Operation... operations - ) { - final List ops = Arrays.stream(operations) - .map(op -> op == null ? unknown : op.name()) - .sorted() - .distinct() - .collect(Collectors.toList()); + public TableOperationSpanBuilder setContainerOperations(final Operation... operations) { + final List ops = Arrays.stream(operations).map(op -> op == null ? unknown : op.name()) + .sorted().distinct().collect(Collectors.toList()); attributes.put(CONTAINER_DB_OPERATIONS_KEY, ops); return this; } @@ -175,11 +166,9 @@ public class TableOperationSpanBuilder implements Supplier { @SuppressWarnings("unchecked") public Span build() { - final String name = attributes.getOrDefault(DB_OPERATION, unknown) - + " " - + (tableName != null ? tableName.getNameWithNamespaceInclAsString() : unknown); - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final String name = attributes.getOrDefault(DB_OPERATION, unknown) + " " + + (tableName != null ? tableName.getNameWithNamespaceInclAsString() : unknown); + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(SpanKind.CLIENT); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java index 437c70b409f..f009dfa2b48 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_NAME; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.TABLE_KEY; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -73,8 +73,7 @@ public class TableSpanBuilder implements Supplier { @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(spanKind); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); @@ -85,12 +84,10 @@ public class TableSpanBuilder implements Supplier { * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. * @param attributes the attributes map to be populated. - * @param tableName the source of attribute values. + * @param tableName the source of attribute values. */ - static void populateTableNameAttributes( - final Map, Object> attributes, - final TableName tableName - ) { + static void populateTableNameAttributes(final Map, Object> attributes, + final TableName tableName) { attributes.put(DB_NAME, tableName.getNamespaceAsString()); attributes.put(TABLE_KEY, tableName.getNameAsString()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java index b7ae64e2651..5ed2aba2a19 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,39 +15,30 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import com.google.protobuf.Message; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import com.google.protobuf.Message; - /** - * Defines how value for specific column is interpreted and provides utility - * methods like compare, add, multiply etc for them. Takes column family, column - * qualifier and return the cell value. Its concrete implementation should - * handle null case gracefully. - * Refer to {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter} - * for an example. + * Defines how value for specific column is interpreted and provides utility methods like compare, + * add, multiply etc for them. Takes column family, column qualifier and return the cell value. Its + * concrete implementation should handle null case gracefully. Refer to + * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter} for an example. *

    - * Takes two generic parameters and three Message parameters. - * The cell value type of the interpreter is <T>. - * During some computations like sum, average, the return type can be different - * than the cell value data type, for eg, sum of int cell values might overflow - * in case of a int result, we should use Long for its result. Therefore, this - * class mandates to use a different (promoted) data type for result of these - * computations <S>. All computations are performed on the promoted data type - * <S>. There is a conversion method - * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and - * returns a <S> type. - * The AggregateIm>lementation uses PB messages to initialize the - * user's ColumnInterpreter implementation, and for sending the responses - * back to AggregationClient. + * Takes two generic parameters and three Message parameters. The cell value type of the interpreter + * is <T>. During some computations like sum, average, the return type can be different than + * the cell value data type, for eg, sum of int cell values might overflow in case of a int result, + * we should use Long for its result. Therefore, this class mandates to use a different (promoted) + * data type for result of these computations <S>. All computations are performed on the + * promoted data type <S>. There is a conversion method + * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and returns a + * <S> type. The AggregateIm>lementation uses PB messages to initialize the user's + * ColumnInterpreter implementation, and for sending the responses back to AggregationClient. * @param T Cell value data type * @param S Promoted data type * @param P PB message that is used to transport initializer specific bytes @@ -57,31 +47,21 @@ import com.google.protobuf.Message; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public abstract class ColumnInterpreter { +public abstract class ColumnInterpreter { /** - * - * @param colFamily - * @param colQualifier - * @param c - * @return value of type T - * @throws IOException + * nnn * @return value of type T n */ - public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) - throws IOException; + public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException; /** - * @param l1 - * @param l2 - * @return sum or non null value among (if either of them is null); otherwise - * returns a null. + * nn * @return sum or non null value among (if either of them is null); otherwise returns a null. */ public abstract S add(S l1, S l2); /** - * returns the maximum value for this type T - * @return max + * returns the maximum value for this type T n */ public abstract T getMaxValue(); @@ -89,28 +69,22 @@ Q extends Message, R extends Message> { public abstract T getMinValue(); /** - * @param o1 - * @param o2 - * @return multiplication + * nnn */ public abstract S multiply(S o1, S o2); /** - * @param o - * @return increment + * nn */ public abstract S increment(S o); /** - * provides casting opportunity between the data types. - * @param o - * @return cast + * provides casting opportunity between the data types. nn */ public abstract S castToReturnType(T o); /** - * This takes care if either of arguments are null. returns 0 if they are - * equal or both are null; + * This takes care if either of arguments are null. returns 0 if they are equal or both are null; *

      *
    * > 0 if l1 > l2 or l1 is not null and l2 is null.
    *
    * < 0 if l1 < l2 or l1 is null and l2 is not null.
    • @@ -119,65 +93,54 @@ Q extends Message, R extends Message> { public abstract int compare(final T l1, final T l2); /** - * used for computing average of <S> data values. Not providing the divide - * method that takes two <S> values as it is not needed as of now. - * @param o - * @param l - * @return Average + * used for computing average of <S> data values. Not providing the divide method that takes + * two <S> values as it is not needed as of now. nnn */ public abstract double divideForAvg(S o, Long l); /** - * This method should return any additional data that is needed on the - * server side to construct the ColumnInterpreter. The server - * will pass this to the {@link #initialize} - * method. If there is no ColumnInterpreter specific data (for e.g., - * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter}) - * then null should be returned. + * This method should return any additional data that is needed on the server side to construct + * the ColumnInterpreter. The server will pass this to the {@link #initialize} method. If there is + * no ColumnInterpreter specific data (for e.g., + * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter}) then null should be + * returned. * @return the PB message */ public abstract P getRequestData(); /** - * This method should initialize any field(s) of the ColumnInterpreter with - * a parsing of the passed message bytes (used on the server side). - * @param msg + * This method should initialize any field(s) of the ColumnInterpreter with a parsing of the + * passed message bytes (used on the server side). n */ public abstract void initialize(P msg); - + /** - * This method gets the PB message corresponding to the cell type - * @param t - * @return the PB message for the cell-type instance + * This method gets the PB message corresponding to the cell type n * @return the PB message for + * the cell-type instance */ public abstract Q getProtoForCellType(T t); /** - * This method gets the PB message corresponding to the cell type - * @param q - * @return the cell-type instance from the PB message + * This method gets the PB message corresponding to the cell type n * @return the cell-type + * instance from the PB message */ public abstract T getCellValueFromProto(Q q); /** - * This method gets the PB message corresponding to the promoted type - * @param s - * @return the PB message for the promoted-type instance + * This method gets the PB message corresponding to the promoted type n * @return the PB message + * for the promoted-type instance */ public abstract R getProtoForPromotedType(S s); /** - * This method gets the promoted type from the proto message - * @param r - * @return the promoted-type instance from the PB message + * This method gets the promoted type from the proto message n * @return the promoted-type + * instance from the PB message */ public abstract S getPromotedValueFromProto(R r); /** - * The response message comes as type S. This will convert/cast it to T. - * In some sense, performs the opposite of {@link #castToReturnType(Object)} - * @param response - * @return cast + * The response message comes as type S. This will convert/cast it to T. 
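The ColumnInterpreter contract walked through above (cell type T, promoted type S, and PB messages P/Q/R for transport) is easiest to see through its reference implementation, LongColumnInterpreter, driven by the AggregationClient. A hedged sketch; AggregationClient lives in the hbase-endpoint module, it assumes the AggregateImplementation coprocessor is loaded on the target table, and the table/column names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public final class AggregationSketch {
  // AggregationClient#sum declares "throws Throwable", so this sketch simply propagates it.
  static long sumAmounts(Configuration conf) throws Throwable {
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("amount")); // values are 8-byte longs
    AggregationClient aggregationClient = new AggregationClient(conf);
    try {
      // For LongColumnInterpreter both T and S are Long, so the promoted sum comes back as a Long.
      return aggregationClient.sum(TableName.valueOf("my_table"), new LongColumnInterpreter(),
        scan);
    } finally {
      aggregationClient.close();
    }
  }
}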
In some sense, performs + * the opposite of {@link #castToReturnType(Object)} nn */ public abstract T castToCellType(S response); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java index 7b1ac43c4bf..ff9ed066fd4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +33,10 @@ public class CoprocessorException extends DoNotRetryIOException { } /** - * Constructor with a Class object and exception message. - * @param clazz - * @param s + * Constructor with a Class object and exception message. nn */ public CoprocessorException(Class clazz, String s) { - super( "Coprocessor [" + clazz.getName() + "]: " + s); + super("Coprocessor [" + clazz.getName() + "]: " + s); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java index 0e40e97eee1..71999ad269f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.exceptions; import java.io.EOFException; @@ -48,7 +46,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; @InterfaceStability.Evolving public final class ClientExceptionsUtil { - private ClientExceptionsUtil() {} + private ClientExceptionsUtil() { + } public static boolean isMetaClearingException(Throwable cur) { cur = findException(cur); @@ -57,25 +56,21 @@ public final class ClientExceptionsUtil { return true; } return !isSpecialException(cur) || (cur instanceof RegionMovedException) - || cur instanceof NotServingRegionException; + || cur instanceof NotServingRegionException; } public static boolean isSpecialException(Throwable cur) { return (cur instanceof RegionMovedException || cur instanceof RegionOpeningException - || cur instanceof RegionTooBusyException || cur instanceof RpcThrottlingException - || cur instanceof MultiActionResultTooLarge || cur instanceof RetryImmediatelyException - || cur instanceof CallQueueTooBigException || cur instanceof CallDroppedException - || cur instanceof NotServingRegionException || cur instanceof RequestTooBigException); + || cur instanceof RegionTooBusyException || cur instanceof RpcThrottlingException + || cur instanceof MultiActionResultTooLarge || cur instanceof RetryImmediatelyException + || cur instanceof CallQueueTooBigException || cur instanceof CallDroppedException + || cur instanceof NotServingRegionException || cur instanceof RequestTooBigException); } - /** - * Look for an exception we know in the remote exception: - * - hadoop.ipc wrapped exceptions - * - nested exceptions - * - * Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException / - * RpcThrottlingException + * Look for an exception we know in the remote exception: - hadoop.ipc wrapped exceptions - nested + * exceptions Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException / + * RpcThrottlingException * @return null if we didn't find the exception, the exception otherwise. */ public static Throwable findException(Object exception) { @@ -92,7 +87,7 @@ public final class ClientExceptionsUtil { cur = re.unwrapRemoteException(); // unwrapRemoteException can return the exception given as a parameter when it cannot - // unwrap it. In this case, there is no need to look further + // unwrap it. In this case, there is no need to look further // noinspection ObjectEquality if (cur == re) { return cur; @@ -150,8 +145,7 @@ public final class ClientExceptionsUtil { /** * Translates exception for preemptive fast fail checks. * @param t exception to check - * @return translated exception - * @throws IOException + * @return translated exception n */ public static Throwable translatePFFE(Throwable t) throws IOException { if (t instanceof NoSuchMethodError) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java index 54ad092b2d9..287f1eec0cb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
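findException above unwraps hadoop-ipc RemoteExceptions and nested causes down to the handful of "special" exceptions the retrying caller cares about. A hedged sketch of the calling pattern; the branch bodies are illustrative, not the client's actual retry bookkeeping.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
import org.apache.hadoop.hbase.exceptions.RegionMovedException;

public final class FindExceptionSketch {
  static void classify(Throwable fromServer) {
    Throwable cause = ClientExceptionsUtil.findException(fromServer);
    if (cause instanceof RegionMovedException) {
      // Location cache is stale: update it and retry immediately.
    } else if (cause instanceof RegionTooBusyException) {
      // Region is overloaded: back off before retrying.
    } else if (cause != null && ClientExceptionsUtil.isMetaClearingException(cause)) {
      // Anything else that clears cached locations forces a fresh meta lookup.
    }
  }
}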
See the NOTICE file * distributed with this work for additional information @@ -36,9 +36,8 @@ public class ConnectionClosedException extends HBaseIOException { /** * ConnectionClosedException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ public ConnectionClosedException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java index ab20cbc9abf..fa3c4a9aeb1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,16 +37,13 @@ package org.apache.hadoop.hbase.exceptions; */ import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** -* Thrown when the client believes that we are trying to communicate to has -* been repeatedly unresponsive for a while. -* -* On receiving such an exception. The ConnectionManager will skip all -* retries and fast fail the operation. -*/ + * Thrown when the client believes that we are trying to communicate to has been repeatedly + * unresponsive for a while. On receiving such an exception. The ConnectionManager will skip all + * retries and fast fail the operation. + */ @InterfaceAudience.Public public class ConnectionClosingException extends IOException { public ConnectionClosingException(String string) { @@ -55,9 +52,8 @@ public class ConnectionClosingException extends IOException { /** * ConnectionClosingException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ public ConnectionClosingException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java index cbd931ecb1e..ae15777a7f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,14 @@ public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRet } /** - * @param message + * n */ public FailedSanityCheckException(String message) { super(message); } /** - * @param message - * @param cause + * nn */ public FailedSanityCheckException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java index ca80ed565a2..2b55190b636 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java @@ -35,6 +35,6 @@ public class MasterRegistryFetchException extends HBaseIOException { public MasterRegistryFetchException(Set masters, Throwable failure) { super(String.format("Exception making rpc to masters %s", PrettyPrinter.toString(masters)), - failure); + failure); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java index 1ed5b55410f..e53b1a7fc2a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java index 5399f07cb56..3d2d0db083a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,8 @@ */ package org.apache.hadoop.hbase.exceptions; -import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hadoop.hbase.client.DoNotRetryRegionException; - +import org.apache.yetus.audience.InterfaceAudience; /** * Thrown when something is wrong in trying to merge two regions. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java index 545a7f10702..58f80898981 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java index 0b4db8df66b..6d28f3288fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java @@ -1,34 +1,29 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase.exceptions; import java.net.ConnectException; - import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when the client believes that we are trying to communicate to has - * been repeatedly unresponsive for a while. - * - * On receiving such an exception. The ConnectionManager will skip all + * Thrown when the client believes that we are trying to communicate to has been repeatedly + * unresponsive for a while. On receiving such an exception. The ConnectionManager will skip all * retries and fast fail the operation. * @deprecated since 2.3.0, and will be removed in 4.0.0. 
*/ @@ -42,13 +37,13 @@ public class PreemptiveFastFailException extends ConnectException { private boolean guaranteedClientSideOnly; /** - * @param count num of consecutive failures - * @param timeOfFirstFailureMilliSec when first failure happened + * @param count num of consecutive failures + * @param timeOfFirstFailureMilliSec when first failure happened * @param timeOfLatestAttemptMilliSec when last attempt happened - * @param serverName server we failed to connect to + * @param serverName server we failed to connect to */ public PreemptiveFastFailException(long count, long timeOfFirstFailureMilliSec, - long timeOfLatestAttemptMilliSec, ServerName serverName) { + long timeOfLatestAttemptMilliSec, ServerName serverName) { super("Exception happened " + count + " times. to" + serverName); this.failureCount = count; this.timeOfFirstFailureMilliSec = timeOfFirstFailureMilliSec; @@ -56,16 +51,15 @@ public class PreemptiveFastFailException extends ConnectException { } /** - * @param count num of consecutive failures - * @param timeOfFirstFailureMilliSec when first failure happened + * @param count num of consecutive failures + * @param timeOfFirstFailureMilliSec when first failure happened * @param timeOfLatestAttemptMilliSec when last attempt happened - * @param serverName server we failed to connect to - * @param guaranteedClientSideOnly if true, guarantees that no mutations - * have been applied on the server + * @param serverName server we failed to connect to + * @param guaranteedClientSideOnly if true, guarantees that no mutations have been applied on + * the server */ public PreemptiveFastFailException(long count, long timeOfFirstFailureMilliSec, - long timeOfLatestAttemptMilliSec, ServerName serverName, - boolean guaranteedClientSideOnly) { + long timeOfLatestAttemptMilliSec, ServerName serverName, boolean guaranteedClientSideOnly) { super("Exception happened " + count + " times. to" + serverName); this.failureCount = count; this.timeOfFirstFailureMilliSec = timeOfFirstFailureMilliSec; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java index e79c138e703..2dcdb13ab5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,8 +26,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Subclass if the server knows the region is now on another server. - * This allows the client to call the new region server without calling the master. + * Subclass if the server knows the region is now on another server. This allows the client to call + * the new region server without calling the master. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -45,7 +45,6 @@ public class RegionMovedException extends NotServingRegionException { private static final String STARTCODE_FIELD = "startCode="; private static final String LOCATIONSEQNUM_FIELD = "locationSeqNum="; - public RegionMovedException(ServerName serverName, long locationSeqNum) { this.hostname = serverName.getHostname(); this.port = serverName.getPort(); @@ -61,7 +60,7 @@ public class RegionMovedException extends NotServingRegionException { return port; } - public ServerName getServerName(){ + public ServerName getServerName() { return ServerName.valueOf(hostname, port, startCode); } @@ -70,9 +69,8 @@ public class RegionMovedException extends NotServingRegionException { } /** - * For hadoop.ipc internal call. Do NOT use. - * We have to parse the hostname to recreate the exception. - * The input is the one generated by {@link #getMessage()} + * For hadoop.ipc internal call. Do NOT use. We have to parse the hostname to recreate the + * exception. The input is the one generated by {@link #getMessage()} */ public RegionMovedException(String s) { int posHostname = s.indexOf(HOST_FIELD) + HOST_FIELD.length(); @@ -88,11 +86,11 @@ public class RegionMovedException extends NotServingRegionException { // TODO: this whole thing is extremely brittle. tmpHostname = s.substring(posHostname, s.indexOf(' ', posHostname)); tmpPort = Integer.parseInt(s.substring(posPort, s.indexOf(' ', posPort))); - tmpStartCode = Long.parseLong(s.substring(posStartCode, s.indexOf('.', posStartCode))); + tmpStartCode = Long.parseLong(s.substring(posStartCode, s.indexOf('.', posStartCode))); tmpSeqNum = Long.parseLong(s.substring(posSeqNum, s.indexOf('.', posSeqNum))); } catch (Exception ignored) { - LOG.warn("Can't parse the hostname, port and startCode from this string: " + - s + ", continuing"); + LOG.warn( + "Can't parse the hostname, port and startCode from this string: " + s + ", continuing"); } hostname = tmpHostname; @@ -105,7 +103,7 @@ public class RegionMovedException extends NotServingRegionException { public String getMessage() { // TODO: deserialization above depends on this. That is bad, but also means this // should be modified carefully. - return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + " " + - STARTCODE_FIELD + startCode + ". As of " + LOCATIONSEQNUM_FIELD + locationSeqNum + "."; + return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + " " + + STARTCODE_FIELD + startCode + ". As of " + LOCATIONSEQNUM_FIELD + locationSeqNum + "."; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java index a854b996ae9..347c6b987a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java @@ -1,5 +1,4 @@ /* -/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,8 +24,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Subclass if the server knows the region is now on another server. - * This allows the client to call the new region server without calling the master. + * Subclass if the server knows the region is now on another server. 
This allows the client to call + * the new region server without calling the master. */ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java index ae94823f0d4..56eec36c67e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when the size of the rpc request received by the server is too large. - * - * On receiving such an exception, the client does not retry the offending rpc. + * Thrown when the size of the rpc request received by the server is too large. On receiving such an + * exception, the client does not retry the offending rpc. * @since 1.3.0 */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java index 548772857c6..9ee7750c573 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java index dbcfa7efa18..2e19d5bc004 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.exceptions; import org.apache.yetus.audience.InterfaceAudience; @@ -35,7 +33,7 @@ public class UnknownProtocolException extends org.apache.hadoop.hbase.DoNotRetry } public UnknownProtocolException(Class protocol) { - this(protocol, "Server is not handling protocol "+protocol.getName()); + this(protocol, "Server is not handling protocol " + protocol.getName()); } public UnknownProtocolException(Class protocol, String mesg) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java index a33da473af1..bfd285975ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.util.Objects; - import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** * A BigDecimal comparator which numerical compares against the specified byte array */ @@ -80,7 +78,7 @@ public class BigDecimalComparator extends ByteArrayComparable { @Override public byte[] toByteArray() { ComparatorProtos.BigDecimalComparator.Builder builder = - ComparatorProtos.BigDecimalComparator.newBuilder(); + ComparatorProtos.BigDecimalComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); return builder.build().toByteArray(); } @@ -92,15 +90,15 @@ public class BigDecimalComparator extends ByteArrayComparable { * @see #toByteArray */ public static BigDecimalComparator parseFrom(final byte[] pbBytes) - throws DeserializationException { + throws DeserializationException { ComparatorProtos.BigDecimalComparator proto; try { proto = ComparatorProtos.BigDecimalComparator.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new BigDecimalComparator(Bytes.toBigDecimal(proto.getComparable().getValue() - .toByteArray())); + return new BigDecimalComparator( + Bytes.toBigDecimal(proto.getComparable().getValue().toByteArray())); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java index 10be12a7896..0c8274a8611 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java @@ -1,5 +1,4 @@ /* 
- * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,23 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A binary comparator which lexicographically compares against the specified - * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. + * A binary comparator which lexicographically compares against the specified byte array using + * {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. * @since 2.0.0 */ @InterfaceAudience.Public @@ -47,7 +45,7 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo } @Override - public int compareTo(byte [] value, int offset, int length) { + public int compareTo(byte[] value, int offset, int length) { return Bytes.compareTo(this.value, 0, this.value.length, value, offset, length); } @@ -60,7 +58,7 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { ComparatorProtos.BinaryComparator.Builder builder = ComparatorProtos.BinaryComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); @@ -69,12 +67,10 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo /** * @param pbBytes A pb serialized {@link BinaryComparator} instance - * @return An instance of {@link BinaryComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link BinaryComparator} made from bytes n * @see + * #toByteArray */ - public static BinaryComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static BinaryComparator parseFrom(final byte[] pbBytes) throws DeserializationException { ComparatorProtos.BinaryComparator proto; try { proto = ComparatorProtos.BinaryComparator.parseFrom(pbBytes); @@ -85,9 +81,8 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. 
*/ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java index f78f79e419f..c479d757d1c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import com.google.protobuf.ByteString; @@ -27,18 +25,16 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * A comparator which compares against a specified byte array, but only - * compares specific portion of the byte array. For the rest it is similar to - * {@link BinaryComparator}. + * A comparator which compares against a specified byte array, but only compares specific portion of + * the byte array. For the rest it is similar to {@link BinaryComparator}. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") public class BinaryComponentComparator extends ByteArrayComparable { - private int offset; //offset of component from beginning. + private int offset; // offset of component from beginning. /** * Constructor - * * @param value value of the component * @param offset offset of the component from begining */ @@ -55,20 +51,19 @@ public class BinaryComponentComparator extends ByteArrayComparable { @Override public int compareTo(byte[] value, int offset, int length) { return Bytes.compareTo(this.value, 0, this.value.length, value, offset + this.offset, - this.value.length); + this.value.length); } @Override public boolean equals(Object other) { - if (other == this){ + if (other == this) { return true; } - if (!(other instanceof BinaryComponentComparator)){ + if (!(other instanceof BinaryComponentComparator)) { return false; } - BinaryComponentComparator bcc = (BinaryComponentComparator)other; - return offset == bcc.offset && - (compareTo(bcc.value) == 0); + BinaryComponentComparator bcc = (BinaryComponentComparator) other; + return offset == bcc.offset && (compareTo(bcc.value) == 0); } @Override @@ -84,7 +79,7 @@ public class BinaryComponentComparator extends ByteArrayComparable { @Override public byte[] toByteArray() { ComparatorProtos.BinaryComponentComparator.Builder builder = - ComparatorProtos.BinaryComponentComparator.newBuilder(); + ComparatorProtos.BinaryComponentComparator.newBuilder(); builder.setValue(ByteString.copyFrom(this.value)); builder.setOffset(this.offset); return builder.build().toByteArray(); @@ -97,7 +92,7 @@ public class BinaryComponentComparator extends ByteArrayComparable { * @see #toByteArray */ public static BinaryComponentComparator parseFrom(final byte[] pbBytes) - throws DeserializationException { + throws DeserializationException { ComparatorProtos.BinaryComponentComparator proto; try { proto = ComparatorProtos.BinaryComponentComparator.parseFrom(pbBytes); @@ -109,15 +104,15 @@ public class BinaryComponentComparator extends ByteArrayComparable { /** * @param other paramemter to compare against - * @return true if and only if the fields of the 
comparator that are - * serialized are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this){ + if (other == this) { return true; } - if (!(other instanceof BinaryComponentComparator)){ + if (!(other instanceof BinaryComponentComparator)) { return false; } return super.areSerializedFieldsEqual(other); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java index 0c06b4957ae..f97fd070be6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A comparator which compares against a specified byte array, but only compares - * up to the length of this byte array. For the rest it is similar to - * {@link BinaryComparator}. + * A comparator which compares against a specified byte array, but only compares up to the length of + * this byte array. For the rest it is similar to {@link BinaryComparator}. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") // Should this move to Comparator usage? @@ -48,9 +45,9 @@ public class BinaryPrefixComparator extends ByteArrayComparable { } @Override - public int compareTo(byte [] value, int offset, int length) { + public int compareTo(byte[] value, int offset, int length) { return Bytes.compareTo(this.value, 0, this.value.length, value, offset, - this.value.length <= length ? this.value.length : length); + this.value.length <= length ? 
this.value.length : length); } @Override @@ -65,7 +62,7 @@ public class BinaryPrefixComparator extends ByteArrayComparable { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { ComparatorProtos.BinaryPrefixComparator.Builder builder = ComparatorProtos.BinaryPrefixComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); @@ -74,12 +71,11 @@ public class BinaryPrefixComparator extends ByteArrayComparable { /** * @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance - * @return An instance of {@link BinaryPrefixComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link BinaryPrefixComparator} made from bytes n * @see + * #toByteArray */ - public static BinaryPrefixComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static BinaryPrefixComparator parseFrom(final byte[] pbBytes) + throws DeserializationException { ComparatorProtos.BinaryPrefixComparator proto; try { proto = ComparatorProtos.BinaryPrefixComparator.parseFrom(pbBytes); @@ -90,9 +86,8 @@ public class BinaryPrefixComparator extends ByteArrayComparable { } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java index bb31e9d2a90..15ca8890aba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,21 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A bit comparator which performs the specified bitwise operation on each of the bytes - * with the specified byte array. Then returns whether the result is non-zero. + * A bit comparator which performs the specified bitwise operation on each of the bytes with the + * specified byte array. Then returns whether the result is non-zero. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") // Should this move to Comparator usage? 
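To illustrate the BitComparator semantics described in the Javadoc above, here is a minimal client-side sketch (not part of this patch; the table name "flags", family "f", and the one-byte bitmask layout are assumptions). Because BitComparator treats a non-zero bitwise result as a match, CompareOperator.EQUAL together with BitwiseOp.AND keeps cells whose stored byte has the masked bit set.

```java
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BitComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;

public class BitComparatorSketch {
  public static void main(String[] args) throws Exception {
    // Assumed layout: table "flags" stores a one-byte bitmask per cell in family "f".
    byte[] mask = new byte[] { 0x04 }; // the flag bit we care about
    try (Connection conn = ConnectionFactory.createConnection();
      Table table = conn.getTable(TableName.valueOf("flags"))) {
      // BitComparator ANDs each cell value with the mask; a non-zero result counts as a match,
      // so CompareOperator.EQUAL keeps only cells whose flag bit is set.
      Scan scan = new Scan().setFilter(new ValueFilter(CompareOperator.EQUAL,
        new BitComparator(mask, BitComparator.BitwiseOp.AND)));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(r);
        }
      }
    }
  }
}
```

Note that the comparator value and the cell value must be the same length for the comparison to match, which is why the sketch assumes one-byte values.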
@@ -46,11 +44,12 @@ public class BitComparator extends ByteArrayComparable { /** xor */ XOR } + protected BitwiseOp bitOperator; /** * Constructor - * @param value value + * @param value value * @param bitOperator operator to use on the bit comparison */ public BitComparator(byte[] value, BitwiseOp bitOperator) { @@ -69,9 +68,8 @@ public class BitComparator extends ByteArrayComparable { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { - ComparatorProtos.BitComparator.Builder builder = - ComparatorProtos.BitComparator.newBuilder(); + public byte[] toByteArray() { + ComparatorProtos.BitComparator.Builder builder = ComparatorProtos.BitComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); ComparatorProtos.BitComparator.BitwiseOp bitwiseOpPb = ComparatorProtos.BitComparator.BitwiseOp.valueOf(bitOperator.name()); @@ -81,12 +79,9 @@ public class BitComparator extends ByteArrayComparable { /** * @param pbBytes A pb serialized {@link BitComparator} instance - * @return An instance of {@link BitComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link BitComparator} made from bytes n * @see #toByteArray */ - public static BitComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static BitComparator parseFrom(final byte[] pbBytes) throws DeserializationException { ComparatorProtos.BitComparator proto; try { proto = ComparatorProtos.BitComparator.parseFrom(pbBytes); @@ -94,20 +89,19 @@ public class BitComparator extends ByteArrayComparable { throw new DeserializationException(e); } BitwiseOp bitwiseOp = BitwiseOp.valueOf(proto.getBitwiseOp().name()); - return new BitComparator(proto.getComparable().getValue().toByteArray(),bitwiseOp); + return new BitComparator(proto.getComparable().getValue().toByteArray(), bitwiseOp); } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof BitComparator)) return false; - BitComparator comparator = (BitComparator)other; + BitComparator comparator = (BitComparator) other; return super.areSerializedFieldsEqual(other) && this.getOperator().equals(comparator.getOperator()); } @@ -118,17 +112,17 @@ public class BitComparator extends ByteArrayComparable { return 1; } int b = 0; - //Iterating backwards is faster because we can quit after one non-zero byte. + // Iterating backwards is faster because we can quit after one non-zero byte. for (int i = length - 1; i >= 0 && b == 0; i--) { switch (bitOperator) { case AND: - b = (this.value[i] & value[i+offset]) & 0xff; + b = (this.value[i] & value[i + offset]) & 0xff; break; case OR: - b = (this.value[i] | value[i+offset]) & 0xff; + b = (this.value[i] | value[i + offset]) & 0xff; break; case XOR: - b = (this.value[i] ^ value[i+offset]) & 0xff; + b = (this.value[i] ^ value[i + offset]) & 0xff; break; } } @@ -141,7 +135,7 @@ public class BitComparator extends ByteArrayComparable { return 1; } int b = 0; - //Iterating backwards is faster because we can quit after one non-zero byte. 
+ // Iterating backwards is faster because we can quit after one non-zero byte. for (int i = length - 1; i >= 0 && b == 0; i--) { switch (bitOperator) { case AND: @@ -158,4 +152,3 @@ public class BitComparator extends ByteArrayComparable { return b == 0 ? 1 : 0; } } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java index f880d5f2aac..f1e5644fd8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * Simple filter that returns first N columns on row only. - * This filter was written to test filters in Get and as soon as it gets - * its quota of columns, {@link #filterAllRemaining()} returns true. This - * makes this filter unsuitable as a Scan filter. + * Simple filter that returns first N columns on row only. This filter was written to test filters + * in Get and as soon as it gets its quota of columns, {@link #filterAllRemaining()} returns true. + * This makes this filter unsuitable as a Scan filter. 
*/ @InterfaceAudience.Public public class ColumnCountGetFilter extends FilterBase { @@ -80,9 +77,9 @@ public class ColumnCountGetFilter extends FilterBase { this.count = 0; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); return new ColumnCountGetFilter(limit); } @@ -91,7 +88,7 @@ public class ColumnCountGetFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.ColumnCountGetFilter.Builder builder = FilterProtos.ColumnCountGetFilter.newBuilder(); builder.setLimit(this.limit); @@ -103,8 +100,8 @@ public class ColumnCountGetFilter extends FilterBase { * @return An instance of {@link ColumnCountGetFilter} made from bytes * @see #toByteArray */ - public static ColumnCountGetFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnCountGetFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.ColumnCountGetFilter proto; try { proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes); @@ -116,15 +113,15 @@ public class ColumnCountGetFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof ColumnCountGetFilter)) return false; - ColumnCountGetFilter other = (ColumnCountGetFilter)o; + ColumnCountGetFilter other = (ColumnCountGetFilter) o; return this.getLimit() == other.getLimit(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java index 5d5321b6909..f11005e43d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,24 +20,24 @@ package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. - * This filter can be used for row-based indexing, where references to other tables are stored across many columns, - * in order to efficient lookups and paginated results for end users. Only most recent versions are considered - * for pagination. + * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. This filter + * can be used for row-based indexing, where references to other tables are stored across many + * columns, in order to efficient lookups and paginated results for end users. Only most recent + * versions are considered for pagination. */ @InterfaceAudience.Public public class ColumnPaginationFilter extends FilterBase { @@ -49,16 +48,13 @@ public class ColumnPaginationFilter extends FilterBase { private int count = 0; /** - * Initializes filter with an integer offset and limit. The offset is arrived at - * scanning sequentially and skipping entries. @limit number of columns are - * then retrieved. If multiple column families are involved, the columns may be spread - * across them. - * - * @param limit Max number of columns to return. + * Initializes filter with an integer offset and limit. The offset is arrived at scanning + * sequentially and skipping entries. @limit number of columns are then retrieved. If multiple + * column families are involved, the columns may be spread across them. + * @param limit Max number of columns to return. * @param offset The integer offset where to start pagination. */ - public ColumnPaginationFilter(final int limit, final int offset) - { + public ColumnPaginationFilter(final int limit, final int offset) { Preconditions.checkArgument(limit >= 0, "limit must be positive %s", limit); Preconditions.checkArgument(offset >= 0, "offset must be positive %s", offset); this.limit = limit; @@ -66,40 +62,38 @@ public class ColumnPaginationFilter extends FilterBase { } /** - * Initializes filter with a string/bookmark based offset and limit. The offset is arrived - * at, by seeking to it using scanner hints. If multiple column families are involved, - * pagination starts at the first column family which contains @columnOffset. Columns are - * then retrieved sequentially upto @limit number of columns which maybe spread across - * multiple column families, depending on how the scan is setup. - * - * @param limit Max number of columns to return. + * Initializes filter with a string/bookmark based offset and limit. The offset is arrived at, by + * seeking to it using scanner hints. 
If multiple column families are involved, pagination starts + * at the first column family which contains @columnOffset. Columns are then retrieved + * sequentially upto @limit number of columns which maybe spread across multiple column families, + * depending on how the scan is setup. + * @param limit Max number of columns to return. * @param columnOffset The string/bookmark offset on where to start pagination. */ public ColumnPaginationFilter(final int limit, final byte[] columnOffset) { Preconditions.checkArgument(limit >= 0, "limit must be positive %s", limit); - Preconditions.checkArgument(columnOffset != null, - "columnOffset must be non-null %s", - columnOffset); + Preconditions.checkArgument(columnOffset != null, "columnOffset must be non-null %s", + columnOffset); this.limit = limit; this.columnOffset = columnOffset; } /** - * @return limit + * n */ public int getLimit() { return limit; } /** - * @return offset + * n */ public int getOffset() { return offset; } /** - * @return columnOffset + * n */ public byte[] getColumnOffset() { return columnOffset; @@ -118,8 +112,7 @@ public class ColumnPaginationFilter extends FilterBase { } @Override - public ReturnCode filterCell(final Cell c) - { + public ReturnCode filterCell(final Cell c) { if (columnOffset != null) { if (count >= limit) { return ReturnCode.NEXT_ROW; @@ -140,8 +133,7 @@ public class ColumnPaginationFilter extends FilterBase { return ReturnCode.NEXT_ROW; } - ReturnCode code = count < offset ? ReturnCode.NEXT_COL : - ReturnCode.INCLUDE_AND_NEXT_COL; + ReturnCode code = count < offset ? ReturnCode.NEXT_COL : ReturnCode.INCLUDE_AND_NEXT_COL; count++; return code; } @@ -153,14 +145,13 @@ public class ColumnPaginationFilter extends FilterBase { } @Override - public void reset() - { + public void reset() { this.count = 0; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2, - "Expected 2 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 2, "Expected 2 but got: %s", + filterArguments.size()); int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); int offset = ParseFilter.convertByteArrayToInt(filterArguments.get(1)); return new ColumnPaginationFilter(limit, offset); @@ -170,7 +161,7 @@ public class ColumnPaginationFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.ColumnPaginationFilter.Builder builder = FilterProtos.ColumnPaginationFilter.newBuilder(); builder.setLimit(this.limit); @@ -185,12 +176,11 @@ public class ColumnPaginationFilter extends FilterBase { /** * @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance - * @return An instance of {@link ColumnPaginationFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link ColumnPaginationFilter} made from bytes n * @see + * #toByteArray */ - public static ColumnPaginationFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnPaginationFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.ColumnPaginationFilter proto; try { proto = FilterProtos.ColumnPaginationFilter.parseFrom(pbBytes); @@ -198,26 +188,25 @@ public class ColumnPaginationFilter extends FilterBase { throw new 
DeserializationException(e); } if (proto.hasColumnOffset()) { - return new ColumnPaginationFilter(proto.getLimit(), - proto.getColumnOffset().toByteArray()); + return new ColumnPaginationFilter(proto.getLimit(), proto.getColumnOffset().toByteArray()); } - return new ColumnPaginationFilter(proto.getLimit(),proto.getOffset()); + return new ColumnPaginationFilter(proto.getLimit(), proto.getOffset()); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof ColumnPaginationFilter)) return false; - ColumnPaginationFilter other = (ColumnPaginationFilter)o; + ColumnPaginationFilter other = (ColumnPaginationFilter) o; if (this.columnOffset != null) { - return this.getLimit() == other.getLimit() && - Bytes.equals(this.getColumnOffset(), other.getColumnOffset()); + return this.getLimit() == other.getLimit() + && Bytes.equals(this.getColumnOffset(), other.getColumnOffset()); } return this.getLimit() == other.getLimit() && this.getOffset() == other.getOffset(); } @@ -225,11 +214,10 @@ public class ColumnPaginationFilter extends FilterBase { @Override public String toString() { if (this.columnOffset != null) { - return (this.getClass().getSimpleName() + "(" + this.limit + ", " + - Bytes.toStringBinary(this.columnOffset) + ")"); + return (this.getClass().getSimpleName() + "(" + this.limit + ", " + + Bytes.toStringBinary(this.columnOffset) + ")"); } - return String.format("%s (%d, %d)", this.getClass().getSimpleName(), - this.limit, this.offset); + return String.format("%s (%d, %d)", this.getClass().getSimpleName(), this.limit, this.offset); } @Override @@ -239,7 +227,8 @@ public class ColumnPaginationFilter extends FilterBase { @Override public int hashCode() { - return columnOffset == null ? Objects.hash(this.limit, this.offset) : - Objects.hash(this.limit, Bytes.hashCode(this.columnOffset)); + return columnOffset == null + ? Objects.hash(this.limit, this.offset) + : Objects.hash(this.limit, Bytes.hashCode(this.columnOffset)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java index a016b8c29f4..86401564ed5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,35 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
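As a hedged usage sketch for the ColumnPaginationFilter changes above (the table name, row key, and bookmark qualifier are invented for illustration and are not part of this patch), both the integer-offset and the bookmark-based constructors can drive column pagination of a wide row:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnPaginationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
      Table table = conn.getTable(TableName.valueOf("wide_index"))) { // assumed table name
      int pageSize = 25;

      // Integer-offset flavor: skip the first 50 columns of the row, return the next 25.
      Get get = new Get(Bytes.toBytes("user#42")); // assumed row key
      get.setFilter(new ColumnPaginationFilter(pageSize, 50));
      Result page = table.get(get);
      System.out.println(page.size() + " columns in this page");

      // Bookmark flavor: start pagination at a known qualifier instead of counting columns.
      Get next = new Get(Bytes.toBytes("user#42"));
      next.setFilter(new ColumnPaginationFilter(pageSize, Bytes.toBytes("ref:000075")));
      System.out.println(table.get(next).size() + " columns from the bookmark");
    }
  }
}
```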
*/ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This filter is used for selecting only those keys with columns that matches - * a particular prefix. For example, if prefix is 'an', it will pass keys with - * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. + * This filter is used for selecting only those keys with columns that matches a particular prefix. + * For example, if prefix is 'an', it will pass keys with columns like 'and', 'anti' but not keys + * with columns like 'ball', 'act'. */ @InterfaceAudience.Public public class ColumnPrefixFilter extends FilterBase { - protected byte [] prefix = null; + protected byte[] prefix = null; - public ColumnPrefixFilter(final byte [] prefix) { + public ColumnPrefixFilter(final byte[] prefix) { this.prefix = prefix; } @@ -97,16 +95,16 @@ public class ColumnPrefixFilter extends FilterBase { private static int compareQualifierPart(Cell cell, int length, byte[] prefix) { if (cell instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) cell).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) cell).getQualifierPosition(), length, prefix, 0, length); + ((ByteBufferExtendedCell) cell).getQualifierPosition(), length, prefix, 0, length); } return Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), length, prefix, 0, - length); + length); } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); + byte[] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); return new ColumnPrefixFilter(columnPrefix); } @@ -114,9 +112,8 @@ public class ColumnPrefixFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.ColumnPrefixFilter.Builder builder = - FilterProtos.ColumnPrefixFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.ColumnPrefixFilter.Builder builder = FilterProtos.ColumnPrefixFilter.newBuilder(); if (this.prefix != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix)); return builder.build().toByteArray(); } @@ -127,8 +124,7 @@ public class ColumnPrefixFilter extends FilterBase { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static 
ColumnPrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnPrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ColumnPrefixFilter proto; try { proto = FilterProtos.ColumnPrefixFilter.parseFrom(pbBytes); @@ -140,15 +136,15 @@ public class ColumnPrefixFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnPrefixFilter)) return false; + if (o == this) return true; + if (!(o instanceof ColumnPrefixFilter)) return false; - ColumnPrefixFilter other = (ColumnPrefixFilter)o; + ColumnPrefixFilter other = (ColumnPrefixFilter) o; return Bytes.equals(this.getPrefix(), other.getPrefix()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java index ea4d7e910bf..dca4609763c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import static org.apache.hadoop.hbase.util.Bytes.len; @@ -24,30 +22,25 @@ import static org.apache.hadoop.hbase.util.Bytes.len; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * This filter is used for selecting only those keys with columns that are - * between minColumn to maxColumn. For example, if minColumn is 'an', and - * maxColumn is 'be', it will pass keys with columns like 'ana', 'bad', but not - * keys with columns like 'bed', 'eye' - * - * If minColumn is null, there is no lower bound. If maxColumn is null, there is - * no upper bound. - * - * minColumnInclusive and maxColumnInclusive specify if the ranges are inclusive - * or not. 
+ * This filter is used for selecting only those keys with columns that are between minColumn to + * maxColumn. For example, if minColumn is 'an', and maxColumn is 'be', it will pass keys with + * columns like 'ana', 'bad', but not keys with columns like 'bed', 'eye' If minColumn is null, + * there is no lower bound. If maxColumn is null, there is no upper bound. minColumnInclusive and + * maxColumnInclusive specify if the ranges are inclusive or not. */ @InterfaceAudience.Public public class ColumnRangeFilter extends FilterBase { @@ -57,17 +50,15 @@ public class ColumnRangeFilter extends FilterBase { protected boolean maxColumnInclusive = false; /** - * Create a filter to select those keys with columns that are between minColumn - * and maxColumn. - * @param minColumn minimum value for the column range. If if it's null, - * there is no lower bound. + * Create a filter to select those keys with columns that are between minColumn and maxColumn. + * @param minColumn minimum value for the column range. If if it's null, there is no + * lower bound. * @param minColumnInclusive if true, include minColumn in the range. - * @param maxColumn maximum value for the column range. If it's null, - * @param maxColumnInclusive if true, include maxColumn in the range. - * there is no upper bound. + * @param maxColumn maximum value for the column range. If it's null, + * @param maxColumnInclusive if true, include maxColumn in the range. there is no upper bound. */ public ColumnRangeFilter(final byte[] minColumn, boolean minColumnInclusive, - final byte[] maxColumn, boolean maxColumnInclusive) { + final byte[] maxColumn, boolean maxColumnInclusive) { this.minColumn = minColumn; this.minColumnInclusive = minColumnInclusive; this.maxColumn = maxColumn; @@ -157,54 +148,49 @@ public class ColumnRangeFilter extends FilterBase { return ReturnCode.NEXT_ROW; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 4, - "Expected 4 but got: %s", filterArguments.size()); - byte [] minColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 4, "Expected 4 but got: %s", + filterArguments.size()); + byte[] minColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); boolean minColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(1)); - byte [] maxColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(2)); + byte[] maxColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(2)); boolean maxColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(3)); - if (minColumn.length == 0) - minColumn = null; - if (maxColumn.length == 0) - maxColumn = null; - return new ColumnRangeFilter(minColumn, minColumnInclusive, - maxColumn, maxColumnInclusive); + if (minColumn.length == 0) minColumn = null; + if (maxColumn.length == 0) maxColumn = null; + return new ColumnRangeFilter(minColumn, minColumnInclusive, maxColumn, maxColumnInclusive); } /** * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.ColumnRangeFilter.Builder builder = - FilterProtos.ColumnRangeFilter.newBuilder(); - if (this.minColumn != null) builder.setMinColumn( - UnsafeByteOperations.unsafeWrap(this.minColumn)); + public byte[] toByteArray() { + FilterProtos.ColumnRangeFilter.Builder builder = 
FilterProtos.ColumnRangeFilter.newBuilder(); + if (this.minColumn != null) + builder.setMinColumn(UnsafeByteOperations.unsafeWrap(this.minColumn)); builder.setMinColumnInclusive(this.minColumnInclusive); - if (this.maxColumn != null) builder.setMaxColumn( - UnsafeByteOperations.unsafeWrap(this.maxColumn)); + if (this.maxColumn != null) + builder.setMaxColumn(UnsafeByteOperations.unsafeWrap(this.maxColumn)); builder.setMaxColumnInclusive(this.maxColumnInclusive); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link ColumnRangeFilter} instance - * @return An instance of {@link ColumnRangeFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link ColumnRangeFilter} made from bytes n * @see + * #toByteArray */ - public static ColumnRangeFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ColumnRangeFilter proto; try { proto = FilterProtos.ColumnRangeFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new ColumnRangeFilter(proto.hasMinColumn()?proto.getMinColumn().toByteArray():null, - proto.getMinColumnInclusive(),proto.hasMaxColumn()?proto.getMaxColumn().toByteArray():null, + return new ColumnRangeFilter(proto.hasMinColumn() ? proto.getMinColumn().toByteArray() : null, + proto.getMinColumnInclusive(), + proto.hasMaxColumn() ? proto.getMaxColumn().toByteArray() : null, proto.getMaxColumnInclusive()); } @@ -223,9 +209,9 @@ public class ColumnRangeFilter extends FilterBase { } ColumnRangeFilter other = (ColumnRangeFilter) o; return Bytes.equals(this.getMinColumn(), other.getMinColumn()) - && this.getMinColumnInclusive() == other.getMinColumnInclusive() - && Bytes.equals(this.getMaxColumn(), other.getMaxColumn()) - && this.getMaxColumnInclusive() == other.getMaxColumnInclusive(); + && this.getMinColumnInclusive() == other.getMinColumnInclusive() + && Bytes.equals(this.getMaxColumn(), other.getMaxColumn()) + && this.getMaxColumnInclusive() == other.getMaxColumnInclusive(); } @Override @@ -235,10 +221,9 @@ public class ColumnRangeFilter extends FilterBase { @Override public String toString() { - return this.getClass().getSimpleName() + " " - + (this.minColumnInclusive ? "[" : "(") + Bytes.toStringBinary(this.minColumn) - + ", " + Bytes.toStringBinary(this.maxColumn) - + (this.maxColumnInclusive ? "]" : ")"); + return this.getClass().getSimpleName() + " " + (this.minColumnInclusive ? "[" : "(") + + Bytes.toStringBinary(this.minColumn) + ", " + Bytes.toStringBinary(this.maxColumn) + + (this.maxColumnInclusive ? "]" : ")"); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java index 6516b9692e1..0074fe40a3a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; @@ -40,12 +37,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Different from {@link SingleColumnValueFilter} which returns an entire row - * when specified condition is matched, {@link ColumnValueFilter} return the matched cell only. + * Different from {@link SingleColumnValueFilter} which returns an entire row when specified + * condition is matched, {@link ColumnValueFilter} return the matched cell only. *

      - * This filter is used to filter cells based on column and value. - * It takes a {@link org.apache.hadoop.hbase.CompareOperator} operator (<, <=, =, !=, >, >=), and - * and a {@link ByteArrayComparable} comparator. + * This filter is used to filter cells based on column and value. It takes a + * {@link org.apache.hadoop.hbase.CompareOperator} operator (<, <=, =, !=, >, >=), and and a + * {@link ByteArrayComparable} comparator. */ @InterfaceAudience.Public public class ColumnValueFilter extends FilterBase { @@ -58,14 +55,13 @@ public class ColumnValueFilter extends FilterBase { // columns in the same row can be skipped faster by NEXT_ROW instead of NEXT_COL. private boolean columnFound = false; - public ColumnValueFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value) { + public ColumnValueFilter(final byte[] family, final byte[] qualifier, final CompareOperator op, + final byte[] value) { this(family, qualifier, op, new BinaryComparator(value)); } - public ColumnValueFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator) { + public ColumnValueFilter(final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator) { this.family = Preconditions.checkNotNull(family, "family should not be null."); this.qualifier = qualifier == null ? new byte[0] : qualifier; this.op = Preconditions.checkNotNull(op, "CompareOperator should not be null"); @@ -73,7 +69,7 @@ public class ColumnValueFilter extends FilterBase { } /** - * @return operator + * n */ public CompareOperator getCompareOperator() { return op; @@ -120,15 +116,16 @@ public class ColumnValueFilter extends FilterBase { columnFound = true; // 2. Check value match: // True means filter out, just skip this cell, else include it. - return compareValue(getCompareOperator(), getComparator(), c) ? - ReturnCode.SKIP : ReturnCode.INCLUDE; + return compareValue(getCompareOperator(), getComparator(), c) + ? ReturnCode.SKIP + : ReturnCode.INCLUDE; } /** * This method is used to determine a cell should be included or filtered out. - * @param op one of operators {@link CompareOperator} + * @param op one of operators {@link CompareOperator} * @param comparator comparator used to compare cells. - * @param cell cell to be compared. + * @param cell cell to be compared. * @return true means cell should be filtered out, included otherwise. 
*/ private boolean compareValue(final CompareOperator op, final ByteArrayComparable comparator, @@ -146,20 +143,18 @@ public class ColumnValueFilter extends FilterBase { * @return a ColumnValueFilter */ public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 4, - "Expect 4 arguments: %s", filterArguments.size()); + Preconditions.checkArgument(filterArguments.size() == 4, "Expect 4 arguments: %s", + filterArguments.size()); byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); CompareOperator operator = ParseFilter.createCompareOperator(filterArguments.get(2)); ByteArrayComparable comparator = ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (operator != CompareOperator.EQUAL && - operator != CompareOperator.NOT_EQUAL) { - throw new IllegalArgumentException("A regexstring comparator and substring comparator " + - "can only be used with EQUAL and NOT_EQUAL"); + if (comparator instanceof RegexStringComparator || comparator instanceof SubstringComparator) { + if (operator != CompareOperator.EQUAL && operator != CompareOperator.NOT_EQUAL) { + throw new IllegalArgumentException("A regexstring comparator and substring comparator " + + "can only be used with EQUAL and NOT_EQUAL"); } } @@ -170,8 +165,7 @@ public class ColumnValueFilter extends FilterBase { * @return A pb instance to represent this instance. */ FilterProtos.ColumnValueFilter convert() { - FilterProtos.ColumnValueFilter.Builder builder = - FilterProtos.ColumnValueFilter.newBuilder(); + FilterProtos.ColumnValueFilter.Builder builder = FilterProtos.ColumnValueFilter.newBuilder(); builder.setFamily(UnsafeByteOperations.unsafeWrap(this.family)); builder.setQualifier(UnsafeByteOperations.unsafeWrap(this.qualifier)); @@ -221,10 +215,10 @@ public class ColumnValueFilter extends FilterBase { } ColumnValueFilter other = (ColumnValueFilter) o; - return Bytes.equals(this.getFamily(), other.getFamily()) && - Bytes.equals(this.getQualifier(), other.getQualifier()) && - this.getCompareOperator().equals(other.getCompareOperator()) && - this.getComparator().areSerializedFieldsEqual(other.getComparator()); + return Bytes.equals(this.getFamily(), other.getFamily()) + && Bytes.equals(this.getQualifier(), other.getQualifier()) + && this.getCompareOperator().equals(other.getCompareOperator()) + && this.getComparator().areSerializedFieldsEqual(other.getComparator()); } @Override @@ -234,9 +228,8 @@ public class ColumnValueFilter extends FilterBase { @Override public String toString() { - return String.format("%s (%s, %s, %s, %s)", - getClass().getSimpleName(), Bytes.toStringBinary(this.family), - Bytes.toStringBinary(this.qualifier), this.op.name(), + return String.format("%s (%s, %s, %s, %s)", getClass().getSimpleName(), + Bytes.toStringBinary(this.family), Bytes.toStringBinary(this.qualifier), this.op.name(), Bytes.toStringBinary(this.comparator.getValue())); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java index 5f8346ef812..7fc8be99396 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java 
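As the reformatted class comment above says, ColumnValueFilter returns only the matching cell, where SingleColumnValueFilter would return the whole row. A minimal illustrative sketch, not part of the patch; the cf/status/active names are hypothetical.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnValueFilterSketch {
  public static void main(String[] args) {
    // Include only cells in cf:status whose value equals "active"; other cells are skipped
    // individually instead of the entire row being dropped.
    Scan scan = new Scan();
    scan.setFilter(new ColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("status"),
      CompareOperator.EQUAL, Bytes.toBytes("active")));
  }
}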
@@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -37,8 +34,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType; /** - * This is a generic filter to be used to filter by comparison. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator. + * This is a generic filter to be used to filter by comparison. It takes an operator (equal, + * greater, not equal, etc) and a byte [] comparator. *

      * To filter by row key, use {@link RowFilter}. *

      @@ -48,18 +45,17 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType *

      * To filter by value, use {@link ValueFilter}. *

      - * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter} - * to add more control. + * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter} to add more + * control. *

      * Multiple filters can be combined using {@link FilterList}. */ @InterfaceAudience.Public public abstract class CompareFilter extends FilterBase { /** - * Comparison operators. For filters only! - * Use {@link CompareOperator} otherwise. - * It (intentionally) has at least the below enums with same names. - * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link CompareOperator} instead. + * Comparison operators. For filters only! Use {@link CompareOperator} otherwise. It + * (intentionally) has at least the below enums with same names. + * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link CompareOperator} instead. */ @Deprecated @InterfaceAudience.Public @@ -85,30 +81,28 @@ public abstract class CompareFilter extends FilterBase { /** * Constructor. - * @param compareOp the compare op for row matching + * @param compareOp the compare op for row matching * @param comparator the comparator for row matching * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use other constructor. */ @Deprecated - public CompareFilter(final CompareOp compareOp, - final ByteArrayComparable comparator) { + public CompareFilter(final CompareOp compareOp, final ByteArrayComparable comparator) { this(CompareOperator.valueOf(compareOp.name()), comparator); } /** * Constructor. - * @param op the compare op for row matching + * @param op the compare op for row matching * @param comparator the comparator for row matching */ - public CompareFilter(final CompareOperator op, - final ByteArrayComparable comparator) { + public CompareFilter(final CompareOperator op, final ByteArrayComparable comparator) { this.op = op; this.comparator = comparator; } /** - * @return operator - * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead. + * n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} + * instead. */ @Deprecated public CompareOp getOperator() { @@ -133,12 +127,12 @@ public abstract class CompareFilter extends FilterBase { } /** - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #compareRow(CompareOperator, ByteArrayComparable, Cell)} + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #compareRow(CompareOperator, ByteArrayComparable, Cell)} */ @Deprecated protected boolean compareRow(final CompareOp compareOp, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (compareOp == CompareOp.NO_OP) { return true; } @@ -147,7 +141,7 @@ public abstract class CompareFilter extends FilterBase { } protected boolean compareRow(final CompareOperator op, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (op == CompareOperator.NO_OP) { return true; } @@ -156,12 +150,12 @@ public abstract class CompareFilter extends FilterBase { } /** - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #compareFamily(CompareOperator, ByteArrayComparable, Cell)} + * @deprecated Since 2.0.0. Will be removed in 3.0.0. 
Use + * {@link #compareFamily(CompareOperator, ByteArrayComparable, Cell)} */ @Deprecated protected boolean compareFamily(final CompareOp compareOp, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (compareOp == CompareOp.NO_OP) { return true; } @@ -170,7 +164,7 @@ public abstract class CompareFilter extends FilterBase { } protected boolean compareFamily(final CompareOperator op, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (op == CompareOperator.NO_OP) { return true; } @@ -179,12 +173,12 @@ public abstract class CompareFilter extends FilterBase { } /** - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #compareQualifier(CompareOperator, ByteArrayComparable, Cell)} + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #compareQualifier(CompareOperator, ByteArrayComparable, Cell)} */ @Deprecated protected boolean compareQualifier(final CompareOp compareOp, - final ByteArrayComparable comparator, final Cell cell) { + final ByteArrayComparable comparator, final Cell cell) { // We do not call through to the non-deprecated method for perf reasons. if (compareOp == CompareOp.NO_OP) { return true; @@ -193,8 +187,8 @@ public abstract class CompareFilter extends FilterBase { return compare(compareOp, compareResult); } - protected boolean compareQualifier(final CompareOperator op, - final ByteArrayComparable comparator, final Cell cell) { + protected boolean compareQualifier(final CompareOperator op, final ByteArrayComparable comparator, + final Cell cell) { // We do not call through to the non-deprecated method for perf reasons. if (op == CompareOperator.NO_OP) { return true; @@ -204,12 +198,12 @@ public abstract class CompareFilter extends FilterBase { } /** - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #compareValue(CompareOperator, ByteArrayComparable, Cell)} + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #compareValue(CompareOperator, ByteArrayComparable, Cell)} */ @Deprecated protected boolean compareValue(final CompareOp compareOp, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { // We do not call through to the non-deprecated method for perf reasons. 
if (compareOp == CompareOp.NO_OP) { return true; @@ -219,7 +213,7 @@ public abstract class CompareFilter extends FilterBase { } protected boolean compareValue(final CompareOperator op, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (op == CompareOperator.NO_OP) { return true; } @@ -229,20 +223,20 @@ public abstract class CompareFilter extends FilterBase { static boolean compare(final CompareOp op, int compareResult) { switch (op) { - case LESS: - return compareResult <= 0; - case LESS_OR_EQUAL: - return compareResult < 0; - case EQUAL: - return compareResult != 0; - case NOT_EQUAL: - return compareResult == 0; - case GREATER_OR_EQUAL: - return compareResult > 0; - case GREATER: - return compareResult >= 0; - default: - throw new RuntimeException("Unknown Compare op " + op.name()); + case LESS: + return compareResult <= 0; + case LESS_OR_EQUAL: + return compareResult < 0; + case EQUAL: + return compareResult != 0; + case NOT_EQUAL: + return compareResult == 0; + case GREATER_OR_EQUAL: + return compareResult > 0; + case GREATER: + return compareResult >= 0; + default: + throw new RuntimeException("Unknown Compare op " + op.name()); } } @@ -266,19 +260,17 @@ public abstract class CompareFilter extends FilterBase { } // returns an array of heterogeneous objects - public static ArrayList extractArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2, - "Expected 2 but got: %s", filterArguments.size()); + public static ArrayList extractArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 2, "Expected 2 but got: %s", + filterArguments.size()); CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(0)); - ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(1))); + ByteArrayComparable comparator = + ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(1))); - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (op != CompareOperator.EQUAL && - op != CompareOperator.NOT_EQUAL) { - throw new IllegalArgumentException ("A regexstring comparator and substring comparator" + - " can only be used with EQUAL and NOT_EQUAL"); + if (comparator instanceof RegexStringComparator || comparator instanceof SubstringComparator) { + if (op != CompareOperator.EQUAL && op != CompareOperator.NOT_EQUAL) { + throw new IllegalArgumentException("A regexstring comparator and substring comparator" + + " can only be used with EQUAL and NOT_EQUAL"); } } ArrayList arguments = new ArrayList<>(2); @@ -291,8 +283,7 @@ public abstract class CompareFilter extends FilterBase { * @return A pb instance to represent this instance. */ FilterProtos.CompareFilter convert() { - FilterProtos.CompareFilter.Builder builder = - FilterProtos.CompareFilter.newBuilder(); + FilterProtos.CompareFilter.Builder builder = FilterProtos.CompareFilter.newBuilder(); HBaseProtos.CompareType compareOp = CompareType.valueOf(this.op.name()); builder.setCompareOp(compareOp); if (this.comparator != null) builder.setComparator(ProtobufUtil.toComparator(this.comparator)); @@ -300,27 +291,23 @@ public abstract class CompareFilter extends FilterBase { } /** - * - * @param o - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
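CompareFilter itself is abstract; the operator-plus-comparator pattern described in its class comment is exercised through subclasses such as RowFilter and ValueFilter. An illustrative sketch, not part of the patch (the row-key bound and the "error" substring are made up); note the constraint, also enforced in extractArguments, that substring and regex comparators only pair with EQUAL or NOT_EQUAL.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareFilterSketch {
  public static void main(String[] args) {
    // Rows whose key sorts at or before "row-0100", using the non-deprecated CompareOperator.
    Scan byRow = new Scan();
    byRow.setFilter(new RowFilter(CompareOperator.LESS_OR_EQUAL,
      new BinaryComparator(Bytes.toBytes("row-0100"))));

    // Cells whose value contains "error"; substring and regex comparators may only be used
    // with EQUAL or NOT_EQUAL.
    Scan byValue = new Scan();
    byValue.setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("error")));
  }
}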
+ * n * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof CompareFilter)) return false; - CompareFilter other = (CompareFilter)o; - return this.getCompareOperator().equals(other.getCompareOperator()) && - (this.getComparator() == other.getComparator() + CompareFilter other = (CompareFilter) o; + return this.getCompareOperator().equals(other.getCompareOperator()) + && (this.getComparator() == other.getComparator() || this.getComparator().areSerializedFieldsEqual(other.getComparator())); } @Override public String toString() { - return String.format("%s (%s, %s)", - this.getClass().getSimpleName(), - this.op.name(), - Bytes.toStringBinary(this.comparator.getValue())); + return String.format("%s (%s, %s)", this.getClass().getSimpleName(), this.op.name(), + Bytes.toStringBinary(this.comparator.getValue())); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java index 4d7f681f0a7..c9a688028ca 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,27 +23,24 @@ import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Set; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * A filter for adding inter-column timestamp matching - * Only cells with a correspondingly timestamped entry in - * the target column will be retained - * Not compatible with Scan.setBatch as operations need - * full rows for correct filtering + * A filter for adding inter-column timestamp matching Only cells with a correspondingly timestamped + * entry in the target column will be retained Not compatible with Scan.setBatch as operations need + * full rows for correct filtering */ @InterfaceAudience.Public public class DependentColumnFilter extends CompareFilter { @@ -54,75 +50,65 @@ public class DependentColumnFilter extends CompareFilter { protected boolean dropDependentColumn; protected Set stampSet = new HashSet<>(); - + /** - * Build a dependent column filter with value checking - * dependent column varies will be compared using the supplied - * compareOp and comparator, for usage of which - * refer to {@link 
CompareFilter} - * - * @param family dependent column family - * @param qualifier dependent column qualifier + * Build a dependent column filter with value checking dependent column varies will be compared + * using the supplied compareOp and comparator, for usage of which refer to {@link CompareFilter} + * @param family dependent column family + * @param qualifier dependent column qualifier * @param dropDependentColumn whether the column should be discarded after - * @param valueCompareOp comparison op - * @param valueComparator comparator + * @param valueCompareOp comparison op + * @param valueComparator comparator * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #DependentColumnFilter(byte[], byte[], boolean, CompareOperator, ByteArrayComparable)} - * instead. + * {@link #DependentColumnFilter(byte[], byte[], boolean, CompareOperator, ByteArrayComparable)} + * instead. */ @Deprecated - public DependentColumnFilter(final byte [] family, final byte[] qualifier, - final boolean dropDependentColumn, final CompareOp valueCompareOp, - final ByteArrayComparable valueComparator) { + public DependentColumnFilter(final byte[] family, final byte[] qualifier, + final boolean dropDependentColumn, final CompareOp valueCompareOp, + final ByteArrayComparable valueComparator) { this(family, qualifier, dropDependentColumn, CompareOperator.valueOf(valueCompareOp.name()), valueComparator); } /** - * Build a dependent column filter with value checking - * dependent column varies will be compared using the supplied - * compareOp and comparator, for usage of which - * refer to {@link CompareFilter} - * - * @param family dependent column family - * @param qualifier dependent column qualifier + * Build a dependent column filter with value checking dependent column varies will be compared + * using the supplied compareOp and comparator, for usage of which refer to {@link CompareFilter} + * @param family dependent column family + * @param qualifier dependent column qualifier * @param dropDependentColumn whether the column should be discarded after - * @param op Value comparison op - * @param valueComparator comparator + * @param op Value comparison op + * @param valueComparator comparator */ - public DependentColumnFilter(final byte [] family, final byte[] qualifier, - final boolean dropDependentColumn, final CompareOperator op, - final ByteArrayComparable valueComparator) { + public DependentColumnFilter(final byte[] family, final byte[] qualifier, + final boolean dropDependentColumn, final CompareOperator op, + final ByteArrayComparable valueComparator) { // set up the comparator super(op, valueComparator); this.columnFamily = family; this.columnQualifier = qualifier; this.dropDependentColumn = dropDependentColumn; } - + /** - * Constructor for DependentColumn filter. - * Cells where a Cell from target column - * with the same timestamp do not exist will be dropped. - * - * @param family name of target column family + * Constructor for DependentColumn filter. Cells where a Cell from target column with the same + * timestamp do not exist will be dropped. + * @param family name of target column family * @param qualifier name of column qualifier */ - public DependentColumnFilter(final byte [] family, final byte [] qualifier) { + public DependentColumnFilter(final byte[] family, final byte[] qualifier) { this(family, qualifier, false); } - + /** - * Constructor for DependentColumn filter. - * Cells where a Cell from target column - * with the same timestamp do not exist will be dropped. 
- * - * @param family name of dependent column family - * @param qualifier name of dependent qualifier + * Constructor for DependentColumn filter. Cells where a Cell from target column with the same + * timestamp do not exist will be dropped. + * @param family name of dependent column family + * @param qualifier name of dependent qualifier * @param dropDependentColumn whether the dependent columns Cells should be discarded */ - public DependentColumnFilter(final byte [] family, final byte [] qualifier, - final boolean dropDependentColumn) { + public DependentColumnFilter(final byte[] family, final byte[] qualifier, + final boolean dropDependentColumn) { this(family, qualifier, dropDependentColumn, CompareOp.NO_OP, null); } @@ -166,16 +152,15 @@ public class DependentColumnFilter extends CompareFilter { public ReturnCode filterCell(final Cell c) { // Check if the column and qualifier match if (!CellUtil.matchingColumn(c, this.columnFamily, this.columnQualifier)) { - // include non-matches for the time being, they'll be discarded afterwards - return ReturnCode.INCLUDE; + // include non-matches for the time being, they'll be discarded afterwards + return ReturnCode.INCLUDE; } // If it doesn't pass the op, skip it - if (comparator != null - && compareValue(getCompareOperator(), comparator, c)) + if (comparator != null && compareValue(getCompareOperator(), comparator, c)) return ReturnCode.SKIP; - + stampSet.add(c.getTimestamp()); - if(dropDependentColumn) { + if (dropDependentColumn) { return ReturnCode.SKIP; } return ReturnCode.INCLUDE; @@ -190,7 +175,7 @@ public class DependentColumnFilter extends CompareFilter { public boolean hasFilterRow() { return true; } - + @Override public boolean filterRow() { return false; @@ -200,36 +185,35 @@ public class DependentColumnFilter extends CompareFilter { public boolean filterRowKey(byte[] buffer, int offset, int length) { return false; } + @Override public void reset() { - stampSet.clear(); + stampSet.clear(); } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2 || - filterArguments.size() == 3 || - filterArguments.size() == 5, - "Expected 2, 3 or 5 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument( + filterArguments.size() == 2 || filterArguments.size() == 3 || filterArguments.size() == 5, + "Expected 2, 3 or 5 but got: %s", filterArguments.size()); if (filterArguments.size() == 2) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); return new DependentColumnFilter(family, qualifier); } else if (filterArguments.size() == 3) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); return new DependentColumnFilter(family, qualifier, dropDependentColumn); } else if (filterArguments.size() == 5) { - byte [] family = 
ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(3)); - ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(4))); - return new DependentColumnFilter(family, qualifier, dropDependentColumn, - op, comparator); + ByteArrayComparable comparator = + ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(4))); + return new DependentColumnFilter(family, qualifier, dropDependentColumn, op, comparator); } else { throw new IllegalArgumentException("Expected 2, 3 or 5 but got: " + filterArguments.size()); } @@ -239,7 +223,7 @@ public class DependentColumnFilter extends CompareFilter { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.DependentColumnFilter.Builder builder = FilterProtos.DependentColumnFilter.newBuilder(); builder.setCompareFilter(super.convert()); @@ -255,12 +239,11 @@ public class DependentColumnFilter extends CompareFilter { /** * @param pbBytes A pb serialized {@link DependentColumnFilter} instance - * @return An instance of {@link DependentColumnFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link DependentColumnFilter} made from bytes n * @see + * #toByteArray */ - public static DependentColumnFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static DependentColumnFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.DependentColumnFilter proto; try { proto = FilterProtos.DependentColumnFilter.parseFrom(pbBytes); @@ -268,7 +251,7 @@ public class DependentColumnFilter extends CompareFilter { throw new DeserializationException(e); } final CompareOperator valueCompareOp = - CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name()); + CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name()); ByteArrayComparable valueComparator = null; try { if (proto.getCompareFilter().hasComparator()) { @@ -278,24 +261,23 @@ public class DependentColumnFilter extends CompareFilter { throw new DeserializationException(ioe); } return new DependentColumnFilter( - proto.hasColumnFamily()?proto.getColumnFamily().toByteArray():null, - proto.hasColumnQualifier()?proto.getColumnQualifier().toByteArray():null, + proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null, + proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null, proto.getDropDependentColumn(), valueCompareOp, valueComparator); } /** - * @param o - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
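The timestamp-join behaviour described in the constructors above can be hard to picture from javadoc alone. An illustrative sketch, not part of the patch; cf:updated is a hypothetical dependent column.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.DependentColumnFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class DependentColumnFilterSketch {
  public static void main(String[] args) {
    // Keep a cell only when cf:updated has an entry with the same timestamp; the boolean
    // argument also drops the cf:updated cells themselves from the result. As the class
    // comment notes, this filter is not compatible with Scan.setBatch().
    Scan scan = new Scan();
    scan.setFilter(
      new DependentColumnFilter(Bytes.toBytes("cf"), Bytes.toBytes("updated"), true));
  }
}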
*/ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") + value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof DependentColumnFilter)) return false; - DependentColumnFilter other = (DependentColumnFilter)o; + DependentColumnFilter other = (DependentColumnFilter) o; return other != null && super.areSerializedFieldsEqual(other) && Bytes.equals(this.getFamily(), other.getFamily()) && Bytes.equals(this.getQualifier(), other.getQualifier()) @@ -304,13 +286,10 @@ public class DependentColumnFilter extends CompareFilter { @Override public String toString() { - return String.format("%s (%s, %s, %s, %s, %s)", - this.getClass().getSimpleName(), - Bytes.toStringBinary(this.columnFamily), - Bytes.toStringBinary(this.columnQualifier), - this.dropDependentColumn, - this.op.name(), - this.comparator != null ? Bytes.toStringBinary(this.comparator.getValue()) : "null"); + return String.format("%s (%s, %s, %s, %s, %s)", this.getClass().getSimpleName(), + Bytes.toStringBinary(this.columnFamily), Bytes.toStringBinary(this.columnQualifier), + this.dropDependentColumn, this.op.name(), + this.comparator != null ? Bytes.toStringBinary(this.comparator.getValue()) : "null"); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java index fb544199c89..489b1363fd7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,59 +15,56 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** *

      - * This filter is used to filter based on the column family. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * column family portion of a key. - *

      - * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and {@link org.apache.hadoop.hbase.filter.SkipFilter} - * to add more control. - *

      + * This filter is used to filter based on the column family. It takes an operator (equal, greater, + * not equal, etc) and a byte [] comparator for the column family portion of a key. + *

      + *

      + * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and + * {@link org.apache.hadoop.hbase.filter.SkipFilter} to add more control. + *

      + *

      * Multiple filters can be combined using {@link org.apache.hadoop.hbase.filter.FilterList}. *

      - * If an already known column family is looked for, use {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])} - * directly rather than a filter. + * If an already known column family is looked for, use + * {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])} directly rather than a filter. */ @InterfaceAudience.Public public class FamilyFilter extends CompareFilter { /** * Constructor. - * * @param familyCompareOp the compare op for column family matching * @param familyComparator the comparator for column family matching - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #FamilyFilter(CompareOperator, ByteArrayComparable)} + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #FamilyFilter(CompareOperator, ByteArrayComparable)} */ @Deprecated - public FamilyFilter(final CompareOp familyCompareOp, - final ByteArrayComparable familyComparator) { - super(familyCompareOp, familyComparator); + public FamilyFilter(final CompareOp familyCompareOp, final ByteArrayComparable familyComparator) { + super(familyCompareOp, familyComparator); } /** * Constructor. - * - * @param op the compare op for column family matching + * @param op the compare op for column family matching * @param familyComparator the comparator for column family matching */ - public FamilyFilter(final CompareOperator op, - final ByteArrayComparable familyComparator) { + public FamilyFilter(final CompareOperator op, final ByteArrayComparable familyComparator) { super(op, familyComparator); } @@ -89,10 +85,10 @@ public class FamilyFilter extends CompareFilter { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new FamilyFilter(compareOp, comparator); } @@ -100,21 +96,17 @@ public class FamilyFilter extends CompareFilter { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.FamilyFilter.Builder builder = - FilterProtos.FamilyFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.FamilyFilter.Builder builder = FilterProtos.FamilyFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link FamilyFilter} instance - * @return An instance of {@link FamilyFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FamilyFilter} made from bytes n * @see #toByteArray */ - public static FamilyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static FamilyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FamilyFilter proto; try { proto = FilterProtos.FamilyFilter.parseFrom(pbBytes); @@ -131,21 +123,21 @@ public class FamilyFilter extends CompareFilter { } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new FamilyFilter(valueCompareOp,valueComparator); + return new FamilyFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are 
equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof FamilyFilter)) return false; - FamilyFilter other = (FamilyFilter)o; + FamilyFilter other = (FamilyFilter) o; return super.areSerializedFieldsEqual(other); - } + } @Override public boolean equals(Object obj) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index dec8e061b54..9df8a6f14f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,81 +15,66 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; /** - * Interface for row and column filters directly applied within the regionserver. - * - * A filter can expect the following call sequence: + * Interface for row and column filters directly applied within the regionserver. A filter can + * expect the following call sequence: *
- *   <li> {@link #reset()} : reset the filter state before filtering a new row.</li>
- *   <li> {@link #filterAllRemaining()}: true means row scan is over; false means keep going.</li>
- *   <li> {@link #filterRowKey(Cell)}: true means drop this row; false means include.</li>
- *   <li> {@link #filterCell(Cell)}: decides whether to include or exclude this Cell.
- *        See {@link ReturnCode}.</li>
- *   <li> {@link #transformCell(Cell)}: if the Cell is included, let the filter transform the
- *        Cell.</li>
- *   <li> {@link #filterRowCells(List)}: allows direct modification of the final list to be submitted
- *   <li> {@link #filterRow()}: last chance to drop entire row based on the sequence of
- *        filter calls. Eg: filter a row if it doesn't contain a specified column.</li>
+ * <li>{@link #reset()} : reset the filter state before filtering a new row.</li>
+ * <li>{@link #filterAllRemaining()}: true means row scan is over; false means keep going.</li>
+ * <li>{@link #filterRowKey(Cell)}: true means drop this row; false means include.</li>
+ * <li>{@link #filterCell(Cell)}: decides whether to include or exclude this Cell. See
+ * {@link ReturnCode}.</li>
+ * <li>{@link #transformCell(Cell)}: if the Cell is included, let the filter transform the Cell.
+ * </li>
+ * <li>{@link #filterRowCells(List)}: allows direct modification of the final list to be submitted
+ * <li>{@link #filterRow()}: last chance to drop entire row based on the sequence of filter calls.
+ * Eg: filter a row if it doesn't contain a specified column.</li>
 * </ul>
      - * - * Filter instances are created one per region/scan. This abstract class replaces - * the old RowFilterInterface. - * - * When implementing your own filters, consider inheriting {@link FilterBase} to help - * you reduce boilerplate. - * + * Filter instances are created one per region/scan. This abstract class replaces the old + * RowFilterInterface. When implementing your own filters, consider inheriting {@link FilterBase} to + * help you reduce boilerplate. * @see FilterBase */ @InterfaceAudience.Public public abstract class Filter { protected transient boolean reversed; + /** - * Reset the state of the filter between rows. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * Reset the state of the filter between rows. Concrete implementers can signal a failure + * condition in their code by throwing an {@link IOException}. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract public void reset() throws IOException; /** * Filters a row based on the row key. If this returns true, the entire row will be excluded. If - * false, each KeyValue in the row will be passed to {@link #filterCell(Cell)} below. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * false, each KeyValue in the row will be passed to {@link #filterCell(Cell)} below. Concrete + * implementers can signal a failure condition in their code by throwing an {@link IOException}. * @param buffer buffer containing row key * @param offset offset into buffer where row key starts * @param length length of the row key * @return true, remove entire row, false, include the row (maybe). * @throws IOException in case an I/O or an filter specific failure needs to be signaled. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Instead use {@link #filterRowKey(Cell)} + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use + * {@link #filterRowKey(Cell)} */ @Deprecated abstract public boolean filterRowKey(byte[] buffer, int offset, int length) throws IOException; /** * Filters a row based on the row key. If this returns true, the entire row will be excluded. If - * false, each KeyValue in the row will be passed to {@link #filterCell(Cell)} below. - * If {@link #filterAllRemaining()} returns true, then {@link #filterRowKey(Cell)} should - * also return true. - * - * Concrete implementers can signal a failure condition in their code by throwing an + * false, each KeyValue in the row will be passed to {@link #filterCell(Cell)} below. If + * {@link #filterAllRemaining()} returns true, then {@link #filterRowKey(Cell)} should also return + * true. Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @param firstRowCell The first cell coming in the new row * @return true, remove entire row, false, include the row (maybe). * @throws IOException in case an I/O or an filter specific failure needs to be signaled. @@ -98,11 +82,8 @@ public abstract class Filter { abstract public boolean filterRowKey(Cell firstRowCell) throws IOException; /** - * If this returns true, the scan will terminate. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * If this returns true, the scan will terminate. 
Concrete implementers can signal a failure + * condition in their code by throwing an {@link IOException}. * @return true to end scan, false to continue. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ @@ -111,25 +92,18 @@ public abstract class Filter { /** * A way to filter based on the column family, column qualifier and/or the column value. Return * code is described below. This allows filters to filter only certain number of columns, then - * terminate without matching ever column. - * - * If filterRowKey returns true, filterKeyValue needs to be consistent with it. - * - * filterKeyValue can assume that filterRowKey has already been called for the row. - * - * If your filter returns ReturnCode.NEXT_ROW, it should return + * terminate without matching ever column. If filterRowKey returns true, filterKeyValue needs to + * be consistent with it. filterKeyValue can assume that filterRowKey has already been called for + * the row. If your filter returns ReturnCode.NEXT_ROW, it should return * ReturnCode.NEXT_ROW until {@link #reset()} is called just in case the caller calls - * for the next row. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * for the next row. Concrete implementers can signal a failure condition in their code by + * throwing an {@link IOException}. * @param c the Cell in question * @return code as described below, Filter.ReturnCode.INCLUDE by default * @throws IOException in case an I/O or an filter specific failure needs to be signaled. * @see Filter.ReturnCode - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Instead use filterCell(Cell) + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use + * filterCell(Cell) */ @Deprecated public ReturnCode filterKeyValue(final Cell c) throws IOException { @@ -139,41 +113,29 @@ public abstract class Filter { /** * A way to filter based on the column family, column qualifier and/or the column value. Return * code is described below. This allows filters to filter only certain number of columns, then - * terminate without matching ever column. - * - * If filterRowKey returns true, filterCell needs to be consistent with it. - * - * filterCell can assume that filterRowKey has already been called for the row. - * - * If your filter returns ReturnCode.NEXT_ROW, it should return + * terminate without matching ever column. If filterRowKey returns true, filterCell needs to be + * consistent with it. filterCell can assume that filterRowKey has already been called for the + * row. If your filter returns ReturnCode.NEXT_ROW, it should return * ReturnCode.NEXT_ROW until {@link #reset()} is called just in case the caller calls - * for the next row. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * for the next row. Concrete implementers can signal a failure condition in their code by + * throwing an {@link IOException}. * @param c the Cell in question * @return code as described below * @throws IOException in case an I/O or an filter specific failure needs to be signaled. * @see Filter.ReturnCode */ - public ReturnCode filterCell(final Cell c) throws IOException{ + public ReturnCode filterCell(final Cell c) throws IOException { return filterKeyValue(c); } /** - * Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new - * Cell object must be returned. 
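The transformCell javadoc being reflowed here names KeyOnlyFilter as the stock example of a cell transformation. A short illustrative sketch, not part of the patch, of using it to scan row keys without shipping cell values:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

public class KeyOnlyScanSketch {
  public static void main(String[] args) {
    // Each returned Cell keeps its key parts but has its value stripped (or replaced by the
    // value length when the filter is built with lenAsVal = true).
    Scan scan = new Scan();
    scan.setFilter(new KeyOnlyFilter());
  }
}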
- * - * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() - * The transformed KeyValue is what is eventually returned to the client. Most filters will - * return the passed KeyValue unchanged. + * Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new Cell + * object must be returned. + * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() The transformed KeyValue is what is + * eventually returned to the client. Most filters will return the passed KeyValue unchanged. * @see org.apache.hadoop.hbase.filter.KeyOnlyFilter#transformCell(Cell) for an example of a - * transformation. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * transformation. Concrete implementers can signal a failure condition in their code by + * throwing an {@link IOException}. * @param v the KeyValue in question * @return the changed KeyValue * @throws IOException in case an I/O or an filter specific failure needs to be signaled. @@ -218,14 +180,12 @@ public abstract class Filter { * Include KeyValue and done with row, seek to next. See NEXT_ROW. */ INCLUDE_AND_SEEK_NEXT_ROW, -} + } /** * Chance to alter the list of Cells to be submitted. Modifications to the list will carry on - * * Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @param kvs the list of Cells to be filtered * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ @@ -234,19 +194,15 @@ public abstract class Filter { /** * Primarily used to check for conflicts with scans(such as scans that do not read a full row at a * time). - * * @return True if this filter actively uses filterRowCells(List) or filterRow(). */ abstract public boolean hasFilterRow(); /** - * Last chance to veto row based on previous {@link #filterCell(Cell)} calls. The filter - * needs to retain state then return a particular value for this call if they wish to exclude a - * row if a certain column is missing (for example). - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * Last chance to veto row based on previous {@link #filterCell(Cell)} calls. The filter needs to + * retain state then return a particular value for this call if they wish to exclude a row if a + * certain column is missing (for example). Concrete implementers can signal a failure condition + * in their code by throwing an {@link IOException}. * @return true to exclude row, false to include row. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ @@ -255,11 +211,8 @@ public abstract class Filter { /** * If the filter returns the match code SEEK_NEXT_USING_HINT, then it should also tell which is * the next key it must seek to. After receiving the match code SEEK_NEXT_USING_HINT, the - * QueryMatcher would call this function to find out which key it must next seek to. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * QueryMatcher would call this function to find out which key it must next seek to. Concrete + * implementers can signal a failure condition in their code by throwing an {@link IOException}. * @return KeyValue which must be next seeked. return null if the filter is not sure which key to * seek to next. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. 
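This javadoc points at FilterList for combining filters and at SkipFilter/WhileMatchFilter for wrapping them with extra control. An illustrative sketch, not part of the patch; the family, qualifier prefix and row-key bound are hypothetical.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CombinedFilterSketch {
  public static void main(String[] args) {
    // MUST_PASS_ALL: a cell is returned only if every wrapped filter accepts it.
    FilterList both = new FilterList(FilterList.Operator.MUST_PASS_ALL,
      new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("cf"))),
      new ColumnPrefixFilter(Bytes.toBytes("an")));

    // WhileMatchFilter ends the whole scan as soon as its wrapped filter rejects something,
    // here once row keys reach "row-1000".
    WhileMatchFilter untilBound = new WhileMatchFilter(
      new RowFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("row-1000"))));

    Scan scan = new Scan();
    scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, both, untilBound));
  }
}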
@@ -270,48 +223,35 @@ public abstract class Filter { * Check that given column family is essential for filter to check row. Most filters always return * true here. But some could have more sophisticated logic which could significantly reduce * scanning process by not even touching columns until we are 100% sure that it's data is needed - * in result. - * - * Concrete implementers can signal a failure condition in their code by throwing an + * in result. Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract public boolean isFamilyEssential(byte[] name) throws IOException; /** - * TODO: JAVADOC - * - * Concrete implementers can signal a failure condition in their code by throwing an + * TODO: JAVADOC Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @return The filter serialized using pb * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract public byte[] toByteArray() throws IOException; /** - * * Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @param pbBytes A pb serialized {@link Filter} instance - * @return An instance of {@link Filter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link Filter} made from bytes n * @see #toByteArray */ - public static Filter parseFrom(final byte [] pbBytes) throws DeserializationException { + public static Filter parseFrom(final byte[] pbBytes) throws DeserializationException { throw new DeserializationException( "parseFrom called on base Filter, but should be called on derived type"); } /** * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * - * @param other - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * {@link IOException}. n * @return true if and only if the fields of the filter that are + * serialized are equal to the corresponding fields in other. Used for testing. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract boolean areSerializedFieldsEqual(Filter other); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java index 7401e4cc38f..e66022f6e7d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,44 +15,36 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.yetus.audience.InterfaceAudience; /** - * Abstract base class to help you implement new Filters. Common "ignore" or NOOP type - * methods can go here, helping to reduce boiler plate in an ever-expanding filter - * library. 
- * - * If you could instantiate FilterBase, it would end up being a "null" filter - - * that is one that never filters anything. + * Abstract base class to help you implement new Filters. Common "ignore" or NOOP type methods can + * go here, helping to reduce boiler plate in an ever-expanding filter library. If you could + * instantiate FilterBase, it would end up being a "null" filter - that is one that never filters + * anything. */ @InterfaceAudience.Private // TODO add filter limited private level public abstract class FilterBase extends Filter { /** - * Filters that are purely stateless and do nothing in their reset() methods can inherit - * this null/empty implementation. - * - * {@inheritDoc} + * Filters that are purely stateless and do nothing in their reset() methods can inherit this + * null/empty implementation. {@inheritDoc} */ @Override public void reset() throws IOException { } /** - * Filters that do not filter by row key can inherit this implementation that - * never filters anything. (ie: returns false). - * - * {@inheritDoc} - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Instead use {@link #filterRowKey(Cell)} + * Filters that do not filter by row key can inherit this implementation that never filters + * anything. (ie: returns false). {@inheritDoc} + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use + * {@link #filterRowKey(Cell)} */ @Override @Deprecated @@ -69,10 +60,8 @@ public abstract class FilterBase extends Filter { } /** - * Filters that never filter all remaining can inherit this implementation that - * never stops the filter early. - * - * {@inheritDoc} + * Filters that never filter all remaining can inherit this implementation that never stops the + * filter early. {@inheritDoc} */ @Override public boolean filterAllRemaining() throws IOException { @@ -80,9 +69,7 @@ public abstract class FilterBase extends Filter { } /** - * By default no transformation takes place - * - * {@inheritDoc} + * By default no transformation takes place {@inheritDoc} */ @Override public Cell transformCell(Cell v) throws IOException { @@ -90,20 +77,16 @@ public abstract class FilterBase extends Filter { } /** - * Filters that never filter by modifying the returned List of Cells can - * inherit this implementation that does nothing. - * - * {@inheritDoc} + * Filters that never filter by modifying the returned List of Cells can inherit this + * implementation that does nothing. {@inheritDoc} */ @Override public void filterRowCells(List ignored) throws IOException { } /** - * Fitlers that never filter by modifying the returned List of Cells can - * inherit this implementation that does nothing. - * - * {@inheritDoc} + * Fitlers that never filter by modifying the returned List of Cells can inherit this + * implementation that does nothing. {@inheritDoc} */ @Override public boolean hasFilterRow() { @@ -112,9 +95,7 @@ public abstract class FilterBase extends Filter { /** * Filters that never filter by rows based on previously gathered state from - * {@link #filterCell(Cell)} can inherit this implementation that - * never filters a row. - * + * {@link #filterCell(Cell)} can inherit this implementation that never filters a row. * {@inheritDoc} */ @Override @@ -123,10 +104,8 @@ public abstract class FilterBase extends Filter { } /** - * Filters that are not sure which key must be next seeked to, can inherit - * this implementation that, by default, returns a null Cell. 
- * - * {@inheritDoc} + * Filters that are not sure which key must be next seeked to, can inherit this implementation + * that, by default, returns a null Cell. {@inheritDoc} */ @Override public Cell getNextCellHint(Cell currentCell) throws IOException { @@ -134,10 +113,8 @@ public abstract class FilterBase extends Filter { } /** - * By default, we require all scan's column families to be present. Our - * subclasses may be more precise. - * - * {@inheritDoc} + * By default, we require all scan's column families to be present. Our subclasses may be more + * precise. {@inheritDoc} */ @Override public boolean isFamilyEssential(byte[] name) throws IOException { @@ -150,7 +127,7 @@ public abstract class FilterBase extends Filter { * @param filterArguments the filter's arguments * @return constructed filter object */ - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { throw new IllegalArgumentException("This method has not been implemented"); } @@ -171,11 +148,9 @@ public abstract class FilterBase extends Filter { } /** - * Default implementation so that writers of custom filters aren't forced to implement. - * - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * Default implementation so that writers of custom filters aren't forced to implement. n + * * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index 1f14a45279e..6daf97e931e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,12 +23,12 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; @@ -61,7 +60,7 @@ final public class FilterList extends FilterBase { /** * Constructor that takes a set of {@link Filter}s and an operator. * @param operator Operator to process filter set with. - * @param filters Set of row filters. + * @param filters Set of row filters. */ public FilterList(final Operator operator, final List filters) { if (operator == Operator.MUST_PASS_ALL) { @@ -85,8 +84,7 @@ final public class FilterList extends FilterBase { /** * Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL - * is assumed. - * @param filters + * is assumed. n */ public FilterList(final Filter... 
filters) { this(Operator.MUST_PASS_ALL, Arrays.asList(filters)); @@ -103,23 +101,21 @@ final public class FilterList extends FilterBase { /** * Constructor that takes a var arg number of {@link Filter}s and an operator. * @param operator Operator to process filter set with. - * @param filters Filters to use + * @param filters Filters to use */ public FilterList(final Operator operator, final Filter... filters) { this(operator, Arrays.asList(filters)); } /** - * Get the operator. - * @return operator + * Get the operator. n */ public Operator getOperator() { return operator; } /** - * Get the filters. - * @return filters + * Get the filters. n */ public List getFilters() { return filterListBase.getFilters(); @@ -212,9 +208,7 @@ final public class FilterList extends FilterBase { /** * @param pbBytes A pb serialized {@link FilterList} instance - * @return An instance of {@link FilterList} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FilterList} made from bytes n * @see #toByteArray */ public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FilterList proto; @@ -237,9 +231,8 @@ final public class FilterList extends FilterBase { } /** - * @param other - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter other) { @@ -248,7 +241,7 @@ final public class FilterList extends FilterBase { FilterList o = (FilterList) other; return this.getOperator().equals(o.getOperator()) - && ((this.getFilters() == o.getFilters()) || this.getFilters().equals(o.getFilters())); + && ((this.getFilters() == o.getFilters()) || this.getFilters().equals(o.getFilters())); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java index ad24d0e47d9..2d36172064d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.yetus.audience.InterfaceAudience; @@ -95,8 +92,7 @@ public abstract class FilterListBase extends FilterBase { * the current child, we should set the traverse result (transformed cell) of previous node(s) as * the initial value. (HBASE-18879). * @param c The cell in question. - * @return the transformed cell. - * @throws IOException + * @return the transformed cell. 
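A short usage sketch of the FilterList constructors and operators shown above (the prefix value and the wrapped filters are made up for illustration): MUST_PASS_ALL behaves like a logical AND over the wrapped filters, MUST_PASS_ONE like a logical OR.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListExample {
  static Scan buildScan() {
    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL,
        new PrefixFilter(Bytes.toBytes("user_")), // only rows starting with the prefix
        new KeyOnlyFilter());                     // strip cell values, keep the keys
    return new Scan().setFilter(list);
  }
}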
n */ @Override public Cell transformCell(Cell c) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java index 76cda02932c..6ea8e14257c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; -import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; +import org.apache.hadoop.hbase.Cell; +import org.apache.yetus.audience.InterfaceAudience; /** * FilterListWithAND represents an ordered list of filters which will be evaluated with an AND @@ -72,7 +69,8 @@ public class FilterListWithAND extends FilterListBase { * The jump step will be: * *
      -   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW < SEEK_NEXT_USING_HINT
      +   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW
      +   *     < SEEK_NEXT_USING_HINT
    * </pre>
      * * Here, we have the following map to describe The Maximal Step Rule. if current return code (for @@ -91,7 +89,7 @@ public class FilterListWithAND extends FilterListBase { * SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT * * - * @param rc Return code which is calculated by previous sub-filter(s) in filter list. + * @param rc Return code which is calculated by previous sub-filter(s) in filter list. * @param localRC Return code of the current sub-filter in filter list. * @return Return code which is merged by the return code of previous sub-filter(s) and the return * code of current sub-filter. @@ -120,8 +118,10 @@ public class FilterListWithAND extends FilterListBase { } break; case INCLUDE_AND_SEEK_NEXT_ROW: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; } if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { @@ -140,8 +140,10 @@ public class FilterListWithAND extends FilterListBase { } break; case NEXT_COL: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.SKIP, - ReturnCode.NEXT_COL)) { + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.SKIP, + ReturnCode.NEXT_COL) + ) { return ReturnCode.NEXT_COL; } if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, ReturnCode.NEXT_ROW)) { @@ -152,7 +154,7 @@ public class FilterListWithAND extends FilterListBase { return ReturnCode.NEXT_ROW; } throw new IllegalStateException( - "Received code is not valid. rc: " + rc + ", localRC: " + localRC); + "Received code is not valid. rc: " + rc + ", localRC: " + localRC); } private boolean isIncludeRelatedReturnCode(ReturnCode rc) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java index 28540a4fa13..690b32babba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.yetus.audience.InterfaceAudience; /** * FilterListWithOR represents an ordered list of filters which will be evaluated with an OR @@ -82,39 +79,40 @@ public class FilterListWithOR extends FilterListBase { * next family for RegionScanner, INCLUDE_AND_NEXT_ROW is the same. so we should pass current cell * to the filter, if row mismatch or row match but column family mismatch. 
(HBASE-18368) * @see org.apache.hadoop.hbase.filter.Filter.ReturnCode - * @param subFilter which sub-filter to calculate the return code by using previous cell and - * previous return code. - * @param prevCell the previous cell passed to given sub-filter. + * @param subFilter which sub-filter to calculate the return code by using previous cell and + * previous return code. + * @param prevCell the previous cell passed to given sub-filter. * @param currentCell the current cell which will pass to given sub-filter. - * @param prevCode the previous return code for given sub-filter. + * @param prevCode the previous return code for given sub-filter. * @return return code calculated by using previous cell and previous return code. null means can * not decide which return code should return, so we will pass the currentCell to * subFilter for getting currentCell's return code, and it won't impact the sub-filter's * internal states. */ private ReturnCode calculateReturnCodeByPrevCellAndRC(Filter subFilter, Cell currentCell, - Cell prevCell, ReturnCode prevCode) throws IOException { + Cell prevCell, ReturnCode prevCode) throws IOException { if (prevCell == null || prevCode == null) { return null; } switch (prevCode) { - case INCLUDE: - case SKIP: + case INCLUDE: + case SKIP: return null; - case SEEK_NEXT_USING_HINT: + case SEEK_NEXT_USING_HINT: Cell nextHintCell = subFilter.getNextCellHint(prevCell); return nextHintCell != null && compareCell(currentCell, nextHintCell) < 0 - ? ReturnCode.SEEK_NEXT_USING_HINT : null; - case NEXT_COL: - case INCLUDE_AND_NEXT_COL: + ? ReturnCode.SEEK_NEXT_USING_HINT + : null; + case NEXT_COL: + case INCLUDE_AND_NEXT_COL: // Once row changed, reset() will clear prevCells, so we need not to compare their rows // because rows are the same here. return CellUtil.matchingColumn(prevCell, currentCell) ? ReturnCode.NEXT_COL : null; - case NEXT_ROW: - case INCLUDE_AND_SEEK_NEXT_ROW: + case NEXT_ROW: + case INCLUDE_AND_SEEK_NEXT_ROW: // As described above, rows are definitely the same, so we only compare the family. return CellUtil.matchingFamily(prevCell, currentCell) ? ReturnCode.NEXT_ROW : null; - default: + default: throw new IllegalStateException("Received code is not valid."); } } @@ -129,7 +127,8 @@ public class FilterListWithOR extends FilterListBase { * The jump step will be: * *
      -   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW < SEEK_NEXT_USING_HINT
      +   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW
      +   *     < SEEK_NEXT_USING_HINT
    * </pre>
      * * Here, we have the following map to describe The Minimal Step Rule. if current return code (for @@ -148,7 +147,7 @@ public class FilterListWithOR extends FilterListBase { * SEEK_NEXT_USING_HINT INCLUDE INCLUDE INCLUDE SKIP SKIP SKIP SEEK_NEXT_USING_HINT * * - * @param rc Return code which is calculated by previous sub-filter(s) in filter list. + * @param rc Return code which is calculated by previous sub-filter(s) in filter list. * @param localRC Return code of the current sub-filter in filter list. * @return Return code which is merged by the return code of previous sub-filter(s) and the return * code of current sub-filter. @@ -156,90 +155,101 @@ public class FilterListWithOR extends FilterListBase { private ReturnCode mergeReturnCode(ReturnCode rc, ReturnCode localRC) { if (rc == null) return localRC; switch (localRC) { - case INCLUDE: - return ReturnCode.INCLUDE; - case INCLUDE_AND_NEXT_COL: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, - ReturnCode.SEEK_NEXT_USING_HINT)) { + case INCLUDE: return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, - ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - break; - case INCLUDE_AND_SEEK_NEXT_ROW: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, - ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.NEXT_COL)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, ReturnCode.NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; - } - break; - case SKIP: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW, - ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.SKIP; - } - break; - case NEXT_COL: - if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { - return ReturnCode.NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.SKIP; - } - break; - case NEXT_ROW: - if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.SKIP; - } - if (isInReturnCodes(rc, ReturnCode.NEXT_COL)) { - return ReturnCode.NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.NEXT_ROW)) { - return ReturnCode.NEXT_ROW; - } - break; - case SEEK_NEXT_USING_HINT: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { - return ReturnCode.SKIP; - } - if (isInReturnCodes(rc, ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.SEEK_NEXT_USING_HINT; - } - break; + case 
INCLUDE_AND_NEXT_COL: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT) + ) { + return ReturnCode.INCLUDE; + } + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, + ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW) + ) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + break; + case INCLUDE_AND_SEEK_NEXT_ROW: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT) + ) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.NEXT_COL)) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, ReturnCode.NEXT_ROW)) { + return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; + } + break; + case SKIP: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { + return ReturnCode.INCLUDE; + } + if ( + isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW, + ReturnCode.SEEK_NEXT_USING_HINT) + ) { + return ReturnCode.SKIP; + } + break; + case NEXT_COL: + if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { + return ReturnCode.NEXT_COL; + } + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { + return ReturnCode.SKIP; + } + break; + case NEXT_ROW: + if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL)) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { + return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; + } + if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { + return ReturnCode.SKIP; + } + if (isInReturnCodes(rc, ReturnCode.NEXT_COL)) { + return ReturnCode.NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.NEXT_ROW)) { + return ReturnCode.NEXT_ROW; + } + break; + case SEEK_NEXT_USING_HINT: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { + return ReturnCode.SKIP; + } + if (isInReturnCodes(rc, ReturnCode.SEEK_NEXT_USING_HINT)) { + return ReturnCode.SEEK_NEXT_USING_HINT; + } + break; } throw new IllegalStateException( - "Received code is not valid. rc: " + rc + ", localRC: " + localRC); + "Received code is not valid. rc: " + rc + ", localRC: " + localRC); } private void updatePrevFilterRCList(int index, ReturnCode currentRC) { @@ -287,8 +297,10 @@ public class FilterListWithOR extends FilterListBase { rc = mergeReturnCode(rc, localRC); // For INCLUDE* case, we need to update the transformed cell. 
- if (isInReturnCodes(localRC, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { + if ( + isInReturnCodes(localRC, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { subFiltersIncludedCell.set(i, true); } } @@ -395,7 +407,6 @@ public class FilterListWithOR extends FilterListBase { return minKeyHint; } - @Override public boolean equals(Object obj) { if (obj == null || (!(obj instanceof FilterListWithOR))) { @@ -405,9 +416,8 @@ public class FilterListWithOR extends FilterListBase { return true; } FilterListWithOR f = (FilterListWithOR) obj; - return this.filters.equals(f.getFilters()) && - this.prevFilterRCList.equals(f.prevFilterRCList) && - this.prevCellList.equals(f.prevCellList); + return this.filters.equals(f.getFilters()) && this.prevFilterRCList.equals(f.prevFilterRCList) + && this.prevCellList.equals(f.prevCellList); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java index a258ec17c31..fd6eddf005e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +20,15 @@ package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** * A filter that will only return the first KV from each row. *

      @@ -61,14 +60,14 @@ public class FirstKeyOnlyFilter extends FilterBase { @Override public ReturnCode filterCell(final Cell c) { - if(foundKV) return ReturnCode.NEXT_ROW; + if (foundKV) return ReturnCode.NEXT_ROW; foundKV = true; return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.isEmpty(), - "Expected 0 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.isEmpty(), "Expected 0 but got: %s", + filterArguments.size()); return new FirstKeyOnlyFilter(); } @@ -80,7 +79,6 @@ public class FirstKeyOnlyFilter extends FilterBase { } /** - * * @param value update {@link #foundKV} flag with value. */ protected void setFoundKV(boolean value) { @@ -91,9 +89,8 @@ public class FirstKeyOnlyFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.FirstKeyOnlyFilter.Builder builder = - FilterProtos.FirstKeyOnlyFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.FirstKeyOnlyFilter.Builder builder = FilterProtos.FirstKeyOnlyFilter.newBuilder(); return builder.build().toByteArray(); } @@ -103,9 +100,8 @@ public class FirstKeyOnlyFilter extends FilterBase { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static FirstKeyOnlyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - // There is nothing to deserialize. Why do this at all? + public static FirstKeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { + // There is nothing to deserialize. Why do this at all? try { FilterProtos.FirstKeyOnlyFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { @@ -117,8 +113,8 @@ public class FirstKeyOnlyFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java index ed119494cce..d10a6ea8e90 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java @@ -15,33 +15,30 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.filter; import java.util.Objects; import java.util.Set; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * The filter looks for the given columns in KeyValue. Once there is a match for - * any one of the columns, it returns ReturnCode.NEXT_ROW for remaining - * KeyValues in the row. + * The filter looks for the given columns in KeyValue. Once there is a match for any one of the + * columns, it returns ReturnCode.NEXT_ROW for remaining KeyValues in the row. *

      - * Note : It may emit KVs which do not have the given columns in them, if - * these KVs happen to occur before a KV which does have a match. Given this - * caveat, this filter is only useful for special cases - * like org.apache.hadoop.hbase.mapreduce.RowCounter. + * Note : It may emit KVs which do not have the given columns in them, if these KVs happen to occur + * before a KV which does have a match. Given this caveat, this filter is only useful for special + * cases like org.apache.hadoop.hbase.mapreduce.RowCounter. *

      * @deprecated Deprecated in 2.0.0 and will be removed in 3.0.0. * @see HBASE-13347 @@ -50,15 +47,14 @@ import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; @Deprecated public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { - private Set qualifiers; + private Set qualifiers; /** - * Constructor which takes a set of columns. As soon as first KeyValue - * matching any of these columns is found, filter moves to next row. - * + * Constructor which takes a set of columns. As soon as first KeyValue matching any of these + * columns is found, filter moves to next row. * @param qualifiers the set of columns to me matched. */ - public FirstKeyValueMatchingQualifiersFilter(Set qualifiers) { + public FirstKeyValueMatchingQualifiersFilter(Set qualifiers) { this.qualifiers = qualifiers; } @@ -91,7 +87,7 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.FirstKeyValueMatchingQualifiersFilter.Builder builder = FilterProtos.FirstKeyValueMatchingQualifiersFilter.newBuilder(); for (byte[] qualifier : qualifiers) { @@ -102,12 +98,11 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { /** * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance - * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from + * bytes n * @see #toByteArray */ - public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.FirstKeyValueMatchingQualifiersFilter proto; try { proto = FilterProtos.FirstKeyValueMatchingQualifiersFilter.parseFrom(pbBytes); @@ -115,7 +110,7 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { throw new DeserializationException(e); } - TreeSet qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR); + TreeSet qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR); for (ByteString qualifier : proto.getQualifiersList()) { qualifiers.add(qualifier.toByteArray()); } @@ -124,15 +119,15 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
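The toByteArray()/parseFrom(byte[]) pair documented in these hunks is the protobuf serialization contract that every filter follows. A simple round trip, sketched here with FirstKeyOnlyFilter from the previous file as the example, looks like this:

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class FilterPbRoundTrip {
  static FirstKeyOnlyFilter roundTrip() throws DeserializationException {
    FirstKeyOnlyFilter original = new FirstKeyOnlyFilter();
    byte[] pb = original.toByteArray();       // client side: serialize to protobuf bytes
    return FirstKeyOnlyFilter.parseFrom(pb);  // server side: rebuild from those bytes
  }
}

In practice the region server locates the concrete filter class and invokes its static parseFrom reflectively, which is why each filter is expected to provide one.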
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof FirstKeyValueMatchingQualifiersFilter)) return false; - FirstKeyValueMatchingQualifiersFilter other = (FirstKeyValueMatchingQualifiersFilter)o; + FirstKeyValueMatchingQualifiersFilter other = (FirstKeyValueMatchingQualifiersFilter) o; return this.qualifiers.equals(other.qualifiers); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index 6f8e9916255..5f3e2b3da73 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,8 +101,8 @@ public class FuzzyRowFilter extends FilterBase { for (Pair aFuzzyKeysData : fuzzyKeysData) { if (aFuzzyKeysData.getFirst().length != aFuzzyKeysData.getSecond().length) { - Pair readable = - new Pair<>(Bytes.toStringBinary(aFuzzyKeysData.getFirst()), Bytes.toStringBinary(aFuzzyKeysData.getSecond())); + Pair readable = new Pair<>(Bytes.toStringBinary(aFuzzyKeysData.getFirst()), + Bytes.toStringBinary(aFuzzyKeysData.getSecond())); throw new IllegalArgumentException("Fuzzy pair lengths do not match: " + readable); } @@ -138,9 +138,7 @@ public class FuzzyRowFilter extends FilterBase { /** * We need to preprocess mask array, as since we treat 2's as unfixed positions and -1 (0xff) as - * fixed positions - * @param mask - * @return mask array + * fixed positions n * @return mask array */ private byte[] preprocessMask(byte[] mask) { if (!UNSAFE_UNALIGNED) { @@ -181,9 +179,8 @@ public class FuzzyRowFilter extends FilterBase { final int index = i % size; Pair fuzzyData = fuzzyKeysData.get(index); idempotentMaskShift(fuzzyData.getSecond()); - SatisfiesCode satisfiesCode = - satisfies(isReversed(), c.getRowArray(), c.getRowOffset(), c.getRowLength(), - fuzzyData.getFirst(), fuzzyData.getSecond()); + SatisfiesCode satisfiesCode = satisfies(isReversed(), c.getRowArray(), c.getRowOffset(), + c.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond()); if (satisfiesCode == SatisfiesCode.YES) { lastFoundIndex = index; return ReturnCode.INCLUDE; @@ -229,14 +226,15 @@ public class FuzzyRowFilter extends FilterBase { RowTracker() { nextRows = new PriorityQueue<>(fuzzyKeysData.size(), - new Comparator>>() { - @Override - public int compare(Pair> o1, - Pair> o2) { - return isReversed()? Bytes.compareTo(o2.getFirst(), o1.getFirst()): - Bytes.compareTo(o1.getFirst(), o2.getFirst()); - } - }); + new Comparator>>() { + @Override + public int compare(Pair> o1, + Pair> o2) { + return isReversed() + ? 
Bytes.compareTo(o2.getFirst(), o1.getFirst()) + : Bytes.compareTo(o1.getFirst(), o2.getFirst()); + } + }); } byte[] nextRow() { @@ -265,14 +263,15 @@ public class FuzzyRowFilter extends FilterBase { } boolean lessThan(Cell currentCell, byte[] nextRowKey) { - int compareResult = CellComparator.getInstance().compareRows(currentCell, nextRowKey, 0, nextRowKey.length); + int compareResult = + CellComparator.getInstance().compareRows(currentCell, nextRowKey, 0, nextRowKey.length); return (!isReversed() && compareResult < 0) || (isReversed() && compareResult > 0); } void updateWith(Cell currentCell, Pair fuzzyData) { byte[] nextRowKeyCandidate = - getNextForFuzzyRule(isReversed(), currentCell.getRowArray(), currentCell.getRowOffset(), - currentCell.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond()); + getNextForFuzzyRule(isReversed(), currentCell.getRowArray(), currentCell.getRowOffset(), + currentCell.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond()); if (nextRowKeyCandidate != null) { nextRows.add(new Pair<>(nextRowKeyCandidate, fuzzyData)); } @@ -290,9 +289,8 @@ public class FuzzyRowFilter extends FilterBase { */ @Override public byte[] toByteArray() { - FilterProtos.FuzzyRowFilter.Builder builder = FilterProtos.FuzzyRowFilter - .newBuilder() - .setIsMaskV2(processedWildcardMask == V2_PROCESSED_WILDCARD_MASK); + FilterProtos.FuzzyRowFilter.Builder builder = FilterProtos.FuzzyRowFilter.newBuilder() + .setIsMaskV2(processedWildcardMask == V2_PROCESSED_WILDCARD_MASK); for (Pair fuzzyData : fuzzyKeysData) { BytesBytesPair.Builder bbpBuilder = BytesBytesPair.newBuilder(); bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(fuzzyData.getFirst())); @@ -304,9 +302,8 @@ public class FuzzyRowFilter extends FilterBase { /** * @param pbBytes A pb serialized {@link FuzzyRowFilter} instance - * @return An instance of {@link FuzzyRowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FuzzyRowFilter} made from bytes n * @see + * #toByteArray */ public static FuzzyRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FuzzyRowFilter proto; @@ -324,8 +321,8 @@ public class FuzzyRowFilter extends FilterBase { fuzzyKeysData.add(new Pair<>(keyBytes, keyMeta)); } byte processedWildcardMask = proto.hasIsMaskV2() && proto.getIsMaskV2() - ? V2_PROCESSED_WILDCARD_MASK - : V1_PROCESSED_WILDCARD_MASK; + ? 
V2_PROCESSED_WILDCARD_MASK + : V1_PROCESSED_WILDCARD_MASK; return new FuzzyRowFilter(fuzzyKeysData, processedWildcardMask); } @@ -360,12 +357,12 @@ public class FuzzyRowFilter extends FilterBase { @InterfaceAudience.Private static SatisfiesCode satisfies(boolean reverse, byte[] row, byte[] fuzzyKeyBytes, - byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyMeta) { return satisfies(reverse, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); } static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { if (!UNSAFE_UNALIGNED) { return satisfiesNoUnsafe(reverse, row, offset, length, fuzzyKeyBytes, fuzzyKeyMeta); @@ -429,7 +426,7 @@ public class FuzzyRowFilter extends FilterBase { } static SatisfiesCode satisfiesNoUnsafe(boolean reverse, byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { if (row == null) { // do nothing, let scan to proceed return SatisfiesCode.YES; @@ -481,7 +478,7 @@ public class FuzzyRowFilter extends FilterBase { @InterfaceAudience.Private static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, byte[] fuzzyKeyBytes, - byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyMeta) { return getNextForFuzzyRule(reverse, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); } @@ -568,7 +565,7 @@ public class FuzzyRowFilter extends FilterBase { */ @InterfaceAudience.Private static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { // To find out the next "smallest" byte array that satisfies fuzzy rule and "greater" than // the given one we do the following: // 1. setting values on all "fixed" positions to the values from fuzzyKeyBytes @@ -578,7 +575,7 @@ public class FuzzyRowFilter extends FilterBase { // It is easier to perform this by using fuzzyKeyBytes copy and setting "non-fixed" position // values than otherwise. byte[] result = - Arrays.copyOf(fuzzyKeyBytes, length > fuzzyKeyBytes.length ? length : fuzzyKeyBytes.length); + Arrays.copyOf(fuzzyKeyBytes, length > fuzzyKeyBytes.length ? length : fuzzyKeyBytes.length); if (reverse && length > fuzzyKeyBytes.length) { // we need trailing 0xff's instead of trailing 0x00's for (int i = fuzzyKeyBytes.length; i < result.length; i++) { @@ -628,29 +625,23 @@ public class FuzzyRowFilter extends FilterBase { } } - return reverse? result: trimTrailingZeroes(result, fuzzyKeyMeta, toInc); + return reverse ? result : trimTrailingZeroes(result, fuzzyKeyMeta, toInc); } /** - * For forward scanner, next cell hint should not contain any trailing zeroes - * unless they are part of fuzzyKeyMeta - * hint = '\x01\x01\x01\x00\x00' - * will skip valid row '\x01\x01\x01' - * - * @param result - * @param fuzzyKeyMeta - * @param toInc - position of incremented byte + * For forward scanner, next cell hint should not contain any trailing zeroes unless they are part + * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param + * toInc - position of incremented byte * @return trimmed version of result */ - + private static byte[] trimTrailingZeroes(byte[] result, byte[] fuzzyKeyMeta, int toInc) { - int off = fuzzyKeyMeta.length >= result.length? result.length -1: - fuzzyKeyMeta.length -1; - for( ; off >= 0; off--){ - if(fuzzyKeyMeta[off] != 0) break; + int off = fuzzyKeyMeta.length >= result.length ? 
result.length - 1 : fuzzyKeyMeta.length - 1; + for (; off >= 0; off--) { + if (fuzzyKeyMeta[off] != 0) break; } - if (off < toInc) off = toInc; - byte[] retValue = new byte[off+1]; + if (off < toInc) off = toInc; + byte[] retValue = new byte[off + 1]; System.arraycopy(result, 0, retValue, 0, retValue.length); return retValue; } @@ -669,8 +660,10 @@ public class FuzzyRowFilter extends FilterBase { for (int i = 0; i < fuzzyKeysData.size(); ++i) { Pair thisData = this.fuzzyKeysData.get(i); Pair otherData = other.fuzzyKeysData.get(i); - if (!(Bytes.equals(thisData.getFirst(), otherData.getFirst()) && Bytes.equals( - thisData.getSecond(), otherData.getSecond()))) { + if ( + !(Bytes.equals(thisData.getFirst(), otherData.getFirst()) + && Bytes.equals(thisData.getSecond(), otherData.getSecond())) + ) { return false; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java index cfaf18df21b..b8d43cc5c39 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,34 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * A Filter that stops after the given row. There is no "RowStopFilter" because - * the Scan spec allows you to specify a stop row. - * - * Use this filter to include the stop row, eg: [A,Z]. + * A Filter that stops after the given row. There is no "RowStopFilter" because the Scan spec allows + * you to specify a stop row. Use this filter to include the stop row, eg: [A,Z]. */ @InterfaceAudience.Public public class InclusiveStopFilter extends FilterBase { - private byte [] stopRowKey; + private byte[] stopRowKey; private boolean done = false; - public InclusiveStopFilter(final byte [] stopRowKey) { + public InclusiveStopFilter(final byte[] stopRowKey) { this.stopRowKey = stopRowKey; } @@ -67,7 +63,8 @@ public class InclusiveStopFilter extends FilterBase { public boolean filterRowKey(Cell firstRowCell) { // if stopRowKey is <= buffer, then true, filter row. 
if (filterAllRemaining()) return true; - int cmp = CellComparator.getInstance().compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length); + int cmp = + CellComparator.getInstance().compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length); done = reversed ? cmp < 0 : cmp > 0; return done; } @@ -77,10 +74,10 @@ public class InclusiveStopFilter extends FilterBase { return done; } - public static Filter createFilterFromArguments (ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] stopRowKey = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); + byte[] stopRowKey = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); return new InclusiveStopFilter(stopRowKey); } @@ -88,42 +85,42 @@ public class InclusiveStopFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.InclusiveStopFilter.Builder builder = FilterProtos.InclusiveStopFilter.newBuilder(); - if (this.stopRowKey != null) builder.setStopRowKey( - UnsafeByteOperations.unsafeWrap(this.stopRowKey)); + if (this.stopRowKey != null) + builder.setStopRowKey(UnsafeByteOperations.unsafeWrap(this.stopRowKey)); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link InclusiveStopFilter} instance - * @return An instance of {@link InclusiveStopFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link InclusiveStopFilter} made from bytes n * @see + * #toByteArray */ - public static InclusiveStopFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static InclusiveStopFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.InclusiveStopFilter proto; try { proto = FilterProtos.InclusiveStopFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new InclusiveStopFilter(proto.hasStopRowKey()?proto.getStopRowKey().toByteArray():null); + return new InclusiveStopFilter( + proto.hasStopRowKey() ? proto.getStopRowKey().toByteArray() : null); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
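A usage sketch for the filter documented above (the row keys are illustrative): it lets a scan include its stop row, which the plain, exclusive stop row of a Scan cannot express on its own.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class InclusiveStopExample {
  static Scan scanInclusiveRange() {
    return new Scan()
        .withStartRow(Bytes.toBytes("rowA"))
        .setFilter(new InclusiveStopFilter(Bytes.toBytes("rowZ"))); // [rowA, rowZ], stop row included
  }
}

Newer clients can also use Scan#withStopRow(stopRow, true) for the same effect without a filter.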
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof InclusiveStopFilter)) return false; - InclusiveStopFilter other = (InclusiveStopFilter)o; + InclusiveStopFilter other = (InclusiveStopFilter) o; return Bytes.equals(this.getStopRowKey(), other.getStopRowKey()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java index 4826e05e2a3..c91543d29af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience; public class IncompatibleFilterException extends RuntimeException { private static final long serialVersionUID = 3236763276623198231L; -/** constructor */ + /** constructor */ public IncompatibleFilterException() { super(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java index 0ccdd902e36..9e48cbba1fe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience; public class InvalidRowFilterException extends RuntimeException { private static final long serialVersionUID = 2667894046345657865L; - /** constructor */ public InvalidRowFilterException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java index 9603304520e..6332d404ad1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.Collections; import java.util.Iterator; import java.util.Objects; import java.util.Optional; - import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -39,21 +37,27 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * A filter that will only return the key component of each KV (the value will - * be rewritten as empty). + * A filter that will only return the key component of each KV (the value will be rewritten as + * empty). *

      - * This filter can be used to grab all of the keys without having to also grab - * the values. + * This filter can be used to grab all of the keys without having to also grab the values. */ @InterfaceAudience.Public public class KeyOnlyFilter extends FilterBase { boolean lenAsVal; - public KeyOnlyFilter() { this(false); } - public KeyOnlyFilter(boolean lenAsVal) { this.lenAsVal = lenAsVal; } + + public KeyOnlyFilter() { + this(false); + } + + public KeyOnlyFilter(boolean lenAsVal) { + this.lenAsVal = lenAsVal; + } @Override public boolean filterRowKey(Cell cell) throws IOException { @@ -85,9 +89,9 @@ public class KeyOnlyFilter extends FilterBase { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { Preconditions.checkArgument((filterArguments.isEmpty() || filterArguments.size() == 1), - "Expected: 0 or 1 but got: %s", filterArguments.size()); + "Expected: 0 or 1 but got: %s", filterArguments.size()); KeyOnlyFilter filter = new KeyOnlyFilter(); if (filterArguments.size() == 1) { filter.lenAsVal = ParseFilter.convertByteArrayToBoolean(filterArguments.get(0)); @@ -99,21 +103,17 @@ public class KeyOnlyFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.KeyOnlyFilter.Builder builder = - FilterProtos.KeyOnlyFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.KeyOnlyFilter.Builder builder = FilterProtos.KeyOnlyFilter.newBuilder(); builder.setLenAsVal(this.lenAsVal); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link KeyOnlyFilter} instance - * @return An instance of {@link KeyOnlyFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link KeyOnlyFilter} made from bytes n * @see #toByteArray */ - public static KeyOnlyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static KeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.KeyOnlyFilter proto; try { proto = FilterProtos.KeyOnlyFilter.parseFrom(pbBytes); @@ -125,15 +125,15 @@ public class KeyOnlyFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
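Combined with FirstKeyOnlyFilter from earlier in this patch, the filter above gives the classic row-count style scan that ships almost no data back to the client. A sketch, relying on the default MUST_PASS_ALL operator of FilterList:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

public class KeysOnlyScanExample {
  static Scan keysOnlyScan() {
    return new Scan().setFilter(new FilterList(
        new FirstKeyOnlyFilter(), // at most one cell per row
        new KeyOnlyFilter()));    // and that cell's value is emptied
  }
}

Passing true to the KeyOnlyFilter constructor replaces each value with its original length instead of an empty value.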
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof KeyOnlyFilter)) return false; - KeyOnlyFilter other = (KeyOnlyFilter)o; + KeyOnlyFilter other = (KeyOnlyFilter) o; return this.lenAsVal == other.lenAsVal; } @@ -218,7 +218,6 @@ public class KeyOnlyFilter extends FilterBase { return cell.getType(); } - @Override public long getSequenceId() { return 0; @@ -274,8 +273,8 @@ public class KeyOnlyFilter extends FilterBase { } static class KeyOnlyByteBufferExtendedCell extends ByteBufferExtendedCell { - public static final int FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE - + Bytes.SIZEOF_BOOLEAN; + public static final int FIXED_OVERHEAD = + ClassSize.OBJECT + ClassSize.REFERENCE + Bytes.SIZEOF_BOOLEAN; private ByteBufferExtendedCell cell; private boolean lenAsVal; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java index 53198732e39..ead0ee10447 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.ByteBufferUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.hadoop.hbase.util.Bytes; - /** * A long comparator which numerical compares against the specified byte array @@ -55,41 +53,38 @@ public class LongComparator extends ByteArrayComparable { return Long.compare(longValue, that); } - /** - * @return The comparator serialized using pb - */ - @Override - public byte [] toByteArray() { - ComparatorProtos.LongComparator.Builder builder = - ComparatorProtos.LongComparator.newBuilder(); - builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); - return builder.build().toByteArray(); - } + /** + * @return The comparator serialized using pb + */ + @Override + public byte[] toByteArray() { + ComparatorProtos.LongComparator.Builder builder = ComparatorProtos.LongComparator.newBuilder(); + builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); + return builder.build().toByteArray(); + } - /** - * @param pbBytes A pb serialized {@link LongComparator} instance - * @return An instance of {@link LongComparator} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException - * @see #toByteArray - */ - public static LongComparator 
parseFrom(final byte [] pbBytes) - throws DeserializationException { - ComparatorProtos.LongComparator proto; - try { - proto = ComparatorProtos.LongComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new LongComparator(Bytes.toLong(proto.getComparable().getValue().toByteArray())); + /** + * @param pbBytes A pb serialized {@link LongComparator} instance + * @return An instance of {@link LongComparator} made from bytes + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @see #toByteArray + */ + public static LongComparator parseFrom(final byte[] pbBytes) throws DeserializationException { + ComparatorProtos.LongComparator proto; + try { + proto = ComparatorProtos.LongComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); } + return new LongComparator(Bytes.toLong(proto.getComparable().getValue().toByteArray())); + } - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(LongComparator other) { - if (other == this) return true; - return super.areSerializedFieldsEqual(other); - } + /** + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(LongComparator other) { + if (other == this) return true; + return super.areSerializedFieldsEqual(other); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java index 6ab200321a2..55026e8f1bf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; @@ -30,24 +29,25 @@ import org.apache.hadoop.hbase.client.ClientUtil; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** * Filter to support scan multiple row key ranges. It can construct the row key ranges from the - * passed list which can be accessed by each region server. - * - * HBase is quite efficient when scanning only one small row key range. If user needs to specify - * multiple row key ranges in one scan, the typical solutions are: 1. through FilterList which is a - * list of row key Filters, 2. using the SQL layer over HBase to join with two table, such as hive, - * phoenix etc. However, both solutions are inefficient. Both of them can't utilize the range info - * to perform fast forwarding during scan which is quite time consuming. If the number of ranges - * are quite big (e.g. millions), join is a proper solution though it is slow. 
However, there are - * cases that user wants to specify a small number of ranges to scan (e.g. <1000 ranges). Both - * solutions can't provide satisfactory performance in such case. MultiRowRangeFilter is to support - * such usec ase (scan multiple row key ranges), which can construct the row key ranges from user - * specified list and perform fast-forwarding during scan. Thus, the scan will be quite efficient. + * passed list which can be accessed by each region server. HBase is quite efficient when scanning + * only one small row key range. If user needs to specify multiple row key ranges in one scan, the + * typical solutions are: 1. through FilterList which is a list of row key Filters, 2. using the SQL + * layer over HBase to join with two table, such as hive, phoenix etc. However, both solutions are + * inefficient. Both of them can't utilize the range info to perform fast forwarding during scan + * which is quite time consuming. If the number of ranges are quite big (e.g. millions), join is a + * proper solution though it is slow. However, there are cases that user wants to specify a small + * number of ranges to scan (e.g. <1000 ranges). Both solutions can't provide satisfactory + * performance in such case. MultiRowRangeFilter is to support such usec ase (scan multiple row key + * ranges), which can construct the row key ranges from user specified list and perform + * fast-forwarding during scan. Thus, the scan will be quite efficient. */ @InterfaceAudience.Public public class MultiRowRangeFilter extends FilterBase { @@ -73,13 +73,11 @@ public class MultiRowRangeFilter extends FilterBase { } /** - * Constructor for creating a MultiRowRangeFilter from multiple rowkey prefixes. - * - * As MultiRowRangeFilter javadoc says (See the solution 1 of the first statement), - * if you try to create a filter list that scans row keys corresponding to given prefixes (e.g., + * Constructor for creating a MultiRowRangeFilter from multiple rowkey prefixes. As + * MultiRowRangeFilter javadoc says (See the solution 1 of the first statement), if + * you try to create a filter list that scans row keys corresponding to given prefixes (e.g., * FilterList composed of multiple PrefixFilters), this constructor * provides a way to avoid creating an inefficient one. 
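A hypothetical sketch of the multi-range scan described above (not part of this patch; the row keys and class name are made up) -- several disjoint ranges go through one MultiRowRangeFilter instead of a FilterList of PrefixFilters:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
    import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiRangeScanSketch {
      public static Scan buildScan() {
        // Two disjoint row-key ranges served by a single filter; the filter sorts
        // and merges the ranges and fast-forwards between them during the scan.
        List<RowRange> ranges = Arrays.asList(
          new RowRange(Bytes.toBytes("row-0100"), true, Bytes.toBytes("row-0200"), false),
          new RowRange(Bytes.toBytes("row-0500"), true, Bytes.toBytes("row-0600"), false));
        Scan scan = new Scan();
        scan.setFilter(new MultiRowRangeFilter(ranges));
        return scan;
      }
    }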
- * * @param rowKeyPrefixes the array of byte array */ public MultiRowRangeFilter(byte[][] rowKeyPrefixes) { @@ -92,7 +90,7 @@ public class MultiRowRangeFilter extends FilterBase { } List list = new ArrayList<>(); - for (byte[] rowKeyPrefix: rowKeyPrefixes) { + for (byte[] rowKeyPrefix : rowKeyPrefixes) { byte[] stopRow = ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowKeyPrefix); list.add(new RowRange(rowKeyPrefix, true, stopRow, false)); } @@ -135,7 +133,7 @@ public class MultiRowRangeFilter extends FilterBase { currentReturnCode = ReturnCode.NEXT_ROW; return false; } - if(index != ROW_BEFORE_FIRST_RANGE) { + if (index != ROW_BEFORE_FIRST_RANGE) { range = ranges.get(index); } else { range = ranges.get(0); @@ -146,7 +144,7 @@ public class MultiRowRangeFilter extends FilterBase { return false; } if (!ranges.hasFoundFirstRange()) { - if(index != ROW_BEFORE_FIRST_RANGE) { + if (index != ROW_BEFORE_FIRST_RANGE) { currentReturnCode = ReturnCode.INCLUDE; } else { currentReturnCode = ReturnCode.SEEK_NEXT_USING_HINT; @@ -189,8 +187,8 @@ public class MultiRowRangeFilter extends FilterBase { */ @Override public byte[] toByteArray() { - FilterProtos.MultiRowRangeFilter.Builder builder = FilterProtos.MultiRowRangeFilter - .newBuilder(); + FilterProtos.MultiRowRangeFilter.Builder builder = + FilterProtos.MultiRowRangeFilter.newBuilder(); for (RowRange range : rangeList) { if (range != null) { FilterProtos.RowRange.Builder rangebuilder = FilterProtos.RowRange.newBuilder(); @@ -212,7 +210,7 @@ public class MultiRowRangeFilter extends FilterBase { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException */ public static MultiRowRangeFilter parseFrom(final byte[] pbBytes) - throws DeserializationException { + throws DeserializationException { FilterProtos.MultiRowRangeFilter proto; try { proto = FilterProtos.MultiRowRangeFilter.parseFrom(pbBytes); @@ -223,9 +221,11 @@ public class MultiRowRangeFilter extends FilterBase { List rangeProtos = proto.getRowRangeListList(); List rangeList = new ArrayList<>(length); for (FilterProtos.RowRange rangeProto : rangeProtos) { - RowRange range = new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow() - .toByteArray() : null, rangeProto.getStartRowInclusive(), rangeProto.hasStopRow() ? - rangeProto.getStopRow().toByteArray() : null, rangeProto.getStopRowInclusive()); + RowRange range = + new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow().toByteArray() : null, + rangeProto.getStartRowInclusive(), + rangeProto.hasStopRow() ? 
rangeProto.getStopRow().toByteArray() : null, + rangeProto.getStopRowInclusive()); rangeList.add(range); } return new MultiRowRangeFilter(rangeList); @@ -238,21 +238,20 @@ public class MultiRowRangeFilter extends FilterBase { */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) - return true; - if (!(o instanceof MultiRowRangeFilter)) - return false; + if (o == this) return true; + if (!(o instanceof MultiRowRangeFilter)) return false; MultiRowRangeFilter other = (MultiRowRangeFilter) o; - if (this.rangeList.size() != other.rangeList.size()) - return false; + if (this.rangeList.size() != other.rangeList.size()) return false; for (int i = 0; i < rangeList.size(); ++i) { RowRange thisRange = this.rangeList.get(i); RowRange otherRange = other.rangeList.get(i); - if (!(Bytes.equals(thisRange.startRow, otherRange.startRow) && Bytes.equals( - thisRange.stopRow, otherRange.stopRow) && (thisRange.startRowInclusive == - otherRange.startRowInclusive) && (thisRange.stopRowInclusive == - otherRange.stopRowInclusive))) { + if ( + !(Bytes.equals(thisRange.startRow, otherRange.startRow) + && Bytes.equals(thisRange.stopRow, otherRange.stopRow) + && (thisRange.startRowInclusive == otherRange.startRowInclusive) + && (thisRange.stopRowInclusive == otherRange.stopRowInclusive)) + ) { return false; } } @@ -261,7 +260,6 @@ public class MultiRowRangeFilter extends FilterBase { /** * sort the ranges and if the ranges with overlap, then merge them. - * * @param ranges the list of ranges to sort and merge. * @return the ranges after sort and merge. */ @@ -272,7 +270,7 @@ public class MultiRowRangeFilter extends FilterBase { List invalidRanges = new ArrayList<>(); List newRanges = new ArrayList<>(ranges.size()); Collections.sort(ranges); - if(ranges.get(0).isValid()) { + if (ranges.get(0).isValid()) { if (ranges.size() == 1) { newRanges.add(ranges.get(0)); } @@ -290,30 +288,32 @@ public class MultiRowRangeFilter extends FilterBase { if (!range.isValid()) { invalidRanges.add(range); } - if(Bytes.equals(lastStopRow, HConstants.EMPTY_BYTE_ARRAY)) { - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + if (Bytes.equals(lastStopRow, HConstants.EMPTY_BYTE_ARRAY)) { + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); break; } // with overlap in the ranges - if ((Bytes.compareTo(lastStopRow, range.startRow) > 0) || - (Bytes.compareTo(lastStopRow, range.startRow) == 0 && !(lastStopRowInclusive == false && - range.isStartRowInclusive() == false))) { - if(Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { + if ( + (Bytes.compareTo(lastStopRow, range.startRow) > 0) + || (Bytes.compareTo(lastStopRow, range.startRow) == 0 + && !(lastStopRowInclusive == false && range.isStartRowInclusive() == false)) + ) { + if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, range.stopRow, - range.stopRowInclusive)); + range.stopRowInclusive)); break; } // if first range contains second range, ignore the second range if (Bytes.compareTo(lastStopRow, range.stopRow) >= 0) { - if((Bytes.compareTo(lastStopRow, range.stopRow) == 0)) { - if(lastStopRowInclusive == true || range.stopRowInclusive == true) { + if ((Bytes.compareTo(lastStopRow, range.stopRow) == 0)) { + if (lastStopRowInclusive == true || range.stopRowInclusive == true) { lastStopRowInclusive = true; } } if ((i + 1) == ranges.size()) { - newRanges.add(new RowRange(lastStartRow, 
lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); } } else { lastStopRow = range.stopRow; @@ -325,19 +325,21 @@ public class MultiRowRangeFilter extends FilterBase { invalidRanges.add(range); } } else { - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); break; } - while ((Bytes.compareTo(lastStopRow, range.startRow) > 0) || - (Bytes.compareTo(lastStopRow, range.startRow) == 0 && - (lastStopRowInclusive == true || range.startRowInclusive==true))) { - if(Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { + while ( + (Bytes.compareTo(lastStopRow, range.startRow) > 0) + || (Bytes.compareTo(lastStopRow, range.startRow) == 0 + && (lastStopRowInclusive == true || range.startRowInclusive == true)) + ) { + if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { break; } // if this first range contain second range, ignore the second range if (Bytes.compareTo(lastStopRow, range.stopRow) >= 0) { - if(lastStopRowInclusive == true || range.stopRowInclusive == true) { + if (lastStopRowInclusive == true || range.stopRowInclusive == true) { lastStopRowInclusive = true; } i++; @@ -363,21 +365,23 @@ public class MultiRowRangeFilter extends FilterBase { } } } - if(Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { - if((Bytes.compareTo(lastStopRow, range.startRow) < 0) || - (Bytes.compareTo(lastStopRow, range.startRow) == 0 && - lastStopRowInclusive == false && range.startRowInclusive == false)) { + if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { + if ( + (Bytes.compareTo(lastStopRow, range.startRow) < 0) + || (Bytes.compareTo(lastStopRow, range.startRow) == 0 + && lastStopRowInclusive == false && range.startRowInclusive == false) + ) { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + lastStopRowInclusive)); newRanges.add(range); } else { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, range.stopRow, - range.stopRowInclusive)); + range.stopRowInclusive)); break; } } - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); if ((i + 1) == ranges.size()) { newRanges.add(range); } @@ -387,8 +391,8 @@ public class MultiRowRangeFilter extends FilterBase { lastStopRowInclusive = range.stopRowInclusive; } } else { - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); if ((i + 1) == ranges.size()) { newRanges.add(range); } @@ -399,8 +403,8 @@ public class MultiRowRangeFilter extends FilterBase { } } // check the remaining ranges - for(int j=i; j < ranges.size(); j++) { - if(!ranges.get(j).isValid()) { + for (int j = i; j < ranges.size(); j++) { + if (!ranges.get(j).isValid()) { invalidRanges.add(ranges.get(j)); } } @@ -409,21 +413,20 @@ public class MultiRowRangeFilter extends FilterBase { throwExceptionForInvalidRanges(invalidRanges, true); } // If no valid ranges found, throw the exception - if(newRanges.isEmpty()) { + if (newRanges.isEmpty()) { throw new IllegalArgumentException("No valid ranges found."); } return newRanges; 
} private static void throwExceptionForInvalidRanges(List invalidRanges, - boolean details) { + boolean details) { StringBuilder sb = new StringBuilder(); sb.append(invalidRanges.size()).append(" invaild ranges.\n"); if (details) { for (RowRange range : invalidRanges) { - sb.append( - "Invalid range: start row => " + Bytes.toString(range.startRow) + ", stop row => " - + Bytes.toString(range.stopRow)).append('\n'); + sb.append("Invalid range: start row => " + Bytes.toString(range.startRow) + ", stop row => " + + Bytes.toString(range.stopRow)).append('\n'); } } throw new IllegalArgumentException(sb.toString()); @@ -437,24 +440,30 @@ public class MultiRowRangeFilter extends FilterBase { public BasicRowRange() { } + /** * If the startRow is empty or null, set it to HConstants.EMPTY_BYTE_ARRAY, means begin at the * start row of the table. If the stopRow is empty or null, set it to * HConstants.EMPTY_BYTE_ARRAY, means end of the last row of table. */ public BasicRowRange(String startRow, boolean startRowInclusive, String stopRow, - boolean stopRowInclusive) { - this((startRow == null || startRow.isEmpty()) ? HConstants.EMPTY_BYTE_ARRAY : - Bytes.toBytes(startRow), startRowInclusive, - (stopRow == null || stopRow.isEmpty()) ? HConstants.EMPTY_BYTE_ARRAY : - Bytes.toBytes(stopRow), stopRowInclusive); + boolean stopRowInclusive) { + this( + (startRow == null || startRow.isEmpty()) + ? HConstants.EMPTY_BYTE_ARRAY + : Bytes.toBytes(startRow), + startRowInclusive, + (stopRow == null || stopRow.isEmpty()) + ? HConstants.EMPTY_BYTE_ARRAY + : Bytes.toBytes(stopRow), + stopRowInclusive); } - public BasicRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, - boolean stopRowInclusive) { + public BasicRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, + boolean stopRowInclusive) { this.startRow = (startRow == null) ? HConstants.EMPTY_BYTE_ARRAY : startRow; this.startRowInclusive = startRowInclusive; - this.stopRow = (stopRow == null) ? HConstants.EMPTY_BYTE_ARRAY :stopRow; + this.stopRow = (stopRow == null) ? 
HConstants.EMPTY_BYTE_ARRAY : stopRow; this.stopRowInclusive = stopRowInclusive; } @@ -485,38 +494,38 @@ public class MultiRowRangeFilter extends FilterBase { } public boolean contains(byte[] buffer, int offset, int length) { - if(startRowInclusive) { - if(stopRowInclusive) { + if (startRowInclusive) { + if (stopRowInclusive) { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) >= 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); } else { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) >= 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); } } else { - if(stopRowInclusive) { + if (stopRowInclusive) { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) > 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); } else { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) > 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); } } } public boolean isValid() { return Bytes.equals(startRow, HConstants.EMPTY_BYTE_ARRAY) - || Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) - || Bytes.compareTo(startRow, stopRow) < 0 - || (Bytes.compareTo(startRow, stopRow) == 0 && stopRowInclusive == true); + || Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(startRow, stopRow) < 0 + || (Bytes.compareTo(startRow, stopRow) == 0 && stopRowInclusive == true); } @Override - public boolean equals(Object obj){ + public boolean equals(Object obj) { if (!(obj instanceof BasicRowRange)) { return false; } @@ -524,18 +533,16 @@ public class MultiRowRangeFilter extends FilterBase { return true; } BasicRowRange rr = (BasicRowRange) obj; - return Bytes.equals(this.stopRow, rr.getStopRow()) && - Bytes.equals(this.startRow, this.getStartRow()) && - this.startRowInclusive == rr.isStartRowInclusive() && - this.stopRowInclusive == rr.isStopRowInclusive(); + return Bytes.equals(this.stopRow, rr.getStopRow()) + && Bytes.equals(this.startRow, this.getStartRow()) + && this.startRowInclusive == rr.isStartRowInclusive() + && this.stopRowInclusive == rr.isStopRowInclusive(); } @Override public int hashCode() { - return Objects.hash(Bytes.hashCode(this.stopRow), - Bytes.hashCode(this.startRow), - this.startRowInclusive, - this.stopRowInclusive); + return Objects.hash(Bytes.hashCode(this.stopRow), Bytes.hashCode(this.startRow), + this.startRowInclusive, this.stopRowInclusive); } /** @@ -544,10 +551,8 @@ public class MultiRowRangeFilter extends FilterBase { public abstract byte[] getComparisonData(); /** - * Returns whether the bounding row used for binary-search is inclusive or not. 
- * - * For forward scans, we would check the starRow, but we would check the stopRow for - * the reverse scan case. + * Returns whether the bounding row used for binary-search is inclusive or not. For forward + * scans, we would check the starRow, but we would check the stopRow for the reverse scan case. */ public abstract boolean isSearchRowInclusive(); @@ -573,8 +578,8 @@ public class MultiRowRangeFilter extends FilterBase { */ @InterfaceAudience.Private private static class ReversedRowRange extends BasicRowRange { - public ReversedRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, - boolean stopRowInclusive) { + public ReversedRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, + boolean stopRowInclusive) { super(startRow, startRowInclusive, stopRow, stopRowInclusive); } @@ -598,18 +603,19 @@ public class MultiRowRangeFilter extends FilterBase { public static class RowRange extends BasicRowRange { public RowRange() { } + /** * If the startRow is empty or null, set it to HConstants.EMPTY_BYTE_ARRAY, means begin at the * start row of the table. If the stopRow is empty or null, set it to * HConstants.EMPTY_BYTE_ARRAY, means end of the last row of table. */ public RowRange(String startRow, boolean startRowInclusive, String stopRow, - boolean stopRowInclusive) { + boolean stopRowInclusive) { super(startRow, startRowInclusive, stopRow, stopRowInclusive); } - public RowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, - boolean stopRowInclusive) { + public RowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, + boolean stopRowInclusive) { super(startRow, startRowInclusive, stopRow, stopRowInclusive); } @@ -631,8 +637,8 @@ public class MultiRowRangeFilter extends FilterBase { /** * Abstraction over the ranges of rows to return from this filter, regardless of forward or - * reverse scans being used. This Filter can use this class, agnostic of iteration direction, - * as the same algorithm can be applied in both cases. + * reverse scans being used. This Filter can use this class, agnostic of iteration direction, as + * the same algorithm can be applied in both cases. */ @InterfaceAudience.Private private static class RangeIteration { @@ -663,16 +669,15 @@ public class MultiRowRangeFilter extends FilterBase { /** * Rebuilds the sorted ranges (by startKey) into an equivalent sorted list of ranges, only by - * stopKey instead. Descending order and the ReversedRowRange compareTo implementation make - * sure that we can use Collections.binarySearch(). + * stopKey instead. Descending order and the ReversedRowRange compareTo implementation make sure + * that we can use Collections.binarySearch(). */ static List flipAndReverseRanges(List ranges) { List flippedRanges = new ArrayList<>(ranges.size()); for (int i = ranges.size() - 1; i >= 0; i--) { RowRange origRange = ranges.get(i); - ReversedRowRange newRowRange = new ReversedRowRange( - origRange.startRow, origRange.startRowInclusive, origRange.stopRow, - origRange.isStopRowInclusive()); + ReversedRowRange newRowRange = new ReversedRowRange(origRange.startRow, + origRange.startRowInclusive, origRange.stopRow, origRange.isStopRowInclusive()); flippedRanges.add(newRowRange); } return flippedRanges; @@ -680,7 +685,6 @@ public class MultiRowRangeFilter extends FilterBase { /** * Calculates the position where the given rowkey fits in the ranges list. 
- * * @param rowKey the row key to calculate * @return index the position of the row key */ @@ -711,7 +715,7 @@ public class MultiRowRangeFilter extends FilterBase { return insertionPosition; } // the row key equals one of the start keys, and the the range exclude the start key - if(ranges.get(index).isSearchRowInclusive() == false) { + if (ranges.get(index).isSearchRowInclusive() == false) { exclusive = true; } return index; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java index 0585f55d175..1744c20968e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java @@ -22,32 +22,33 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.Objects; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This filter is used for selecting only those keys with columns that matches - * a particular prefix. For example, if prefix is 'an', it will pass keys will - * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. + * This filter is used for selecting only those keys with columns that matches a particular prefix. + * For example, if prefix is 'an', it will pass keys will columns like 'and', 'anti' but not keys + * with columns like 'ball', 'act'. 
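A hypothetical usage sketch matching the 'an'/'and'/'anti' example in the javadoc above (not part of this patch; the prefixes and class name are illustrative):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnPrefixScanSketch {
      public static Scan buildScan() {
        // Qualifiers starting with "an" or "pre" pass (e.g. "and", "anti", "prefix");
        // qualifiers such as "ball" or "act" are filtered out.
        byte[][] prefixes = { Bytes.toBytes("an"), Bytes.toBytes("pre") };
        Scan scan = new Scan();
        scan.setFilter(new MultipleColumnPrefixFilter(prefixes));
        return scan;
      }
    }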
*/ @InterfaceAudience.Public public class MultipleColumnPrefixFilter extends FilterBase { private static final Logger LOG = LoggerFactory.getLogger(MultipleColumnPrefixFilter.class); - protected byte [] hint = null; - protected TreeSet sortedPrefixes = createTreeSet(); + protected byte[] hint = null; + protected TreeSet sortedPrefixes = createTreeSet(); private final static int MAX_LOG_PREFIXES = 5; - public MultipleColumnPrefixFilter(final byte [][] prefixes) { + public MultipleColumnPrefixFilter(final byte[][] prefixes) { if (prefixes != null) { for (byte[] prefix : prefixes) { if (!sortedPrefixes.add(prefix)) { @@ -58,11 +59,11 @@ public class MultipleColumnPrefixFilter extends FilterBase { } } - public byte [][] getPrefix() { + public byte[][] getPrefix() { int count = 0; - byte [][] temp = new byte [sortedPrefixes.size()][]; - for (byte [] prefixes : sortedPrefixes) { - temp [count++] = prefixes; + byte[][] temp = new byte[sortedPrefixes.size()][]; + for (byte[] prefixes : sortedPrefixes) { + temp[count++] = prefixes; } return temp; } @@ -89,17 +90,17 @@ public class MultipleColumnPrefixFilter extends FilterBase { } public ReturnCode filterColumn(Cell cell) { - byte [] qualifier = CellUtil.cloneQualifier(cell); - TreeSet lesserOrEqualPrefixes = - (TreeSet) sortedPrefixes.headSet(qualifier, true); + byte[] qualifier = CellUtil.cloneQualifier(cell); + TreeSet lesserOrEqualPrefixes = + (TreeSet) sortedPrefixes.headSet(qualifier, true); if (lesserOrEqualPrefixes.size() != 0) { - byte [] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last(); - + byte[] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last(); + if (Bytes.startsWith(qualifier, largestPrefixSmallerThanQualifier)) { return ReturnCode.INCLUDE; } - + if (lesserOrEqualPrefixes.size() == sortedPrefixes.size()) { return ReturnCode.NEXT_ROW; } else { @@ -112,10 +113,10 @@ public class MultipleColumnPrefixFilter extends FilterBase { } } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - byte [][] prefixes = new byte [filterArguments.size()][]; - for (int i = 0 ; i < filterArguments.size(); i++) { - byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(i)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + byte[][] prefixes = new byte[filterArguments.size()][]; + for (int i = 0; i < filterArguments.size(); i++) { + byte[] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(i)); prefixes[i] = columnPrefix; } return new MultipleColumnPrefixFilter(prefixes); @@ -125,10 +126,10 @@ public class MultipleColumnPrefixFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.MultipleColumnPrefixFilter.Builder builder = FilterProtos.MultipleColumnPrefixFilter.newBuilder(); - for (byte [] element : sortedPrefixes) { + for (byte[] element : sortedPrefixes) { if (element != null) builder.addSortedPrefixes(UnsafeByteOperations.unsafeWrap(element)); } return builder.build().toByteArray(); @@ -136,12 +137,11 @@ public class MultipleColumnPrefixFilter extends FilterBase { /** * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance - * @return An instance of {@link MultipleColumnPrefixFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link MultipleColumnPrefixFilter} made from bytes n * @see + * #toByteArray */ - public static 
MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.MultipleColumnPrefixFilter proto; try { proto = FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes); @@ -149,7 +149,7 @@ public class MultipleColumnPrefixFilter extends FilterBase { throw new DeserializationException(e); } int numPrefixes = proto.getSortedPrefixesCount(); - byte [][] prefixes = new byte[numPrefixes][]; + byte[][] prefixes = new byte[numPrefixes][]; for (int i = 0; i < numPrefixes; ++i) { prefixes[i] = proto.getSortedPrefixes(i).toByteArray(); } @@ -159,15 +159,15 @@ public class MultipleColumnPrefixFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof MultipleColumnPrefixFilter)) return false; - MultipleColumnPrefixFilter other = (MultipleColumnPrefixFilter)o; + MultipleColumnPrefixFilter other = (MultipleColumnPrefixFilter) o; return this.sortedPrefixes.equals(other.sortedPrefixes); } @@ -176,18 +176,17 @@ public class MultipleColumnPrefixFilter extends FilterBase { return PrivateCellUtil.createFirstOnRowCol(cell, hint, 0, hint.length); } - public TreeSet createTreeSet() { + public TreeSet createTreeSet() { return new TreeSet<>(new Comparator() { - @Override - public int compare (Object o1, Object o2) { - if (o1 == null || o2 == null) - throw new IllegalArgumentException ("prefixes can't be null"); + @Override + public int compare(Object o1, Object o2) { + if (o1 == null || o2 == null) throw new IllegalArgumentException("prefixes can't be null"); - byte [] b1 = (byte []) o1; - byte [] b2 = (byte []) o2; - return Bytes.compareTo (b1, 0, b1.length, b2, 0, b2.length); - } - }); + byte[] b1 = (byte[]) o1; + byte[] b2 = (byte[]) o2; + return Bytes.compareTo(b1, 0, b1.length, b2, 0, b2.length); + } + }); } @Override @@ -210,8 +209,8 @@ public class MultipleColumnPrefixFilter extends FilterBase { } } - return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), - count, this.sortedPrefixes.size(), prefixes.toString()); + return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), count, + this.sortedPrefixes.size(), prefixes.toString()); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java index de3edb9d7c1..fc0562ecb3e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,20 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A binary comparator which lexicographically compares against the specified - * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. + * A binary comparator which lexicographically compares against the specified byte array using + * {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") // Should this move to Comparator usage? @@ -45,7 +43,7 @@ public class NullComparator extends ByteArrayComparable { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="EQ_UNUSUAL", justification="") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_UNUSUAL", justification = "") public boolean equals(Object obj) { return obj == null; } @@ -69,22 +67,19 @@ public class NullComparator extends ByteArrayComparable { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { - ComparatorProtos.NullComparator.Builder builder = - ComparatorProtos.NullComparator.newBuilder(); + public byte[] toByteArray() { + ComparatorProtos.NullComparator.Builder builder = ComparatorProtos.NullComparator.newBuilder(); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link NullComparator} instance - * @return An instance of {@link NullComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link NullComparator} made from bytes n * @see + * #toByteArray */ - public static NullComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static NullComparator parseFrom(final byte[] pbBytes) throws DeserializationException { try { - // Just parse. Don't use what we parse since on end we are returning new NullComparator. + // Just parse. Don't use what we parse since on end we are returning new NullComparator. ComparatorProtos.NullComparator.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); @@ -93,9 +88,8 @@ public class NullComparator extends ByteArrayComparable { } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java index b1228d9daed..603778cac57 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,25 +20,23 @@ package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * Implementation of Filter interface that limits results to a specific page - * size. It terminates scanning once the number of filter-passed rows is > - * the given page size. + * Implementation of Filter interface that limits results to a specific page size. It terminates + * scanning once the number of filter-passed rows is > the given page size. *
<p>
      - * Note that this filter cannot guarantee that the number of results returned - * to a client are <= page size. This is because the filter is applied - * separately on different region servers. It does however optimize the scan of - * individual HRegions by making sure that the page size is never exceeded - * locally. + * Note that this filter cannot guarantee that the number of results returned to a client are <= + * page size. This is because the filter is applied separately on different region servers. It does + * however optimize the scan of individual HRegions by making sure that the page size is never + * exceeded locally. */ @InterfaceAudience.Public public class PageFilter extends FilterBase { @@ -48,7 +45,6 @@ public class PageFilter extends FilterBase { /** * Constructor that takes a maximum page size. - * * @param pageSize Maximum result size. */ public PageFilter(final long pageSize) { @@ -88,15 +84,15 @@ public class PageFilter extends FilterBase { this.rowsAccepted++; return this.rowsAccepted > this.pageSize; } - + @Override public boolean hasFilterRow() { return true; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); long pageSize = ParseFilter.convertByteArrayToLong(filterArguments.get(0)); return new PageFilter(pageSize); } @@ -105,21 +101,17 @@ public class PageFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.PageFilter.Builder builder = - FilterProtos.PageFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.PageFilter.Builder builder = FilterProtos.PageFilter.newBuilder(); builder.setPageSize(this.pageSize); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link PageFilter} instance - * @return An instance of {@link PageFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link PageFilter} made from bytes n * @see #toByteArray */ - public static PageFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.PageFilter proto; try { proto = FilterProtos.PageFilter.parseFrom(pbBytes); @@ -132,7 +124,7 @@ public class PageFilter extends FilterBase { /** * @param o other Filter to compare with * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * corresponding fields in other. Used for testing. 
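A hypothetical sketch of the per-region-server paging behaviour described above (not part of this patch; the class name is illustrative):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.PageFilter;

    public class PageScanSketch {
      public static Scan buildScan(long pageSize) {
        // The filter is applied independently on each region server, so the client
        // may still receive more than pageSize rows in total and should stop
        // iterating once it has collected enough results.
        Scan scan = new Scan();
        scan.setFilter(new PageFilter(pageSize));
        return scan;
      }
    }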
*/ @Override boolean areSerializedFieldsEqual(Filter o) { @@ -143,7 +135,7 @@ public class PageFilter extends FilterBase { return false; } - PageFilter other = (PageFilter)o; + PageFilter other = (PageFilter) o; return this.getPageSize() == other.getPageSize(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index b9132a3ba29..0a304481ec1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,11 @@ package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - import org.apache.yetus.audience.InterfaceAudience; /** - * ParseConstants holds a bunch of constants related to parsing Filter Strings - * Used by {@link ParseFilter} + * ParseConstants holds a bunch of constants related to parsing Filter Strings Used by + * {@link ParseFilter} */ @InterfaceAudience.Public public final class ParseConstants { @@ -97,7 +95,7 @@ public final class ParseConstants { /** * SKIP Array */ - public static final byte [] SKIP_ARRAY = new byte [ ] {'S', 'K', 'I', 'P'}; + public static final byte[] SKIP_ARRAY = new byte[] { 'S', 'K', 'I', 'P' }; public static final ByteBuffer SKIP_BUFFER = ByteBuffer.wrap(SKIP_ARRAY); /** @@ -123,19 +121,19 @@ public final class ParseConstants { /** * WHILE Array */ - public static final byte [] WHILE_ARRAY = new byte [] {'W', 'H', 'I', 'L', 'E'}; + public static final byte[] WHILE_ARRAY = new byte[] { 'W', 'H', 'I', 'L', 'E' }; public static final ByteBuffer WHILE_BUFFER = ByteBuffer.wrap(WHILE_ARRAY); /** * OR Array */ - public static final byte [] OR_ARRAY = new byte [] {'O','R'}; + public static final byte[] OR_ARRAY = new byte[] { 'O', 'R' }; public static final ByteBuffer OR_BUFFER = ByteBuffer.wrap(OR_ARRAY); /** * AND Array */ - public static final byte [] AND_ARRAY = new byte [] {'A','N', 'D'}; + public static final byte[] AND_ARRAY = new byte[] { 'A', 'N', 'D' }; public static final ByteBuffer AND_BUFFER = ByteBuffer.wrap(AND_ARRAY); /** @@ -156,39 +154,39 @@ public final class ParseConstants { /** * LESS_THAN Array */ - public static final byte [] LESS_THAN_ARRAY = new byte [] {'<'}; + public static final byte[] LESS_THAN_ARRAY = new byte[] { '<' }; public static final ByteBuffer LESS_THAN_BUFFER = ByteBuffer.wrap(LESS_THAN_ARRAY); /** * LESS_THAN_OR_EQUAL_TO Array */ - public static final byte [] LESS_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'<', '='}; + public static final byte[] LESS_THAN_OR_EQUAL_TO_ARRAY = new byte[] { '<', '=' }; public static final ByteBuffer LESS_THAN_OR_EQUAL_TO_BUFFER = ByteBuffer.wrap(LESS_THAN_OR_EQUAL_TO_ARRAY); /** * GREATER_THAN Array */ - public static final byte [] GREATER_THAN_ARRAY = new byte [] {'>'}; + public static final byte[] GREATER_THAN_ARRAY = new byte[] { '>' }; public static final ByteBuffer GREATER_THAN_BUFFER = ByteBuffer.wrap(GREATER_THAN_ARRAY); /** * GREATER_THAN_OR_EQUAL_TO Array */ - public static final byte [] GREATER_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'>', '='}; + public static final byte[] GREATER_THAN_OR_EQUAL_TO_ARRAY = new byte[] { '>', '=' }; public static final ByteBuffer GREATER_THAN_OR_EQUAL_TO_BUFFER = 
ByteBuffer.wrap(GREATER_THAN_OR_EQUAL_TO_ARRAY); /** * EQUAL_TO Array */ - public static final byte [] EQUAL_TO_ARRAY = new byte [] {'='}; + public static final byte[] EQUAL_TO_ARRAY = new byte[] { '=' }; public static final ByteBuffer EQUAL_TO_BUFFER = ByteBuffer.wrap(EQUAL_TO_ARRAY); /** * NOT_EQUAL_TO Array */ - public static final byte [] NOT_EQUAL_TO_ARRAY = new byte [] {'!', '='}; + public static final byte[] NOT_EQUAL_TO_ARRAY = new byte[] { '!', '=' }; public static final ByteBuffer NOT_EQUAL_TO_BUFFER = ByteBuffer.wrap(NOT_EQUAL_TO_ARRAY); /** @@ -199,17 +197,17 @@ public final class ParseConstants { /** * AND Byte Array */ - public static final byte [] AND = new byte [] {'A','N','D'}; + public static final byte[] AND = new byte[] { 'A', 'N', 'D' }; /** * OR Byte Array */ - public static final byte [] OR = new byte [] {'O', 'R'}; + public static final byte[] OR = new byte[] { 'O', 'R' }; /** * LPAREN Array */ - public static final byte [] LPAREN_ARRAY = new byte [] {'('}; + public static final byte[] LPAREN_ARRAY = new byte[] { '(' }; public static final ByteBuffer LPAREN_BUFFER = ByteBuffer.wrap(LPAREN_ARRAY); /** @@ -230,31 +228,31 @@ public final class ParseConstants { /** * BinaryType byte array */ - public static final byte [] binaryType = new byte [] {'b','i','n','a','r','y'}; + public static final byte[] binaryType = new byte[] { 'b', 'i', 'n', 'a', 'r', 'y' }; /** * BinaryPrefixType byte array */ - public static final byte [] binaryPrefixType = new byte [] {'b','i','n','a','r','y', - 'p','r','e','f','i','x'}; + public static final byte[] binaryPrefixType = + new byte[] { 'b', 'i', 'n', 'a', 'r', 'y', 'p', 'r', 'e', 'f', 'i', 'x' }; /** * RegexStringType byte array */ - public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', - 's','t','r','i','n','g'}; + public static final byte[] regexStringType = + new byte[] { 'r', 'e', 'g', 'e', 'x', 's', 't', 'r', 'i', 'n', 'g' }; /** * RegexStringNoCaseType byte array */ - public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', - 's','t','r','i','n','g', - 'n','o','c','a','s','e'}; + public static final byte[] regexStringNoCaseType = new byte[] { 'r', 'e', 'g', 'e', 'x', 's', 't', + 'r', 'i', 'n', 'g', 'n', 'o', 'c', 'a', 's', 'e' }; /** * SubstringType byte array */ - public static final byte [] substringType = new byte [] {'s','u','b','s','t','r','i','n','g'}; + public static final byte[] substringType = + new byte[] { 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g' }; /** * ASCII for Minus Sign diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 91e578ace97..c9ebace2da2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,25 +27,23 @@ import java.util.Collections; import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; -import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; - +import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.util.Bytes; /** - * This class allows a user to specify a filter via a string - * The string is parsed using the methods of this class and - * a filter object is constructed. This filter object is then wrapped - * in a scanner object which is then returned + * This class allows a user to specify a filter via a string The string is parsed using the methods + * of this class and a filter object is constructed. This filter object is then wrapped in a scanner + * object which is then returned *
<p>
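A hypothetical sketch of the filter-language parsing described in this class javadoc (not part of this patch; the expression string and class name are illustrative):

    import java.nio.charset.CharacterCodingException;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.ParseFilter;

    public class FilterStringSketch {
      public static Scan buildScan() throws CharacterCodingException {
        // parseFilterString turns a filter-language expression into a Filter tree,
        // resolving names such as PrefixFilter and PageFilter via the registrations
        // set up in the static initializer of ParseFilter.
        Filter filter =
          new ParseFilter().parseFilterString("PrefixFilter('row-01') AND PageFilter(10)");
        Scan scan = new Scan();
        scan.setFilter(filter);
        return scan;
      }
    }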
      - * This class addresses the HBASE-4168 JIRA. More documentation on this - * Filter Language can be found at: https://issues.apache.org/jira/browse/HBASE-4176 + * This class addresses the HBASE-4168 JIRA. More documentation on this Filter Language can be found + * at: https://issues.apache.org/jira/browse/HBASE-4176 */ @InterfaceAudience.Public public class ParseFilter { @@ -58,44 +55,36 @@ public class ParseFilter { static { // Registers all the filter supported by the Filter Language filterHashMap = new HashMap<>(); - filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + - "KeyOnlyFilter"); - filterHashMap.put("FirstKeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + - "FirstKeyOnlyFilter"); - filterHashMap.put("PrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "PrefixFilter"); - filterHashMap.put("ColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnPrefixFilter"); - filterHashMap.put("MultipleColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "MultipleColumnPrefixFilter"); - filterHashMap.put("ColumnCountGetFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnCountGetFilter"); - filterHashMap.put("PageFilter", ParseConstants.FILTER_PACKAGE + "." + - "PageFilter"); - filterHashMap.put("ColumnPaginationFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnPaginationFilter"); - filterHashMap.put("InclusiveStopFilter", ParseConstants.FILTER_PACKAGE + "." + - "InclusiveStopFilter"); - filterHashMap.put("TimestampsFilter", ParseConstants.FILTER_PACKAGE + "." + - "TimestampsFilter"); - filterHashMap.put("RowFilter", ParseConstants.FILTER_PACKAGE + "." + - "RowFilter"); - filterHashMap.put("FamilyFilter", ParseConstants.FILTER_PACKAGE + "." + - "FamilyFilter"); - filterHashMap.put("QualifierFilter", ParseConstants.FILTER_PACKAGE + "." + - "QualifierFilter"); - filterHashMap.put("ValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "ValueFilter"); - filterHashMap.put("ColumnRangeFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnRangeFilter"); - filterHashMap.put("SingleColumnValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "SingleColumnValueFilter"); - filterHashMap.put("SingleColumnValueExcludeFilter", ParseConstants.FILTER_PACKAGE + "." + - "SingleColumnValueExcludeFilter"); - filterHashMap.put("DependentColumnFilter", ParseConstants.FILTER_PACKAGE + "." + - "DependentColumnFilter"); - filterHashMap.put("ColumnValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnValueFilter"); + filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + "KeyOnlyFilter"); + filterHashMap.put("FirstKeyOnlyFilter", + ParseConstants.FILTER_PACKAGE + "." + "FirstKeyOnlyFilter"); + filterHashMap.put("PrefixFilter", ParseConstants.FILTER_PACKAGE + "." + "PrefixFilter"); + filterHashMap.put("ColumnPrefixFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnPrefixFilter"); + filterHashMap.put("MultipleColumnPrefixFilter", + ParseConstants.FILTER_PACKAGE + "." + "MultipleColumnPrefixFilter"); + filterHashMap.put("ColumnCountGetFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnCountGetFilter"); + filterHashMap.put("PageFilter", ParseConstants.FILTER_PACKAGE + "." + "PageFilter"); + filterHashMap.put("ColumnPaginationFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnPaginationFilter"); + filterHashMap.put("InclusiveStopFilter", + ParseConstants.FILTER_PACKAGE + "." + "InclusiveStopFilter"); + filterHashMap.put("TimestampsFilter", ParseConstants.FILTER_PACKAGE + "." 
+ "TimestampsFilter"); + filterHashMap.put("RowFilter", ParseConstants.FILTER_PACKAGE + "." + "RowFilter"); + filterHashMap.put("FamilyFilter", ParseConstants.FILTER_PACKAGE + "." + "FamilyFilter"); + filterHashMap.put("QualifierFilter", ParseConstants.FILTER_PACKAGE + "." + "QualifierFilter"); + filterHashMap.put("ValueFilter", ParseConstants.FILTER_PACKAGE + "." + "ValueFilter"); + filterHashMap.put("ColumnRangeFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnRangeFilter"); + filterHashMap.put("SingleColumnValueFilter", + ParseConstants.FILTER_PACKAGE + "." + "SingleColumnValueFilter"); + filterHashMap.put("SingleColumnValueExcludeFilter", + ParseConstants.FILTER_PACKAGE + "." + "SingleColumnValueExcludeFilter"); + filterHashMap.put("DependentColumnFilter", + ParseConstants.FILTER_PACKAGE + "." + "DependentColumnFilter"); + filterHashMap.put("ColumnValueFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnValueFilter"); // Creates the operatorPrecedenceHashMap operatorPrecedenceHashMap = new HashMap<>(); @@ -111,8 +100,7 @@ public class ParseFilter { * @param filterString filter string given by the user * @return filter object we constructed */ - public Filter parseFilterString (String filterString) - throws CharacterCodingException { + public Filter parseFilterString(String filterString) throws CharacterCodingException { return parseFilterString(Bytes.toBytes(filterString)); } @@ -122,20 +110,21 @@ public class ParseFilter { * @param filterStringAsByteArray filter string given by the user * @return filter object we constructed */ - public Filter parseFilterString (byte [] filterStringAsByteArray) - throws CharacterCodingException { + public Filter parseFilterString(byte[] filterStringAsByteArray) throws CharacterCodingException { // stack for the operators and parenthesis - Stack operatorStack = new Stack<>(); + Stack operatorStack = new Stack<>(); // stack for the filter objects - Stack filterStack = new Stack<>(); + Stack filterStack = new Stack<>(); Filter filter = null; - for (int i=0; i - * A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') - * The user given filter string can have many simpleFilterExpressions combined - * using operators. - *

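As an illustrative sketch of the Filter Language handled by parseFilterString above (the filter names come from the registration map shown earlier; the prefix value, and the use of Scan from the HBase client API, are assumptions for the example, not part of this change):

  ParseFilter parser = new ParseFilter();
  // Throws CharacterCodingException if the string cannot be decoded
  Filter parsed = parser.parseFilterString("PrefixFilter('row-') AND KeyOnlyFilter()");
  Scan scan = new Scan();
  scan.setFilter(parsed);

getSupportedFilters() returns the set of names accepted by this parser.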
- * This function extracts a simpleFilterExpression from the
- * larger filterString given the start offset of the simpler expression
- *

      - * @param filterStringAsByteArray filter string given by the user - * @param filterExpressionStartOffset start index of the simple filter expression - * @return byte array containing the simple filter expression - */ - public byte [] extractFilterSimpleExpression (byte [] filterStringAsByteArray, - int filterExpressionStartOffset) - throws CharacterCodingException { + /** + * Extracts a simple filter expression from the filter string given by the user + *

+ * A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') The user given filter
+ * string can have many simpleFilterExpressions combined using operators.
+ *

+ * This function extracts a simpleFilterExpression from the larger filterString given the start
+ * offset of the simpler expression
+ *

      + * @param filterStringAsByteArray filter string given by the user + * @param filterExpressionStartOffset start index of the simple filter expression + * @return byte array containing the simple filter expression + */ + public byte[] extractFilterSimpleExpression(byte[] filterStringAsByteArray, + int filterExpressionStartOffset) throws CharacterCodingException { int quoteCount = 0; - for (int i=filterExpressionStartOffset; i - * @param filterStringAsByteArray filter string given by the user - * @return filter object we constructed - */ - public Filter parseSimpleFilterExpression (byte [] filterStringAsByteArray) + /** + * Constructs a filter object given a simple filter expression + *

      + * @param filterStringAsByteArray filter string given by the user + * @return filter object we constructed + */ + public Filter parseSimpleFilterExpression(byte[] filterStringAsByteArray) throws CharacterCodingException { String filterName = Bytes.toString(getFilterName(filterStringAsByteArray)); - ArrayList filterArguments = getFilterArguments(filterStringAsByteArray); + ArrayList filterArguments = getFilterArguments(filterStringAsByteArray); if (!filterHashMap.containsKey(filterName)) { throw new IllegalArgumentException("Filter Name " + filterName + " not supported"); } try { filterName = filterHashMap.get(filterName); Class c = Class.forName(filterName); - Class[] argTypes = new Class [] {ArrayList.class}; + Class[] argTypes = new Class[] { ArrayList.class }; Method m = c.getDeclaredMethod("createFilterFromArguments", argTypes); - return (Filter) m.invoke(null,filterArguments); + return (Filter) m.invoke(null, filterArguments); } catch (ClassNotFoundException e) { e.printStackTrace(); } catch (NoSuchMethodException e) { @@ -264,23 +251,25 @@ public class ParseFilter { } catch (InvocationTargetException e) { e.printStackTrace(); } - throw new IllegalArgumentException("Incorrect filter string " + - new String(filterStringAsByteArray, StandardCharsets.UTF_8)); + throw new IllegalArgumentException( + "Incorrect filter string " + new String(filterStringAsByteArray, StandardCharsets.UTF_8)); } -/** - * Returns the filter name given a simple filter expression - *

      - * @param filterStringAsByteArray a simple filter expression - * @return name of filter in the simple filter expression - */ - public static byte [] getFilterName (byte [] filterStringAsByteArray) { + /** + * Returns the filter name given a simple filter expression + *

      + * @param filterStringAsByteArray a simple filter expression + * @return name of filter in the simple filter expression + */ + public static byte[] getFilterName(byte[] filterStringAsByteArray) { int filterNameStartIndex = 0; int filterNameEndIndex = 0; - for (int i=filterNameStartIndex; i - * @param filterStringAsByteArray filter string given by the user - * @return an ArrayList containing the arguments of the filter in the filter string - */ - public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) { + /** + * Returns the arguments of the filter from the filter string + *

      + * @param filterStringAsByteArray filter string given by the user + * @return an ArrayList containing the arguments of the filter in the filter string + */ + public static ArrayList getFilterArguments(byte[] filterStringAsByteArray) { int argumentListStartIndex = Bytes.searchDelimiterIndex(filterStringAsByteArray, 0, - filterStringAsByteArray.length, - ParseConstants.LPAREN); + filterStringAsByteArray.length, ParseConstants.LPAREN); if (argumentListStartIndex == -1) { throw new IllegalArgumentException("Incorrect argument list"); } int argumentStartIndex = 0; int argumentEndIndex = 0; - ArrayList filterArguments = new ArrayList<>(); + ArrayList filterArguments = new ArrayList<>(); - for (int i = argumentListStartIndex + 1; i, != etc argumentStartIndex = i; for (int j = argumentStartIndex; j < filterStringAsByteArray.length; j++) { - if (filterStringAsByteArray[j] == ParseConstants.WHITESPACE || - filterStringAsByteArray[j] == ParseConstants.COMMA || - filterStringAsByteArray[j] == ParseConstants.RPAREN) { + if ( + filterStringAsByteArray[j] == ParseConstants.WHITESPACE + || filterStringAsByteArray[j] == ParseConstants.COMMA + || filterStringAsByteArray[j] == ParseConstants.RPAREN + ) { argumentEndIndex = j - 1; i = j; - byte [] filterArgument = new byte [argumentEndIndex - argumentStartIndex + 1]; - Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, - argumentStartIndex, argumentEndIndex - argumentStartIndex + 1); + byte[] filterArgument = new byte[argumentEndIndex - argumentStartIndex + 1]; + Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, argumentStartIndex, + argumentEndIndex - argumentStartIndex + 1); filterArguments.add(filterArgument); break; } else if (j == filterStringAsByteArray.length - 1) { @@ -365,19 +357,19 @@ public class ParseFilter { return filterArguments; } -/** - * This function is called while parsing the filterString and an operator is parsed - *

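A small sketch of how the helpers above decompose one simple expression (the expression text is made up; Bytes and java.util.ArrayList are the usual HBase/JDK classes):

  byte[] expr = Bytes.toBytes("SingleColumnValueFilter('cf', 'q', >=, 'binary:10')");
  // "SingleColumnValueFilter"
  String name = Bytes.toString(ParseFilter.getFilterName(expr));
  // Four raw arguments; quoted ones still carry their surrounding single quotes and are
  // stripped later with removeQuotesFromByteArray inside the filter's createFilterFromArguments
  ArrayList<byte[]> args = ParseFilter.getFilterArguments(expr);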
      - * @param operatorStack the stack containing the operators and parenthesis - * @param filterStack the stack containing the filters - * @param operator the operator found while parsing the filterString - */ - public void reduce(Stack operatorStack, - Stack filterStack, - ByteBuffer operator) { - while (!operatorStack.empty() && - !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())) && - hasHigherPriority(operatorStack.peek(), operator)) { + /** + * This function is called while parsing the filterString and an operator is parsed + *

      + * @param operatorStack the stack containing the operators and parenthesis + * @param filterStack the stack containing the filters + * @param operator the operator found while parsing the filterString + */ + public void reduce(Stack operatorStack, Stack filterStack, + ByteBuffer operator) { + while ( + !operatorStack.empty() && !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())) + && hasHigherPriority(operatorStack.peek(), operator) + ) { filterStack.push(popArguments(operatorStack, filterStack)); } } @@ -387,10 +379,10 @@ public class ParseFilter { * from the filterStack and evaluates them *

      * @param operatorStack the stack containing the operators - * @param filterStack the stack containing the filters + * @param filterStack the stack containing the filters * @return the evaluated filter */ - public static Filter popArguments (Stack operatorStack, Stack filterStack) { + public static Filter popArguments(Stack operatorStack, Stack filterStack) { ByteBuffer argumentOnTopOfStack = operatorStack.peek(); if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) { @@ -452,7 +444,7 @@ public class ParseFilter { } else if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) { // The top of the stack is a LPAREN try { - Filter filter = filterStack.pop(); + Filter filter = filterStack.pop(); operatorStack.pop(); return filter; } catch (EmptyStackException e) { @@ -464,12 +456,12 @@ public class ParseFilter { } } -/** - * Returns which operator has higher precedence - *

- * If a has higher precedence than b, it returns true
- * If they have the same precedence, it returns false
- */
+ /**
+ * Returns which operator has higher precedence
+ *

      + * If a has higher precedence than b, it returns true If they have the same precedence, it returns + * false + */ public boolean hasHigherPriority(ByteBuffer a, ByteBuffer b) { if ((operatorPrecedenceHashMap.get(a) - operatorPrecedenceHashMap.get(b)) < 0) { return true; @@ -477,62 +469,65 @@ public class ParseFilter { return false; } -/** - * Removes the single quote escaping a single quote - thus it returns an unescaped argument - *

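To make the operator handling above concrete, here is a filter string mixing parentheses, AND and OR next to a rough hand-built equivalent (the row prefix and qualifier values are invented; FilterList, KeyOnlyFilter, BinaryComparator and Bytes are standard client classes not shown in this hunk):

  Filter viaLanguage = new ParseFilter()
    .parseFilterString("(PrefixFilter('r') AND QualifierFilter(>=, 'binary:q1')) OR KeyOnlyFilter()");

  // Roughly what popArguments builds: OR -> MUST_PASS_ONE, AND -> MUST_PASS_ALL
  Filter byHand = new FilterList(FilterList.Operator.MUST_PASS_ONE,
    new FilterList(FilterList.Operator.MUST_PASS_ALL,
      new PrefixFilter(Bytes.toBytes("r")),
      new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes("q1")))),
    new KeyOnlyFilter());

SKIP and WHILE wrap a single filter and correspond to SkipFilter and WhileMatchFilter; AND binds tighter than OR, so the parentheses above are only for readability.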
      - * @param filterStringAsByteArray filter string given by user - * @param argumentStartIndex start index of the argument - * @param argumentEndIndex end index of the argument - * @return returns an unescaped argument - */ - public static byte [] createUnescapdArgument (byte [] filterStringAsByteArray, - int argumentStartIndex, int argumentEndIndex) { + /** + * Removes the single quote escaping a single quote - thus it returns an unescaped argument + *

      + * @param filterStringAsByteArray filter string given by user + * @param argumentStartIndex start index of the argument + * @param argumentEndIndex end index of the argument + * @return returns an unescaped argument + */ + public static byte[] createUnescapdArgument(byte[] filterStringAsByteArray, + int argumentStartIndex, int argumentEndIndex) { int unescapedArgumentLength = 2; for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { - unescapedArgumentLength ++; - if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && - i != (argumentEndIndex - 1) && - filterStringAsByteArray[i+1] == ParseConstants.SINGLE_QUOTE) { + unescapedArgumentLength++; + if ( + filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && i != (argumentEndIndex - 1) + && filterStringAsByteArray[i + 1] == ParseConstants.SINGLE_QUOTE + ) { i++; continue; } } - byte [] unescapedArgument = new byte [unescapedArgumentLength]; + byte[] unescapedArgument = new byte[unescapedArgumentLength]; int count = 1; unescapedArgument[0] = '\''; for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { - if (filterStringAsByteArray [i] == ParseConstants.SINGLE_QUOTE && - i != (argumentEndIndex - 1) && - filterStringAsByteArray [i+1] == ParseConstants.SINGLE_QUOTE) { - unescapedArgument[count++] = filterStringAsByteArray [i+1]; + if ( + filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && i != (argumentEndIndex - 1) + && filterStringAsByteArray[i + 1] == ParseConstants.SINGLE_QUOTE + ) { + unescapedArgument[count++] = filterStringAsByteArray[i + 1]; i++; - } - else { - unescapedArgument[count++] = filterStringAsByteArray [i]; + } else { + unescapedArgument[count++] = filterStringAsByteArray[i]; } } unescapedArgument[unescapedArgumentLength - 1] = '\''; return unescapedArgument; } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'OR' - *

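A one-line illustration of the escaping rule above (the prefix value is invented): a literal single quote inside an argument is written as two single quotes, which createUnescapdArgument collapses back to one.

  // The argument 'it''s' carries the literal value it's
  Filter f = new ParseFilter().parseFilterString("PrefixFilter('it''s')");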
      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfOr index at which an 'O' was read - * @return true if the keyword 'OR' is at the current index - */ - public static boolean checkForOr (byte [] filterStringAsByteArray, int indexOfOr) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'OR' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfOr index at which an 'O' was read + * @return true if the keyword 'OR' is at the current index + */ + public static boolean checkForOr(byte[] filterStringAsByteArray, int indexOfOr) throws CharacterCodingException, ArrayIndexOutOfBoundsException { try { - if (filterStringAsByteArray[indexOfOr] == ParseConstants.O && - filterStringAsByteArray[indexOfOr+1] == ParseConstants.R && - (filterStringAsByteArray[indexOfOr-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfOr-1] == ParseConstants.RPAREN) && - (filterStringAsByteArray[indexOfOr+2] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfOr+2] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfOr] == ParseConstants.O + && filterStringAsByteArray[indexOfOr + 1] == ParseConstants.R + && (filterStringAsByteArray[indexOfOr - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfOr - 1] == ParseConstants.RPAREN) + && (filterStringAsByteArray[indexOfOr + 2] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfOr + 2] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -542,24 +537,26 @@ public class ParseFilter { } } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'AND' - *

      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfAnd index at which an 'A' was read - * @return true if the keyword 'AND' is at the current index - */ - public static boolean checkForAnd (byte [] filterStringAsByteArray, int indexOfAnd) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'AND' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfAnd index at which an 'A' was read + * @return true if the keyword 'AND' is at the current index + */ + public static boolean checkForAnd(byte[] filterStringAsByteArray, int indexOfAnd) throws CharacterCodingException { try { - if (filterStringAsByteArray[indexOfAnd] == ParseConstants.A && - filterStringAsByteArray[indexOfAnd+1] == ParseConstants.N && - filterStringAsByteArray[indexOfAnd+2] == ParseConstants.D && - (filterStringAsByteArray[indexOfAnd-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfAnd-1] == ParseConstants.RPAREN) && - (filterStringAsByteArray[indexOfAnd+3] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfAnd+3] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfAnd] == ParseConstants.A + && filterStringAsByteArray[indexOfAnd + 1] == ParseConstants.N + && filterStringAsByteArray[indexOfAnd + 2] == ParseConstants.D + && (filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.RPAREN) + && (filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -569,27 +566,29 @@ public class ParseFilter { } } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP' - *

      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfSkip index at which an 'S' was read - * @return true if the keyword 'SKIP' is at the current index - */ - public static boolean checkForSkip (byte [] filterStringAsByteArray, int indexOfSkip) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfSkip index at which an 'S' was read + * @return true if the keyword 'SKIP' is at the current index + */ + public static boolean checkForSkip(byte[] filterStringAsByteArray, int indexOfSkip) throws CharacterCodingException { try { - if (filterStringAsByteArray[indexOfSkip] == ParseConstants.S && - filterStringAsByteArray[indexOfSkip+1] == ParseConstants.K && - filterStringAsByteArray[indexOfSkip+2] == ParseConstants.I && - filterStringAsByteArray[indexOfSkip+3] == ParseConstants.P && - (indexOfSkip == 0 || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.RPAREN || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.LPAREN) && - (filterStringAsByteArray[indexOfSkip+4] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfSkip+4] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfSkip] == ParseConstants.S + && filterStringAsByteArray[indexOfSkip + 1] == ParseConstants.K + && filterStringAsByteArray[indexOfSkip + 2] == ParseConstants.I + && filterStringAsByteArray[indexOfSkip + 3] == ParseConstants.P + && (indexOfSkip == 0 + || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.RPAREN + || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.LPAREN) + && (filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -599,27 +598,30 @@ public class ParseFilter { } } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE' - *

      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfWhile index at which an 'W' was read - * @return true if the keyword 'WHILE' is at the current index - */ - public static boolean checkForWhile (byte [] filterStringAsByteArray, int indexOfWhile) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfWhile index at which an 'W' was read + * @return true if the keyword 'WHILE' is at the current index + */ + public static boolean checkForWhile(byte[] filterStringAsByteArray, int indexOfWhile) throws CharacterCodingException { try { - if (filterStringAsByteArray[indexOfWhile] == ParseConstants.W && - filterStringAsByteArray[indexOfWhile+1] == ParseConstants.H && - filterStringAsByteArray[indexOfWhile+2] == ParseConstants.I && - filterStringAsByteArray[indexOfWhile+3] == ParseConstants.L && - filterStringAsByteArray[indexOfWhile+4] == ParseConstants.E && - (indexOfWhile == 0 || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.WHITESPACE - || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.RPAREN || - filterStringAsByteArray[indexOfWhile-1] == ParseConstants.LPAREN) && - (filterStringAsByteArray[indexOfWhile+5] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfWhile+5] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfWhile] == ParseConstants.W + && filterStringAsByteArray[indexOfWhile + 1] == ParseConstants.H + && filterStringAsByteArray[indexOfWhile + 2] == ParseConstants.I + && filterStringAsByteArray[indexOfWhile + 3] == ParseConstants.L + && filterStringAsByteArray[indexOfWhile + 4] == ParseConstants.E + && (indexOfWhile == 0 + || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.RPAREN + || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.LPAREN) + && (filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -629,57 +631,56 @@ public class ParseFilter { } } -/** - * Returns a boolean indicating whether the quote was escaped or not - *

      - * @param array byte array in which the quote was found - * @param quoteIndex index of the single quote - * @return returns true if the quote was unescaped - */ - public static boolean isQuoteUnescaped (byte [] array, int quoteIndex) { + /** + * Returns a boolean indicating whether the quote was escaped or not + *

      + * @param array byte array in which the quote was found + * @param quoteIndex index of the single quote + * @return returns true if the quote was unescaped + */ + public static boolean isQuoteUnescaped(byte[] array, int quoteIndex) { if (array == null) { throw new IllegalArgumentException("isQuoteUnescaped called with a null array"); } - if (quoteIndex == array.length - 1 || array[quoteIndex+1] != ParseConstants.SINGLE_QUOTE) { + if (quoteIndex == array.length - 1 || array[quoteIndex + 1] != ParseConstants.SINGLE_QUOTE) { return true; - } - else { + } else { return false; } } -/** - * Takes a quoted byte array and converts it into an unquoted byte array - * For example: given a byte array representing 'abc', it returns a - * byte array representing abc - *

      - * @param quotedByteArray the quoted byte array - * @return Unquoted byte array - */ - public static byte [] removeQuotesFromByteArray (byte [] quotedByteArray) { - if (quotedByteArray == null || - quotedByteArray.length < 2 || - quotedByteArray[0] != ParseConstants.SINGLE_QUOTE || - quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE) { + /** + * Takes a quoted byte array and converts it into an unquoted byte array For example: given a byte + * array representing 'abc', it returns a byte array representing abc + *

      + * @param quotedByteArray the quoted byte array + * @return Unquoted byte array + */ + public static byte[] removeQuotesFromByteArray(byte[] quotedByteArray) { + if ( + quotedByteArray == null || quotedByteArray.length < 2 + || quotedByteArray[0] != ParseConstants.SINGLE_QUOTE + || quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE + ) { throw new IllegalArgumentException("removeQuotesFromByteArray needs a quoted byte array"); } else { - byte [] targetString = new byte [quotedByteArray.length - 2]; + byte[] targetString = new byte[quotedByteArray.length - 2]; Bytes.putBytes(targetString, 0, quotedByteArray, 1, quotedByteArray.length - 2); return targetString; } } -/** - * Converts an int expressed in a byte array to an actual int - *

- * This doesn't use Bytes.toInt because that assumes
- * that there will be {@link Bytes#SIZEOF_INT} bytes available.
- *

      - * @param numberAsByteArray the int value expressed as a byte array - * @return the int value - */ - public static int convertByteArrayToInt (byte [] numberAsByteArray) { + /** + * Converts an int expressed in a byte array to an actual int + *

+ * This doesn't use Bytes.toInt because that assumes that there will be {@link Bytes#SIZEOF_INT}
+ * bytes available.
+ *

      + * @param numberAsByteArray the int value expressed as a byte array + * @return the int value + */ + public static int convertByteArrayToInt(byte[] numberAsByteArray) { long tempResult = ParseFilter.convertByteArrayToLong(numberAsByteArray); @@ -693,16 +694,16 @@ public class ParseFilter { return result; } -/** - * Converts a long expressed in a byte array to an actual long - *

- * This doesn't use Bytes.toLong because that assumes
- * that there will be {@link Bytes#SIZEOF_INT} bytes available.
- *

      - * @param numberAsByteArray the long value expressed as a byte array - * @return the long value - */ - public static long convertByteArrayToLong (byte [] numberAsByteArray) { + /** + * Converts a long expressed in a byte array to an actual long + *

+ * This doesn't use Bytes.toLong because that assumes that there will be {@link Bytes#SIZEOF_INT}
+ * bytes available.
+ *

      + * @param numberAsByteArray the long value expressed as a byte array + * @return the long value + */ + public static long convertByteArrayToLong(byte[] numberAsByteArray) { if (numberAsByteArray == null) { throw new IllegalArgumentException("convertByteArrayToLong called with a null array"); } @@ -717,11 +718,12 @@ public class ParseFilter { } while (i != numberAsByteArray.length) { - if (numberAsByteArray[i] < ParseConstants.ZERO || - numberAsByteArray[i] > ParseConstants.NINE) { + if ( + numberAsByteArray[i] < ParseConstants.ZERO || numberAsByteArray[i] > ParseConstants.NINE + ) { throw new IllegalArgumentException("Byte Array should only contain digits"); } - result = result*10 + (numberAsByteArray[i] - ParseConstants.ZERO); + result = result * 10 + (numberAsByteArray[i] - ParseConstants.ZERO); if (result < 0) { throw new IllegalArgumentException("Long Argument too large"); } @@ -735,37 +737,38 @@ public class ParseFilter { } } -/** - * Converts a boolean expressed in a byte array to an actual boolean - *

- * This doesn't used Bytes.toBoolean because Bytes.toBoolean(byte [])
- * assumes that 1 stands for true and 0 for false.
- * Here, the byte array representing "true" and "false" is parsed
- *

      - * @param booleanAsByteArray the boolean value expressed as a byte array - * @return the boolean value - */ - public static boolean convertByteArrayToBoolean (byte [] booleanAsByteArray) { + /** + * Converts a boolean expressed in a byte array to an actual boolean + *

+ * This doesn't used Bytes.toBoolean because Bytes.toBoolean(byte []) assumes that 1 stands for
+ * true and 0 for false. Here, the byte array representing "true" and "false" is parsed
+ *

      + * @param booleanAsByteArray the boolean value expressed as a byte array + * @return the boolean value + */ + public static boolean convertByteArrayToBoolean(byte[] booleanAsByteArray) { if (booleanAsByteArray == null) { throw new IllegalArgumentException("convertByteArrayToBoolean called with a null array"); } - if (booleanAsByteArray.length == 4 && - (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T') && - (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R') && - (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U') && - (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E')) { + if ( + booleanAsByteArray.length == 4 + && (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T') + && (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R') + && (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U') + && (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E') + ) { return true; - } - else if (booleanAsByteArray.length == 5 && - (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F') && - (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A') && - (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L') && - (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S') && - (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E')) { + } else if ( + booleanAsByteArray.length == 5 + && (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F') + && (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A') + && (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L') + && (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S') + && (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E') + ) { return false; - } - else { + } else { throw new IllegalArgumentException("Incorrect Boolean Expression"); } } @@ -775,64 +778,52 @@ public class ParseFilter { * @param compareOpAsByteArray the comparatorOperator symbol as a byte array * @return the Compare Operator */ - public static CompareOperator createCompareOperator (byte [] compareOpAsByteArray) { + public static CompareOperator createCompareOperator(byte[] compareOpAsByteArray) { ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray); - if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) - return CompareOperator.LESS; + if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) return CompareOperator.LESS; else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER)) return CompareOperator.LESS_OR_EQUAL; - else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) - return CompareOperator.GREATER; + else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) return CompareOperator.GREATER; else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER)) return CompareOperator.GREATER_OR_EQUAL; - else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) - return CompareOperator.NOT_EQUAL; - else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) - return CompareOperator.EQUAL; - else - throw new IllegalArgumentException("Invalid compare operator"); + else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) return CompareOperator.NOT_EQUAL; + else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) return CompareOperator.EQUAL; + else throw new IllegalArgumentException("Invalid compare operator"); } /** * Takes a compareOperator symbol as a byte array and returns the corresponding CompareOperator * @deprecated Since 2.0 - *

      + *

      * @param compareOpAsByteArray the comparatorOperator symbol as a byte array * @return the Compare Operator * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #createCompareOperator(byte [])} */ @Deprecated - public static CompareFilter.CompareOp createCompareOp (byte [] compareOpAsByteArray) { + public static CompareFilter.CompareOp createCompareOp(byte[] compareOpAsByteArray) { ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray); - if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) - return CompareOp.LESS; + if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) return CompareOp.LESS; else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER)) return CompareOp.LESS_OR_EQUAL; - else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) - return CompareOp.GREATER; + else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) return CompareOp.GREATER; else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER)) return CompareOp.GREATER_OR_EQUAL; - else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) - return CompareOp.NOT_EQUAL; - else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) - return CompareOp.EQUAL; - else - throw new IllegalArgumentException("Invalid compare operator"); + else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) return CompareOp.NOT_EQUAL; + else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) return CompareOp.EQUAL; + else throw new IllegalArgumentException("Invalid compare operator"); } -/** - * Parses a comparator of the form comparatorType:comparatorValue form and returns a comparator - *

      - * @param comparator the comparator in the form comparatorType:comparatorValue - * @return the parsed comparator - */ - public static ByteArrayComparable createComparator (byte [] comparator) { - if (comparator == null) - throw new IllegalArgumentException("Incorrect Comparator"); - byte [][] parsedComparator = ParseFilter.parseComparator(comparator); - byte [] comparatorType = parsedComparator[0]; - byte [] comparatorValue = parsedComparator[1]; - + /** + * Parses a comparator of the form comparatorType:comparatorValue form and returns a comparator + *

      + * @param comparator the comparator in the form comparatorType:comparatorValue + * @return the parsed comparator + */ + public static ByteArrayComparable createComparator(byte[] comparator) { + if (comparator == null) throw new IllegalArgumentException("Incorrect Comparator"); + byte[][] parsedComparator = ParseFilter.parseComparator(comparator); + byte[] comparatorType = parsedComparator[0]; + byte[] comparatorValue = parsedComparator[1]; if (Bytes.equals(comparatorType, ParseConstants.binaryType)) return new BinaryComparator(comparatorValue); @@ -842,28 +833,27 @@ public class ParseFilter { return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), - Pattern.CASE_INSENSITIVE | Pattern.DOTALL); + Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); - else - throw new IllegalArgumentException("Incorrect comparatorType"); + else throw new IllegalArgumentException("Incorrect comparatorType"); } -/** - * Splits a column in comparatorType:comparatorValue form into separate byte arrays - *

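For illustration, the comparatorType:comparatorValue forms dispatched above next to a hand-built equivalent (the row-key pattern is invented; RowFilter and CompareOperator are standard client classes):

  // 'binary:abc'             -> BinaryComparator
  // 'regexstring:ab*c'       -> RegexStringComparator
  // 'regexstringnocase:ab*c' -> RegexStringComparator with CASE_INSENSITIVE | DOTALL
  // 'substring:abc'          -> SubstringComparator
  Filter viaLanguage = new ParseFilter().parseFilterString("RowFilter(=, 'regexstring:row-\\d+')");
  Filter byHand = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("row-\\d+"));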
      - * @param comparator the comparator - * @return the parsed arguments of the comparator as a 2D byte array - */ - public static byte [][] parseComparator (byte [] comparator) { - final int index = Bytes.searchDelimiterIndex(comparator, 0, comparator.length, - ParseConstants.COLON); + /** + * Splits a column in comparatorType:comparatorValue form into separate byte arrays + *

      + * @param comparator the comparator + * @return the parsed arguments of the comparator as a 2D byte array + */ + public static byte[][] parseComparator(byte[] comparator) { + final int index = + Bytes.searchDelimiterIndex(comparator, 0, comparator.length, ParseConstants.COLON); if (index == -1) { throw new IllegalArgumentException("Incorrect comparator"); } - byte [][] result = new byte [2][0]; - result[0] = new byte [index]; + byte[][] result = new byte[2][0]; + result[0] = new byte[index]; System.arraycopy(comparator, 0, result[0], 0, index); final int len = comparator.length - (index + 1); @@ -873,10 +863,10 @@ public class ParseFilter { return result; } -/** - * Return a Set of filters supported by the Filter Language - */ - public Set getSupportedFilters () { + /** + * Return a Set of filters supported by the Filter Language + */ + public Set getSupportedFilters() { return filterHashMap.keySet(); } @@ -889,15 +879,13 @@ public class ParseFilter { } /** - * Register a new filter with the parser. If the filter is already registered, - * an IllegalArgumentException will be thrown. - * - * @param name a name for the filter + * Register a new filter with the parser. If the filter is already registered, an + * IllegalArgumentException will be thrown. + * @param name a name for the filter * @param filterClass fully qualified class name */ public static void registerFilter(String name, String filterClass) { - if(LOG.isInfoEnabled()) - LOG.info("Registering new filter " + name); + if (LOG.isInfoEnabled()) LOG.info("Registering new filter " + name); filterHashMap.put(name, filterClass); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java index 15c18fcb248..c6fdcc88875 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,33 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.util.ArrayList; - import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** * Pass results that have same row prefix. 
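As a sketch of the registration hook above, a custom filter can be added to the language at runtime; the class name here is hypothetical, and such a class is expected to expose the static createFilterFromArguments(ArrayList<byte[]>) factory that parseSimpleFilterExpression invokes through reflection:

  // Hypothetical filter class; it must provide
  //   public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments)
  ParseFilter.registerFilter("MyCustomFilter", "com.example.filter.MyCustomFilter");
  Filter custom = new ParseFilter().parseFilterString("MyCustomFilter('arg1')");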
*/ @InterfaceAudience.Public public class PrefixFilter extends FilterBase { - protected byte [] prefix = null; + protected byte[] prefix = null; protected boolean passedPrefix = false; protected boolean filterRow = true; - public PrefixFilter(final byte [] prefix) { + public PrefixFilter(final byte[] prefix) { this.prefix = prefix; } @@ -52,8 +50,7 @@ public class PrefixFilter extends FilterBase { @Override public boolean filterRowKey(Cell firstRowCell) { - if (firstRowCell == null || this.prefix == null) - return true; + if (firstRowCell == null || this.prefix == null) return true; if (filterAllRemaining()) return true; int length = firstRowCell.getRowLength(); if (length < prefix.length) return true; @@ -63,11 +60,11 @@ public class PrefixFilter extends FilterBase { int cmp; if (firstRowCell instanceof ByteBufferExtendedCell) { cmp = ByteBufferUtils.compareTo(((ByteBufferExtendedCell) firstRowCell).getRowByteBuffer(), - ((ByteBufferExtendedCell) firstRowCell).getRowPosition(), this.prefix.length, - this.prefix, 0, this.prefix.length); + ((ByteBufferExtendedCell) firstRowCell).getRowPosition(), this.prefix.length, this.prefix, + 0, this.prefix.length); } else { cmp = Bytes.compareTo(firstRowCell.getRowArray(), firstRowCell.getRowOffset(), - this.prefix.length, this.prefix, 0, this.prefix.length); + this.prefix.length, this.prefix, 0, this.prefix.length); } if ((!isReversed() && cmp > 0) || (isReversed() && cmp < 0)) { passedPrefix = true; @@ -103,10 +100,10 @@ public class PrefixFilter extends FilterBase { return passedPrefix; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); + byte[] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); return new PrefixFilter(prefix); } @@ -114,9 +111,8 @@ public class PrefixFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.PrefixFilter.Builder builder = - FilterProtos.PrefixFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.PrefixFilter.Builder builder = FilterProtos.PrefixFilter.newBuilder(); if (this.prefix != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix)); return builder.build().toByteArray(); } @@ -127,28 +123,27 @@ public class PrefixFilter extends FilterBase { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static PrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static PrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.PrefixFilter proto; try { proto = FilterProtos.PrefixFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new PrefixFilter(proto.hasPrefix()?proto.getPrefix().toByteArray():null); + return new PrefixFilter(proto.hasPrefix() ? proto.getPrefix().toByteArray() : null); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
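A minimal usage sketch for the prefix matching implemented above (the prefix value is invented); once the scan moves past the prefix, filterAllRemaining() returns true so the scan can stop early:

  Scan scan = new Scan();
  scan.setFilter(new PrefixFilter(Bytes.toBytes("row-2022")));
  // Scan.setRowPrefixFilter(...) narrows the start/stop rows to a similar effect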
+ * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof PrefixFilter)) return false; - PrefixFilter other = (PrefixFilter)o; + PrefixFilter other = (PrefixFilter) o; return Bytes.equals(this.getPrefix(), other.getPrefix()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java index 9d1d8c75701..6ee70a79e9f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,57 +15,53 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This filter is used to filter based on the column qualifier. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * column qualifier portion of a key. + * This filter is used to filter based on the column qualifier. It takes an operator (equal, + * greater, not equal, etc) and a byte [] comparator for the column qualifier portion of a key. *

- * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter}
- * to add more control.
+ * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} to add more
+ * control.
 *

 * Multiple filters can be combined using {@link FilterList}.
 *

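For example (the qualifier and substring values are invented), a QualifierFilter can be combined with other filters through FilterList as noted above:

  Scan scan = new Scan();
  scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
    new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("q1"))),
    new ValueFilter(CompareOperator.NOT_EQUAL, new SubstringComparator("stale"))));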
      - * If an already known column qualifier is looked for, - * use {@link org.apache.hadoop.hbase.client.Get#addColumn} - * directly rather than a filter. + * If an already known column qualifier is looked for, use + * {@link org.apache.hadoop.hbase.client.Get#addColumn} directly rather than a filter. */ @InterfaceAudience.Public public class QualifierFilter extends CompareFilter { /** * Constructor. - * @param op the compare op for column qualifier matching + * @param op the compare op for column qualifier matching * @param qualifierComparator the comparator for column qualifier matching - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #QualifierFilter(CompareOperator, ByteArrayComparable)} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #QualifierFilter(CompareOperator, ByteArrayComparable)} instead. */ @Deprecated - public QualifierFilter(final CompareOp op, - final ByteArrayComparable qualifierComparator) { + public QualifierFilter(final CompareOp op, final ByteArrayComparable qualifierComparator) { super(op, qualifierComparator); } /** * Constructor. - * @param op the compare op for column qualifier matching + * @param op the compare op for column qualifier matching * @param qualifierComparator the comparator for column qualifier matching */ - public QualifierFilter(final CompareOperator op, - final ByteArrayComparable qualifierComparator) { + public QualifierFilter(final CompareOperator op, final ByteArrayComparable qualifierComparator) { super(op, qualifierComparator); } @@ -84,10 +79,10 @@ public class QualifierFilter extends CompareFilter { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new QualifierFilter(compareOp, comparator); } @@ -95,9 +90,8 @@ public class QualifierFilter extends CompareFilter { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.QualifierFilter.Builder builder = - FilterProtos.QualifierFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.QualifierFilter.Builder builder = FilterProtos.QualifierFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } @@ -108,8 +102,7 @@ public class QualifierFilter extends CompareFilter { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static QualifierFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static QualifierFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.QualifierFilter proto; try { proto = FilterProtos.QualifierFilter.parseFrom(pbBytes); @@ -126,12 +119,12 @@ public class QualifierFilter extends CompareFilter { } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new QualifierFilter(valueCompareOp,valueComparator); + return new QualifierFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. 
Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java index f25430ce799..b09a1995e80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.util.Objects; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** * A filter that includes rows based on a chance. - * */ @InterfaceAudience.Public public class RandomRowFilter extends FilterBase { @@ -40,9 +37,7 @@ public class RandomRowFilter extends FilterBase { protected boolean filterOutRow; /** - * Create a new filter with a specified chance for a row to be included. - * - * @param chance + * Create a new filter with a specified chance for a row to be included. n */ public RandomRowFilter(float chance) { this.chance = chance; @@ -56,9 +51,7 @@ public class RandomRowFilter extends FilterBase { } /** - * Set the chance that a row is included. - * - * @param chance + * Set the chance that a row is included. n */ public void setChance(float chance) { this.chance = chance; @@ -117,21 +110,18 @@ public class RandomRowFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.RandomRowFilter.Builder builder = - FilterProtos.RandomRowFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.RandomRowFilter.Builder builder = FilterProtos.RandomRowFilter.newBuilder(); builder.setChance(this.chance); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link RandomRowFilter} instance - * @return An instance of {@link RandomRowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link RandomRowFilter} made from bytes n * @see + * #toByteArray */ - public static RandomRowFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static RandomRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.RandomRowFilter proto; try { proto = FilterProtos.RandomRowFilter.parseFrom(pbBytes); @@ -143,15 +133,15 @@ public class RandomRowFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
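A usage sketch for the chance-based inclusion above (the sampling fraction is arbitrary):

  Scan sample = new Scan();
  // Each row is kept with probability of roughly 0.25; a chance of 1 or more keeps every row
  sample.setFilter(new RandomRowFilter(0.25f));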
+ * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof RandomRowFilter)) return false; - RandomRowFilter other = (RandomRowFilter)o; + RandomRowFilter other = (RandomRowFilter) o; return this.getChance() == other.getChance(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java index d278e7ac7ba..75272c5f241 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,11 +21,9 @@ import java.nio.charset.Charset; import java.nio.charset.IllegalCharsetNameException; import java.util.Arrays; import java.util.regex.Pattern; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.jcodings.Encoding; import org.jcodings.EncodingDB; import org.jcodings.specific.UTF8Encoding; @@ -36,36 +33,39 @@ import org.joni.Regex; import org.joni.Syntax; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * This comparator is for use with {@link CompareFilter} implementations, such - * as {@link RowFilter}, {@link QualifierFilter}, and {@link ValueFilter}, for - * filtering based on the value of a given column. Use it to test if a given - * regular expression matches a cell value in the column. + * This comparator is for use with {@link CompareFilter} implementations, such as {@link RowFilter}, + * {@link QualifierFilter}, and {@link ValueFilter}, for filtering based on the value of a given + * column. Use it to test if a given regular expression matches a cell value in the column. *

 * Only EQUAL or NOT_EQUAL comparisons are valid with this comparator.
 *

 * For example:
 *

      + * *

      - * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
      - *     new RegexStringComparator(
      - *       // v4 IP address
      - *       "(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3,3}" +
      - *         "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(\\/[0-9]+)?" +
      - *         "|" +
      - *       // v6 IP address
      - *       "((([\\dA-Fa-f]{1,4}:){7}[\\dA-Fa-f]{1,4})(:([\\d]{1,3}.)" +
      - *         "{3}[\\d]{1,3})?)(\\/[0-9]+)?"));
      + * ValueFilter vf = new ValueFilter(CompareOp.EQUAL, new RegexStringComparator(
      + *   // v4 IP address
      + *   "(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3,3}"
      + *     + "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(\\/[0-9]+)?" + "|" +
      + *     // v6 IP address
      + *     "((([\\dA-Fa-f]{1,4}:){7}[\\dA-Fa-f]{1,4})(:([\\d]{1,3}.)"
      + *     + "{3}[\\d]{1,3})?)(\\/[0-9]+)?"));
        * 
      *

 * Supports {@link java.util.regex.Pattern} flags as well:
 *

      + * *

        * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
      - *     new RegexStringComparator("regex", Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
      + *   new RegexStringComparator("regex", Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
        * 
      + * * @see java.util.regex.Pattern */ @InterfaceAudience.Public @@ -84,8 +84,7 @@ public class RegexStringComparator extends ByteArrayComparable { } /** - * Constructor - * Adds Pattern.DOTALL to the underlying Pattern + * Constructor Adds Pattern.DOTALL to the underlying Pattern * @param expr a valid regular expression */ public RegexStringComparator(String expr) { @@ -93,9 +92,8 @@ public class RegexStringComparator extends ByteArrayComparable { } /** - * Constructor - * Adds Pattern.DOTALL to the underlying Pattern - * @param expr a valid regular expression + * Constructor Adds Pattern.DOTALL to the underlying Pattern + * @param expr a valid regular expression * @param engine engine implementation type */ public RegexStringComparator(String expr, EngineType engine) { @@ -104,7 +102,7 @@ public class RegexStringComparator extends ByteArrayComparable { /** * Constructor - * @param expr a valid regular expression + * @param expr a valid regular expression * @param flags java.util.regex.Pattern flags */ public RegexStringComparator(String expr, int flags) { @@ -113,8 +111,8 @@ public class RegexStringComparator extends ByteArrayComparable { /** * Constructor - * @param expr a valid regular expression - * @param flags java.util.regex.Pattern flags + * @param expr a valid regular expression + * @param flags java.util.regex.Pattern flags * @param engine engine implementation type */ public RegexStringComparator(String expr, int flags, EngineType engine) { @@ -132,12 +130,10 @@ public class RegexStringComparator extends ByteArrayComparable { /** * Specifies the {@link Charset} to use to convert the row key to a String. *

- * The row key needs to be converted to a String in order to be matched
- * against the regular expression. This method controls which charset is
- * used to do this conversion.
+ * The row key needs to be converted to a String in order to be matched against the regular
+ * expression. This method controls which charset is used to do this conversion.
 *

      - * If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} - * is recommended. + * If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} is recommended. * @param charset The charset to use. */ public void setCharset(final Charset charset) { @@ -153,18 +149,17 @@ public class RegexStringComparator extends ByteArrayComparable { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { return engine.toByteArray(); } /** * @param pbBytes A pb serialized {@link RegexStringComparator} instance - * @return An instance of {@link RegexStringComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link RegexStringComparator} made from bytes n * @see + * #toByteArray */ - public static RegexStringComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static RegexStringComparator parseFrom(final byte[] pbBytes) + throws DeserializationException { ComparatorProtos.RegexStringComparator proto; try { proto = ComparatorProtos.RegexStringComparator.parseFrom(pbBytes); @@ -174,8 +169,7 @@ public class RegexStringComparator extends ByteArrayComparable { RegexStringComparator comparator; if (proto.hasEngine()) { EngineType engine = EngineType.valueOf(proto.getEngine()); - comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(), - engine); + comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(), engine); } else { comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags()); } @@ -191,15 +185,14 @@ public class RegexStringComparator extends ByteArrayComparable { } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof RegexStringComparator)) return false; - RegexStringComparator comparator = (RegexStringComparator)other; + RegexStringComparator comparator = (RegexStringComparator) other; return super.areSerializedFieldsEqual(comparator) && engine.getClass().isInstance(comparator.getEngine()) && engine.getPattern().equals(comparator.getEngine().getPattern()) @@ -212,19 +205,17 @@ public class RegexStringComparator extends ByteArrayComparable { } /** - * This is an internal interface for abstracting access to different regular - * expression matching engines. + * This is an internal interface for abstracting access to different regular expression matching + * engines. 
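Tying the charset note above to code (the pattern and the assumption of binary row keys are illustrative): ISO-8859-1 maps every byte value to a character, so nothing is lost in the byte-to-String conversion, and the byte-array based joni engine described below can be selected via the EngineType constructor:

  RegexStringComparator comparator = new RegexStringComparator("^row-\\d+$");
  comparator.setCharset(StandardCharsets.ISO_8859_1);
  Filter rows = new RowFilter(CompareOperator.EQUAL, comparator);

  // Alternative engine selection, see JoniRegexEngine below
  RegexStringComparator joni =
    new RegexStringComparator("^row-\\d+$", RegexStringComparator.EngineType.JONI);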
*/ static interface Engine { /** - * Returns the string representation of the configured regular expression - * for matching + * Returns the string representation of the configured regular expression for matching */ String getPattern(); /** - * Returns the set of configured match flags, a bit mask that may include - * {@link Pattern} flags + * Returns the set of configured match flags, a bit mask that may include {@link Pattern} flags */ int getFlags(); @@ -242,11 +233,11 @@ public class RegexStringComparator extends ByteArrayComparable { /** * Return the serialized form of the configured matcher */ - byte [] toByteArray(); + byte[] toByteArray(); /** * Match the given input against the configured pattern - * @param value the data to be matched + * @param value the data to be matched * @param offset offset of the data to be matched * @param length length of the data to be matched * @return 0 if a match was made, 1 otherwise @@ -305,7 +296,7 @@ public class RegexStringComparator extends ByteArrayComparable { @Override public byte[] toByteArray() { ComparatorProtos.RegexStringComparator.Builder builder = - ComparatorProtos.RegexStringComparator.newBuilder(); + ComparatorProtos.RegexStringComparator.newBuilder(); builder.setPattern(pattern.pattern()); builder.setPatternFlags(pattern.flags()); builder.setCharset(charset.name()); @@ -317,11 +308,10 @@ public class RegexStringComparator extends ByteArrayComparable { /** * Implementation of the Engine interface using Jruby's joni regex engine. *

      - * This engine operates on byte arrays directly so is expected to be more GC - * friendly, and reportedly is twice as fast as Java's Pattern engine. + * This engine operates on byte arrays directly so is expected to be more GC friendly, and + * reportedly is twice as fast as Java's Pattern engine. *
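As a sketch of the API touched by this hunk: the comparator can be switched to the joni engine and given an explicit charset before being handed to a RowFilter. The pattern, the row-key layout and the ISO-8859-1 choice below are illustrative assumptions, not taken from the patch.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType;
import org.apache.hadoop.hbase.filter.RowFilter;

public class JoniRowKeyMatch {
  public static void main(String[] args) {
    // Match row keys shaped like "user|<digits>" with the byte-oriented joni engine.
    RegexStringComparator comparator =
        new RegexStringComparator("^user\\|\\d+$", EngineType.JONI);
    // Row keys are arbitrary bytes in this sketch, so convert them with ISO-8859-1.
    comparator.setCharset(StandardCharsets.ISO_8859_1);
    Scan scan = new Scan();
    scan.setFilter(new RowFilter(CompareOperator.EQUAL, comparator));
    System.out.println(scan);
  }
}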

      - * NOTE: Only the {@link Pattern} flags CASE_INSENSITIVE, DOTALL, and - * MULTILINE are supported. + * NOTE: Only the {@link Pattern} flags CASE_INSENSITIVE, DOTALL, and MULTILINE are supported. */ static class JoniRegexEngine implements Engine { private Encoding encoding = UTF8Encoding.INSTANCE; @@ -365,12 +355,12 @@ public class RegexStringComparator extends ByteArrayComparable { @Override public byte[] toByteArray() { ComparatorProtos.RegexStringComparator.Builder builder = - ComparatorProtos.RegexStringComparator.newBuilder(); - builder.setPattern(regex); - builder.setPatternFlags(joniToPatternFlags(pattern.getOptions())); - builder.setCharset(encoding.getCharsetName()); - builder.setEngine(EngineType.JONI.name()); - return builder.build().toByteArray(); + ComparatorProtos.RegexStringComparator.newBuilder(); + builder.setPattern(regex); + builder.setPatternFlags(joniToPatternFlags(pattern.getOptions())); + builder.setCharset(encoding.getCharsetName()); + builder.setEngine(EngineType.JONI.name()); + return builder.build().toByteArray(); } private int patternToJoniFlags(int flags) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java index 6fe32fb1749..6c51e788091 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,32 +15,30 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This filter is used to filter based on the key. It takes an operator - * (equal, greater, not equal, etc) and a byte [] comparator for the row, - * and column qualifier portions of a key. + * This filter is used to filter based on the key. It takes an operator (equal, greater, not equal, + * etc) and a byte [] comparator for the row, and column qualifier portions of a key. *

      * This filter can be wrapped with {@link WhileMatchFilter} to add more control. *

      * Multiple filters can be combined using {@link FilterList}. *
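A compact sketch of the two combinations the comment mentions, wrapping the row filter in a WhileMatchFilter and putting it into a FilterList; the "2022-05" key prefix and the row-counting use case are illustrative assumptions.

import java.util.Arrays;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixedRowScan {
  public static void main(String[] args) {
    // Keep rows whose key starts with "2022-05"; WhileMatchFilter stops the scan once it stops matching.
    RowFilter byPrefix = new RowFilter(CompareOperator.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes("2022-05")));
    // FirstKeyOnlyFilter returns one cell per matching row, which keeps the scan cheap.
    FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL,
        Arrays.<Filter>asList(new WhileMatchFilter(byPrefix), new FirstKeyOnlyFilter()));
    Scan scan = new Scan();
    scan.setFilter(filters);
    System.out.println(scan);
  }
}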

      - * If an already known row range needs to be scanned, - * use {@link org.apache.hadoop.hbase.CellScanner} start - * and stop rows directly rather than a filter. + * If an already known row range needs to be scanned, use + * {@link org.apache.hadoop.hbase.CellScanner} start and stop rows directly rather than a filter. */ @InterfaceAudience.Public public class RowFilter extends CompareFilter { @@ -50,24 +47,22 @@ public class RowFilter extends CompareFilter { /** * Constructor. - * @param rowCompareOp the compare op for row matching + * @param rowCompareOp the compare op for row matching * @param rowComparator the comparator for row matching * @deprecated Since 2.0.0. Will remove in 3.0.0. Use - * {@link #RowFilter(CompareOperator, ByteArrayComparable)}} instead. + * {@link #RowFilter(CompareOperator, ByteArrayComparable)}} instead. */ @Deprecated - public RowFilter(final CompareOp rowCompareOp, - final ByteArrayComparable rowComparator) { + public RowFilter(final CompareOp rowCompareOp, final ByteArrayComparable rowComparator) { super(rowCompareOp, rowComparator); } /** * Constructor. - * @param op the compare op for row matching + * @param op the compare op for row matching * @param rowComparator the comparator for row matching */ - public RowFilter(final CompareOperator op, - final ByteArrayComparable rowComparator) { + public RowFilter(final CompareOperator op, final ByteArrayComparable rowComparator) { super(op, rowComparator); } @@ -84,7 +79,7 @@ public class RowFilter extends CompareFilter { @Override public ReturnCode filterCell(final Cell v) { - if(this.filterOutRow) { + if (this.filterOutRow) { return ReturnCode.NEXT_ROW; } return ReturnCode.INCLUDE; @@ -103,33 +98,29 @@ public class RowFilter extends CompareFilter { return this.filterOutRow; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { @SuppressWarnings("rawtypes") // for arguments ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new RowFilter(compareOp, comparator); } - /** - * @return The filter serialized using pb - */ + /** + * @return The filter serialized using pb + */ @Override - public byte [] toByteArray() { - FilterProtos.RowFilter.Builder builder = - FilterProtos.RowFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.RowFilter.Builder builder = FilterProtos.RowFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link RowFilter} instance - * @return An instance of {@link RowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link RowFilter} made from bytes n * @see #toByteArray */ - public static RowFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.RowFilter proto; try { proto = FilterProtos.RowFilter.parseFrom(pbBytes); @@ -146,12 +137,12 @@ public class RowFilter extends CompareFilter { } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new 
RowFilter(valueCompareOp,valueComparator); + return new RowFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java index 7b24b03961a..a410884a8ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,149 +15,125 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * A {@link Filter} that checks a single column value, but does not emit the - * tested column. This will enable a performance boost over - * {@link SingleColumnValueFilter}, if the tested column value is not actually - * needed as input (besides for the filtering itself). + * A {@link Filter} that checks a single column value, but does not emit the tested column. This + * will enable a performance boost over {@link SingleColumnValueFilter}, if the tested column value + * is not actually needed as input (besides for the filtering itself). */ @InterfaceAudience.Public public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the column is not found or - * the condition fails, the row will not be emitted. - * - * @param family name of column family + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted; except for the tested column value. + * If the column is not found or the condition fails, the row will not be emitted. 
+ * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator - * @param value value to compare column values against - * {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, byte[])} + * @param value value to compare column values against + * {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, byte[])} */ @Deprecated - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value) { + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value) { super(family, qualifier, compareOp, value); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the column is not found or - * the condition fails, the row will not be emitted. - * - * @param family name of column family + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted; except for the tested column value. + * If the column is not found or the condition fails, the row will not be emitted. + * @param family name of column family * @param qualifier name of column qualifier - * @param op operator - * @param value value to compare column values against + * @param op operator + * @param value value to compare column values against */ - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOperator op, byte[] value) { + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, CompareOperator op, + byte[] value) { super(family, qualifier, op, value); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the condition fails, the - * row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted; except for the tested column value. + * If the condition fails, the row will not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param compareOp operator + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family + * @param qualifier name of column qualifier + * @param compareOp operator * @param comparator Comparator to use. * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable)} + * {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable)} */ @Deprecated - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOp compareOp, ByteArrayComparable comparator) { + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, CompareOp compareOp, + ByteArrayComparable comparator) { super(family, qualifier, compareOp, comparator); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the condition fails, the - * row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted; except for the tested column value. + * If the condition fails, the row will not be emitted. *
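A minimal sketch of the behaviour described above: the tested column decides whether the row is emitted but is itself dropped from the result. Family, qualifier and value are illustrative assumptions.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ExcludeTestedColumn {
  public static void main(String[] args) {
    // Emit rows where cf:status equals "active", but strip cf:status from the returned cells.
    SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("status"),
        CompareOperator.EQUAL, Bytes.toBytes("active"));
    // Without this flag, rows that lack cf:status entirely would still be emitted.
    filter.setFilterIfMissing(true);
    Scan scan = new Scan();
    scan.setFilter(filter);
    System.out.println(scan);
  }
}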

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param op operator + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family + * @param qualifier name of column qualifier + * @param op operator * @param comparator Comparator to use. */ - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOperator op, ByteArrayComparable comparator) { + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, CompareOperator op, + ByteArrayComparable comparator) { super(family, qualifier, op, comparator); } - /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param compareOp - * @param comparator - * @param filterIfMissing - * @param latestVersionOnly - * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be + * removed in 3.0.0. Use * {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)} */ @Deprecated protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier, - final CompareOp compareOp, ByteArrayComparable comparator, final boolean filterIfMissing, - final boolean latestVersionOnly) { - this(family, qualifier, CompareOperator.valueOf(compareOp.name()), comparator, - filterIfMissing, latestVersionOnly); + final CompareOp compareOp, ByteArrayComparable comparator, final boolean filterIfMissing, + final boolean latestVersionOnly) { + this(family, qualifier, CompareOperator.valueOf(compareOp.name()), comparator, filterIfMissing, + latestVersionOnly); } /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param op - * @param comparator - * @param filterIfMissing - * @param latestVersionOnly + * Constructor for protobuf deserialization only. nnnnnn */ protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing, - final boolean latestVersionOnly) { + final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing, + final boolean latestVersionOnly) { super(family, qualifier, op, comparator, filterIfMissing, latestVersionOnly); } // We cleaned result row in FilterRow to be consistent with scanning process. 
@Override public boolean hasFilterRow() { - return true; + return true; } // Here we remove from row all key values from testing column @@ -174,12 +149,12 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { } } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - SingleColumnValueFilter tempFilter = (SingleColumnValueFilter) - SingleColumnValueFilter.createFilterFromArguments(filterArguments); - SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter ( - tempFilter.getFamily(), tempFilter.getQualifier(), - tempFilter.getOperator(), tempFilter.getComparator()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + SingleColumnValueFilter tempFilter = + (SingleColumnValueFilter) SingleColumnValueFilter.createFilterFromArguments(filterArguments); + SingleColumnValueExcludeFilter filter = + new SingleColumnValueExcludeFilter(tempFilter.getFamily(), tempFilter.getQualifier(), + tempFilter.getOperator(), tempFilter.getComparator()); if (filterArguments.size() == 6) { filter.setFilterIfMissing(tempFilter.getFilterIfMissing()); @@ -192,7 +167,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.SingleColumnValueExcludeFilter.Builder builder = FilterProtos.SingleColumnValueExcludeFilter.newBuilder(); builder.setSingleColumnValueFilter(super.convert()); @@ -201,12 +176,11 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { /** * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance - * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes n + * * @see #toByteArray */ - public static SingleColumnValueExcludeFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.SingleColumnValueExcludeFilter proto; try { proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes); @@ -215,8 +189,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { } FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter(); - final CompareOperator compareOp = - CompareOperator.valueOf(parentProto.getCompareOp().name()); + final CompareOperator compareOp = CompareOperator.valueOf(parentProto.getCompareOp().name()); final ByteArrayComparable comparator; try { comparator = ProtobufUtil.toComparator(parentProto.getComparator()); @@ -224,15 +197,15 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { throw new DeserializationException(ioe); } - return new SingleColumnValueExcludeFilter(parentProto.hasColumnFamily() ? parentProto - .getColumnFamily().toByteArray() : null, parentProto.hasColumnQualifier() ? parentProto - .getColumnQualifier().toByteArray() : null, compareOp, comparator, parentProto - .getFilterIfMissing(), parentProto.getLatestVersionOnly()); + return new SingleColumnValueExcludeFilter( + parentProto.hasColumnFamily() ? parentProto.getColumnFamily().toByteArray() : null, + parentProto.hasColumnQualifier() ? 
parentProto.getColumnQualifier().toByteArray() : null, + compareOp, comparator, parentProto.getFilterIfMissing(), parentProto.getLatestVersionOnly()); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java index f314bede082..b049867d2ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,63 +15,58 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType; -import org.apache.hadoop.hbase.util.Bytes; - -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** * This filter is used to filter cells based on value. It takes a {@link CompareFilter.CompareOp} - * operator (equal, greater, not equal, etc), and either a byte [] value or - * a ByteArrayComparable. + * operator (equal, greater, not equal, etc), and either a byte [] value or a ByteArrayComparable. *

      - * If we have a byte [] value then we just do a lexicographic compare. For - * example, if passed value is 'b' and cell has 'a' and the compare operator - * is LESS, then we will filter out this cell (return true). If this is not - * sufficient (eg you want to deserialize a long and then compare it to a fixed - * long value), then you can pass in your own comparator instead. + * If we have a byte [] value then we just do a lexicographic compare. For example, if passed value + * is 'b' and cell has 'a' and the compare operator is LESS, then we will filter out this cell + * (return true). If this is not sufficient (eg you want to deserialize a long and then compare it + * to a fixed long value), then you can pass in your own comparator instead. *

      - * You must also specify a family and qualifier. Only the value of this column - * will be tested. When using this filter on a - * {@link org.apache.hadoop.hbase.CellScanner} with specified - * inputs, the column to be tested should also be added as input (otherwise - * the filter will regard the column as missing). + * You must also specify a family and qualifier. Only the value of this column will be tested. When + * using this filter on a {@link org.apache.hadoop.hbase.CellScanner} with specified inputs, the + * column to be tested should also be added as input (otherwise the filter will regard the column as + * missing). *

      - * To prevent the entire row from being emitted if the column is not found - * on a row, use {@link #setFilterIfMissing}. - * Otherwise, if the column is found, the entire row will be emitted only if - * the value passes. If the value fails, the row will be filtered out. + * To prevent the entire row from being emitted if the column is not found on a row, use + * {@link #setFilterIfMissing}. Otherwise, if the column is found, the entire row will be emitted + * only if the value passes. If the value fails, the row will be filtered out. *

      - * In order to test values of previous versions (timestamps), set - * {@link #setLatestVersionOnly} to false. The default is true, meaning that - * only the latest version's value is tested and all previous versions are ignored. + * In order to test values of previous versions (timestamps), set {@link #setLatestVersionOnly} to + * false. The default is true, meaning that only the latest version's value is tested and all + * previous versions are ignored. *
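Tying the paragraphs above together in one sketch: a custom comparator for a numeric threshold plus the two flags. The column names and the threshold are illustrative assumptions.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.LongComparator;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class WeightThresholdScan {
  public static void main(String[] args) {
    // Emit a row only if cf:weight, deserialized as a long, is greater than 100.
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("weight"),
        CompareOperator.GREATER, new LongComparator(100L));
    filter.setFilterIfMissing(true);   // skip rows that have no cf:weight at all
    filter.setLatestVersionOnly(true); // test only the newest version (the default)
    Scan scan = new Scan();
    // The tested column has to be part of the scan's input, otherwise it counts as missing.
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("weight"));
    scan.setFilter(filter);
    System.out.println(scan);
  }
}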

      * To filter based on the value of all scanned columns, use {@link ValueFilter}. */ @InterfaceAudience.Public public class SingleColumnValueFilter extends FilterBase { - protected byte [] columnFamily; - protected byte [] columnQualifier; + protected byte[] columnFamily; + protected byte[] columnQualifier; protected CompareOperator op; protected org.apache.hadoop.hbase.filter.ByteArrayComparable comparator; protected boolean foundColumn = false; @@ -81,88 +75,79 @@ public class SingleColumnValueFilter extends FilterBase { protected boolean latestVersionOnly = true; /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted. If the condition fails, the row will + * not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator - * @param value value to compare column values against + * @param value value to compare column values against * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, byte[])} instead. + * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, byte[])} instead. */ @Deprecated - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOp compareOp, final byte[] value) { + public SingleColumnValueFilter(final byte[] family, final byte[] qualifier, + final CompareOp compareOp, final byte[] value) { this(family, qualifier, CompareOperator.valueOf(compareOp.name()), new org.apache.hadoop.hbase.filter.BinaryComparator(value)); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted. If the condition fails, the row will + * not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family * @param qualifier name of column qualifier - * @param op operator - * @param value value to compare column values against + * @param op operator + * @param value value to compare column values against */ - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOperator op, final byte[] value) { - this(family, qualifier, op, - new org.apache.hadoop.hbase.filter.BinaryComparator(value)); + public SingleColumnValueFilter(final byte[] family, final byte[] qualifier, + final CompareOperator op, final byte[] value) { + this(family, qualifier, op, new org.apache.hadoop.hbase.filter.BinaryComparator(value)); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted. If the condition fails, the row will + * not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param compareOp operator + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family + * @param qualifier name of column qualifier + * @param compareOp operator * @param comparator Comparator to use. * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable)} instead. + * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable)} + * instead. */ @Deprecated - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOp compareOp, - final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator) { + public SingleColumnValueFilter(final byte[] family, final byte[] qualifier, + final CompareOp compareOp, + final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator) { this(family, qualifier, CompareOperator.valueOf(compareOp.name()), comparator); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted. If the condition fails, the row will + * not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param op operator + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family + * @param qualifier name of column qualifier + * @param op operator * @param comparator Comparator to use. */ - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOperator op, - final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator) { + public SingleColumnValueFilter(final byte[] family, final byte[] qualifier, + final CompareOperator op, final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator) { this.columnFamily = family; this.columnQualifier = qualifier; this.op = op; @@ -170,46 +155,33 @@ public class SingleColumnValueFilter extends FilterBase { } /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param compareOp - * @param comparator - * @param filterIfMissing - * @param latestVersionOnly - * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, - * boolean, boolean)} instead. + * Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be + * removed in 3.0.0. Use + * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)} + * instead. */ @Deprecated protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier, - final CompareOp compareOp, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator, - final boolean filterIfMissing, - final boolean latestVersionOnly) { + final CompareOp compareOp, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator, + final boolean filterIfMissing, final boolean latestVersionOnly) { this(family, qualifier, CompareOperator.valueOf(compareOp.name()), comparator, filterIfMissing, latestVersionOnly); } /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param op - * @param comparator - * @param filterIfMissing - * @param latestVersionOnly + * Constructor for protobuf deserialization only. nnnnnn */ protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator, - final boolean filterIfMissing, final boolean latestVersionOnly) { + final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator, + final boolean filterIfMissing, final boolean latestVersionOnly) { this(family, qualifier, op, comparator); this.filterIfMissing = filterIfMissing; this.latestVersionOnly = latestVersionOnly; } /** - * @return operator - * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead. + * n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} + * instead. 
*/ @Deprecated public CompareOp getOperator() { @@ -255,7 +227,8 @@ public class SingleColumnValueFilter extends FilterBase { @Override public ReturnCode filterCell(final Cell c) { - // System.out.println("REMOVE KEY=" + keyValue.toString() + ", value=" + Bytes.toString(keyValue.getValue())); + // System.out.println("REMOVE KEY=" + keyValue.toString() + ", value=" + + // Bytes.toString(keyValue.getValue())); if (this.matchedColumn) { // We already found and matched the single column, all keys now pass return ReturnCode.INCLUDE; @@ -268,7 +241,7 @@ public class SingleColumnValueFilter extends FilterBase { } foundColumn = true; if (filterColumnValue(c)) { - return this.latestVersionOnly? ReturnCode.NEXT_ROW: ReturnCode.INCLUDE; + return this.latestVersionOnly ? ReturnCode.NEXT_ROW : ReturnCode.INCLUDE; } this.matchedColumn = true; return ReturnCode.INCLUDE; @@ -283,9 +256,9 @@ public class SingleColumnValueFilter extends FilterBase { public boolean filterRow() { // If column was found, return false if it was matched, true if it was not // If column not found, return true if we filter if missing, false if not - return this.foundColumn? !this.matchedColumn: this.filterIfMissing; + return this.foundColumn ? !this.matchedColumn : this.filterIfMissing; } - + @Override public boolean hasFilterRow() { return true; @@ -299,8 +272,8 @@ public class SingleColumnValueFilter extends FilterBase { /** * Get whether entire row should be filtered if column is not found. - * @return true if row should be skipped if column not found, false if row - * should be let through anyways + * @return true if row should be skipped if column not found, false if row should be let through + * anyways */ public boolean getFilterIfMissing() { return filterIfMissing; @@ -311,7 +284,7 @@ public class SingleColumnValueFilter extends FilterBase { *

      * If true, the entire row will be skipped if the column is not found. *

      - * If false, the row will pass if the column is not found. This is default. + * If false, the row will pass if the column is not found. This is default. * @param filterIfMissing flag */ public void setFilterIfMissing(boolean filterIfMissing) { @@ -319,10 +292,9 @@ public class SingleColumnValueFilter extends FilterBase { } /** - * Get whether only the latest version of the column value should be compared. - * If true, the row will be returned if only the latest version of the column - * value matches. If false, the row will be returned if any version of the - * column value matches. The default is true. + * Get whether only the latest version of the column value should be compared. If true, the row + * will be returned if only the latest version of the column value matches. If false, the row will + * be returned if any version of the column value matches. The default is true. * @return return value */ public boolean getLatestVersionOnly() { @@ -330,36 +302,32 @@ public class SingleColumnValueFilter extends FilterBase { } /** - * Set whether only the latest version of the column value should be compared. - * If true, the row will be returned if only the latest version of the column - * value matches. If false, the row will be returned if any version of the - * column value matches. The default is true. + * Set whether only the latest version of the column value should be compared. If true, the row + * will be returned if only the latest version of the column value matches. If false, the row will + * be returned if any version of the column value matches. The default is true. * @param latestVersionOnly flag */ public void setLatestVersionOnly(boolean latestVersionOnly) { this.latestVersionOnly = latestVersionOnly; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { Preconditions.checkArgument(filterArguments.size() == 4 || filterArguments.size() == 6, - "Expected 4 or 6 but got: %s", filterArguments.size()); - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + "Expected 4 or 6 but got: %s", filterArguments.size()); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(2)); - org.apache.hadoop.hbase.filter.ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); + org.apache.hadoop.hbase.filter.ByteArrayComparable comparator = + ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (op != CompareOperator.EQUAL && - op != CompareOperator.NOT_EQUAL) { - throw new IllegalArgumentException ("A regexstring comparator and substring comparator " + - "can only be used with EQUAL and NOT_EQUAL"); + if (comparator instanceof RegexStringComparator || comparator instanceof SubstringComparator) { + if (op != CompareOperator.EQUAL && op != CompareOperator.NOT_EQUAL) { + throw new IllegalArgumentException("A regexstring comparator and substring comparator " + + "can only be used with EQUAL and NOT_EQUAL"); } } - SingleColumnValueFilter filter = new SingleColumnValueFilter(family, 
qualifier, - op, comparator); + SingleColumnValueFilter filter = new SingleColumnValueFilter(family, qualifier, op, comparator); if (filterArguments.size() == 6) { boolean filterIfMissing = ParseFilter.convertByteArrayToBoolean(filterArguments.get(4)); @@ -392,7 +360,7 @@ public class SingleColumnValueFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { return convert().toByteArray(); } @@ -401,8 +369,8 @@ public class SingleColumnValueFilter extends FilterBase { * @return An instance of {@link SingleColumnValueFilter} made from bytes * @see #toByteArray */ - public static SingleColumnValueFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SingleColumnValueFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.SingleColumnValueFilter proto; try { proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes); @@ -410,8 +378,7 @@ public class SingleColumnValueFilter extends FilterBase { throw new DeserializationException(e); } - final CompareOperator compareOp = - CompareOperator.valueOf(proto.getCompareOp().name()); + final CompareOperator compareOp = CompareOperator.valueOf(proto.getCompareOp().name()); final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator; try { comparator = ProtobufUtil.toComparator(proto.getComparator()); @@ -419,34 +386,33 @@ public class SingleColumnValueFilter extends FilterBase { throw new DeserializationException(ioe); } - return new SingleColumnValueFilter(proto.hasColumnFamily() ? proto.getColumnFamily() - .toByteArray() : null, proto.hasColumnQualifier() ? proto.getColumnQualifier() - .toByteArray() : null, compareOp, comparator, proto.getFilterIfMissing(), proto - .getLatestVersionOnly()); + return new SingleColumnValueFilter( + proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null, + proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null, compareOp, + comparator, proto.getFilterIfMissing(), proto.getLatestVersionOnly()); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof SingleColumnValueFilter)) return false; - SingleColumnValueFilter other = (SingleColumnValueFilter)o; + SingleColumnValueFilter other = (SingleColumnValueFilter) o; return Bytes.equals(this.getFamily(), other.getFamily()) - && Bytes.equals(this.getQualifier(), other.getQualifier()) - && this.op.equals(other.op) + && Bytes.equals(this.getQualifier(), other.getQualifier()) && this.op.equals(other.op) && this.getComparator().areSerializedFieldsEqual(other.getComparator()) && this.getFilterIfMissing() == other.getFilterIfMissing() && this.getLatestVersionOnly() == other.getLatestVersionOnly(); } /** - * The only CF this filter needs is given column family. So, it's the only essential - * column in whole scan. If filterIfMissing == false, all families are essential, - * because of possibility of skipping the rows without any data in filtered CF. + * The only CF this filter needs is given column family. So, it's the only essential column in + * whole scan. 
If filterIfMissing == false, all families are essential, because of possibility of + * skipping the rows without any data in filtered CF. */ @Override public boolean isFamilyEssential(byte[] name) { @@ -455,10 +421,9 @@ public class SingleColumnValueFilter extends FilterBase { @Override public String toString() { - return String.format("%s (%s, %s, %s, %s)", - this.getClass().getSimpleName(), Bytes.toStringBinary(this.columnFamily), - Bytes.toStringBinary(this.columnQualifier), this.op.name(), - Bytes.toStringBinary(this.comparator.getValue())); + return String.format("%s (%s, %s, %s, %s)", this.getClass().getSimpleName(), + Bytes.toStringBinary(this.columnFamily), Bytes.toStringBinary(this.columnQualifier), + this.op.name(), Bytes.toStringBinary(this.comparator.getValue())); } @Override @@ -468,7 +433,7 @@ public class SingleColumnValueFilter extends FilterBase { @Override public int hashCode() { - return Objects.hash(Bytes.hashCode(getFamily()), Bytes.hashCode(getQualifier()), - this.op, getComparator(), getFilterIfMissing(), getLatestVersionOnly()); + return Objects.hash(Bytes.hashCode(getFamily()), Bytes.hashCode(getQualifier()), this.op, + getComparator(), getFilterIfMissing(), getLatestVersionOnly()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java index fe5caad1b27..e3b334b8820 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,38 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * A wrapper filter that filters an entire row if any of the Cell checks do - * not pass. + * A wrapper filter that filters an entire row if any of the Cell checks do not pass. *

      - * For example, if all columns in a row represent weights of different things, - * with the values being the actual weights, and we want to filter out the - * entire row if any of its weights are zero. In this case, we want to prevent - * rows from being emitted if a single key is filtered. Combine this filter - * with a {@link ValueFilter}: + * For example, if all columns in a row represent weights of different things, with the values being + * the actual weights, and we want to filter out the entire row if any of its weights are zero. In + * this case, we want to prevent rows from being emitted if a single key is filtered. Combine this + * filter with a {@link ValueFilter}: *

      *

      * * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL, * new BinaryComparator(Bytes.toBytes(0)))); - * - * Any row which contained a column whose value was 0 will be filtered out - * (since ValueFilter will not pass that Cell). - * Without this filter, the other non-zero valued columns in the row would still - * be emitted. + * Any row which contained a column whose value was 0 will be filtered out (since + * ValueFilter will not pass that Cell). Without this filter, the other non-zero valued columns in + * the row would still be emitted. *
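For reference, the example in the comment written out as a compilable sketch; it assumes the weights were stored as ints via Bytes.toBytes(int).

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.SkipFilter;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipZeroWeightRows {
  public static void main(String[] args) {
    // Any cell equal to 0 causes the whole row to be suppressed.
    ValueFilter nonZero =
        new ValueFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(0)));
    Scan scan = new Scan();
    scan.setFilter(new SkipFilter(nonZero));
    System.out.println(scan);
  }
}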

      */ @InterfaceAudience.Public @@ -101,7 +96,7 @@ public class SkipFilter extends FilterBase { public boolean filterRow() { return filterRow; } - + @Override public boolean hasFilterRow() { return true; @@ -112,20 +107,16 @@ public class SkipFilter extends FilterBase { */ @Override public byte[] toByteArray() throws IOException { - FilterProtos.SkipFilter.Builder builder = - FilterProtos.SkipFilter.newBuilder(); + FilterProtos.SkipFilter.Builder builder = FilterProtos.SkipFilter.newBuilder(); builder.setFilter(ProtobufUtil.toFilter(this.filter)); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link SkipFilter} instance - * @return An instance of {@link SkipFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link SkipFilter} made from bytes n * @see #toByteArray */ - public static SkipFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.SkipFilter proto; try { proto = FilterProtos.SkipFilter.parseFrom(pbBytes); @@ -141,15 +132,15 @@ public class SkipFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof SkipFilter)) return false; - SkipFilter other = (SkipFilter)o; + SkipFilter other = (SkipFilter) o; return getFilter().areSerializedFieldsEqual(other.getFilter()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java index 1bfc7229f53..b8e33c438fe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,27 +18,27 @@ package org.apache.hadoop.hbase.filter; import java.util.Locale; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; /** - * This comparator is for use with SingleColumnValueFilter, for filtering based on - * the value of a given column. Use it to test if a given substring appears - * in a cell value in the column. The comparison is case insensitive. + * This comparator is for use with SingleColumnValueFilter, for filtering based on the value of a + * given column. Use it to test if a given substring appears in a cell value in the column. The + * comparison is case insensitive. *

      * Only EQUAL or NOT_EQUAL tests are valid with this comparator. *

      * For example: *

      + * *

        * SingleColumnValueFilter scvf =
      - *   new SingleColumnValueFilter("col", CompareOp.EQUAL,
      - *     new SubstringComparator("substr"));
      + *   new SingleColumnValueFilter("col", CompareOp.EQUAL, new SubstringComparator("substr"));
        * 
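The snippet in the comment does not line up with the constructors shown earlier in this patch, which take a family and a qualifier as byte[]; a sketch against that API, with the column names assumed for illustration:

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class CaseInsensitiveContains {
  public static void main(String[] args) {
    // Keep rows whose cf:col value contains "substr", ignoring case; only EQUAL/NOT_EQUAL apply here.
    SingleColumnValueFilter scvf = new SingleColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("col"),
        CompareOperator.EQUAL, new SubstringComparator("substr"));
    Scan scan = new Scan();
    scan.setFilter(scvf);
    System.out.println(scan);
  }
}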
      */ @InterfaceAudience.Public @@ -64,15 +63,14 @@ public class SubstringComparator extends ByteArrayComparable { @Override public int compareTo(byte[] value, int offset, int length) { - return Bytes.toString(value, offset, length).toLowerCase(Locale.ROOT).contains(substr) ? 0 - : 1; + return Bytes.toString(value, offset, length).toLowerCase(Locale.ROOT).contains(substr) ? 0 : 1; } /** * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { ComparatorProtos.SubstringComparator.Builder builder = ComparatorProtos.SubstringComparator.newBuilder(); builder.setSubstr(this.substr); @@ -81,12 +79,11 @@ public class SubstringComparator extends ByteArrayComparable { /** * @param pbBytes A pb serialized {@link SubstringComparator} instance - * @return An instance of {@link SubstringComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link SubstringComparator} made from bytes n * @see + * #toByteArray */ - public static SubstringComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SubstringComparator parseFrom(final byte[] pbBytes) + throws DeserializationException { ComparatorProtos.SubstringComparator proto; try { proto = ComparatorProtos.SubstringComparator.parseFrom(pbBytes); @@ -97,18 +94,16 @@ public class SubstringComparator extends ByteArrayComparable { } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof SubstringComparator)) return false; - SubstringComparator comparator = (SubstringComparator)other; - return super.areSerializedFieldsEqual(comparator) - && this.substr.equals(comparator.substr); + SubstringComparator comparator = (SubstringComparator) other; + return super.areSerializedFieldsEqual(comparator) && this.substr.equals(comparator.substr); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java index c2cdf9975c5..2ed44ba77e4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,25 +22,25 @@ import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * Filter that returns only cells whose timestamp (version) is - * in the specified list of timestamps (versions). + * Filter that returns only cells whose timestamp (version) is in the specified list of timestamps + * (versions). *
<p>
      - * Note: Use of this filter overrides any time range/time stamp - * options specified using {@link org.apache.hadoop.hbase.client.Get#setTimeRange(long, long)}, + * Note: Use of this filter overrides any time range/time stamp options specified using + * {@link org.apache.hadoop.hbase.client.Get#setTimeRange(long, long)}, * {@link org.apache.hadoop.hbase.client.Scan#setTimeRange(long, long)}, - * {@link org.apache.hadoop.hbase.client.Get#setTimestamp(long)}, - * or {@link org.apache.hadoop.hbase.client.Scan#setTimestamp(long)}. + * {@link org.apache.hadoop.hbase.client.Get#setTimestamp(long)}, or + * {@link org.apache.hadoop.hbase.client.Scan#setTimestamp(long)}. */ @InterfaceAudience.Public public class TimestampsFilter extends FilterBase { @@ -54,23 +54,19 @@ public class TimestampsFilter extends FilterBase { long minTimestamp = Long.MAX_VALUE; /** - * Constructor for filter that retains only the specified timestamps in the list. - * @param timestamps + * Constructor for filter that retains only the specified timestamps in the list. n */ public TimestampsFilter(List timestamps) { this(timestamps, false); } /** - * Constructor for filter that retains only those - * cells whose timestamp (version) is in the specified - * list of timestamps. - * + * Constructor for filter that retains only those cells whose timestamp (version) is in the + * specified list of timestamps. * @param timestamps list of timestamps that are wanted. - * @param canHint should the filter provide a seek hint? This can skip - * past delete tombstones, so it should only be used when that - * is not an issue ( no deletes, or don't care if data - * becomes visible) + * @param canHint should the filter provide a seek hint? This can skip past delete tombstones, + * so it should only be used when that is not an issue ( no deletes, or don't + * care if data becomes visible) */ public TimestampsFilter(List timestamps, boolean canHint) { for (Long timestamp : timestamps) { @@ -98,7 +94,7 @@ public class TimestampsFilter extends FilterBase { /** * Gets the minimum timestamp requested by filter. - * @return minimum timestamp requested by filter. + * @return minimum timestamp requested by filter. */ public long getMin() { return minTimestamp; @@ -128,13 +124,10 @@ public class TimestampsFilter extends FilterBase { return canHint ? ReturnCode.SEEK_NEXT_USING_HINT : ReturnCode.SKIP; } - /** - * Pick the next cell that the scanner should seek to. Since this can skip any number of cells - * any of which can be a delete this can resurect old data. - * - * The method will only be used if canHint was set to true while creating the filter. - * + * Pick the next cell that the scanner should seek to. Since this can skip any number of cells any + * of which can be a delete this can resurect old data. The method will only be used if canHint + * was set to true while creating the filter. * @throws IOException This will never happen. 
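Illustrative sketch (not part of this patch) of the constructor semantics documented above: the filter keeps only the listed versions, and canHint=true enables the seek-hint path in getNextCellHint(), which may skip delete tombstones and so is only safe when deletes are not a concern. The timestamp values are arbitrary.

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.TimestampsFilter;

public class TimestampsScanExample {
  public static Scan exactVersionsScan() {
    // Return only cells written at exactly these two timestamps.
    // canHint = true allows SEEK_NEXT_USING_HINT, so use it only when
    // resurrecting data hidden by delete markers is acceptable.
    return new Scan()
      .setFilter(new TimestampsFilter(Arrays.asList(1_600_000_000_000L, 1_600_000_100_000L), true));
  }
}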
*/ @Override @@ -163,9 +156,9 @@ public class TimestampsFilter extends FilterBase { return PrivateCellUtil.createFirstOnRowColTS(currentCell, nextTimestamp); } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { ArrayList timestamps = new ArrayList<>(filterArguments.size()); - for (int i = 0; ibytes * @see #toByteArray */ - public static TimestampsFilter parseFrom(final byte[] pbBytes) - throws DeserializationException { + public static TimestampsFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.TimestampsFilter proto; try { proto = FilterProtos.TimestampsFilter.parseFrom(pbBytes); @@ -199,20 +189,20 @@ public class TimestampsFilter extends FilterBase { throw new DeserializationException(e); } return new TimestampsFilter(proto.getTimestampsList(), - proto.hasCanHint() && proto.getCanHint()); + proto.hasCanHint() && proto.getCanHint()); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof TimestampsFilter)) return false; - TimestampsFilter other = (TimestampsFilter)o; + TimestampsFilter other = (TimestampsFilter) o; return this.getTimestamps().equals(other.getTimestamps()); } @@ -236,8 +226,8 @@ public class TimestampsFilter extends FilterBase { } } - return String.format("%s (%d/%d): [%s] canHint: [%b]", this.getClass().getSimpleName(), - count, this.timestamps.size(), tsList.toString(), canHint); + return String.format("%s (%d/%d): [%s] canHint: [%b]", this.getClass().getSimpleName(), count, + this.timestamps.size(), tsList.toString(), canHint); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java index 7e958f03a29..57a1c5c7c2a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,55 +15,53 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This filter is used to filter based on column value. 
It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * cell value. + * This filter is used to filter based on column value. It takes an operator (equal, greater, not + * equal, etc) and a byte [] comparator for the cell value. *
<p>
      - * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} - * to add more control. + * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} to add more + * control. *
<p>
      * Multiple filters can be combined using {@link FilterList}. *
<p>
      - * To test the value of a single qualifier when scanning multiple qualifiers, - * use {@link SingleColumnValueFilter}. + * To test the value of a single qualifier when scanning multiple qualifiers, use + * {@link SingleColumnValueFilter}. */ @InterfaceAudience.Public public class ValueFilter extends CompareFilter { /** * Constructor. - * @param valueCompareOp the compare op for value matching + * @param valueCompareOp the compare op for value matching * @param valueComparator the comparator for value matching - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #ValueFilter(CompareOperator, ByteArrayComparable)} + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #ValueFilter(CompareOperator, ByteArrayComparable)} */ - public ValueFilter(final CompareOp valueCompareOp, - final ByteArrayComparable valueComparator) { + public ValueFilter(final CompareOp valueCompareOp, final ByteArrayComparable valueComparator) { super(valueCompareOp, valueComparator); } /** * Constructor. - * @param valueCompareOp the compare op for value matching + * @param valueCompareOp the compare op for value matching * @param valueComparator the comparator for value matching */ public ValueFilter(final CompareOperator valueCompareOp, - final ByteArrayComparable valueComparator) { + final ByteArrayComparable valueComparator) { super(valueCompareOp, valueComparator); } @@ -82,11 +79,11 @@ public class ValueFilter extends CompareFilter { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - @SuppressWarnings("rawtypes") // for arguments + public static Filter createFilterFromArguments(ArrayList filterArguments) { + @SuppressWarnings("rawtypes") // for arguments ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new ValueFilter(compareOp, comparator); } @@ -94,21 +91,17 @@ public class ValueFilter extends CompareFilter { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.ValueFilter.Builder builder = - FilterProtos.ValueFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.ValueFilter.Builder builder = FilterProtos.ValueFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link ValueFilter} instance - * @return An instance of {@link ValueFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link ValueFilter} made from bytes n * @see #toByteArray */ - public static ValueFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ValueFilter proto; try { proto = FilterProtos.ValueFilter.parseFrom(pbBytes); @@ -125,12 +118,12 @@ public class ValueFilter extends CompareFilter { } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new ValueFilter(valueCompareOp,valueComparator); + return new ValueFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. 
Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java index e053886f631..c58e080b565 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon - * as the wrapped filters {@link Filter#filterRowKey(byte[], int, int)}, + * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon as the wrapped + * filters {@link Filter#filterRowKey(byte[], int, int)}, * {@link Filter#filterCell(org.apache.hadoop.hbase.Cell)}, * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or - * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods - * returns true. + * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods returns true. 
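Illustrative sketch (not part of this patch) of the wrapping described in the ValueFilter and WhileMatchFilter javadocs above: ValueFilter selects cells by value, and WhileMatchFilter turns the first non-match into an early end of scan via filterAllRemaining(). The value literal is made up.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class WhileMatchValueExample {
  public static Scan scanWhileOpen() {
    // Include only cells whose value equals "open" ...
    ValueFilter matchesOpen =
      new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("open")));
    // ... and stop the whole scan as soon as a value stops matching.
    return new Scan().setFilter(new WhileMatchFilter(matchesOpen));
  }
}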
*/ @InterfaceAudience.Public public class WhileMatchFilter extends FilterBase { @@ -103,7 +101,7 @@ public class WhileMatchFilter extends FilterBase { changeFAR(filterRow); return filterRow; } - + @Override public boolean hasFilterRow() { return true; @@ -114,8 +112,7 @@ public class WhileMatchFilter extends FilterBase { */ @Override public byte[] toByteArray() throws IOException { - FilterProtos.WhileMatchFilter.Builder builder = - FilterProtos.WhileMatchFilter.newBuilder(); + FilterProtos.WhileMatchFilter.Builder builder = FilterProtos.WhileMatchFilter.newBuilder(); builder.setFilter(ProtobufUtil.toFilter(this.filter)); return builder.build().toByteArray(); } @@ -126,8 +123,7 @@ public class WhileMatchFilter extends FilterBase { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static WhileMatchFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static WhileMatchFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.WhileMatchFilter proto; try { proto = FilterProtos.WhileMatchFilter.parseFrom(pbBytes); @@ -143,15 +139,15 @@ public class WhileMatchFilter extends FilterBase { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof WhileMatchFilter)) return false; - WhileMatchFilter other = (WhileMatchFilter)o; + WhileMatchFilter other = (WhileMatchFilter) o; return getFilter().areSerializedFieldsEqual(other.getFilter()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index accb8329181..7c0149ccb8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.ipc.IPCUtil.toIOE; @@ -52,11 +51,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @@ -139,28 +138,29 @@ public abstract class AbstractRpcClient implements RpcC private int maxConcurrentCallsPerServer; private static final LoadingCache concurrentCounterCache = - CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS). 
- build(new CacheLoader() { - @Override public AtomicInteger load(Address key) throws Exception { - return new AtomicInteger(0); - } - }); + CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS) + .build(new CacheLoader() { + @Override + public AtomicInteger load(Address key) throws Exception { + return new AtomicInteger(0); + } + }); /** * Construct an IPC client for the cluster clusterId - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id * @param localAddr client socket bind address. - * @param metrics the connection metrics + * @param metrics the connection metrics */ public AbstractRpcClient(Configuration conf, String clusterId, SocketAddress localAddr, - MetricsConnection metrics) { + MetricsConnection metrics) { this.userProvider = UserProvider.instantiate(conf); this.localAddr = localAddr; this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true); this.clusterId = clusterId != null ? clusterId : HConstants.CLUSTER_ID_DEFAULT; - this.failureSleep = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.failureSleep = + conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true); this.cellBlockBuilder = new CellBlockBuilder(conf); @@ -176,8 +176,8 @@ public abstract class AbstractRpcClient implements RpcC this.readTO = conf.getInt(SOCKET_TIMEOUT_READ, DEFAULT_SOCKET_TIMEOUT_READ); this.writeTO = conf.getInt(SOCKET_TIMEOUT_WRITE, DEFAULT_SOCKET_TIMEOUT_WRITE); this.metrics = metrics; - this.maxConcurrentCallsPerServer = conf.getInt( - HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD, + this.maxConcurrentCallsPerServer = + conf.getInt(HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD, HConstants.DEFAULT_HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD); this.connections = new PoolMap<>(getPoolType(conf), getPoolSize(conf)); @@ -192,11 +192,11 @@ public abstract class AbstractRpcClient implements RpcC if (LOG.isDebugEnabled()) { LOG.debug("Codec=" + this.codec + ", compressor=" + this.compressor + ", tcpKeepAlive=" - + this.tcpKeepAlive + ", tcpNoDelay=" + this.tcpNoDelay + ", connectTO=" + this.connectTO - + ", readTO=" + this.readTO + ", writeTO=" + this.writeTO + ", minIdleTimeBeforeClose=" - + this.minIdleTimeBeforeClose + ", maxRetries=" + this.maxRetries + ", fallbackAllowed=" - + this.fallbackAllowed + ", bind address=" - + (this.localAddr != null ? this.localAddr : "null")); + + this.tcpKeepAlive + ", tcpNoDelay=" + this.tcpNoDelay + ", connectTO=" + this.connectTO + + ", readTO=" + this.readTO + ", writeTO=" + this.writeTO + ", minIdleTimeBeforeClose=" + + this.minIdleTimeBeforeClose + ", maxRetries=" + this.maxRetries + ", fallbackAllowed=" + + this.fallbackAllowed + ", bind address=" + + (this.localAddr != null ? this.localAddr : "null")); } } @@ -298,7 +298,8 @@ public abstract class AbstractRpcClient implements RpcC int poolSize = config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); if (poolSize <= 0) { - LOG.warn("{} must be positive. Using default value: 1", HConstants.HBASE_CLIENT_IPC_POOL_SIZE); + LOG.warn("{} must be positive. Using default value: 1", + HConstants.HBASE_CLIENT_IPC_POOL_SIZE); return 1; } else { return poolSize; @@ -318,13 +319,13 @@ public abstract class AbstractRpcClient implements RpcC * Make a blocking call. 
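Illustrative sketch (not part of this patch): the constructor above pulls its tunables from the client Configuration. The keys below are the ones named in the constructor and in getPoolSize(); the values are arbitrary examples, with defaults noted where the diff states them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class RpcClientConfExample {
  public static Configuration rpcClientConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.ipc.client.tcpnodelay", true);    // tcpNoDelay (default true)
    conf.setBoolean("hbase.ipc.client.tcpkeepalive", true);  // tcpKeepAlive (default true)
    conf.setInt("hbase.ipc.client.connect.max.retries", 0);  // maxRetries (default 0)
    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 100L);       // failureSleep, in ms
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1);   // must be positive or the warning above fires
    return conf;
  }
}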
Throws exceptions if there are network problems or if the remote code * threw an exception. * @param ticket Be careful which ticket you pass. A new user will mean a new Connection. - * {@link UserProvider#getCurrent()} makes a new instance of User each time so will be a - * new Connection each time. + * {@link UserProvider#getCurrent()} makes a new instance of User each time so will + * be a new Connection each time. * @return A pair with the Message response and the Cell data (if any). */ private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc, - Message param, Message returnType, final User ticket, final Address isa) - throws ServiceException { + Message param, Message returnType, final User ticket, final Address isa) + throws ServiceException { BlockingRpcCallback done = new BlockingRpcCallback<>(); callMethod(md, hrc, param, returnType, ticket, isa, done); Message val; @@ -348,10 +349,10 @@ public abstract class AbstractRpcClient implements RpcC if (failedServers.isFailedServer(remoteId.getAddress())) { if (LOG.isDebugEnabled()) { LOG.debug("Not trying to connect to " + remoteId.getAddress() - + " this server is in the failed servers list"); + + " this server is in the failed servers list"); } throw new FailedServerException( - "This server is in the failed servers list: " + remoteId.getAddress()); + "This server is in the failed servers list: " + remoteId.getAddress()); } T conn; synchronized (connections) { @@ -396,10 +397,7 @@ public abstract class AbstractRpcClient implements RpcC private Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController hrc, final Message param, Message returnType, final User ticket, final Address addr, final RpcCallback callback) { - Span span = new IpcClientSpanBuilder() - .setMethodDescriptor(md) - .setRemoteAddress(addr) - .build(); + Span span = new IpcClientSpanBuilder().setMethodDescriptor(md).setRemoteAddress(addr).build(); try (Scope scope = span.makeCurrent()) { final MetricsConnection.CallStats cs = MetricsConnection.newCallStats(); cs.setStartTime(EnvironmentEdgeManager.currentTime()); @@ -465,10 +463,12 @@ public abstract class AbstractRpcClient implements RpcC synchronized (connections) { for (T connection : connections.values()) { ConnectionId remoteId = connection.remoteId(); - if (remoteId.getAddress().getPort() == sn.getPort() - && remoteId.getAddress().getHostName().equals(sn.getHostname())) { + if ( + remoteId.getAddress().getPort() == sn.getPort() + && remoteId.getAddress().getHostName().equals(sn.getHostname()) + ) { LOG.info("The server on " + sn.toString() + " is dead - stopping the connection " - + connection.remoteId); + + connection.remoteId); connections.remove(remoteId, connection); connection.shutdown(); connection.cleanupConnection(); @@ -476,14 +476,15 @@ public abstract class AbstractRpcClient implements RpcC } } } + /** * Configure an hbase rpccontroller - * @param controller to configure + * @param controller to configure * @param channelOperationTimeout timeout for operation * @return configured controller */ - static HBaseRpcController configureHBaseRpcController( - RpcController controller, int channelOperationTimeout) { + static HBaseRpcController configureHBaseRpcController(RpcController controller, + int channelOperationTimeout) { HBaseRpcController hrc; if (controller != null && controller instanceof HBaseRpcController) { hrc = (HBaseRpcController) controller; @@ -525,7 +526,7 @@ public abstract class AbstractRpcClient implements RpcC @Override public 
BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, final User ticket, - int rpcTimeout) { + int rpcTimeout) { return new BlockingRpcChannelImplementation(this, createAddr(sn), ticket, rpcTimeout); } @@ -544,8 +545,8 @@ public abstract class AbstractRpcClient implements RpcC protected final int rpcTimeout; - protected AbstractRpcChannel(AbstractRpcClient rpcClient, Address addr, - User ticket, int rpcTimeout) { + protected AbstractRpcChannel(AbstractRpcClient rpcClient, Address addr, User ticket, + int rpcTimeout) { this.addr = addr; this.rpcClient = rpcClient; this.ticket = ticket; @@ -578,10 +579,10 @@ public abstract class AbstractRpcClient implements RpcC * Blocking rpc channel that goes via hbase rpc. */ public static class BlockingRpcChannelImplementation extends AbstractRpcChannel - implements BlockingRpcChannel { + implements BlockingRpcChannel { - protected BlockingRpcChannelImplementation(AbstractRpcClient rpcClient, - Address addr, User ticket, int rpcTimeout) { + protected BlockingRpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, + User ticket, int rpcTimeout) { super(rpcClient, addr, ticket, rpcTimeout); } @@ -596,11 +597,10 @@ public abstract class AbstractRpcClient implements RpcC /** * Async rpc channel that goes via hbase rpc. */ - public static class RpcChannelImplementation extends AbstractRpcChannel implements - RpcChannel { + public static class RpcChannelImplementation extends AbstractRpcChannel implements RpcChannel { - protected RpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, - User ticket, int rpcTimeout) { + protected RpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, User ticket, + int rpcTimeout) { super(rpcClient, addr, ticket, rpcTimeout); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java index 145754b6f27..4f4ac910137 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java index da636a37161..1b203727f26 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; - import java.io.IOException; import java.io.InterruptedIOException; - import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + /** - * Simple {@link RpcCallback} implementation providing a - * {@link java.util.concurrent.Future}-like {@link BlockingRpcCallback#get()} method, which - * will block util the instance's {@link BlockingRpcCallback#run(Object)} method has been called. - * {@code R} is the RPC response type that will be passed to the {@link #run(Object)} method. + * Simple {@link RpcCallback} implementation providing a {@link java.util.concurrent.Future}-like + * {@link BlockingRpcCallback#get()} method, which will block util the instance's + * {@link BlockingRpcCallback#run(Object)} method has been called. {@code R} is the RPC response + * type that will be passed to the {@link #run(Object)} method. */ @InterfaceAudience.Private public class BlockingRpcCallback implements RpcCallback { @@ -52,8 +50,8 @@ public class BlockingRpcCallback implements RpcCallback { /** * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was - * passed. When used asynchronously, this method will block until the {@link #run(Object)} - * method has been called. + * passed. When used asynchronously, this method will block until the {@link #run(Object)} method + * has been called. * @return the response object or {@code null} if no response was passed */ public synchronized R get() throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java index dd8f96bb2b9..7fffdad935f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,13 +47,13 @@ public class BlockingRpcClient extends AbstractRpcClient /** * Construct an IPC client for the cluster {@code clusterId} with the default SocketFactory This * method is called with reflection by the RpcClientFactory to create an instance - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id * @param localAddr client socket bind address. - * @param metrics the connection metrics + * @param metrics the connection metrics */ public BlockingRpcClient(Configuration conf, String clusterId, SocketAddress localAddr, - MetricsConnection metrics) { + MetricsConnection metrics) { super(conf, clusterId, localAddr, metrics); this.socketFactory = NetUtils.getDefaultSocketFactory(conf); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index eb8e1d92b21..c8adc6a8cc3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
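Illustrative sketch (not part of this patch) of the Future-like pattern the BlockingRpcCallback javadoc above describes. The class is @InterfaceAudience.Private, so this only shows the shape of its use inside the client; issueAsyncCall(...) is a hypothetical stand-in for whatever eventually invokes run(response).

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;

public class BlockingCallbackSketch {
  // Hypothetical: some asynchronous caller that later calls callback.run(response).
  static void issueAsyncCall(Message request, BlockingRpcCallback<Message> callback) {
    // ... elided in this sketch ...
  }

  public static Message callAndWait(Message request) throws IOException {
    BlockingRpcCallback<Message> done = new BlockingRpcCallback<>();
    issueAsyncCall(request, done); // run(..) will be invoked with the response
    return done.get();             // blocks until run(..) has been called
  }
}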
See the NOTICE file * distributed with this work for additional information @@ -151,7 +151,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { public void sendCall(final Call call) throws IOException { if (callsToWrite.size() >= maxQueueSize) { throw new IOException("Can't add " + call.toShortString() - + " to the write queue. callsToWrite.size()=" + callsToWrite.size()); + + " to the write queue. callsToWrite.size()=" + callsToWrite.size()); } callsToWrite.offer(call); BlockingRpcConnection.this.notifyAll(); @@ -163,8 +163,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { // it means as well that we don't know how many calls we cancelled. calls.remove(call.id); call.setException(new CallCancelledException(call.toShortString() + ", waitTime=" - + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" - + call.timeout)); + + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" + + call.timeout)); } /** @@ -206,8 +206,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { * Cleans the call not yet sent when we finish. */ public void cleanup(IOException e) { - IOException ie = new ConnectionClosingException( - "Connection to " + remoteId.getAddress() + " is closing."); + IOException ie = + new ConnectionClosingException("Connection to " + remoteId.getAddress() + " is closing."); for (Call call : callsToWrite) { call.setException(ie); } @@ -217,8 +217,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, - rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, - rpcClient.metrics); + rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, + rpcClient.metrics); this.rpcClient = rpcClient; this.connectionHeaderPreamble = getConnectionHeaderPreamble(); ConnectionHeader header = getConnectionHeader(); @@ -231,8 +231,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { UserGroupInformation ticket = remoteId.ticket.getUGI(); this.threadName = "BRPC Connection (" + this.rpcClient.socketFactory.hashCode() + ") to " - + remoteId.getAddress().toString() - + ((ticket == null) ? " from an unknown user" : (" from " + ticket.getUserName())); + + remoteId.getAddress().toString() + + ((ticket == null) ? " from an unknown user" : (" from " + ticket.getUserName())); if (this.rpcClient.conf.getBoolean(BlockingRpcClient.SPECIFIC_WRITE_THREAD, false)) { callSender = new CallSender(threadName, this.rpcClient.conf); @@ -263,14 +263,14 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { * The max number of retries is 45, which amounts to 20s*45 = 15 minutes retries. 
*/ if (LOG.isDebugEnabled()) { - LOG.debug("Received exception in connection setup.\n" + - StringUtils.stringifyException(toe)); + LOG.debug( + "Received exception in connection setup.\n" + StringUtils.stringifyException(toe)); } handleConnectionFailure(timeoutFailures++, this.rpcClient.maxRetries, toe); } catch (IOException ie) { if (LOG.isDebugEnabled()) { - LOG.debug("Received exception in connection setup.\n" + - StringUtils.stringifyException(ie)); + LOG.debug( + "Received exception in connection setup.\n" + StringUtils.stringifyException(ie)); } handleConnectionFailure(ioFailures++, this.rpcClient.maxRetries, ie); } @@ -284,11 +284,11 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { * the sleep is synchronized; the locks will be retained. * @param curRetries current number of retries * @param maxRetries max number of retries allowed - * @param ioe failure reason + * @param ioe failure reason * @throws IOException if max number of retries is reached */ private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe) - throws IOException { + throws IOException { closeSocket(); // throw the exception if the maximum number of retries is reached @@ -304,9 +304,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { } if (LOG.isInfoEnabled()) { - LOG.info("Retrying connect to server: " + remoteId.getAddress() + - " after sleeping " + this.rpcClient.failureSleep + "ms. Already tried " + curRetries + - " time(s)."); + LOG.info("Retrying connect to server: " + remoteId.getAddress() + " after sleeping " + + this.rpcClient.failureSleep + "ms. Already tried " + curRetries + " time(s)."); } } @@ -359,15 +358,15 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { } private boolean setupSaslConnection(final InputStream in2, final OutputStream out2) - throws IOException { + throws IOException { if (this.metrics != null) { this.metrics.incrNsLookups(); } saslRpcClient = new HBaseSaslRpcClient(this.rpcClient.conf, provider, token, - socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, - this.rpcClient.conf.get("hbase.rpc.protection", - QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), - this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); + socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, + this.rpcClient.conf.get("hbase.rpc.protection", + QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), + this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); return saslRpcClient.saslConnect(in2, out2); } @@ -378,15 +377,14 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { * connection again. The other problem is to do with ticket expiry. To handle that, a relogin is * attempted. *
<p>
      - * The retry logic is governed by the {@link SaslClientAuthenticationProvider#canRetry()} - * method. Some providers have the ability to obtain new credentials and then re-attempt to - * authenticate with HBase services. Other providers will continue to fail if they failed the - * first time -- for those, we want to fail-fast. + * The retry logic is governed by the {@link SaslClientAuthenticationProvider#canRetry()} method. + * Some providers have the ability to obtain new credentials and then re-attempt to authenticate + * with HBase services. Other providers will continue to fail if they failed the first time -- for + * those, we want to fail-fast. *
</p>
      */ private void handleSaslConnectionFailure(final int currRetries, final int maxRetries, - final Exception ex, final UserGroupInformation user) - throws IOException, InterruptedException { + final Exception ex, final UserGroupInformation user) throws IOException, InterruptedException { closeSocket(); user.doAs(new PrivilegedExceptionAction() { @Override @@ -400,7 +398,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { } if (ex instanceof SaslException) { String msg = "SASL authentication failed." - + " The most likely cause is missing or invalid credentials."; + + " The most likely cause is missing or invalid credentials."; throw new RuntimeException(msg, ex); } throw new IOException(ex); @@ -424,9 +422,9 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { Thread.sleep(ThreadLocalRandom.current().nextInt(reloginMaxBackoff) + 1); return null; } else { - String msg = "Failed to initiate connection for " - + UserGroupInformation.getLoginUser().getUserName() + " to " - + securityInfo.getServerPrincipal(); + String msg = + "Failed to initiate connection for " + UserGroupInformation.getLoginUser().getUserName() + + " to " + securityInfo.getServerPrincipal(); throw new IOException(msg, ex); } } @@ -442,10 +440,10 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { if (this.rpcClient.failedServers.isFailedServer(remoteId.getAddress())) { if (LOG.isDebugEnabled()) { LOG.debug("Not trying to connect to " + remoteId.getAddress() - + " this server is in the failed servers list"); + + " this server is in the failed servers list"); } throw new FailedServerException( - "This server is in the failed servers list: " + remoteId.getAddress()); + "This server is in the failed servers list: " + remoteId.getAddress()); } try { @@ -539,10 +537,10 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { boolean isCryptoAesEnable = false; // check if Crypto AES is enabled if (saslRpcClient != null) { - boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY. 
- getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP()); - isCryptoAesEnable = saslEncryptionEnabled && conf.getBoolean( - CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT); + boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop() + .equalsIgnoreCase(saslRpcClient.getSaslQOP()); + isCryptoAesEnable = saslEncryptionEnabled + && conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT); } // if Crypto AES is enabled, set transformation and negotiate with server @@ -566,7 +564,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { } RPCProtos.ConnectionHeaderResponse connectionHeaderResponse = - RPCProtos.ConnectionHeaderResponse.parseFrom(buff); + RPCProtos.ConnectionHeaderResponse.parseFrom(buff); // Get the CryptoCipherMeta, update the HBaseSaslRpcClient for Crypto Cipher if (connectionHeaderResponse.hasCryptoCipherMeta()) { @@ -574,16 +572,17 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { } waitingConnectionHeaderResponse = false; } catch (SocketTimeoutException ste) { - LOG.error(HBaseMarkers.FATAL, "Can't get the connection header response for rpc timeout, " + LOG.error(HBaseMarkers.FATAL, + "Can't get the connection header response for rpc timeout, " + "please check if server has the correct configuration to support the additional " - + "function.", ste); + + "function.", + ste); // timeout when waiting the connection header response, ignore the additional function throw new IOException("Timeout while waiting connection header response", ste); } } - private void negotiateCryptoAes(RPCProtos.CryptoCipherMeta cryptoCipherMeta) - throws IOException { + private void negotiateCryptoAes(RPCProtos.CryptoCipherMeta cryptoCipherMeta) throws IOException { // initialize the Crypto AES with CryptoCipherMeta saslRpcClient.initCryptoCipher(cryptoCipherMeta, this.rpcClient.conf); // reset the inputStream/outputStream for Crypto AES encryption @@ -600,7 +599,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { ByteBuf cellBlock = null; try { cellBlock = this.rpcClient.cellBlockBuilder.buildCellBlock(this.codec, this.compressor, - call.cells, PooledByteBufAllocator.DEFAULT); + call.cells, PooledByteBufAllocator.DEFAULT); CellBlockMeta cellBlockMeta; if (cellBlock != null) { cellBlockMeta = CellBlockMeta.newBuilder().setLength(cellBlock.readableBytes()).build(); @@ -624,7 +623,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { try { call.callStats.setRequestSizeBytes(write(this.out, requestHeader, call.param, cellBlock)); } catch (Throwable t) { - if(LOG.isTraceEnabled()) { + if (LOG.isTraceEnabled()) { LOG.trace("Error while writing {}", call.toShortString()); } IOException e = IPCUtil.toIOE(t); @@ -667,7 +666,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { if (call != null) { call.callStats.setResponseSizeBytes(totalSize); call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); + .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); } return; } @@ -677,7 +676,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { call.setException(re); call.callStats.setResponseSizeBytes(totalSize); call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); + .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); if (isFatalConnectionException(exceptionResponse)) { 
synchronized (this) { closeConn(re); @@ -701,7 +700,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { call.setResponse(value, cellBlockScanner); call.callStats.setResponseSizeBytes(totalSize); call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); + .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); } } catch (IOException e) { if (expectedCall) { @@ -772,7 +771,7 @@ class BlockingRpcConnection extends RpcConnection implements Runnable { @Override public synchronized void sendRequest(final Call call, HBaseRpcController pcrc) - throws IOException { + throws IOException { pcrc.notifyOnCancel(new RpcCallback() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java index 137e60b7460..3dc48ce3e00 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; - import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; + /** * We will expose the connection to upper layer before initialized, so we need to buffer the calls * passed in and write them out once the connection is established. @@ -35,7 +34,8 @@ import org.apache.yetus.audience.InterfaceAudience; class BufferCallBeforeInitHandler extends ChannelDuplexHandler { private enum BufferCallAction { - FLUSH, FAIL + FLUSH, + FAIL } public static final class BufferCallEvent { @@ -45,7 +45,7 @@ class BufferCallBeforeInitHandler extends ChannelDuplexHandler { public final IOException error; private BufferCallEvent(BufferCallBeforeInitHandler.BufferCallAction action, - IOException error) { + IOException error) { this.action = action; this.error = error; } @@ -59,8 +59,8 @@ class BufferCallBeforeInitHandler extends ChannelDuplexHandler { } } - private static final BufferCallEvent SUCCESS_EVENT = new BufferCallEvent(BufferCallAction.FLUSH, - null); + private static final BufferCallEvent SUCCESS_EVENT = + new BufferCallEvent(BufferCallAction.FLUSH, null); private final Map id2Call = new HashMap<>(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java index 8d23d924339..3c0e24e5714 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ class Call { // The return type. Used to create shell into which we deserialize the response if any. Message responseDefaultType; @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", - justification = "Direct access is only allowed after done") + justification = "Direct access is only allowed after done") IOException error; // exception, null if value private boolean done; // true when call is done final Descriptors.MethodDescriptor md; @@ -61,9 +61,9 @@ class Call { final Span span; Timeout timeoutTask; - Call(int id, final Descriptors.MethodDescriptor md, Message param, - final CellScanner cells, final Message responseDefaultType, int timeout, int priority, - RpcCallback callback, MetricsConnection.CallStats callStats) { + Call(int id, final Descriptors.MethodDescriptor md, Message param, final CellScanner cells, + final Message responseDefaultType, int timeout, int priority, RpcCallback callback, + MetricsConnection.CallStats callStats) { this.param = param; this.md = md; this.cells = cells; @@ -81,20 +81,15 @@ class Call { * Builds a simplified {@link #toString()} that includes just the id and method name. */ public String toShortString() { - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("id", id) - .append("methodName", md.getName()) - .toString(); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("id", id) + .append("methodName", md.getName()).toString(); } @Override public String toString() { // Call[id=32153218,methodName=Get] - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .appendSuper(toShortString()) - .append("param", Optional.ofNullable(param) - .map(ProtobufUtil::getShortTextFormat) - .orElse("")) + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(toShortString()) + .append("param", Optional.ofNullable(param).map(ProtobufUtil::getShortTextFormat).orElse("")) .toString(); } @@ -137,7 +132,7 @@ class Call { /** * Set the return value when there is no error. Notify the caller the call is done. * @param response return value of the call. - * @param cells Can be null + * @param cells Can be null */ public void setResponse(Message response, final CellScanner cells) { synchronized (this) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java index 695c031b142..d710f9d6553 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java index 1ee04b5801f..6bf1fcdff19 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; class CallEvent { public enum Type { - TIMEOUT, CANCELLED + TIMEOUT, + CANCELLED } final Type type; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java index d4105a09fa3..6dfef9c5715 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,9 +33,8 @@ public class CallTimeoutException extends HBaseIOException { /** * CallTimeoutException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ public CallTimeoutException(final String message, final Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java index 2cfde8930f6..feb76d7245a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,17 +18,16 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Exception indicating that the remote host making this IPC lost its - * IPC connection. This will never be returned back to a client, - * but is only used for logging on the server side, etc. + * Exception indicating that the remote host making this IPC lost its IPC connection. This will + * never be returned back to a client, but is only used for logging on the server side, etc. */ @InterfaceAudience.Public public class CallerDisconnectedException extends IOException { private static final long serialVersionUID = 1L; + public CallerDisconnectedException(String msg) { super(msg); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java index 111f7684224..9e9c0688ece 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,27 +17,19 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; - import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.BufferOverflowException; import java.nio.ByteBuffer; - import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.io.ByteBuffAllocator; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffInputStream; import org.apache.hadoop.hbase.io.ByteBufferInputStream; import org.apache.hadoop.hbase.io.ByteBufferListOutputStream; @@ -50,6 +42,13 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionInputStream; import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; /** * Helper class for building cell block. @@ -71,13 +70,13 @@ class CellBlockBuilder { public CellBlockBuilder(Configuration conf) { this.conf = conf; - this.cellBlockDecompressionMultiplier = conf - .getInt("hbase.ipc.cellblock.decompression.buffersize.multiplier", 3); + this.cellBlockDecompressionMultiplier = + conf.getInt("hbase.ipc.cellblock.decompression.buffersize.multiplier", 3); // Guess that 16k is a good size for rpc buffer. Could go bigger. See the TODO below in // #buildCellBlock. - this.cellBlockBuildingInitialBufferSize = ClassSize - .align(conf.getInt("hbase.ipc.cellblock.building.initial.buffersize", 16 * 1024)); + this.cellBlockBuildingInitialBufferSize = + ClassSize.align(conf.getInt("hbase.ipc.cellblock.building.initial.buffersize", 16 * 1024)); } private interface OutputStreamSupplier { @@ -105,17 +104,12 @@ class CellBlockBuilder { /** * Puts CellScanner Cells into a cell block using passed in codec and/or - * compressor. - * @param codec - * @param compressor - * @param cellScanner - * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using - * passed in codec and/or compressor; the returned buffer has - * been flipped and is ready for reading. Use limit to find total size. - * @throws IOException + * compressor. nnn * @return Null or byte buffer filled with a cellblock filled with + * passed-in Cells encoded using passed in codec and/or compressor; the + * returned buffer has been flipped and is ready for reading. Use limit to find total size. 
n */ public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor, - final CellScanner cellScanner) throws IOException { + final CellScanner cellScanner) throws IOException { ByteBufferOutputStreamSupplier supplier = new ByteBufferOutputStreamSupplier(); if (buildCellBlock(codec, compressor, cellScanner, supplier)) { ByteBuffer bb = supplier.baos.getByteBuffer(); @@ -150,7 +144,7 @@ class CellBlockBuilder { } public ByteBuf buildCellBlock(Codec codec, CompressionCodec compressor, CellScanner cellScanner, - ByteBufAllocator alloc) throws IOException { + ByteBufAllocator alloc) throws IOException { ByteBufOutputStreamSupplier supplier = new ByteBufOutputStreamSupplier(alloc); if (buildCellBlock(codec, compressor, cellScanner, supplier)) { return supplier.buf; @@ -160,7 +154,7 @@ class CellBlockBuilder { } private boolean buildCellBlock(final Codec codec, final CompressionCodec compressor, - final CellScanner cellScanner, OutputStreamSupplier supplier) throws IOException { + final CellScanner cellScanner, OutputStreamSupplier supplier) throws IOException { if (cellScanner == null) { return false; } @@ -171,13 +165,13 @@ class CellBlockBuilder { encodeCellsTo(supplier.get(bufferSize), cellScanner, codec, compressor); if (LOG.isTraceEnabled() && bufferSize < supplier.size()) { LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + supplier.size() - + "; up hbase.ipc.cellblock.building.initial.buffersize?"); + + "; up hbase.ipc.cellblock.building.initial.buffersize?"); } return true; } private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec, - CompressionCodec compressor) throws IOException { + CompressionCodec compressor) throws IOException { Compressor poolCompressor = null; try { if (compressor != null) { @@ -205,10 +199,10 @@ class CellBlockBuilder { /** * Puts CellScanner Cells into a cell block using passed in codec and/or * compressor. - * @param codec to use for encoding - * @param compressor to use for encoding + * @param codec to use for encoding + * @param compressor to use for encoding * @param cellScanner to encode - * @param allocator to allocate the {@link ByteBuff}. + * @param allocator to allocate the {@link ByteBuff}. * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using * passed in codec and/or compressor; the returned buffer has * been flipped and is ready for reading. Use limit to find total size. 
If @@ -217,7 +211,7 @@ class CellBlockBuilder { * @throws IOException if encoding the cells fail */ public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionCodec compressor, - CellScanner cellScanner, ByteBuffAllocator allocator) throws IOException { + CellScanner cellScanner, ByteBuffAllocator allocator) throws IOException { if (cellScanner == null) { return null; } @@ -234,13 +228,13 @@ class CellBlockBuilder { } /** - * @param codec to use for cellblock + * @param codec to use for cellblock * @param cellBlock to encode * @return CellScanner to work against the content of cellBlock * @throws IOException if encoding fails */ public CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor, - final byte[] cellBlock) throws IOException { + final byte[] cellBlock) throws IOException { // Use this method from Client side to create the CellScanner if (compressor != null) { ByteBuffer cellBlockBuf = decompress(compressor, cellBlock); @@ -254,15 +248,15 @@ class CellBlockBuilder { } /** - * @param codec to use for cellblock + * @param codec to use for cellblock * @param cellBlock ByteBuffer containing the cells written by the Codec. The buffer should be - * position()'ed at the start of the cell block and limit()'ed at the end. + * position()'ed at the start of the cell block and limit()'ed at the end. * @return CellScanner to work against the content of cellBlock. All cells created * out of the CellScanner will share the same ByteBuffer being passed. * @throws IOException if cell encoding fails */ public CellScanner createCellScannerReusingBuffers(final Codec codec, - final CompressionCodec compressor, ByteBuff cellBlock) throws IOException { + final CompressionCodec compressor, ByteBuff cellBlock) throws IOException { // Use this method from HRS to create the CellScanner // If compressed, decompress it first before passing it on else we will leak compression // resources if the stream is not closed properly after we let it out. @@ -273,21 +267,21 @@ class CellBlockBuilder { } private ByteBuffer decompress(CompressionCodec compressor, byte[] compressedCellBlock) - throws IOException { + throws IOException { ByteBuffer cellBlock = decompress(compressor, new ByteArrayInputStream(compressedCellBlock), - compressedCellBlock.length * this.cellBlockDecompressionMultiplier); + compressedCellBlock.length * this.cellBlockDecompressionMultiplier); return cellBlock; } private ByteBuff decompress(CompressionCodec compressor, ByteBuff compressedCellBlock) - throws IOException { + throws IOException { ByteBuffer cellBlock = decompress(compressor, new ByteBuffInputStream(compressedCellBlock), - compressedCellBlock.remaining() * this.cellBlockDecompressionMultiplier); + compressedCellBlock.remaining() * this.cellBlockDecompressionMultiplier); return new SingleByteBuff(cellBlock); } private ByteBuffer decompress(CompressionCodec compressor, InputStream cellBlockStream, - int osInitialSize) throws IOException { + int osInitialSize) throws IOException { // GZIPCodec fails w/ NPE if no configuration. 
if (compressor instanceof Configurable) { ((Configurable) compressor).setConf(this.conf); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java index c08272474de..a9e9faf73fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index cac9ff27382..6cb9cddd9fe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -18,14 +18,13 @@ package org.apache.hadoop.hbase.ipc; import java.util.Objects; - import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; /** - * This class holds the address and the user ticket, etc. The client connections - * to servers are uniquely identified by <remoteAddress, ticket, serviceName> + * This class holds the address and the user ticket, etc. The client connections to servers are + * uniquely identified by <remoteAddress, ticket, serviceName> */ @InterfaceAudience.Private class ConnectionId { @@ -61,21 +60,20 @@ class ConnectionId { public boolean equals(Object obj) { if (obj instanceof ConnectionId) { ConnectionId id = (ConnectionId) obj; - return address.equals(id.address) && - ((ticket != null && ticket.equals(id.ticket)) || - (ticket == id.ticket)) && Objects.equals(this.serviceName, id.serviceName); + return address.equals(id.address) + && ((ticket != null && ticket.equals(id.ticket)) || (ticket == id.ticket)) + && Objects.equals(this.serviceName, id.serviceName); } return false; } - @Override // simply use the default Object#hashcode() ? + @Override // simply use the default Object#hashcode() ? public int hashCode() { - return hashCode(ticket,serviceName,address); + return hashCode(ticket, serviceName, address); } public static int hashCode(User ticket, String serviceName, Address address) { - return (address.hashCode() + - PRIME * (PRIME * serviceName.hashCode() ^ - (ticket == null ? 0 : ticket.hashCode()))); + return (address.hashCode() + + PRIME * (PRIME * serviceName.hashCode() ^ (ticket == null ? 0 : ticket.hashCode()))); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java index 6ae7027894b..54b1c48e0f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
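For orientation, the CellBlockBuilder methods whose signatures are re-wrapped above form the encode/decode round trip for RPC cell blocks: a CellScanner is serialized with a Codec (optionally compressed) into a flipped ByteBuffer, and the same codec turns those bytes back into a CellScanner on the receiving side. A minimal sketch of that flow, assuming KeyValueCodec and no compression; CellBlockBuilder is IPC-internal, so this fragment (imports from org.apache.hadoop.hbase.* omitted) is purely illustrative:

    Configuration conf = HBaseConfiguration.create();
    CellBlockBuilder builder = new CellBlockBuilder(conf);
    Codec codec = new KeyValueCodec();
    CellScanner cells = CellUtil.createCellScanner(new Cell[] {
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")) });
    // Encode with a null compressor; result is a flipped ByteBuffer, or null if nothing was encoded.
    ByteBuffer block = builder.buildCellBlock(codec, null, cells);
    if (block != null) {
      // Decode: the same codec recreates a CellScanner over the block's bytes.
      CellScanner decoded = builder.createCellScanner(codec, null, Bytes.toBytes(block));
    }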
See the NOTICE file * distributed with this work for additional information @@ -22,12 +22,12 @@ import com.google.protobuf.RpcChannel; import org.apache.yetus.audience.InterfaceAudience; /** - * Base interface which provides clients with an RPC connection to - * call coprocessor endpoint {@link com.google.protobuf.Service}s. - * Note that clients should not use this class directly, except through - * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}. + * Base interface which provides clients with an RPC connection to call coprocessor endpoint + * {@link com.google.protobuf.Service}s. Note that clients should not use this class directly, + * except through {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}. */ @InterfaceAudience.Public -public interface CoprocessorRpcChannel extends RpcChannel, BlockingRpcChannel {} +public interface CoprocessorRpcChannel extends RpcChannel, BlockingRpcChannel { +} // This Interface is part of our public, client-facing API!!! // This belongs in client package but it is exposed in our public API so we cannot relocate. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java index cfee1873cd6..bb443db7f66 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java @@ -14,37 +14,34 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; -import java.io.IOException; -import java.io.InterruptedIOException; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -import org.apache.hadoop.util.StringUtils; - -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; import com.google.protobuf.Descriptors; import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Descriptors.ServiceDescriptor; import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.IOException; +import java.io.InterruptedIOException; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; /** * Utilities for handling coprocessor rpc service calls. @@ -53,14 +50,14 @@ import edu.umd.cs.findbugs.annotations.Nullable; public final class CoprocessorRpcUtils { private static final Logger LOG = LoggerFactory.getLogger(CoprocessorRpcUtils.class); /** - * We assume that all HBase protobuf services share a common package name - * (defined in the .proto files). + * We assume that all HBase protobuf services share a common package name (defined in the .proto + * files). */ private static final String hbaseServicePackage; static { Descriptors.ServiceDescriptor clientService = ClientProtos.ClientService.getDescriptor(); - hbaseServicePackage = clientService.getFullName() - .substring(0, clientService.getFullName().lastIndexOf(clientService.getName())); + hbaseServicePackage = clientService.getFullName().substring(0, + clientService.getFullName().lastIndexOf(clientService.getName())); } private CoprocessorRpcUtils() { @@ -68,10 +65,10 @@ public final class CoprocessorRpcUtils { } /** - * Returns the name to use for coprocessor service calls. For core HBase services - * (in the hbase.pb protobuf package), this returns the unqualified name in order to provide - * backward compatibility across the package name change. For all other services, - * the fully-qualified service name is used. + * Returns the name to use for coprocessor service calls. For core HBase services (in the hbase.pb + * protobuf package), this returns the unqualified name in order to provide backward compatibility + * across the package name change. For all other services, the fully-qualified service name is + * used. */ public static String getServiceName(Descriptors.ServiceDescriptor service) { if (service.getFullName().startsWith(hbaseServicePackage)) { @@ -80,60 +77,55 @@ public final class CoprocessorRpcUtils { return service.getFullName(); } - public static CoprocessorServiceRequest getCoprocessorServiceRequest( - final Descriptors.MethodDescriptor method, final Message request) { + public static CoprocessorServiceRequest + getCoprocessorServiceRequest(final Descriptors.MethodDescriptor method, final Message request) { return getCoprocessorServiceRequest(method, request, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY); + HConstants.EMPTY_BYTE_ARRAY); } public static CoprocessorServiceRequest getCoprocessorServiceRequest( - final Descriptors.MethodDescriptor method, final Message request, final byte [] row, - final byte [] regionName) { - return CoprocessorServiceRequest.newBuilder().setCall( - getCoprocessorServiceCall(method, request, row)). 
- setRegion(RequestConverter.buildRegionSpecifier(REGION_NAME, regionName)).build(); + final Descriptors.MethodDescriptor method, final Message request, final byte[] row, + final byte[] regionName) { + return CoprocessorServiceRequest.newBuilder() + .setCall(getCoprocessorServiceCall(method, request, row)) + .setRegion(RequestConverter.buildRegionSpecifier(REGION_NAME, regionName)).build(); } private static CoprocessorServiceCall getCoprocessorServiceCall( - final Descriptors.MethodDescriptor method, final Message request, final byte [] row) { + final Descriptors.MethodDescriptor method, final Message request, final byte[] row) { return CoprocessorServiceCall.newBuilder() - .setRow(org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap(row)) - .setServiceName(CoprocessorRpcUtils.getServiceName(method.getService())) - .setMethodName(method.getName()) - // TODO!!!!! Come back here after!!!!! This is a double copy of the request if I read - // it right copying from non-shaded to shaded version!!!!!! FIXXXXX!!!!! - .setRequest(org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations. - unsafeWrap(request.toByteArray())).build(); + .setRow(org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap(row)) + .setServiceName(CoprocessorRpcUtils.getServiceName(method.getService())) + .setMethodName(method.getName()) + // TODO!!!!! Come back here after!!!!! This is a double copy of the request if I read + // it right copying from non-shaded to shaded version!!!!!! FIXXXXX!!!!! + .setRequest(org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations + .unsafeWrap(request.toByteArray())) + .build(); } public static MethodDescriptor getMethodDescriptor(final String methodName, - final ServiceDescriptor serviceDesc) - throws UnknownProtocolException { + final ServiceDescriptor serviceDesc) throws UnknownProtocolException { Descriptors.MethodDescriptor methodDesc = serviceDesc.findMethodByName(methodName); if (methodDesc == null) { - throw new UnknownProtocolException("Unknown method " + methodName + " called on service " + - serviceDesc.getFullName()); + throw new UnknownProtocolException( + "Unknown method " + methodName + " called on service " + serviceDesc.getFullName()); } return methodDesc; } - public static Message getRequest(Service service, - Descriptors.MethodDescriptor methodDesc, - org.apache.hbase.thirdparty.com.google.protobuf.ByteString shadedRequest) - throws IOException { - Message.Builder builderForType = - service.getRequestPrototype(methodDesc).newBuilderForType(); + public static Message getRequest(Service service, Descriptors.MethodDescriptor methodDesc, + org.apache.hbase.thirdparty.com.google.protobuf.ByteString shadedRequest) throws IOException { + Message.Builder builderForType = service.getRequestPrototype(methodDesc).newBuilderForType(); org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builderForType, - // TODO: COPY FROM SHADED TO NON_SHADED. DO I HAVE TOO? - shadedRequest.toByteArray()); + // TODO: COPY FROM SHADED TO NON_SHADED. DO I HAVE TOO? 
+ shadedRequest.toByteArray()); return builderForType.build(); } public static Message getResponse( - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse - result, - com.google.protobuf.Message responsePrototype) - throws IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse result, + com.google.protobuf.Message responsePrototype) throws IOException { Message response; if (result.getValue().hasValue()) { Message.Builder builder = responsePrototype.newBuilderForType(); @@ -148,26 +140,25 @@ public final class CoprocessorRpcUtils { return response; } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos. - CoprocessorServiceResponse getResponse(final Message result, final byte [] regionName) { - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos. - CoprocessorServiceResponse.Builder builder = - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse. - newBuilder(); - builder.setRegion(RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, - regionName)); + public static + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse + getResponse(final Message result, final byte[] regionName) { + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.Builder builder = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse + .newBuilder(); + builder.setRegion( + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); // TODO: UGLY COPY IN HERE!!!! - builder.setValue(builder.getValueBuilder().setName(result.getClass().getName()) - .setValue(org.apache.hbase.thirdparty.com.google.protobuf.ByteString. - copyFrom(result.toByteArray()))); + builder.setValue(builder.getValueBuilder().setName(result.getClass().getName()).setValue( + org.apache.hbase.thirdparty.com.google.protobuf.ByteString.copyFrom(result.toByteArray()))); return builder.build(); } /** - * Simple {@link RpcCallback} implementation providing a - * {@link java.util.concurrent.Future}-like {@link BlockingRpcCallback#get()} method, which - * will block util the instance's {@link BlockingRpcCallback#run(Object)} method has been called. - * {@code R} is the RPC response type that will be passed to the {@link #run(Object)} method. + * Simple {@link RpcCallback} implementation providing a {@link java.util.concurrent.Future}-like + * {@link BlockingRpcCallback#get()} method, which will block util the instance's + * {@link BlockingRpcCallback#run(Object)} method has been called. {@code R} is the RPC response + * type that will be passed to the {@link #run(Object)} method. */ @InterfaceAudience.Private // Copy of BlockingRpcCallback but deriving from RpcCallback non-shaded. @@ -191,7 +182,7 @@ public final class CoprocessorRpcUtils { /** * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was - * passed. When used asynchronously, this method will block until the {@link #run(Object)} + * passed. When used asynchronously, this method will block until the {@link #run(Object)} * method has been called. * @return the response object or {@code null} if no response was passed */ @@ -210,17 +201,17 @@ public final class CoprocessorRpcUtils { } /** - * Stores an exception encountered during RPC invocation so it can be passed back - * through to the client. 
+ * Stores an exception encountered during RPC invocation so it can be passed back through to the + * client. * @param controller the controller instance provided by the client when calling the service - * @param ioe the exception encountered + * @param ioe the exception encountered */ public static void setControllerException(RpcController controller, IOException ioe) { if (controller == null) { return; } if (controller instanceof org.apache.hadoop.hbase.ipc.ServerRpcController) { - ((ServerRpcController)controller).setFailedOn(ioe); + ((ServerRpcController) controller).setFailedOn(ioe); } else { controller.setFailed(StringUtils.stringifyException(ioe)); } @@ -230,7 +221,7 @@ public final class CoprocessorRpcUtils { * Retreivies exception stored during RPC invocation. * @param controller the controller instance provided by the client when calling the service * @return exception if any, or null; Will return DoNotRetryIOException for string represented - * failure causes in controller. + * failure causes in controller. */ @Nullable public static IOException getControllerException(RpcController controller) throws IOException { @@ -238,7 +229,7 @@ public final class CoprocessorRpcUtils { return null; } if (controller instanceof ServerRpcController) { - return ((ServerRpcController)controller).getFailedOn(); + return ((ServerRpcController) controller).getFailedOn(); } return new DoNotRetryIOException(controller.errorText()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java index d68d955a984..9bee88d599f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; - import java.io.IOException; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + /** * Simple delegating controller for use with the {@link RpcControllerFactory} to help override * standard behavior of a {@link HBaseRpcController}. Used testing. @@ -130,7 +129,7 @@ public class DelegatingHBaseRpcController implements HBaseRpcController { @Override public void notifyOnCancel(RpcCallback callback, CancellationCallback action) - throws IOException { + throws IOException { delegate.notifyOnCancel(callback, action); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java index 46f57d51804..e14ed782d41 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
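As context for the CoprocessorRpcUtils and ServerRpcController plumbing reformatted above: together they back the usual coprocessor endpoint call pattern, where a generated protobuf stub runs over a CoprocessorRpcChannel and any server-side exception is carried back through the controller. A sketch of that pattern; SumService, SumRequest and SumResponse are hypothetical classes generated from a user-defined .proto file, and table is an existing org.apache.hadoop.hbase.client.Table:

    CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("some-row"));
    SumService stub = SumService.newStub(channel);               // hypothetical generated service
    ServerRpcController controller = new ServerRpcController();
    CoprocessorRpcUtils.BlockingRpcCallback<SumResponse> done =
      new CoprocessorRpcUtils.BlockingRpcCallback<>();
    stub.getSum(controller, SumRequest.getDefaultInstance(), done);
    SumResponse response = done.get();                           // blocks until run(Object) fires
    if (controller.failedOnException()) {
      // the server installed this via CoprocessorRpcUtils.setControllerException(...)
      throw controller.getFailedOn();
    }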
See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.yetus.audience.InterfaceAudience; /** - * Indicates that we're trying to connect to a already known as dead server. We will want to - * retry: we're getting this because the region location was wrong, or because - * the server just died, in which case the retry loop will help us to wait for the - * regions to recover. + * Indicates that we're trying to connect to a already known as dead server. We will want to retry: + * we're getting this because the region location was wrong, or because the server just died, in + * which case the retry loop will help us to wait for the regions to recover. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java index 1a8bc0129ea..0a8da3c2015 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ package org.apache.hadoop.hbase.ipc; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * A class to manage a list of servers that failed recently. @@ -38,8 +37,8 @@ public class FailedServers { private static final Logger LOG = LoggerFactory.getLogger(FailedServers.class); public FailedServers(Configuration conf) { - this.recheckServersTimeout = conf.getInt( - RpcClient.FAILED_SERVER_EXPIRY_KEY, RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); + this.recheckServersTimeout = + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); } /** @@ -50,15 +49,13 @@ public class FailedServers { this.failedServers.put(address, expiry); this.latestExpiry = expiry; if (LOG.isDebugEnabled()) { - LOG.debug( - "Added failed server with address " + address + " to list caused by " - + throwable.toString()); + LOG.debug("Added failed server with address " + address + " to list caused by " + + throwable.toString()); } } /** * Check if the server should be considered as bad. Clean the old entries of the list. - * * @return true if the server is in the failed servers list */ public synchronized boolean isFailedServer(final Address address) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java index f2e7db039ad..21c4f3ada2e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
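For reference, the FailedServers class touched above is the client-internal list of recently dead servers that is consulted before opening a connection; entries expire after hbase.ipc.client.failed.servers.expiry milliseconds (2000 by default). A small illustrative fragment of that behaviour (internal API; the host and port are placeholders):

    Configuration conf = HBaseConfiguration.create();
    conf.setInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, 5000);    // keep entries for 5 seconds
    FailedServers failedServers = new FailedServers(conf);
    Address rs = Address.fromParts("rs1.example.com", 16020);
    failedServers.addToFailedServers(rs, new java.net.ConnectException("Connection refused"));
    boolean skip = failedServers.isFailedServer(rs);          // true until the entry expires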
See the NOTICE file * distributed with this work for additional information @@ -31,6 +31,6 @@ public class FallbackDisallowedException extends HBaseIOException { public FallbackDisallowedException() { super("Server asks us to fall back to SIMPLE auth, " - + "but this client is configured to only allow secure connections."); + + "but this client is configured to only allow secure connections."); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java index 6e674268894..ed0e220a92c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,8 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when server finds fatal issue w/ connection setup: e.g. bad rpc version - * or unsupported auth method. - * Closes connection after throwing this exception with message on why the failure. + * Thrown when server finds fatal issue w/ connection setup: e.g. bad rpc version or unsupported + * auth method. Closes connection after throwing this exception with message on why the failure. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java index 4f953e93aa7..b33771e5b58 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java @@ -17,20 +17,19 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; - import java.io.IOException; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + /** * Optionally carries Cells across the proxy/service interface down into ipc. On its way out it * optionally carries a set of result Cell data. We stick the Cells here when we want to avoid @@ -39,8 +38,8 @@ import org.apache.yetus.audience.InterfaceStability; * RegionInfo we're making the call against if relevant (useful adding info to exceptions and logs). * Used by client and server ipc'ing. 
*/ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, - HBaseInterfaceAudience.REPLICATION}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.REPLICATION }) @InterfaceStability.Evolving public interface HBaseRpcController extends RpcController, CellScannable { @@ -52,7 +51,7 @@ public interface HBaseRpcController extends RpcController, CellScannable { /** * @param priority Priority for this request; should fall roughly in the range - * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS} + * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS} */ void setPriority(int priority); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java index 3058247db48..a4c51ae62ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java @@ -17,20 +17,19 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; - import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + /** * Get instances via {@link RpcControllerFactory} on client-side. * @see RpcControllerFactory @@ -156,7 +155,7 @@ public class HBaseRpcControllerImpl implements HBaseRpcController { @Override public int getCallTimeout() { - return callTimeout != null? callTimeout: 0; + return callTimeout != null ? callTimeout : 0; } @Override @@ -254,7 +253,7 @@ public class HBaseRpcControllerImpl implements HBaseRpcController { @Override public synchronized void notifyOnCancel(RpcCallback callback, CancellationCallback action) - throws IOException { + throws IOException { if (cancelled) { action.run(true); } else { @@ -263,7 +262,8 @@ public class HBaseRpcControllerImpl implements HBaseRpcController { } } - @Override public String toString() { + @Override + public String toString() { return "HBaseRpcControllerImpl{" + "callTimeout=" + callTimeout + ", done=" + done + ", cancelled=" + cancelled + ", cancellationCbs=" + cancellationCbs + ", exception=" + exception + ", regionInfo=" + regionInfo + ", priority=" + priority + ", cellScanner=" diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index 2b71493e76c..57f8da98eff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
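The HBaseRpcController interface whose annotations are re-wrapped above is how a caller attaches priority, a per-call timeout and outbound cell payloads to a single RPC. A short usage fragment (the timeout value is only an example):

    Configuration conf = HBaseConfiguration.create();
    RpcControllerFactory factory = RpcControllerFactory.instantiate(conf);
    HBaseRpcController controller = factory.newController();
    controller.setPriority(HConstants.HIGH_QOS);   // roughly NORMAL_QOS..HIGH_QOS per the javadoc
    controller.setCallTimeout(30000);              // per-call timeout in milliseconds
    // After the call, controller.cellScanner() exposes any Cells returned outside the protobuf.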
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,12 +40,14 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocal; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; @@ -59,15 +61,15 @@ class IPCUtil { /** * Write out header, param, and cell block if there is one. - * @param dos Stream to write into - * @param header to write - * @param param to write + * @param dos Stream to write into + * @param header to write + * @param param to write * @param cellBlock to write * @return Total number of bytes written. * @throws IOException if write action fails */ public static int write(final OutputStream dos, final Message header, final Message param, - final ByteBuf cellBlock) throws IOException { + final ByteBuf cellBlock) throws IOException { // Must calculate total size and write that first so other side can read it all in in one // swoop. This is dictated by how the server is currently written. Server needs to change // if we are to be able to write without the length prefixing. @@ -79,7 +81,7 @@ class IPCUtil { } private static int write(final OutputStream dos, final Message header, final Message param, - final ByteBuf cellBlock, final int totalSize) throws IOException { + final ByteBuf cellBlock, final int totalSize) throws IOException { // I confirmed toBytes does same as DataOutputStream#writeInt. dos.write(Bytes.toBytes(totalSize)); // This allocates a buffer that is the size of the message internally. @@ -140,10 +142,10 @@ class IPCUtil { boolean doNotRetry = e.getDoNotRetry(); boolean serverOverloaded = e.hasServerOverloaded() && e.getServerOverloaded(); return e.hasHostname() ? - // If a hostname then add it to the RemoteWithExtrasException + // If a hostname then add it to the RemoteWithExtrasException new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), e.getHostname(), - e.getPort(), doNotRetry, serverOverloaded) : - new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), doNotRetry, + e.getPort(), doNotRetry, serverOverloaded) + : new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), doNotRetry, serverOverloaded); } @@ -163,8 +165,8 @@ class IPCUtil { } private static String getCallTarget(Address addr, RegionInfo regionInfo) { - return "address=" + addr + - (regionInfo != null? ", region=" + regionInfo.getRegionNameAsString(): ""); + return "address=" + addr + + (regionInfo != null ? ", region=" + regionInfo.getRegionNameAsString() : ""); } /** @@ -179,7 +181,7 @@ class IPCUtil { * deciding whether to retry. 
If it is not possible to create a new exception with the same type, * for example, the {@code error} is not an {@link IOException}, an {@link IOException} will be * created. - * @param addr target address + * @param addr target address * @param error the relevant exception * @return an exception to throw * @see ClientExceptionsUtil#isConnectionException(Throwable) @@ -187,14 +189,16 @@ class IPCUtil { static IOException wrapException(Address addr, RegionInfo regionInfo, Throwable error) { if (error instanceof ConnectException) { // connection refused; include the host:port in the error - return (IOException) new ConnectException("Call to " + getCallTarget(addr, regionInfo) + - " failed on connection exception: " + error).initCause(error); + return (IOException) new ConnectException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on connection exception: " + error) + .initCause(error); } else if (error instanceof SocketTimeoutException) { return (IOException) new SocketTimeoutException( "Call to " + getCallTarget(addr, regionInfo) + " failed because " + error).initCause(error); } else if (error instanceof ConnectionClosingException) { - return new ConnectionClosingException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new ConnectionClosingException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof ServerTooBusyException) { // we already have address in the exception message return (IOException) error; @@ -203,51 +207,57 @@ class IPCUtil { try { return (IOException) error.getClass().asSubclass(DoNotRetryIOException.class) .getConstructor(String.class) - .newInstance("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error).initCause(error); + .newInstance( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error) + .initCause(error); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException - | InvocationTargetException | NoSuchMethodException | SecurityException e) { + | InvocationTargetException | NoSuchMethodException | SecurityException e) { // just ignore, will just new a DoNotRetryIOException instead below } - return new DoNotRetryIOException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new DoNotRetryIOException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof ConnectionClosedException) { - return new ConnectionClosedException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new ConnectionClosedException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof CallTimeoutException) { - return new CallTimeoutException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new CallTimeoutException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof ClosedChannelException) { // ClosedChannelException does not have a constructor which takes a String but it is a // connection exception so we keep its original type return (IOException) error; } else if (error instanceof TimeoutException) { // TimeoutException is not an IOException, let's convert it to TimeoutIOException. 
- return new TimeoutIOException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new TimeoutIOException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else { // try our best to keep the original exception type if (error instanceof IOException) { try { return (IOException) error.getClass().asSubclass(IOException.class) .getConstructor(String.class) - .newInstance("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error) + .newInstance( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error) .initCause(error); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException - | InvocationTargetException | NoSuchMethodException | SecurityException e) { + | InvocationTargetException | NoSuchMethodException | SecurityException e) { // just ignore, will just new an IOException instead below } } - return new HBaseIOException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new HBaseIOException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } } static void setCancelled(Call call) { call.setException(new CallCancelledException(call.toShortString() + ", waitTime=" - + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" - + call.timeout)); + + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" + + call.timeout)); } private static final FastThreadLocal DEPTH = new FastThreadLocal() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java index 4c85e3d51ab..7b698958ede 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java @@ -46,17 +46,16 @@ public class NettyRpcClient extends AbstractRpcClient { private final boolean shutdownGroupWhenClose; public NettyRpcClient(Configuration configuration, String clusterId, SocketAddress localAddress, - MetricsConnection metrics) { + MetricsConnection metrics) { super(configuration, clusterId, localAddress, metrics); Pair> groupAndChannelClass = NettyRpcClientConfigHelper.getEventLoopConfig(conf); if (groupAndChannelClass == null) { // Use our own EventLoopGroup. 
- int threadCount = conf.getInt( - NettyRpcClientConfigHelper.HBASE_NETTY_EVENTLOOP_RPCCLIENT_THREADCOUNT_KEY, 0); + int threadCount = + conf.getInt(NettyRpcClientConfigHelper.HBASE_NETTY_EVENTLOOP_RPCCLIENT_THREADCOUNT_KEY, 0); this.group = new NioEventLoopGroup(threadCount, - new DefaultThreadFactory("RPCClient(own)-NioEventLoopGroup", true, - Thread.NORM_PRIORITY)); + new DefaultThreadFactory("RPCClient(own)-NioEventLoopGroup", true, Thread.NORM_PRIORITY)); this.channelClass = NioSocketChannel.class; this.shutdownGroupWhenClose = true; } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java index a8c99378720..ef805ad5178 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java @@ -45,27 +45,28 @@ public final class NettyRpcClientConfigHelper { public static final String EVENT_LOOP_CONFIG = "hbase.rpc.client.event-loop.config"; /** - * Name of property to change netty rpc client eventloop thread count. Default is 0. - * Tests may set this down from unlimited. + * Name of property to change netty rpc client eventloop thread count. Default is 0. Tests may set + * this down from unlimited. */ public static final String HBASE_NETTY_EVENTLOOP_RPCCLIENT_THREADCOUNT_KEY = "hbase.netty.eventloop.rpcclient.thread.count"; private static final String CONFIG_NAME = "global-event-loop"; - private static final Map>> - EVENT_LOOP_CONFIG_MAP = new HashMap<>(); + private static final Map>> EVENT_LOOP_CONFIG_MAP = new HashMap<>(); /** * Shutdown constructor. */ - private NettyRpcClientConfigHelper() {} + private NettyRpcClientConfigHelper() { + } /** * Set the EventLoopGroup and channel class for {@code AsyncRpcClient}. */ public static void setEventLoopConfig(Configuration conf, EventLoopGroup group, - Class channelClass) { + Class channelClass) { Preconditions.checkNotNull(group, "group is null"); Preconditions.checkNotNull(channelClass, "channel class is null"); conf.set(EVENT_LOOP_CONFIG, CONFIG_NAME); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index d0a13ca33d6..14e8cbc13d3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index c67d96f0a75..fe32189f81b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
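NettyRpcClientConfigHelper, reformatted above, lets an application share its own Netty event loop with the RPC client instead of letting NettyRpcClient create a private NioEventLoopGroup sized by hbase.netty.eventloop.rpcclient.thread.count. A sketch using the shaded Netty classes from org.apache.hbase.thirdparty (the thread count is just an example):

    // org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup and
    // org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel
    NioEventLoopGroup sharedGroup = new NioEventLoopGroup(4);
    Configuration conf = HBaseConfiguration.create();
    NettyRpcClientConfigHelper.setEventLoopConfig(conf, sharedGroup, NioSocketChannel.class);
    // Clients built with this conf reuse sharedGroup; shut it down yourself when finished:
    // sharedGroup.shutdownGracefully();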
See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { private final Map id2Call = new HashMap<>(); public NettyRpcDuplexHandler(NettyRpcConnection conn, CellBlockBuilder cellBlockBuilder, - Codec codec, CompressionCodec compressor) { + Codec codec, CompressionCodec compressor) { this.conn = conn; this.cellBlockBuilder = cellBlockBuilder; this.codec = codec; @@ -77,7 +77,7 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { } private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise promise) - throws IOException { + throws IOException { id2Call.put(call.id, call); ByteBuf cellBlock = cellBlockBuilder.buildCellBlock(codec, compressor, call.cells, ctx.alloc()); CellBlockMeta cellBlockMeta; @@ -90,8 +90,8 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { } RequestHeader requestHeader = IPCUtil.buildRequestHeader(call, cellBlockMeta); int sizeWithoutCellBlock = IPCUtil.getTotalSizeWhenWrittenDelimited(requestHeader, call.param); - int totalSize = cellBlock != null ? sizeWithoutCellBlock + cellBlock.writerIndex() - : sizeWithoutCellBlock; + int totalSize = + cellBlock != null ? sizeWithoutCellBlock + cellBlock.writerIndex() : sizeWithoutCellBlock; ByteBuf buf = ctx.alloc().buffer(sizeWithoutCellBlock + 4); buf.writeInt(totalSize); try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf)) { @@ -133,7 +133,7 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { int id = responseHeader.getCallId(); if (LOG.isTraceEnabled()) { LOG.trace("got response header " + TextFormat.shortDebugString(responseHeader) - + ", totalSize: " + totalSize + " bytes"); + + ", totalSize: " + totalSize + " bytes"); } RemoteException remoteExc; if (responseHeader.hasException()) { @@ -158,7 +158,7 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { int readSoFar = IPCUtil.getTotalSizeWhenWrittenDelimited(responseHeader); int whatIsLeftToRead = totalSize - readSoFar; LOG.debug("Unknown callId: " + id + ", skipping over this response of " + whatIsLeftToRead - + " bytes"); + + " bytes"); } return; } @@ -235,7 +235,7 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { if (id2Call.isEmpty()) { if (LOG.isTraceEnabled()) { LOG.trace("shutdown connection to " + conn.remoteId().address - + " because idle for a long time"); + + " because idle for a long time"); } // It may happen that there are still some pending calls in the event loop queue and // they will get a closed channel exception. But this is not a big deal as it rarely diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java index 108b9068a2d..62d0bb1d455 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.lang.reflect.Constructor; import java.security.AccessController; import java.security.PrivilegedAction; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -31,10 +30,10 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; /** - * A {@link RemoteException} with some extra information. If source exception - * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException}, - * {@link #isDoNotRetry()} will return true. - *

      A {@link RemoteException} hosts exceptions we got from the server. + * A {@link RemoteException} with some extra information. If source exception was a + * {@link org.apache.hadoop.hbase.DoNotRetryIOException}, {@link #isDoNotRetry()} will return true. + *

      + * A {@link RemoteException} hosts exceptions we got from the server. */ @SuppressWarnings("serial") @InterfaceAudience.Public @@ -53,9 +52,8 @@ public class RemoteWithExtrasException extends RemoteException { static { ClassLoader parent = RemoteWithExtrasException.class.getClassLoader(); Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); + CLASS_LOADER = AccessController + .doPrivileged((PrivilegedAction) () -> new DynamicClassLoader(conf, parent)); } } @@ -74,7 +72,7 @@ public class RemoteWithExtrasException extends RemoteException { } public RemoteWithExtrasException(String className, String msg, final String hostname, - final int port, final boolean doNotRetry, final boolean serverOverloaded) { + final int port, final boolean doNotRetry, final boolean serverOverloaded) { super(className, msg); this.hostname = hostname; this.port = port; @@ -95,14 +93,14 @@ public class RemoteWithExtrasException extends RemoteException { realClass = Class.forName(getClassName(), false, super.getClass().getClassLoader()); } catch (ClassNotFoundException e) { return new DoNotRetryIOException( - "Unable to load exception received from server:" + e.getMessage(), this); + "Unable to load exception received from server:" + e.getMessage(), this); } } try { return instantiateException(realClass.asSubclass(IOException.class)); } catch (Exception e) { return new DoNotRetryIOException( - "Unable to instantiate exception received from server:" + e.getMessage(), this); + "Unable to instantiate exception received from server:" + e.getMessage(), this); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 5bb08152d30..6ecff49e52b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,15 +34,15 @@ public interface RpcClient extends Closeable { int FAILED_SERVER_EXPIRY_DEFAULT = 2000; String IDLE_TIME = "hbase.ipc.client.connection.minIdleTimeBeforeClose"; String IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = - "hbase.ipc.client.fallback-to-simple-auth-allowed"; + "hbase.ipc.client.fallback-to-simple-auth-allowed"; boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false; String SPECIFIC_WRITE_THREAD = "hbase.ipc.client.specificThreadForWriting"; String DEFAULT_CODEC_CLASS = "hbase.client.default.rpc.codec"; String SOCKET_TIMEOUT_CONNECT = "hbase.ipc.client.socket.timeout.connect"; /** - * How long we wait when we wait for an answer. It's not the operation time, it's the time - * we wait when we start to receive an answer, when the remote write starts to send the data. + * How long we wait when we wait for an answer. It's not the operation time, it's the time we wait + * when we start to receive an answer, when the remote write starts to send the data. */ String SOCKET_TIMEOUT_READ = "hbase.ipc.client.socket.timeout.read"; String SOCKET_TIMEOUT_WRITE = "hbase.ipc.client.socket.timeout.write"; @@ -55,43 +55,36 @@ public interface RpcClient extends Closeable { int PING_CALL_ID = -1; /** - * Creates a "channel" that can be used by a blocking protobuf service. 
Useful setting up - * protobuf blocking stubs. - * - * @param sn server name describing location of server - * @param user which is to use the connection + * Creates a "channel" that can be used by a blocking protobuf service. Useful setting up protobuf + * blocking stubs. + * @param sn server name describing location of server + * @param user which is to use the connection * @param rpcTimeout default rpc operation timeout - * * @return A blocking rpc channel that goes via this rpc client instance. */ BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout); /** - * Creates a "channel" that can be used by a protobuf service. Useful setting up - * protobuf stubs. - * - * @param sn server name describing location of server - * @param user which is to use the connection + * Creates a "channel" that can be used by a protobuf service. Useful setting up protobuf stubs. + * @param sn server name describing location of server + * @param user which is to use the connection * @param rpcTimeout default rpc operation timeout - * * @return A rpc channel that goes via this rpc client instance. */ RpcChannel createRpcChannel(final ServerName sn, final User user, int rpcTimeout); /** - * Interrupt the connections to the given server. This should be called if the server - * is known as actually dead. This will not prevent current operation to be retried, and, - * depending on their own behavior, they may retry on the same server. This can be a feature, - * for example at startup. In any case, they're likely to get connection refused (if the - * process died) or no route to host: i.e. their next retries should be faster and with a - * safe exception. + * Interrupt the connections to the given server. This should be called if the server is known as + * actually dead. This will not prevent current operation to be retried, and, depending on their + * own behavior, they may retry on the same server. This can be a feature, for example at startup. + * In any case, they're likely to get connection refused (if the process died) or no route to + * host: i.e. their next retries should be faster and with a safe exception. * @param sn server location to cancel connections of */ void cancelConnections(ServerName sn); /** - * Stop all threads related to this client. No further calls may be made - * using this client. + * Stop all threads related to this client. No further calls may be made using this client. */ @Override void close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java index 434795248c6..9b69b523405 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
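The RpcClient javadoc cleaned up above describes the client-side entry point for protobuf channels. A minimal create/use/close sketch; the server name and cluster id are placeholders, and the returned channel would normally be wrapped in a generated blocking stub:

    Configuration conf = HBaseConfiguration.create();
    RpcClient rpcClient = RpcClientFactory.createClient(conf, "example-cluster-id", null);
    try {
      ServerName sn = ServerName.valueOf("rs1.example.com", 16020, System.currentTimeMillis());
      // The channel type is the shaded com.google.protobuf BlockingRpcChannel.
      BlockingRpcChannel channel =
        rpcClient.createBlockingRpcChannel(sn, User.getCurrent(), 60000 /* rpc timeout, ms */);
    } finally {
      rpcClient.close();   // no further calls may be made with this client afterwards
    }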
See the NOTICE file * distributed with this work for additional information @@ -33,9 +33,10 @@ public final class RpcClientFactory { public static final String CUSTOM_RPC_CLIENT_IMPL_CONF_KEY = "hbase.rpc.client.impl"; - private static final ImmutableMap DEPRECATED_NAME_MAPPING = ImmutableMap.of( - "org.apache.hadoop.hbase.ipc.RpcClientImpl", BlockingRpcClient.class.getName(), - "org.apache.hadoop.hbase.ipc.AsyncRpcClient", NettyRpcClient.class.getName()); + private static final ImmutableMap DEPRECATED_NAME_MAPPING = ImmutableMap.of("org.apache.hadoop.hbase.ipc.RpcClientImpl", + BlockingRpcClient.class.getName(), "org.apache.hadoop.hbase.ipc.AsyncRpcClient", + NettyRpcClient.class.getName()); /** * Private Constructor @@ -51,13 +52,13 @@ public final class RpcClientFactory { /** * Creates a new RpcClient by the class defined in the configuration or falls back to * RpcClientImpl - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id - * @param metrics the connection metrics + * @param metrics the connection metrics * @return newly created RpcClient */ public static RpcClient createClient(Configuration conf, String clusterId, - MetricsConnection metrics) { + MetricsConnection metrics) { return createClient(conf, clusterId, null, metrics); } @@ -73,17 +74,17 @@ public final class RpcClientFactory { /** * Creates a new RpcClient by the class defined in the configuration or falls back to * RpcClientImpl - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id * @param localAddr client socket bind address. - * @param metrics the connection metrics + * @param metrics the connection metrics * @return newly created RpcClient */ public static RpcClient createClient(Configuration conf, String clusterId, - SocketAddress localAddr, MetricsConnection metrics) { + SocketAddress localAddr, MetricsConnection metrics) { String rpcClientClass = getRpcClientClass(conf); return ReflectionUtils.instantiateWithCustomCtor(rpcClientClass, new Class[] { - Configuration.class, String.class, SocketAddress.class, MetricsConnection.class }, + Configuration.class, String.class, SocketAddress.class, MetricsConnection.class }, new Object[] { conf, clusterId, localAddr, metrics }); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index bfaf91c5285..912fa4fb065 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.MetricsConnection; @@ -43,6 +42,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.hbase.thirdparty.io.netty.util.TimerTask; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @@ -85,8 +85,8 @@ abstract class RpcConnection { protected SaslClientAuthenticationProvider provider; protected RpcConnection(Configuration conf, 
HashedWheelTimer timeoutTimer, ConnectionId remoteId, - String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, - MetricsConnection metrics) throws IOException { + String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, + MetricsConnection metrics) throws IOException { this.timeoutTimer = timeoutTimer; this.codec = codec; this.compressor = compressor; @@ -98,14 +98,14 @@ abstract class RpcConnection { // Choose the correct Token and AuthenticationProvider for this client to use SaslClientAuthenticationProviders providers = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); Pair> pair; if (useSasl && securityInfo != null) { pair = providers.selectProvider(clusterId, ticket); if (pair == null) { if (LOG.isTraceEnabled()) { LOG.trace("Found no valid authentication method from providers={} with tokens={}", - providers.toString(), ticket.getTokens()); + providers.toString(), ticket.getTokens()); } throw new RuntimeException("Found no valid authentication method from options"); } @@ -120,7 +120,7 @@ abstract class RpcConnection { this.token = pair.getSecond(); LOG.debug("Using {} authentication for service={}, sasl={}", - provider.getSaslAuthMethod().getName(), remoteId.serviceName, useSasl); + provider.getSaslAuthMethod().getName(), remoteId.serviceName, useSasl); reloginMaxBackoff = conf.getInt("hbase.security.relogin.maxbackoff", 5000); this.remoteId = remoteId; } @@ -132,8 +132,8 @@ abstract class RpcConnection { @Override public void run(Timeout timeout) throws Exception { call.setTimeout(new CallTimeoutException(call.toShortString() + ", waitTime=" - + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + "ms, rpcTimeout=" - + call.timeout + "ms")); + + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + "ms, rpcTimeout=" + + call.timeout + "ms")); callTimeout(call); } }, call.timeout, TimeUnit.MILLISECONDS); @@ -159,7 +159,7 @@ abstract class RpcConnection { protected final ConnectionHeader getConnectionHeader() { final ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); builder.setServiceName(remoteId.getServiceName()); - final UserInformation userInfoPB = provider.getUserInfo(remoteId.ticket); + final UserInformation userInfoPB = provider.getUserInfo(remoteId.ticket); if (userInfoPB != null) { builder.setUserInfo(userInfoPB); } @@ -174,7 +174,7 @@ abstract class RpcConnection { // if Crypto AES enable, setup Cipher transformation if (isCryptoAESEnable) { builder.setRpcCryptoCipherTransformation( - conf.get("hbase.rpc.crypto.encryption.aes.cipher.transform", "AES/CTR/NoPadding")); + conf.get("hbase.rpc.crypto.encryption.aes.cipher.transform", "AES/CTR/NoPadding")); } return builder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java index 3f160d4dc59..a256769de70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java @@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory; /** * Factory to create a {@link HBaseRpcController} */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class 
RpcControllerFactory { private static final Logger LOG = LoggerFactory.getLogger(RpcControllerFactory.class); @@ -67,14 +67,13 @@ public class RpcControllerFactory { } public HBaseRpcController newController(RegionInfo regionInfo, - final List cellIterables) { + final List cellIterables) { return new HBaseRpcControllerImpl(regionInfo, cellIterables); } public static RpcControllerFactory instantiate(Configuration configuration) { String rpcControllerFactoryClazz = - configuration.get(CUSTOM_CONTROLLER_CONF_KEY, - RpcControllerFactory.class.getName()); + configuration.get(CUSTOM_CONTROLLER_CONF_KEY, RpcControllerFactory.class.getName()); try { return ReflectionUtils.instantiateWithCustomCtor(rpcControllerFactoryClazz, new Class[] { Configuration.class }, new Object[] { configuration }); @@ -82,8 +81,8 @@ public class RpcControllerFactory { // HBASE-14960: In case the RPCController is in a non-HBase jar (Phoenix), but the application // is a pure HBase application, we want to fallback to the default one. String msg = "Cannot load configured \"" + CUSTOM_CONTROLLER_CONF_KEY + "\" (" - + rpcControllerFactoryClazz + ") from hbase-site.xml, falling back to use " - + "default RpcControllerFactory"; + + rpcControllerFactoryClazz + ") from hbase-site.xml, falling back to use " + + "default RpcControllerFactory"; if (LOG.isDebugEnabled()) { LOG.warn(msg, ex); // if DEBUG enabled, we want the exception, but still log in WARN level } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java index 19b46817258..94d2b0eafb5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; @SuppressWarnings("serial") diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java index 78df1f68661..92f96cf610b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,26 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. 
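
The RpcControllerFactory hunk above covers instantiate(conf), which loads the class configured under CUSTOM_CONTROLLER_CONF_KEY and, per the HBASE-14960 note, falls back to the default factory when that class cannot be loaded. A hedged sketch, not part of the patch; the no-argument newController() overload is assumed from the wider factory API rather than shown in this hunk:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;

public class RpcControllerFactorySketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // If the configured controller class cannot be loaded, instantiate() logs the
    // HBASE-14960 warning and returns the default factory instead of failing.
    RpcControllerFactory factory = RpcControllerFactory.instantiate(conf);
    HBaseRpcController controller = factory.newController(); // no-arg overload, assumed
    System.out.println("controller class: " + controller.getClass().getName());
  }
}
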
*/ - package org.apache.hadoop.hbase.ipc; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; - import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * Used for server-side protobuf RPC service invocations. This handler allows - * invocation exceptions to easily be passed through to the RPC server from coprocessor + * Used for server-side protobuf RPC service invocations. This handler allows invocation exceptions + * to easily be passed through to the RPC server from coprocessor * {@link com.google.protobuf.Service} implementations. - * *

      - * When implementing {@link com.google.protobuf.Service} defined methods, - * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client: - * + * When implementing {@link com.google.protobuf.Service} defined methods, coprocessor endpoints can + * use the following pattern to pass exceptions back to the RPC client: * public void myMethod(RpcController controller, MyRequest request, * RpcCallback<MyResponse> done) { * MyResponse response = null; @@ -54,8 +49,7 @@ import org.apache.hadoop.util.StringUtils; public class ServerRpcController implements RpcController { /** * The exception thrown within - * {@link com.google.protobuf.Service#callMethod(com.google.protobuf.Descriptors.MethodDescriptor, RpcController, - * com.google.protobuf.Message, RpcCallback)} + * {@link com.google.protobuf.Service#callMethod(com.google.protobuf.Descriptors.MethodDescriptor, RpcController, com.google.protobuf.Message, RpcCallback)} * if any. */ // TODO: it would be good widen this to just Throwable, but IOException is what we allow now @@ -109,9 +103,8 @@ public class ServerRpcController implements RpcController { /** * Returns any exception thrown during service method invocation, or {@code null} if no exception - * was thrown. This can be used by clients to receive exceptions generated by RPC calls, even - * when {@link RpcCallback}s are used and no {@link com.google.protobuf.ServiceException} is - * declared. + * was thrown. This can be used by clients to receive exceptions generated by RPC calls, even when + * {@link RpcCallback}s are used and no {@link com.google.protobuf.ServiceException} is declared. */ public IOException getFailedOn() { return serviceException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java index eae9886ca55..6c22ca94e42 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetSocketAddress; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.net.Address; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java index bd1e101c2e9..520dbcb2d9c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
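
The ServerRpcController javadoc above describes how coprocessor endpoints pass exceptions back to the RPC client. The method below fleshes out that pattern; MyRequest, MyResponse and doWork(...) are placeholders taken from the javadoc's own example, and setFailedOn(IOException) is assumed as the counterpart of the getFailedOn() accessor shown in the hunk:

// Imports assumed: com.google.protobuf.RpcCallback, com.google.protobuf.RpcController,
// java.io.IOException, org.apache.hadoop.hbase.ipc.ServerRpcController.
public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done) {
  MyResponse response = null;
  try {
    response = doWork(request); // hypothetical helper standing in for the endpoint's real work
  } catch (IOException ioe) {
    // Record the failure; the RPC layer (or the caller) reads it back via getFailedOn().
    ((ServerRpcController) controller).setFailedOn(ioe);
  }
  done.run(response);
}
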
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java index 8e09716c188..7ed351968c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java index b7c28e05842..b782741971d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java index 7cb78f9b98d..047761c92ee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java index 83a869b8142..5474087a857 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index 8ae0888a9de..2b0f2f4509e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** - * State of a Region while undergoing transitions. - * This class is immutable. + * State of a Region while undergoing transitions. This class is immutable. */ @InterfaceAudience.Private public class RegionState { @@ -37,23 +36,23 @@ public class RegionState { @InterfaceAudience.Private @InterfaceStability.Evolving public enum State { - OFFLINE, // region is in an offline state - OPENING, // server has begun to open but not yet done - OPEN, // server opened region and updated meta - CLOSING, // server has begun to close but not yet done - CLOSED, // server closed region and updated meta - SPLITTING, // server started split of a region - SPLIT, // server completed split of a region - FAILED_OPEN, // failed to open, and won't retry any more - FAILED_CLOSE, // failed to close, and won't retry any more - MERGING, // server started merge a region - MERGED, // server completed merge a region - SPLITTING_NEW, // new region to be created when RS splits a parent - // region but hasn't be created yet, or master doesn't - // know it's already created - MERGING_NEW, // new region to be created when RS merges two - // daughter regions but hasn't be created yet, or - // master doesn't know it's already created + OFFLINE, // region is in an offline state + OPENING, // server has begun to open but not yet done + OPEN, // server opened region and updated meta + CLOSING, // server has begun to close but not yet done + CLOSED, // server closed region and updated meta + SPLITTING, // server started split of a region + SPLIT, // server completed split of a region + FAILED_OPEN, // failed to open, and won't retry any more + FAILED_CLOSE, // failed to close, and won't retry any more + MERGING, // server started merge a region + MERGED, // server completed merge a region + SPLITTING_NEW, // new region to be created when RS splits a parent + // region but hasn't be created yet, or master doesn't + // know it's already created + MERGING_NEW, // new region to be created when RS merges two + // daughter regions but hasn't be created yet, or + // master doesn't know it's already created ABNORMALLY_CLOSED; // the region is CLOSED because of a RS crashes. 
Usually it is the same // with CLOSED, but for some operations such as merge/split, we can not // apply it to a region in this state, as it may lead to data loss as we @@ -124,7 +123,6 @@ public class RegionState { /** * Convert a protobuf HBaseProtos.RegionState.State to a RegionState.State - * * @return the RegionState.State */ public static State convert(ClusterStatusProtos.RegionState.State protoState) { @@ -196,13 +194,12 @@ public class RegionState { this(region, state, EnvironmentEdgeManager.currentTime(), serverName); } - public RegionState(RegionInfo region, - State state, long stamp, ServerName serverName) { + public RegionState(RegionInfo region, State state, long stamp, ServerName serverName) { this(region, state, stamp, serverName, 0); } public RegionState(RegionInfo region, State state, long stamp, ServerName serverName, - long ritDuration) { + long ritDuration) { this.hri = region; this.state = state; this.stamp = stamp; @@ -351,8 +348,7 @@ public class RegionState { * Check if a region state can transition to offline */ public boolean isReadyToOffline() { - return isMerged() || isSplit() || isOffline() - || isSplittingNew() || isMergingNew(); + return isMerged() || isSplit() || isOffline() || isSplittingNew() || isMergingNew(); } /** @@ -363,16 +359,16 @@ public class RegionState { } /** - * Check if a region state is one of offline states that - * can't transition to pending_close/closing (unassign/offline) + * Check if a region state is one of offline states that can't transition to pending_close/closing + * (unassign/offline) */ public boolean isUnassignable() { return isUnassignable(state); } /** - * Check if a region state is one of offline states that - * can't transition to pending_close/closing (unassign/offline) + * Check if a region state is one of offline states that can't transition to pending_close/closing + * (unassign/offline) */ public static boolean isUnassignable(State state) { return state == State.MERGED || state == State.SPLIT || state == State.OFFLINE @@ -381,10 +377,8 @@ public class RegionState { @Override public String toString() { - return "{" + hri.getShortNameToLog() - + " state=" + state - + ", ts=" + stamp - + ", server=" + serverName + "}"; + return "{" + hri.getShortNameToLog() + " state=" + state + ", ts=" + stamp + ", server=" + + serverName + "}"; } /** @@ -392,19 +386,17 @@ public class RegionState { */ public String toDescriptiveString() { long relTime = EnvironmentEdgeManager.currentTime() - stamp; - return hri.getRegionNameAsString() - + " state=" + state - + ", ts=" + new Date(stamp) + " (" + (relTime/1000) + "s ago)" - + ", server=" + serverName; + return hri.getRegionNameAsString() + " state=" + state + ", ts=" + new Date(stamp) + " (" + + (relTime / 1000) + "s ago)" + ", server=" + serverName; } /** * Convert a RegionState to an HBaseProtos.RegionState - * * @return the converted HBaseProtos.RegionState */ public ClusterStatusProtos.RegionState convert() { - ClusterStatusProtos.RegionState.Builder regionState = ClusterStatusProtos.RegionState.newBuilder(); + ClusterStatusProtos.RegionState.Builder regionState = + ClusterStatusProtos.RegionState.newBuilder(); regionState.setRegionInfo(ProtobufUtil.toRegionInfo(hri)); regionState.setState(state.convert()); regionState.setStamp(getStamp()); @@ -413,7 +405,6 @@ public class RegionState { /** * Convert a protobuf HBaseProtos.RegionState to a RegionState - * * @return the RegionState */ public static RegionState convert(ClusterStatusProtos.RegionState proto) { @@ -430,7 +421,7 @@ public 
class RegionState { if (obj == null || getClass() != obj.getClass()) { return false; } - RegionState tmp = (RegionState)obj; + RegionState tmp = (RegionState) obj; return RegionInfo.COMPARATOR.compare(tmp.hri, hri) == 0 && tmp.state == state && ((serverName != null && serverName.equals(tmp.serverName)) @@ -442,7 +433,7 @@ public class RegionState { */ @Override public int hashCode() { - return (serverName != null ? serverName.hashCode() * 11 : 0) - + hri.hashCode() + 5 * state.ordinal(); + return (serverName != null ? serverName.hashCode() * 11 : 0) + hri.hashCode() + + 5 * state.ordinal(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java index 5268dafb8a6..7c6f780e069 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,32 +29,28 @@ public class ProtobufMagic { } /** - * Magic we put ahead of a serialized protobuf message. - * For example, all znode content is protobuf messages with the below magic - * for preamble. + * Magic we put ahead of a serialized protobuf message. For example, all znode content is protobuf + * messages with the below magic for preamble. */ - public static final byte [] PB_MAGIC = new byte [] {'P', 'B', 'U', 'F'}; + public static final byte[] PB_MAGIC = new byte[] { 'P', 'B', 'U', 'F' }; /** * @param bytes Bytes to check. * @return True if passed bytes has {@link #PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes) { + public static boolean isPBMagicPrefix(final byte[] bytes) { if (bytes == null) return false; return isPBMagicPrefix(bytes, 0, bytes.length); } /* - * Copied from Bytes.java to here - * hbase-common now depends on hbase-protocol - * Referencing Bytes.java directly would create circular dependency + * Copied from Bytes.java to here hbase-common now depends on hbase-protocol Referencing + * Bytes.java directly would create circular dependency */ - private static int compareTo(byte[] buffer1, int offset1, int length1, - byte[] buffer2, int offset2, int length2) { + private static int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, + int offset2, int length2) { // Short circuit equal case - if (buffer1 == buffer2 && - offset1 == offset2 && - length1 == length2) { + if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { return 0; } // Bring WritableComparator code local @@ -71,12 +67,12 @@ public class ProtobufMagic { } /** - * @param bytes Bytes to check. + * @param bytes Bytes to check. * @param offset offset to start at - * @param len length to use + * @param len length to use * @return True if passed bytes has {@link #PB_MAGIC} for a prefix. 
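
The RegionState hunks above cover the constructors, the protobuf conversions and the string renderings used for logging. A hedged round-trip sketch, not part of the patch, assuming a RegionInfo and ServerName obtained elsewhere:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class RegionStateSketch {
  static String describe(RegionInfo regionInfo, ServerName serverName) {
    RegionState state = new RegionState(regionInfo, RegionState.State.OPEN,
      EnvironmentEdgeManager.currentTime(), serverName);
    ClusterStatusProtos.RegionState proto = state.convert(); // RegionState -> protobuf
    RegionState back = RegionState.convert(proto);           // protobuf -> RegionState
    boolean same = state.equals(back);                       // compares region, state and server
    return state.toDescriptiveString() + " roundTripOk=" + same;
  }
}
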
*/ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { + public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { if (bytes == null || len < PB_MAGIC.length) return false; return compareTo(PB_MAGIC, 0, PB_MAGIC.length, bytes, offset, PB_MAGIC.length) == 0; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java index fd74719e722..d5f5c8575b1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.protobuf; import java.util.ArrayList; @@ -24,8 +23,8 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.gson.JsonArray; import org.apache.hbase.thirdparty.com.google.gson.JsonElement; import org.apache.hbase.thirdparty.com.google.gson.JsonObject; @@ -36,6 +35,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferExce import org.apache.hbase.thirdparty.com.google.protobuf.MessageOrBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.util.JsonFormat; import org.apache.hbase.thirdparty.com.google.protobuf.util.JsonFormat.TypeRegistry; + import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @@ -45,8 +45,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; *

 * <ul>
 * <li>JSON string: {@link #toJsonElement(MessageOrBuilder)}</li>
 * <li>JSON object (gson): {@link #toJsonElement(MessageOrBuilder)}</li>
- * <li>Java objects (Boolean, Number, String, List, Map):
- * {@link #toJavaObject(JsonElement)}</li>
+ * <li>Java objects (Boolean, Number, String, List, Map): {@link #toJavaObject(JsonElement)}</li>
 * </ul>
      */ @InterfaceAudience.Private @@ -57,33 +56,29 @@ public class ProtobufMessageConverter { static { TypeRegistry.Builder builder = TypeRegistry.newBuilder(); - builder - .add(BytesValue.getDescriptor()) - .add(LockServiceProtos.getDescriptor().getMessageTypes()) + builder.add(BytesValue.getDescriptor()).add(LockServiceProtos.getDescriptor().getMessageTypes()) .add(MasterProcedureProtos.getDescriptor().getMessageTypes()) .add(ProcedureProtos.getDescriptor().getMessageTypes()); TypeRegistry typeRegistry = builder.build(); - jsonPrinter = JsonFormat.printer() - .usingTypeRegistry(typeRegistry) - .omittingInsignificantWhitespace(); + jsonPrinter = + JsonFormat.printer().usingTypeRegistry(typeRegistry).omittingInsignificantWhitespace(); } private ProtobufMessageConverter() { } public static String toJsonString(MessageOrBuilder messageOrBuilder) - throws InvalidProtocolBufferException { + throws InvalidProtocolBufferException { return jsonPrinter.print(messageOrBuilder); } private static void removeTypeFromJson(JsonElement json) { if (json.isJsonArray()) { - for (JsonElement child: json.getAsJsonArray()) { + for (JsonElement child : json.getAsJsonArray()) { removeTypeFromJson(child); } } else if (json.isJsonObject()) { - Iterator> iterator = - json.getAsJsonObject().entrySet().iterator(); + Iterator> iterator = json.getAsJsonObject().entrySet().iterator(); while (iterator.hasNext()) { Entry entry = iterator.next(); @@ -97,12 +92,12 @@ public class ProtobufMessageConverter { } public static JsonElement toJsonElement(MessageOrBuilder messageOrBuilder) - throws InvalidProtocolBufferException { + throws InvalidProtocolBufferException { return toJsonElement(messageOrBuilder, true); } - public static JsonElement toJsonElement(MessageOrBuilder messageOrBuilder, - boolean removeType) throws InvalidProtocolBufferException { + public static JsonElement toJsonElement(MessageOrBuilder messageOrBuilder, boolean removeType) + throws InvalidProtocolBufferException { String jsonString = toJsonString(messageOrBuilder); JsonParser parser = new JsonParser(); JsonElement element = parser.parse(jsonString); @@ -140,7 +135,7 @@ public class ProtobufMessageConverter { JsonObject object = element.getAsJsonObject(); Map map = new LinkedHashMap<>(); - for (Entry entry: object.entrySet()) { + for (Entry entry : object.entrySet()) { Object javaObject = toJavaObject(entry.getValue()); map.put(entry.getKey(), javaObject); } @@ -152,7 +147,7 @@ public class ProtobufMessageConverter { } public static Object toJavaObject(MessageOrBuilder messageOrBuilder) - throws InvalidProtocolBufferException { + throws InvalidProtocolBufferException { JsonElement element = toJsonElement(messageOrBuilder); return toJavaObject(element); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 769b2876f9d..a7bb4d0e01b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
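
The ProtobufMessageConverter hunks above expose three views of a protobuf message: a JSON string, a gson tree (with type metadata removed by default), and plain Java objects. A small sketch, not part of the patch, assuming some MessageOrBuilder instance is supplied by the caller:

import org.apache.hadoop.hbase.protobuf.ProtobufMessageConverter;
import org.apache.hbase.thirdparty.com.google.gson.JsonElement;
import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hbase.thirdparty.com.google.protobuf.MessageOrBuilder;

public class MessageConverterSketch {
  static Object inspect(MessageOrBuilder msg) throws InvalidProtocolBufferException {
    String json = ProtobufMessageConverter.toJsonString(msg);          // compact JSON text
    JsonElement element = ProtobufMessageConverter.toJsonElement(msg); // gson tree, type info removed
    System.out.println(json);
    return ProtobufMessageConverter.toJavaObject(element);             // Boolean/Number/String/List/Map
  }
}
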
See the NOTICE file * distributed with this work for additional information @@ -95,8 +95,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; -import org.apache.hadoop.hbase.protobuf.generated.TableProtos; import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.protobuf.generated.TableProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.Addressing; @@ -109,11 +109,10 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; /** - * Protobufs utility. - * NOTE: This class OVERLAPS ProtobufUtil in the subpackage 'shaded'. The latter is used - * internally and has more methods. This Class is for Coprocessor Endpoints only though they - * should not be using this private class. It should not be depended upon. Most methods here - * are COPIED from the shaded ProtobufUtils with only difference being they refer to non-shaded + * Protobufs utility. NOTE: This class OVERLAPS ProtobufUtil in the subpackage 'shaded'. The latter + * is used internally and has more methods. This Class is for Coprocessor Endpoints only though they + * should not be using this private class. It should not be depended upon. Most methods here are + * COPIED from the shaded ProtobufUtils with only difference being they refer to non-shaded * protobufs. * @see ProtobufUtil */ @@ -124,19 +123,19 @@ public final class ProtobufUtil { } /** - * Many results are simple: no cell, exists true or false. To save on object creations, - * we reuse them across calls. + * Many results are simple: no cell, exists true or false. To save on object creations, we reuse + * them across calls. */ // TODO: PICK THESE UP FROM THE SHADED PROTOBUF. 
- private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{}; + private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {}; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true); - private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE - = Result.create((Cell[])null, true, true); - private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE - = Result.create((Cell[])null, false, true); + private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE = + Result.create((Cell[]) null, true, true); + private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE = + Result.create((Cell[]) null, false, true); private final static ClientProtos.Result EMPTY_RESULT_PB; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE; @@ -145,13 +144,12 @@ public final class ProtobufUtil { private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE_STALE; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE_STALE; - static { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); builder.setExists(true); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); + EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_TRUE_STALE = builder.build(); @@ -159,41 +157,39 @@ public final class ProtobufUtil { builder.setExists(false); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); + EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_FALSE_STALE = builder.build(); builder.clear(); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB = builder.build(); + EMPTY_RESULT_PB = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_STALE = builder.build(); } /** - * Dynamic class loader to load filter/comparators - */ + * Dynamic class loader to load filter/comparators + */ private final static class ClassLoaderHolder { private final static ClassLoader CLASS_LOADER; static { ClassLoader parent = ProtobufUtil.class.getClassLoader(); Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); + CLASS_LOADER = AccessController + .doPrivileged((PrivilegedAction) () -> new DynamicClassLoader(conf, parent)); } } /** - * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, - * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to - * znodes, etc. + * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, to flag what + * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new - * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. + * @return The passed bytes with magic prepended (Creates a new byte array that is + * bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ - public static byte [] prependPBMagic(final byte [] bytes) { + public static byte[] prependPBMagic(final byte[] bytes) { return Bytes.add(PB_MAGIC, bytes); } @@ -201,17 +197,17 @@ public final class ProtobufUtil { * @param bytes Bytes to check. 
* @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes) { + public static boolean isPBMagicPrefix(final byte[] bytes) { return ProtobufMagic.isPBMagicPrefix(bytes); } /** - * @param bytes Bytes to check. + * @param bytes Bytes to check. * @param offset offset to start at - * @param len length to use + * @param len length to use * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { + public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { return ProtobufMagic.isPBMagicPrefix(bytes, offset, len); } @@ -223,7 +219,7 @@ public final class ProtobufUtil { if (!isPBMagicPrefix(bytes)) { String bytesPrefix = bytes == null ? "null" : Bytes.toStringBinary(bytes, 0, PB_MAGIC.length); throw new DeserializationException( - "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " + bytesPrefix); + "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " + bytesPrefix); } } @@ -235,26 +231,23 @@ public final class ProtobufUtil { } /** - * Return the IOException thrown by the remote server wrapped in - * ServiceException as cause. - * + * Return the IOException thrown by the remote server wrapped in ServiceException as cause. * @param se ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException or - * a new IOException that wraps the unexpected ServiceException. + * @return Exception wrapped in ServiceException or a new IOException that wraps the unexpected + * ServiceException. */ public static IOException getRemoteException(ServiceException se) { return makeIOExceptionOfException(se); } /** - * Return the Exception thrown by the remote server wrapped in - * ServiceException as cause. RemoteException are left untouched. - * + * Return the Exception thrown by the remote server wrapped in ServiceException as cause. + * RemoteException are left untouched. * @param e ServiceException that wraps IO exception thrown by the server * @return Exception wrapped in ServiceException. */ - public static IOException getServiceException( - org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) { + public static IOException + getServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) { Throwable t = e.getCause(); if (ExceptionUtil.isInterrupt(t)) { return ExceptionUtil.asInterrupt(t); @@ -265,9 +258,8 @@ public final class ProtobufUtil { /** * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than * just {@link ServiceException}. Prefer this method to - * {@link #getRemoteException(ServiceException)} because trying to - * contain direct protobuf references. - * @param e + * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf + * references. 
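
The prependPBMagic and isPBMagicPrefix hunks above describe the four-byte 'PBUF' preamble written ahead of serialized protobufs (znode content and the like). A hedged round-trip sketch over an already-serialized message, not part of the patch:

import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class PBMagicSketch {
  static boolean roundTrip(byte[] serializedMessage) {
    byte[] withMagic = ProtobufUtil.prependPBMagic(serializedMessage); // 'P','B','U','F' + payload
    if (!ProtobufUtil.isPBMagicPrefix(withMagic)) {
      return false; // a reader would refuse to parse bytes without the preamble
    }
    int off = ProtobufMagic.PB_MAGIC.length;
    byte[] payload = Bytes.copy(withMagic, off, withMagic.length - off);
    return Bytes.equals(payload, serializedMessage);
  }
}
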
n */ public static IOException handleRemoteException(Exception e) { return makeIOExceptionOfException(e); @@ -275,31 +267,30 @@ public final class ProtobufUtil { private static IOException makeIOExceptionOfException(Exception e) { Throwable t = e; - if (e instanceof ServiceException || - e instanceof org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) { + if ( + e instanceof ServiceException + || e instanceof org.apache.hbase.thirdparty.com.google.protobuf.ServiceException + ) { t = e.getCause(); } if (ExceptionUtil.isInterrupt(t)) { return ExceptionUtil.asInterrupt(t); } if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); + t = ((RemoteException) t).unwrapRemoteException(); } - return t instanceof IOException? (IOException)t: new HBaseIOException(t); + return t instanceof IOException ? (IOException) t : new HBaseIOException(t); } /** * Convert a ServerName to a protocol buffer ServerName - * * @param serverName the ServerName to convert * @return the converted protocol buffer ServerName * @see #toServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) */ - public static HBaseProtos.ServerName - toServerName(final ServerName serverName) { + public static HBaseProtos.ServerName toServerName(final ServerName serverName) { if (serverName == null) return null; - HBaseProtos.ServerName.Builder builder = - HBaseProtos.ServerName.newBuilder(); + HBaseProtos.ServerName.Builder builder = HBaseProtos.ServerName.newBuilder(); builder.setHostName(serverName.getHostname()); if (serverName.getPort() >= 0) { builder.setPort(serverName.getPort()); @@ -312,7 +303,6 @@ public final class ProtobufUtil { /** * Convert a protocol buffer ServerName to a ServerName - * * @param proto the protocol buffer ServerName to convert * @return the converted ServerName */ @@ -333,51 +323,47 @@ public final class ProtobufUtil { /** * Convert a protobuf Durability into a client Durability */ - public static Durability toDurability( - final ClientProtos.MutationProto.Durability proto) { - switch(proto) { - case USE_DEFAULT: - return Durability.USE_DEFAULT; - case SKIP_WAL: - return Durability.SKIP_WAL; - case ASYNC_WAL: - return Durability.ASYNC_WAL; - case SYNC_WAL: - return Durability.SYNC_WAL; - case FSYNC_WAL: - return Durability.FSYNC_WAL; - default: - return Durability.USE_DEFAULT; + public static Durability toDurability(final ClientProtos.MutationProto.Durability proto) { + switch (proto) { + case USE_DEFAULT: + return Durability.USE_DEFAULT; + case SKIP_WAL: + return Durability.SKIP_WAL; + case ASYNC_WAL: + return Durability.ASYNC_WAL; + case SYNC_WAL: + return Durability.SYNC_WAL; + case FSYNC_WAL: + return Durability.FSYNC_WAL; + default: + return Durability.USE_DEFAULT; } } /** * Convert a client Durability into a protbuf Durability */ - public static ClientProtos.MutationProto.Durability toDurability( - final Durability d) { - switch(d) { - case USE_DEFAULT: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; - case SKIP_WAL: - return ClientProtos.MutationProto.Durability.SKIP_WAL; - case ASYNC_WAL: - return ClientProtos.MutationProto.Durability.ASYNC_WAL; - case SYNC_WAL: - return ClientProtos.MutationProto.Durability.SYNC_WAL; - case FSYNC_WAL: - return ClientProtos.MutationProto.Durability.FSYNC_WAL; - default: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; + public static ClientProtos.MutationProto.Durability toDurability(final Durability d) { + switch (d) { + case USE_DEFAULT: + return 
ClientProtos.MutationProto.Durability.USE_DEFAULT; + case SKIP_WAL: + return ClientProtos.MutationProto.Durability.SKIP_WAL; + case ASYNC_WAL: + return ClientProtos.MutationProto.Durability.ASYNC_WAL; + case SYNC_WAL: + return ClientProtos.MutationProto.Durability.SYNC_WAL; + case FSYNC_WAL: + return ClientProtos.MutationProto.Durability.FSYNC_WAL; + default: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; } } /** * Convert a protocol buffer Get to a client Get - * * @param proto the protocol buffer Get to convert - * @return the converted client Get - * @throws IOException + * @return the converted client Get n */ public static Get toGet(final ClientProtos.Get proto) throws IOException { if (proto == null) return null; @@ -398,8 +384,8 @@ public final class ProtobufUtil { if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = protoToTimeRange(cftr.getTimeRange()); - get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -410,14 +396,14 @@ public final class ProtobufUtil { FilterProtos.Filter filter = proto.getFilter(); get.setFilter(ProtobufUtil.toFilter(filter)); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { get.addColumn(family, qualifier.toByteArray()); } } else { @@ -425,7 +411,7 @@ public final class ProtobufUtil { } } } - if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ + if (proto.hasExistenceOnly() && proto.getExistenceOnly()) { get.setCheckExistenceOnly(true); } if (proto.hasConsistency()) { @@ -439,58 +425,59 @@ public final class ProtobufUtil { public static Consistency toConsistency(ClientProtos.Consistency consistency) { switch (consistency) { - case STRONG : return Consistency.STRONG; - case TIMELINE : return Consistency.TIMELINE; - default : return Consistency.STRONG; + case STRONG: + return Consistency.STRONG; + case TIMELINE: + return Consistency.TIMELINE; + default: + return Consistency.STRONG; } } public static ClientProtos.Consistency toConsistency(Consistency consistency) { switch (consistency) { - case STRONG : return ClientProtos.Consistency.STRONG; - case TIMELINE : return ClientProtos.Consistency.TIMELINE; - default : return ClientProtos.Consistency.STRONG; + case STRONG: + return ClientProtos.Consistency.STRONG; + case TIMELINE: + return ClientProtos.Consistency.TIMELINE; + default: + return ClientProtos.Consistency.STRONG; } } /** * Convert a protocol buffer Mutate to a Put. - * * @param proto The protocol buffer MutationProto to convert - * @return A client Put. - * @throws IOException + * @return A client Put. n */ - public static Put toPut(final MutationProto proto) - throws IOException { + public static Put toPut(final MutationProto proto) throws IOException { return toPut(proto, null); } /** * Convert a protocol buffer Mutate to a Put. 
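
The two toDurability overloads reformatted in the hunk above convert between the client Durability enum and its protobuf counterpart; unrecognized values map to USE_DEFAULT. A minimal round-trip sketch, not part of the patch:

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

public class DurabilitySketch {
  static Durability roundTrip(Durability d) {
    ClientProtos.MutationProto.Durability pb = ProtobufUtil.toDurability(d);
    return ProtobufUtil.toDurability(pb); // unrecognized values fall back to USE_DEFAULT
  }
}
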
- * - * @param proto The protocol buffer MutationProto to convert + * @param proto The protocol buffer MutationProto to convert * @param cellScanner If non-null, the Cell data that goes with this proto. - * @return A client Put. - * @throws IOException + * @return A client Put. n */ public static Put toPut(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? + throws IOException { + // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? MutationType type = proto.getMutateType(); - assert type == MutationType.PUT: type.name(); - long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP; + assert type == MutationType.PUT : type.name(); + long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (put == null) { @@ -503,13 +490,13 @@ public final class ProtobufUtil { throw new IllegalArgumentException("row cannot be null"); } // The proto has the metadata and the data itself - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (ColumnValue column: proto.getColumnValueList()) { + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } long ts = timestamp; if (qv.hasTimestamp()) { @@ -518,51 +505,35 @@ public final class ProtobufUtil { byte[] allTagsBytes; if (qv.hasTags()) { allTagsBytes = qv.getTags().toByteArray(); - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .setTags(allTagsBytes) - .build()); + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()) + .setTags(allTagsBytes).build()); } else { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Cell.Type.Put) - .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .setTags(allTagsBytes) - .build()); + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(Cell.Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).setTags(allTagsBytes) + .build()); } } else { - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .build()); - } else{ - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Type.Put) - .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .build()); + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()).build()); + } else { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).build()); } } } } } put.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { put.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return put; @@ -570,43 +541,38 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Mutate to a Delete - * * @param proto the protocol buffer Mutate to convert - * @return the converted client Delete - * @throws IOException + * @return the converted client Delete n */ - public static Delete toDelete(final MutationProto proto) - throws IOException { + public static Delete toDelete(final MutationProto proto) throws IOException { return toDelete(proto, null); } /** * Convert a protocol buffer Mutate to a Delete - * - * @param proto the protocol buffer Mutate to convert + * @param proto the protocol buffer Mutate to convert * @param cellScanner if non-null, the data that goes with this delete. - * @return the converted client Delete - * @throws IOException + * @return the converted client Delete n */ public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.DELETE : type.name(); long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? 
proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + + TextFormat.shortDebugString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + TextFormat.shortDebugString(proto)); } Cell cell = cellScanner.current(); if (delete == null) { @@ -619,9 +585,9 @@ public final class ProtobufUtil { if (delete == null) { throw new IllegalArgumentException("row cannot be null"); } - for (ColumnValue column: proto.getColumnValueList()) { + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { DeleteType deleteType = qv.getDeleteType(); byte[] qualifier = null; if (qv.hasQualifier()) { @@ -641,36 +607,38 @@ public final class ProtobufUtil { } } delete.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return delete; } @FunctionalInterface - private interface ConsumerWithException { + private interface ConsumerWithException { void accept(T t, U u) throws IOException; } - private static T toDelta(Function supplier, ConsumerWithException consumer, - final MutationProto proto, final CellScanner cellScanner) throws IOException { + private static T toDelta(Function supplier, + ConsumerWithException consumer, final MutationProto proto, + final CellScanner cellScanner) throws IOException { byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; T mutation = row == null ? null : supplier.apply(new Bytes(row)); int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (mutation == null) { - mutation = supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + mutation = + supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } consumer.accept(mutation, cell); } @@ -683,16 +651,15 @@ public final class ProtobufUtil { for (QualifierValue qv : column.getQualifierValueList()) { byte[] qualifier = qv.getQualifier().toByteArray(); if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } byte[] value = qv.getValue().toByteArray(); byte[] tags = null; if (qv.hasTags()) { tags = qv.getTags().toByteArray(); } - consumer.accept(mutation, CellUtil.createCell(mutation.getRow(), family, qualifier, cellTimestampOrLatest(qv), - KeyValue.Type.Put, value, tags)); + consumer.accept(mutation, CellUtil.createCell(mutation.getRow(), family, qualifier, + cellTimestampOrLatest(qv), KeyValue.Type.Put, value, tags)); } } } @@ -712,18 +679,16 @@ public final class ProtobufUtil { } /** - * Convert a protocol buffer Mutate to an Append - * @param cellScanner - * @param proto the protocol buffer Mutate to convert - * @return the converted client Append - * @throws IOException + * Convert a protocol buffer Mutate to an Append n * @param proto the protocol buffer Mutate to + * convert + * @return the converted client Append n */ public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.APPEND : type.name(); Append append = toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()), - Append::add, proto, cellScanner); + Append::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = protoToTimeRange(proto.getTimeRange()); append.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -733,17 +698,16 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Mutate to an Increment - * * @param proto the protocol buffer Mutate to convert - * @return the converted client Increment - * @throws IOException + * @return the converted client Increment n */ public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.INCREMENT : type.name(); - Increment increment = toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), - Increment::add, proto, cellScanner); + Increment increment = + toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), + Increment::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = protoToTimeRange(proto.getTimeRange()); 
increment.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -753,10 +717,8 @@ public final class ProtobufUtil { /** * Convert a MutateRequest to Mutation - * * @param proto the protocol buffer Mutate to convert - * @return the converted Mutation - * @throws IOException + * @return the converted Mutation n */ public static Mutation toMutation(final MutationProto proto) throws IOException { MutationType type = proto.getMutateType(); @@ -777,13 +739,10 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Mutate to a Get. - * @param proto the protocol buffer Mutate to convert. - * @param cellScanner - * @return the converted client get. - * @throws IOException + * @param proto the protocol buffer Mutate to convert. n * @return the converted client get. n */ public static Get toGet(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.INCREMENT || type == MutationType.APPEND : type.name(); byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; @@ -793,21 +752,20 @@ public final class ProtobufUtil { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " - + TextFormat.shortDebugString(proto)); + + TextFormat.shortDebugString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i - + " no cell returned: " + TextFormat.shortDebugString(proto)); + + " no cell returned: " + TextFormat.shortDebugString(proto)); } Cell cell = cellScanner.current(); if (get == null) { get = new Get(Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } get.addColumn( - Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()), - Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())); + Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()), Bytes + .copy(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); } } else { get = new Get(row); @@ -860,15 +818,11 @@ public final class ProtobufUtil { /** * Convert a client Scan to a protocol buffer Scan - * * @param scan the client Scan to convert - * @return the converted protocol buffer Scan - * @throws IOException + * @return the converted protocol buffer Scan n */ - public static ClientProtos.Scan toScan( - final Scan scan) throws IOException { - ClientProtos.Scan.Builder scanBuilder = - ClientProtos.Scan.newBuilder(); + public static ClientProtos.Scan toScan(final Scan scan) throws IOException { + ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder(); scanBuilder.setCacheBlocks(scan.getCacheBlocks()); if (scan.getBatch() > 0) { scanBuilder.setBatchSize(scan.getBatch()); @@ -889,15 +843,13 @@ public final class ProtobufUtil { scanBuilder.setMaxVersions(scan.getMaxVersions()); scan.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { scanBuilder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(ByteStringer.wrap(cf)) - .setTimeRange(toTimeRange(timeRange)) - .build()); + .setColumnFamily(ByteStringer.wrap(cf)).setTimeRange(toTimeRange(timeRange)).build()); }); scanBuilder.setTimeRange(toTimeRange(scan.getTimeRange())); Map attributes = scan.getAttributesMap(); if 
(!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue())); scanBuilder.addAttribute(attributeBuilder.build()); @@ -916,13 +868,12 @@ public final class ProtobufUtil { } if (scan.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); - for (Map.Entry> - family: scan.getFamilyMap().entrySet()) { + for (Map.Entry> family : scan.getFamilyMap().entrySet()) { columnBuilder.setFamily(ByteStringer.wrap(family.getKey())); - NavigableSet qualifiers = family.getValue(); + NavigableSet qualifiers = family.getValue(); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte [] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(ByteStringer.wrap(qualifier)); } } @@ -960,13 +911,10 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Scan to a client Scan - * * @param proto the protocol buffer Scan to convert - * @return the converted client Scan - * @throws IOException + * @return the converted client Scan n */ - public static Scan toScan( - final ClientProtos.Scan proto) throws IOException { + public static Scan toScan(final ClientProtos.Scan proto) throws IOException { byte[] startRow = HConstants.EMPTY_START_ROW; byte[] stopRow = HConstants.EMPTY_END_ROW; boolean includeStartRow = true; @@ -984,7 +932,7 @@ public final class ProtobufUtil { includeStopRow = proto.getIncludeStopRow(); } Scan scan = - new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow); + new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow); if (proto.hasCacheBlocks()) { scan.setCacheBlocks(proto.getCacheBlocks()); } @@ -1003,8 +951,8 @@ public final class ProtobufUtil { if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = protoToTimeRange(cftr.getTimeRange()); - scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -1027,14 +975,14 @@ public final class ProtobufUtil { if (proto.hasAllowPartialResults()) { scan.setAllowPartialResults(proto.getAllowPartialResults()); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { scan.addColumn(family, qualifier.toByteArray()); } } else { @@ -1064,31 +1012,25 @@ public final class ProtobufUtil { /** * Create a protocol buffer Get based on a client Get. 
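For readers tracing the Scan conversions reformatted above, here is a minimal round-trip sketch using only the two toScan overloads shown in this hunk. It assumes the non-shaded hbase-client ProtobufUtil and generated ClientProtos packages (this file appears to be that variant); the row keys and the column are placeholders.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // Build a client-side Scan with a few of the fields the converter copies over.
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-a"), true)
        .withStopRow(Bytes.toBytes("row-z"), false);
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    scan.setCacheBlocks(false);

    // Client Scan -> protobuf Scan, the form sent over the wire.
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);

    // Protobuf Scan -> client Scan, the form the receiving side reconstructs.
    Scan copy = ProtobufUtil.toScan(proto);
    System.out.println(Bytes.toString(copy.getStartRow()) + " .. " + Bytes.toString(copy.getStopRow()));
  }
}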
- * * @param get the client Get - * @return a protocol buffer Get - * @throws IOException + * @return a protocol buffer Get n */ - public static ClientProtos.Get toGet( - final Get get) throws IOException { - ClientProtos.Get.Builder builder = - ClientProtos.Get.newBuilder(); + public static ClientProtos.Get toGet(final Get get) throws IOException { + ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder(); builder.setRow(ByteStringer.wrap(get.getRow())); builder.setCacheBlocks(get.getCacheBlocks()); builder.setMaxVersions(get.getMaxVersions()); if (get.getFilter() != null) { builder.setFilter(ProtobufUtil.toFilter(get.getFilter())); } - get.getColumnFamilyTimeRange().forEach((cf, timeRange) -> - builder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(ByteStringer.wrap(cf)) - .setTimeRange(toTimeRange(timeRange)).build()) - ); + get.getColumnFamilyTimeRange().forEach( + (cf, timeRange) -> builder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() + .setColumnFamily(ByteStringer.wrap(cf)).setTimeRange(toTimeRange(timeRange)).build())); builder.setTimeRange(toTimeRange(get.getTimeRange())); Map attributes = get.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1097,12 +1039,12 @@ public final class ProtobufUtil { if (get.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); Map> families = get.getFamilyMap(); - for (Map.Entry> family: families.entrySet()) { + for (Map.Entry> family : families.entrySet()) { NavigableSet qualifiers = family.getValue(); columnBuilder.setFamily(ByteStringer.wrap(family.getKey())); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte[] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(ByteStringer.wrap(qualifier)); } } @@ -1115,7 +1057,7 @@ public final class ProtobufUtil { if (get.getRowOffsetPerColumnFamily() > 0) { builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); } - if (get.isCheckExistenceOnly()){ + if (get.isCheckExistenceOnly()) { builder.setExistenceOnly(true); } if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { @@ -1136,12 +1078,7 @@ public final class ProtobufUtil { } /** - * Create a protocol buffer Mutate based on a client Mutation - * - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException + * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n */ public static MutationProto toMutation(final MutationType type, final Mutation mutation, final long nonce) throws IOException { @@ -1149,13 +1086,12 @@ public final class ProtobufUtil { } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder) throws IOException { + MutationProto.Builder builder) throws IOException { return toMutation(type, mutation, builder, HConstants.NO_NONCE); } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder, long nonce) - throws IOException { + MutationProto.Builder builder, long nonce) throws IOException { builder = 
getMutationBuilderAndSetCommonFields(type, mutation, builder); if (nonce != HConstants.NO_NONCE) { builder.setNonce(nonce); @@ -1168,15 +1104,15 @@ public final class ProtobufUtil { } ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> family: mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> family : mutation.getFamilyCellMap().entrySet()) { columnBuilder.clear(); columnBuilder.setFamily(ByteStringer.wrap(family.getKey())); - for (Cell cell: family.getValue()) { + for (Cell cell : family.getValue()) { valueBuilder.clear(); - valueBuilder.setQualifier(ByteStringer.wrap( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); - valueBuilder.setValue(ByteStringer.wrap( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + valueBuilder.setQualifier(ByteStringer.wrap(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); + valueBuilder.setValue( + ByteStringer.wrap(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); valueBuilder.setTimestamp(cell.getTimestamp()); if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) { KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte()); @@ -1191,41 +1127,34 @@ public final class ProtobufUtil { /** * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @param builder - * @return a protobuf'd Mutation - * @throws IOException + * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a + * protobuf'd Mutation n */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder) throws IOException { + final MutationProto.Builder builder) throws IOException { return toMutationNoData(type, mutation, builder, HConstants.NO_NONCE); } /** - * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException + * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. + * Understanding is that the Cell will be transported other than via protobuf. 
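To make the difference between toMutation and toMutationNoData concrete: the first embeds the cell data in the protobuf, the second records only the associated cell count and expects the Cells to travel through the RPC CellScanner. A small sketch under the same non-shaded package assumption as the earlier example; the row, column, and value are placeholders.

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationEncodingSketch {
  public static void main(String[] args) throws IOException {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));

    // Full encoding: family/qualifier/value all travel inside the protobuf message.
    MutationProto withData = ProtobufUtil.toMutation(MutationType.PUT, put, HConstants.NO_NONCE);

    // Metadata-only encoding: just the cell count; the Cells themselves are expected
    // to be shipped separately via the CellScanner side channel.
    MutationProto noData = ProtobufUtil.toMutationNoData(MutationType.PUT, put);

    System.out.println("column families in full proto: " + withData.getColumnValueCount());
    System.out.println("associated cell count in no-data proto: " + noData.getAssociatedCellCount());
  }
}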
nn * @return a + * protobuf'd Mutation n */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation) - throws IOException { - MutationProto.Builder builder = MutationProto.newBuilder(); + throws IOException { + MutationProto.Builder builder = MutationProto.newBuilder(); return toMutationNoData(type, mutation, builder); } public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder, long nonce) throws IOException { + final MutationProto.Builder builder, long nonce) throws IOException { getMutationBuilderAndSetCommonFields(type, mutation, builder); builder.setAssociatedCellCount(mutation.size()); if (mutation instanceof Increment) { - builder.setTimeRange(toTimeRange(((Increment)mutation).getTimeRange())); + builder.setTimeRange(toTimeRange(((Increment) mutation).getTimeRange())); } if (mutation instanceof Append) { - builder.setTimeRange(toTimeRange(((Append)mutation).getTimeRange())); + builder.setTimeRange(toTimeRange(((Append) mutation).getTimeRange())); } if (nonce != HConstants.NO_NONCE) { builder.setNonce(nonce); @@ -1235,13 +1164,11 @@ public final class ProtobufUtil { /** * Code shared by {@link #toMutation(MutationType, Mutation)} and - * {@link #toMutationNoData(MutationType, Mutation)} - * @param type - * @param mutation - * @return A partly-filled out protobuf'd Mutation. + * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd + * Mutation. */ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type, - final Mutation mutation, MutationProto.Builder builder) { + final Mutation mutation, MutationProto.Builder builder) { builder.setRow(ByteStringer.wrap(mutation.getRow())); builder.setMutateType(type); builder.setDurability(toDurability(mutation.getDurability())); @@ -1249,7 +1176,7 @@ public final class ProtobufUtil { Map attributes = mutation.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1260,7 +1187,6 @@ public final class ProtobufUtil { /** * Convert a client Result to a protocol buffer Result - * * @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1287,12 +1213,11 @@ public final class ProtobufUtil { /** * Convert a client Result to a protocol buffer Result - * * @param existence the client existence to send * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final boolean existence, boolean stale) { - if (stale){ + if (stale) { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE_STALE : EMPTY_RESULT_PB_EXISTS_FALSE_STALE; } else { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE : EMPTY_RESULT_PB_EXISTS_FALSE; @@ -1300,9 +1225,8 @@ public final class ProtobufUtil { } /** - * Convert a client Result to a protocol buffer Result. - * The pb Result does not include the Cell data. That is for transport otherwise. - * + * Convert a client Result to a protocol buffer Result. The pb Result does not include the Cell + * data. That is for transport otherwise. 
* @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1318,20 +1242,19 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto) { if (proto.hasExists()) { if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } List values = proto.getCellList(); - if (values.isEmpty()){ + if (values.isEmpty()) { return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT; } @@ -1345,23 +1268,22 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert + * @param proto the protocol buffer Result to convert * @param scanner Optional cell scanner. - * @return the converted client Result - * @throws IOException + * @return the converted client Result n */ public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner) - throws IOException { + throws IOException { List values = proto.getCellList(); if (proto.hasExists()) { - if (!values.isEmpty() || - (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { + if ( + !values.isEmpty() || (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0) + ) { throw new IllegalArgumentException("bad proto: exists with cells is no allowed " + proto); } if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } @@ -1377,23 +1299,21 @@ public final class ProtobufUtil { } } - if (!values.isEmpty()){ + if (!values.isEmpty()) { if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (CellProtos.Cell c: values) { + for (CellProtos.Cell c : values) { cells.add(toCell(builder, c)); } } return (cells == null || cells.isEmpty()) - ? (proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT) - : Result.create(cells, null, proto.getStale()); + ? (proto.getStale() ? 
EMPTY_RESULT_STALE : EMPTY_RESULT) + : Result.create(cells, null, proto.getStale()); } - /** * Convert a ByteArrayComparable to a protocol buffer Comparator - * * @param comparator the ByteArrayComparable to convert * @return the converted protocol buffer Comparator */ @@ -1406,23 +1326,22 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Comparator to a ByteArrayComparable - * * @param proto the protocol buffer Comparator to convert * @return the converted ByteArrayComparable */ @SuppressWarnings("unchecked") public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) - throws IOException { + throws IOException { String type = proto.getName(); String funcName = "parseFrom"; - byte [] value = proto.getSerializedComparator().toByteArray(); + byte[] value = proto.getSerializedComparator().toByteArray(); try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Method parseFrom = c.getMethod(funcName, byte[].class); if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (ByteArrayComparable)parseFrom.invoke(null, value); + return (ByteArrayComparable) parseFrom.invoke(null, value); } catch (Exception e) { throw new IOException(e); } @@ -1430,14 +1349,13 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Filter to a client Filter - * * @param proto the protocol buffer Filter to convert * @return the converted Filter */ @SuppressWarnings("unchecked") public static Filter toFilter(FilterProtos.Filter proto) throws IOException { String type = proto.getName(); - final byte [] value = proto.getSerializedFilter().toByteArray(); + final byte[] value = proto.getSerializedFilter().toByteArray(); String funcName = "parseFrom"; try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); @@ -1445,7 +1363,7 @@ public final class ProtobufUtil { if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (Filter)parseFrom.invoke(c, value); + return (Filter) parseFrom.invoke(c, value); } catch (Exception e) { // Either we couldn't instantiate the method object, or "parseFrom" failed. // In either case, let's not retry. @@ -1455,7 +1373,6 @@ public final class ProtobufUtil { /** * Convert a client Filter to a protocol buffer Filter - * * @param filter the Filter to convert * @return the converted protocol buffer Filter */ @@ -1467,54 +1384,46 @@ public final class ProtobufUtil { } /** - * Convert a delete KeyValue type to protocol buffer DeleteType. - * - * @param type - * @return protocol buffer DeleteType - * @throws IOException + * Convert a delete KeyValue type to protocol buffer DeleteType. 
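The toDeleteType and fromDeleteType switches reformatted in this hunk are a plain two-way mapping between client-side delete markers and the wire enum. A tiny round-trip sketch, with the same package assumption as above:

import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType;

public class DeleteTypeMappingSketch {
  public static void main(String[] args) throws IOException {
    // Client-side delete marker -> wire-level enum.
    DeleteType wire = ProtobufUtil.toDeleteType(KeyValue.Type.DeleteColumn);
    // Wire-level enum -> client-side delete marker.
    KeyValue.Type back = ProtobufUtil.fromDeleteType(wire);
    System.out.println(wire + " <-> " + back); // DELETE_MULTIPLE_VERSIONS <-> DeleteColumn
  }
}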
n * @return protocol buffer + * DeleteType n */ - public static DeleteType toDeleteType( - KeyValue.Type type) throws IOException { + public static DeleteType toDeleteType(KeyValue.Type type) throws IOException { switch (type) { - case Delete: - return DeleteType.DELETE_ONE_VERSION; - case DeleteColumn: - return DeleteType.DELETE_MULTIPLE_VERSIONS; - case DeleteFamily: - return DeleteType.DELETE_FAMILY; - case DeleteFamilyVersion: - return DeleteType.DELETE_FAMILY_VERSION; - default: + case Delete: + return DeleteType.DELETE_ONE_VERSION; + case DeleteColumn: + return DeleteType.DELETE_MULTIPLE_VERSIONS; + case DeleteFamily: + return DeleteType.DELETE_FAMILY; + case DeleteFamilyVersion: + return DeleteType.DELETE_FAMILY_VERSION; + default: throw new IOException("Unknown delete type: " + type); } } /** * Convert a protocol buffer DeleteType to delete KeyValue type. - * * @param type The DeleteType - * @return The type. - * @throws IOException + * @return The type. n */ - public static KeyValue.Type fromDeleteType( - DeleteType type) throws IOException { + public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException { switch (type) { - case DELETE_ONE_VERSION: - return KeyValue.Type.Delete; - case DELETE_MULTIPLE_VERSIONS: - return KeyValue.Type.DeleteColumn; - case DELETE_FAMILY: - return KeyValue.Type.DeleteFamily; - case DELETE_FAMILY_VERSION: - return KeyValue.Type.DeleteFamilyVersion; - default: - throw new IOException("Unknown delete type: " + type); + case DELETE_ONE_VERSION: + return KeyValue.Type.Delete; + case DELETE_MULTIPLE_VERSIONS: + return KeyValue.Type.DeleteColumn; + case DELETE_FAMILY: + return KeyValue.Type.DeleteFamily; + case DELETE_FAMILY_VERSION: + return KeyValue.Type.DeleteFamilyVersion; + default: + throw new IOException("Unknown delete type: " + type); } } /** * Convert a stringified protocol buffer exception Parameter to a Java Exception - * * @param parameter the protocol buffer Parameter to convert * @return the converted Exception * @throws IOException if failed to deserialize the parameter @@ -1526,7 +1435,7 @@ public final class ProtobufUtil { String type = parameter.getName(); try { Class c = - (Class)Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); + (Class) Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Constructor cn = null; try { cn = c.getDeclaredConstructor(String.class); @@ -1541,25 +1450,24 @@ public final class ProtobufUtil { } } -// Start helpers for Client + // Start helpers for Client @SuppressWarnings("unchecked") public static T newServiceStub(Class service, RpcChannel channel) - throws Exception { - return (T)Methods.call(service, null, "newStub", - new Class[]{ RpcChannel.class }, new Object[]{ channel }); + throws Exception { + return (T) Methods.call(service, null, "newStub", new Class[] { RpcChannel.class }, + new Object[] { channel }); } -// End helpers for Client -// Start helpers for Admin + // End helpers for Client + // Start helpers for Admin /** * A helper to get the info of a region server using admin protocol. 
* @return the server name */ public static ServerInfo getServerInfo(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + final AdminService.BlockingInterface admin) throws IOException { GetServerInfoRequest request = buildGetServerInfoRequest(); try { GetServerInfoResponse response = admin.getServerInfo(controller, request); @@ -1569,7 +1477,6 @@ public final class ProtobufUtil { } } - /** * @see #buildGetServerInfoRequest() */ @@ -1578,7 +1485,6 @@ public final class ProtobufUtil { /** * Create a new GetServerInfoRequest - * * @return a GetServerInfoRequest */ public static GetServerInfoRequest buildGetServerInfoRequest() { @@ -1591,7 +1497,7 @@ public final class ProtobufUtil { try { pScanMetrics = parser.parseFrom(bytes); } catch (InvalidProtocolBufferException e) { - //Ignored there are just no key values to add. + // Ignored there are just no key values to add. } ScanMetrics scanMetrics = new ScanMetrics(); if (pScanMetrics != null) { @@ -1605,8 +1511,8 @@ public final class ProtobufUtil { } /** - * Unwraps an exception from a protobuf service into the underlying (expected) IOException. - * This method will always throw an exception. + * Unwraps an exception from a protobuf service into the underlying (expected) IOException. This + * method will always throw an exception. * @param se the {@code ServiceException} instance to convert into an {@code IOException} */ public static void toIOException(ServiceException se) throws IOException { @@ -1616,7 +1522,7 @@ public final class ProtobufUtil { Throwable cause = se.getCause(); if (cause != null && cause instanceof IOException) { - throw (IOException)cause; + throw (IOException) cause; } throw new IOException(se); } @@ -1625,28 +1531,23 @@ public final class ProtobufUtil { // Doing this is going to kill us if we do it for all data passed. 
// St.Ack 20121205 CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder(); - kvbuilder.setRow(ByteStringer.wrap(kv.getRowArray(), kv.getRowOffset(), - kv.getRowLength())); - kvbuilder.setFamily(ByteStringer.wrap(kv.getFamilyArray(), - kv.getFamilyOffset(), kv.getFamilyLength())); - kvbuilder.setQualifier(ByteStringer.wrap(kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength())); + kvbuilder.setRow(ByteStringer.wrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); + kvbuilder.setFamily( + ByteStringer.wrap(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength())); + kvbuilder.setQualifier( + ByteStringer.wrap(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength())); kvbuilder.setCellType(CellProtos.CellType.valueOf(kv.getTypeByte())); kvbuilder.setTimestamp(kv.getTimestamp()); - kvbuilder.setValue(ByteStringer.wrap(kv.getValueArray(), kv.getValueOffset(), - kv.getValueLength())); + kvbuilder + .setValue(ByteStringer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); return kvbuilder.build(); } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - return cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()) - .build(); + return cellBuilder.clear().setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()).setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()).setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()).build(); } /** @@ -1655,27 +1556,26 @@ public final class ProtobufUtil { * @return Short String of mutation proto */ static String toShortString(final MutationProto proto) { - return "row=" + Bytes.toString(proto.getRow().toByteArray()) + - ", type=" + proto.getMutateType().toString(); + return "row=" + Bytes.toString(proto.getRow().toByteArray()) + ", type=" + + proto.getMutateType().toString(); } public static TableName toTableName(TableProtos.TableName tableNamePB) { return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(), - tableNamePB.getQualifier().asReadOnlyByteBuffer()); + tableNamePB.getQualifier().asReadOnlyByteBuffer()); } public static TableProtos.TableName toProtoTableName(TableName tableName) { return TableProtos.TableName.newBuilder() - .setNamespace(ByteStringer.wrap(tableName.getNamespace())) - .setQualifier(ByteStringer.wrap(tableName.getQualifier())).build(); + .setNamespace(ByteStringer.wrap(tableName.getNamespace())) + .setQualifier(ByteStringer.wrap(tableName.getQualifier())).build(); } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder - * @param b byte array - * @throws IOException + * @param b byte array n */ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(b); @@ -1685,16 +1585,13 @@ public final class ProtobufUtil { } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This 
version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder - * @param b byte array - * @param offset - * @param length - * @throws IOException + * @param b byte array nnn */ public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length) - throws IOException { + throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(b, offset, length); codedInput.setSizeLimit(length); builder.mergeFrom(codedInput); @@ -1702,14 +1599,14 @@ public final class ProtobufUtil { } private static TimeRange protoToTimeRange(HBaseProtos.TimeRange timeRange) throws IOException { - long minStamp = 0; - long maxStamp = Long.MAX_VALUE; - if (timeRange.hasFrom()) { - minStamp = timeRange.getFrom(); - } - if (timeRange.hasTo()) { - maxStamp = timeRange.getTo(); - } + long minStamp = 0; + long maxStamp = Long.MAX_VALUE; + if (timeRange.hasFrom()) { + minStamp = timeRange.getFrom(); + } + if (timeRange.hasTo()) { + maxStamp = timeRange.getTo(); + } return new TimeRange(minStamp, maxStamp); } @@ -1720,19 +1617,18 @@ public final class ProtobufUtil { * @return the protobuf SnapshotDescription type */ public static HBaseProtos.SnapshotDescription.Type - createProtosSnapShotDescType(SnapshotType type) { + createProtosSnapShotDescType(SnapshotType type) { return HBaseProtos.SnapshotDescription.Type.valueOf(type.name()); } /** * Convert a byte array to a protocol buffer RegionSpecifier - * - * @param type the region specifier type + * @param type the region specifier type * @param value the region specifier byte array value * @return a protocol buffer RegionSpecifier */ - public static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { + public static RegionSpecifier buildRegionSpecifier(final RegionSpecifierType type, + final byte[] value) { RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); regionBuilder.setValue(ByteStringer.wrap(value)); regionBuilder.setType(type); @@ -1741,29 +1637,27 @@ public final class ProtobufUtil { /** * Get a ServerName from the passed in data bytes. - * @param data Data with a serialize server name in it; can handle the old style - * servername where servername was host and port. Works too with data that - * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that - * has a serialized {@link ServerName} in it. - * @return Returns null if data is null else converts passed data - * to a ServerName instance. - * @throws DeserializationException + * @param data Data with a serialize server name in it; can handle the old style servername where + * servername was host and port. Works too with data that begins w/ the pb 'PBUF' + * magic and that is then followed by a protobuf that has a serialized + * {@link ServerName} in it. + * @return Returns null if data is null else converts passed data to a ServerName + * instance. 
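The mergeFrom helpers reformatted above exist so that re-parsing a large serialized message is bounded by the actual byte-array length rather than protobuf's hard-coded 64MB decode limit. A usage sketch, reusing the Scan message from the earlier example; the packages and the start row are assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeFromSketch {
  public static void main(String[] args) throws IOException {
    // Serialize a protobuf Scan to bytes, as an RPC or a job configuration might.
    ClientProtos.Scan proto = ProtobufUtil.toScan(new Scan().withStartRow(Bytes.toBytes("r"), true));
    byte[] serialized = proto.toByteArray();

    // Re-parse through mergeFrom, which raises the CodedInputStream size limit to the
    // array length instead of relying on the default cap.
    ClientProtos.Scan.Builder builder = ClientProtos.Scan.newBuilder();
    ProtobufUtil.mergeFrom(builder, serialized);
    System.out.println(builder.build().getStartRow().toStringUtf8());
  }
}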
n */ - public static ServerName toServerName(final byte [] data) throws DeserializationException { + public static ServerName toServerName(final byte[] data) throws DeserializationException { if (data == null || data.length <= 0) return null; if (ProtobufMagic.isPBMagicPrefix(data)) { int prefixLen = ProtobufMagic.lengthOfPBMagic(); try { ZooKeeperProtos.Master rss = ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = - rss.getMaster(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getMaster(); return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); - } catch (/*InvalidProtocolBufferException*/IOException e) { + } catch (/* InvalidProtocolBufferException */IOException e) { // A failed parse of the znode is pretty catastrophic. Rather than loop // retrying hoping the bad bytes will changes, and rather than change // the signature on this method to add an IOE which will send ripples all - // over the code base, throw a RuntimeException. This should "never" happen. + // over the code base, throw a RuntimeException. This should "never" happen. // Fail fast if it does. throw new DeserializationException(e); } @@ -1801,8 +1695,7 @@ public final class ProtobufUtil { if (timeRange == null) { timeRange = TimeRange.allTime(); } - return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()) - .setTo(timeRange.getMax()) + return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax()) .build(); } @@ -1825,17 +1718,16 @@ public final class ProtobufUtil { final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange) throws IOException { - ClientProtos.Condition.Builder builder = ClientProtos.Condition.newBuilder() - .setRow(ByteStringer.wrap(row)); + ClientProtos.Condition.Builder builder = + ClientProtos.Condition.newBuilder().setRow(ByteStringer.wrap(row)); if (filter != null) { builder.setFilter(ProtobufUtil.toFilter(filter)); } else { builder.setFamily(ByteStringer.wrap(family)) - .setQualifier(ByteStringer.wrap( - qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)) - .setComparator( - ProtobufUtil.toComparator(new BinaryComparator(value))) + .setQualifier( + ByteStringer.wrap(qualifier == null ? 
HConstants.EMPTY_BYTE_ARRAY : qualifier)) + .setComparator(ProtobufUtil.toComparator(new BinaryComparator(value))) .setCompareType(HBaseProtos.CompareType.valueOf(op.name())); } @@ -1848,8 +1740,8 @@ public final class ProtobufUtil { } public static ClientProtos.Condition toCondition(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, - final TimeRange timeRange) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final TimeRange timeRange) + throws IOException { return toCondition(row, family, qualifier, op, value, null, timeRange); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java index 5961ec5cde8..60bf23a944d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.quotas; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java index d5190ad9f7c..9ddd408f845 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.quotas; import java.util.HashSet; import java.util.Set; - import org.apache.commons.lang3.StringUtils; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index a48ce71d607..728959e0a0c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.quotas; import java.io.Closeable; @@ -24,19 +23,18 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.Objects; import java.util.Queue; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Scanner to iterate over the quota settings. @@ -48,8 +46,8 @@ public class QuotaRetriever implements Closeable, Iterable { private final Queue cache = new LinkedList<>(); private ResultScanner scanner; /** - * Connection to use. - * Could pass one in and have this class use it but this class wants to be standalone. + * Connection to use. Could pass one in and have this class use it but this class wants to be + * standalone. */ private Connection connection; private Table table; @@ -104,8 +102,10 @@ public class QuotaRetriever implements Closeable, Iterable { if (cache.isEmpty()) { Result result = scanner.next(); // Skip exceedThrottleQuota row key because this is not a QuotaSettings - if (result != null - && Bytes.equals(result.getRow(), QuotaTableUtil.getExceedThrottleQuotaRowKey())) { + if ( + result != null + && Bytes.equals(result.getRow(), QuotaTableUtil.getExceedThrottleQuotaRowKey()) + ) { result = scanner.next(); } if (result == null) { @@ -166,13 +166,13 @@ public class QuotaRetriever implements Closeable, Iterable { /** * Open a QuotaRetriever with the specified filter. - * @param conf Configuration object to use. + * @param conf Configuration object to use. * @param filter the QuotaFilter * @return the QuotaRetriever * @throws IOException if a remote or network exception occurs */ public static QuotaRetriever open(final Configuration conf, final QuotaFilter filter) - throws IOException { + throws IOException { Scan scan = QuotaTableUtil.makeScan(filter); QuotaRetriever scanner = new QuotaRetriever(); scanner.init(conf, scan); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java index 6f7317c8555..bc211e022d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,22 +20,21 @@ package org.apache.hadoop.hbase.quotas; import org.apache.yetus.audience.InterfaceAudience; /** - * Describe the Scope of the quota rules. - * The quota can be enforced at the cluster level or at machine level. + * Describe the Scope of the quota rules. The quota can be enforced at the cluster level or at + * machine level. */ @InterfaceAudience.Public public enum QuotaScope { /** - * The specified throttling rules will be applied at the cluster level. 
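To show the QuotaRetriever surface reformatted just above in use: a minimal sketch that lists every quota currently defined, assuming a reachable cluster configuration and that the no-argument QuotaFilter constructor (not part of this hunk) matches all subjects.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;

public class ListQuotasSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // open(conf, filter) builds the quota-table Scan from the filter and manages its own Connection.
    try (QuotaRetriever retriever = QuotaRetriever.open(conf, new QuotaFilter())) {
      for (QuotaSettings settings : retriever) {
        System.out.println(settings);
      }
    }
  }
}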
- * A limit of 100req/min means 100req/min in total. - * If you execute 50req on a machine and then 50req on another machine + * The specified throttling rules will be applied at the cluster level. A limit of 100req/min + * means 100req/min in total. If you execute 50req on a machine and then 50req on another machine * then you have to wait your quota to fill up. */ CLUSTER, /** - * The specified throttling rules will be applied on the machine level. - * A limit of 100req/min means that each machine can execute 100req/min. + * The specified throttling rules will be applied on the machine level. A limit of 100req/min + * means that each machine can execute 100req/min. */ MACHINE, } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java index b51a20d6e0a..05218f903f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; import java.util.Objects; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory.QuotaGlobalsSettingsBypass; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; @@ -37,7 +37,7 @@ public abstract class QuotaSettings { private final String regionServer; protected QuotaSettings(final String userName, final TableName tableName, final String namespace, - final String regionServer) { + final String regionServer) { this.userName = userName; this.namespace = namespace; this.tableName = tableName; @@ -63,10 +63,9 @@ public abstract class QuotaSettings { } /** - * Converts the protocol buffer request into a QuotaSetting POJO. Arbitrarily - * enforces that the request only contain one "limit", despite the message - * allowing multiple. The public API does not allow such use of the message. - * + * Converts the protocol buffer request into a QuotaSetting POJO. Arbitrarily enforces that the + * request only contain one "limit", despite the message allowing multiple. The public API does + * not allow such use of the message. * @param request The protocol buffer request. * @return A {@link QuotaSettings} POJO. 
*/ @@ -92,35 +91,33 @@ public abstract class QuotaSettings { // Make sure we don't have either of the two below limits also included if (request.hasSpaceLimit() || request.hasThrottle()) { throw new IllegalStateException( - "SetQuotaRequest has multiple limits: " + TextFormat.shortDebugString(request)); + "SetQuotaRequest has multiple limits: " + TextFormat.shortDebugString(request)); } - return new QuotaGlobalsSettingsBypass( - username, tableName, namespace, regionServer, request.getBypassGlobals()); + return new QuotaGlobalsSettingsBypass(username, tableName, namespace, regionServer, + request.getBypassGlobals()); } else if (request.hasSpaceLimit()) { // Make sure we don't have the below limit as well if (request.hasThrottle()) { throw new IllegalStateException( - "SetQuotaRequests has multiple limits: " + TextFormat.shortDebugString(request)); + "SetQuotaRequests has multiple limits: " + TextFormat.shortDebugString(request)); } // Sanity check on the pb received. if (!request.getSpaceLimit().hasQuota()) { - throw new IllegalArgumentException( - "SpaceLimitRequest is missing the expected SpaceQuota."); + throw new IllegalArgumentException("SpaceLimitRequest is missing the expected SpaceQuota."); } - return QuotaSettingsFactory.fromSpace( - tableName, namespace, request.getSpaceLimit().getQuota()); + return QuotaSettingsFactory.fromSpace(tableName, namespace, + request.getSpaceLimit().getQuota()); } else if (request.hasThrottle()) { return new ThrottleSettings(username, tableName, namespace, regionServer, - request.getThrottle()); + request.getThrottle()); } else { throw new IllegalStateException("Unhandled SetRequestRequest state"); } } /** - * Convert a QuotaSettings to a protocol buffer SetQuotaRequest. - * This is used internally by the Admin client to serialize the quota settings - * and send them to the master. + * Convert a QuotaSettings to a protocol buffer SetQuotaRequest. This is used internally by the + * Admin client to serialize the quota settings and send them to the master. */ @InterfaceAudience.Private public static SetQuotaRequest buildSetQuotaRequestProto(final QuotaSettings settings) { @@ -142,9 +139,8 @@ public abstract class QuotaSettings { } /** - * Called by toSetQuotaRequestProto() - * the subclass should implement this method to set the specific SetQuotaRequest - * properties. + * Called by toSetQuotaRequestProto() the subclass should implement this method to set the + * specific SetQuotaRequest properties. 
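As a sketch of the serialization path this class documents: a QuotaSettings POJO built by the factory is turned into the master RPC request via buildSetQuotaRequestProto. Only buildSetQuotaRequestProto and the SetQuotaRequest import are taken from this hunk; the throttle values are placeholders and the call is normally made by the Admin client rather than user code.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;

public class SetQuotaRequestSketch {
  public static void main(String[] args) {
    QuotaSettings settings =
      QuotaSettingsFactory.throttleUser("alice", ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS);

    // The Admin client serializes the POJO into the request sent to the master this way.
    SetQuotaRequest request = QuotaSettings.buildSetQuotaRequestProto(settings);
    System.out.println(request);
  }
}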
*/ @InterfaceAudience.Private protected abstract void setupSetQuotaRequest(SetQuotaRequest.Builder builder); @@ -174,40 +170,46 @@ public abstract class QuotaSettings { protected static String sizeToString(final long size) { if (size >= (1L << 50)) { - return String.format("%.2fP", (double)size / (1L << 50)); + return String.format("%.2fP", (double) size / (1L << 50)); } if (size >= (1L << 40)) { - return String.format("%.2fT", (double)size / (1L << 40)); + return String.format("%.2fT", (double) size / (1L << 40)); } if (size >= (1L << 30)) { - return String.format("%.2fG", (double)size / (1L << 30)); + return String.format("%.2fG", (double) size / (1L << 30)); } if (size >= (1L << 20)) { - return String.format("%.2fM", (double)size / (1L << 20)); + return String.format("%.2fM", (double) size / (1L << 20)); } if (size >= (1L << 10)) { - return String.format("%.2fK", (double)size / (1L << 10)); + return String.format("%.2fK", (double) size / (1L << 10)); } - return String.format("%.2fB", (double)size); + return String.format("%.2fB", (double) size); } protected static String timeToString(final TimeUnit timeUnit) { switch (timeUnit) { - case NANOSECONDS: return "nsec"; - case MICROSECONDS: return "usec"; - case MILLISECONDS: return "msec"; - case SECONDS: return "sec"; - case MINUTES: return "min"; - case HOURS: return "hour"; - case DAYS: return "day"; + case NANOSECONDS: + return "nsec"; + case MICROSECONDS: + return "usec"; + case MILLISECONDS: + return "msec"; + case SECONDS: + return "sec"; + case MINUTES: + return "min"; + case HOURS: + return "hour"; + case DAYS: + return "day"; } throw new RuntimeException("Invalid TimeUnit " + timeUnit); } /** - * Merges the provided settings with {@code this} and returns a new settings - * object to the caller if the merged settings differ from the original. - * + * Merges the provided settings with {@code this} and returns a new settings object to the caller + * if the merged settings differ from the original. * @param newSettings The new settings to merge in. * @return The merged {@link QuotaSettings} object or null if the quota should be deleted. */ @@ -216,7 +218,6 @@ public abstract class QuotaSettings { /** * Validates that settings being merged into {@code this} is targeting the same "subject", e.g. * user, table, namespace. - * * @param mergee The quota settings to be merged into {@code this}. * @throws IllegalArgumentException if the subjects are not equal. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java index 3e5bc16860f..1f3ebc7c07d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -37,7 +36,7 @@ public class QuotaSettingsFactory { private final boolean bypassGlobals; QuotaGlobalsSettingsBypass(final String userName, final TableName tableName, - final String namespace, final String regionServer, final boolean bypassGlobals) { + final String namespace, final String regionServer, final boolean bypassGlobals) { super(userName, tableName, namespace, regionServer); this.bypassGlobals = bypassGlobals; } @@ -76,20 +75,21 @@ public class QuotaSettingsFactory { } } - /* ========================================================================== - * QuotaSettings from the Quotas object + /* + * ========================================================================== QuotaSettings from + * the Quotas object */ static List fromUserQuotas(final String userName, final Quotas quotas) { return fromQuotas(userName, null, null, null, quotas); } static List fromUserQuotas(final String userName, final TableName tableName, - final Quotas quotas) { + final Quotas quotas) { return fromQuotas(userName, tableName, null, null, quotas); } static List fromUserQuotas(final String userName, final String namespace, - final Quotas quotas) { + final Quotas quotas) { return fromQuotas(userName, null, namespace, null, quotas); } @@ -102,20 +102,20 @@ public class QuotaSettingsFactory { } static List fromRegionServerQuotas(final String regionServer, - final Quotas quotas) { + final Quotas quotas) { return fromQuotas(null, null, null, regionServer, quotas); } private static List fromQuotas(final String userName, final TableName tableName, - final String namespace, final String regionServer, final Quotas quotas) { + final String namespace, final String regionServer, final Quotas quotas) { List settings = new ArrayList<>(); if (quotas.hasThrottle()) { settings - .addAll(fromThrottle(userName, tableName, namespace, regionServer, quotas.getThrottle())); + .addAll(fromThrottle(userName, tableName, namespace, regionServer, quotas.getThrottle())); } if (quotas.getBypassGlobals() == true) { settings - .add(new QuotaGlobalsSettingsBypass(userName, tableName, namespace, regionServer, true)); + .add(new QuotaGlobalsSettingsBypass(userName, tableName, namespace, regionServer, true)); } if (quotas.hasSpace()) { settings.add(fromSpace(tableName, namespace, quotas.getSpace())); @@ -124,13 +124,13 @@ public class QuotaSettingsFactory { } public static List fromTableThrottles(final TableName tableName, - final QuotaProtos.Throttle throttle) { + final QuotaProtos.Throttle throttle) { return fromThrottle(null, tableName, null, null, throttle); } protected static List fromThrottle(final String userName, - final TableName tableName, final String namespace, final String regionServer, - final QuotaProtos.Throttle throttle) { + final TableName tableName, final String namespace, final String regionServer, + final QuotaProtos.Throttle throttle) { List settings = new ArrayList<>(); if (throttle.hasReqNum()) { settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace, regionServer, @@ -177,7 +177,7 @@ public class QuotaSettingsFactory { } if ((table == null && namespace == null) || (table != null && namespace != null)) { throw new IllegalArgumentException( - "Can only construct SpaceLimitSettings for a table 
or namespace."); + "Can only construct SpaceLimitSettings for a table or namespace."); } if (table != null) { if (protoQuota.getRemove()) { @@ -193,101 +193,97 @@ public class QuotaSettingsFactory { } } - /* ========================================================================== - * RPC Throttle + /* + * ========================================================================== RPC Throttle */ /** * Throttle the specified user. - * * @param userName the user to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final ThrottleType type, - final long limit, final TimeUnit timeUnit) { + final long limit, final TimeUnit timeUnit) { return throttleUser(userName, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified user. * @param userName the user to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final ThrottleType type, - final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(userName, null, null, null, type, limit, timeUnit, scope); } /** * Throttle the specified user on the specified table. - * - * @param userName the user to throttle + * @param userName the user to throttle * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final TableName tableName, - final ThrottleType type, final long limit, final TimeUnit timeUnit) { + final ThrottleType type, final long limit, final TimeUnit timeUnit) { return throttleUser(userName, tableName, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified user on the specified table. 
- * @param userName the user to throttle + * @param userName the user to throttle * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final TableName tableName, - final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(userName, tableName, null, null, type, limit, timeUnit, scope); } /** * Throttle the specified user on the specified namespace. - * - * @param userName the user to throttle + * @param userName the user to throttle * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final String namespace, - final ThrottleType type, final long limit, final TimeUnit timeUnit) { + final ThrottleType type, final long limit, final TimeUnit timeUnit) { return throttleUser(userName, namespace, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified user on the specified namespace. - * @param userName the user to throttle + * @param userName the user to throttle * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final String namespace, - final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(userName, null, namespace, null, type, limit, timeUnit, scope); } /** * Remove the throttling for the specified user. - * * @param userName the user * @return the quota settings */ @@ -297,20 +293,18 @@ public class QuotaSettingsFactory { /** * Remove the throttling for the specified user. - * * @param userName the user - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleUserByThrottleType(final String userName, - final ThrottleType type) { + final ThrottleType type) { return throttle(userName, null, null, null, type, 0, null, QuotaScope.MACHINE); } /** * Remove the throttling for the specified user on the specified table. 
- * - * @param userName the user + * @param userName the user * @param tableName the table * @return the quota settings */ @@ -320,21 +314,19 @@ public class QuotaSettingsFactory { /** * Remove the throttling for the specified user on the specified table. - * - * @param userName the user + * @param userName the user * @param tableName the table - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleUserByThrottleType(final String userName, - final TableName tableName, final ThrottleType type) { + final TableName tableName, final ThrottleType type) { return throttle(userName, tableName, null, null, type, 0, null, QuotaScope.MACHINE); } /** * Remove the throttling for the specified user on the specified namespace. - * - * @param userName the user + * @param userName the user * @param namespace the namespace * @return the quota settings */ @@ -344,48 +336,45 @@ public class QuotaSettingsFactory { /** * Remove the throttling for the specified user on the specified namespace. - * - * @param userName the user + * @param userName the user * @param namespace the namespace - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleUserByThrottleType(final String userName, - final String namespace, final ThrottleType type) { + final String namespace, final ThrottleType type) { return throttle(userName, null, namespace, null, type, 0, null, QuotaScope.MACHINE); } /** * Throttle the specified table. - * * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleTable(final TableName tableName, final ThrottleType type, - final long limit, final TimeUnit timeUnit) { + final long limit, final TimeUnit timeUnit) { return throttleTable(tableName, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified table. * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleTable(final TableName tableName, final ThrottleType type, - final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(null, tableName, null, null, type, limit, timeUnit, scope); } /** * Remove the throttling for the specified table. - * * @param tableName the table * @return the quota settings */ @@ -395,47 +384,44 @@ public class QuotaSettingsFactory { /** * Remove the throttling for the specified table. 
- * * @param tableName the table - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleTableByThrottleType(final TableName tableName, - final ThrottleType type) { + final ThrottleType type) { return throttle(null, tableName, null, null, type, 0, null, QuotaScope.MACHINE); } /** * Throttle the specified namespace. - * * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleNamespace(final String namespace, final ThrottleType type, - final long limit, final TimeUnit timeUnit) { + final long limit, final TimeUnit timeUnit) { return throttleNamespace(namespace, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified namespace. * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleNamespace(final String namespace, final ThrottleType type, - final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(null, null, namespace, null, type, limit, timeUnit, scope); } /** * Remove the throttling for the specified namespace. - * * @param namespace the namespace * @return the quota settings */ @@ -445,33 +431,30 @@ public class QuotaSettingsFactory { /** * Remove the throttling for the specified namespace by throttle type. - * * @param namespace the namespace - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleNamespaceByThrottleType(final String namespace, - final ThrottleType type) { + final ThrottleType type) { return throttle(null, null, namespace, null, type, 0, null, QuotaScope.MACHINE); } /** * Throttle the specified region server. - * * @param regionServer the region server to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleRegionServer(final String regionServer, - final ThrottleType type, final long limit, final TimeUnit timeUnit) { + final ThrottleType type, final long limit, final TimeUnit timeUnit) { return throttle(null, null, null, regionServer, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Remove the throttling for the specified region server. - * * @param regionServer the region Server * @return the quota settings */ @@ -481,20 +464,19 @@ public class QuotaSettingsFactory { /** * Remove the throttling for the specified region server by throttle type. 
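The throttle factory methods above only build QuotaSettings objects; nothing is enforced until they are handed to Admin#setQuota. A minimal usage sketch of that flow, assuming an open Connection named conn and example user/table names (imports from org.apache.hadoop.hbase, org.apache.hadoop.hbase.client and org.apache.hadoop.hbase.quotas):

  // Apply, inspect and remove RPC throttles built with QuotaSettingsFactory.
  try (Admin admin = conn.getAdmin()) {
    // Limit user "bob" to 100 requests per second on each region server.
    admin.setQuota(QuotaSettingsFactory.throttleUser("bob", ThrottleType.REQUEST_NUMBER, 100,
      TimeUnit.SECONDS));
    // Limit reads on an example table to 10 MB/sec, accounted across the whole cluster.
    admin.setQuota(QuotaSettingsFactory.throttleTable(TableName.valueOf("ns:tbl"),
      ThrottleType.READ_SIZE, 10L * 1024 * 1024, TimeUnit.SECONDS, QuotaScope.CLUSTER));
    // Read back what is currently configured for the user.
    List<QuotaSettings> current = admin.getQuota(new QuotaFilter().setUserFilter("bob"));
    // Remove the user throttle again.
    admin.setQuota(QuotaSettingsFactory.unthrottleUser("bob"));
  }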
- * - * @param regionServer the region Server - * @param type the type of throttling + * @param regionServer the region Server + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleRegionServerByThrottleType(final String regionServer, - final ThrottleType type) { + final ThrottleType type) { return throttle(null, null, null, regionServer, type, 0, null, QuotaScope.MACHINE); } /* Throttle helper */ private static QuotaSettings throttle(final String userName, final TableName tableName, - final String namespace, final String regionServer, final ThrottleType type, final long limit, - final TimeUnit timeUnit, QuotaScope scope) { + final String namespace, final String regionServer, final ThrottleType type, final long limit, + final TimeUnit timeUnit, QuotaScope scope) { QuotaProtos.ThrottleRequest.Builder builder = QuotaProtos.ThrottleRequest.newBuilder(); if (type != null) { builder.setType(ProtobufUtil.toProtoThrottleType(type)); @@ -505,44 +487,42 @@ public class QuotaSettingsFactory { return new ThrottleSettings(userName, tableName, namespace, regionServer, builder.build()); } - /* ========================================================================== - * Global Settings + /* + * ========================================================================== Global Settings */ /** * Set the "bypass global settings" for the specified user - * - * @param userName the user to throttle + * @param userName the user to throttle * @param bypassGlobals true if the global settings should be bypassed * @return the quota settings */ public static QuotaSettings bypassGlobals(final String userName, final boolean bypassGlobals) { - return new QuotaGlobalsSettingsBypass(userName, null, null, null, bypassGlobals); + return new QuotaGlobalsSettingsBypass(userName, null, null, null, bypassGlobals); } - /* ========================================================================== - * FileSystem Space Settings + /* + * ========================================================================== FileSystem Space + * Settings */ /** * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given table * to the given size in bytes. When the space usage is exceeded by the table, the provided * {@link SpaceViolationPolicy} is enacted on the table. - * - * @param tableName The name of the table on which the quota should be applied. - * @param sizeLimit The limit of a table's size in bytes. + * @param tableName The name of the table on which the quota should be applied. + * @param sizeLimit The limit of a table's size in bytes. * @param violationPolicy The action to take when the quota is exceeded. * @return An {@link QuotaSettings} object. */ - public static QuotaSettings limitTableSpace( - final TableName tableName, long sizeLimit, final SpaceViolationPolicy violationPolicy) { + public static QuotaSettings limitTableSpace(final TableName tableName, long sizeLimit, + final SpaceViolationPolicy violationPolicy) { return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy); } /** * Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given * table. - * * @param tableName The name of the table to remove the quota for. * @return A {@link QuotaSettings} object. */ @@ -554,21 +534,19 @@ public class QuotaSettingsFactory { * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given * namespace to the given size in bytes. 
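The space-quota factory methods follow the same pattern; a short sketch, again assuming an Admin handle named admin and an example table name:

  // Cap an example table at 50 GB on the FileSystem and reject writes once the limit is exceeded.
  admin.setQuota(QuotaSettingsFactory.limitTableSpace(TableName.valueOf("ns:tbl"),
    50L * 1024 * 1024 * 1024, SpaceViolationPolicy.NO_WRITES));
  // Drop the space quota again when it is no longer needed.
  admin.setQuota(QuotaSettingsFactory.removeTableSpaceLimit(TableName.valueOf("ns:tbl")));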
When the space usage is exceeded by all tables in the * namespace, the provided {@link SpaceViolationPolicy} is enacted on all tables in the namespace. - * - * @param namespace The namespace on which the quota should be applied. - * @param sizeLimit The limit of the namespace's size in bytes. + * @param namespace The namespace on which the quota should be applied. + * @param sizeLimit The limit of the namespace's size in bytes. * @param violationPolicy The action to take when the the quota is exceeded. * @return An {@link QuotaSettings} object. */ - public static QuotaSettings limitNamespaceSpace( - final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) { + public static QuotaSettings limitNamespaceSpace(final String namespace, long sizeLimit, + final SpaceViolationPolicy violationPolicy) { return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy); } /** * Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given * namespace. - * * @param namespace The namespace to remove the quota on. * @return A {@link QuotaSettings} object. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 94b87c4683e..1afb15c0ac6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.quotas; import java.io.ByteArrayInputStream; @@ -30,7 +29,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.regex.Pattern; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; @@ -72,18 +70,59 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; /** * Helper class to interact with the quota table. * - * - * - * - * - * - * - * - * - * - * - * - * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * *
 *   ROW-KEY          FAM/QUAL               DATA                       DESC
 *   n.<namespace>    q:s                    <global-quotas>
 *   n.<namespace>    u:p                    <namespace-quota policy>
 *   n.<namespace>    u:s                    <SpaceQuotaSnapshot>       The size of all snapshots against tables in the namespace
 *   t.<table>        q:s                    <global-quotas>
 *   t.<table>        u:p                    <table-quota policy>
 *   t.<table>        u:ss.<snapshot name>   <SpaceQuotaSnapshot>       The size of a snapshot against a table
 *   u.<user>         q:s                    <global-quotas>
 *   u.<user>         q:s.<table>            <table-quotas>
 *   u.<user>         q:s.<ns>               <namespace-quotas>
      */ @InterfaceAudience.Private @@ -93,7 +132,7 @@ public class QuotaTableUtil { /** System table for quotas */ public static final TableName QUOTA_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota"); protected static final byte[] QUOTA_FAMILY_INFO = Bytes.toBytes("q"); protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u"); @@ -102,13 +141,13 @@ public class QuotaTableUtil { protected static final byte[] QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p"); protected static final byte[] QUOTA_SNAPSHOT_SIZE_QUALIFIER = Bytes.toBytes("ss"); protected static final String QUOTA_POLICY_COLUMN = - Bytes.toString(QUOTA_FAMILY_USAGE) + ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY); + Bytes.toString(QUOTA_FAMILY_USAGE) + ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY); protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u."); protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t."); protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n."); protected static final byte[] QUOTA_REGION_SERVER_ROW_KEY_PREFIX = Bytes.toBytes("r."); private static final byte[] QUOTA_EXCEED_THROTTLE_QUOTA_ROW_KEY = - Bytes.toBytes("exceedThrottleQuota"); + Bytes.toBytes("exceedThrottleQuota"); /* * TODO: Setting specified region server quota isn't supported currently and the row key "r.all" @@ -116,47 +155,48 @@ public class QuotaTableUtil { */ public static final String QUOTA_REGION_SERVER_ROW_KEY = "all"; - /* ========================================================================= - * Quota "settings" helpers + /* + * ========================================================================= Quota "settings" + * helpers */ public static Quotas getTableQuota(final Connection connection, final TableName table) - throws IOException { + throws IOException { return getQuotas(connection, getTableRowKey(table)); } public static Quotas getNamespaceQuota(final Connection connection, final String namespace) - throws IOException { + throws IOException { return getQuotas(connection, getNamespaceRowKey(namespace)); } public static Quotas getUserQuota(final Connection connection, final String user) - throws IOException { + throws IOException { return getQuotas(connection, getUserRowKey(user)); } public static Quotas getUserQuota(final Connection connection, final String user, - final TableName table) throws IOException { + final TableName table) throws IOException { return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table)); } public static Quotas getUserQuota(final Connection connection, final String user, - final String namespace) throws IOException { + final String namespace) throws IOException { return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace)); } private static Quotas getQuotas(final Connection connection, final byte[] rowKey) - throws IOException { + throws IOException { return getQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS); } public static Quotas getRegionServerQuota(final Connection connection, final String regionServer) - throws IOException { + throws IOException { return getQuotas(connection, getRegionServerRowKey(regionServer)); } private static Quotas getQuotas(final Connection connection, final byte[] rowKey, - final byte[] qualifier) throws IOException { + final byte[] qualifier) throws IOException { Get get = new Get(rowKey); 
get.addColumn(QUOTA_FAMILY_INFO, qualifier); Result result = doGet(connection, get); @@ -185,13 +225,13 @@ public class QuotaTableUtil { } public static Get makeGetForUserQuotas(final String user, final Iterable tables, - final Iterable namespaces) { + final Iterable namespaces) { Get get = new Get(getUserRowKey(user)); get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - for (final TableName table: tables) { + for (final TableName table : tables) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserTable(table)); } - for (final String ns: namespaces) { + for (final String ns : namespaces) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserNamespace(ns)); } return get; @@ -218,38 +258,36 @@ public class QuotaTableUtil { if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) { FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); nsFilters.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); userFilters.addFilter(nsFilters); hasFilter = true; } if (StringUtils.isNotEmpty(filter.getTableFilter())) { FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); tableFilters.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); userFilters.addFilter(tableFilters); hasFilter = true; } if (!hasFilter) { userFilters.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); } filterList.addFilter(userFilters); } else if (StringUtils.isNotEmpty(filter.getTableFilter())) { filterList.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0))); + new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0))); } else if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) { filterList.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0))); + new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0))); } else if (StringUtils.isNotEmpty(filter.getRegionServerFilter())) { - filterList.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator( - getRegionServerRowKeyRegex(filter.getRegionServerFilter()), 0))); + filterList.addFilter(new RowFilter(CompareOperator.EQUAL, + new RegexStringComparator(getRegionServerRowKeyRegex(filter.getRegionServerFilter()), 0))); } return filterList; } @@ -263,14 +301,14 @@ public class QuotaTableUtil { /** * 
Fetches all {@link SpaceQuotaSnapshot} objects from the {@code hbase:quota} table. - * * @param conn The HBase connection * @return A map of table names and their computed snapshot. */ - public static Map getSnapshots(Connection conn) throws IOException { - Map snapshots = new HashMap<>(); + public static Map getSnapshots(Connection conn) + throws IOException { + Map snapshots = new HashMap<>(); try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) { + ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) { for (Result r : rs) { extractQuotaSnapshot(r, snapshots); } @@ -311,15 +349,14 @@ public class QuotaTableUtil { /** * Extracts the {@link SpaceViolationPolicy} and {@link TableName} from the provided - * {@link Result} and adds them to the given {@link Map}. If the result does not contain - * the expected information or the serialized policy in the value is invalid, this method - * will throw an {@link IllegalArgumentException}. - * - * @param result A row from the quota table. + * {@link Result} and adds them to the given {@link Map}. If the result does not contain the + * expected information or the serialized policy in the value is invalid, this method will throw + * an {@link IllegalArgumentException}. + * @param result A row from the quota table. * @param snapshots A map of snapshots to add the result of this method into. */ - public static void extractQuotaSnapshot( - Result result, Map snapshots) { + public static void extractQuotaSnapshot(Result result, + Map snapshots) { byte[] row = Objects.requireNonNull(result).getRow(); if (row == null || row.length == 0) { throw new IllegalArgumentException("Provided result had a null row"); @@ -328,49 +365,47 @@ public class QuotaTableUtil { Cell c = result.getColumnLatestCell(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); if (c == null) { throw new IllegalArgumentException("Result did not contain the expected column " - + QUOTA_POLICY_COLUMN + ", " + result.toString()); + + QUOTA_POLICY_COLUMN + ", " + result.toString()); } - ByteString buffer = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString buffer = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); try { QuotaProtos.SpaceQuotaSnapshot snapshot = QuotaProtos.SpaceQuotaSnapshot.parseFrom(buffer); snapshots.put(targetTableName, SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot)); } catch (InvalidProtocolBufferException e) { throw new IllegalArgumentException( - "Result did not contain a valid SpaceQuota protocol buffer message", e); + "Result did not contain a valid SpaceQuota protocol buffer message", e); } } public static interface UserQuotasVisitor { - void visitUserQuotas(final String userName, final Quotas quotas) - throws IOException; + void visitUserQuotas(final String userName, final Quotas quotas) throws IOException; + void visitUserQuotas(final String userName, final TableName table, final Quotas quotas) throws IOException; + void visitUserQuotas(final String userName, final String namespace, final Quotas quotas) throws IOException; } public static interface TableQuotasVisitor { - void visitTableQuotas(final TableName tableName, final Quotas quotas) - throws IOException; + void visitTableQuotas(final TableName tableName, final Quotas quotas) throws IOException; } public static interface NamespaceQuotasVisitor { - void visitNamespaceQuotas(final String namespace, final Quotas quotas) - throws 
IOException; + void visitNamespaceQuotas(final String namespace, final Quotas quotas) throws IOException; } private static interface RegionServerQuotasVisitor { - void visitRegionServerQuotas(final String regionServer, final Quotas quotas) - throws IOException; + void visitRegionServerQuotas(final String regionServer, final Quotas quotas) throws IOException; } public static interface QuotasVisitor extends UserQuotasVisitor, TableQuotasVisitor, - NamespaceQuotasVisitor, RegionServerQuotasVisitor { + NamespaceQuotasVisitor, RegionServerQuotasVisitor { } public static void parseResult(final Result result, final QuotasVisitor visitor) - throws IOException { + throws IOException { byte[] row = result.getRow(); if (isNamespaceRowKey(row)) { parseNamespaceResult(result, visitor); @@ -391,7 +426,7 @@ public class QuotaTableUtil { } public static void parseResultToCollection(final Result result, - Collection quotaSettings) throws IOException { + Collection quotaSettings) throws IOException { QuotaTableUtil.parseResult(result, new QuotaTableUtil.QuotasVisitor() { @Override @@ -426,14 +461,14 @@ public class QuotaTableUtil { }); } - public static void parseNamespaceResult(final Result result, - final NamespaceQuotasVisitor visitor) throws IOException { + public static void parseNamespaceResult(final Result result, final NamespaceQuotasVisitor visitor) + throws IOException { String namespace = getNamespaceFromRowKey(result.getRow()); parseNamespaceResult(namespace, result, visitor); } protected static void parseNamespaceResult(final String namespace, final Result result, - final NamespaceQuotasVisitor visitor) throws IOException { + final NamespaceQuotasVisitor visitor) throws IOException { byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); if (data != null) { Quotas quotas = quotasFromData(data); @@ -442,13 +477,13 @@ public class QuotaTableUtil { } private static void parseRegionServerResult(final Result result, - final RegionServerQuotasVisitor visitor) throws IOException { + final RegionServerQuotasVisitor visitor) throws IOException { String rs = getRegionServerFromRowKey(result.getRow()); parseRegionServerResult(rs, result, visitor); } private static void parseRegionServerResult(final String regionServer, final Result result, - final RegionServerQuotasVisitor visitor) throws IOException { + final RegionServerQuotasVisitor visitor) throws IOException { byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); if (data != null) { Quotas quotas = quotasFromData(data); @@ -457,13 +492,13 @@ public class QuotaTableUtil { } public static void parseTableResult(final Result result, final TableQuotasVisitor visitor) - throws IOException { + throws IOException { TableName table = getTableFromRowKey(result.getRow()); parseTableResult(table, result, visitor); } protected static void parseTableResult(final TableName table, final Result result, - final TableQuotasVisitor visitor) throws IOException { + final TableQuotasVisitor visitor) throws IOException { byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); if (data != null) { Quotas quotas = quotasFromData(data); @@ -472,17 +507,17 @@ public class QuotaTableUtil { } public static void parseUserResult(final Result result, final UserQuotasVisitor visitor) - throws IOException { + throws IOException { String userName = getUserFromRowKey(result.getRow()); parseUserResult(userName, result, visitor); } protected static void parseUserResult(final String userName, final Result result, - final 
UserQuotasVisitor visitor) throws IOException { + final UserQuotasVisitor visitor) throws IOException { Map familyMap = result.getFamilyMap(QUOTA_FAMILY_INFO); if (familyMap == null || familyMap.isEmpty()) return; - for (Map.Entry entry: familyMap.entrySet()) { + for (Map.Entry entry : familyMap.entrySet()) { Quotas quotas = quotasFromData(entry.getValue()); if (Bytes.startsWith(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX)) { String name = Bytes.toString(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX.length); @@ -505,9 +540,8 @@ public class QuotaTableUtil { */ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot snapshot) { Put p = new Put(getTableRowKey(tableName)); - p.addColumn( - QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, - SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); + p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, + SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); return p; } @@ -516,23 +550,22 @@ public class QuotaTableUtil { */ static Get makeGetForSnapshotSize(TableName tn, String snapshot) { Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString()))); - g.addColumn( - QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + g.addColumn(QUOTA_FAMILY_USAGE, + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); return g; } /** - * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to - * the given {@code table}. + * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to the + * given {@code table}. */ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long size) { // We just need a pb message with some `long usage`, so we can just reuse the // SpaceQuotaSnapshot message instead of creating a new one. 
Put p = new Put(getTableRowKey(tableName)); p.addColumn(QUOTA_FAMILY_USAGE, getSnapshotSizeQualifier(snapshot), - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } @@ -542,25 +575,25 @@ public class QuotaTableUtil { static Put createPutForNamespaceSnapshotSize(String namespace, long size) { Put p = new Put(getNamespaceRowKey(namespace)); p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER, - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } /** - * Returns a list of {@code Delete} to remove given table snapshot - * entries to remove from quota table + * Returns a list of {@code Delete} to remove given table snapshot entries to remove from quota + * table * @param snapshotEntriesToRemove the entries to remove */ static List createDeletesForExistingTableSnapshotSizes( - Multimap snapshotEntriesToRemove) { + Multimap snapshotEntriesToRemove) { List deletes = new ArrayList<>(); for (Map.Entry> entry : snapshotEntriesToRemove.asMap() - .entrySet()) { + .entrySet()) { for (String snapshot : entry.getValue()) { Delete d = new Delete(getTableRowKey(entry.getKey())); d.addColumns(QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); deletes.add(d); } } @@ -572,17 +605,17 @@ public class QuotaTableUtil { * @param connection connection to re-use */ static List createDeletesForExistingTableSnapshotSizes(Connection connection) - throws IOException { + throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, createScanForSpaceSnapshotSizes()); } /** - * Returns a list of {@code Delete} to remove given namespace snapshot - * entries to removefrom quota table + * Returns a list of {@code Delete} to remove given namespace snapshot entries to removefrom quota + * table * @param snapshotEntriesToRemove the entries to remove */ - static List createDeletesForExistingNamespaceSnapshotSizes( - Set snapshotEntriesToRemove) { + static List + createDeletesForExistingNamespaceSnapshotSizes(Set snapshotEntriesToRemove) { List deletes = new ArrayList<>(); for (String snapshot : snapshotEntriesToRemove) { Delete d = new Delete(getNamespaceRowKey(snapshot)); @@ -597,28 +630,28 @@ public class QuotaTableUtil { * @param connection connection to re-use */ static List createDeletesForExistingNamespaceSnapshotSizes(Connection connection) - throws IOException { + throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, - createScanForNamespaceSnapshotSizes()); + createScanForNamespaceSnapshotSizes()); } /** * Returns a list of {@code Delete} to remove all entries returned by the passed scanner. 
* @param connection connection to re-use - * @param scan the scanner to use to generate the list of deletes + * @param scan the scanner to use to generate the list of deletes */ static List createDeletesForExistingSnapshotsFromScan(Connection connection, Scan scan) - throws IOException { + throws IOException { List deletes = new ArrayList<>(); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(scan)) { + ResultScanner rs = quotaTable.getScanner(scan)) { for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { Cell c = cs.current(); byte[] family = Bytes.copy(c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength()); byte[] qual = - Bytes.copy(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength()); + Bytes.copy(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength()); Delete d = new Delete(r.getRow()); d.addColumns(family, qual); deletes.add(d); @@ -631,26 +664,26 @@ public class QuotaTableUtil { /** * Remove table usage snapshots (u:p columns) for the namespace passed * @param connection connection to re-use - * @param namespace the namespace to fetch the list of table usage snapshots + * @param namespace the namespace to fetch the list of table usage snapshots */ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace) throws IOException { Scan s = new Scan(); - //Get rows for all tables in namespace + // Get rows for all tables in namespace s.setStartStopRowForPrefixScan( Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM))); - //Scan for table usage column (u:p) in quota table - s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); - //Scan for table quota column (q:s) if table has a space quota defined - s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS); + // Scan for table usage column (u:p) in quota table + s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); + // Scan for table quota column (q:s) if table has a space quota defined + s.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(s)) { + ResultScanner rs = quotaTable.getScanner(s)) { for (Result r : rs) { byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - //if table does not have a table space quota defined, delete table usage column (u:p) + // if table does not have a table space quota defined, delete table usage column (u:p) if (data == null) { Delete delete = new Delete(r.getRow()); - delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); + delete.addColumns(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); quotaTable.delete(delete); } } @@ -660,8 +693,7 @@ public class QuotaTableUtil { /** * Fetches the computed size of all snapshots against tables in a namespace for space quotas. */ - static long getNamespaceSnapshotSize( - Connection conn, String namespace) throws IOException { + static long getNamespaceSnapshotSize(Connection conn, String namespace) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Result r = quotaTable.get(createGetNamespaceSnapshotSize(namespace)); if (r.isEmpty()) { @@ -687,8 +719,8 @@ public class QuotaTableUtil { * Parses the snapshot size from the given Cell's value. 
*/ static long parseSnapshotSize(Cell c) throws InvalidProtocolBufferException { - ByteString bs = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString bs = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); return QuotaProtos.SpaceQuotaSnapshot.parseFrom(bs).getQuotaUsage(); } @@ -717,7 +749,7 @@ public class QuotaTableUtil { // Just the usage family and only the snapshot size qualifiers return s.addFamily(QUOTA_FAMILY_USAGE) - .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); + .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); } static Scan createScanForSpaceSnapshotSizes() { @@ -737,22 +769,21 @@ public class QuotaTableUtil { } // Just the usage family and only the snapshot size qualifiers - return s.addFamily(QUOTA_FAMILY_USAGE).setFilter( - new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); + return s.addFamily(QUOTA_FAMILY_USAGE) + .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); } /** * Fetches any persisted HBase snapshot sizes stored in the quota table. The sizes here are - * computed relative to the table which the snapshot was created from. A snapshot's size will - * not include the size of files which the table still refers. These sizes, in bytes, are what - * is used internally to compute quota violation for tables and namespaces. - * + * computed relative to the table which the snapshot was created from. A snapshot's size will not + * include the size of files which the table still refers. These sizes, in bytes, are what is used + * internally to compute quota violation for tables and namespaces. * @return A map of snapshot name to size in bytes per space quota computations */ - public static Map getObservedSnapshotSizes(Connection conn) throws IOException { + public static Map getObservedSnapshotSizes(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { - final Map snapshotSizes = new HashMap<>(); + ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { + final Map snapshotSizes = new HashMap<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { @@ -772,7 +803,7 @@ public class QuotaTableUtil { */ public static Multimap getTableSnapshots(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { + ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { Multimap snapshots = HashMultimap.create(); for (Result r : rs) { CellScanner cs = r.cellScanner(); @@ -793,7 +824,7 @@ public class QuotaTableUtil { */ public static Set getNamespaceSnapshots(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(createScanForNamespaceSnapshotSizes())) { + ResultScanner rs = quotaTable.getScanner(createScanForNamespaceSnapshotSizes())) { Set snapshots = new HashSet<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); @@ -810,11 +841,11 @@ public class QuotaTableUtil { * Returns the current space quota snapshot of the given {@code tableName} from * {@code QuotaTableUtil.QUOTA_TABLE_NAME} or null if the no quota information is available for * that tableName. 
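QuotaTableUtil is annotated @InterfaceAudience.Private, so the readers above are internal helpers rather than a supported client API; still, a sketch of how the space-quota state they expose can be inspected, assuming an open Connection named conn and an example table:

  // Per-table space-quota snapshots persisted in hbase:quota.
  Map<TableName, SpaceQuotaSnapshot> perTable = QuotaTableUtil.getSnapshots(conn);
  perTable.forEach((table, snap) ->
    System.out.println(table + " usage=" + snap.getUsage() + " limit=" + snap.getLimit()));
  // Sizes of HBase snapshots that count against space quotas, keyed by snapshot name.
  Map<String, Long> snapshotSizes = QuotaTableUtil.getObservedSnapshotSizes(conn);
  // Current snapshot for a single table, or null if none has been recorded yet.
  SpaceQuotaSnapshot current =
    QuotaTableUtil.getCurrentSnapshotFromQuotaTable(conn, TableName.valueOf("ns:tbl"));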
- * @param conn connection to re-use + * @param conn connection to re-use * @param tableName name of the table whose current snapshot is to be retreived */ public static SpaceQuotaSnapshot getCurrentSnapshotFromQuotaTable(Connection conn, - TableName tableName) throws IOException { + TableName tableName) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Map snapshots = new HashMap<>(1); Result result = quotaTable.get(makeQuotaSnapshotGetForTable(tableName)); @@ -828,15 +859,16 @@ public class QuotaTableUtil { } } - /* ========================================================================= - * Quotas protobuf helpers + /* + * ========================================================================= Quotas protobuf + * helpers */ protected static Quotas quotasFromData(final byte[] data) throws IOException { return quotasFromData(data, 0, data.length); } - protected static Quotas quotasFromData( - final byte[] data, int offset, int length) throws IOException { + protected static Quotas quotasFromData(final byte[] data, int offset, int length) + throws IOException { int magicLen = ProtobufMagic.lengthOfPBMagic(); if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) { throw new IOException("Missing pb magic prefix"); @@ -863,25 +895,25 @@ public class QuotaTableUtil { return !hasSettings; } - /* ========================================================================= - * HTable helpers + /* + * ========================================================================= HTable helpers */ - protected static Result doGet(final Connection connection, final Get get) - throws IOException { + protected static Result doGet(final Connection connection, final Get get) throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); } } protected static Result[] doGet(final Connection connection, final List gets) - throws IOException { + throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(gets); } } - /* ========================================================================= - * Quota table row key helpers + /* + * ========================================================================= Quota table row key + * helpers */ protected static byte[] getUserRowKey(final String user) { return Bytes.add(QUOTA_USER_ROW_KEY_PREFIX, Bytes.toBytes(user)); @@ -905,7 +937,7 @@ public class QuotaTableUtil { protected static byte[] getSettingsQualifierForUserNamespace(final String namespace) { return Bytes.add(QUOTA_QUALIFIER_SETTINGS_PREFIX, - Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); + Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); } protected static String getUserRowKeyRegex(final String user) { @@ -933,13 +965,13 @@ public class QuotaTableUtil { } protected static String getSettingsQualifierRegexForUserTable(final String table) { - return '^' + Pattern.quote(Bytes.toString(QUOTA_QUALIFIER_SETTINGS_PREFIX)) + - table + "(?> - unmodifiableTableCFsMap(Map> tableCFsMap) { + unmodifiableTableCFsMap(Map> tableCFsMap) { Map> newTableCFsMap = new HashMap<>(); tableCFsMap.forEach((table, cfs) -> newTableCFsMap.put(table, cfs != null ? Collections.unmodifiableList(cfs) : null)); @@ -138,8 +137,8 @@ public class ReplicationPeerConfig { * {@link ReplicationPeerConfigBuilder#setTableCFsMap(Map)} instead. 
*/ @Deprecated - public ReplicationPeerConfig setTableCFsMap(Map> tableCFsMap) { + public ReplicationPeerConfig + setTableCFsMap(Map> tableCFsMap) { this.tableCFsMap = tableCFsMap; return this; } @@ -195,8 +194,8 @@ public class ReplicationPeerConfig { * {@link ReplicationPeerConfigBuilder#setExcludeTableCFsMap(Map)} instead. */ @Deprecated - public ReplicationPeerConfig setExcludeTableCFsMap(Map> tableCFsMap) { + public ReplicationPeerConfig + setExcludeTableCFsMap(Map> tableCFsMap) { this.excludeTableCFsMap = tableCFsMap; return this; } @@ -292,8 +291,7 @@ public class ReplicationPeerConfig { } @Override - public ReplicationPeerConfigBuilder - setTableCFsMap(Map> tableCFsMap) { + public ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap) { this.tableCFsMap = tableCFsMap; return this; } @@ -312,7 +310,7 @@ public class ReplicationPeerConfig { @Override public ReplicationPeerConfigBuilder - setExcludeTableCFsMap(Map> excludeTableCFsMap) { + setExcludeTableCFsMap(Map> excludeTableCFsMap) { this.excludeTableCFsMap = excludeTableCFsMap; return this; } @@ -380,11 +378,11 @@ public class ReplicationPeerConfig { /** * Decide whether the passed family of the table need replicate to the peer cluster according to * this peer config. - * @param table name of the table + * @param table name of the table * @param family family name - * @return true if (the family of) the table need replicate to the peer cluster. - * If passed family is null, return true if any CFs of the table need replicate; - * If passed family is not null, return true if the passed family need replicate. + * @return true if (the family of) the table need replicate to the peer cluster. If passed family + * is null, return true if any CFs of the table need replicate; If passed family is not + * null, return true if the passed family need replicate. */ public boolean needToReplicate(TableName table, byte[] family) { String namespace = table.getNamespaceAsString(); @@ -401,8 +399,8 @@ public class ReplicationPeerConfig { // If cfs is null or empty then we can make sure that we do not need to replicate this table, // otherwise, we may still need to replicate the table but filter out some families. return cfs != null && !cfs.isEmpty() - // If exclude-table-cfs contains passed family then we make sure that we do not need to - // replicate this family. + // If exclude-table-cfs contains passed family then we make sure that we do not need to + // replicate this family. && (family == null || !cfs.contains(Bytes.toString(family))); } else { // Not replicate all user tables, so filter by namespaces and table-cfs config @@ -419,7 +417,7 @@ public class ReplicationPeerConfig { return tableCFsMap != null && tableCFsMap.containsKey(table) && (family == null || CollectionUtils.isEmpty(tableCFsMap.get(table)) // If table-cfs must contain passed family then we need to replicate this family. 
- || tableCFsMap.get(table).contains(Bytes.toString(family))); + || tableCFsMap.get(table).contains(Bytes.toString(family))); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index 180239b93bf..63cae1f5b99 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -45,7 +43,7 @@ public interface ReplicationPeerConfigBuilder { /** * Sets a "raw" configuration property for this replication peer. For experts only. - * @param key Configuration property key + * @param key Configuration property key * @param value Configuration property value * @return {@code this} */ @@ -60,7 +58,6 @@ public interface ReplicationPeerConfigBuilder { @InterfaceAudience.Private ReplicationPeerConfigBuilder removeConfiguration(String key); - /** * Adds all of the provided "raw" configuration entries to {@code this}. * @param configuration A collection of raw configuration entries @@ -90,17 +87,15 @@ public interface ReplicationPeerConfigBuilder { } /** - * Sets an explicit map of tables and column families in those tables that should be replicated - * to the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables - * to a peer. - * + * Sets an explicit map of tables and column families in those tables that should be replicated to + * the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables to a + * peer. * @param tableCFsMap A map from tableName to column family names. An empty collection can be - * passed to indicate replicating all column families. + * passed to indicate replicating all column families. * @return {@code this} * @see #setReplicateAllUserTables(boolean) */ - ReplicationPeerConfigBuilder - setTableCFsMap(Map> tableCFsMap); + ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap); /** * Sets a unique collection of HBase namespaces that should be replicated to this peer. @@ -125,12 +120,11 @@ public interface ReplicationPeerConfigBuilder { ReplicationPeerConfigBuilder setReplicateAllUserTables(boolean replicateAllUserTables); /** - * Sets the mapping of table name to column families which should not be replicated. This - * method sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. Invoking this - * method is only relevant when all user tables are being replicated. - * - * @param tableCFsMap A mapping of table names to column families which should not be - * replicated. An empty list of column families implies all families for the table. + * Sets the mapping of table name to column families which should not be replicated. This method + * sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. 
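Since the deprecated mutators on ReplicationPeerConfig are superseded by this builder, a sketch of the builder-based flow, assuming an Admin handle named admin and example peer id, cluster key and table/family names:

  // Add a peer that replicates only family "cf1" of an example table.
  Map<TableName, List<String>> tableCfs = new HashMap<>();
  tableCfs.put(TableName.valueOf("ns:tbl"), Arrays.asList("cf1"));
  ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
    .setClusterKey("zk1,zk2,zk3:2181:/hbase")   // example peer cluster key
    .setReplicateAllUserTables(false)           // replicate only the explicit table-CFs map below
    .setTableCFsMap(tableCfs)
    .build();
  admin.addReplicationPeer("1", peerConfig);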
Invoking this method is + * only relevant when all user tables are being replicated. + * @param tableCFsMap A mapping of table names to column families which should not be replicated. + * An empty list of column families implies all families for the table. * @return {@code this}. */ ReplicationPeerConfigBuilder setExcludeTableCFsMap(Map> tableCFsMap); @@ -140,7 +134,6 @@ public interface ReplicationPeerConfigBuilder { * configured to be replicated. This method sets state which is mutually exclusive to * {@link #setNamespaces(Set)}. Invoking this method is only relevant when all user tables are * being replicated. - * * @param namespaces A set of namespaces whose tables should not be replicated. * @return {@code this} */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java index ba97d07e785..c2a21e85758 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java index b1f0861e351..7be1ac630e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,8 @@ package org.apache.hadoop.hbase.security; import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -47,44 +45,41 @@ public abstract class AbstractHBaseSaslRpcClient { /** * Create a HBaseSaslRpcClient for an authentication method - * @param conf the configuration object - * @param provider the authentication provider - * @param token token to use if needed by the authentication method - * @param serverAddr the address of the hbase service - * @param securityInfo the security details for the remote hbase service - * @param fallbackAllowed does the client allow fallback to simple authentication - * @throws IOException + * @param conf the configuration object + * @param provider the authentication provider + * @param token token to use if needed by the authentication method + * @param serverAddr the address of the hbase service + * @param securityInfo the security details for the remote hbase service + * @param fallbackAllowed does the client allow fallback to simple authentication n */ protected AbstractHBaseSaslRpcClient(Configuration conf, - SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed) - throws IOException { + SaslClientAuthenticationProvider provider, Token token, + InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed) throws IOException { this(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, "authentication"); } /** * Create a HBaseSaslRpcClient for an authentication method - * @param conf configuration object - * @param provider the authentication provider - * @param token token to use if needed by the authentication method - * @param serverAddr the address of the hbase service - * @param securityInfo the security details for the remote hbase service + * @param conf configuration object + * @param provider the authentication provider + * @param token token to use if needed by the authentication method + * @param serverAddr the address of the hbase service + * @param securityInfo the security details for the remote hbase service * @param fallbackAllowed does the client allow fallback to simple authentication - * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") - * @throws IOException + * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") n */ protected AbstractHBaseSaslRpcClient(Configuration conf, - SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, - String rpcProtection) throws IOException { + SaslClientAuthenticationProvider provider, Token token, + InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, + String rpcProtection) throws IOException { this.fallbackAllowed = fallbackAllowed; saslProps = SaslUtil.initSaslProperties(rpcProtection); - saslClient = provider.createClient( - conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); + saslClient = + provider.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); if (saslClient == null) { - throw new 
IOException("Authentication provider " + provider.getClass() - + " returned a null SaslClient"); + throw new IOException( + "Authentication provider " + provider.getClass() + " returned a null SaslClient"); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java index 259a0a4d651..873132899d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; - /** * Exception thrown by access-related methods. */ @@ -33,7 +32,7 @@ public class AccessDeniedException extends DoNotRetryIOException { } public AccessDeniedException(Class clazz, String s) { - super( "AccessDenied [" + clazz.getName() + "]: " + s); + super("AccessDenied [" + clazz.getName() + "]: " + s); } public AccessDeniedException(String s) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java index 65fc6172236..62c41ab1aed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; /** Authentication method */ @InterfaceAudience.Private @@ -39,7 +36,7 @@ public enum AuthMethod { public final UserGroupInformation.AuthenticationMethod authenticationMethod; AuthMethod(byte code, String mechanismName, - UserGroupInformation.AuthenticationMethod authMethod) { + UserGroupInformation.AuthenticationMethod authMethod) { this.code = code; this.mechanismName = mechanismName; this.authenticationMethod = authMethod; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java index 97be44fff10..31ed191f91a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; /** * Unwrap messages with Crypto AES. Should be placed after a diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java index ceb3f35c0c7..c4c914a04d8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -27,9 +29,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; import org.apache.hbase.thirdparty.io.netty.channel.CoalescingBufferQueue; import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; - /** * wrap messages with Crypto AES. @@ -52,7 +51,7 @@ public class CryptoAESWrapHandler extends ChannelOutboundHandlerAdapter { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof ByteBuf) { queue.add((ByteBuf) msg, promise); } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index 74ad96e2cbd..5a816877ba8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -36,7 +36,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -55,34 +57,31 @@ public final class EncryptionUtil { } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. 
- * @param conf configuration - * @param key the raw key bytes + * Protect a key by encrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. + * @param conf configuration + * @param key the raw key bytes * @param algorithm the algorithm to use with this key material - * @return the encrypted key bytes - * @throws IOException + * @return the encrypted key bytes n */ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm) - throws IOException { + throws IOException { return wrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), new SecretKeySpec(key, algorithm)); } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. - * @param conf configuration + * Protect a key by encrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. + * @param conf configuration * @param subject subject key alias - * @param key the key + * @param key the key * @return the encrypted key bytes */ - public static byte[] wrapKey(Configuration conf, String subject, Key key) - throws IOException { + public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException { // Wrap the key with the configured encryption algorithm. - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -98,11 +97,11 @@ public final class EncryptionUtil { byte[] keyBytes = key.getEncoded(); builder.setLength(keyBytes.length); builder.setHashAlgorithm(Encryption.getConfiguredHashAlgorithm(conf)); - builder.setHash( - UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); + builder + .setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); ByteArrayOutputStream out = new ByteArrayOutputStream(); - Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, - conf, cipher, iv); + Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, + iv); builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray())); // Build and return the protobuf message out.reset(); @@ -111,21 +110,18 @@ public final class EncryptionUtil { } /** - * Unwrap a key by decrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. - * @param conf configuration + * Unwrap a key by decrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. 
+ * @param conf configuration * @param subject subject key alias - * @param value the encrypted key bytes - * @return the raw key bytes - * @throws IOException - * @throws KeyException + * @param value the encrypted key bytes + * @return the raw key bytes nn */ public static Key unwrapKey(Configuration conf, String subject, byte[] value) - throws IOException, KeyException { - EncryptionProtos.WrappedKey wrappedKey = EncryptionProtos.WrappedKey.PARSER - .parseDelimitedFrom(new ByteArrayInputStream(value)); - String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, - HConstants.CIPHER_AES); + throws IOException, KeyException { + EncryptionProtos.WrappedKey wrappedKey = + EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -134,25 +130,27 @@ public final class EncryptionUtil { } private static Key getUnwrapKey(Configuration conf, String subject, - EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { + EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf); String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim(); - if(!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { + if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { String msg = String.format("Unexpected encryption key hash algorithm: %s (expecting: %s)", wrappedHashAlgorithm, configuredHashAlgorithm); - if(Encryption.failOnHashAlgorithmMismatch(conf)) { + if (Encryption.failOnHashAlgorithmMismatch(conf)) { throw new KeyException(msg); } LOG.debug(msg); } ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null; - Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), - wrappedKey.getLength(), subject, conf, cipher, iv); + Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(), + subject, conf, cipher, iv); byte[] keyBytes = out.toByteArray(); if (wrappedKey.hasHash()) { - if (!Bytes.equals(wrappedKey.getHash().toByteArray(), - Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes))) { + if ( + !Bytes.equals(wrappedKey.getHash().toByteArray(), + Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes)) + ) { throw new KeyException("Key was not successfully unwrapped"); } } @@ -162,17 +160,17 @@ public final class EncryptionUtil { /** * Unwrap a wal key by decrypting it with the secret key of the given subject. The configuration * must be set up correctly for key alias resolution. 
- * @param conf configuration + * @param conf configuration * @param subject subject key alias - * @param value the encrypted key bytes + * @param value the encrypted key bytes * @return the raw key bytes - * @throws IOException if key is not found for the subject, or if some I/O error occurs + * @throws IOException if key is not found for the subject, or if some I/O error occurs * @throws KeyException if fail to unwrap the key */ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) - throws IOException, KeyException { + throws IOException, KeyException { EncryptionProtos.WrappedKey wrappedKey = - EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); + EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { @@ -183,11 +181,10 @@ public final class EncryptionUtil { /** * Helper to create an encyption context. - * - * @param conf The current configuration. + * @param conf The current configuration. * @param family The current column descriptor. * @return The created encryption context. - * @throws IOException if an encryption key for the column cannot be unwrapped + * @throws IOException if an encryption key for the column cannot be unwrapped * @throws IllegalStateException in case of encryption related configuration errors */ public static Encryption.Context createEncryptionContext(Configuration conf, @@ -195,7 +192,7 @@ public final class EncryptionUtil { Encryption.Context cryptoContext = Encryption.Context.NONE; String cipherName = family.getEncryptionType(); if (cipherName != null) { - if(!Encryption.isEncryptionEnabled(conf)) { + if (!Encryption.isEncryptionEnabled(conf)) { throw new IllegalStateException("Encryption for family '" + family.getNameAsString() + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); } @@ -214,9 +211,9 @@ public final class EncryptionUtil { // We use the encryption type specified in the column schema as a sanity check on // what the wrapped key is telling us if (!cipher.getName().equalsIgnoreCase(cipherName)) { - throw new IllegalStateException("Encryption for family '" + family.getNameAsString() - + "' configured with type '" + cipherName + "' but key specifies algorithm '" - + cipher.getName() + "'"); + throw new IllegalStateException( + "Encryption for family '" + family.getNameAsString() + "' configured with type '" + + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'"); } } else { // Family does not provide key material, create a random key @@ -236,19 +233,16 @@ public final class EncryptionUtil { /** * Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the * configured master and alternative keys, rather than having to specify a key type to unwrap - * with. - * - * The configuration must be set up correctly for key alias resolution. - * - * @param conf the current configuration + * with. The configuration must be set up correctly for key alias resolution. 
+ * @param conf the current configuration * @param keyBytes the key encrypted by master (or alternative) to unwrap * @return the key bytes, decrypted * @throws IOException if the key cannot be unwrapped */ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOException { Key key; - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); + String masterKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()); try { // First try the master key key = unwrapKey(conf, masterKeyName, keyBytes); @@ -258,8 +252,7 @@ public final class EncryptionUtil { if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { key = unwrapKey(conf, alternateKeyName, keyBytes); @@ -275,24 +268,21 @@ public final class EncryptionUtil { /** * Helper to create an instance of CryptoAES. - * - * @param conf The current configuration. + * @param conf The current configuration. * @param cryptoCipherMeta The metadata for create CryptoAES. * @return The instance of CryptoAES. * @throws IOException if create CryptoAES failed */ public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherMeta, - Configuration conf) throws IOException { + Configuration conf) throws IOException { Properties properties = new Properties(); // the property for cipher class properties.setProperty(CryptoCipherFactory.CLASSES_KEY, - conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", - "org.apache.commons.crypto.cipher.JceCipher")); + conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", + "org.apache.commons.crypto.cipher.JceCipher")); // create SaslAES for client return new CryptoAES(cryptoCipherMeta.getTransformation(), properties, - cryptoCipherMeta.getInKey().toByteArray(), - cryptoCipherMeta.getOutKey().toByteArray(), - cryptoCipherMeta.getInIv().toByteArray(), - cryptoCipherMeta.getOutIv().toByteArray()); + cryptoCipherMeta.getInKey().toByteArray(), cryptoCipherMeta.getOutKey().toByteArray(), + cryptoCipherMeta.getInIv().toByteArray(), cryptoCipherMeta.getOutIv().toByteArray()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java index 03af94ddad9..f9350edcf01 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; import java.io.BufferedInputStream; @@ -29,14 +28,11 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.InetAddress; import java.nio.ByteBuffer; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.SaslInputStream; @@ -47,6 +43,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; + /** * A utility class that encapsulates SASL logic for RPC client. Copied from * org.apache.hadoop.security @@ -64,15 +62,14 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { private boolean initStreamForCrypto; public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, - boolean fallbackAllowed) throws IOException { + Token token, InetAddress serverAddr, SecurityInfo securityInfo, + boolean fallbackAllowed) throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed); } public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, - boolean fallbackAllowed, String rpcProtection, boolean initStreamForCrypto) - throws IOException { + Token token, InetAddress serverAddr, SecurityInfo securityInfo, + boolean fallbackAllowed, String rpcProtection, boolean initStreamForCrypto) throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); this.initStreamForCrypto = initStreamForCrypto; } @@ -81,16 +78,15 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { int status = inStream.readInt(); // read status if (status != SaslStatus.SUCCESS.state) { throw new RemoteException(WritableUtils.readString(inStream), - WritableUtils.readString(inStream)); + WritableUtils.readString(inStream)); } } /** * Do client side SASL authentication with server via the given InputStream and OutputStream - * @param inS InputStream to use + * @param inS InputStream to use * @param outS OutputStream to use - * @return true if connection is set up, or false if needs to switch to simple Auth. - * @throws IOException + * @return true if connection is set up, or false if needs to switch to simple Auth. 
n */ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException { DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS)); @@ -112,7 +108,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { if (len == SaslUtil.SWITCH_TO_SIMPLE_AUTH) { if (!fallbackAllowed) { throw new IOException("Server asks us to fall back to SIMPLE auth, " - + "but this client is configured to only allow secure connections."); + + "but this client is configured to only allow secure connections."); } if (LOG.isDebugEnabled()) { LOG.debug("Server asks us to fall back to simple auth."); @@ -123,7 +119,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { saslToken = new byte[len]; if (LOG.isDebugEnabled()) { LOG.debug("Will read input token of size " + saslToken.length - + " for processing by initSASLContext"); + + " for processing by initSASLContext"); } inStream.readFully(saslToken); } @@ -143,7 +139,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { saslToken = new byte[inStream.readInt()]; if (LOG.isDebugEnabled()) { LOG.debug("Will read input token of size " + saslToken.length - + " for processing by initSASLContext"); + + " for processing by initSASLContext"); } inStream.readFully(saslToken); } @@ -151,16 +147,15 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { try { readStatus(inStream); - } - catch (IOException e){ - if(e instanceof RemoteException){ + } catch (IOException e) { + if (e instanceof RemoteException) { LOG.debug("Sasl connection failed: ", e); throw e; } } if (LOG.isDebugEnabled()) { LOG.debug("SASL client context established. Negotiated QoP: " - + saslClient.getNegotiatedProperty(Sasl.QOP)); + + saslClient.getNegotiatedProperty(Sasl.QOP)); } // initial the inputStream, outputStream for both Sasl encryption // and Crypto AES encryption if necessary @@ -189,8 +184,8 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { return (String) saslClient.getNegotiatedProperty(Sasl.QOP); } - public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, - Configuration conf) throws IOException { + public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, Configuration conf) + throws IOException { // create SaslAES for client cryptoAES = EncryptionUtil.createCryptoAES(cryptoCipherMeta, conf); cryptoAesEnable = true; @@ -198,8 +193,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { /** * Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called. - * @return a SASL wrapped InputStream - * @throws IOException + * @return a SASL wrapped InputStream n */ public InputStream getInputStream() throws IOException { if (!saslClient.isComplete()) { @@ -214,6 +208,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { class WrappedInputStream extends FilterInputStream { private ByteBuffer unwrappedRpcBuffer = ByteBuffer.allocate(0); + public WrappedInputStream(InputStream in) throws IOException { super(in); } @@ -261,8 +256,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { /** * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called. 
- * @return a SASL wrapped OutputStream - * @throws IOException + * @return a SASL wrapped OutputStream n */ public OutputStream getOutputStream() throws IOException { if (!saslClient.isComplete()) { @@ -279,6 +273,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { public WrappedOutputStream(OutputStream out) throws IOException { super(out); } + @Override public void write(byte[] buf, int off, int len) throws IOException { if (LOG.isDebugEnabled()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java index e4611d18137..a75091c5293 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; /** @@ -43,7 +44,7 @@ public class NettyHBaseRpcConnectionHeaderHandler extends SimpleChannelInboundHa private final ByteBuf connectionHeaderWithLength; public NettyHBaseRpcConnectionHeaderHandler(Promise saslPromise, Configuration conf, - ByteBuf connectionHeaderWithLength) { + ByteBuf connectionHeaderWithLength) { this.saslPromise = saslPromise; this.conf = conf; this.connectionHeaderWithLength = connectionHeaderWithLength; @@ -57,12 +58,12 @@ public class NettyHBaseRpcConnectionHeaderHandler extends SimpleChannelInboundHa msg.readBytes(buff); RPCProtos.ConnectionHeaderResponse connectionHeaderResponse = - RPCProtos.ConnectionHeaderResponse.parseFrom(buff); + RPCProtos.ConnectionHeaderResponse.parseFrom(buff); // Get the CryptoCipherMeta, update the HBaseSaslRpcClient for Crypto Cipher if (connectionHeaderResponse.hasCryptoCipherMeta()) { - CryptoAES cryptoAES = EncryptionUtil.createCryptoAES( - connectionHeaderResponse.getCryptoCipherMeta(), conf); + CryptoAES cryptoAES = + EncryptionUtil.createCryptoAES(connectionHeaderResponse.getCryptoCipherMeta(), conf); // replace the Sasl handler with Crypto AES handler setupCryptoAESHandler(ctx.pipeline(), cryptoAES); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java index 
a5b980350d1..9b16a41afe4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,9 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; -import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; - import java.io.IOException; import java.net.InetAddress; - import javax.security.sasl.Sasl; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -33,6 +28,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; +import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; + /** * Implement SASL logic for netty rpc client. * @since 2.0.0 @@ -42,8 +40,8 @@ public class NettyHBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(NettyHBaseSaslRpcClient.class); public NettyHBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, - boolean fallbackAllowed, String rpcProtection) throws IOException { + Token token, InetAddress serverAddr, SecurityInfo securityInfo, + boolean fallbackAllowed, String rpcProtection) throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index e011cc612e5..525a78d0ae8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,25 +17,24 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; - import java.io.IOException; import java.net.InetAddress; import java.security.PrivilegedExceptionAction; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.hadoop.hbase.ipc.FallbackDisallowedException; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; +import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; /** * Implement SASL logic for netty rpc client. @@ -59,18 +58,18 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler< /** * @param saslPromise {@code true} if success, {@code false} if server tells us to fallback to - * simple. + * simple. */ public NettyHBaseSaslRpcClientHandler(Promise saslPromise, UserGroupInformation ugi, - SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, - Configuration conf) throws IOException { + SaslClientAuthenticationProvider provider, Token token, + InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, Configuration conf) + throws IOException { this.saslPromise = saslPromise; this.ugi = ugi; this.conf = conf; this.saslRpcClient = new NettyHBaseSaslRpcClient(conf, provider, token, serverAddr, - securityInfo, fallbackAllowed, conf.get( - "hbase.rpc.protection", SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); + securityInfo, fallbackAllowed, conf.get("hbase.rpc.protection", + SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); } private void writeResponse(ChannelHandlerContext ctx, byte[] response) { @@ -91,10 +90,10 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler< } private void setCryptoAESOption() { - boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY. 
- getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP()); - needProcessConnectionHeader = saslEncryptionEnabled && conf.getBoolean( - "hbase.rpc.crypto.encryption.aes.enabled", false); + boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop() + .equalsIgnoreCase(saslRpcClient.getSaslQOP()); + needProcessConnectionHeader = + saslEncryptionEnabled && conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false); } public boolean isNeedProcessConnectionHeader() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java index cbbcb0e7761..256d434f2ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,17 +17,16 @@ */ package org.apache.hadoop.hbase.security; +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.ipc.RemoteException; - /** * Decode the sasl challenge sent by RpcServer. */ @@ -48,7 +47,7 @@ public class SaslChallengeDecoder extends ByteToMessageDecoder { } if (len > MAX_CHALLENGE_SIZE) { throw new IOException( - "Sasl challenge too large(" + len + "), max allowed is " + MAX_CHALLENGE_SIZE); + "Sasl challenge too large(" + len + "), max allowed is " + MAX_CHALLENGE_SIZE); } int totalLen = 4 + len; if (readableBytes < totalLen) { @@ -69,7 +68,7 @@ public class SaslChallengeDecoder extends ByteToMessageDecoder { } if (classLen > MAX_CHALLENGE_SIZE) { throw new IOException("Exception class name length too large(" + classLen - + "), max allowed is " + MAX_CHALLENGE_SIZE); + + "), max allowed is " + MAX_CHALLENGE_SIZE); } if (readableBytes < 4 + classLen + 4) { return; @@ -79,8 +78,8 @@ public class SaslChallengeDecoder extends ByteToMessageDecoder { throw new IOException("Invalid exception message length " + msgLen); } if (msgLen > MAX_CHALLENGE_SIZE) { - throw new IOException("Exception message length too large(" + msgLen + "), max allowed is " - + MAX_CHALLENGE_SIZE); + throw new IOException( + "Exception message length too large(" + msgLen + "), max allowed is " + MAX_CHALLENGE_SIZE); } int totalLen = classLen + msgLen + 8; if (readableBytes < totalLen) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java index 332bc1933d6..5d6fa08bd4e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum SaslStatus { - SUCCESS (0), - ERROR (1); + SUCCESS(0), + ERROR(1); public final int state; + SaslStatus(int state) { this.state = state; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java index 00d0c41240a..dfc36e4ba31 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.security; +import javax.security.sasl.SaslClient; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import javax.security.sasl.SaslClient; - -import org.apache.yetus.audience.InterfaceAudience; - /** * Unwrap sasl messages. Should be placed after a * io.netty.handler.codec.LengthFieldBasedFrameDecoder diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java index ad2067f2cf2..c2dc1042c91 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,10 @@ package org.apache.hadoop.hbase.security; import java.util.Base64; import java.util.Map; import java.util.TreeMap; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -56,7 +53,7 @@ public class SaslUtil { public boolean matches(String stringQop) { if (saslQop.equals(stringQop)) { LOG.warn("Use authentication/integrity/privacy as value for rpc protection " - + "configurations instead of auth/auth-int/auth-conf."); + + "configurations instead of auth/auth-int/auth-conf."); return true; } return name().equalsIgnoreCase(stringQop); @@ -81,8 +78,8 @@ public class SaslUtil { } /** - * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} - * corresponding to the given {@code stringQop} value. + * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} corresponding to + * the given {@code stringQop} value. * @throws IllegalArgumentException If stringQop doesn't match any QOP. 
*/ public static QualityOfProtection getQop(String stringQop) { @@ -91,8 +88,8 @@ public class SaslUtil { return qop; } } - throw new IllegalArgumentException("Invalid qop: " + stringQop - + ". It must be one of 'authentication', 'integrity', 'privacy'."); + throw new IllegalArgumentException("Invalid qop: " + stringQop + + ". It must be one of 'authentication', 'integrity', 'privacy'."); } /** @@ -110,7 +107,7 @@ public class SaslUtil { QualityOfProtection qop = getQop(qops[i]); saslQopBuilder.append(",").append(qop.getSaslQop()); } - saslQop = saslQopBuilder.substring(1); // remove first ',' + saslQop = saslQopBuilder.substring(1); // remove first ',' } Map saslProps = new TreeMap<>(); saslProps.put(Sasl.QOP, saslQop); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java index 62c127e2dfb..ebc32a827aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security; import javax.security.sasl.SaslClient; - import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.yetus.audience.InterfaceAudience; @@ -31,7 +30,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.CoalescingBufferQueue; import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; - /** * wrap sasl messages. 
*/ @@ -53,7 +51,7 @@ public class SaslWrapHandler extends ChannelOutboundHandlerAdapter { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof ByteBuf) { queue.add((ByteBuf) msg, promise); } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index a2f086fe387..b5f21e6fc3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -35,30 +35,28 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos; @InterfaceAudience.Private public class SecurityInfo { /** Maps RPC service names to authentication information */ - private static ConcurrentMap infos = new ConcurrentHashMap<>(); + private static ConcurrentMap infos = new ConcurrentHashMap<>(); // populate info for known services static { infos.put(AdminProtos.AdminService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(ClientProtos.ClientService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterProtos.HbckService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegistryProtos.ClientMetaService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); // NOTE: IF ADDING A NEW SERVICE, BE SURE TO UPDATE HBasePolicyProvider ALSO ELSE // new Service will not be found when all is Kerberized!!!! } /** - * Adds a security configuration for a new service name. Note that this will have no effect if - * the service name was already registered. + * Adds a security configuration for a new service name. Note that this will have no effect if the + * service name was already registered. 
*/ public static void addInfo(String serviceName, SecurityInfo securityInfo) { infos.putIfAbsent(serviceName, securityInfo); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index f56e8e21234..c6c0c2ecde5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,23 +21,22 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.BlockingInterface; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility client for doing access control admin operations. 
@@ -45,141 +44,104 @@ import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Public public class AccessControlClient { public static final TableName ACL_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); /** * Return true if authorization is supported and enabled * @param connection The connection to use - * @return true if authorization is supported and enabled, false otherwise - * @throws IOException + * @return true if authorization is supported and enabled, false otherwise n */ public static boolean isAuthorizationEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() - .contains(SecurityCapability.AUTHORIZATION); + .contains(SecurityCapability.AUTHORIZATION); } /** * Return true if cell authorization is supported and enabled * @param connection The connection to use - * @return true if cell authorization is supported and enabled, false otherwise - * @throws IOException + * @return true if cell authorization is supported and enabled, false otherwise n */ public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() - .contains(SecurityCapability.CELL_AUTHORIZATION); + .contains(SecurityCapability.CELL_AUTHORIZATION); } - private static BlockingInterface getAccessControlServiceStub(Table ht) - throws IOException { + private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = - AccessControlProtos.AccessControlService.newBlockingStub(service); + BlockingInterface protocol = AccessControlProtos.AccessControlService.newBlockingStub(service); return protocol; } /** * Grants permission on the specified table for the specified user - * @param connection The Connection instance to use - * @param tableName - * @param userName - * @param family - * @param qual - * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. - * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnn * @param mergeExistingPermissions If set + * to false, later granted permissions will override previous granted + * permissions. otherwise, it'll merge with previous granted permissions. nn */ - private static void grant(Connection connection, final TableName tableName, - final String userName, final byte[] family, final byte[] qual, boolean mergeExistingPermissions, - final Permission.Action... actions) throws Throwable { + private static void grant(Connection connection, final TableName tableName, final String userName, + final byte[] family, final byte[] qual, boolean mergeExistingPermissions, + final Permission.Action... actions) throws Throwable { connection.getAdmin().grant(new UserPermission(userName, Permission.newBuilder(tableName) - .withFamily(family).withQualifier(qual).withActions(actions).build()), + .withFamily(family).withQualifier(qual).withActions(actions).build()), mergeExistingPermissions); } /** - * Grants permission on the specified table for the specified user. - * If permissions for a specified user exists, later granted permissions will override previous granted permissions. 
- * @param connection The Connection instance to use - * @param tableName - * @param userName - * @param family - * @param qual - * @param actions - * @throws Throwable + * Grants permission on the specified table for the specified user. If permissions for a specified + * user exists, later granted permissions will override previous granted permissions. + * @param connection The Connection instance to use nnnnnn */ public static void grant(Connection connection, final TableName tableName, final String userName, - final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { + final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { grant(connection, tableName, userName, family, qual, true, actions); } /** - * Grants permission on the specified namespace for the specified user. - * @param connection - * @param namespace - * @param userName - * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. - * @param actions - * @throws Throwable + * Grants permission on the specified namespace for the specified user. nnn * @param + * mergeExistingPermissions If set to false, later granted permissions will override previous + * granted permissions. otherwise, it'll merge with previous granted permissions. nn */ private static void grant(Connection connection, final String namespace, final String userName, - boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { + boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { connection.getAdmin().grant( new UserPermission(userName, Permission.newBuilder(namespace).withActions(actions).build()), mergeExistingPermissions); } /** - * Grants permission on the specified namespace for the specified user. - * If permissions on the specified namespace exists, later granted permissions will override previous granted + * Grants permission on the specified namespace for the specified user. If permissions on the + * specified namespace exists, later granted permissions will override previous granted * permissions. - * @param connection The Connection instance to use - * @param namespace - * @param userName - * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnn */ public static void grant(Connection connection, final String namespace, final String userName, - final Permission.Action... actions) throws Throwable { + final Permission.Action... actions) throws Throwable { grant(connection, namespace, userName, true, actions); } /** - * Grant global permissions for the specified user. - * @param connection - * @param userName - * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. - * @param actions - * @throws Throwable + * Grant global permissions for the specified user. nn * @param mergeExistingPermissions If set to + * false, later granted permissions will override previous granted permissions. otherwise, it'll + * merge with previous granted permissions. nn */ private static void grant(Connection connection, final String userName, - boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { + boolean mergeExistingPermissions, final Permission.Action... 
actions) throws Throwable { connection.getAdmin().grant( new UserPermission(userName, Permission.newBuilder().withActions(actions).build()), mergeExistingPermissions); } /** - * Grant global permissions for the specified user. - * If permissions for the specified user exists, later granted permissions will override previous granted - * permissions. - * @param connection - * @param userName - * @param actions - * @throws Throwable + * Grant global permissions for the specified user. If permissions for the specified user exists, + * later granted permissions will override previous granted permissions. nnnn */ public static void grant(Connection connection, final String userName, - final Permission.Action... actions) throws Throwable { + final Permission.Action... actions) throws Throwable { grant(connection, userName, true, actions); } public static boolean isAccessControllerRunning(Connection connection) - throws MasterNotRunningException, ZooKeeperConnectionException, IOException { + throws MasterNotRunningException, ZooKeeperConnectionException, IOException { try (Admin admin = connection.getAdmin()) { return admin.isTableAvailable(ACL_TABLE_NAME); } @@ -187,31 +149,21 @@ public class AccessControlClient { /** * Revokes the permission on the table - * @param connection The Connection instance to use - * @param tableName - * @param username - * @param family - * @param qualifier - * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnnnn */ - public static void revoke(Connection connection, final TableName tableName, - final String username, final byte[] family, final byte[] qualifier, - final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final TableName tableName, final String username, + final byte[] family, final byte[] qualifier, final Permission.Action... actions) + throws Throwable { connection.getAdmin().revoke(new UserPermission(username, Permission.newBuilder(tableName) - .withFamily(family).withQualifier(qualifier).withActions(actions).build())); + .withFamily(family).withQualifier(qualifier).withActions(actions).build())); } /** * Revokes the permission on the namespace for the specified user. - * @param connection The Connection instance to use - * @param namespace - * @param userName - * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnn */ - public static void revoke(Connection connection, final String namespace, - final String userName, final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final String namespace, final String userName, + final Permission.Action... actions) throws Throwable { connection.getAdmin().revoke( new UserPermission(userName, Permission.newBuilder(namespace).withActions(actions).build())); } @@ -221,9 +173,9 @@ public class AccessControlClient { * @param connection The Connection instance to use */ public static void revoke(Connection connection, final String userName, - final Permission.Action... actions) throws Throwable { + final Permission.Action... actions) throws Throwable { connection.getAdmin() - .revoke(new UserPermission(userName, Permission.newBuilder().withActions(actions).build())); + .revoke(new UserPermission(userName, Permission.newBuilder().withActions(actions).build())); } /** @@ -232,11 +184,10 @@ public class AccessControlClient { * along with the list of superusers would be returned. Else, no rows get returned. 
* @param connection The Connection instance to use * @param tableRegex The regular expression string to match against - * @return List of UserPermissions - * @throws Throwable + * @return List of UserPermissions n */ public static List getUserPermissions(Connection connection, String tableRegex) - throws Throwable { + throws Throwable { return getUserPermissions(connection, tableRegex, HConstants.EMPTY_STRING); } @@ -244,12 +195,12 @@ public class AccessControlClient { * List all the userPermissions matching the given table pattern and user name. * @param connection Connection * @param tableRegex The regular expression string to match against - * @param userName User name, if empty then all user permissions will be retrieved. + * @param userName User name, if empty then all user permissions will be retrieved. * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - String userName) throws Throwable { + String userName) throws Throwable { List permList = new ArrayList<>(); try (Admin admin = connection.getAdmin()) { if (tableRegex == null || tableRegex.isEmpty()) { @@ -269,7 +220,7 @@ public class AccessControlClient { List htds = admin.listTableDescriptors(Pattern.compile(tableRegex), true); for (TableDescriptor htd : htds) { permList.addAll(admin.getUserPermissions(GetUserPermissionsRequest - .newBuilder(htd.getTableName()).withUserName(userName).build())); + .newBuilder(htd.getTableName()).withUserName(userName).build())); } } } @@ -278,46 +229,46 @@ public class AccessControlClient { /** * List all the userPermissions matching the given table pattern and column family. - * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, empty + * or a namespace regular expression. * @param columnFamily Column family * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily) throws Throwable { + byte[] columnFamily) throws Throwable { return getUserPermissions(connection, tableRegex, columnFamily, null, HConstants.EMPTY_STRING); } /** * List all the userPermissions matching the given table pattern, column family and user name. - * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, empty + * or a namespace regular expression. * @param columnFamily Column family - * @param userName User name, if empty then all user permissions will be retrieved. + * @param userName User name, if empty then all user permissions will be retrieved. * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily, String userName) throws Throwable { + byte[] columnFamily, String userName) throws Throwable { return getUserPermissions(connection, tableRegex, columnFamily, null, userName); } /** * List all the userPermissions matching the given table pattern, column family and column * qualifier. 
- * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. - * @param columnFamily Column family + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, + * empty or a namespace regular expression. + * @param columnFamily Column family * @param columnQualifier Column qualifier * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily, byte[] columnQualifier) throws Throwable { + byte[] columnFamily, byte[] columnQualifier) throws Throwable { return getUserPermissions(connection, tableRegex, columnFamily, columnQualifier, HConstants.EMPTY_STRING); } @@ -325,17 +276,17 @@ public class AccessControlClient { /** * List all the userPermissions matching the given table pattern, column family and column * qualifier. - * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. - * @param columnFamily Column family + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, + * empty or a namespace regular expression. + * @param columnFamily Column family * @param columnQualifier Column qualifier - * @param userName User name, if empty then all user permissions will be retrieved. + * @param userName User name, if empty then all user permissions will be retrieved. * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily, byte[] columnQualifier, String userName) throws Throwable { + byte[] columnFamily, byte[] columnQualifier, String userName) throws Throwable { if (tableRegex == null || tableRegex.isEmpty() || tableRegex.charAt(0) == '@') { throw new IllegalArgumentException("Table name can't be null or empty or a namespace."); } @@ -346,7 +297,7 @@ public class AccessControlClient { for (TableDescriptor htd : htds) { permList.addAll(admin.getUserPermissions( GetUserPermissionsRequest.newBuilder(htd.getTableName()).withFamily(columnFamily) - .withQualifier(columnQualifier).withUserName(userName).build())); + .withQualifier(columnQualifier).withUserName(userName).build())); } } return permList; @@ -355,20 +306,20 @@ public class AccessControlClient { /** * Validates whether specified user has permission to perform actions on the mentioned table, * column family or column qualifier. - * @param connection Connection - * @param tableName Table name, it shouldn't be null or empty. - * @param columnFamily The column family. Optional argument, can be empty. If empty then - * validation will happen at table level. + * @param connection Connection + * @param tableName Table name, it shouldn't be null or empty. + * @param columnFamily The column family. Optional argument, can be empty. If empty then + * validation will happen at table level. * @param columnQualifier The column qualifier. Optional argument, can be empty. If empty then - * validation will happen at table and column family level. columnQualifier will not be - * considered if columnFamily is passed as null or empty. - * @param userName User name, it shouldn't be null or empty. - * @param actions Actions + * validation will happen at table and column family level. 
columnQualifier + * will not be considered if columnFamily is passed as null or empty. + * @param userName User name, it shouldn't be null or empty. + * @param actions Actions * @return true if access allowed to the specified user, otherwise false. * @throws Throwable on failure */ public static boolean hasPermission(Connection connection, String tableName, String columnFamily, - String columnQualifier, String userName, Permission.Action... actions) throws Throwable { + String columnQualifier, String userName, Permission.Action... actions) throws Throwable { return hasPermission(connection, tableName, Bytes.toBytes(columnFamily), Bytes.toBytes(columnQualifier), userName, actions); } @@ -376,26 +327,26 @@ public class AccessControlClient { /** * Validates whether specified user has permission to perform actions on the mentioned table, * column family or column qualifier. - * @param connection Connection - * @param tableName Table name, it shouldn't be null or empty. - * @param columnFamily The column family. Optional argument, can be empty. If empty then - * validation will happen at table level. + * @param connection Connection + * @param tableName Table name, it shouldn't be null or empty. + * @param columnFamily The column family. Optional argument, can be empty. If empty then + * validation will happen at table level. * @param columnQualifier The column qualifier. Optional argument, can be empty. If empty then - * validation will happen at table and column family level. columnQualifier will not be - * considered if columnFamily is passed as null or empty. - * @param userName User name, it shouldn't be null or empty. - * @param actions Actions + * validation will happen at table and column family level. columnQualifier + * will not be considered if columnFamily is passed as null or empty. + * @param userName User name, it shouldn't be null or empty. + * @param actions Actions * @return true if access allowed to the specified user, otherwise false. * @throws Throwable on failure */ public static boolean hasPermission(Connection connection, String tableName, byte[] columnFamily, - byte[] columnQualifier, String userName, Permission.Action... actions) throws Throwable { + byte[] columnQualifier, String userName, Permission.Action... actions) throws Throwable { if (StringUtils.isEmpty(tableName) || StringUtils.isEmpty(userName)) { throw new IllegalArgumentException("Table and user name can't be null or empty."); } List permissions = new ArrayList<>(1); permissions.add(Permission.newBuilder(TableName.valueOf(tableName)).withFamily(columnFamily) - .withQualifier(columnQualifier).withActions(actions).build()); + .withQualifier(columnQualifier).withActions(actions).build()); return connection.getAdmin().hasUserPermissions(userName, permissions).get(0); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java index e0c4d99dfca..a795d296fe7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
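As a reading aid for the AccessControlClient methods reformatted above (grant, revoke, getUserPermissions, hasPermission), here is a minimal usage sketch. It is illustrative only: the table "t1", column family "f1", and user "bob" are hypothetical names, and it assumes a cluster with the AccessController coprocessor enabled.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class AccessControlClientExample {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection()) {
      // Grant READ and WRITE on table t1, family f1, to user bob (all names hypothetical).
      AccessControlClient.grant(conn, TableName.valueOf("t1"), "bob", Bytes.toBytes("f1"), null,
        Permission.Action.READ, Permission.Action.WRITE);
      // Check the grant; an empty qualifier means validation stops at table/family level.
      boolean allowed =
        AccessControlClient.hasPermission(conn, "t1", "f1", "", "bob", Permission.Action.READ);
      System.out.println("bob can read t1/f1: " + allowed);
      // List permissions matching the table pattern, then revoke what was granted.
      AccessControlClient.getUserPermissions(conn, "t1").forEach(System.out::println);
      AccessControlClient.revoke(conn, TableName.valueOf("t1"), "bob", Bytes.toBytes("f1"), null,
        Permission.Action.READ, Permission.Action.WRITE);
    }
  }
}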
*/ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; @@ -24,16 +23,16 @@ import org.apache.yetus.audience.InterfaceAudience; public interface AccessControlConstants { /** - * Configuration option that toggles whether EXEC permission checking is - * performed during coprocessor endpoint invocations. + * Configuration option that toggles whether EXEC permission checking is performed during + * coprocessor endpoint invocations. */ public static final String EXEC_PERMISSION_CHECKS_KEY = "hbase.security.exec.permission.checks"; /** Default setting for hbase.security.exec.permission.checks; false */ public static final boolean DEFAULT_EXEC_PERMISSION_CHECKS = false; /** - * Configuration or CF schema option for early termination of access checks - * if table or CF permissions grant access. Pre-0.98 compatible behavior + * Configuration or CF schema option for early termination of access checks if table or CF + * permissions grant access. Pre-0.98 compatible behavior */ public static final String CF_ATTRIBUTE_EARLY_OUT = "hbase.security.access.early_out"; /** Default setting for hbase.security.access.early_out */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java index 484cee3cb51..6496aaa0bb8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,53 +17,52 @@ */ package org.apache.hadoop.hbase.security.access; +import com.google.protobuf.ByteString; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import com.google.protobuf.ByteString; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; /** * @since 2.0.0 */ @InterfaceAudience.Private public class AccessControlUtil { - private AccessControlUtil() {} + private AccessControlUtil() { + } /** * Create a request to grant user table permissions. 
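The two AccessControlConstants keys touched a little further up are ordinary Configuration switches. The sketch below only illustrates the constant names and defaults; in practice hbase.security.exec.permission.checks is set in the server-side hbase-site.xml rather than in client code, and the literal default used for the early-out key here is an assumption for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;

public class AccessControlConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults to false; when enabled, the AccessController checks EXEC permission
    // before coprocessor endpoint invocations.
    boolean execChecks = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
      AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
    // Early termination when a table or CF level grant already allows access
    // (pre-0.98 compatible behaviour); 'true' is assumed here as the fallback.
    boolean earlyOut = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, true);
    System.out.println("exec checks: " + execChecks + ", early out: " + earlyOut);
  }
}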
- * - * @param username the short user name who to grant permissions + * @param username the short user name who to grant permissions * @param tableName optional table name the permissions apply - * @param family optional column family + * @param family optional column family * @param qualifier optional qualifier - * @param actions the permissions to be granted + * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, - boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, boolean mergeExistingPermissions, + AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -78,31 +77,26 @@ public class AccessControlUtil { if (qualifier != null) { permissionBuilder.setQualifier(ByteStringer.wrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to grant user namespace permissions. - * - * @param username the short user name who to grant permissions + * @param username the short user name who to grant permissions * @param namespace optional table name the permissions apply - * @param actions the permissions to be granted + * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, String namespace, boolean mergeExistingPermissions, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + String namespace, boolean mergeExistingPermissions, + AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -110,56 +104,46 @@ public class AccessControlUtil { permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to revoke user global permissions. - * * @param username the short user name whose permissions to be revoked - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to revoke user namespace permissions. - * - * @param username the short user name whose permissions to be revoked + * @param username the short user name whose permissions to be revoked * @param namespace optional table name the permissions apply - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, String namespace, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + String namespace, AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -167,60 +151,51 @@ public class AccessControlUtil { permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to grant user global permissions. - * * @param username the short user name who to grant permissions - * @param actions the permissions to be granted + * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ public static AccessControlProtos.GrantRequest buildGrantRequest(String username, - boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions(String user, - Permission perms) { + Permission perms) { return AccessControlProtos.UsersAndPermissions.newBuilder() - .addUserPermissions(AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder() - .setUser(ByteString.copyFromUtf8(user)) - .addPermissions(toPermission(perms)) - .build()) - .build(); + .addUserPermissions(AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder() + .setUser(ByteString.copyFromUtf8(user)).addPermissions(toPermission(perms)).build()) + .build(); } - public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions( - ListMultimap perms) { + public static AccessControlProtos.UsersAndPermissions + toUsersAndPermissions(ListMultimap perms) { AccessControlProtos.UsersAndPermissions.Builder builder = - 
AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perms.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = - AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (Permission perm: entry.getValue()) { + for (Permission perm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(perm)); } builder.addUserPermissions(userPermBuilder.build()); @@ -228,13 +203,13 @@ public class AccessControlUtil { return builder.build(); } - public static ListMultimap toUsersAndPermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUsersAndPermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap result = ArrayListMultimap.create(); - for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms: - proto.getUserPermissionsList()) { + for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms : proto + .getUserPermissionsList()) { String user = userPerms.getUser().toStringUtf8(); - for (AccessControlProtos.Permission perm: userPerms.getPermissionsList()) { + for (AccessControlProtos.Permission perm : userPerms.getPermissionsList()) { result.put(user, toPermission(perm)); } } @@ -282,7 +257,7 @@ public class AccessControlUtil { throw new IllegalStateException("Namespace must not be empty in NamespacePermission"); } return Permission.newBuilder(perm.getNamespaceName().toStringUtf8()).withActions(actions) - .build(); + .build(); } if (proto.getType() == AccessControlProtos.Permission.Type.Table) { AccessControlProtos.TablePermission perm = proto.getTablePermission(); @@ -301,14 +276,13 @@ public class AccessControlUtil { qualifier = perm.getQualifier().toByteArray(); } return Permission.newBuilder(table).withFamily(family).withQualifier(qualifier) - .withActions(actions).build(); + .withActions(actions).build(); } throw new IllegalStateException("Unrecognize Perm Type: " + proto.getType()); } /** * Convert a client Permission to a Permission proto - * * @param perm the client Permission * @return the protobuf Permission */ @@ -353,7 +327,7 @@ public class AccessControlUtil { AccessControlProtos.GlobalPermission.newBuilder(); Permission.Action[] actions = perm.getActions(); if (actions != null) { - for (Permission.Action a: actions) { + for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } @@ -364,12 +338,11 @@ public class AccessControlUtil { /** * Converts a list of Permission.Action proto to an array of client Permission.Action objects. - * * @param protoActions the list of protobuf Actions * @return the converted array of Actions */ public static Permission.Action[] - toPermissionActions(List protoActions) { + toPermissionActions(List protoActions) { Permission.Action[] actions = new Permission.Action[protoActions.size()]; for (int i = 0; i < protoActions.size(); i++) { actions[i] = toPermissionAction(protoActions.get(i)); @@ -379,68 +352,62 @@ public class AccessControlUtil { /** * Converts a Permission.Action proto to a client Permission.Action object. 
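The buildGrantRequest / buildRevokeRequest helpers reformatted above simply assemble AccessControlProtos messages. These are internal (InterfaceAudience.Private) utilities, so the sketch below is purely to illustrate the message shape, using a hypothetical user and table.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.security.access.AccessControlUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class GrantRequestExample {
  public static void main(String[] args) {
    // Build a GrantRequest for user "bob" on table t1, family f1 (hypothetical names),
    // merging with whatever permissions the user already holds.
    AccessControlProtos.GrantRequest grant = AccessControlUtil.buildGrantRequest("bob",
      TableName.valueOf("t1"), Bytes.toBytes("f1"), null, true,
      AccessControlProtos.Permission.Action.READ, AccessControlProtos.Permission.Action.WRITE);
    System.out.println(grant.getUserPermission().getUser().toStringUtf8()); // bob

    // The matching revoke request has no mergeExistingPermissions flag.
    AccessControlProtos.RevokeRequest revoke = AccessControlUtil.buildRevokeRequest("bob",
      TableName.valueOf("t1"), Bytes.toBytes("f1"), null,
      AccessControlProtos.Permission.Action.READ);
    System.out.println(revoke.getUserPermission().getPermission().getType()); // Table
  }
}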
- * * @param action the protobuf Action * @return the converted Action */ - public static Permission.Action toPermissionAction( - AccessControlProtos.Permission.Action action) { + public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client Permission.Action to a Permission.Action proto - * * @param action the client Action * @return the protobuf Action */ - public static AccessControlProtos.Permission.Action toPermissionAction( - Permission.Action action) { + public static AccessControlProtos.Permission.Action toPermissionAction(Permission.Action action) { switch (action) { - case READ: - return AccessControlProtos.Permission.Action.READ; - case WRITE: - return AccessControlProtos.Permission.Action.WRITE; - case EXEC: - return AccessControlProtos.Permission.Action.EXEC; - case CREATE: - return AccessControlProtos.Permission.Action.CREATE; - case ADMIN: - return AccessControlProtos.Permission.Action.ADMIN; + case READ: + return AccessControlProtos.Permission.Action.READ; + case WRITE: + return AccessControlProtos.Permission.Action.WRITE; + case EXEC: + return AccessControlProtos.Permission.Action.EXEC; + case CREATE: + return AccessControlProtos.Permission.Action.CREATE; + case ADMIN: + return AccessControlProtos.Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client user permission to a user permission proto - * * @param perm the client UserPermission * @return the protobuf UserPermission */ public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { return AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(perm.getUser())) - .setPermission(toPermission(perm.getPermission())) - .build(); + .setUser(ByteString.copyFromUtf8(perm.getUser())) + .setPermission(toPermission(perm.getPermission())).build(); } /** * Converts the permissions list into a protocol buffer GetUserPermissionsResponse */ - public static GetUserPermissionsResponse buildGetUserPermissionsResponse( - final List permissions) { + public static GetUserPermissionsResponse + buildGetUserPermissionsResponse(final List permissions) { GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder(); for (UserPermission perm : permissions) { builder.addUserPermission(toUserPermission(perm)); @@ -450,7 +417,6 @@ public class AccessControlUtil { /** * Converts a user permission proto to a client user permission object. 
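The two toPermissionAction overloads above are symmetric, mapping between the client Permission.Action enum and its protobuf counterpart, with toPermissionActions handling whole lists. A quick round-trip sketch (again using the internal AccessControlUtil helpers only for illustration):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.security.access.AccessControlUtil;
import org.apache.hadoop.hbase.security.access.Permission;

public class PermissionActionConversionExample {
  public static void main(String[] args) {
    // Client enum -> protobuf enum -> client enum again.
    AccessControlProtos.Permission.Action protoRead =
      AccessControlUtil.toPermissionAction(Permission.Action.READ);
    Permission.Action clientRead = AccessControlUtil.toPermissionAction(protoRead);
    System.out.println(clientRead); // READ

    // A whole list of protobuf actions converted in one call.
    List<AccessControlProtos.Permission.Action> protoActions = Arrays.asList(
      AccessControlProtos.Permission.Action.READ, AccessControlProtos.Permission.Action.WRITE);
    System.out.println(Arrays.toString(AccessControlUtil.toPermissionActions(protoActions)));
  }
}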
- * * @param proto the protobuf UserPermission * @return the converted UserPermission */ @@ -459,21 +425,20 @@ public class AccessControlUtil { } /** - * Convert a ListMultimap<String, TablePermission> where key is username - * to a protobuf UserPermission - * + * Convert a ListMultimap<String, TablePermission> where key is username to a protobuf + * UserPermission * @param perm the list of user and table permissions * @return the protobuf UserTablePermissions */ - public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( - ListMultimap perm) { + public static AccessControlProtos.UsersAndPermissions + toUserTablePermissions(ListMultimap perm) { AccessControlProtos.UsersAndPermissions.Builder builder = - AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perm.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = - AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (UserPermission userPerm: entry.getValue()) { + for (UserPermission userPerm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(userPerm.getPermission())); } builder.addUserPermissions(userPermBuilder.build()); @@ -485,55 +450,52 @@ public class AccessControlUtil { * A utility used to grant a user global permissions. *

      * It's also called by the shell, in case you want to find references. - * - * @param protocol the AccessControlService protocol proxy + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions - * @param actions the permissions to be granted - * @throws ServiceException - * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead. + * @param actions the permissions to be granted n * @deprecated Use + * {@link Admin#grant(UserPermission, boolean)} instead. */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, boolean mergeExistingPermissions, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, + boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, mergeExistingPermissions, + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, mergeExistingPermissions, permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } /** - * A utility used to grant a user table permissions. The permissions will - * be for a table table/column family/qualifier. + * A utility used to grant a user table permissions. The permissions will be for a table + * table/column family/qualifier. *

      * It's also called by the shell, in case you want to find references. - * - * @param protocol the AccessControlService protocol proxy + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions - * @param tableName optional table name - * @param f optional column family - * @param q optional qualifier - * @param actions the permissions to be granted - * @throws ServiceException - * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead. + * @param tableName optional table name + * @param f optional column family + * @param q optional qualifier + * @param actions the permissions to be granted n * @deprecated Use + * {@link Admin#grant(UserPermission, boolean)} instead. */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, - byte[] f, byte[] q, boolean mergeExistingPermissions, Permission.Action... actions) - throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, + byte[] f, byte[] q, boolean mergeExistingPermissions, Permission.Action... actions) + throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.GrantRequest request = - buildGrantRequest(userShortName, tableName, f, q, mergeExistingPermissions, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + buildGrantRequest(userShortName, tableName, f, q, mergeExistingPermissions, + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } @@ -541,24 +503,23 @@ public class AccessControlUtil { * A utility used to grant a user namespace permissions. *

      * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param namespace the short name of the user to grant permissions - * @param actions the permissions to be granted - * @throws ServiceException - * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead. + * @param protocol the AccessControlService protocol proxy + * @param namespace the short name of the user to grant permissions + * @param actions the permissions to be granted n * @deprecated Use + * {@link Admin#grant(UserPermission, boolean)} instead. */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, String namespace, - boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, String namespace, + boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, namespace, mergeExistingPermissions, + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, namespace, mergeExistingPermissions, permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } @@ -567,55 +528,53 @@ public class AccessControlUtil { * A utility used to revoke a user's global permissions. *

      * It's also called by the shell, in case you want to find references. - * - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @throws ServiceException on failure * @deprecated Use {@link Admin#revoke(UserPermission)} instead. */ @Deprecated public static void revoke(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, + Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } /** - * A utility used to revoke a user's table permissions. The permissions will - * be for a table/column family/qualifier. + * A utility used to revoke a user's table permissions. The permissions will be for a table/column + * family/qualifier. *

      * It's also called by the shell, in case you want to find references. - * - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions - * @param tableName optional table name - * @param f optional column family - * @param q optional qualifier - * @param actions the permissions to be revoked + * @param tableName optional table name + * @param f optional column family + * @param q optional qualifier + * @param actions the permissions to be revoked * @throws ServiceException on failure * @deprecated Use {@link Admin#revoke(UserPermission)} instead. */ @Deprecated public static void revoke(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, - byte[] f, byte[] q, Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, + byte[] f, byte[] q, Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, tableName, f, q, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -623,26 +582,25 @@ public class AccessControlUtil { * A utility used to revoke a user's namespace permissions. *

      * It's also called by the shell, in case you want to find references. - * - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions - * @param namespace optional table name - * @param actions the permissions to be revoked + * @param namespace optional table name + * @param actions the permissions to be revoked * @throws ServiceException on failure * @deprecated Use {@link Admin#revoke(UserPermission)} instead. */ @Deprecated public static void revoke(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, String namespace, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, String namespace, + Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, namespace, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -650,31 +608,30 @@ public class AccessControlUtil { * A utility used to get user's global permissions. *

      * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param protocol the AccessControlService protocol proxy * @throws ServiceException on failure * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol) throws ServiceException { + AccessControlService.BlockingInterface protocol) throws ServiceException { return getUserPermissions(controller, protocol, HConstants.EMPTY_STRING); } /** * A utility used to get user's global permissions based on the specified user name. * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param userName User name, if empty then all user permissions will be retrieved. - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param userName User name, if empty then all user permissions will be retrieved. n + * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} + * instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, String userName) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userName) throws ServiceException { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); builder.setType(AccessControlProtos.Permission.Type.Global); if (!StringUtils.isEmpty(userName)) { builder.setUserName(ByteString.copyFromUtf8(userName)); @@ -682,7 +639,7 @@ public class AccessControlUtil { AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = - protocol.getUserPermissions(controller, request); + protocol.getUserPermissions(controller, request); List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) { perms.add(toUserPermission(perm)); @@ -694,38 +651,35 @@ public class AccessControlUtil { * A utility used to get user table permissions. *

      * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param t optional table name - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param t optional table name n * @deprecated Use + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - TableName t) throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName t) throws ServiceException { return getUserPermissions(controller, protocol, t, null, null, HConstants.EMPTY_STRING); } /** * A utility used to get user table permissions based on the column family, column qualifier and * user name. - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param t optional table name - * @param columnFamily Column family + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy + * @param t optional table name + * @param columnFamily Column family * @param columnQualifier Column qualifier - * @param userName User name, if empty then all user permissions will be retrieved. - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param userName User name, if empty then all user permissions will be retrieved. n + * * @deprecated Use + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, TableName t, byte[] columnFamily, - byte[] columnQualifier, String userName) throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName t, byte[] columnFamily, + byte[] columnQualifier, String userName) throws ServiceException { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); if (t != null) { builder.setTableName(ProtobufUtil.toProtoTableName(t)); } @@ -742,7 +696,7 @@ public class AccessControlUtil { builder.setType(AccessControlProtos.Permission.Type.Table); AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = - protocol.getUserPermissions(controller, request); + protocol.getUserPermissions(controller, request); List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) { perms.add(toUserPermission(perm)); @@ -754,35 +708,32 @@ public class AccessControlUtil { * A utility used to get permissions for selected namespace. *

      * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param namespace name of the namespace - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param namespace name of the namespace n * @deprecated Use + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - byte[] namespace) throws ServiceException { + AccessControlService.BlockingInterface protocol, byte[] namespace) throws ServiceException { return getUserPermissions(controller, protocol, namespace, HConstants.EMPTY_STRING); } /** * A utility used to get permissions for selected namespace based on the specified user name. * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param namespace name of the namespace - * @param userName User name, if empty then all user permissions will be retrieved. - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param namespace name of the namespace + * @param userName User name, if empty then all user permissions will be retrieved. n + * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} + * instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, byte[] namespace, String userName) - throws ServiceException { + AccessControlService.BlockingInterface protocol, byte[] namespace, String userName) + throws ServiceException { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); if (namespace != null) { builder.setNamespaceName(ByteStringer.wrap(namespace)); } @@ -792,7 +743,7 @@ public class AccessControlUtil { builder.setType(AccessControlProtos.Permission.Type.Namespace); AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = - protocol.getUserPermissions(controller, request); + protocol.getUserPermissions(controller, request); List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) { perms.add(toUserPermission(perm)); @@ -803,29 +754,27 @@ public class AccessControlUtil { /** * Validates whether specified user has permission to perform actions on the mentioned table, * column family or column qualifier. - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param tableName Table name, it shouldn't be null or empty. - * @param columnFamily The column family. Optional argument, can be empty. If empty then - * validation will happen at table level. + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy + * @param tableName Table name, it shouldn't be null or empty. + * @param columnFamily The column family. Optional argument, can be empty. If empty then + * validation will happen at table level. * @param columnQualifier The column qualifier. 
Optional argument, can be empty. If empty then - * validation will happen at table and column family level. columnQualifier will not be - * considered if columnFamily is passed as null or empty. - * @param userName User name, it shouldn't be null or empty. - * @param actions Actions - * @return true if access allowed, otherwise false - * @throws ServiceException - * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead. + * validation will happen at table and column family level. columnQualifier + * will not be considered if columnFamily is passed as null or empty. + * @param userName User name, it shouldn't be null or empty. + * @param actions Actions + * @return true if access allowed, otherwise false n * @deprecated Use + * {@link Admin#hasUserPermissions(String, List)} instead. */ @Deprecated public static boolean hasPermission(RpcController controller, - AccessControlService.BlockingInterface protocol, TableName tableName, byte[] columnFamily, - byte[] columnQualifier, String userName, Permission.Action[] actions) - throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName tableName, byte[] columnFamily, + byte[] columnQualifier, String userName, Permission.Action[] actions) throws ServiceException { AccessControlProtos.TablePermission.Builder tablePermissionBuilder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); tablePermissionBuilder - .setTableName(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toProtoTableName(tableName)); + .setTableName(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toProtoTableName(tableName)); if (Bytes.len(columnFamily) > 0) { tablePermissionBuilder.setFamily(ByteStringer.wrap(columnFamily)); } @@ -836,10 +785,10 @@ public class AccessControlUtil { tablePermissionBuilder.addAction(toPermissionAction(a)); } AccessControlProtos.HasPermissionRequest request = AccessControlProtos.HasPermissionRequest - .newBuilder().setTablePermission(tablePermissionBuilder) - .setUserName(ByteString.copyFromUtf8(userName)).build(); + .newBuilder().setTablePermission(tablePermissionBuilder) + .setUserName(ByteString.copyFromUtf8(userName)).build(); AccessControlProtos.HasPermissionResponse response = - protocol.hasPermission(controller, request); + protocol.hasPermission(controller, request); return response.getHasPermission(); } @@ -848,8 +797,8 @@ public class AccessControlUtil { * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toUserPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap userPermission = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -868,8 +817,8 @@ public class AccessControlUtil { * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -884,21 +833,19 @@ public class AccessControlUtil { /** * Create a request to revoke user table permissions. 
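Every deprecated shell utility in this file points at the same replacement: the Admin ACL API (Admin#grant, Admin#revoke, Admin#getUserPermissions, Admin#hasUserPermissions). A sketch of that recommended path follows, assuming the AccessController coprocessor is enabled and using hypothetical names ("t1", "f1", "bob").

import java.util.Collections;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

public class AdminAclExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(); Admin admin = conn.getAdmin()) {
      // Grant READ on t1/f1 to bob; 'true' merges with any permissions bob already has.
      admin.grant(new UserPermission("bob",
        Permission.newBuilder(TableName.valueOf("t1")).withFamily(Bytes.toBytes("f1"))
          .withActions(Permission.Action.READ).build()), true);

      // List bob's permissions on t1.
      admin.getUserPermissions(
        GetUserPermissionsRequest.newBuilder(TableName.valueOf("t1")).withUserName("bob").build())
        .forEach(System.out::println);

      // Check the permission, then revoke it again.
      System.out.println(admin.hasUserPermissions("bob",
        Collections.singletonList(Permission.newBuilder(TableName.valueOf("t1"))
          .withFamily(Bytes.toBytes("f1")).withActions(Permission.Action.READ).build())));
      admin.revoke(new UserPermission("bob",
        Permission.newBuilder(TableName.valueOf("t1")).withFamily(Bytes.toBytes("f1"))
          .withActions(Permission.Action.READ).build()));
    }
  }
}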
- * - * @param username the short user name whose permissions to be revoked + * @param username the short user name whose permissions to be revoked * @param tableName optional table name the permissions apply - * @param family optional column family + * @param family optional column family * @param qualifier optional qualifier - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, + AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -911,13 +858,10 @@ public class AccessControlUtil { if (qualifier != null) { permissionBuilder.setQualifier(ByteStringer.wrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java index 8e1767cce94..03dee44abc7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; @@ -36,7 +35,7 @@ public final class GetUserPermissionsRequest { private byte[] qualifier; private GetUserPermissionsRequest(String userName, String namespace, TableName tableName, - byte[] family, byte[] qualifier) { + byte[] family, byte[] qualifier) { this.userName = userName; this.namespace = namespace; this.tableName = tableName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java index 01d53ebb37f..570c543b4b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java index 7781d229569..b4cbe6723a5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -38,7 +36,7 @@ public class NamespacePermission extends Permission { /** * Construct a namespace permission. * @param namespace namespace's name - * @param assigned assigned actions + * @param assigned assigned actions */ NamespacePermission(String namespace, Action... assigned) { super(assigned); @@ -53,7 +51,7 @@ public class NamespacePermission extends Permission { /** * check if given action is granted in given namespace. 
* @param namespace namespace's name - * @param action action to be checked + * @param action action to be checked * @return true if granted, false otherwise */ public boolean implies(String namespace, Action action) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java index 49f2432ffa5..b3ac386689e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; @@ -27,20 +26,17 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Objects; - import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.VersionedWritable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.VersionedWritable; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** - * Base permissions instance representing the ability to perform a given set - * of actions. - * + * Base permissions instance representing the ability to perform a given set of actions. * @see TablePermission */ @InterfaceAudience.Public @@ -49,21 +45,32 @@ public class Permission extends VersionedWritable { @InterfaceAudience.Public public enum Action { - READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); + READ('R'), + WRITE('W'), + EXEC('X'), + CREATE('C'), + ADMIN('A'); private final byte code; + Action(char code) { this.code = (byte) code; } - public byte code() { return code; } + public byte code() { + return code; + } } @InterfaceAudience.Private protected enum Scope { - GLOBAL('G'), NAMESPACE('N'), TABLE('T'), EMPTY('E'); + GLOBAL('G'), + NAMESPACE('N'), + TABLE('T'), + EMPTY('E'); private final byte code; + Scope(char code) { this.code = (byte) code; } @@ -82,23 +89,15 @@ public class Permission extends VersionedWritable { protected Scope scope = Scope.EMPTY; static { - ACTION_BY_CODE = ImmutableMap.of( - Action.READ.code, Action.READ, - Action.WRITE.code, Action.WRITE, - Action.EXEC.code, Action.EXEC, - Action.CREATE.code, Action.CREATE, - Action.ADMIN.code, Action.ADMIN - ); + ACTION_BY_CODE = ImmutableMap.of(Action.READ.code, Action.READ, Action.WRITE.code, Action.WRITE, + Action.EXEC.code, Action.EXEC, Action.CREATE.code, Action.CREATE, Action.ADMIN.code, + Action.ADMIN); - SCOPE_BY_CODE = ImmutableMap.of( - Scope.GLOBAL.code, Scope.GLOBAL, - Scope.NAMESPACE.code, Scope.NAMESPACE, - Scope.TABLE.code, Scope.TABLE, - Scope.EMPTY.code, Scope.EMPTY - ); + SCOPE_BY_CODE = ImmutableMap.of(Scope.GLOBAL.code, Scope.GLOBAL, Scope.NAMESPACE.code, + Scope.NAMESPACE, Scope.TABLE.code, Scope.TABLE, Scope.EMPTY.code, Scope.EMPTY); } - /** Empty constructor for Writable implementation. 
Do not use. */ + /** Empty constructor for Writable implementation. Do not use. */ public Permission() { super(); } @@ -114,8 +113,8 @@ public class Permission extends VersionedWritable { for (byte code : actionCodes) { Action action = ACTION_BY_CODE.get(code); if (action == null) { - LOG.error("Ignoring unknown action code '" + - Bytes.toStringBinary(new byte[] { code }) + "'"); + LOG.error( + "Ignoring unknown action code '" + Bytes.toStringBinary(new byte[] { code }) + "'"); continue; } actions.add(action); @@ -146,9 +145,8 @@ public class Permission extends VersionedWritable { } /** - * Check if two permission equals regardless of actions. It is useful when - * merging a new permission with an existed permission which needs to check two permissions's - * fields. + * Check if two permission equals regardless of actions. It is useful when merging a new + * permission with an existed permission which needs to check two permissions's fields. * @param obj instance * @return true if equals, false otherwise */ @@ -221,8 +219,8 @@ public class Permission extends VersionedWritable { byte b = in.readByte(); Action action = ACTION_BY_CODE.get(b); if (action == null) { - throw new IOException("Unknown action code '" + - Bytes.toStringBinary(new byte[] { b }) + "' in input"); + throw new IOException( + "Unknown action code '" + Bytes.toStringBinary(new byte[] { b }) + "' in input"); } actions.add(action); } @@ -235,7 +233,7 @@ public class Permission extends VersionedWritable { super.write(out); out.writeByte(actions != null ? actions.size() : 0); if (actions != null) { - for (Action a: actions) { + for (Action a : actions) { out.writeByte(a.code()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java index 661bcc842a8..b6df2c94a04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Collection; @@ -28,6 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** * Convert protobuf objects in AccessControl.proto under hbase-protocol-shaded to user-oriented * objects and vice versa.
      - * * In HBASE-15638, we create a hbase-protocol-shaded module for upgrading protobuf version to 3.x, * but there are still some coprocessor endpoints(such as AccessControl, Authentication, * MulitRowMutation) which depend on hbase-protocol module for CPEP compatibility. In fact, we use @@ -73,16 +72,16 @@ public class ShadedAccessControlUtil { */ public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } throw new IllegalArgumentException("Unknown action value " + action.name()); } @@ -94,7 +93,7 @@ public class ShadedAccessControlUtil { * @return the converted array of Actions */ public static Permission.Action[] - toPermissionActions(List protoActions) { + toPermissionActions(List protoActions) { Permission.Action[] actions = new Permission.Action[protoActions.size()]; for (int i = 0; i < protoActions.size(); i++) { actions[i] = toPermissionAction(protoActions.get(i)); @@ -110,8 +109,8 @@ public class ShadedAccessControlUtil { public static HBaseProtos.TableName toProtoTableName(TableName tableName) { return HBaseProtos.TableName.newBuilder() - .setNamespace(ByteString.copyFrom(tableName.getNamespace())) - .setQualifier(ByteString.copyFrom(tableName.getQualifier())).build(); + .setNamespace(ByteString.copyFrom(tableName.getNamespace())) + .setQualifier(ByteString.copyFrom(tableName.getQualifier())).build(); } /** @@ -151,7 +150,7 @@ public class ShadedAccessControlUtil { if (perm.hasFamily()) family = perm.getFamily().toByteArray(); if (perm.hasQualifier()) qualifier = perm.getQualifier().toByteArray(); return Permission.newBuilder(table).withFamily(family).withQualifier(qualifier) - .withActions(actions).build(); + .withActions(actions).build(); } throw new IllegalStateException("Unrecognize Perm Type: " + proto.getType()); } @@ -167,9 +166,9 @@ public class ShadedAccessControlUtil { NamespacePermission nsPerm = (NamespacePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Namespace); AccessControlProtos.NamespacePermission.Builder builder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); builder.setNamespaceName(org.apache.hbase.thirdparty.com.google.protobuf.ByteString - .copyFromUtf8(nsPerm.getNamespace())); + .copyFromUtf8(nsPerm.getNamespace())); Permission.Action[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { @@ -181,7 +180,7 @@ public class ShadedAccessControlUtil { TablePermission tablePerm = (TablePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Table); AccessControlProtos.TablePermission.Builder builder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); builder.setTableName(toProtoTableName(tablePerm.getTableName())); if (tablePerm.hasFamily()) { builder.setFamily(ByteString.copyFrom(tablePerm.getFamily())); @@ -200,7 +199,7 @@ public class ShadedAccessControlUtil { // perm.getAccessScope() == Permission.Scope.GLOBAL 
ret.setType(AccessControlProtos.Permission.Type.Global); AccessControlProtos.GlobalPermission.Builder builder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); Permission.Action[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { @@ -218,8 +217,8 @@ public class ShadedAccessControlUtil { * @param proto the protobuf UserPermission * @return the converted UserPermission */ - public static ListMultimap toUserTablePermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserTablePermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -239,12 +238,12 @@ public class ShadedAccessControlUtil { * @return the protobuf UserTablePermissions */ public static AccessControlProtos.UsersAndPermissions - toUserTablePermissions(ListMultimap perm) { + toUserTablePermissions(ListMultimap perm) { AccessControlProtos.UsersAndPermissions.Builder builder = - AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perm.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = - AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); for (UserPermission userPerm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(userPerm.getPermission())); @@ -270,14 +269,14 @@ public class ShadedAccessControlUtil { */ public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { return AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(perm.getUser())) - .setPermission(toPermission(perm.getPermission())).build(); + .setUser(ByteString.copyFromUtf8(perm.getUser())) + .setPermission(toPermission(perm.getPermission())).build(); } public static GrantRequest buildGrantRequest(UserPermission userPermission, - boolean mergeExistingPermissions) { + boolean mergeExistingPermissions) { return GrantRequest.newBuilder().setUserPermission(toUserPermission(userPermission)) - .setMergeExistingPermissions(mergeExistingPermissions).build(); + .setMergeExistingPermissions(mergeExistingPermissions).build(); } public static RevokeRequest buildRevokeRequest(UserPermission userPermission) { @@ -285,9 +284,9 @@ public class ShadedAccessControlUtil { } public static AccessControlProtos.GetUserPermissionsRequest - buildGetUserPermissionsRequest(GetUserPermissionsRequest request) { + buildGetUserPermissionsRequest(GetUserPermissionsRequest request) { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); if (request.getUserName() != null && !request.getUserName().isEmpty()) { builder.setUserName(ByteString.copyFromUtf8(request.getUserName())); } @@ -312,7 +311,7 @@ public class ShadedAccessControlUtil { } public static GetUserPermissionsResponse - buildGetUserPermissionsResponse(final List permissions) { + buildGetUserPermissionsResponse(final List permissions) { GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder(); for (UserPermission 
perm : permissions) { builder.addUserPermission(toUserPermission(perm)); @@ -321,7 +320,7 @@ public class ShadedAccessControlUtil { } public static HasUserPermissionsRequest buildHasUserPermissionsRequest(String userName, - List permissions) { + List permissions) { HasUserPermissionsRequest.Builder builder = HasUserPermissionsRequest.newBuilder(); if (userName != null && !userName.isEmpty()) { builder.setUserName(ByteString.copyFromUtf8(userName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index f17919f70bf..e0a12c7d431 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -15,24 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Represents an authorization for access for the given actions, optionally - * restricted to the given column family or column qualifier, over the - * given table. If the family property is null, it implies - * full table access. + * Represents an authorization for access for the given actions, optionally restricted to the given + * column family or column qualifier, over the given table. If the family property is + * null, it implies full table access. */ @InterfaceAudience.Public public class TablePermission extends Permission { @@ -43,10 +40,10 @@ public class TablePermission extends Permission { /** * Construct a table:family:qualifier permission. - * @param table table name - * @param family family name + * @param table table name + * @param family family name * @param qualifier qualifier name - * @param assigned assigned actions + * @param assigned assigned actions */ TablePermission(TableName table, byte[] family, byte[] qualifier, Action... assigned) { super(assigned); @@ -82,10 +79,10 @@ public class TablePermission extends Permission { /** * Check if given action can performs on given table:family:qualifier. - * @param table table name - * @param family family name + * @param table table name + * @param family family name * @param qualifier qualifier name - * @param action one of [Read, Write, Create, Exec, Admin] + * @param action one of [Read, Write, Create, Exec, Admin] * @return true if can, false otherwise */ public boolean implies(TableName table, byte[] family, byte[] qualifier, Action action) { @@ -103,7 +100,7 @@ public class TablePermission extends Permission { /** * Check if given action can performs on given table:family. - * @param table table name + * @param table table name * @param family family name * @param action one of [Read, Write, Create, Exec, Admin] * @return true if can, false otherwise @@ -131,13 +128,13 @@ public class TablePermission extends Permission { } /** - * Checks if this permission grants access to perform the given action on - * the given table and key value. 
- * @param table the table on which the operation is being performed - * @param kv the KeyValue on which the operation is being requested + * Checks if this permission grants access to perform the given action on the given table and key + * value. + * @param table the table on which the operation is being performed + * @param kv the KeyValue on which the operation is being requested * @param action the action requested - * @return true if the action is allowed over the given scope - * by this permission, otherwise false + * @return true if the action is allowed over the given scope by this permission, + * otherwise false */ public boolean implies(TableName table, KeyValue kv, Action action) { if (failCheckTable(table)) { @@ -168,8 +165,8 @@ public class TablePermission extends Permission { boolean tEq = (table == null && tp.table == null) || (table != null && table.equals(tp.table)); boolean fEq = (family == null && tp.family == null) || Bytes.equals(family, tp.family); - boolean qEq = (qualifier == null && tp.qualifier == null) || - Bytes.equals(qualifier, tp.qualifier); + boolean qEq = + (qualifier == null && tp.qualifier == null) || Bytes.equals(qualifier, tp.qualifier); return tEq && fEq && qEq; } @@ -212,10 +209,9 @@ public class TablePermission extends Permission { protected String rawExpression() { StringBuilder raw = new StringBuilder(); if (table != null) { - raw.append("table=").append(table) - .append(", family=").append(family == null ? null : Bytes.toString(family)) - .append(", qualifier=").append(qualifier == null ? null : Bytes.toString(qualifier)) - .append(", "); + raw.append("table=").append(table).append(", family=") + .append(family == null ? null : Bytes.toString(family)).append(", qualifier=") + .append(qualifier == null ? null : Bytes.toString(qualifier)).append(", "); } return raw.toString() + super.rawExpression(); } @@ -224,7 +220,7 @@ public class TablePermission extends Permission { public void readFields(DataInput in) throws IOException { super.readFields(in); byte[] tableBytes = Bytes.readByteArray(in); - if(tableBytes.length > 0) { + if (tableBytes.length > 0) { table = TableName.valueOf(tableBytes); } if (in.readBoolean()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java index 896ba5251a3..39bd02ccabb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** - * UserPermission consists of a user name and a permission. - * Permission can be one of [Global, Namespace, Table] permission. + * UserPermission consists of a user name and a permission. Permission can be one of [Global, + * Namespace, Table] permission. */ @InterfaceAudience.Public public class UserPermission { @@ -34,7 +32,7 @@ public class UserPermission { /** * Construct a user permission given permission. 
- * @param user user name + * @param user user name * @param permission one of [Global, Namespace, Table] permission */ public UserPermission(String user, Permission permission) { @@ -87,9 +85,8 @@ public class UserPermission { @Override public String toString() { - StringBuilder str = new StringBuilder("UserPermission: ") - .append("user=").append(user) - .append(", ").append(permission.toString()); + StringBuilder str = new StringBuilder("UserPermission: ").append("user=").append(user) + .append(", ").append(permission.toString()); return str.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java index d018ce19921..e9990066050 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java @@ -27,11 +27,10 @@ import org.apache.yetus.audience.InterfaceStability; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving -public abstract class AbstractSaslClientAuthenticationProvider implements - SaslClientAuthenticationProvider { +public abstract class AbstractSaslClientAuthenticationProvider + implements SaslClientAuthenticationProvider { public static final String AUTH_TOKEN_TYPE = "HBASE_AUTH_TOKEN"; - @Override public final String getTokenKind() { // All HBase authentication tokens are "HBASE_AUTH_TOKEN"'s. We differentiate between them diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java index a681d53719d..cdd1fdb381f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -38,12 +37,12 @@ public interface AuthenticationProviderSelector { * {@link #selectProvider(String, User)}. */ void configure(Configuration conf, - Collection availableProviders); + Collection availableProviders); /** - * Chooses the authentication provider which should be used given the provided client context - * from the authentication providers passed in via {@link #configure(Configuration, Collection)}. + * Chooses the authentication provider which should be used given the provided client context from + * the authentication providers passed in via {@link #configure(Configuration, Collection)}. 
*/ - Pair> selectProvider( - String clusterId, User user); + Pair> + selectProvider(String clusterId, User user); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java index 752003dad8c..2c9968f6f71 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java @@ -21,9 +21,7 @@ import static java.util.Objects.requireNonNull; import java.util.Collection; import java.util.Objects; - import net.jcip.annotations.NotThreadSafe; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -39,14 +37,12 @@ import org.slf4j.LoggerFactory; /** * Default implementation of {@link AuthenticationProviderSelector} which can choose from the * authentication implementations which HBase provides out of the box: Simple, Kerberos, and - * Delegation Token authentication. - * - * This implementation will ignore any {@link SaslAuthenticationProvider}'s which are available - * on the classpath or specified in the configuration because HBase cannot correctly choose which - * token should be returned to a client when multiple are present. It is expected that users - * implement their own {@link AuthenticationProviderSelector} when writing a custom provider. - * - * This implementation is not thread-safe. {@link #configure(Configuration, Collection)} and + * Delegation Token authentication. This implementation will ignore any + * {@link SaslAuthenticationProvider}'s which are available on the classpath or specified in the + * configuration because HBase cannot correctly choose which token should be returned to a client + * when multiple are present. It is expected that users implement their own + * {@link AuthenticationProviderSelector} when writing a custom provider. This implementation is not + * thread-safe. {@link #configure(Configuration, Collection)} and * {@link #selectProvider(String, User)} is not safe if they are called concurrently. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @@ -61,8 +57,8 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { Text digestAuthTokenKind = null; @Override - public void configure( - Configuration conf, Collection providers) { + public void configure(Configuration conf, + Collection providers) { if (this.conf != null) { throw new IllegalStateException("configure() should only be called once"); } @@ -73,19 +69,19 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { if (SimpleSaslAuthenticationProvider.SASL_AUTH_METHOD.getName().contentEquals(name)) { if (simpleAuth != null) { throw new IllegalStateException( - "Encountered multiple SimpleSaslClientAuthenticationProvider instances"); + "Encountered multiple SimpleSaslClientAuthenticationProvider instances"); } simpleAuth = (SimpleSaslClientAuthenticationProvider) provider; } else if (GssSaslAuthenticationProvider.SASL_AUTH_METHOD.getName().equals(name)) { if (krbAuth != null) { throw new IllegalStateException( - "Encountered multiple GssSaslClientAuthenticationProvider instances"); + "Encountered multiple GssSaslClientAuthenticationProvider instances"); } krbAuth = (GssSaslClientAuthenticationProvider) provider; } else if (DigestSaslAuthenticationProvider.SASL_AUTH_METHOD.getName().equals(name)) { if (digestAuth != null) { throw new IllegalStateException( - "Encountered multiple DigestSaslClientAuthenticationProvider instances"); + "Encountered multiple DigestSaslClientAuthenticationProvider instances"); } digestAuth = (DigestSaslClientAuthenticationProvider) provider; digestAuthTokenKind = new Text(digestAuth.getTokenKind()); @@ -95,13 +91,13 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { } if (simpleAuth == null || krbAuth == null || digestAuth == null) { throw new IllegalStateException("Failed to load SIMPLE, KERBEROS, and DIGEST authentication " - + "providers. Classpath is not sane."); + + "providers. Classpath is not sane."); } } @Override - public Pair> selectProvider( - String clusterId, User user) { + public Pair> + selectProvider(String clusterId, User user) { requireNonNull(clusterId, "Null clusterId was given"); requireNonNull(user, "Null user was given"); @@ -117,10 +113,11 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { // (for whatever that's worth). for (Token token : user.getTokens()) { // We need to check for two things: - // 1. This token is for the HBase cluster we want to talk to - // 2. We have suppporting client implementation to handle the token (the "kind" of token) - if (clusterIdAsText.equals(token.getService()) && - digestAuthTokenKind.equals(token.getKind())) { + // 1. This token is for the HBase cluster we want to talk to + // 2. 
We have suppporting client implementation to handle the token (the "kind" of token) + if ( + clusterIdAsText.equals(token.getService()) && digestAuthTokenKind.equals(token.getKind()) + ) { return new Pair<>(digestAuth, token); } } @@ -128,15 +125,17 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { final UserGroupInformation currentUser = user.getUGI(); // May be null if Hadoop AuthenticationMethod is PROXY final UserGroupInformation realUser = currentUser.getRealUser(); - if (currentUser.hasKerberosCredentials() || - (realUser != null && realUser.hasKerberosCredentials())) { + if ( + currentUser.hasKerberosCredentials() + || (realUser != null && realUser.hasKerberosCredentials()) + ) { return new Pair<>(krbAuth, null); } // This indicates that a client is requesting some authentication mechanism which the servers // don't know how to process (e.g. there is no provider which can support it). This may be // a bug or simply a misconfiguration of client *or* server. LOG.warn("No matching SASL authentication provider and supporting token found from providers" - + " for user: {}", user); + + " for user: {}", user); return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java index c1b7ddb7c55..712d4035448 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java @@ -20,9 +20,8 @@ package org.apache.hadoop.hbase.security.provider; import org.apache.yetus.audience.InterfaceAudience; /** - * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. - * - * HBase users should take care to note that this class (and its sub-classes) are marked with the + * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. HBase + * users should take care to note that this class (and its sub-classes) are marked with the * {@code InterfaceAudience.Private} annotation. These implementations are available for users to * read, copy, and modify, but should not be extended or re-used in binary form. There are no * compatibility guarantees provided for implementations of this class. 
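For illustration, a minimal client-side sketch of how the SASL pieces touched above fit together: the built-in providers (SIMPLE, KERBEROS, DIGEST) are discovered via ServiceLoader, extra providers and a custom selector can be supplied through configuration, and BuiltInProviderSelector then prefers DIGEST when a delegation token for the target cluster is present, otherwise Kerberos. The two property keys below are the ones defined in SaslClientAuthenticationProviders later in this patch; com.example.MyTokenProvider and com.example.MySelector are hypothetical names used only for this sketch, not classes shipped with HBase.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: registering a hypothetical extra SASL client authentication
// provider and a hypothetical selector with an HBase client Configuration.
public class CustomSaslProviderWiring {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated class names loaded in addition to the ServiceLoader-registered
    // built-ins; each must implement SaslClientAuthenticationProvider.
    conf.set("hbase.client.sasl.provider.extras", "com.example.MyTokenProvider");
    // Selector that decides which provider (and token) to use for a given cluster and
    // user; BuiltInProviderSelector is the default and ignores non-built-in providers.
    conf.set("hbase.client.sasl.provider.class", "com.example.MySelector");
    return conf;
  }
}

A custom selector is needed in that scenario because, as the Javadoc above notes, BuiltInProviderSelector deliberately ignores providers beyond the built-ins since it cannot decide which token to hand to the client when several token-backed providers are present.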
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java index 7cbdecd642b..d71c07d1575 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class DigestSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "DIGEST", (byte)82, "DIGEST-MD5", AuthenticationMethod.TOKEN); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("DIGEST", (byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java index a84f24b9080..480e724599b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; @@ -30,7 +29,6 @@ import javax.security.sasl.RealmCallback; import javax.security.sasl.RealmChoiceCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -45,19 +43,19 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformati @InterfaceAudience.Private public class DigestSaslClientAuthenticationProvider extends DigestSaslAuthenticationProvider - implements SaslClientAuthenticationProvider { + implements SaslClientAuthenticationProvider { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); + null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); } public static class DigestSaslClientCallbackHandler implements CallbackHandler { private static final Logger LOG = - LoggerFactory.getLogger(DigestSaslClientCallbackHandler.class); + LoggerFactory.getLogger(DigestSaslClientCallbackHandler.class); private final String userName; private final char[] userPassword; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java index 07101848e50..7dea40f2657 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class GssSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "KERBEROS", (byte)81, "GSSAPI", AuthenticationMethod.KERBEROS); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("KERBEROS", (byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java index 21a4828b49e..218fd13b60c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java @@ -20,10 +20,8 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityConstants; @@ -41,9 +39,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformati @InterfaceAudience.Private public class GssSaslClientAuthenticationProvider extends GssSaslAuthenticationProvider - implements SaslClientAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - GssSaslClientAuthenticationProvider.class); + implements SaslClientAuthenticationProvider { + private static final Logger LOG = + LoggerFactory.getLogger(GssSaslClientAuthenticationProvider.class); private static boolean useCanonicalHostname(Configuration conf) { return !conf.getBoolean( @@ -57,10 +55,9 @@ public class GssSaslClientAuthenticationProvider extends GssSaslAuthenticationPr if (useCanonicalHostname(conf)) { hostname = addr.getCanonicalHostName(); if (hostname.equals(addr.getHostAddress())) { - LOG.warn("Canonical hostname for SASL principal is the same with IP address: " - + hostname + ", " + addr.getHostName() + ". Check DNS configuration or consider " - + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS - + "=true"); + LOG.warn("Canonical hostname for SASL principal is the same with IP address: " + hostname + + ", " + addr.getHostName() + ". 
Check DNS configuration or consider " + + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS + "=true"); } } else { hostname = addr.getHostName(); @@ -70,30 +67,30 @@ public class GssSaslClientAuthenticationProvider extends GssSaslAuthenticationPr } String getServerPrincipal(Configuration conf, SecurityInfo securityInfo, InetAddress server) - throws IOException { + throws IOException { String hostname = getHostnameForServerPrincipal(conf, server); String serverKey = securityInfo.getServerPrincipal(); if (serverKey == null) { throw new IllegalArgumentException( - "Can't obtain server Kerberos config key from SecurityInfo"); + "Can't obtain server Kerberos config key from SecurityInfo"); } return SecurityUtil.getServerPrincipal(conf.get(serverKey), hostname); } @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { String serverPrincipal = getServerPrincipal(conf, securityInfo, serverAddr); LOG.debug("Setting up Kerberos RPC to server={}", serverPrincipal); String[] names = SaslUtil.splitKerberosName(serverPrincipal); if (names.length != 3) { - throw new IOException("Kerberos principal '" + serverPrincipal - + "' does not have the expected format"); + throw new IOException( + "Kerberos principal '" + serverPrincipal + "' does not have the expected format"); } return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - names[0], names[1], saslProps, null); + names[0], names[1], saslProps, null); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java index 7930564cb9f..edea8a46399 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Objects; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -78,19 +77,13 @@ public class SaslAuthMethod { return false; } SaslAuthMethod other = (SaslAuthMethod) o; - return Objects.equals(name, other.name) && - code == other.code && - Objects.equals(saslMech, other.saslMech) && - Objects.equals(method, other.method); + return Objects.equals(name, other.name) && code == other.code + && Objects.equals(saslMech, other.saslMech) && Objects.equals(method, other.method); } @Override public int hashCode() { - return new HashCodeBuilder() - .append(name) - .append(code) - .append(saslMech) - .append(method) - .toHashCode(); + return new HashCodeBuilder().append(name).append(code).append(saslMech).append(method) + .toHashCode(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java index 1f6d821ce95..99e2916fa51 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java @@ -22,13 +22,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. - * It is suggested that custom implementations extend the abstract class in the type hierarchy - * instead of directly implementing this interface (clients have a base class available, but - * servers presently do not). - * - * Implementations of this interface must be unique among each other via the {@code byte} - * returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. + * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. It is + * suggested that custom implementations extend the abstract class in the type hierarchy instead of + * directly implementing this interface (clients have a base class available, but servers presently + * do not). Implementations of this interface must be unique among each other via the + * {@code byte} returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java index 4b1cabcfc49..bbc5ddac91a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -38,10 +36,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformati /** * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. * Implementations should not directly implement this interface, but instead extend - * {@link AbstractSaslClientAuthenticationProvider}. - * - * Implementations of this interface must make an implementation of {@code hashCode()} - * which returns the same value across multiple instances of the provider implementation. + * {@link AbstractSaslClientAuthenticationProvider}. Implementations of this interface must make an + * implementation of {@code hashCode()} which returns the same value across multiple instances of + * the provider implementation. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -51,8 +48,8 @@ public interface SaslClientAuthenticationProvider extends SaslAuthenticationProv * Creates the SASL client instance for this auth'n method. */ SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, - Token token, boolean fallbackAllowed, - Map saslProps) throws IOException; + Token token, boolean fallbackAllowed, Map saslProps) + throws IOException; /** * Constructs a {@link UserInformation} from the given {@link UserGroupInformation} @@ -60,18 +57,15 @@ public interface SaslClientAuthenticationProvider extends SaslAuthenticationProv UserInformation getUserInfo(User user); /** - * Returns the "real" user, the user who has the credentials being authenticated by the - * remote service, in the form of an {@link UserGroupInformation} object. - * - * It is common in the Hadoop "world" to have distinct notions of a "real" user and a "proxy" - * user. A "real" user is the user which actually has the credentials (often, a Kerberos ticket), - * but some code may be running as some other user who has no credentials. This method gives - * the authentication provider a chance to acknowledge this is happening and ensure that any - * RPCs are executed with the real user's credentials, because executing them as the proxy user - * would result in failure because no credentials exist to authenticate the RPC. - * - * Not all implementations will need to implement this method. By default, the provided User's - * UGI is returned directly. + * Returns the "real" user, the user who has the credentials being authenticated by the remote + * service, in the form of an {@link UserGroupInformation} object. 
It is common in the Hadoop + * "world" to have distinct notions of a "real" user and a "proxy" user. A "real" user is the user + * which actually has the credentials (often, a Kerberos ticket), but some code may be running as + * some other user who has no credentials. This method gives the authentication provider a chance + * to acknowledge this is happening and ensure that any RPCs are executed with the real user's + * credentials, because executing them as the proxy user would result in failure because no + * credentials exist to authenticate the RPC. Not all implementations will need to implement this + * method. By default, the provided User's UGI is returned directly. */ default UserGroupInformation getRealUser(User ugi) { return ugi.getUGI(); @@ -86,8 +80,9 @@ public interface SaslClientAuthenticationProvider extends SaslAuthenticationProv } /** - * Executes any necessary logic to re-login the client. Not all implementations will have - * any logic that needs to be executed. + * Executes any necessary logic to re-login the client. Not all implementations will have any + * logic that needs to be executed. */ - default void relogin() throws IOException {} + default void relogin() throws IOException { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java index aaaee003c59..befd52c4a37 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Optional; import java.util.ServiceLoader; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -43,21 +42,20 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving public final class SaslClientAuthenticationProviders { - private static final Logger LOG = LoggerFactory.getLogger( - SaslClientAuthenticationProviders.class); + private static final Logger LOG = + LoggerFactory.getLogger(SaslClientAuthenticationProviders.class); public static final String SELECTOR_KEY = "hbase.client.sasl.provider.class"; public static final String EXTRA_PROVIDERS_KEY = "hbase.client.sasl.provider.extras"; private static final AtomicReference providersRef = - new AtomicReference<>(); + new AtomicReference<>(); private final Collection providers; private final AuthenticationProviderSelector selector; - private SaslClientAuthenticationProviders( - Collection providers, - AuthenticationProviderSelector selector) { + private SaslClientAuthenticationProviders(Collection providers, + AuthenticationProviderSelector selector) { this.providers = providers; this.selector = selector; } @@ -90,16 +88,16 @@ public final class SaslClientAuthenticationProviders { } /** - * Adds the given {@code provider} to the set, only if an equivalent provider does not - * already exist in the set. 
+ * Adds the given {@code provider} to the set, only if an equivalent provider does not already + * exist in the set. */ static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, - HashMap providers) { + HashMap providers) { Byte code = provider.getSaslAuthMethod().getCode(); SaslClientAuthenticationProvider existingProvider = providers.get(code); if (existingProvider != null) { throw new RuntimeException("Already registered authentication provider with " + code + " " - + existingProvider.getClass()); + + existingProvider.getClass()); } providers.put(code, provider); } @@ -108,9 +106,9 @@ public final class SaslClientAuthenticationProviders { * Instantiates the ProviderSelector implementation from the provided configuration. */ static AuthenticationProviderSelector instantiateSelector(Configuration conf, - Collection providers) { - Class clz = conf.getClass( - SELECTOR_KEY, BuiltInProviderSelector.class, AuthenticationProviderSelector.class); + Collection providers) { + Class clz = conf.getClass(SELECTOR_KEY, + BuiltInProviderSelector.class, AuthenticationProviderSelector.class); try { AuthenticationProviderSelector selector = clz.getConstructor().newInstance(); selector.configure(conf, providers); @@ -118,10 +116,10 @@ public final class SaslClientAuthenticationProviders { LOG.trace("Loaded ProviderSelector {}", selector.getClass()); } return selector; - } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | - InvocationTargetException e) { - throw new RuntimeException("Failed to instantiate " + clz + - " as the ProviderSelector defined by " + SELECTOR_KEY, e); + } catch (InstantiationException | IllegalAccessException | NoSuchMethodException + | InvocationTargetException e) { + throw new RuntimeException( + "Failed to instantiate " + clz + " as the ProviderSelector defined by " + SELECTOR_KEY, e); } } @@ -129,8 +127,8 @@ public final class SaslClientAuthenticationProviders { * Extracts and instantiates authentication providers from the configuration. 
*/ static void addExplicitProviders(Configuration conf, - HashMap providers) { - for(String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { + HashMap providers) { + for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { Class clz; // Load the class from the config try { @@ -143,7 +141,7 @@ public final class SaslClientAuthenticationProviders { // Make sure it's the right type if (!SaslClientAuthenticationProvider.class.isAssignableFrom(clz)) { LOG.warn("Ignoring SaslClientAuthenticationProvider {} because it is not an instance of" - + " SaslClientAuthenticationProvider", clz); + + " SaslClientAuthenticationProvider", clz); continue; } @@ -152,7 +150,7 @@ public final class SaslClientAuthenticationProviders { try { provider = (SaslClientAuthenticationProvider) clz.getConstructor().newInstance(); } catch (InstantiationException | IllegalAccessException | NoSuchMethodException - | InvocationTargetException e) { + | InvocationTargetException e) { LOG.warn("Failed to instantiate SaslClientAuthenticationProvider {}", clz, e); continue; } @@ -169,21 +167,20 @@ public final class SaslClientAuthenticationProviders { */ static SaslClientAuthenticationProviders instantiate(Configuration conf) { ServiceLoader loader = - ServiceLoader.load(SaslClientAuthenticationProvider.class); - HashMap providerMap = new HashMap<>(); + ServiceLoader.load(SaslClientAuthenticationProvider.class); + HashMap providerMap = new HashMap<>(); for (SaslClientAuthenticationProvider provider : loader) { addProviderIfNotExists(provider, providerMap); } addExplicitProviders(conf, providerMap); - Collection providers = Collections.unmodifiableCollection( - providerMap.values()); + Collection providers = + Collections.unmodifiableCollection(providerMap.values()); if (LOG.isTraceEnabled()) { - String loadedProviders = providers.stream() - .map((provider) -> provider.getClass().getName()) - .collect(Collectors.joining(", ")); + String loadedProviders = providers.stream().map((provider) -> provider.getClass().getName()) + .collect(Collectors.joining(", ")); LOG.trace("Found SaslClientAuthenticationProviders {}", loadedProviders); } @@ -192,16 +189,13 @@ public final class SaslClientAuthenticationProviders { } /** - * Returns the provider and token pair for SIMPLE authentication. - * - * This method is a "hack" while SIMPLE authentication for HBase does not flow through - * the SASL codepath. + * Returns the provider and token pair for SIMPLE authentication. This method is a "hack" while + * SIMPLE authentication for HBase does not flow through the SASL codepath. */ public Pair> - getSimpleProvider() { + getSimpleProvider() { Optional optional = providers.stream() - .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider) - .findFirst(); + .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider).findFirst(); return new Pair<>(optional.get(), null); } @@ -209,15 +203,14 @@ public final class SaslClientAuthenticationProviders { * Chooses the best authentication provider and corresponding token given the HBase cluster * identifier and the user. 
*/ - public Pair> selectProvider( - String clusterId, User clientUser) { + public Pair> + selectProvider(String clusterId, User clientUser) { return selector.selectProvider(clusterId, clientUser); } @Override public String toString() { - return providers.stream() - .map((p) -> p.getClass().getName()) - .collect(Collectors.joining(", ", "providers=[", "], selector=")) + selector.getClass(); + return providers.stream().map((p) -> p.getClass().getName()) + .collect(Collectors.joining(", ", "providers=[", "], selector=")) + selector.getClass(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java index 3f1122c7541..01b1f452685 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java @@ -25,8 +25,8 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public class SimpleSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "SIMPLE", (byte)80, "", AuthenticationMethod.SIMPLE); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("SIMPLE", (byte) 80, "", AuthenticationMethod.SIMPLE); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java index 3a9142f34c4..6fff703689c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; @@ -34,13 +32,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @InterfaceAudience.Private -public class SimpleSaslClientAuthenticationProvider extends - SimpleSaslAuthenticationProvider implements SaslClientAuthenticationProvider { +public class SimpleSaslClientAuthenticationProvider extends SimpleSaslAuthenticationProvider + implements SaslClientAuthenticationProvider { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java index 1e4a529f1ea..f56a6899462 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; +import com.google.protobuf.ByteString; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.TokenIdentifier; - -import com.google.protobuf.ByteString; +import org.apache.yetus.audience.InterfaceAudience; /** * Represents the identity information stored in an HBase authentication token. @@ -51,8 +48,8 @@ public class AuthenticationTokenIdentifier extends TokenIdentifier { this.username = username; } - public AuthenticationTokenIdentifier(String username, int keyId, - long issueDate, long expirationDate) { + public AuthenticationTokenIdentifier(String username, int keyId, long issueDate, + long expirationDate) { this.username = username; this.keyId = keyId; this.issueDate = issueDate; @@ -114,15 +111,13 @@ public class AuthenticationTokenIdentifier extends TokenIdentifier { public byte[] toBytes() { AuthenticationProtos.TokenIdentifier.Builder builder = - AuthenticationProtos.TokenIdentifier.newBuilder(); + AuthenticationProtos.TokenIdentifier.newBuilder(); builder.setKind(AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN); if (username != null) { builder.setUsername(ByteString.copyFromUtf8(username)); } - builder.setIssueDate(issueDate) - .setExpirationDate(expirationDate) - .setKeyId(keyId) - .setSequenceNumber(sequenceNumber); + builder.setIssueDate(issueDate).setExpirationDate(expirationDate).setKeyId(keyId) + .setSequenceNumber(sequenceNumber); return builder.build().toByteArray(); } @@ -143,9 +138,11 @@ public class AuthenticationTokenIdentifier extends TokenIdentifier { ProtobufUtil.mergeFrom(builder, inBytes); AuthenticationProtos.TokenIdentifier identifier = builder.build(); // sanity check on type - if (!identifier.hasKind() || - identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { - throw new IOException("Invalid TokenIdentifier kind from input "+identifier.getKind()); + if ( + !identifier.hasKind() + || identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN + ) { + throw new IOException("Invalid TokenIdentifier kind from input " + identifier.getKind()); } // copy the field values @@ -172,26 +169,22 @@ public class AuthenticationTokenIdentifier extends TokenIdentifier { return false; } if (other instanceof AuthenticationTokenIdentifier) { - AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other; - return sequenceNumber == ident.getSequenceNumber() - && keyId == ident.getKeyId() - && issueDate == ident.getIssueDate() - && expirationDate == ident.getExpirationDate() - && (username == null ? ident.getUsername() == null : - username.equals(ident.getUsername())); + AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier) other; + return sequenceNumber == ident.getSequenceNumber() && keyId == ident.getKeyId() + && issueDate == ident.getIssueDate() && expirationDate == ident.getExpirationDate() + && (username == null ? 
ident.getUsername() == null : username.equals(ident.getUsername())); } return false; } @Override public int hashCode() { - return (int)sequenceNumber; + return (int) sequenceNumber; } @Override public String toString() { - return "(username=" + username + ", keyId=" - + keyId + ", issueDate=" + issueDate - + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; + return "(username=" + username + ", keyId=" + keyId + ", issueDate=" + issueDate + + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java index 39959ef61db..1dbc7c8cd2f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java @@ -15,22 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; import java.util.Collection; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private -public class AuthenticationTokenSelector - implements TokenSelector { +public class AuthenticationTokenSelector implements TokenSelector { private static final Logger LOG = LoggerFactory.getLogger(AuthenticationTokenSelector.class); public AuthenticationTokenSelector() { @@ -38,15 +35,17 @@ public class AuthenticationTokenSelector @Override public Token selectToken(Text serviceName, - Collection> tokens) { + Collection> tokens) { if (serviceName != null) { for (Token ident : tokens) { - if (serviceName.equals(ident.getService()) && - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) { + if ( + serviceName.equals(ident.getService()) + && AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind()) + ) { if (LOG.isDebugEnabled()) { - LOG.debug("Returning token "+ident); + LOG.debug("Returning token " + ident); } - return (Token)ident; + return (Token) ident; } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java index 09398edc589..c8862bded1f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
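Editor's note: for context on the AuthenticationTokenSelector hunk above, the selector just walks a user's credentials looking for a token whose service matches the cluster id and whose kind is HBASE_AUTH_TOKEN. A hedged usage sketch; the "cluster-id" value is a placeholder for the real token service name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class TokenSelectorSketch {
      public static void main(String[] args) throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // Pick the cached HBase delegation token for this cluster, if any.
        Token<AuthenticationTokenIdentifier> token =
          new AuthenticationTokenSelector().selectToken(new Text("cluster-id"), ugi.getTokens());
        System.out.println(token == null ? "no HBase auth token cached" : token.getKind().toString());
      }
    }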
*/ - package org.apache.hadoop.hbase.security.token; import com.google.protobuf.ByteString; @@ -35,6 +34,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -47,7 +47,8 @@ public final class ClientTokenUtil { // Set in TestClientTokenUtil via reflection private static ServiceException injectedException; - private ClientTokenUtil() {} + private ClientTokenUtil() { + } private static void injectFault() throws ServiceException { if (injectedException != null) { @@ -62,20 +63,18 @@ public final class ClientTokenUtil { * @return the authentication token instance */ @InterfaceAudience.Private - public static Token obtainToken( - Connection conn) throws IOException { + public static Token obtainToken(Connection conn) + throws IOException { Table meta = null; try { injectFault(); meta = conn.getTable(TableName.META_TABLE_NAME); - CoprocessorRpcChannel rpcChannel = meta.coprocessorService( - HConstants.EMPTY_START_ROW); + CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); AuthenticationProtos.AuthenticationService.BlockingInterface service = - AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - AuthenticationProtos.GetAuthenticationTokenResponse response = - service.getAuthenticationToken(null, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); + AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); + AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken( + null, AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); return toToken(response.getToken()); } catch (ServiceException se) { @@ -89,7 +88,6 @@ public final class ClientTokenUtil { /** * Converts a Token instance (with embedded identifier) to the protobuf representation. - * * @param token the Token instance to copy * @return the protobuf Token message */ @@ -106,17 +104,15 @@ public final class ClientTokenUtil { /** * Converts a protobuf Token message back into a Token instance. - * * @param proto the protobuf Token message * @return the Token instance */ @InterfaceAudience.Private static Token toToken(AuthenticationProtos.Token proto) { - return new Token<>( - proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, - proto.hasPassword() ? proto.getPassword().toByteArray() : null, - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, - proto.hasService() ? new Text(proto.getService().toStringUtf8()) : null); + return new Token<>(proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, + proto.hasPassword() ? proto.getPassword().toByteArray() : null, + AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, + proto.hasService() ? new Text(proto.getService().toStringUtf8()) : null); } /** @@ -126,8 +122,8 @@ public final class ClientTokenUtil { * @return the authentication token instance */ @InterfaceAudience.Private - static Token obtainToken( - final Connection conn, User user) throws IOException, InterruptedException { + static Token obtainToken(final Connection conn, User user) + throws IOException, InterruptedException { return user.runAs(new PrivilegedExceptionAction>() { @Override public Token run() throws Exception { @@ -137,16 +133,14 @@ public final class ClientTokenUtil { } /** - * Obtain an authentication token for the given user and add it to the - * user's credentials. 
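Editor's note: the ClientTokenUtil hunks here only reflow code, but the call pattern they touch is worth one short sketch: obtain a delegation token over the AuthenticationService coprocessor endpoint and cache it in the user's credentials. This is a sketch only; it assumes a secure cluster with the token provider coprocessor installed:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.token.ClientTokenUtil;

    public class ObtainTokenSketch {
      public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          User user = User.getCurrent();
          // Fetches an HBASE_AUTH_TOKEN via the meta table's coprocessor channel
          // and adds it to the user's credentials, as in the method reformatted above.
          ClientTokenUtil.obtainAndCacheToken(conn, user);
        }
      }
    }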
+ * Obtain an authentication token for the given user and add it to the user's credentials. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @throws IOException If making a remote call to the authentication service fails + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ - public static void obtainAndCacheToken(final Connection conn, - User user) - throws IOException, InterruptedException { + public static void obtainAndCacheToken(final Connection conn, User user) + throws IOException, InterruptedException { try { Token token = obtainToken(conn, user); @@ -154,15 +148,14 @@ public final class ClientTokenUtil { throw new IOException("No token returned for user " + user.getName()); } if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName()); } user.addToken(token); } catch (IOException | InterruptedException | RuntimeException e) { throw e; } catch (Exception e) { throw new UndeclaredThrowableException(e, - "Unexpected exception obtaining token for user " + user.getName()); + "Unexpected exception obtaining token for user " + user.getName()); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java index f8ac1b96609..f15bab6c095 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -31,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience; public class Authorizations { private List labels; + public Authorizations(String... labels) { this.labels = new ArrayList<>(labels.length); Collections.addAll(this.labels, labels); @@ -43,12 +43,12 @@ public class Authorizations { public List getLabels() { return Collections.unmodifiableList(this.labels); } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("[ "); - for (String label: labels) { + for (String label : labels) { sb.append(label); sb.append(' '); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java index 6cf8fb748df..8abaee00509 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.security.visibility; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * This contains a visibility expression which can be associated with a cell. When it is set with a * Mutation, all the cells in that mutation will get associated with this expression. A visibility - * expression can contain visibility labels combined with logical - * operators AND(&), OR(|) and NOT(!) + * expression can contain visibility labels combined with logical operators AND(&), OR(|) and + * NOT(!) */ @InterfaceAudience.Public public class CellVisibility { @@ -48,25 +48,22 @@ public class CellVisibility { } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' */ public static String quote(String auth) { return quote(Bytes.toBytes(auth)); } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' */ public static String quote(byte[] auth) { int escapeChars = 0; for (int i = 0; i < auth.length; i++) - if (auth[i] == '"' || auth[i] == '\\') - escapeChars++; + if (auth[i] == '"' || auth[i] == '\\') escapeChars++; byte[] escapedAuth = new byte[auth.length + escapeChars + 2]; int index = 1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java index 778288d4c03..e9160ec976c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java @@ -28,4 +28,3 @@ public class InvalidLabelException extends DoNotRetryIOException { super(msg); } } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 35564d626e8..cca5e8362ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
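Editor's note: the Authorizations and CellVisibility hunks above are Javadoc reflows; as a quick illustration of the two classes they document, cells are written with a visibility expression and read back only when the scan carries matching authorizations. Table, family, row and label names below are placeholders:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.security.visibility.Authorizations;
    import org.apache.hadoop.hbase.security.visibility.CellVisibility;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellVisibilitySketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(TableName.valueOf("demo"))) { // placeholder table
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          // Labels combine with & (AND), | (OR) and ! (NOT); quote() escapes special characters.
          put.setCellVisibility(new CellVisibility("secret & !" + CellVisibility.quote("public-web")));
          table.put(put);

          Scan scan = new Scan();
          scan.setAuthorizations(new Authorizations("secret")); // labels granted to the reader
          try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
              System.out.println(result);
            }
          }
        }
      }
    }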
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,11 +21,9 @@ import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LA import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; - import java.io.IOException; import java.util.Map; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Connection; @@ -48,7 +46,6 @@ import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility client for doing visibility labels admin operations. */ @@ -58,140 +55,106 @@ public class VisibilityClient { /** * Return true if cell visibility features are supported and enabled * @param connection The connection to use - * @return true if cell visibility features are supported and enabled, false otherwise - * @throws IOException + * @return true if cell visibility features are supported and enabled, false otherwise n */ public static boolean isCellVisibilityEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() - .contains(SecurityCapability.CELL_VISIBILITY); + .contains(SecurityCapability.CELL_VISIBILITY); } /** - * Utility method for adding label to the system. - * - * @param conf - * @param label - * @return VisibilityLabelsResponse - * @throws Throwable - * @deprecated Use {@link #addLabel(Connection,String)} instead. + * Utility method for adding label to the system. nnnn * @deprecated Use + * {@link #addLabel(Connection,String)} instead. */ @Deprecated public static VisibilityLabelsResponse addLabel(Configuration conf, final String label) - throws Throwable { + throws Throwable { try (Connection connection = ConnectionFactory.createConnection(conf)) { return addLabels(connection, new String[] { label }); } } /** - * Utility method for adding label to the system. - * - * @param connection - * @param label - * @return VisibilityLabelsResponse - * @throws Throwable + * Utility method for adding label to the system. nnnn */ public static VisibilityLabelsResponse addLabel(Connection connection, final String label) - throws Throwable { + throws Throwable { return addLabels(connection, new String[] { label }); } /** - * Utility method for adding labels to the system. - * - * @param conf - * @param labels - * @return VisibilityLabelsResponse - * @throws Throwable - * @deprecated Use {@link #addLabels(Connection,String[])} instead. + * Utility method for adding labels to the system. nnnn * @deprecated Use + * {@link #addLabels(Connection,String[])} instead. */ @Deprecated public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels) - throws Throwable { + throws Throwable { try (Connection connection = ConnectionFactory.createConnection(conf)) { return addLabels(connection, labels); } } /** - * Utility method for adding labels to the system. - * - * @param connection - * @param labels - * @return VisibilityLabelsResponse - * @throws Throwable + * Utility method for adding labels to the system. 
nnnn */ public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels) - throws Throwable { + throws Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); - builder.addVisLabel(newBuilder.build()); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); + builder.addVisLabel(newBuilder.build()); + } } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + }; Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } } /** - * Sets given labels globally authorized for the user. - * @param conf - * @param auths - * @param user - * @return VisibilityLabelsResponse - * @throws Throwable - * @deprecated Use {@link #setAuths(Connection,String[],String)} instead. + * Sets given labels globally authorized for the user. nnnnn * @deprecated Use + * {@link #setAuths(Connection,String[],String)} instead. */ @Deprecated public static VisibilityLabelsResponse setAuths(Configuration conf, final String[] auths, - final String user) throws Throwable { + final String user) throws Throwable { try (Connection connection = ConnectionFactory.createConnection(conf)) { return setOrClearAuths(connection, auths, user, true); } } /** - * Sets given labels globally authorized for the user. - * @param connection - * @param auths - * @param user - * @return VisibilityLabelsResponse - * @throws Throwable + * Sets given labels globally authorized for the user. nnnnn */ public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths, - final String user) throws Throwable { + final String user) throws Throwable { return setOrClearAuths(connection, auths, user, true); } /** - * @param conf - * @param user - * @return labels, the given user is globally authorized for. 
- * @throws Throwable - * @deprecated Use {@link #getAuths(Connection,String)} instead. + * nn * @return labels, the given user is globally authorized for. n * @deprecated Use + * {@link #getAuths(Connection,String)} instead. */ @Deprecated public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable { @@ -201,52 +164,47 @@ public class VisibilityClient { } /** - * @param connection the Connection instance to use. - * @param user - * @return labels, the given user is globally authorized for. - * @throws Throwable + * @param connection the Connection instance to use. n * @return labels, the given user is + * globally authorized for. n */ public static GetAuthsResponse getAuths(Connection connection, final String user) - throws Throwable { + throws Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); + @Override + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - return response; - } - }; - Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + }; + Map result = table.coprocessorService(VisibilityLabelsService.class, + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } } /** - * Retrieve the list of visibility labels defined in the system. - * @param conf - * @param regex The regular expression to filter which labels are returned. - * @return labels The list of visibility labels defined in the system. - * @throws Throwable - * @deprecated Use {@link #listLabels(Connection,String)} instead. + * Retrieve the list of visibility labels defined in the system. n * @param regex The regular + * expression to filter which labels are returned. + * @return labels The list of visibility labels defined in the system. n * @deprecated Use + * {@link #listLabels(Connection,String)} instead. 
*/ @Deprecated public static ListLabelsResponse listLabels(Configuration conf, final String regex) - throws Throwable { - try(Connection connection = ConnectionFactory.createConnection(conf)){ + throws Throwable { + try (Connection connection = ConnectionFactory.createConnection(conf)) { return listLabels(connection, regex); } } @@ -254,108 +212,97 @@ public class VisibilityClient { /** * Retrieve the list of visibility labels defined in the system. * @param connection The Connection instance to use. - * @param regex The regular expression to filter which labels are returned. - * @return labels The list of visibility labels defined in the system. - * @throws Throwable + * @param regex The regular expression to filter which labels are returned. + * @return labels The list of visibility labels defined in the system. n */ public static ListLabelsResponse listLabels(Connection connection, final String regex) - throws Throwable { + throws Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { - ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); - if (regex != null) { - // Compile the regex here to catch any regex exception earlier. - Pattern pattern = Pattern.compile(regex); - listAuthLabelsReqBuilder.setRegex(pattern.toString()); + @Override + public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { + ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); + if (regex != null) { + // Compile the regex here to catch any regex exception earlier. + Pattern pattern = Pattern.compile(regex); + listAuthLabelsReqBuilder.setRegex(pattern.toString()); + } + service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); + ListLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); - ListLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + }; Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } } /** - * Removes given labels from user's globally authorized list of labels. - * @param conf - * @param auths - * @param user - * @return VisibilityLabelsResponse - * @throws Throwable - * @deprecated Use {@link #clearAuths(Connection,String[],String)} instead. + * Removes given labels from user's globally authorized list of labels. nnnnn * @deprecated Use + * {@link #clearAuths(Connection,String[],String)} instead. 
*/ @Deprecated public static VisibilityLabelsResponse clearAuths(Configuration conf, final String[] auths, - final String user) throws Throwable { + final String user) throws Throwable { try (Connection connection = ConnectionFactory.createConnection(conf)) { return setOrClearAuths(connection, auths, user, false); } } /** - * Removes given labels from user's globally authorized list of labels. - * @param connection - * @param auths - * @param user - * @return VisibilityLabelsResponse - * @throws Throwable + * Removes given labels from user's globally authorized list of labels. nnnnn */ public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths, - final String user) throws Throwable { + final String user) throws Throwable { return setOrClearAuths(connection, auths, user, false); } private static VisibilityLabelsResponse setOrClearAuths(Connection connection, - final String[] auths, final String user, final boolean setOrClear) - throws IOException, ServiceException, Throwable { + final String[] auths, final String user, final boolean setOrClear) + throws IOException, ServiceException, Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + } } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = table.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. 
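Editor's note: the VisibilityClient hunks above mostly re-indent the coprocessor Batch.Call boilerplate; the public surface is simpler than the diff suggests. A hedged end-to-end sketch of the label administration calls (label and user names are placeholders, and the client methods are declared to throw Throwable):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

    public class VisibilityAdminSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          if (!VisibilityClient.isCellVisibilityEnabled(conn)) {
            return; // the visibility coprocessor is not installed on this cluster
          }
          // Define labels, grant one of them to a user, then read the user's auths back.
          VisibilityClient.addLabels(conn, new String[] { "secret", "confidential" });
          VisibilityClient.setAuths(conn, new String[] { "secret" }, "alice"); // placeholder user
          System.out.println("auths granted: "
            + VisibilityClient.getAuths(conn, "alice").getAuthCount());
        }
      }
    }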
} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java index 0945dd98afc..c7bb27c6715 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.security.visibility; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class VisibilityConstants { @@ -31,8 +31,8 @@ public final class VisibilityConstants { public static final String VISIBILITY_LABELS_ATTR_KEY = "VISIBILITY"; /** Internal storage table for visibility labels */ - public static final TableName LABELS_TABLE_NAME = TableName.valueOf( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); + public static final TableName LABELS_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); /** Family for the internal storage table for visibility labels */ public static final byte[] LABELS_TABLE_FAMILY = Bytes.toBytes("f"); @@ -41,16 +41,16 @@ public final class VisibilityConstants { public static final byte[] LABEL_QUALIFIER = new byte[1]; /** - * Visibility serialization version format. It indicates the visibility labels - * are sorted based on ordinal + * Visibility serialization version format. 
It indicates the visibility labels are sorted based on + * ordinal **/ public static final byte SORTED_ORDINAL_SERIALIZATION_FORMAT = 1; /** Byte representation of the visibility_serialization_version **/ public static final byte[] SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL = - new byte[] { SORTED_ORDINAL_SERIALIZATION_FORMAT }; + new byte[] { SORTED_ORDINAL_SERIALIZATION_FORMAT }; - public static final String CHECK_AUTHS_FOR_MUTATION = - "hbase.security.visibility.mutations.checkauths"; + public static final String CHECK_AUTHS_FOR_MUTATION = + "hbase.security.visibility.mutations.checkauths"; public static final String NOT_OPERATOR = "!"; public static final String AND_OPERATOR = "&"; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java index a73d4750191..7d8d550e82e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /* diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java index 874b2b42cec..dfbb0b9d02b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.regex.Pattern; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -54,7 +53,7 @@ public class VisibilityLabelsValidator { validAuthChars['.'] = true; validAuthChars['/'] = true; } - + static final boolean isValidAuthChar(byte b) { return validAuthChars[0xff & b]; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 436d9eebbe3..cae868a01a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -43,10 +43,8 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.regex.Pattern; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; @@ -70,9 +68,10 @@ import org.apache.hadoop.hbase.ServerTask; import org.apache.hadoop.hbase.ServerTaskBuilder; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.BalanceRequest; import 
org.apache.hadoop.hbase.client.BalanceResponse; -import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.BalancerDecision; +import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.ClientUtil; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -119,6 +118,30 @@ import org.apache.hadoop.hbase.replication.ReplicationLoadSink; import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.DynamicClassLoader; +import org.apache.hadoop.hbase.util.ExceptionUtil; +import org.apache.hadoop.hbase.util.Methods; +import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; +import org.apache.hbase.thirdparty.com.google.gson.JsonArray; +import org.apache.hbase.thirdparty.com.google.gson.JsonElement; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.Service; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses; @@ -190,36 +213,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDe import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.util.Addressing; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.DynamicClassLoader; -import org.apache.hadoop.hbase.util.ExceptionUtil; -import org.apache.hadoop.hbase.util.Methods; -import org.apache.hadoop.hbase.util.VersionInfo; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; -import org.apache.hbase.thirdparty.com.google.gson.JsonArray; -import org.apache.hbase.thirdparty.com.google.gson.JsonElement; -import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; -import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; -import org.apache.hbase.thirdparty.com.google.protobuf.Service; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; -import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; -import org.apache.yetus.audience.InterfaceAudience; /** - * Protobufs utility. - * Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil (i.e. no 'shaded' in - * the package name) carries a COPY of a subset of this class for non-shaded - * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in - * the companion class too (not the end of the world, especially if you are adding new functionality - * but something to be aware of. + * Protobufs utility. Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil + * (i.e. no 'shaded' in the package name) carries a COPY of a subset of this class for non-shaded + * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in the + * companion class too (not the end of the world, especially if you are adding new functionality but + * something to be aware of. */ @InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class public final class ProtobufUtil { @@ -228,18 +228,18 @@ public final class ProtobufUtil { } /** - * Many results are simple: no cell, exists true or false. To save on object creations, - * we reuse them across calls. + * Many results are simple: no cell, exists true or false. To save on object creations, we reuse + * them across calls. 
*/ - private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{}; + private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {}; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true); - private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE - = Result.create((Cell[])null, true, true); - private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE - = Result.create((Cell[])null, false, true); + private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE = + Result.create((Cell[]) null, true, true); + private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE = + Result.create((Cell[]) null, false, true); private final static ClientProtos.Result EMPTY_RESULT_PB; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE; @@ -248,13 +248,12 @@ public final class ProtobufUtil { private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE_STALE; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE_STALE; - static { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); builder.setExists(true); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); + EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_TRUE_STALE = builder.build(); @@ -262,13 +261,13 @@ public final class ProtobufUtil { builder.setExists(false); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); + EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_FALSE_STALE = builder.build(); builder.clear(); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB = builder.build(); + EMPTY_RESULT_PB = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_STALE = builder.build(); } @@ -284,9 +283,8 @@ public final class ProtobufUtil { static { ClassLoader parent = ProtobufUtil.class.getClassLoader(); Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); + CLASS_LOADER = AccessController + .doPrivileged((PrivilegedAction) () -> new DynamicClassLoader(conf, parent)); classLoaderLoaded = true; } } @@ -296,14 +294,13 @@ public final class ProtobufUtil { } /** - * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, - * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to - * znodes, etc. + * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, to flag what + * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new - * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. + * @return The passed bytes with magic prepended (Creates a new byte array that is + * bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ - public static byte [] prependPBMagic(final byte [] bytes) { + public static byte[] prependPBMagic(final byte[] bytes) { return Bytes.add(PB_MAGIC, bytes); } @@ -311,17 +308,17 @@ public final class ProtobufUtil { * @param bytes Bytes to check. 
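Editor's note: this stretch of the ProtobufUtil diff reformats the PB-magic helpers (prependPBMagic just above, isPBMagicPrefix and lengthOfPBMagic just below), so a two-step reminder of how they compose may help. A sketch only; the payload bytes are arbitrary:

    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PbMagicSketch {
      public static void main(String[] args) {
        byte[] payload = Bytes.toBytes("some serialized protobuf"); // arbitrary bytes for the demo
        byte[] znodeContent = ProtobufUtil.prependPBMagic(payload); // 4 magic bytes + payload
        // Readers check the prefix, then skip it before parsing the protobuf.
        if (ProtobufUtil.isPBMagicPrefix(znodeContent)) {
          int offset = ProtobufUtil.lengthOfPBMagic();
          System.out.println(Bytes.toString(znodeContent, offset, znodeContent.length - offset));
        }
      }
    }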
* @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes) { + public static boolean isPBMagicPrefix(final byte[] bytes) { return ProtobufMagic.isPBMagicPrefix(bytes); } /** - * @param bytes Bytes to check. + * @param bytes Bytes to check. * @param offset offset to start at - * @param len length to use + * @param len length to use * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { + public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { return ProtobufMagic.isPBMagicPrefix(bytes, offset, len); } @@ -333,7 +330,7 @@ public final class ProtobufUtil { if (!isPBMagicPrefix(bytes)) { String bytesPrefix = bytes == null ? "null" : Bytes.toStringBinary(bytes, 0, PB_MAGIC.length); throw new DeserializationException( - "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix" + ", bytes: " + bytesPrefix); + "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix" + ", bytes: " + bytesPrefix); } } @@ -344,7 +341,7 @@ public final class ProtobufUtil { return ProtobufMagic.lengthOfPBMagic(); } - public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte [] value) { + public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte[] value) { ComparatorProtos.ByteArrayComparable.Builder builder = ComparatorProtos.ByteArrayComparable.newBuilder(); if (value != null) builder.setValue(UnsafeByteOperations.unsafeWrap(value)); @@ -352,12 +349,10 @@ public final class ProtobufUtil { } /** - * Return the IOException thrown by the remote server wrapped in - * ServiceException as cause. - * + * Return the IOException thrown by the remote server wrapped in ServiceException as cause. * @param se ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException or - * a new IOException that wraps the unexpected ServiceException. + * @return Exception wrapped in ServiceException or a new IOException that wraps the unexpected + * ServiceException. */ public static IOException getRemoteException(ServiceException se) { return makeIOExceptionOfException(se); @@ -366,9 +361,8 @@ public final class ProtobufUtil { /** * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than * just {@link ServiceException}. Prefer this method to - * {@link #getRemoteException(ServiceException)} because trying to - * contain direct protobuf references. - * @param e + * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf + * references. n */ public static IOException handleRemoteException(Exception e) { return makeIOExceptionOfException(e); @@ -383,14 +377,13 @@ public final class ProtobufUtil { return ExceptionUtil.asInterrupt(t); } if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); + t = ((RemoteException) t).unwrapRemoteException(); } - return t instanceof IOException? (IOException)t: new HBaseIOException(t); + return t instanceof IOException ? 
(IOException) t : new HBaseIOException(t); } /** * Convert a ServerName to a protocol buffer ServerName - * * @param serverName the ServerName to convert * @return the converted protocol buffer ServerName * @see #toServerName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName) @@ -399,8 +392,7 @@ public final class ProtobufUtil { if (serverName == null) { return null; } - HBaseProtos.ServerName.Builder builder = - HBaseProtos.ServerName.newBuilder(); + HBaseProtos.ServerName.Builder builder = HBaseProtos.ServerName.newBuilder(); builder.setHostName(serverName.getHostname()); if (serverName.getPort() >= 0) { builder.setPort(serverName.getPort()); @@ -413,7 +405,6 @@ public final class ProtobufUtil { /** * Convert a protocol buffer ServerName to a ServerName - * * @param proto the protocol buffer ServerName to convert * @return the converted ServerName */ @@ -436,10 +427,8 @@ public final class ProtobufUtil { * @param proto protocol buffer ServerNameList * @return a list of ServerName */ - public static List toServerNameList( - List proto) { - return proto.stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()); + public static List toServerNameList(List proto) { + return proto.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList()); } /** @@ -447,10 +436,10 @@ public final class ProtobufUtil { * @param proto the ListNamespaceDescriptorsResponse * @return a list of NamespaceDescriptor */ - public static List toNamespaceDescriptorList( - ListNamespaceDescriptorsResponse proto) { + public static List + toNamespaceDescriptorList(ListNamespaceDescriptorsResponse proto) { return proto.getNamespaceDescriptorList().stream().map(ProtobufUtil::toNamespaceDescriptor) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** @@ -463,7 +452,7 @@ public final class ProtobufUtil { return new ArrayList<>(); } return proto.getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** @@ -472,20 +461,19 @@ public final class ProtobufUtil { * @return a list of TableDescriptor */ public static List - toTableDescriptorList(ListTableDescriptorsByNamespaceResponse proto) { + toTableDescriptorList(ListTableDescriptorsByNamespaceResponse proto) { if (proto == null) return new ArrayList<>(); return proto.getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** * get the split keys in form "byte [][]" from a CreateTableRequest proto - * * @param proto the CreateTableRequest * @return the split keys */ - public static byte [][] getSplitKeysArray(final CreateTableRequest proto) { - byte [][] splitKeys = new byte[proto.getSplitKeysCount()][]; + public static byte[][] getSplitKeysArray(final CreateTableRequest proto) { + byte[][] splitKeys = new byte[proto.getSplitKeysCount()][]; for (int i = 0; i < proto.getSplitKeysCount(); ++i) { splitKeys[i] = proto.getSplitKeys(i).toByteArray(); } @@ -495,51 +483,47 @@ public final class ProtobufUtil { /** * Convert a protobuf Durability into a client Durability */ - public static Durability toDurability( - final ClientProtos.MutationProto.Durability proto) { - switch(proto) { - case USE_DEFAULT: - return Durability.USE_DEFAULT; - case SKIP_WAL: - return Durability.SKIP_WAL; - case ASYNC_WAL: - return Durability.ASYNC_WAL; - case SYNC_WAL: - return Durability.SYNC_WAL; - case FSYNC_WAL: - return Durability.FSYNC_WAL; - default: - return Durability.USE_DEFAULT; 
+ public static Durability toDurability(final ClientProtos.MutationProto.Durability proto) { + switch (proto) { + case USE_DEFAULT: + return Durability.USE_DEFAULT; + case SKIP_WAL: + return Durability.SKIP_WAL; + case ASYNC_WAL: + return Durability.ASYNC_WAL; + case SYNC_WAL: + return Durability.SYNC_WAL; + case FSYNC_WAL: + return Durability.FSYNC_WAL; + default: + return Durability.USE_DEFAULT; } } /** * Convert a client Durability into a protbuf Durability */ - public static ClientProtos.MutationProto.Durability toDurability( - final Durability d) { - switch(d) { - case USE_DEFAULT: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; - case SKIP_WAL: - return ClientProtos.MutationProto.Durability.SKIP_WAL; - case ASYNC_WAL: - return ClientProtos.MutationProto.Durability.ASYNC_WAL; - case SYNC_WAL: - return ClientProtos.MutationProto.Durability.SYNC_WAL; - case FSYNC_WAL: - return ClientProtos.MutationProto.Durability.FSYNC_WAL; - default: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; + public static ClientProtos.MutationProto.Durability toDurability(final Durability d) { + switch (d) { + case USE_DEFAULT: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; + case SKIP_WAL: + return ClientProtos.MutationProto.Durability.SKIP_WAL; + case ASYNC_WAL: + return ClientProtos.MutationProto.Durability.ASYNC_WAL; + case SYNC_WAL: + return ClientProtos.MutationProto.Durability.SYNC_WAL; + case FSYNC_WAL: + return ClientProtos.MutationProto.Durability.FSYNC_WAL; + default: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; } } /** * Convert a protocol buffer Get to a client Get - * * @param proto the protocol buffer Get to convert - * @return the converted client Get - * @throws IOException + * @return the converted client Get n */ public static Get toGet(final ClientProtos.Get proto) throws IOException { if (proto == null) return null; @@ -560,8 +544,8 @@ public final class ProtobufUtil { if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -572,14 +556,14 @@ public final class ProtobufUtil { FilterProtos.Filter filter = proto.getFilter(); get.setFilter(ProtobufUtil.toFilter(filter)); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { get.addColumn(family, qualifier.toByteArray()); } } else { @@ -587,7 +571,7 @@ public final class ProtobufUtil { } } } - if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ + if (proto.hasExistenceOnly() && proto.getExistenceOnly()) { get.setCheckExistenceOnly(true); } if (proto.hasConsistency()) { @@ -601,58 +585,59 @@ public final class ProtobufUtil { public static Consistency toConsistency(ClientProtos.Consistency consistency) { switch (consistency) { - case STRONG : return 
Consistency.STRONG; - case TIMELINE : return Consistency.TIMELINE; - default : return Consistency.STRONG; + case STRONG: + return Consistency.STRONG; + case TIMELINE: + return Consistency.TIMELINE; + default: + return Consistency.STRONG; } } public static ClientProtos.Consistency toConsistency(Consistency consistency) { switch (consistency) { - case STRONG : return ClientProtos.Consistency.STRONG; - case TIMELINE : return ClientProtos.Consistency.TIMELINE; - default : return ClientProtos.Consistency.STRONG; + case STRONG: + return ClientProtos.Consistency.STRONG; + case TIMELINE: + return ClientProtos.Consistency.TIMELINE; + default: + return ClientProtos.Consistency.STRONG; } } /** * Convert a protocol buffer Mutate to a Put. - * * @param proto The protocol buffer MutationProto to convert - * @return A client Put. - * @throws IOException + * @return A client Put. n */ - public static Put toPut(final MutationProto proto) - throws IOException { + public static Put toPut(final MutationProto proto) throws IOException { return toPut(proto, null); } /** * Convert a protocol buffer Mutate to a Put. - * - * @param proto The protocol buffer MutationProto to convert + * @param proto The protocol buffer MutationProto to convert * @param cellScanner If non-null, the Cell data that goes with this proto. - * @return A client Put. - * @throws IOException + * @return A client Put. n */ public static Put toPut(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? + throws IOException { + // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? MutationType type = proto.getMutateType(); - assert type == MutationType.PUT: type.name(); - long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP; + assert type == MutationType.PUT : type.name(); + long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
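For reference, the Durability and Consistency converters touched in this hunk are symmetric, with unrecognized values falling back to USE_DEFAULT and STRONG respectively. A minimal usage sketch, assuming the shaded ProtobufUtil and ClientProtos classes from hbase-protocol-shaded (the class name DurabilityRoundTrip and the local variable names are illustrative only):

  import org.apache.hadoop.hbase.client.Consistency;
  import org.apache.hadoop.hbase.client.Durability;
  import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

  public class DurabilityRoundTrip {
    public static void main(String[] args) {
      // Client enum -> protobuf enum
      ClientProtos.MutationProto.Durability pbDurability =
        ProtobufUtil.toDurability(Durability.ASYNC_WAL);
      // Protobuf enum -> client enum; unrecognized values default to USE_DEFAULT
      Durability clientDurability = ProtobufUtil.toDurability(pbDurability);

      // The Consistency converters behave the same way, defaulting to STRONG
      ClientProtos.Consistency pbConsistency = ProtobufUtil.toConsistency(Consistency.TIMELINE);
      Consistency clientConsistency = ProtobufUtil.toConsistency(pbConsistency);

      System.out.println(clientDurability + " / " + clientConsistency);
    }
  }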
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (put == null) { @@ -665,13 +650,13 @@ public final class ProtobufUtil { throw new IllegalArgumentException("row cannot be null"); } // The proto has the metadata and the data itself - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (ColumnValue column: proto.getColumnValueList()) { + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } long ts = timestamp; if (qv.hasTimestamp()) { @@ -680,51 +665,35 @@ public final class ProtobufUtil { byte[] allTagsBytes; if (qv.hasTags()) { allTagsBytes = qv.getTags().toByteArray(); - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(proto.getRow().toByteArray()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .setTags(allTagsBytes) - .build()); + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(proto.getRow().toByteArray()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()) + .setTags(allTagsBytes).build()); } else { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Cell.Type.Put) - .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .setTags(allTagsBytes) - .build()); + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(Cell.Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).setTags(allTagsBytes) + .build()); } } else { - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .build()); - } else{ - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Type.Put) - .setValue(qv.hasValue() ? 
qv.getValue().toByteArray() : null) - .build()); + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()).build()); + } else { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).build()); } } } } } put.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { put.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return put; @@ -732,43 +701,38 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Mutate to a Delete - * * @param proto the protocol buffer Mutate to convert - * @return the converted client Delete - * @throws IOException + * @return the converted client Delete n */ - public static Delete toDelete(final MutationProto proto) - throws IOException { + public static Delete toDelete(final MutationProto proto) throws IOException { return toDelete(proto, null); } /** * Convert a protocol buffer Mutate to a Delete - * - * @param proto the protocol buffer Mutate to convert + * @param proto the protocol buffer Mutate to convert * @param cellScanner if non-null, the data that goes with this delete. - * @return the converted client Delete - * @throws IOException + * @return the converted client Delete n */ public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.DELETE : type.name(); long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + + TextFormat.shortDebugString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. 
- throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + TextFormat.shortDebugString(proto)); } Cell cell = cellScanner.current(); if (delete == null) { @@ -781,9 +745,9 @@ public final class ProtobufUtil { if (delete == null) { throw new IllegalArgumentException("row cannot be null"); } - for (ColumnValue column: proto.getColumnValueList()) { + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { DeleteType deleteType = qv.getDeleteType(); byte[] qualifier = null; if (qv.hasQualifier()) { @@ -803,35 +767,38 @@ public final class ProtobufUtil { } } delete.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return delete; } + @FunctionalInterface - private interface ConsumerWithException { + private interface ConsumerWithException { void accept(T t, U u) throws IOException; } - private static T toDelta(Function supplier, ConsumerWithException consumer, - final MutationProto proto, final CellScanner cellScanner) throws IOException { + private static T toDelta(Function supplier, + ConsumerWithException consumer, final MutationProto proto, + final CellScanner cellScanner) throws IOException { byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; T mutation = row == null ? null : supplier.apply(new Bytes(row)); int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
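The toPut and toDelete converters in this area accept either a fully inlined MutationProto or a metadata-only proto plus a CellScanner. A short sketch of the inlined case, assuming the shaded ProtobufUtil and ClientProtos classes (the class and variable names are illustrative only):

  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class PutRoundTrip {
    public static void main(String[] args) throws Exception {
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));

      // Client Put -> protobuf MutationProto with the cell data inlined as column values
      MutationProto proto = ProtobufUtil.toMutation(MutationType.PUT, put);

      // Protobuf MutationProto -> client Put; no CellScanner is needed for the inlined form
      Put decoded = ProtobufUtil.toPut(proto);
      System.out.println(Bytes.toString(decoded.getRow()));
    }
  }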
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (mutation == null) { - mutation = supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + mutation = + supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } consumer.accept(mutation, cell); } @@ -844,23 +811,18 @@ public final class ProtobufUtil { for (QualifierValue qv : column.getQualifierValueList()) { byte[] qualifier = qv.getQualifier().toByteArray(); if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } byte[] value = qv.getValue().toByteArray(); byte[] tags = null; if (qv.hasTags()) { tags = qv.getTags().toByteArray(); } - consumer.accept(mutation, ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(mutation.getRow()) - .setFamily(family) - .setQualifier(qualifier) - .setTimestamp(cellTimestampOrLatest(qv)) - .setType(KeyValue.Type.Put.getCode()) - .setValue(value) - .setTags(tags) - .build()); + consumer.accept(mutation, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(mutation.getRow()).setFamily(family).setQualifier(qualifier) + .setTimestamp(cellTimestampOrLatest(qv)).setType(KeyValue.Type.Put.getCode()) + .setValue(value).setTags(tags).build()); } } } @@ -880,18 +842,16 @@ public final class ProtobufUtil { } /** - * Convert a protocol buffer Mutate to an Append - * @param cellScanner - * @param proto the protocol buffer Mutate to convert - * @return the converted client Append - * @throws IOException + * Convert a protocol buffer Mutate to an Append n * @param proto the protocol buffer Mutate to + * convert + * @return the converted client Append n */ public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.APPEND : type.name(); Append append = toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()), - Append::add, proto, cellScanner); + Append::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); append.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -901,17 +861,16 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Mutate to an Increment - * * @param proto the protocol buffer Mutate to convert - * @return the converted client Increment - * @throws IOException + * @return the converted client Increment n */ public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.INCREMENT : type.name(); - Increment increment = toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), - 
Increment::add, proto, cellScanner); + Increment increment = + toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), + Increment::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); increment.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -921,10 +880,8 @@ public final class ProtobufUtil { /** * Convert a MutateRequest to Mutation - * * @param proto the protocol buffer Mutate to convert - * @return the converted Mutation - * @throws IOException + * @return the converted Mutation n */ public static Mutation toMutation(final MutationProto proto) throws IOException { MutationType type = proto.getMutateType(); @@ -971,15 +928,11 @@ public final class ProtobufUtil { /** * Convert a client Scan to a protocol buffer Scan - * * @param scan the client Scan to convert - * @return the converted protocol buffer Scan - * @throws IOException + * @return the converted protocol buffer Scan n */ - public static ClientProtos.Scan toScan( - final Scan scan) throws IOException { - ClientProtos.Scan.Builder scanBuilder = - ClientProtos.Scan.newBuilder(); + public static ClientProtos.Scan toScan(final Scan scan) throws IOException { + ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder(); scanBuilder.setCacheBlocks(scan.getCacheBlocks()); if (scan.getBatch() > 0) { scanBuilder.setBatchSize(scan.getBatch()); @@ -1000,15 +953,14 @@ public final class ProtobufUtil { scanBuilder.setMaxVersions(scan.getMaxVersions()); scan.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { scanBuilder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) .build()); }); scanBuilder.setTimeRange(ProtobufUtil.toTimeRange(scan.getTimeRange())); Map attributes = scan.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); scanBuilder.addAttribute(attributeBuilder.build()); @@ -1027,13 +979,12 @@ public final class ProtobufUtil { } if (scan.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); - for (Map.Entry> - family: scan.getFamilyMap().entrySet()) { + for (Map.Entry> family : scan.getFamilyMap().entrySet()) { columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - NavigableSet qualifiers = family.getValue(); + NavigableSet qualifiers = family.getValue(); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte [] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1074,13 +1025,10 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Scan to a client Scan - * * @param proto the protocol buffer Scan to convert - * @return the converted client Scan - * @throws IOException + * @return the converted client Scan n */ - public static Scan toScan( - final ClientProtos.Scan proto) throws IOException { + public static Scan toScan(final ClientProtos.Scan proto) throws IOException { byte[] startRow = HConstants.EMPTY_START_ROW; byte[] stopRow = 
HConstants.EMPTY_END_ROW; boolean includeStartRow = true; @@ -1103,7 +1051,7 @@ public final class ProtobufUtil { } } Scan scan = - new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow); + new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow); if (proto.hasCacheBlocks()) { scan.setCacheBlocks(proto.getCacheBlocks()); } @@ -1122,8 +1070,8 @@ public final class ProtobufUtil { if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -1146,14 +1094,14 @@ public final class ProtobufUtil { if (proto.hasAllowPartialResults()) { scan.setAllowPartialResults(proto.getAllowPartialResults()); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { scan.addColumn(family, qualifier.toByteArray()); } } else { @@ -1192,8 +1140,8 @@ public final class ProtobufUtil { public static ClientProtos.Cursor toCursor(Cell cell) { return ClientProtos.Cursor.newBuilder() - .setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) - .build(); + .setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + .build(); } public static Cursor toCursor(ClientProtos.Cursor cursor) { @@ -1202,15 +1150,11 @@ public final class ProtobufUtil { /** * Create a protocol buffer Get based on a client Get. 
- * * @param get the client Get - * @return a protocol buffer Get - * @throws IOException + * @return a protocol buffer Get n */ - public static ClientProtos.Get toGet( - final Get get) throws IOException { - ClientProtos.Get.Builder builder = - ClientProtos.Get.newBuilder(); + public static ClientProtos.Get toGet(final Get get) throws IOException { + ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder(); builder.setRow(UnsafeByteOperations.unsafeWrap(get.getRow())); builder.setCacheBlocks(get.getCacheBlocks()); builder.setMaxVersions(get.getMaxVersions()); @@ -1219,15 +1163,14 @@ public final class ProtobufUtil { } get.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { builder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) .build()); }); builder.setTimeRange(ProtobufUtil.toTimeRange(get.getTimeRange())); Map attributes = get.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1236,12 +1179,12 @@ public final class ProtobufUtil { if (get.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); Map> families = get.getFamilyMap(); - for (Map.Entry> family: families.entrySet()) { + for (Map.Entry> family : families.entrySet()) { NavigableSet qualifiers = family.getValue(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte[] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1254,7 +1197,7 @@ public final class ProtobufUtil { if (get.getRowOffsetPerColumnFamily() > 0) { builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); } - if (get.isCheckExistenceOnly()){ + if (get.isCheckExistenceOnly()) { builder.setExistenceOnly(true); } if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { @@ -1274,12 +1217,7 @@ public final class ProtobufUtil { } /** - * Create a protocol buffer Mutate based on a client Mutation - * - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException + * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n */ public static MutationProto toMutation(final MutationType type, final Mutation mutation, final long nonce) throws IOException { @@ -1287,13 +1225,12 @@ public final class ProtobufUtil { } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder) throws IOException { + MutationProto.Builder builder) throws IOException { return toMutation(type, mutation, builder, HConstants.NO_NONCE); } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder, long nonce) - throws IOException { + MutationProto.Builder builder, long nonce) throws IOException { builder = getMutationBuilderAndSetCommonFields(type, mutation, builder); if (nonce != HConstants.NO_NONCE) { 
builder.setNonce(nonce); @@ -1306,15 +1243,15 @@ public final class ProtobufUtil { } ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> family: mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> family : mutation.getFamilyCellMap().entrySet()) { columnBuilder.clear(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - for (Cell cell: family.getValue()) { + for (Cell cell : family.getValue()) { valueBuilder.clear(); - valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); - valueBuilder.setValue(UnsafeByteOperations.unsafeWrap( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); + valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); valueBuilder.setTimestamp(cell.getTimestamp()); if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) { KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte()); @@ -1329,34 +1266,27 @@ public final class ProtobufUtil { /** * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @param builder - * @return a protobuf'd Mutation - * @throws IOException + * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a + * protobuf'd Mutation n */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder) throws IOException { + final MutationProto.Builder builder) throws IOException { return toMutationNoData(type, mutation, builder, HConstants.NO_NONCE); } /** - * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException + * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. + * Understanding is that the Cell will be transported other than via protobuf. nn * @return a + * protobuf'd Mutation n */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation) - throws IOException { - MutationProto.Builder builder = MutationProto.newBuilder(); + throws IOException { + MutationProto.Builder builder = MutationProto.newBuilder(); return toMutationNoData(type, mutation, builder); } public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder, long nonce) throws IOException { + final MutationProto.Builder builder, long nonce) throws IOException { getMutationBuilderAndSetCommonFields(type, mutation, builder); builder.setAssociatedCellCount(mutation.size()); if (mutation instanceof Increment) { @@ -1373,13 +1303,11 @@ public final class ProtobufUtil { /** * Code shared by {@link #toMutation(MutationType, Mutation)} and - * {@link #toMutationNoData(MutationType, Mutation)} - * @param type - * @param mutation - * @return A partly-filled out protobuf'd Mutation. 
+ * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd + * Mutation. */ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type, - final Mutation mutation, MutationProto.Builder builder) { + final Mutation mutation, MutationProto.Builder builder) { builder.setRow(UnsafeByteOperations.unsafeWrap(mutation.getRow())); builder.setMutateType(type); builder.setDurability(toDurability(mutation.getDurability())); @@ -1387,7 +1315,7 @@ public final class ProtobufUtil { Map attributes = mutation.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1398,7 +1326,6 @@ public final class ProtobufUtil { /** * Convert a client Result to a protocol buffer Result - * * @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1407,14 +1334,13 @@ public final class ProtobufUtil { } /** - * Convert a client Result to a protocol buffer Result - * @param result the client Result to convert - * @param encodeTags whether to includeTags in converted protobuf result or not - * When @encodeTags is set to true, it will return all the tags in the response. - * These tags may contain some sensitive data like acl permissions, etc. - * Only the tools like Export, Import which needs to take backup needs to set - * it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * Convert a client Result to a protocol buffer Result + * @param result the client Result to convert + * @param encodeTags whether to includeTags in converted protobuf result or not When @encodeTags + * is set to true, it will return all the tags in the response. These tags may + * contain some sensitive data like acl permissions, etc. Only the tools like + * Export, Import which needs to take backup needs to set it to true so that + * cell tags are persisted in backup. Refer to HBASE-25246 for more context. * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final Result result, boolean encodeTags) { @@ -1440,12 +1366,11 @@ public final class ProtobufUtil { /** * Convert a client Result to a protocol buffer Result - * * @param existence the client existence to send * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final boolean existence, boolean stale) { - if (stale){ + if (stale) { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE_STALE : EMPTY_RESULT_PB_EXISTS_FALSE_STALE; } else { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE : EMPTY_RESULT_PB_EXISTS_FALSE; @@ -1453,9 +1378,8 @@ public final class ProtobufUtil { } /** - * Convert a client Result to a protocol buffer Result. - * The pb Result does not include the Cell data. That is for transport otherwise. - * + * Convert a client Result to a protocol buffer Result. The pb Result does not include the Cell + * data. That is for transport otherwise. 
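The Result converters nearby are likewise symmetric between the client and protobuf types. A minimal sketch of the cell-carrying form, assuming the shaded ProtobufUtil and ClientProtos classes (the class and variable names are illustrative only):

  import java.util.Collections;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ResultRoundTrip {
    public static void main(String[] args) throws Exception {
      Cell cell = new KeyValue(Bytes.toBytes("row-1"), Bytes.toBytes("cf"), Bytes.toBytes("q"),
        Bytes.toBytes("v"));
      Result result = Result.create(Collections.singletonList(cell));

      // Client Result -> protobuf Result (tags are only encoded when explicitly requested)
      ClientProtos.Result proto = ProtobufUtil.toResult(result);

      // Protobuf Result -> client Result
      Result decoded = ProtobufUtil.toResult(proto);
      System.out.println(decoded.rawCells().length);
    }
  }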
* @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1471,7 +1395,6 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert * @return the converted client Result */ @@ -1481,26 +1404,24 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert - * @param decodeTags whether to decode tags into converted client Result - * When @decodeTags is set to true, it will decode all the tags from the - * response. These tags may contain some sensitive data like acl permissions, - * etc. Only the tools like Export, Import which needs to take backup needs to - * set it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * @param proto the protocol buffer Result to convert + * @param decodeTags whether to decode tags into converted client Result When @decodeTags is set + * to true, it will decode all the tags from the response. These tags may + * contain some sensitive data like acl permissions, etc. Only the tools like + * Export, Import which needs to take backup needs to set it to true so that + * cell tags are persisted in backup. Refer to HBASE-25246 for more context. * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto, boolean decodeTags) { if (proto.hasExists()) { if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } List values = proto.getCellList(); - if (values.isEmpty()){ + if (values.isEmpty()) { return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT; } @@ -1514,23 +1435,23 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert + * @param proto the protocol buffer Result to convert * @param scanner Optional cell scanner. - * @return the converted client Result - * @throws IOException + * @return the converted client Result n */ public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner) - throws IOException { + throws IOException { List values = proto.getCellList(); if (proto.hasExists()) { - if ((values != null && !values.isEmpty()) || - (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { + if ( + (values != null && !values.isEmpty()) + || (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0) + ) { throw new IllegalArgumentException("bad proto: exists with cells is no allowed " + proto); } if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? 
EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } @@ -1546,23 +1467,21 @@ public final class ProtobufUtil { } } - if (!values.isEmpty()){ + if (!values.isEmpty()) { if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (CellProtos.Cell c: values) { + for (CellProtos.Cell c : values) { cells.add(toCell(builder, c, false)); } } return (cells == null || cells.isEmpty()) - ? (proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT) - : Result.create(cells, null, proto.getStale()); + ? (proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT) + : Result.create(cells, null, proto.getStale()); } - /** * Convert a ByteArrayComparable to a protocol buffer Comparator - * * @param comparator the ByteArrayComparable to convert * @return the converted protocol buffer Comparator */ @@ -1575,23 +1494,22 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Comparator to a ByteArrayComparable - * * @param proto the protocol buffer Comparator to convert * @return the converted ByteArrayComparable */ @SuppressWarnings("unchecked") public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) - throws IOException { + throws IOException { String type = proto.getName(); String funcName = "parseFrom"; - byte [] value = proto.getSerializedComparator().toByteArray(); + byte[] value = proto.getSerializedComparator().toByteArray(); try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Method parseFrom = c.getMethod(funcName, byte[].class); if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (ByteArrayComparable)parseFrom.invoke(null, value); + return (ByteArrayComparable) parseFrom.invoke(null, value); } catch (Exception e) { throw new IOException(e); } @@ -1599,14 +1517,13 @@ public final class ProtobufUtil { /** * Convert a protocol buffer Filter to a client Filter - * * @param proto the protocol buffer Filter to convert * @return the converted Filter */ @SuppressWarnings("unchecked") public static Filter toFilter(FilterProtos.Filter proto) throws IOException { String type = proto.getName(); - final byte [] value = proto.getSerializedFilter().toByteArray(); + final byte[] value = proto.getSerializedFilter().toByteArray(); String funcName = "parseFrom"; try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); @@ -1614,7 +1531,7 @@ public final class ProtobufUtil { if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (Filter)parseFrom.invoke(c, value); + return (Filter) parseFrom.invoke(c, value); } catch (Exception e) { // Either we couldn't instantiate the method object, or "parseFrom" failed. // In either case, let's not retry. @@ -1624,7 +1541,6 @@ public final class ProtobufUtil { /** * Convert a client Filter to a protocol buffer Filter - * * @param filter the Filter to convert * @return the converted protocol buffer Filter */ @@ -1636,54 +1552,46 @@ public final class ProtobufUtil { } /** - * Convert a delete KeyValue type to protocol buffer DeleteType. - * - * @param type - * @return protocol buffer DeleteType - * @throws IOException + * Convert a delete KeyValue type to protocol buffer DeleteType. 
n * @return protocol buffer + * DeleteType n */ - public static DeleteType toDeleteType( - KeyValue.Type type) throws IOException { + public static DeleteType toDeleteType(KeyValue.Type type) throws IOException { switch (type) { - case Delete: - return DeleteType.DELETE_ONE_VERSION; - case DeleteColumn: - return DeleteType.DELETE_MULTIPLE_VERSIONS; - case DeleteFamily: - return DeleteType.DELETE_FAMILY; - case DeleteFamilyVersion: - return DeleteType.DELETE_FAMILY_VERSION; - default: + case Delete: + return DeleteType.DELETE_ONE_VERSION; + case DeleteColumn: + return DeleteType.DELETE_MULTIPLE_VERSIONS; + case DeleteFamily: + return DeleteType.DELETE_FAMILY; + case DeleteFamilyVersion: + return DeleteType.DELETE_FAMILY_VERSION; + default: throw new IOException("Unknown delete type: " + type); } } /** * Convert a protocol buffer DeleteType to delete KeyValue type. - * * @param type The DeleteType - * @return The type. - * @throws IOException + * @return The type. n */ - public static KeyValue.Type fromDeleteType( - DeleteType type) throws IOException { + public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException { switch (type) { - case DELETE_ONE_VERSION: - return KeyValue.Type.Delete; - case DELETE_MULTIPLE_VERSIONS: - return KeyValue.Type.DeleteColumn; - case DELETE_FAMILY: - return KeyValue.Type.DeleteFamily; - case DELETE_FAMILY_VERSION: - return KeyValue.Type.DeleteFamilyVersion; - default: - throw new IOException("Unknown delete type: " + type); + case DELETE_ONE_VERSION: + return KeyValue.Type.Delete; + case DELETE_MULTIPLE_VERSIONS: + return KeyValue.Type.DeleteColumn; + case DELETE_FAMILY: + return KeyValue.Type.DeleteFamily; + case DELETE_FAMILY_VERSION: + return KeyValue.Type.DeleteFamilyVersion; + default: + throw new IOException("Unknown delete type: " + type); } } /** * Convert a stringified protocol buffer exception Parameter to a Java Exception - * * @param parameter the protocol buffer Parameter to convert * @return the converted Exception * @throws IOException if failed to deserialize the parameter @@ -1695,7 +1603,7 @@ public final class ProtobufUtil { String type = parameter.getName(); try { Class c = - (Class)Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); + (Class) Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Constructor cn = null; try { cn = c.getDeclaredConstructor(String.class); @@ -1710,33 +1618,34 @@ public final class ProtobufUtil { } } -// Start helpers for Client + // Start helpers for Client @SuppressWarnings("unchecked") public static T newServiceStub(Class service, RpcChannel channel) - throws Exception { - return (T)Methods.call(service, null, "newStub", - new Class[]{ RpcChannel.class }, new Object[]{ channel }); + throws Exception { + return (T) Methods.call(service, null, "newStub", new Class[] { RpcChannel.class }, + new Object[] { channel }); } -// End helpers for Client -// Start helpers for Admin + // End helpers for Client + // Start helpers for Admin /** - * A helper to retrieve region info given a region name or an - * encoded region name using admin protocol. - * + * A helper to retrieve region info given a region name or an encoded region name using admin + * protocol. 
* @return the retrieved region info */ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( - final RpcController controller, final AdminService.BlockingInterface admin, - final byte[] regionName) throws IOException { + final RpcController controller, final AdminService.BlockingInterface admin, + final byte[] regionName) throws IOException { try { GetRegionInfoRequest request = - org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName)? - GetRegionInfoRequest.newBuilder().setRegion(RequestConverter. - buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, regionName)).build(): - RequestConverter.buildGetRegionInfoRequest(regionName); + org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName) + ? GetRegionInfoRequest.newBuilder() + .setRegion(RequestConverter + .buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, regionName)) + .build() + : RequestConverter.buildGetRegionInfoRequest(regionName); GetRegionInfoResponse response = admin.getRegionInfo(controller, request); return toRegionInfo(response.getRegionInfo()); } catch (ServiceException se) { @@ -1744,10 +1653,10 @@ public final class ProtobufUtil { } } - public static List getRegionLoadInfo( - GetRegionLoadResponse regionLoadResponse) { + public static List + getRegionLoadInfo(GetRegionLoadResponse regionLoadResponse) { List regionLoadList = - new ArrayList<>(regionLoadResponse.getRegionLoadsCount()); + new ArrayList<>(regionLoadResponse.getRegionLoadsCount()); for (RegionLoad regionLoad : regionLoadResponse.getRegionLoadsList()) { regionLoadList.add(new org.apache.hadoop.hbase.RegionLoad(regionLoad)); } @@ -1755,16 +1664,11 @@ public final class ProtobufUtil { } /** - * A helper to close a region given a region name - * using admin protocol. - * - * @param admin - * @param regionName - * @throws IOException + * A helper to close a region given a region name using admin protocol. nnn */ public static void closeRegion(final RpcController controller, - final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName) - throws IOException { + final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName) + throws IOException { CloseRegionRequest closeRegionRequest = ProtobufUtil.buildCloseRegionRequest(server, regionName); try { @@ -1775,19 +1679,15 @@ public final class ProtobufUtil { } /** - * A helper to warmup a region given a region name - * using admin protocol - * - * @param admin - * @param regionInfo - * + * A helper to warmup a region given a region name using admin protocol nn * */ public static void warmupRegion(final RpcController controller, - final AdminService.BlockingInterface admin, final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { + final AdminService.BlockingInterface admin, + final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { try { WarmupRegionRequest warmupRegionRequest = - RequestConverter.buildWarmupRegionRequest(regionInfo); + RequestConverter.buildWarmupRegionRequest(regionInfo); admin.warmupRegion(controller, warmupRegionRequest); } catch (ServiceException e) { @@ -1796,16 +1696,12 @@ public final class ProtobufUtil { } /** - * A helper to open a region using admin protocol. - * @param admin - * @param region - * @throws IOException + * A helper to open a region using admin protocol. 
nnn */ public static void openRegion(final RpcController controller, - final AdminService.BlockingInterface admin, ServerName server, final org.apache.hadoop.hbase.client.RegionInfo region) - throws IOException { - OpenRegionRequest request = - RequestConverter.buildOpenRegionRequest(server, region, null); + final AdminService.BlockingInterface admin, ServerName server, + final org.apache.hadoop.hbase.client.RegionInfo region) throws IOException { + OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, region, null); try { admin.openRegion(controller, request); } catch (ServiceException se) { @@ -1814,26 +1710,20 @@ public final class ProtobufUtil { } /** - * A helper to get the all the online regions on a region - * server using admin protocol. - * - * @param admin - * @return a list of online region info - * @throws IOException + * A helper to get the all the online regions on a region server using admin protocol. n * @return + * a list of online region info n */ - public static List getOnlineRegions(final AdminService.BlockingInterface admin) - throws IOException { + public static List + getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException { return getOnlineRegions(null, admin); } /** - * A helper to get the all the online regions on a region - * server using admin protocol. + * A helper to get the all the online regions on a region server using admin protocol. * @return a list of online region info */ - public static List getOnlineRegions(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + public static List getOnlineRegions( + final RpcController controller, final AdminService.BlockingInterface admin) throws IOException { GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); GetOnlineRegionResponse response = null; try { @@ -1846,14 +1736,15 @@ public final class ProtobufUtil { /** * Get the list of region info from a GetOnlineRegionResponse - * * @param proto the GetOnlineRegionResponse * @return the list of region info or empty if proto is null */ - public static List getRegionInfos(final GetOnlineRegionResponse proto) { + public static List + getRegionInfos(final GetOnlineRegionResponse proto) { if (proto == null) return Collections.EMPTY_LIST; - List regionInfos = new ArrayList<>(proto.getRegionInfoList().size()); - for (RegionInfo regionInfo: proto.getRegionInfoList()) { + List regionInfos = + new ArrayList<>(proto.getRegionInfoList().size()); + for (RegionInfo regionInfo : proto.getRegionInfoList()) { regionInfos.add(toRegionInfo(regionInfo)); } return regionInfos; @@ -1864,8 +1755,7 @@ public final class ProtobufUtil { * @return the server name */ public static ServerInfo getServerInfo(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + final AdminService.BlockingInterface admin) throws IOException { GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); try { GetServerInfoResponse response = admin.getServerInfo(controller, request); @@ -1876,28 +1766,22 @@ public final class ProtobufUtil { } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. 
* @return the list of store files */ public static List getStoreFiles(final AdminService.BlockingInterface admin, - final byte[] regionName, final byte[] family) - throws IOException { + final byte[] regionName, final byte[] family) throws IOException { return getStoreFiles(null, admin, regionName, family); } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. * @return the list of store files */ public static List getStoreFiles(final RpcController controller, - final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) - throws IOException { - GetStoreFileRequest request = - ProtobufUtil.buildGetStoreFileRequest(regionName, family); + final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) + throws IOException { + GetStoreFileRequest request = ProtobufUtil.buildGetStoreFileRequest(regionName, family); try { GetStoreFileResponse response = admin.getStoreFile(controller, request); return response.getStoreFileList(); @@ -1906,7 +1790,7 @@ public final class ProtobufUtil { } } -// End helpers for Admin + // End helpers for Admin /* * Get the total (read + write) requests from a RegionLoad pb @@ -1921,11 +1805,10 @@ public final class ProtobufUtil { return rl.getReadRequestsCount() + rl.getWriteRequestsCount(); } - /** * @param m Message to get delimited pb serialization of (with pb magic prefix) */ - public static byte [] toDelimitedByteArray(final Message m) throws IOException { + public static byte[] toDelimitedByteArray(final Message m) throws IOException { // Allocate arbitrary big size so we avoid resizing. ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); baos.write(PB_MAGIC); @@ -1935,13 +1818,12 @@ public final class ProtobufUtil { /** * Find the HRegion encoded name based on a region specifier - * * @param regionSpecifier the region specifier * @return the corresponding region's encoded name * @throws DoNotRetryIOException if the specifier type is unsupported */ - public static String getRegionEncodedName( - final RegionSpecifier regionSpecifier) throws DoNotRetryIOException { + public static String getRegionEncodedName(final RegionSpecifier regionSpecifier) + throws DoNotRetryIOException { ByteString value = regionSpecifier.getValue(); RegionSpecifierType type = regionSpecifier.getType(); switch (type) { @@ -1950,8 +1832,7 @@ public final class ProtobufUtil { case ENCODED_REGION_NAME: return value.toStringUtf8(); default: - throw new DoNotRetryIOException( - "Unsupported region specifier type: " + type); + throw new DoNotRetryIOException("Unsupported region specifier type: " + type); } } @@ -1978,15 +1859,15 @@ public final class ProtobufUtil { Map metrics = scanMetrics.getMetricsMap(reset); for (Entry e : metrics.entrySet()) { HBaseProtos.NameInt64Pair nameInt64Pair = - HBaseProtos.NameInt64Pair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build(); + HBaseProtos.NameInt64Pair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build(); builder.addMetrics(nameInt64Pair); } return builder.build(); } /** - * Unwraps an exception from a protobuf service into the underlying (expected) IOException. - * This method will always throw an exception. + * Unwraps an exception from a protobuf service into the underlying (expected) IOException. This + * method will always throw an exception. 
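toIOException is how callers unwrap a ServiceException thrown by a protobuf blocking stub into the underlying IOException. A small sketch, assuming the relocated protobuf ServiceException from hbase-thirdparty and the shaded ProtobufUtil (the failingCall helper and class name are hypothetical):

  import java.io.IOException;
  import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

  public class UnwrapServiceException {
    // Stand-in for a blocking-stub call that wraps an IOException in a ServiceException
    static void failingCall() throws ServiceException {
      throw new ServiceException(new IOException("region moved"));
    }

    public static void main(String[] args) {
      try {
        failingCall();
      } catch (ServiceException se) {
        try {
          ProtobufUtil.toIOException(se); // always throws: the wrapped IOException if present
        } catch (IOException ioe) {
          System.out.println("unwrapped: " + ioe.getMessage());
        }
      }
    }
  }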
* @param se the {@code ServiceException} instance to convert into an {@code IOException} */ public static void toIOException(ServiceException se) throws IOException { @@ -1996,7 +1877,7 @@ public final class ProtobufUtil { Throwable cause = se.getCause(); if (cause != null && cause instanceof IOException) { - throw (IOException)cause; + throw (IOException) cause; } throw new IOException(se); } @@ -2047,14 +1928,11 @@ public final class ProtobufUtil { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, - boolean decodeTags) { - ExtendedCellBuilder builder = cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()); + boolean decodeTags) { + ExtendedCellBuilder builder = cellBuilder.clear().setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()).setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()).setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()); if (decodeTags && cell.hasTags()) { builder.setTags(cell.getTags().toByteArray()); } @@ -2063,12 +1941,10 @@ public final class ProtobufUtil { public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { HBaseProtos.NamespaceDescriptor.Builder b = - HBaseProtos.NamespaceDescriptor.newBuilder() - .setName(ByteString.copyFromUtf8(ns.getName())); - for(Map.Entry entry: ns.getConfiguration().entrySet()) { - b.addConfiguration(HBaseProtos.NameStringPair.newBuilder() - .setName(entry.getKey()) - .setValue(entry.getValue())); + HBaseProtos.NamespaceDescriptor.newBuilder().setName(ByteString.copyFromUtf8(ns.getName())); + for (Map.Entry entry : ns.getConfiguration().entrySet()) { + b.addConfiguration( + HBaseProtos.NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue())); } return b.build(); } @@ -2082,25 +1958,25 @@ public final class ProtobufUtil { } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, - List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, List inputPaths, + List outputPaths, Path storeDir) { return toCompactionDescriptor(info, null, family, inputPaths, outputPaths, storeDir); } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, - byte[] family, List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, byte[] family, + List inputPaths, List outputPaths, Path storeDir) { // compaction descriptor contains relative paths. // input / output paths are relative to the store dir // store dir is relative to region dir CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder() - .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes())) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap( - regionName == null ? info.getEncodedNameAsBytes() : regionName)) - .setFamilyName(UnsafeByteOperations.unsafeWrap(family)) - .setStoreHomeDir(storeDir.getName()); //make relative + .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes())) + .setEncodedRegionName(UnsafeByteOperations + .unsafeWrap(regionName == null ? 
info.getEncodedNameAsBytes() : regionName)) + .setFamilyName(UnsafeByteOperations.unsafeWrap(family)).setStoreHomeDir(storeDir.getName()); // make + // relative for (Path inputPath : inputPaths) { - builder.addCompactionInput(inputPath.getName()); //relative path + builder.addCompactionInput(inputPath.getName()); // relative path } for (Path outputPath : outputPaths) { builder.addCompactionOutput(outputPath.getName()); @@ -2109,20 +1985,20 @@ public final class ProtobufUtil { return builder.build(); } - public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.hadoop.hbase.client.RegionInfo hri, - long flushSeqId, Map> committedFiles) { - FlushDescriptor.Builder desc = FlushDescriptor.newBuilder() - .setAction(action) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes())) - .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName())) - .setFlushSequenceNumber(flushSeqId) - .setTableName(UnsafeByteOperations.unsafeWrap(hri.getTable().getName())); + public static FlushDescriptor toFlushDescriptor(FlushAction action, + org.apache.hadoop.hbase.client.RegionInfo hri, long flushSeqId, + Map> committedFiles) { + FlushDescriptor.Builder desc = FlushDescriptor.newBuilder().setAction(action) + .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes())) + .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName())) + .setFlushSequenceNumber(flushSeqId) + .setTableName(UnsafeByteOperations.unsafeWrap(hri.getTable().getName())); for (Map.Entry> entry : committedFiles.entrySet()) { WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builder = - WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder() + WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder() .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) - .setStoreHomeDir(Bytes.toString(entry.getKey())); //relative to region + .setStoreHomeDir(Bytes.toString(entry.getKey())); // relative to region if (entry.getValue() != null) { for (Path path : entry.getValue()) { builder.addFlushOutput(path.getName()); @@ -2133,41 +2009,31 @@ public final class ProtobufUtil { return desc.build(); } - public static RegionEventDescriptor toRegionEventDescriptor( - EventType eventType, org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, - Map> storeFiles) { + public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, + org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, + Map> storeFiles) { final byte[] tableNameAsBytes = hri.getTable().getName(); final byte[] encodedNameAsBytes = hri.getEncodedNameAsBytes(); final byte[] regionNameAsBytes = hri.getRegionName(); - return toRegionEventDescriptor(eventType, - tableNameAsBytes, - encodedNameAsBytes, - regionNameAsBytes, - seqId, + return toRegionEventDescriptor(eventType, tableNameAsBytes, encodedNameAsBytes, + regionNameAsBytes, seqId, - server, - storeFiles); + server, storeFiles); } public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, - byte[] tableNameAsBytes, - byte[] encodedNameAsBytes, - byte[] regionNameAsBytes, - long seqId, + byte[] tableNameAsBytes, byte[] encodedNameAsBytes, byte[] regionNameAsBytes, long seqId, - ServerName server, - Map> storeFiles) { - RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder() - .setEventType(eventType) - .setTableName(UnsafeByteOperations.unsafeWrap(tableNameAsBytes)) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedNameAsBytes)) - 
.setRegionName(UnsafeByteOperations.unsafeWrap(regionNameAsBytes)) - .setLogSequenceNumber(seqId) - .setServer(toServerName(server)); + ServerName server, Map> storeFiles) { + RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder().setEventType(eventType) + .setTableName(UnsafeByteOperations.unsafeWrap(tableNameAsBytes)) + .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedNameAsBytes)) + .setRegionName(UnsafeByteOperations.unsafeWrap(regionNameAsBytes)).setLogSequenceNumber(seqId) + .setServer(toServerName(server)); for (Entry> entry : storeFiles.entrySet()) { - StoreDescriptor.Builder builder = StoreDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) + StoreDescriptor.Builder builder = + StoreDescriptor.newBuilder().setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) .setStoreHomeDir(Bytes.toString(entry.getKey())); for (Path path : entry.getValue()) { builder.addStoreFile(path.getName()); @@ -2179,55 +2045,53 @@ public final class ProtobufUtil { } /** - * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. - * Tries to NOT print out data both because it can be big but also so we do not have data in our - * logs. Use judiciously. - * @param m - * @return toString of passed m + * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to + * NOT print out data both because it can be big but also so we do not have data in our logs. Use + * judiciously. n * @return toString of passed m */ public static String getShortTextFormat(Message m) { if (m == null) return "null"; if (m instanceof ScanRequest) { - // This should be small and safe to output. No data. + // This should be small and safe to output. No data. return TextFormat.shortDebugString(m); } else if (m instanceof RegionServerReportRequest) { // Print a short message only, just the servername and the requests, not the full load. - RegionServerReportRequest r = (RegionServerReportRequest)m; - return "server " + TextFormat.shortDebugString(r.getServer()) + - " load { numberOfRequests: " + r.getLoad().getNumberOfRequests() + " }"; + RegionServerReportRequest r = (RegionServerReportRequest) m; + return "server " + TextFormat.shortDebugString(r.getServer()) + " load { numberOfRequests: " + + r.getLoad().getNumberOfRequests() + " }"; } else if (m instanceof RegionServerStartupRequest) { // Should be small enough. return TextFormat.shortDebugString(m); } else if (m instanceof MutationProto) { - return toShortString((MutationProto)m); + return toShortString((MutationProto) m); } else if (m instanceof GetRequest) { GetRequest r = (GetRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getGet().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getGet().getRow()); } else if (m instanceof ClientProtos.MultiRequest) { ClientProtos.MultiRequest r = (ClientProtos.MultiRequest) m; // Get the number of Actions - int actionsCount = r.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = + r.getRegionActionList().stream().mapToInt(ClientProtos.RegionAction::getActionCount).sum(); // Get first set of Actions. ClientProtos.RegionAction actions = r.getRegionActionList().get(0); - String row = actions.getActionCount() <= 0? "": - getStringForByteString(actions.getAction(0).hasGet()? 
- actions.getAction(0).getGet().getRow(): - actions.getAction(0).getMutation().getRow()); - return "region= " + getStringForByteString(actions.getRegion().getValue()) + - ", for " + actionsCount + " action(s) and 1st row key=" + row; + String row = actions.getActionCount() <= 0 + ? "" + : getStringForByteString(actions.getAction(0).hasGet() + ? actions.getAction(0).getGet().getRow() + : actions.getAction(0).getMutation().getRow()); + return "region= " + getStringForByteString(actions.getRegion().getValue()) + ", for " + + actionsCount + " action(s) and 1st row key=" + row; } else if (m instanceof ClientProtos.MutateRequest) { ClientProtos.MutateRequest r = (ClientProtos.MutateRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getMutation().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getMutation().getRow()); } else if (m instanceof ClientProtos.CoprocessorServiceRequest) { ClientProtos.CoprocessorServiceRequest r = (ClientProtos.CoprocessorServiceRequest) m; - return "coprocessorService= " + r.getCall().getServiceName() + ":" + r.getCall().getMethodName(); + return "coprocessorService= " + r.getCall().getServiceName() + ":" + + r.getCall().getMethodName(); } return "TODO: " + m.getClass().toString(); } @@ -2238,7 +2102,6 @@ public final class ProtobufUtil { /** * Return SlowLogParams to maintain recent online slowlog responses - * * @param message Message object {@link Message} * @return SlowLogParams with regionName(for filter queries) and params */ @@ -2258,15 +2121,13 @@ public final class ProtobufUtil { } else if (message instanceof GetRequest) { GetRequest getRequest = (GetRequest) message; String regionName = getStringForByteString(getRequest.getRegion().getValue()); - String params = "region= " + regionName + ", row= " - + getStringForByteString(getRequest.getGet().getRow()); + String params = + "region= " + regionName + ", row= " + getStringForByteString(getRequest.getGet().getRow()); return new SlowLogParams(regionName, params); } else if (message instanceof MultiRequest) { MultiRequest multiRequest = (MultiRequest) message; - int actionsCount = multiRequest.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = multiRequest.getRegionActionList().stream() + .mapToInt(ClientProtos.RegionAction::getActionCount).sum(); RegionAction actions = multiRequest.getRegionActionList().get(0); String regionName = getStringForByteString(actions.getRegion().getValue()); String params = "region= " + regionName + ", for " + actionsCount + " action(s)"; @@ -2278,8 +2139,7 @@ public final class ProtobufUtil { return new SlowLogParams(regionName, params); } else if (message instanceof CoprocessorServiceRequest) { CoprocessorServiceRequest coprocessorServiceRequest = (CoprocessorServiceRequest) message; - String params = "coprocessorService= " - + coprocessorServiceRequest.getCall().getServiceName() + String params = "coprocessorService= " + coprocessorServiceRequest.getCall().getServiceName() + ":" + coprocessorServiceRequest.getCall().getMethodName(); return new SlowLogParams(params); } @@ -2293,19 +2153,19 @@ public final class ProtobufUtil { * @return Short String of mutation proto */ static String toShortString(final MutationProto proto) { - return "row=" + Bytes.toString(proto.getRow().toByteArray()) + - ", type=" + proto.getMutateType().toString(); + return "row=" + 
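// The point of getShortTextFormat above is a compact, data-free summary for logging; a
// sketch assuming a LOG field and an already-built GetRequest 'request' in the calling
// context. For a GetRequest it yields something like "region= <region>, row=<row>" rather
// than a full TextFormat dump of the message body.
if (LOG.isDebugEnabled()) {
  LOG.debug("Handling " + ProtobufUtil.getShortTextFormat(request));
}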
Bytes.toString(proto.getRow().toByteArray()) + ", type=" + + proto.getMutateType().toString(); } public static TableName toTableName(HBaseProtos.TableName tableNamePB) { return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(), - tableNamePB.getQualifier().asReadOnlyByteBuffer()); + tableNamePB.getQualifier().asReadOnlyByteBuffer()); } public static HBaseProtos.TableName toProtoTableName(TableName tableName) { return HBaseProtos.TableName.newBuilder() - .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())) - .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build(); + .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())) + .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build(); } public static List toProtoTableNameList(List tableNameList) { @@ -2334,10 +2194,8 @@ public final class ProtobufUtil { } /** - * Convert a protocol buffer CellVisibility to a client CellVisibility - * - * @param proto - * @return the converted client CellVisibility + * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted + * client CellVisibility */ public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) { if (proto == null) return null; @@ -2345,11 +2203,8 @@ public final class ProtobufUtil { } /** - * Convert a protocol buffer CellVisibility bytes to a client CellVisibility - * - * @param protoBytes - * @return the converted client CellVisibility - * @throws DeserializationException + * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the + * converted client CellVisibility n */ public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException { if (protoBytes == null) return null; @@ -2365,10 +2220,8 @@ public final class ProtobufUtil { } /** - * Create a protocol buffer CellVisibility based on a client CellVisibility. - * - * @param cellVisibility - * @return a protocol buffer CellVisibility + * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a + * protocol buffer CellVisibility */ public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) { ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); @@ -2377,10 +2230,8 @@ public final class ProtobufUtil { } /** - * Convert a protocol buffer Authorizations to a client Authorizations - * - * @param proto - * @return the converted client Authorizations + * Convert a protocol buffer Authorizations to a client Authorizations n * @return the converted + * client Authorizations */ public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) { if (proto == null) return null; @@ -2388,11 +2239,8 @@ public final class ProtobufUtil { } /** - * Convert a protocol buffer Authorizations bytes to a client Authorizations - * - * @param protoBytes - * @return the converted client Authorizations - * @throws DeserializationException + * Convert a protocol buffer Authorizations bytes to a client Authorizations n * @return the + * converted client Authorizations n */ public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException { if (protoBytes == null) return null; @@ -2408,10 +2256,8 @@ public final class ProtobufUtil { } /** - * Create a protocol buffer Authorizations based on a client Authorizations. 
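// A quick round trip through the two TableName converters above; "ns:tbl" is a made-up
// table name.
HBaseProtos.TableName proto = ProtobufUtil.toProtoTableName(TableName.valueOf("ns", "tbl"));
TableName tableName = ProtobufUtil.toTableName(proto);   // back to ns:tbl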
- * - * @param authorizations - * @return a protocol buffer Authorizations + * Create a protocol buffer Authorizations based on a client Authorizations. n * @return a + * protocol buffer Authorizations */ public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) { ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder(); @@ -2422,48 +2268,56 @@ public final class ProtobufUtil { } /** - * Convert a protocol buffer TimeUnit to a client TimeUnit - * - * @param proto - * @return the converted client TimeUnit + * Convert a protocol buffer TimeUnit to a client TimeUnit n * @return the converted client + * TimeUnit */ public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) { switch (proto) { - case NANOSECONDS: return TimeUnit.NANOSECONDS; - case MICROSECONDS: return TimeUnit.MICROSECONDS; - case MILLISECONDS: return TimeUnit.MILLISECONDS; - case SECONDS: return TimeUnit.SECONDS; - case MINUTES: return TimeUnit.MINUTES; - case HOURS: return TimeUnit.HOURS; - case DAYS: return TimeUnit.DAYS; + case NANOSECONDS: + return TimeUnit.NANOSECONDS; + case MICROSECONDS: + return TimeUnit.MICROSECONDS; + case MILLISECONDS: + return TimeUnit.MILLISECONDS; + case SECONDS: + return TimeUnit.SECONDS; + case MINUTES: + return TimeUnit.MINUTES; + case HOURS: + return TimeUnit.HOURS; + case DAYS: + return TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + proto); } /** - * Convert a client TimeUnit to a protocol buffer TimeUnit - * - * @param timeUnit - * @return the converted protocol buffer TimeUnit + * Convert a client TimeUnit to a protocol buffer TimeUnit n * @return the converted protocol + * buffer TimeUnit */ public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) { switch (timeUnit) { - case NANOSECONDS: return HBaseProtos.TimeUnit.NANOSECONDS; - case MICROSECONDS: return HBaseProtos.TimeUnit.MICROSECONDS; - case MILLISECONDS: return HBaseProtos.TimeUnit.MILLISECONDS; - case SECONDS: return HBaseProtos.TimeUnit.SECONDS; - case MINUTES: return HBaseProtos.TimeUnit.MINUTES; - case HOURS: return HBaseProtos.TimeUnit.HOURS; - case DAYS: return HBaseProtos.TimeUnit.DAYS; + case NANOSECONDS: + return HBaseProtos.TimeUnit.NANOSECONDS; + case MICROSECONDS: + return HBaseProtos.TimeUnit.MICROSECONDS; + case MILLISECONDS: + return HBaseProtos.TimeUnit.MILLISECONDS; + case SECONDS: + return HBaseProtos.TimeUnit.SECONDS; + case MINUTES: + return HBaseProtos.TimeUnit.MINUTES; + case HOURS: + return HBaseProtos.TimeUnit.HOURS; + case DAYS: + return HBaseProtos.TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + timeUnit); } /** - * Convert a protocol buffer ThrottleType to a client ThrottleType - * - * @param proto - * @return the converted client ThrottleType + * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted + * client ThrottleType */ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) { switch (proto) { @@ -2491,10 +2345,8 @@ public final class ProtobufUtil { } /** - * Convert a client ThrottleType to a protocol buffer ThrottleType - * - * @param type - * @return the converted protocol buffer ThrottleType + * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted + * protocol buffer ThrottleType */ public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) { switch (type) { @@ -2522,163 +2374,159 @@ public final class ProtobufUtil { } /** - * Convert a protocol 
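// The two switch-based TimeUnit converters above are inverses of each other:
HBaseProtos.TimeUnit protoUnit = ProtobufUtil.toProtoTimeUnit(TimeUnit.SECONDS);
TimeUnit clientUnit = ProtobufUtil.toTimeUnit(protoUnit);   // TimeUnit.SECONDS again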
buffer QuotaScope to a client QuotaScope - * - * @param proto - * @return the converted client QuotaScope + * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client + * QuotaScope */ public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) { switch (proto) { - case CLUSTER: return QuotaScope.CLUSTER; - case MACHINE: return QuotaScope.MACHINE; + case CLUSTER: + return QuotaScope.CLUSTER; + case MACHINE: + return QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + proto); } /** - * Convert a client QuotaScope to a protocol buffer QuotaScope - * - * @param scope - * @return the converted protocol buffer QuotaScope + * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol + * buffer QuotaScope */ public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) { switch (scope) { - case CLUSTER: return QuotaProtos.QuotaScope.CLUSTER; - case MACHINE: return QuotaProtos.QuotaScope.MACHINE; + case CLUSTER: + return QuotaProtos.QuotaScope.CLUSTER; + case MACHINE: + return QuotaProtos.QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + scope); } /** - * Convert a protocol buffer QuotaType to a client QuotaType - * - * @param proto - * @return the converted client QuotaType + * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client + * QuotaType */ public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { switch (proto) { - case THROTTLE: return QuotaType.THROTTLE; - case SPACE: return QuotaType.SPACE; + case THROTTLE: + return QuotaType.THROTTLE; + case SPACE: + return QuotaType.SPACE; } throw new RuntimeException("Invalid QuotaType " + proto); } /** - * Convert a client QuotaType to a protocol buffer QuotaType - * - * @param type - * @return the converted protocol buffer QuotaType + * Convert a client QuotaType to a protocol buffer QuotaType n * @return the converted protocol + * buffer QuotaType */ public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { switch (type) { - case THROTTLE: return QuotaProtos.QuotaType.THROTTLE; - case SPACE: return QuotaProtos.QuotaType.SPACE; - default: throw new RuntimeException("Invalid QuotaType " + type); + case THROTTLE: + return QuotaProtos.QuotaType.THROTTLE; + case SPACE: + return QuotaProtos.QuotaType.SPACE; + default: + throw new RuntimeException("Invalid QuotaType " + type); } } /** * Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy. - * * @param proto The protocol buffer space violation policy. * @return The corresponding client SpaceViolationPolicy. 
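// The quota enums follow the same inverse-pair pattern as the TimeUnit converters; the
// values picked here are illustrative only.
QuotaProtos.QuotaScope protoScope = ProtobufUtil.toProtoQuotaScope(QuotaScope.MACHINE);
QuotaScope scope = ProtobufUtil.toQuotaScope(protoScope);          // QuotaScope.MACHINE
QuotaProtos.QuotaType protoType = ProtobufUtil.toProtoQuotaScope(QuotaType.THROTTLE);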
*/ - public static SpaceViolationPolicy toViolationPolicy( - final QuotaProtos.SpaceViolationPolicy proto) { + public static SpaceViolationPolicy + toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) { switch (proto) { - case DISABLE: return SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + proto); } /** * Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy. - * * @param policy The client SpaceViolationPolicy object. * @return The corresponding protocol buffer SpaceViolationPolicy. */ - public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy( - final SpaceViolationPolicy policy) { + public static QuotaProtos.SpaceViolationPolicy + toProtoViolationPolicy(final SpaceViolationPolicy policy) { switch (policy) { - case DISABLE: return QuotaProtos.SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return QuotaProtos.SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return QuotaProtos.SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + policy); } /** * Build a protocol buffer TimedQuota - * - * @param limit the allowed number of request/data per timeUnit + * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit - * @param scope the quota scope + * @param scope the quota scope * @return the protocol buffer TimedQuota */ public static QuotaProtos.TimedQuota toTimedQuota(final long limit, final TimeUnit timeUnit, - final QuotaScope scope) { - return QuotaProtos.TimedQuota.newBuilder() - .setSoftLimit(limit) - .setTimeUnit(toProtoTimeUnit(timeUnit)) - .setScope(toProtoQuotaScope(scope)) - .build(); + final QuotaScope scope) { + return QuotaProtos.TimedQuota.newBuilder().setSoftLimit(limit) + .setTimeUnit(toProtoTimeUnit(timeUnit)).setScope(toProtoQuotaScope(scope)).build(); } /** * Builds a protocol buffer SpaceQuota. - * - * @param limit The maximum space usage for the quota in bytes. + * @param limit The maximum space usage for the quota in bytes. * @param violationPolicy The policy to apply when the quota is violated. * @return The protocol buffer SpaceQuota. 
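// Building a TimedQuota with the helper above -- e.g. a soft limit of 100 per second,
// machine-scoped; the numbers are purely illustrative.
QuotaProtos.TimedQuota quota =
  ProtobufUtil.toTimedQuota(100L, TimeUnit.SECONDS, QuotaScope.MACHINE);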
*/ - public static QuotaProtos.SpaceQuota toProtoSpaceQuota( - final long limit, final SpaceViolationPolicy violationPolicy) { - return QuotaProtos.SpaceQuota.newBuilder() - .setSoftLimit(limit) - .setViolationPolicy(toProtoViolationPolicy(violationPolicy)) - .build(); + public static QuotaProtos.SpaceQuota toProtoSpaceQuota(final long limit, + final SpaceViolationPolicy violationPolicy) { + return QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(limit) + .setViolationPolicy(toProtoViolationPolicy(violationPolicy)).build(); } /** - * Generates a marker for the WAL so that we propagate the notion of a bulk region load - * throughout the WAL. - * + * Generates a marker for the WAL so that we propagate the notion of a bulk region load throughout + * the WAL. * @param tableName The tableName into which the bulk load is being imported into. * @param encodedRegionName Encoded region name of the region which is being bulk loaded. * @param storeFiles A set of store files of a column family are bulk loaded. - * @param storeFilesSize Map of store files and their lengths - * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile - * name + * @param storeFilesSize Map of store files and their lengths + * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile name * @return The WAL log marker for bulk loads. */ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, ByteString encodedRegionName, Map> storeFiles, Map storeFilesSize, long bulkloadSeqId) { - return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, - storeFilesSize, bulkloadSeqId, null, true); + return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, storeFilesSize, + bulkloadSeqId, null, true); } public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, - ByteString encodedRegionName, Map> storeFiles, - Map storeFilesSize, long bulkloadSeqId, - List clusterIds, boolean replicate) { + ByteString encodedRegionName, Map> storeFiles, + Map storeFilesSize, long bulkloadSeqId, List clusterIds, + boolean replicate) { BulkLoadDescriptor.Builder desc = - BulkLoadDescriptor.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .setEncodedRegionName(encodedRegionName) - .setBulkloadSeqNum(bulkloadSeqId) - .setReplicate(replicate); - if(clusterIds != null) { + BulkLoadDescriptor.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setEncodedRegionName(encodedRegionName).setBulkloadSeqNum(bulkloadSeqId) + .setReplicate(replicate); + if (clusterIds != null) { desc.addAllClusterIds(clusterIds); } for (Map.Entry> entry : storeFiles.entrySet()) { - WALProtos.StoreDescriptor.Builder builder = StoreDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) + WALProtos.StoreDescriptor.Builder builder = + StoreDescriptor.newBuilder().setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) .setStoreHomeDir(Bytes.toString(entry.getKey())); // relative to region for (Path path : entry.getValue()) { String name = path.getName(); @@ -2696,8 +2544,7 @@ public final class ProtobufUtil { * This version of protobuf's mergeDelimitedFrom avoid the hard-coded 64MB limit for decoding * buffers * @param builder current message builder - * @param in Inputsream with delimited protobuf data - * @throws IOException + * @param in Inputsream with delimited protobuf data n */ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) throws 
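// A sketch of the shorter toBulkLoadDescriptor overload above. The generic parameters were
// stripped in this hunk, so Map<byte[], List<Path>> storeFiles and Map<String, Long>
// storeFilesSize are assumed; 'tableName', 'regionInfo', the maps and 'bulkloadSeqId' come
// from the caller. The longer overload additionally takes cluster ids and a replicate flag.
WALProtos.BulkLoadDescriptor bld = ProtobufUtil.toBulkLoadDescriptor(tableName,
  UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()),
  storeFiles, storeFilesSize, bulkloadSeqId);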
IOException { @@ -2715,15 +2562,14 @@ public final class ProtobufUtil { } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is known * @param builder current message builder - * @param in InputStream containing protobuf data - * @param size known size of protobuf data - * @throws IOException + * @param in InputStream containing protobuf data + * @param size known size of protobuf data n */ public static void mergeFrom(Message.Builder builder, InputStream in, int size) - throws IOException { + throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(in); codedInput.setSizeLimit(size); builder.mergeFrom(codedInput); @@ -2731,14 +2577,12 @@ public final class ProtobufUtil { } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is not known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is not known * @param builder current message builder - * @param in InputStream containing protobuf data - * @throws IOException + * @param in InputStream containing protobuf data n */ - public static void mergeFrom(Message.Builder builder, InputStream in) - throws IOException { + public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(in); codedInput.setSizeLimit(Integer.MAX_VALUE); builder.mergeFrom(codedInput); @@ -2746,11 +2590,10 @@ public final class ProtobufUtil { } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with ByteStrings + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with ByteStrings * @param builder current message builder - * @param bs ByteString containing the - * @throws IOException + * @param bs ByteString containing the n */ public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException { final CodedInputStream codedInput = bs.newCodedInput(); @@ -2760,11 +2603,10 @@ public final class ProtobufUtil { } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder - * @param b byte array - * @throws IOException + * @param b byte array n */ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(b); @@ -2774,16 +2616,13 @@ public final class ProtobufUtil { } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder - * @param b byte array - * @param offset - * @param length - * @throws IOException + * @param b byte array nnn */ public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length) - throws IOException { + throws IOException { final CodedInputStream codedInput 
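// The mergeFrom/mergeDelimitedFrom overloads above exist only to lift protobuf's default
// 64MB decode limit. A sketch, assuming 'builder' is any shaded Message.Builder and 'in'
// is an InputStream whose payload size 'size' is known up front:
ProtobufUtil.mergeFrom(builder, in, size);   // size limit raised to 'size'
Message parsed = builder.build();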
= CodedInputStream.newInstance(b, offset, length); codedInput.setSizeLimit(length); builder.mergeFrom(codedInput); @@ -2791,7 +2630,7 @@ public final class ProtobufUtil { } public static void mergeFrom(Message.Builder builder, CodedInputStream codedInput, int length) - throws IOException { + throws IOException { codedInput.resetSizeCounter(); int prevLimit = codedInput.setSizeLimit(length); @@ -2803,37 +2642,32 @@ public final class ProtobufUtil { codedInput.setSizeLimit(prevLimit); } - public static ReplicationLoadSink toReplicationLoadSink( - ClusterStatusProtos.ReplicationLoadSink rls) { + public static ReplicationLoadSink + toReplicationLoadSink(ClusterStatusProtos.ReplicationLoadSink rls) { ReplicationLoadSink.ReplicationLoadSinkBuilder builder = ReplicationLoadSink.newBuilder(); - builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()). - setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()). - setTimestampStarted(rls.hasTimestampStarted()? rls.getTimestampStarted(): -1L). - setTotalOpsProcessed(rls.hasTotalOpsProcessed()? rls.getTotalOpsProcessed(): -1L); + builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) + .setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()) + .setTimestampStarted(rls.hasTimestampStarted() ? rls.getTimestampStarted() : -1L) + .setTotalOpsProcessed(rls.hasTotalOpsProcessed() ? rls.getTotalOpsProcessed() : -1L); return builder.build(); } - public static ReplicationLoadSource toReplicationLoadSource( - ClusterStatusProtos.ReplicationLoadSource rls) { + public static ReplicationLoadSource + toReplicationLoadSource(ClusterStatusProtos.ReplicationLoadSource rls) { ReplicationLoadSource.ReplicationLoadSourceBuilder builder = ReplicationLoadSource.newBuilder(); - builder.setPeerID(rls.getPeerID()). - setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()). - setSizeOfLogQueue(rls.getSizeOfLogQueue()). - setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()). - setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()). - setReplicationLag(rls.getReplicationLag()). - setQueueId(rls.getQueueId()). - setRecovered(rls.getRecovered()). - setRunning(rls.getRunning()). - setEditsSinceRestart(rls.getEditsSinceRestart()). - setEditsRead(rls.getEditsRead()). 
- setoPsShipped(rls.getOPsShipped()); + builder.setPeerID(rls.getPeerID()).setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) + .setSizeOfLogQueue(rls.getSizeOfLogQueue()) + .setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()) + .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.getRecovered()).setRunning(rls.getRunning()) + .setEditsSinceRestart(rls.getEditsSinceRestart()).setEditsRead(rls.getEditsRead()) + .setoPsShipped(rls.getOPsShipped()); return builder.build(); } /** * Get a protocol buffer VersionInfo - * * @return the converted protocol buffer VersionInfo */ public static HBaseProtos.VersionInfo getVersionInfo() { @@ -2859,9 +2693,9 @@ public final class ProtobufUtil { * @return the converted list of SecurityCapability elements */ public static List toSecurityCapabilityList( - List capabilities) { + List capabilities) { List scList = new ArrayList<>(capabilities.size()); - for (MasterProtos.SecurityCapabilitiesResponse.Capability c: capabilities) { + for (MasterProtos.SecurityCapabilitiesResponse.Capability c : capabilities) { try { scList.add(SecurityCapability.valueOf(c.getNumber())); } catch (IllegalArgumentException e) { @@ -2873,9 +2707,9 @@ public final class ProtobufUtil { } public static TimeRange toTimeRange(HBaseProtos.TimeRange timeRange) { - return timeRange == null ? - TimeRange.allTime() : - new TimeRange(timeRange.hasFrom() ? timeRange.getFrom() : 0, + return timeRange == null + ? TimeRange.allTime() + : new TimeRange(timeRange.hasFrom() ? timeRange.getFrom() : 0, timeRange.hasTo() ? timeRange.getTo() : Long.MAX_VALUE); } @@ -2909,11 +2743,12 @@ public final class ProtobufUtil { */ public static ColumnFamilyDescriptor toColumnFamilyDescriptor(final ColumnFamilySchema cfs) { // Use the empty constructor so we preserve the initial values set on construction for things - // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for + // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for // unrelated-looking test failures that are hard to trace back to here. - ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); - cfs.getAttributesList().forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); + cfs.getAttributesList() + .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); cfs.getConfigurationList().forEach(a -> builder.setConfiguration(a.getName(), a.getValue())); return builder.build(); } @@ -2944,23 +2779,19 @@ public final class ProtobufUtil { * @return An {@link TableDescriptor} made from the passed in pb ts. 
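// toTimeRange above maps missing proto fields to the open-ended defaults (0 and
// Long.MAX_VALUE) and a null proto to TimeRange.allTime(); the cast just selects the proto
// overload.
TimeRange allTime = ProtobufUtil.toTimeRange((HBaseProtos.TimeRange) null);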
*/ public static TableDescriptor toTableDescriptor(final TableSchema ts) { - TableDescriptorBuilder builder - = TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); - ts.getColumnFamiliesList() - .stream() - .map(ProtobufUtil::toColumnFamilyDescriptor) + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); + ts.getColumnFamiliesList().stream().map(ProtobufUtil::toColumnFamilyDescriptor) .forEach(builder::setColumnFamily); ts.getAttributesList() .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); - ts.getConfigurationList() - .forEach(a -> builder.setValue(a.getName(), a.getValue())); + ts.getConfigurationList().forEach(a -> builder.setValue(a.getName(), a.getValue())); return builder.build(); } /** * Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state - * @param state the protobuf CompactionState - * @return CompactionState + * @param state the protobuf CompactionState n */ public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) { return CompactionState.valueOf(state.toString()); @@ -2972,16 +2803,15 @@ public final class ProtobufUtil { /** * Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state - * @param state the protobuf CompactionState - * @return CompactionState + * @param state the protobuf CompactionState n */ - public static CompactionState createCompactionStateForRegionLoad( - RegionLoad.CompactionState state) { + public static CompactionState + createCompactionStateForRegionLoad(RegionLoad.CompactionState state) { return CompactionState.valueOf(state.toString()); } - public static RegionLoad.CompactionState createCompactionStateForRegionLoad( - CompactionState state) { + public static RegionLoad.CompactionState + createCompactionStateForRegionLoad(CompactionState state) { return RegionLoad.CompactionState.valueOf(state.toString()); } @@ -2996,7 +2826,7 @@ public final class ProtobufUtil { * @return the protobuf SnapshotDescription type */ public static SnapshotProtos.SnapshotDescription.Type - createProtosSnapShotDescType(SnapshotType type) { + createProtosSnapShotDescType(SnapshotType type) { return SnapshotProtos.SnapshotDescription.Type.valueOf(type.name()); } @@ -3007,14 +2837,14 @@ public final class ProtobufUtil { * @return the protobuf SnapshotDescription type */ public static SnapshotProtos.SnapshotDescription.Type - createProtosSnapShotDescType(String snapshotDesc) { + createProtosSnapShotDescType(String snapshotDesc) { return SnapshotProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase(Locale.ROOT)); } /** * Creates {@link SnapshotType} from the {@link SnapshotProtos.SnapshotDescription.Type} - * @param type the snapshot description type - * @return the protobuf SnapshotDescription type + * @param type the snapshot description type + * @return the protobuf SnapshotDescription type */ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription.Type type) { return SnapshotType.valueOf(type.toString()); @@ -3026,8 +2856,9 @@ public final class ProtobufUtil { * @return the protobuf SnapshotDescription */ public static SnapshotProtos.SnapshotDescription - createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { - SnapshotProtos.SnapshotDescription.Builder builder = SnapshotProtos.SnapshotDescription.newBuilder(); + createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { + 
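// createCompactionState above (and its RegionLoad twin) is a plain enum-name mapping:
CompactionState cs =
  ProtobufUtil.createCompactionState(GetRegionInfoResponse.CompactionState.MAJOR);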
SnapshotProtos.SnapshotDescription.Builder builder = + SnapshotProtos.SnapshotDescription.newBuilder(); if (snapshotDesc.getTableName() != null) { builder.setTable(snapshotDesc.getTableNameAsString()); } @@ -3040,8 +2871,10 @@ public final class ProtobufUtil { if (snapshotDesc.getCreationTime() != -1L) { builder.setCreationTime(snapshotDesc.getCreationTime()); } - if (snapshotDesc.getTtl() != -1L && - snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { + if ( + snapshotDesc.getTtl() != -1L + && snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE) + ) { builder.setTtl(snapshotDesc.getTtl()); } if (snapshotDesc.getVersion() != -1) { @@ -3060,36 +2893,34 @@ public final class ProtobufUtil { * @return the POJO SnapshotDescription */ public static SnapshotDescription - createSnapshotDesc(SnapshotProtos.SnapshotDescription snapshotDesc) { + createSnapshotDesc(SnapshotProtos.SnapshotDescription snapshotDesc) { final Map snapshotProps = new HashMap<>(); snapshotProps.put("TTL", snapshotDesc.getTtl()); snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, snapshotDesc.getMaxFileSize()); return new SnapshotDescription(snapshotDesc.getName(), - snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, - createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), - snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); + snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, + createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), + snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); } public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats stats) { return new RegionLoadStats(stats.getMemStoreLoad(), stats.getHeapOccupancy(), - stats.getCompactionPressure()); + stats.getCompactionPressure()); } /** - * @param msg - * @return A String version of the passed in msg + * n * @return A String version of the passed in msg */ public static String toText(Message msg) { return TextFormat.shortDebugString(msg); } - public static byte [] toBytes(ByteString bs) { + public static byte[] toBytes(ByteString bs) { return bs.toByteArray(); } /** - * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. - * @throws IOException + * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. 
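// Sketch of the proto -> POJO direction handled by createSnapshotDesc above; the snapshot
// and table names are made up. The TTL and MAX_FILESIZE values end up in the returned
// snapshotProps map.
SnapshotProtos.SnapshotDescription proto = SnapshotProtos.SnapshotDescription.newBuilder()
  .setName("snap1").setTable("t1").build();
SnapshotDescription pojo = ProtobufUtil.createSnapshotDesc(proto);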
n */ public static T call(Callable callable) throws IOException { try { @@ -3100,21 +2931,20 @@ public final class ProtobufUtil { } /** - * Create a protocol buffer GetStoreFileRequest for a given region name - * - * @param regionName the name of the region to get info - * @param family the family to get store file list - * @return a protocol buffer GetStoreFileRequest - */ - public static GetStoreFileRequest - buildGetStoreFileRequest(final byte[] regionName, final byte[] family) { - GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); - return builder.build(); - } + * Create a protocol buffer GetStoreFileRequest for a given region name + * @param regionName the name of the region to get info + * @param family the family to get store file list + * @return a protocol buffer GetStoreFileRequest + */ + public static GetStoreFileRequest buildGetStoreFileRequest(final byte[] regionName, + final byte[] family) { + GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); + return builder.build(); + } /** * Create a CloseRegionRequest for a given region name @@ -3126,12 +2956,12 @@ public final class ProtobufUtil { } public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte[] regionName, - ServerName destinationServer) { + ServerName destinationServer) { return buildCloseRegionRequest(server, regionName, destinationServer, -1); } public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte[] regionName, - ServerName destinationServer, long closeProcId) { + ServerName destinationServer, long closeProcId) { CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); RegionSpecifier region = RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); @@ -3147,9 +2977,9 @@ public final class ProtobufUtil { } public static ProcedureDescription buildProcedureDescription(String signature, String instance, - Map props) { + Map props) { ProcedureDescription.Builder builder = - ProcedureDescription.newBuilder().setSignature(signature).setInstance(instance); + ProcedureDescription.newBuilder().setSignature(signature).setInstance(instance); if (props != null && !props.isEmpty()) { props.entrySet().forEach(entry -> builder.addConfiguration( NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue()).build())); @@ -3158,29 +2988,27 @@ public final class ProtobufUtil { } /** - * Get the Meta region state from the passed data bytes. Can handle both old and new style - * server names. - * @param data protobuf serialized data with meta server name. + * Get the Meta region state from the passed data bytes. Can handle both old and new style server + * names. + * @param data protobuf serialized data with meta server name. * @param replicaId replica ID for this region * @return RegionState instance corresponding to the serialized data. * @throws DeserializationException if the data is invalid. 
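// The request builders above need only a region name and a family; 'regionInfo' is assumed
// from the caller and "cf" is a hypothetical column family.
GetStoreFileRequest req =
  ProtobufUtil.buildGetStoreFileRequest(regionInfo.getRegionName(), Bytes.toBytes("cf"));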
*/ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replicaId) - throws DeserializationException { + throws DeserializationException { RegionState.State state = RegionState.State.OPEN; ServerName serverName; if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) { try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.MetaRegionServer rl = - ZooKeeperProtos.MetaRegionServer.parser().parseFrom(data, prefixLen, - data.length - prefixLen); + ZooKeeperProtos.MetaRegionServer rl = ZooKeeperProtos.MetaRegionServer.parser() + .parseFrom(data, prefixLen, data.length - prefixLen); if (rl.hasState()) { state = RegionState.State.convert(rl.getState()); } HBaseProtos.ServerName sn = rl.getServer(); - serverName = ServerName.valueOf( - sn.getHostName(), sn.getPort(), sn.getStartCode()); + serverName = ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); } catch (InvalidProtocolBufferException e) { throw new DeserializationException("Unable to parse meta region location"); } @@ -3191,21 +3019,21 @@ public final class ProtobufUtil { if (serverName == null) { state = RegionState.State.OFFLINE; } - return new RegionState(RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); + return new RegionState( + RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), + state, serverName); } /** * Get a ServerName from the passed in data bytes. - * @param data Data with a serialize server name in it; can handle the old style - * servername where servername was host and port. Works too with data that - * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that - * has a serialized {@link ServerName} in it. - * @return Returns null if data is null else converts passed data - * to a ServerName instance. - * @throws DeserializationException + * @param data Data with a serialize server name in it; can handle the old style servername where + * servername was host and port. Works too with data that begins w/ the pb 'PBUF' + * magic and that is then followed by a protobuf that has a serialized + * {@link ServerName} in it. + * @return Returns null if data is null else converts passed data to a ServerName + * instance. n */ - public static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException { + public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException { if (data == null || data.length <= 0) return null; if (ProtobufMagic.isPBMagicPrefix(data)) { int prefixLen = ProtobufMagic.lengthOfPBMagic(); @@ -3213,13 +3041,13 @@ public final class ProtobufUtil { ZooKeeperProtos.Master rss = ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName sn = - rss.getMaster(); + rss.getMaster(); return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); - } catch (/*InvalidProtocolBufferException*/IOException e) { + } catch (/* InvalidProtocolBufferException */IOException e) { // A failed parse of the znode is pretty catastrophic. Rather than loop // retrying hoping the bad bytes will changes, and rather than change // the signature on this method to add an IOE which will send ripples all - // over the code base, throw a RuntimeException. This should "never" happen. + // over the code base, throw a RuntimeException. This should "never" happen. 
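// Typical use of the two ZooKeeper-data parsers above; 'data' is the raw byte[] read from
// the meta-location (or master) znode and is assumed from the calling context.
RegionState metaState = ProtobufUtil.parseMetaRegionStateFrom(data,
  org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID);
ServerName master = ProtobufUtil.parseServerNameFrom(data);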
// Fail fast if it does. throw new DeserializationException(e); } @@ -3260,7 +3088,8 @@ public final class ProtobufUtil { JsonArray lockedResourceJsons = new JsonArray(lockedResourceProtos.size()); for (LockServiceProtos.LockedResource lockedResourceProto : lockedResourceProtos) { try { - JsonElement lockedResourceJson = ProtobufMessageConverter.toJsonElement(lockedResourceProto); + JsonElement lockedResourceJson = + ProtobufMessageConverter.toJsonElement(lockedResourceProto); lockedResourceJsons.add(lockedResourceJson); } catch (InvalidProtocolBufferException e) { lockedResourceJsons.add(e.toString()); @@ -3271,11 +3100,11 @@ public final class ProtobufUtil { /** * Convert a RegionInfo to a Proto RegionInfo - * * @param info the RegionInfo to convert * @return the converted Proto RegionInfo */ - public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { + public static HBaseProtos.RegionInfo + toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { if (info == null) { return null; } @@ -3296,18 +3125,18 @@ public final class ProtobufUtil { /** * Convert HBaseProto.RegionInfo to a RegionInfo - * * @param proto the RegionInfo to convert * @return the converted RegionInfo */ - public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBaseProtos.RegionInfo proto) { + public static org.apache.hadoop.hbase.client.RegionInfo + toRegionInfo(final HBaseProtos.RegionInfo proto) { if (proto == null) { return null; } TableName tableName = ProtobufUtil.toTableName(proto.getTableName()); long regionId = proto.getRegionId(); int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; - int replicaId = proto.hasReplicaId()? proto.getReplicaId(): defaultReplicaId; + int replicaId = proto.hasReplicaId() ? proto.getReplicaId() : defaultReplicaId; if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) { return RegionInfoBuilder.FIRST_META_REGIONINFO; } @@ -3323,12 +3152,8 @@ public final class ProtobufUtil { if (proto.hasSplit()) { split = proto.getSplit(); } - RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(startKey) - .setEndKey(endKey) - .setRegionId(regionId) - .setReplicaId(replicaId) - .setSplit(split); + RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey) + .setEndKey(endKey).setRegionId(regionId).setReplicaId(replicaId).setSplit(split); if (proto.hasOffline()) { rib.setOffline(proto.getOffline()); } @@ -3351,18 +3176,17 @@ public final class ProtobufUtil { return new HRegionLocation(regionInfo, serverName, proto.getSeqNum()); } - public static List toSnapshotDescriptionList( - GetCompletedSnapshotsResponse response, Pattern pattern) { + public static List + toSnapshotDescriptionList(GetCompletedSnapshotsResponse response, Pattern pattern) { return response.getSnapshotsList().stream().map(ProtobufUtil::createSnapshotDesc) - .filter(snap -> pattern != null ? pattern.matcher(snap.getName()).matches() : true) - .collect(Collectors.toList()); + .filter(snap -> pattern != null ? 
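// The two toRegionInfo overloads above convert in both directions and are null-safe; the
// default replica of hbase:meta is special-cased to FIRST_META_REGIONINFO. 'regionInfo' is
// assumed from the caller.
HBaseProtos.RegionInfo regionProto = ProtobufUtil.toRegionInfo(regionInfo);  // client -> proto
org.apache.hadoop.hbase.client.RegionInfo back = ProtobufUtil.toRegionInfo(regionProto);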
pattern.matcher(snap.getName()).matches() : true) + .collect(Collectors.toList()); } - public static CacheEvictionStats toCacheEvictionStats( - HBaseProtos.CacheEvictionStats stats) throws IOException{ + public static CacheEvictionStats toCacheEvictionStats(HBaseProtos.CacheEvictionStats stats) + throws IOException { CacheEvictionStatsBuilder builder = CacheEvictionStats.builder(); - builder.withEvictedBlocks(stats.getEvictedBlocks()) - .withMaxCacheSize(stats.getMaxCacheSize()); + builder.withEvictedBlocks(stats.getEvictedBlocks()).withMaxCacheSize(stats.getMaxCacheSize()); if (stats.getExceptionCount() > 0) { for (HBaseProtos.RegionExceptionMessage exception : stats.getExceptionList()) { HBaseProtos.RegionSpecifier rs = exception.getRegion(); @@ -3373,65 +3197,52 @@ public final class ProtobufUtil { return builder.build(); } - public static HBaseProtos.CacheEvictionStats toCacheEvictionStats( - CacheEvictionStats cacheEvictionStats) { - HBaseProtos.CacheEvictionStats.Builder builder - = HBaseProtos.CacheEvictionStats.newBuilder(); + public static HBaseProtos.CacheEvictionStats + toCacheEvictionStats(CacheEvictionStats cacheEvictionStats) { + HBaseProtos.CacheEvictionStats.Builder builder = HBaseProtos.CacheEvictionStats.newBuilder(); for (Map.Entry entry : cacheEvictionStats.getExceptions().entrySet()) { - builder.addException( - RegionExceptionMessage.newBuilder() - .setRegion(RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, entry.getKey())) - .setException(ResponseConverter.buildException(entry.getValue())) - .build() - ); + builder.addException(RegionExceptionMessage.newBuilder() + .setRegion( + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey())) + .setException(ResponseConverter.buildException(entry.getValue())).build()); } - return builder - .setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) - .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()) - .build(); + return builder.setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) + .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()).build(); } - public static ClusterStatusProtos.ReplicationLoadSource toReplicationLoadSource( - ReplicationLoadSource rls) { - return ClusterStatusProtos.ReplicationLoadSource.newBuilder() - .setPeerID(rls.getPeerID()) - .setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) - .setSizeOfLogQueue((int) rls.getSizeOfLogQueue()) - .setTimeStampOfLastShippedOp(rls.getTimestampOfLastShippedOp()) - .setReplicationLag(rls.getReplicationLag()) - .setQueueId(rls.getQueueId()) - .setRecovered(rls.isRecovered()) - .setRunning(rls.isRunning()) - .setEditsSinceRestart(rls.hasEditsSinceRestart()) - .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) - .setOPsShipped(rls.getOPsShipped()) - .setEditsRead(rls.getEditsRead()) - .build(); + public static ClusterStatusProtos.ReplicationLoadSource + toReplicationLoadSource(ReplicationLoadSource rls) { + return ClusterStatusProtos.ReplicationLoadSource.newBuilder().setPeerID(rls.getPeerID()) + .setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) + .setSizeOfLogQueue((int) rls.getSizeOfLogQueue()) + .setTimeStampOfLastShippedOp(rls.getTimestampOfLastShippedOp()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.isRecovered()).setRunning(rls.isRunning()) + .setEditsSinceRestart(rls.hasEditsSinceRestart()) + .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) + .setOPsShipped(rls.getOPsShipped()).setEditsRead(rls.getEditsRead()).build(); 
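// toSnapshotDescriptionList above applies the optional Pattern client-side; a null pattern
// keeps every snapshot. 'response' is a GetCompletedSnapshotsResponse from the master and
// the pattern is illustrative.
List<SnapshotDescription> all = ProtobufUtil.toSnapshotDescriptionList(response, null);
List<SnapshotDescription> backups =
  ProtobufUtil.toSnapshotDescriptionList(response, Pattern.compile("backup-.*"));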
} - public static ClusterStatusProtos.ReplicationLoadSink toReplicationLoadSink( - ReplicationLoadSink rls) { + public static ClusterStatusProtos.ReplicationLoadSink + toReplicationLoadSink(ReplicationLoadSink rls) { return ClusterStatusProtos.ReplicationLoadSink.newBuilder() - .setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) - .setTimeStampsOfLastAppliedOp(rls.getTimestampsOfLastAppliedOp()) - .setTimestampStarted(rls.getTimestampStarted()) - .setTotalOpsProcessed(rls.getTotalOpsProcessed()) - .build(); + .setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) + .setTimeStampsOfLastAppliedOp(rls.getTimestampsOfLastAppliedOp()) + .setTimestampStarted(rls.getTimestampStarted()) + .setTotalOpsProcessed(rls.getTotalOpsProcessed()).build(); } public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) { if (timeRange == null) { timeRange = TimeRange.allTime(); } - return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()) - .setTo(timeRange.getMax()) + return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax()) .build(); } public static byte[] toCompactionEventTrackerBytes(Set storeFiles) { HFileProtos.CompactionEventTracker.Builder builder = - HFileProtos.CompactionEventTracker.newBuilder(); + HFileProtos.CompactionEventTracker.newBuilder(); storeFiles.forEach(sf -> builder.addCompactedStoreFile(ByteString.copyFromUtf8(sf))); return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); } @@ -3440,20 +3251,20 @@ public final class ProtobufUtil { if (bytes != null && ProtobufUtil.isPBMagicPrefix(bytes)) { int pbLen = ProtobufUtil.lengthOfPBMagic(); HFileProtos.CompactionEventTracker.Builder builder = - HFileProtos.CompactionEventTracker.newBuilder(); + HFileProtos.CompactionEventTracker.newBuilder(); ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen); HFileProtos.CompactionEventTracker compactionEventTracker = builder.build(); List compactedStoreFiles = compactionEventTracker.getCompactedStoreFileList(); if (compactedStoreFiles != null && compactedStoreFiles.size() != 0) { return compactedStoreFiles.stream().map(ByteString::toStringUtf8) - .collect(Collectors.toSet()); + .collect(Collectors.toSet()); } } return Collections.emptySet(); } - public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount( - RegionStatesCount regionStatesCount) { + public static ClusterStatusProtos.RegionStatesCount + toTableRegionStatesCount(RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3466,17 +3277,13 @@ public final class ProtobufUtil { regionsInTransition = regionStatesCount.getRegionsInTransition(); totalRegions = regionStatesCount.getTotalRegions(); } - return ClusterStatusProtos.RegionStatesCount.newBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return ClusterStatusProtos.RegionStatesCount.newBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } - public static RegionStatesCount toTableRegionStatesCount( - ClusterStatusProtos.RegionStatesCount regionStatesCount) { + public static RegionStatesCount + toTableRegionStatesCount(ClusterStatusProtos.RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3489,59 +3296,47 
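// toCompactionEventTrackerBytes above serializes the set of compacted store file names with
// the PB magic prefix prepended; the file names here are made up.
byte[] tracked = ProtobufUtil.toCompactionEventTrackerBytes(
  new HashSet<>(Arrays.asList("f1a2b3", "c4d5e6")));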
@@ public final class ProtobufUtil { splitRegions = regionStatesCount.getSplitRegions(); totalRegions = regionStatesCount.getTotalRegions(); } - return new RegionStatesCount.RegionStatesCountBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return new RegionStatesCount.RegionStatesCountBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } /** * Convert Protobuf class - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} - * To client SlowLog Payload class {@link OnlineLogRecord} - * + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} To client + * SlowLog Payload class {@link OnlineLogRecord} * @param slowLogPayload SlowLog Payload protobuf instance * @return SlowLog Payload for client usecase */ - private static LogEntry getSlowLogRecord( - final TooSlowLog.SlowLogPayload slowLogPayload) { - OnlineLogRecord onlineLogRecord = new OnlineLogRecord.OnlineLogRecordBuilder() - .setCallDetails(slowLogPayload.getCallDetails()) - .setClientAddress(slowLogPayload.getClientAddress()) - .setMethodName(slowLogPayload.getMethodName()) - .setMultiGetsCount(slowLogPayload.getMultiGets()) - .setMultiMutationsCount(slowLogPayload.getMultiMutations()) - .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) - .setParam(slowLogPayload.getParam()) - .setProcessingTime(slowLogPayload.getProcessingTime()) - .setQueueTime(slowLogPayload.getQueueTime()) - .setRegionName(slowLogPayload.getRegionName()) - .setResponseSize(slowLogPayload.getResponseSize()) - .setServerClass(slowLogPayload.getServerClass()) - .setStartTime(slowLogPayload.getStartTime()) - .setUserName(slowLogPayload.getUserName()) - .build(); + private static LogEntry getSlowLogRecord(final TooSlowLog.SlowLogPayload slowLogPayload) { + OnlineLogRecord onlineLogRecord = + new OnlineLogRecord.OnlineLogRecordBuilder().setCallDetails(slowLogPayload.getCallDetails()) + .setClientAddress(slowLogPayload.getClientAddress()) + .setMethodName(slowLogPayload.getMethodName()) + .setMultiGetsCount(slowLogPayload.getMultiGets()) + .setMultiMutationsCount(slowLogPayload.getMultiMutations()) + .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) + .setParam(slowLogPayload.getParam()).setProcessingTime(slowLogPayload.getProcessingTime()) + .setQueueTime(slowLogPayload.getQueueTime()).setRegionName(slowLogPayload.getRegionName()) + .setResponseSize(slowLogPayload.getResponseSize()) + .setServerClass(slowLogPayload.getServerClass()).setStartTime(slowLogPayload.getStartTime()) + .setUserName(slowLogPayload.getUserName()).build(); return onlineLogRecord; } /** - * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} - * + * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} * @param logEntry slowlog response protobuf instance * @return list of SlowLog payloads for client usecase */ - public static List toSlowLogPayloads( - final HBaseProtos.LogEntry logEntry) { + public static List toSlowLogPayloads(final HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if 
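// The two RegionStatesCount converters above mirror each other field by field; the counts
// used here are arbitrary.
RegionStatesCount counts = new RegionStatesCount.RegionStatesCountBuilder()
  .setOpenRegions(10).setSplitRegions(1).setClosedRegions(0)
  .setRegionsInTransition(2).setTotalRegions(13).build();
ClusterStatusProtos.RegionStatesCount statesProto = ProtobufUtil.toTableRegionStatesCount(counts);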
(logClassName.contains("SlowLogResponses")) { - AdminProtos.SlowLogResponses slowLogResponses = (AdminProtos.SlowLogResponses) method - .invoke(null, logEntry.getLogMessage()); + AdminProtos.SlowLogResponses slowLogResponses = + (AdminProtos.SlowLogResponses) method.invoke(null, logEntry.getLogMessage()); return slowLogResponses.getSlowLogPayloadsList().stream() .map(ProtobufUtil::getSlowLogRecord).collect(Collectors.toList()); } @@ -3554,7 +3349,6 @@ public final class ProtobufUtil { /** * Convert {@link ClearSlowLogResponses} to boolean - * * @param clearSlowLogResponses Clear slowlog response protobuf instance * @return boolean representing clear slowlog response */ @@ -3570,13 +3364,13 @@ public final class ProtobufUtil { if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = condition.hasTimeRange() + ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3607,13 +3401,13 @@ public final class ProtobufUtil { if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = condition.hasTimeRange() + ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3628,8 +3422,8 @@ public final class ProtobufUtil { } else if (m instanceof Append) { return builder.build((Append) m); } else { - throw new DoNotRetryIOException("Unsupported mutate type: " + m.getClass() - .getSimpleName().toUpperCase()); + throw new DoNotRetryIOException( + "Unsupported mutate type: " + m.getClass().getSimpleName().toUpperCase()); } } else { return builder.build(new RowMutations(mutations.get(0).getRow()).add(mutations)); @@ -3643,15 +3437,15 @@ public final class ProtobufUtil { final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange) throws IOException { - ClientProtos.Condition.Builder builder = ClientProtos.Condition.newBuilder() - .setRow(UnsafeByteOperations.unsafeWrap(row)); + ClientProtos.Condition.Builder builder = + ClientProtos.Condition.newBuilder().setRow(UnsafeByteOperations.unsafeWrap(row)); if (filter != null) { builder.setFilter(ProtobufUtil.toFilter(filter)); } else { builder.setFamily(UnsafeByteOperations.unsafeWrap(family)) - .setQualifier(UnsafeByteOperations.unsafeWrap( - qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)) + .setQualifier(UnsafeByteOperations + .unsafeWrap(qualifier == null ? 
HConstants.EMPTY_BYTE_ARRAY : qualifier)) .setComparator(ProtobufUtil.toComparator(new BinaryComparator(value))) .setCompareType(HBaseProtos.CompareType.valueOf(op.name())); } @@ -3665,21 +3459,19 @@ public final class ProtobufUtil { } public static ClientProtos.Condition toCondition(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, - final TimeRange timeRange) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final TimeRange timeRange) + throws IOException { return toCondition(row, family, qualifier, op, value, null, timeRange); } - public static List toBalancerDecisionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerDecisionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerDecisionsResponse")) { MasterProtos.BalancerDecisionsResponse response = - (MasterProtos.BalancerDecisionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerDecisionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerDecisionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException @@ -3689,16 +3481,14 @@ public final class ProtobufUtil { throw new RuntimeException("Invalid response from server"); } - public static List toBalancerRejectionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerRejectionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerRejectionsResponse")) { MasterProtos.BalancerRejectionsResponse response = - (MasterProtos.BalancerRejectionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerRejectionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerRejectionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException @@ -3708,32 +3498,33 @@ public final class ProtobufUtil { throw new RuntimeException("Invalid response from server"); } - public static List getBalancerDecisionEntries( - MasterProtos.BalancerDecisionsResponse response) { + public static List + getBalancerDecisionEntries(MasterProtos.BalancerDecisionsResponse response) { List balancerDecisions = response.getBalancerDecisionList(); if (CollectionUtils.isEmpty(balancerDecisions)) { return Collections.emptyList(); } - return balancerDecisions.stream().map(balancerDecision -> new BalancerDecision.Builder() - .setInitTotalCost(balancerDecision.getInitTotalCost()) - .setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) - .setComputedTotalCost(balancerDecision.getComputedTotalCost()) - .setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) - .setComputedSteps(balancerDecision.getComputedSteps()) - .setRegionPlans(balancerDecision.getRegionPlansList()).build()) + return balancerDecisions.stream() + .map(balancerDecision -> new BalancerDecision.Builder() + .setInitTotalCost(balancerDecision.getInitTotalCost()) + .setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) + 
.setComputedTotalCost(balancerDecision.getComputedTotalCost()) + .setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) + .setComputedSteps(balancerDecision.getComputedSteps()) + .setRegionPlans(balancerDecision.getRegionPlansList()).build()) .collect(Collectors.toList()); } - public static List getBalancerRejectionEntries( - MasterProtos.BalancerRejectionsResponse response) { + public static List + getBalancerRejectionEntries(MasterProtos.BalancerRejectionsResponse response) { List balancerRejections = response.getBalancerRejectionList(); if (CollectionUtils.isEmpty(balancerRejections)) { return Collections.emptyList(); } - return balancerRejections.stream().map(balancerRejection -> new BalancerRejection.Builder() - .setReason(balancerRejection.getReason()) - .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()) - .build()) + return balancerRejections.stream() + .map(balancerRejection -> new BalancerRejection.Builder() + .setReason(balancerRejection.getReason()) + .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()).build()) .collect(Collectors.toList()); } @@ -3742,8 +3533,7 @@ public final class ProtobufUtil { MasterProtos.BalancerDecisionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() .setLogClassName(balancerDecisionsRequest.getClass().getName()) - .setLogMessage(balancerDecisionsRequest.toByteString()) - .build(); + .setLogMessage(balancerDecisionsRequest.toByteString()).build(); } public static HBaseProtos.LogRequest toBalancerRejectionRequest(int limit) { @@ -3751,58 +3541,43 @@ public final class ProtobufUtil { MasterProtos.BalancerRejectionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() .setLogClassName(balancerRejectionsRequest.getClass().getName()) - .setLogMessage(balancerRejectionsRequest.toByteString()) - .build(); + .setLogMessage(balancerRejectionsRequest.toByteString()).build(); } public static MasterProtos.BalanceRequest toBalanceRequest(BalanceRequest request) { - return MasterProtos.BalanceRequest.newBuilder() - .setDryRun(request.isDryRun()) - .setIgnoreRit(request.isIgnoreRegionsInTransition()) - .build(); + return MasterProtos.BalanceRequest.newBuilder().setDryRun(request.isDryRun()) + .setIgnoreRit(request.isIgnoreRegionsInTransition()).build(); } public static BalanceRequest toBalanceRequest(MasterProtos.BalanceRequest request) { - return BalanceRequest.newBuilder() - .setDryRun(request.hasDryRun() && request.getDryRun()) - .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()) - .build(); + return BalanceRequest.newBuilder().setDryRun(request.hasDryRun() && request.getDryRun()) + .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()).build(); } public static MasterProtos.BalanceResponse toBalanceResponse(BalanceResponse response) { - return MasterProtos.BalanceResponse.newBuilder() - .setBalancerRan(response.isBalancerRan()) + return MasterProtos.BalanceResponse.newBuilder().setBalancerRan(response.isBalancerRan()) .setMovesCalculated(response.getMovesCalculated()) - .setMovesExecuted(response.getMovesExecuted()) - .build(); + .setMovesExecuted(response.getMovesExecuted()).build(); } public static BalanceResponse toBalanceResponse(MasterProtos.BalanceResponse response) { return BalanceResponse.newBuilder() .setBalancerRan(response.hasBalancerRan() && response.getBalancerRan()) .setMovesCalculated(response.hasMovesCalculated() ? 
response.getMovesExecuted() : 0) - .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) - .build(); + .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0).build(); } public static ServerTask getServerTask(ClusterStatusProtos.ServerTask task) { - return ServerTaskBuilder.newBuilder() - .setDescription(task.getDescription()) - .setStatus(task.getStatus()) - .setState(ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + return ServerTaskBuilder.newBuilder().setDescription(task.getDescription()) + .setStatus(task.getStatus()).setState(ServerTask.State.valueOf(task.getState().name())) + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } public static ClusterStatusProtos.ServerTask toServerTask(ServerTask task) { - return ClusterStatusProtos.ServerTask.newBuilder() - .setDescription(task.getDescription()) + return ClusterStatusProtos.ServerTask.newBuilder().setDescription(task.getDescription()) .setStatus(task.getStatus()) .setState(ClusterStatusProtos.ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index d00627a0e57..09f7895b977 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -127,8 +127,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; @@ -147,8 +146,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalance import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .SetSnapshotCleanupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest; @@ -168,8 +166,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Remov import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; /** - * Helper utility to build protocol buffer requests, - * or build components for protocol buffer requests. + * Helper utility to build protocol buffer requests, or build components for protocol buffer + * requests. */ @InterfaceAudience.Private public final class RequestConverter { @@ -177,42 +175,31 @@ public final class RequestConverter { private RequestConverter() { } -// Start utilities for Client + // Start utilities for Client /** * Create a protocol buffer GetRequest for a client Get - * * @param regionName the name of the region to get - * @param get the client Get + * @param get the client Get * @return a protocol buffer GetRequest */ - public static GetRequest buildGetRequest(final byte[] regionName, - final Get get) throws IOException { + public static GetRequest buildGetRequest(final byte[] regionName, final Get get) + throws IOException { GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setGet(ProtobufUtil.toGet(get)); return builder.build(); } /** - * Create a protocol buffer MutateRequest for a client increment - * - * @param regionName - * @param row - * @param family - * @param qualifier - * @param amount - * @param durability - * @return a mutate request + * Create a protocol buffer MutateRequest for a client increment nnnnnn * @return a mutate request */ - public static MutateRequest buildIncrementRequest( - final byte[] regionName, final byte[] row, final byte[] family, final byte[] qualifier, - final long amount, final Durability durability, long nonceGroup, long nonce) { + public static MutateRequest buildIncrementRequest(final byte[] regionName, final byte[] row, + final byte[] family, final byte[] qualifier, final long amount, final Durability durability, + long nonceGroup, long nonce) { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); MutationProto.Builder mutateBuilder = MutationProto.newBuilder(); @@ -223,8 +210,8 @@ public final class RequestConverter { columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family)); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(amount))); - valueBuilder.setQualifier(UnsafeByteOperations - .unsafeWrap(qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)); + valueBuilder.setQualifier( + UnsafeByteOperations.unsafeWrap(qualifier == null ? 
HConstants.EMPTY_BYTE_ARRAY : qualifier)); valueBuilder.setTimestamp(HConstants.LATEST_TIMESTAMP); columnBuilder.addQualifierValue(valueBuilder.build()); mutateBuilder.addColumnValue(columnBuilder.build()); @@ -240,9 +227,7 @@ public final class RequestConverter { /** * Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append - * - * @return a mutate request - * @throws IOException + * @return a mutate request n */ public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value, @@ -262,21 +247,19 @@ public final class RequestConverter { /** * Create a protocol buffer MultiRequest for conditioned row mutations - * - * @return a multi request - * @throws IOException + * @return a multi request n */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, - final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange, + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final byte[] value, final Filter filter, final TimeRange timeRange, final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { - return buildMultiRequest(regionName, rowMutations, ProtobufUtil.toCondition(row, family, - qualifier, op, value, filter, timeRange), nonceGroup, nonce); + return buildMultiRequest(regionName, rowMutations, + ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange), nonceGroup, + nonce); } /** * Create a protocol buffer MultiRequest for row mutations - * * @return a multi request */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, @@ -294,7 +277,7 @@ public final class RequestConverter { boolean hasNonce = false; ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); MutationProto.Builder mutationBuilder = MutationProto.newBuilder(); - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -321,104 +304,74 @@ public final class RequestConverter { } /** - * Create a protocol buffer MutateRequest for a put - * - * @param regionName - * @param put - * @return a mutate request - * @throws IOException + * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Put put) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, put, MutationProto.newBuilder())); return builder.build(); } /** - * Create a protocol buffer MutateRequest for an append - * - * @param regionName - * @param append - * @return a mutate request - * @throws IOException + * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Append 
append, long nonceGroup, long nonce) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append, + long nonceGroup, long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } - builder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, append, - MutationProto.newBuilder(), nonce)); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.APPEND, append, MutationProto.newBuilder(), nonce)); return builder.build(); } /** - * Create a protocol buffer MutateRequest for a client increment - * - * @param regionName - * @param increment - * @return a mutate request + * Create a protocol buffer MutateRequest for a client increment nn * @return a mutate request */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Increment increment, final long nonceGroup, final long nonce) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment, + final long nonceGroup, final long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } builder.setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, increment, - MutationProto.newBuilder(), nonce)); + MutationProto.newBuilder(), nonce)); return builder.build(); } /** - * Create a protocol buffer MutateRequest for a delete - * - * @param regionName - * @param delete - * @return a mutate request - * @throws IOException + * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Delete delete) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); - builder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, - MutationProto.newBuilder())); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, delete, MutationProto.newBuilder())); return builder.build(); } public static RegionAction.Builder getRegionActionBuilderWithRegion( - final RegionAction.Builder regionActionBuilder, final byte [] regionName) { + final RegionAction.Builder regionActionBuilder, final byte[] regionName) { RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); regionActionBuilder.setRegion(region); return regionActionBuilder; } /** - * Create a protocol buffer ScanRequest for a client Scan - * - * @param regionName - * @param scan - * @param numberOfRows - * @param closeScanner - * @return a scan 
request - * @throws IOException + * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n */ public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int numberOfRows, - boolean closeScanner) throws IOException { + boolean closeScanner) throws IOException { ScanRequest.Builder builder = ScanRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setNumberOfRows(numberOfRows); @@ -435,14 +388,10 @@ public final class RequestConverter { } /** - * Create a protocol buffer ScanRequest for a scanner id - * @param scannerId - * @param numberOfRows - * @param closeScanner - * @return a scan request + * Create a protocol buffer ScanRequest for a scanner id nnn * @return a scan request */ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner, - boolean trackMetrics) { + boolean trackMetrics) { ScanRequest.Builder builder = ScanRequest.newBuilder(); builder.setNumberOfRows(numberOfRows); builder.setCloseScanner(closeScanner); @@ -454,15 +403,10 @@ public final class RequestConverter { } /** - * Create a protocol buffer ScanRequest for a scanner id - * @param scannerId - * @param numberOfRows - * @param closeScanner - * @param nextCallSeq - * @return a scan request + * Create a protocol buffer ScanRequest for a scanner id nnnn * @return a scan request */ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner, - long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) { + long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) { ScanRequest.Builder builder = ScanRequest.newBuilder(); builder.setNumberOfRows(numberOfRows); builder.setCloseScanner(closeScanner); @@ -479,68 +423,48 @@ public final class RequestConverter { } /** - * Create a protocol buffer bulk load request - * - * @param familyPaths - * @param regionName - * @param assignSeqNum - * @param userToken - * @param bulkToken - * @return a bulk load request + * Create a protocol buffer bulk load request nnnnn * @return a bulk load request */ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( - final List> familyPaths, - final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken) { + final List> familyPaths, final byte[] regionName, boolean assignSeqNum, + final Token userToken, final String bulkToken) { return buildBulkLoadHFileRequest(familyPaths, regionName, assignSeqNum, userToken, bulkToken, - false, null, true); + false, null, true); } /** - * Create a protocol buffer bulk load request - * - * @param familyPaths - * @param regionName - * @param assignSeqNum - * @param userToken - * @param bulkToken - * @param copyFiles - * @return a bulk load request + * Create a protocol buffer bulk load request nnnnnn * @return a bulk load request */ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( - final List> familyPaths, final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken, boolean copyFiles, - List clusterIds, boolean replicate) { - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + final List> familyPaths, final byte[] regionName, boolean assignSeqNum, + final Token userToken, final String bulkToken, boolean copyFiles, List clusterIds, + boolean replicate) { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, 
regionName); ClientProtos.DelegationToken protoDT = null; if (userToken != null) { - protoDT = - ClientProtos.DelegationToken.newBuilder() - .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) - .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + protoDT = ClientProtos.DelegationToken.newBuilder() + .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) + .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); } - List protoFamilyPaths = new ArrayList<>(familyPaths.size()); + List protoFamilyPaths = + new ArrayList<>(familyPaths.size()); if (!familyPaths.isEmpty()) { - ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder - = ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); - for(Pair el: familyPaths) { - protoFamilyPaths.add(pathBuilder - .setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) + ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder = + ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); + for (Pair el : familyPaths) { + protoFamilyPaths.add(pathBuilder.setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) .setPath(el.getSecond()).build()); } pathBuilder.clear(); } - BulkLoadHFileRequest.Builder request = - ClientProtos.BulkLoadHFileRequest.newBuilder() - .setRegion(region) - .setAssignSeqNum(assignSeqNum) - .addAllFamilyPath(protoFamilyPaths); + BulkLoadHFileRequest.Builder request = ClientProtos.BulkLoadHFileRequest.newBuilder() + .setRegion(region).setAssignSeqNum(assignSeqNum).addAllFamilyPath(protoFamilyPaths); if (userToken != null) { request.setFsToken(protoDT); } @@ -558,69 +482,65 @@ public final class RequestConverter { /** * Create a protocol buffer multi request for a list of actions. Propagates Actions original * index. The passed in multiRequestBuilder will be populated with region actions. - * @param regionName The region name of the actions. - * @param actions The actions that are grouped by the same region name. + * @param regionName The region name of the actions. + * @param actions The actions that are grouped by the same region name. * @param multiRequestBuilder The multiRequestBuilder to be populated with region actions. * @param regionActionBuilder regionActionBuilder to be used to build region action. - * @param actionBuilder actionBuilder to be used to build action. - * @param mutationBuilder mutationBuilder to be used to build mutation. - * @param nonceGroup nonceGroup to be applied. - * @param indexMap Map of created RegionAction to the original index for a - * RowMutations/CheckAndMutate within the original list of actions - * @throws IOException + * @param actionBuilder actionBuilder to be used to build action. + * @param mutationBuilder mutationBuilder to be used to build mutation. + * @param nonceGroup nonceGroup to be applied. 
+ * @param indexMap Map of created RegionAction to the original index for a + * RowMutations/CheckAndMutate within the original list of actions n */ - public static void buildRegionActions(final byte[] regionName, - final List actions, final MultiRequest.Builder multiRequestBuilder, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder, - long nonceGroup, final Map indexMap) throws IOException { + public static void buildRegionActions(final byte[] regionName, final List actions, + final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, + long nonceGroup, final Map indexMap) throws IOException { regionActionBuilder.clear(); - RegionAction.Builder builder = getRegionActionBuilderWithRegion( - regionActionBuilder, regionName); + RegionAction.Builder builder = + getRegionActionBuilderWithRegion(regionActionBuilder, regionName); ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null; boolean hasNonce = false; List rowMutationsList = new ArrayList<>(); List checkAndMutates = new ArrayList<>(); - for (Action action: actions) { + for (Action action : actions) { Row row = action.getAction(); actionBuilder.clear(); actionBuilder.setIndex(action.getOriginalIndex()); mutationBuilder.clear(); if (row instanceof Get) { - Get g = (Get)row; + Get g = (Get) row; builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g))); } else if (row instanceof Put) { - builder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.PUT, (Put)row, mutationBuilder))); + builder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutation(MutationType.PUT, (Put) row, mutationBuilder))); } else if (row instanceof Delete) { - builder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.DELETE, (Delete)row, mutationBuilder))); + builder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, (Delete) row, mutationBuilder))); } else if (row instanceof Append) { - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation( - MutationType.APPEND, (Append)row, mutationBuilder, action.getNonce()))); + builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, + (Append) row, mutationBuilder, action.getNonce()))); hasNonce = true; } else if (row instanceof Increment) { - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation( - MutationType.INCREMENT, (Increment)row, mutationBuilder, action.getNonce()))); + builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, + (Increment) row, mutationBuilder, action.getNonce()))); hasNonce = true; } else if (row instanceof RegionCoprocessorServiceExec) { RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row; // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString. 
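        // The round trip below bridges two protobuf runtimes: the coprocessor API hands back its
        // request as a non-shaded com.google.protobuf message, while this converter is built
        // against the relocated org.apache.hbase.thirdparty protobuf classes. Serializing to a
        // byte[] and re-wrapping it with the shaded UnsafeByteOperations.unsafeWrap() is the copy
        // referred to above; unsafeWrap itself does not copy the bytes a second time.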
org.apache.hbase.thirdparty.com.google.protobuf.ByteString value = - org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap( - exec.getRequest().toByteArray()); + org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations + .unsafeWrap(exec.getRequest().toByteArray()); if (cpBuilder == null) { cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder(); } else { cpBuilder.clear(); } - builder.addAction(actionBuilder.setServiceCall( - cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) - .setServiceName(exec.getMethod().getService().getFullName()) - .setMethodName(exec.getMethod().getName()) - .setRequest(value))); + builder.addAction(actionBuilder + .setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) + .setServiceName(exec.getMethod().getService().getFullName()) + .setMethodName(exec.getMethod().getName()).setRequest(value))); } else if (row instanceof RowMutations) { rowMutationsList.add(action); } else if (row instanceof CheckAndMutate) { @@ -645,16 +565,14 @@ public final class RequestConverter { builder.clear(); getRegionActionBuilderWithRegion(builder, regionName); - buildRegionAction((RowMutations) action.getAction(), builder, actionBuilder, - mutationBuilder); + buildRegionAction((RowMutations) action.getAction(), builder, actionBuilder, mutationBuilder); builder.setAtomic(true); multiRequestBuilder.addRegionAction(builder.build()); // This rowMutations region action is at (multiRequestBuilder.getRegionActionCount() - 1) // in the overall multiRequest. - indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, - action.getOriginalIndex()); + indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex()); } // Process CheckAndMutate here. 
Similar to RowMutations, we do separate RegionAction for each @@ -665,27 +583,26 @@ public final class RequestConverter { getRegionActionBuilderWithRegion(builder, regionName); CheckAndMutate cam = (CheckAndMutate) action.getAction(); - builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), - cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(), - cam.getTimeRange())); + builder + .setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(), + cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange())); if (cam.getAction() instanceof Put) { actionBuilder.clear(); mutationBuilder.clear(); - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, - (Put) cam.getAction(), mutationBuilder))); + builder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutation(MutationType.PUT, (Put) cam.getAction(), mutationBuilder))); } else if (cam.getAction() instanceof Delete) { actionBuilder.clear(); mutationBuilder.clear(); - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, - (Delete) cam.getAction(), mutationBuilder))); + builder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, (Delete) cam.getAction(), mutationBuilder))); } else if (cam.getAction() instanceof RowMutations) { - buildRegionAction((RowMutations) cam.getAction(), builder, actionBuilder, - mutationBuilder); + buildRegionAction((RowMutations) cam.getAction(), builder, actionBuilder, mutationBuilder); builder.setAtomic(true); } else { - throw new DoNotRetryIOException("CheckAndMutate doesn't support " + - cam.getAction().getClass().getName()); + throw new DoNotRetryIOException( + "CheckAndMutate doesn't support " + cam.getAction().getClass().getName()); } multiRequestBuilder.addRegionAction(builder.build()); @@ -697,18 +614,17 @@ public final class RequestConverter { } private static void buildRegionAction(final RowMutations rowMutations, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { - for (Mutation mutation: rowMutations.getMutations()) { + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, + final MutationProto.Builder mutationBuilder) throws IOException { + for (Mutation mutation : rowMutations.getMutations()) { MutationType mutateType; if (mutation instanceof Put) { mutateType = MutationType.PUT; } else if (mutation instanceof Delete) { mutateType = MutationType.DELETE; } else { - throw new DoNotRetryIOException("RowMutations supports only put and delete, not " + - mutation.getClass().getName()); + throw new DoNotRetryIOException( + "RowMutations supports only put and delete, not " + mutation.getClass().getName()); } mutationBuilder.clear(); MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder); @@ -719,47 +635,46 @@ public final class RequestConverter { /** * Create a protocol buffer multirequest with NO data for a list of actions (data is carried - * otherwise than via protobuf). This means it just notes attributes, whether to write the - * WAL, etc., and the presence in protobuf serves as place holder for the data which is - * coming along otherwise. Note that Get is different. It does not contain 'data' and is always - * carried by protobuf. We return references to the data by adding them to the passed in - * data param. - *

      Propagates Actions original index. - *

      The passed in multiRequestBuilder will be populated with region actions. - * @param regionName The region name of the actions. - * @param actions The actions that are grouped by the same region name. - * @param cells Place to stuff references to actual data. + * otherwise than via protobuf). This means it just notes attributes, whether to write the WAL, + * etc., and the presence in protobuf serves as place holder for the data which is coming along + * otherwise. Note that Get is different. It does not contain 'data' and is always carried by + * protobuf. We return references to the data by adding them to the passed in data + * param. + *

      + * Propagates Actions original index. + *

      + * The passed in multiRequestBuilder will be populated with region actions. + * @param regionName The region name of the actions. + * @param actions The actions that are grouped by the same region name. + * @param cells Place to stuff references to actual data. * @param multiRequestBuilder The multiRequestBuilder to be populated with region actions. * @param regionActionBuilder regionActionBuilder to be used to build region action. - * @param actionBuilder actionBuilder to be used to build action. - * @param mutationBuilder mutationBuilder to be used to build mutation. - * @param nonceGroup nonceGroup to be applied. - * @param indexMap Map of created RegionAction to the original index for a - * RowMutations/CheckAndMutate within the original list of actions - * @throws IOException + * @param actionBuilder actionBuilder to be used to build action. + * @param mutationBuilder mutationBuilder to be used to build mutation. + * @param nonceGroup nonceGroup to be applied. + * @param indexMap Map of created RegionAction to the original index for a + * RowMutations/CheckAndMutate within the original list of actions n */ public static void buildNoDataRegionActions(final byte[] regionName, - final Iterable actions, final List cells, - final MultiRequest.Builder multiRequestBuilder, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder, - long nonceGroup, final Map indexMap) throws IOException { + final Iterable actions, final List cells, + final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, + long nonceGroup, final Map indexMap) throws IOException { regionActionBuilder.clear(); - RegionAction.Builder builder = getRegionActionBuilderWithRegion( - regionActionBuilder, regionName); + RegionAction.Builder builder = + getRegionActionBuilderWithRegion(regionActionBuilder, regionName); ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null; boolean hasNonce = false; List rowMutationsList = new ArrayList<>(); List checkAndMutates = new ArrayList<>(); - for (Action action: actions) { + for (Action action : actions) { Row row = action.getAction(); actionBuilder.clear(); actionBuilder.setIndex(action.getOriginalIndex()); mutationBuilder.clear(); if (row instanceof Get) { - Get g = (Get)row; + Get g = (Get) row; builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g))); } else if (row instanceof Put) { buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder); @@ -777,18 +692,17 @@ public final class RequestConverter { RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row; // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString. 
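        // Note the contrast with the branches above: Put/Delete/Increment/Append are added
        // metadata-only here, with their Cells stashed in the passed-in 'cells' list so the data
        // travels alongside the protobuf request (an empty Delete being the one exception, as
        // noted below), whereas a Get or a coprocessor service call has no Cell payload and is
        // always carried fully inside the protobuf message.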
org.apache.hbase.thirdparty.com.google.protobuf.ByteString value = - org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap( - exec.getRequest().toByteArray()); + org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations + .unsafeWrap(exec.getRequest().toByteArray()); if (cpBuilder == null) { cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder(); } else { cpBuilder.clear(); } - builder.addAction(actionBuilder.setServiceCall( - cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) - .setServiceName(exec.getMethod().getService().getFullName()) - .setMethodName(exec.getMethod().getName()) - .setRequest(value))); + builder.addAction(actionBuilder + .setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) + .setServiceName(exec.getMethod().getService().getFullName()) + .setMethodName(exec.getMethod().getName()).setRequest(value))); } else if (row instanceof RowMutations) { rowMutationsList.add(action); } else if (row instanceof CheckAndMutate) { @@ -832,9 +746,9 @@ public final class RequestConverter { getRegionActionBuilderWithRegion(builder, regionName); CheckAndMutate cam = (CheckAndMutate) action.getAction(); - builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), - cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(), - cam.getTimeRange())); + builder + .setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(), + cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange())); if (cam.getAction() instanceof Put) { actionBuilder.clear(); @@ -866,8 +780,8 @@ public final class RequestConverter { } builder.setAtomic(true); } else { - throw new DoNotRetryIOException("CheckAndMutate doesn't support " + - cam.getAction().getClass().getName()); + throw new DoNotRetryIOException( + "CheckAndMutate doesn't support " + cam.getAction().getClass().getName()); } multiRequestBuilder.addRegionAction(builder.build()); @@ -883,50 +797,48 @@ public final class RequestConverter { } private static void buildNoDataRegionAction(final Put put, final List cells, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException { cells.add(put); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); } - private static void buildNoDataRegionAction(final Delete delete, - final List cells, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + private static void buildNoDataRegionAction(final Delete delete, final List cells, + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, + final MutationProto.Builder mutationBuilder) throws IOException { int size = delete.size(); // Note that a legitimate Delete may have a size of zero; i.e. a Delete that has nothing - // in it but the row to delete. In this case, the current implementation does not make + // in it but the row to delete. 
In this case, the current implementation does not make // a KeyValue to represent a delete-of-all-the-row until we serialize... For such cases // where the size returned is zero, we will send the Delete fully pb'd rather than have // metadata only in the pb and then send the kv along the side in cells. if (size > 0) { cells.add(delete); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); } else { - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); } } private static void buildNoDataRegionAction(final Increment increment, final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(increment); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.INCREMENT, increment, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.INCREMENT, increment, mutationBuilder, nonce))); } - private static void buildNoDataRegionAction(final Append append, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + private static void buildNoDataRegionAction(final Append append, final List cells, + long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(append); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.APPEND, append, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.APPEND, append, mutationBuilder, nonce))); } /** @@ -937,7 +849,7 @@ public final class RequestConverter { final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException { boolean ret = false; - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -966,45 +878,39 @@ public final class RequestConverter { } } -// End utilities for Client -//Start utilities for Admin + // End utilities for Client + // Start utilities for Admin /** * Create a protocol buffer GetRegionInfoRequest for a given region name - * * @param regionName the name of the region to get info * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName) { return buildGetRegionInfoRequest(regionName, false); } /** * Create a protocol buffer GetRegionInfoRequest for a 
given region name - * - * @param regionName the name of the region to get info + * @param regionName the name of the region to get info * @param includeCompactionState indicate if the compaction state is requested * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName, - final boolean includeCompactionState) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, + final boolean includeCompactionState) { return buildGetRegionInfoRequest(regionName, includeCompactionState, false); } /** - * - * @param regionName the name of the region to get info - * @param includeCompactionState indicate if the compaction state is requested - * @param includeBestSplitRow indicate if the bestSplitRow is requested + * @param regionName the name of the region to get info + * @param includeCompactionState indicate if the compaction state is requested + * @param includeBestSplitRow indicate if the bestSplitRow is requested * @return protocol buffer GetRegionInfoRequest */ public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, - final boolean includeCompactionState, boolean includeBestSplitRow) { + final boolean includeCompactionState, boolean includeBestSplitRow) { GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (includeCompactionState) { builder.setCompactionState(includeCompactionState); @@ -1047,7 +953,7 @@ public final class RequestConverter { /** * Create a protocol buffer FlushRegionRequest for a given region name - * @param regionName the name of the region to get info + * @param regionName the name of the region to get info * @param columnFamily column family within a region * @return a protocol buffer FlushRegionRequest */ @@ -1065,13 +971,13 @@ public final class RequestConverter { /** * Create a protocol buffer OpenRegionRequest for a given region - * @param server the serverName for the RPC - * @param region the region to open + * @param server the serverName for the RPC + * @param region the region to open * @param favoredNodes a list of favored nodes * @return a protocol buffer OpenRegionRequest */ - public static OpenRegionRequest buildOpenRegionRequest(ServerName server, - final RegionInfo region, List favoredNodes) { + public static OpenRegionRequest buildOpenRegionRequest(ServerName server, final RegionInfo region, + List favoredNodes) { OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes, -1L)); if (server != null) { @@ -1087,7 +993,7 @@ public final class RequestConverter { * @return a protocol buffer UpdateFavoredNodesRequest */ public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest( - final List>> updateRegionInfos) { + final List>> updateRegionInfos) { UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder(); if (updateRegionInfos != null && !updateRegionInfos.isEmpty()) { RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder(); @@ -1116,12 +1022,10 @@ public final class RequestConverter { /** * Create a CompactRegionRequest for a given region name * @param regionName the name of the region to get info - * @param major indicator if it is a major compaction - * @param 
columnFamily - * @return a CompactRegionRequest + * @param major indicator if it is a major compaction n * @return a CompactRegionRequest */ public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major, - byte[] columnFamily) { + byte[] columnFamily) { CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); @@ -1135,8 +1039,8 @@ public final class RequestConverter { /** * @see #buildRollWALWriterRequest() */ - private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = RollWALWriterRequest.newBuilder() - .build(); + private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = + RollWALWriterRequest.newBuilder().build(); /** * Create a new RollWALWriterRequest @@ -1149,8 +1053,8 @@ public final class RequestConverter { /** * @see #buildGetServerInfoRequest() */ - private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = GetServerInfoRequest.newBuilder() - .build(); + private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = + GetServerInfoRequest.newBuilder().build(); /** * Create a new GetServerInfoRequest @@ -1171,17 +1075,16 @@ public final class RequestConverter { return builder.build(); } -//End utilities for Admin + // End utilities for Admin /** * Convert a byte array to a protocol buffer RegionSpecifier - * - * @param type the region specifier type + * @param type the region specifier type * @param value the region specifier byte array value * @return a protocol buffer RegionSpecifier */ - public static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { + public static RegionSpecifier buildRegionSpecifier(final RegionSpecifierType type, + final byte[] value) { RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); regionBuilder.setValue(UnsafeByteOperations.unsafeWrap(value)); regionBuilder.setType(type); @@ -1189,17 +1092,10 @@ public final class RequestConverter { } /** - * Create a protocol buffer AddColumnRequest - * - * @param tableName - * @param column - * @return an AddColumnRequest + * Create a protocol buffer AddColumnRequest nn * @return an AddColumnRequest */ - public static AddColumnRequest buildAddColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static AddColumnRequest buildAddColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -1209,17 +1105,10 @@ public final class RequestConverter { } /** - * Create a protocol buffer DeleteColumnRequest - * - * @param tableName - * @param columnName - * @return a DeleteColumnRequest + * Create a protocol buffer DeleteColumnRequest nn * @return a DeleteColumnRequest */ - public static DeleteColumnRequest buildDeleteColumnRequest( - final TableName tableName, - final byte [] columnName, - final long nonceGroup, - final long nonce) { + public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName, + final byte[] columnName, final long nonceGroup, final long nonce) { DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); 
builder.setColumnName(UnsafeByteOperations.unsafeWrap(columnName)); @@ -1229,17 +1118,10 @@ public final class RequestConverter { } /** - * Create a protocol buffer ModifyColumnRequest - * - * @param tableName - * @param column - * @return an ModifyColumnRequest + * Create a protocol buffer ModifyColumnRequest nn * @return an ModifyColumnRequest */ - public static ModifyColumnRequest buildModifyColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -1248,9 +1130,9 @@ public final class RequestConverter { return builder.build(); } - public static ModifyColumnStoreFileTrackerRequest - buildModifyColumnStoreFileTrackerRequest(final TableName tableName, final byte[] family, - final String dstSFT, final long nonceGroup, final long nonce) { + public static ModifyColumnStoreFileTrackerRequest buildModifyColumnStoreFileTrackerRequest( + final TableName tableName, final byte[] family, final String dstSFT, final long nonceGroup, + final long nonce) { ModifyColumnStoreFileTrackerRequest.Builder builder = ModifyColumnStoreFileTrackerRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); @@ -1262,16 +1144,13 @@ public final class RequestConverter { } /** - * Create a protocol buffer MoveRegionRequest - * @param encodedRegionName - * @param destServerName - * @return A MoveRegionRequest + * Create a protocol buffer MoveRegionRequest nn * @return A MoveRegionRequest */ public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, - ServerName destServerName) { + ServerName destServerName) { MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, - encodedRegionName)); + builder + .setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, encodedRegionName)); if (destServerName != null) { builder.setDestServerName(ProtobufUtil.toServerName(destServerName)); } @@ -1279,14 +1158,12 @@ public final class RequestConverter { } public static MergeTableRegionsRequest buildMergeTableRegionsRequest( - final byte[][] encodedNameOfdaughaterRegions, - final boolean forcible, - final long nonceGroup, - final long nonce) throws DeserializationException { + final byte[][] encodedNameOfdaughaterRegions, final boolean forcible, final long nonceGroup, + final long nonce) throws DeserializationException { MergeTableRegionsRequest.Builder builder = MergeTableRegionsRequest.newBuilder(); - for (int i = 0; i< encodedNameOfdaughaterRegions.length; i++) { - builder.addRegion(buildRegionSpecifier( - RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfdaughaterRegions[i])); + for (int i = 0; i < encodedNameOfdaughaterRegions.length; i++) { + builder.addRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, + encodedNameOfdaughaterRegions[i])); } builder.setForcible(forcible); builder.setNonceGroup(nonceGroup); @@ -1295,8 +1172,8 @@ public final class RequestConverter { } public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionInfo regionInfo, - final byte[] splitRow, final long 
nonceGroup, final long nonce) - throws DeserializationException { + final byte[] splitRow, final long nonceGroup, final long nonce) + throws DeserializationException { SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder(); builder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo)); if (splitRow != null) { @@ -1308,52 +1185,37 @@ public final class RequestConverter { } /** - * Create a protocol buffer AssignRegionRequest - * - * @param regionName - * @return an AssignRegionRequest + * Create a protocol buffer AssignRegionRequest n * @return an AssignRegionRequest */ - public static AssignRegionRequest buildAssignRegionRequest(final byte [] regionName) { + public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) { AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** - * Creates a protocol buffer UnassignRegionRequest - * - * @param regionName - * @return an UnassignRegionRequest + * Creates a protocol buffer UnassignRegionRequest n * @return an UnassignRegionRequest */ - public static UnassignRegionRequest buildUnassignRegionRequest( - final byte [] regionName) { + public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) { UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** - * Creates a protocol buffer OfflineRegionRequest - * - * @param regionName - * @return an OfflineRegionRequest + * Creates a protocol buffer OfflineRegionRequest n * @return an OfflineRegionRequest */ - public static OfflineRegionRequest buildOfflineRegionRequest(final byte [] regionName) { + public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) { OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** - * Creates a protocol buffer DeleteTableRequest - * - * @param tableName - * @return a DeleteTableRequest + * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest */ - public static DeleteTableRequest buildDeleteTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1363,16 +1225,12 @@ public final class RequestConverter { /** * Creates a protocol buffer TruncateTableRequest - * - * @param tableName name of table to truncate + * @param tableName name of table to truncate * @param preserveSplits True if the splits should be preserved * @return a TruncateTableRequest */ - public static TruncateTableRequest buildTruncateTableRequest( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) { + public static TruncateTableRequest 
buildTruncateTableRequest(final TableName tableName, + final boolean preserveSplits, final long nonceGroup, final long nonce) { TruncateTableRequest.Builder builder = TruncateTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setPreserveSplits(preserveSplits); @@ -1382,15 +1240,10 @@ public final class RequestConverter { } /** - * Creates a protocol buffer EnableTableRequest - * - * @param tableName - * @return an EnableTableRequest + * Creates a protocol buffer EnableTableRequest n * @return an EnableTableRequest */ - public static EnableTableRequest buildEnableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static EnableTableRequest buildEnableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { EnableTableRequest.Builder builder = EnableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1399,15 +1252,10 @@ public final class RequestConverter { } /** - * Creates a protocol buffer DisableTableRequest - * - * @param tableName - * @return a DisableTableRequest + * Creates a protocol buffer DisableTableRequest n * @return a DisableTableRequest */ - public static DisableTableRequest buildDisableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DisableTableRequest buildDisableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setNonceGroup(nonceGroup); @@ -1416,21 +1264,14 @@ public final class RequestConverter { } /** - * Creates a protocol buffer CreateTableRequest - * - * @param tableDescriptor - * @param splitKeys - * @return a CreateTableRequest + * Creates a protocol buffer CreateTableRequest nn * @return a CreateTableRequest */ - public static CreateTableRequest buildCreateTableRequest( - final TableDescriptor tableDescriptor, - final byte [][] splitKeys, - final long nonceGroup, - final long nonce) { + public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor, + final byte[][] splitKeys, final long nonceGroup, final long nonce) { CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); if (splitKeys != null) { - for(byte[] key : splitKeys) { + for (byte[] key : splitKeys) { builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key)); } } @@ -1440,17 +1281,10 @@ public final class RequestConverter { } /** - * Creates a protocol buffer ModifyTableRequest - * - * @param tableName - * @param tableDesc - * @return a ModifyTableRequest + * Creates a protocol buffer ModifyTableRequest nn * @return a ModifyTableRequest */ - public static ModifyTableRequest buildModifyTableRequest( - final TableName tableName, - final TableDescriptor tableDesc, - final long nonceGroup, - final long nonce) { + public static ModifyTableRequest buildModifyTableRequest(final TableName tableName, + final TableDescriptor tableDesc, final long nonceGroup, final long nonce) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc)); @@ -1471,26 +1305,20 @@ public final class RequestConverter { } /** - * Creates a protocol 
buffer GetSchemaAlterStatusRequest - * - * @param tableName - * @return a GetSchemaAlterStatusRequest + * Creates a protocol buffer GetSchemaAlterStatusRequest n * @return a GetSchemaAlterStatusRequest */ - public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest( - final TableName tableName) { + public static GetSchemaAlterStatusRequest + buildGetSchemaAlterStatusRequest(final TableName tableName) { GetSchemaAlterStatusRequest.Builder builder = GetSchemaAlterStatusRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); return builder.build(); } /** - * Creates a protocol buffer GetTableDescriptorsRequest - * - * @param tableNames - * @return a GetTableDescriptorsRequest + * Creates a protocol buffer GetTableDescriptorsRequest n * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final List tableNames) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final List tableNames) { GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); if (tableNames != null) { for (TableName tableName : tableNames) { @@ -1502,13 +1330,12 @@ public final class RequestConverter { /** * Creates a protocol buffer GetTableDescriptorsRequest - * - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableDescriptorsRequest */ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final Pattern pattern, - boolean includeSysTables) { + boolean includeSysTables) { GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); if (pattern != null) { builder.setRegex(pattern.toString()); @@ -1519,13 +1346,12 @@ public final class RequestConverter { /** * Creates a protocol buffer GetTableNamesRequest - * - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableNamesRequest */ public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern pattern, - boolean includeSysTables) { + boolean includeSysTables) { GetTableNamesRequest.Builder builder = GetTableNamesRequest.newBuilder(); if (pattern != null) { builder.setRegex(pattern.toString()); @@ -1536,15 +1362,12 @@ public final class RequestConverter { /** * Creates a protocol buffer GetTableStateRequest - * * @param tableName table to get request for * @return a GetTableStateRequest */ - public static GetTableStateRequest buildGetTableStateRequest( - final TableName tableName) { - return GetTableStateRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .build(); + public static GetTableStateRequest buildGetTableStateRequest(final TableName tableName) { + return GetTableStateRequest.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) + .build(); } /** @@ -1554,7 +1377,7 @@ public final class RequestConverter { */ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final TableState state) { return SetTableStateInMetaRequest.newBuilder().setTableState(state.convert()) - .setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build(); + .setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build(); } /** @@ -1581,20 +1404,17 @@ public 
final class RequestConverter { /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table - * * @param tableName the table name * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final TableName tableName) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final TableName tableName) { return GetTableDescriptorsRequest.newBuilder() - .addTableNames(ProtobufUtil.toProtoTableName(tableName)) - .build(); + .addTableNames(ProtobufUtil.toProtoTableName(tableName)).build(); } /** * Creates a protocol buffer IsMasterRunningRequest - * * @return a IsMasterRunningRequest */ public static IsMasterRunningRequest buildIsMasterRunningRequest() { @@ -1602,21 +1422,15 @@ public final class RequestConverter { } /** - * Creates a protocol buffer SetBalancerRunningRequest - * - * @param on - * @param synchronous - * @return a SetBalancerRunningRequest + * Creates a protocol buffer SetBalancerRunningRequest nn * @return a SetBalancerRunningRequest */ - public static SetBalancerRunningRequest buildSetBalancerRunningRequest( - boolean on, - boolean synchronous) { + public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on, + boolean synchronous) { return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); } /** * Creates a protocol buffer IsBalancerEnabledRequest - * * @return a IsBalancerEnabledRequest */ public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { @@ -1625,28 +1439,23 @@ public final class RequestConverter { /** * Creates a protocol buffer ClearRegionBlockCacheRequest - * * @return a ClearRegionBlockCacheRequest */ public static ClearRegionBlockCacheRequest - buildClearRegionBlockCacheRequest(List hris) { + buildClearRegionBlockCacheRequest(List hris) { ClearRegionBlockCacheRequest.Builder builder = ClearRegionBlockCacheRequest.newBuilder(); - hris.forEach( - hri -> builder.addRegion( - buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()) - )); + hris.forEach(hri -> builder + .addRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()))); return builder.build(); } /** * Creates a protocol buffer GetClusterStatusRequest - * * @return A GetClusterStatusRequest */ public static GetClusterStatusRequest buildGetClusterStatusRequest(EnumSet
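Continuing the aside, a hedged sketch of the table-DDL builders (create with pre-split keys, disable with a nonce), again assuming the shaded-protobuf packages and an illustrative table name:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestConverterDdlSketch {
  public static void main(String[] args) {
    TableName name = TableName.valueOf("example");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();

    // Two split keys pre-split the new table into three regions.
    byte[][] splitKeys = { Bytes.toBytes("m"), Bytes.toBytes("t") };
    CreateTableRequest create = RequestConverter.buildCreateTableRequest(desc, splitKeys, 1L, 1L);

    // The same nonceGroup/nonce pattern protects the disable call against replays.
    DisableTableRequest disable = RequestConverter.buildDisableTableRequest(name, 1L, 2L);

    System.out.println(create.getSplitKeysCount() + " split keys, disabling "
      + disable.getTableName().getQualifier().toStringUtf8());
  }
}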

      - * In case the size of the pool is set to a non-zero positive number, that is - * used to cap the number of resources that a pool may contain for any given - * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool. + * In case the size of the pool is set to a non-zero positive number, that is used to cap the number + * of resources that a pool may contain for any given key. A size of {@link Integer#MAX_VALUE} is + * interpreted as an unbounded pool. *
<p>
      - * *
<p>
      - * PoolMap is thread-safe. It does not remove elements automatically. Unused resources - * must be closed and removed explicitly. + * PoolMap is thread-safe. It does not remove elements automatically. Unused resources must be + * closed and removed explicitly. *
<p>
      - * - * @param - * the type of the key to the resource - * @param - * the type of the resource being pooled + * @param the type of the key to the resource + * @param the type of the resource being pooled */ @InterfaceAudience.Private public class PoolMap { @@ -58,32 +49,33 @@ public class PoolMap { private final PoolType poolType; private final int poolMaxSize; - public PoolMap(PoolType poolType, int poolMaxSize) { - pools = new HashMap<>(); - this.poolType = poolType; - this.poolMaxSize = poolMaxSize; + public PoolMap(PoolType poolType, int poolMaxSize) { + pools = new HashMap<>(); + this.poolType = poolType; + this.poolMaxSize = poolMaxSize; } public V getOrCreate(K key, PoolResourceSupplier supplier) throws IOException { - synchronized (pools) { - Pool pool = pools.get(key); + synchronized (pools) { + Pool pool = pools.get(key); - if (pool == null) { - pool = createPool(); - pools.put(key, pool); - } + if (pool == null) { + pool = createPool(); + pools.put(key, pool); + } - try { - return pool.getOrCreate(supplier); - } catch (IOException | RuntimeException | Error e) { - if (pool.size() == 0) { - pools.remove(key); - } + try { + return pool.getOrCreate(supplier); + } catch (IOException | RuntimeException | Error e) { + if (pool.size() == 0) { + pools.remove(key); + } - throw e; - } - } + throw e; + } + } } + public boolean remove(K key, V value) { synchronized (pools) { Pool pool = pools.get(key); @@ -128,7 +120,7 @@ public class PoolMap { } public interface PoolResourceSupplier { - R get() throws IOException; + R get() throws IOException; } protected static V createResource(PoolResourceSupplier supplier) throws IOException { @@ -149,7 +141,8 @@ public class PoolMap { } public enum PoolType { - ThreadLocal, RoundRobin; + ThreadLocal, + RoundRobin; public static PoolType valueOf(String poolTypeName, PoolType defaultPoolType) { PoolType poolType = PoolType.fuzzyMatch(poolTypeName); @@ -172,30 +165,25 @@ public class PoolMap { protected Pool createPool() { switch (poolType) { - case RoundRobin: - return new RoundRobinPool<>(poolMaxSize); - case ThreadLocal: - return new ThreadLocalPool<>(); - default: - return new RoundRobinPool<>(poolMaxSize); + case RoundRobin: + return new RoundRobinPool<>(poolMaxSize); + case ThreadLocal: + return new ThreadLocalPool<>(); + default: + return new RoundRobinPool<>(poolMaxSize); } } /** - * The RoundRobinPool represents a {@link PoolMap.Pool}, which - * stores its resources in an {@link ArrayList}. It load-balances access to - * its resources by returning a different resource every time a given key is - * looked up. - * + * The RoundRobinPool represents a {@link PoolMap.Pool}, which stores its resources + * in an {@link ArrayList}. It load-balances access to its resources by returning a different + * resource every time a given key is looked up. *
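A minimal sketch of the PoolMap API shown in this hunk, assuming the generic signature PoolMap<K, V> whose angle brackets were lost in this rendering; PoolMap is IA.Private, and StringBuilder stands in for a real pooled resource:

import java.io.IOException;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class PoolMapSketch {
  public static void main(String[] args) throws IOException {
    // Round-robin pool capped at 4 resources per key.
    PoolMap<String, StringBuilder> pool = new PoolMap<>(PoolType.RoundRobin, 4);

    // getOrCreate() builds the resource lazily through the supplier
    // (PoolResourceSupplier.get() may throw IOException).
    StringBuilder a = pool.getOrCreate("server-1", StringBuilder::new);
    StringBuilder b = pool.getOrCreate("server-1", StringBuilder::new);

    // Nothing is evicted automatically; callers remove resources themselves.
    pool.remove("server-1", a);
    pool.remove("server-1", b);
  }
}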

      - * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of - * the pool is unbounded. Otherwise, it caps the number of resources in this - * pool to the (non-zero positive) value specified in {@link #maxSize}. + * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of the pool is + * unbounded. Otherwise, it caps the number of resources in this pool to the (non-zero positive) + * value specified in {@link #maxSize}. *
<p>
      - * - * @param - * the type of the resource - * + * @param the type of the resource */ @SuppressWarnings("serial") static class RoundRobinPool implements Pool { @@ -254,18 +242,15 @@ public class PoolMap { } /** - * The ThreadLocalPool represents a {@link PoolMap.Pool} that - * works similarly to {@link ThreadLocal} class. It essentially binds the resource - * to the thread from which it is accessed. It doesn't remove resources when a thread exits, - * those resources must be closed manually. - * + * The ThreadLocalPool represents a {@link PoolMap.Pool} that works similarly to + * {@link ThreadLocal} class. It essentially binds the resource to the thread from which it is + * accessed. It doesn't remove resources when a thread exits, those resources must be closed + * manually. *
<p>
      - * Note that the size of the pool is essentially bounded by the number of threads - * that add resources to this pool. + * Note that the size of the pool is essentially bounded by the number of threads that add + * resources to this pool. *
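A companion sketch for the ThreadLocal pool type, illustrating the per-thread binding the javadoc describes: the same key yields the same resource within one thread, while another thread gets its own (again assuming the PoolMap<K, V> generics lost in rendering):

import java.io.IOException;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class ThreadLocalPoolSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    PoolMap<String, StringBuilder> pool = new PoolMap<>(PoolType.ThreadLocal, Integer.MAX_VALUE);

    // Same key, same thread: the pooled resource is reused.
    StringBuilder first = pool.getOrCreate("key", StringBuilder::new);
    StringBuilder again = pool.getOrCreate("key", StringBuilder::new);
    System.out.println(first == again);  // true

    // A different thread gets its own resource for the same key, so the pool
    // grows with the number of threads that touch it.
    Thread t = new Thread(() -> {
      try {
        StringBuilder other = pool.getOrCreate("key", StringBuilder::new);
        System.out.println(other == first);  // false
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    });
    t.start();
    t.join();
  }
}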

      - * - * @param - * the type of the resource + * @param the type of the resource */ static class ThreadLocalPool implements Pool { private final Map resources; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java index 698330acc92..7ebbbf44ceb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,10 +24,9 @@ import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility class with methods for manipulating Writable objects @@ -38,11 +36,11 @@ public class Writables { /** * @param w writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e * @see #getWritable(byte[], Writable) */ - public static byte [] getBytes(final Writable w) throws IOException { + public static byte[] getBytes(final Writable w) throws IOException { if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); } @@ -64,20 +62,20 @@ public class Writables { * Put a bunch of Writables as bytes all into the one byte array. * @param ws writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e */ - public static byte [] getBytes(final Writable... ws) throws IOException { - List bytes = new ArrayList<>(ws.length); + public static byte[] getBytes(final Writable... ws) throws IOException { + List bytes = new ArrayList<>(ws.length); int size = 0; - for (Writable w: ws) { - byte [] b = getBytes(w); + for (Writable w : ws) { + byte[] b = getBytes(w); size += b.length; bytes.add(b); } - byte [] result = new byte[size]; + byte[] result = new byte[size]; int offset = 0; - for (byte [] b: bytes) { + for (byte[] b : bytes) { System.arraycopy(b, 0, result, offset, b.length); offset += b.length; } @@ -88,39 +86,32 @@ public class Writables { * Set bytes into the passed Writable by calling its * {@link Writable#readFields(java.io.DataInput)}. * @param bytes serialized bytes - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. - * @throws IOException e - * @throws IllegalArgumentException + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. 
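A short sketch of the Writables round-trip these javadocs describe, using org.apache.hadoop.io.Text as a convenient Writable:

import java.io.IOException;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

public class WritablesSketch {
  public static void main(String[] args) throws IOException {
    // Serialize via the Writable's write(DataOutput)...
    byte[] bytes = Writables.getBytes(new Text("hello"));

    // ...then rebuild by feeding the bytes to an empty instance's readFields().
    Text copy = (Text) Writables.getWritable(bytes, new Text());

    // copyWritable() chains both steps: serialize src, deserialize into tgt.
    Text target = new Text();
    Writables.copyWritable(copy, target);
    System.out.println(target);  // hello
  }
}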
+ * @throws IOException e n */ - public static Writable getWritable(final byte [] bytes, final Writable w) - throws IOException { + public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException { return getWritable(bytes, 0, bytes.length, w); } /** * Set bytes into the passed Writable by calling its * {@link Writable#readFields(java.io.DataInput)}. - * @param bytes serialized bytes + * @param bytes serialized bytes * @param offset offset into array * @param length length of data - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. - * @throws IOException e - * @throws IllegalArgumentException + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. + * @throws IOException e n */ - public static Writable getWritable(final byte [] bytes, final int offset, - final int length, final Writable w) - throws IOException { - if (bytes == null || length <=0) { - throw new IllegalArgumentException("Can't build a writable with empty " + - "bytes array"); + public static Writable getWritable(final byte[] bytes, final int offset, final int length, + final Writable w) throws IOException { + if (bytes == null || length <= 0) { + throw new IllegalArgumentException("Can't build a writable with empty " + "bytes array"); } if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); @@ -136,26 +127,24 @@ public class Writables { } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param src Source Writable * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final Writable src, final Writable tgt) - throws IOException { + public static Writable copyWritable(final Writable src, final Writable tgt) throws IOException { return copyWritable(getBytes(src), tgt); } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param bytes Source Writable - * @param tgt Target Writable + * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final byte [] bytes, final Writable tgt) - throws IOException { + public static Writable copyWritable(final byte[] bytes, final Writable tgt) throws IOException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes)); try { tgt.readFields(dis); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 0447e31fdd0..96170736208 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public final class ReadOnlyZKClient implements Closeable { private static final int DEFAULT_RECOVERY_RETRY = 30; public static final String RECOVERY_RETRY_INTERVAL_MILLIS = - "zookeeper.recovery.retry.intervalmill"; + "zookeeper.recovery.retry.intervalmill"; private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000; @@ -134,11 +134,11 @@ public final class ReadOnlyZKClient implements Closeable { this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT); this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY); this.retryIntervalMs = - conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); + conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); LOG.debug( - "Connect {} to {} with session timeout={}ms, retries {}, " + - "retry interval {}ms, keepAlive={}ms", + "Connect {} to {} with session timeout={}ms, retries {}, " + + "retry interval {}ms, keepAlive={}ms", getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs); Threads.setDaemonThreadRunning(new Thread(this::run), "ReadOnlyZKClient-" + connectString + "@" + getId()); @@ -260,8 +260,8 @@ public final class ReadOnlyZKClient implements Closeable { @Override protected void doExec(ZooKeeper zk) { - zk.getData(path, false, - (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), null); + zk.getData(path, false, (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), + null); } }); return future; @@ -311,7 +311,8 @@ public final class ReadOnlyZKClient implements Closeable { private ZooKeeper getZk() throws IOException { // may be closed when session expired if (zookeeper == null || !zookeeper.getState().isAlive()) { - zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); } return zookeeper; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java index 5072706cb5a..f0fae958a66 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 1affd9e627b..003878e7b21 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -43,9 +43,8 @@ public class ZNodePaths { public final String baseZNode; /** - * The prefix of meta znode. Does not include baseZNode. - * Its a 'prefix' because meta replica id integer can be tagged on the end (if - * no number present, it is 'default' replica). + * The prefix of meta znode. 
Does not include baseZNode. Its a 'prefix' because meta replica id + * integer can be tagged on the end (if no number present, it is 'default' replica). */ private final String metaZNodePrefix; @@ -98,51 +97,44 @@ public class ZNodePaths { drainingZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); masterAddressZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); backupMasterAddressesZNode = - joinZNode(baseZNode, conf.get("zookeeper.znode.backup.masters", "backup-masters")); + joinZNode(baseZNode, conf.get("zookeeper.znode.backup.masters", "backup-masters")); clusterStateZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.state", "running")); tableZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.tableEnableDisable", "table")); clusterIdZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "hbaseid")); splitLogZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.splitlog", SPLIT_LOGDIR_NAME)); balancerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer")); regionNormalizerZNode = - joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); + joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); namespaceZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace")); masterMaintZNode = - joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); + joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); replicationZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication")); peersZNode = - joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.peers", "peers")); + joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.peers", "peers")); queuesZNode = joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.rs", "rs")); - hfileRefsZNode = joinZNode(replicationZNode, - conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs")); + hfileRefsZNode = + joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs")); snapshotCleanupZNode = joinZNode(baseZNode, - conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); + conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); } @Override public String toString() { - return new StringBuilder() - .append("ZNodePaths [baseZNode=").append(baseZNode) - .append(", rsZNode=").append(rsZNode) - .append(", drainingZNode=").append(drainingZNode) - .append(", masterAddressZNode=").append(masterAddressZNode) - .append(", backupMasterAddressesZNode=").append(backupMasterAddressesZNode) - .append(", clusterStateZNode=").append(clusterStateZNode) - .append(", tableZNode=").append(tableZNode) - .append(", clusterIdZNode=").append(clusterIdZNode) - .append(", splitLogZNode=").append(splitLogZNode) - .append(", balancerZNode=").append(balancerZNode) - .append(", regionNormalizerZNode=").append(regionNormalizerZNode) - .append(", switchZNode=").append(switchZNode) - .append(", namespaceZNode=").append(namespaceZNode) - .append(", masterMaintZNode=").append(masterMaintZNode) - .append(", replicationZNode=").append(replicationZNode) - .append(", peersZNode=").append(peersZNode) - .append(", queuesZNode=").append(queuesZNode) - .append(", hfileRefsZNode=").append(hfileRefsZNode) - .append(", 
snapshotCleanupZNode=").append(snapshotCleanupZNode) - .append("]").toString(); + return new StringBuilder().append("ZNodePaths [baseZNode=").append(baseZNode) + .append(", rsZNode=").append(rsZNode).append(", drainingZNode=").append(drainingZNode) + .append(", masterAddressZNode=").append(masterAddressZNode) + .append(", backupMasterAddressesZNode=").append(backupMasterAddressesZNode) + .append(", clusterStateZNode=").append(clusterStateZNode).append(", tableZNode=") + .append(tableZNode).append(", clusterIdZNode=").append(clusterIdZNode) + .append(", splitLogZNode=").append(splitLogZNode).append(", balancerZNode=") + .append(balancerZNode).append(", regionNormalizerZNode=").append(regionNormalizerZNode) + .append(", switchZNode=").append(switchZNode).append(", namespaceZNode=") + .append(namespaceZNode).append(", masterMaintZNode=").append(masterMaintZNode) + .append(", replicationZNode=").append(replicationZNode).append(", peersZNode=") + .append(peersZNode).append(", queuesZNode=").append(queuesZNode).append(", hfileRefsZNode=") + .append(hfileRefsZNode).append(", snapshotCleanupZNode=").append(snapshotCleanupZNode) + .append("]").toString(); } /** @@ -158,8 +150,7 @@ public class ZNodePaths { /** * Parses the meta replicaId from the passed path. - * @param path the name of the full path which includes baseZNode. - * @return replicaId + * @param path the name of the full path which includes baseZNode. n */ public int getMetaReplicaIdFromPath(String path) { // Extract the znode from path. The prefix is of the following format. @@ -170,13 +161,12 @@ public class ZNodePaths { /** * Parse the meta replicaId from the passed znode - * @param znode the name of the znode, does not include baseZNode - * @return replicaId + * @param znode the name of the znode, does not include baseZNode n */ public int getMetaReplicaIdFromZNode(String znode) { - return znode.equals(metaZNodePrefix)? - RegionInfo.DEFAULT_REPLICA_ID: - Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); + return znode.equals(metaZNodePrefix) + ? RegionInfo.DEFAULT_REPLICA_ID + : Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); } /** @@ -202,8 +192,8 @@ public class ZNodePaths { // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS // all clients need to access this data to work. Using zk for sharing data to clients (other // than service lookup case is not a recommended design pattern. 
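A hedged sketch of reading the znode layout above from a vanilla Configuration; the printed values assume the default zookeeper.znode.* settings (base znode /hbase, meta prefix meta-region-server):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

public class ZNodePathsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    ZNodePaths paths = new ZNodePaths(conf);

    // With defaults these resolve under /hbase, e.g. /hbase/master and /hbase/rs.
    System.out.println(paths.baseZNode);
    System.out.println(paths.masterAddressZNode);
    System.out.println(paths.rsZNode);

    // The bare meta prefix maps to the default replica id; a "-1" suffix maps to replica 1.
    System.out.println(paths.getMetaReplicaIdFromZNode("meta-region-server"));
    System.out.println(paths.getMetaReplicaIdFromZNode("meta-region-server-1"));
  }
}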
- return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) || - path.equals(clusterIdZNode) || path.equals(rsZNode) || + return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) + || path.equals(clusterIdZNode) || path.equals(rsZNode) || // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not path.equals(tableZNode) || path.startsWith(tableZNode + "/"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java index dd26ed5f209..e6c3da4d0f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java @@ -19,13 +19,12 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.ZooKeeper; +import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; /** * Methods that help working with ZooKeeper @@ -39,11 +38,12 @@ public final class ZooKeeperHelper { /** * Get a ZooKeeper instance and wait until it connected before returning. * @param sessionTimeoutMs Used as session timeout passed to the created ZooKeeper AND as the - * timeout to wait on connection establishment. + * timeout to wait on connection establishment. */ public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionTimeoutMs) - throws IOException { - ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + throws IOException { + ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); return ensureConnectedZooKeeper(zookeeper, sessionTimeoutMs); } @@ -52,18 +52,17 @@ public final class ZooKeeperHelper { * @param timeout Time to wait on established Connection */ public static ZooKeeper ensureConnectedZooKeeper(ZooKeeper zookeeper, int timeout) - throws ZooKeeperConnectionException { + throws ZooKeeperConnectionException { if (zookeeper.getState().isConnected()) { return zookeeper; } Stopwatch stopWatch = Stopwatch.createStarted(); // Make sure we are connected before we hand it back. - while(!zookeeper.getState().isConnected()) { + while (!zookeeper.getState().isConnected()) { Threads.sleep(1); if (stopWatch.elapsed(TimeUnit.MILLISECONDS) > timeout) { - throw new ZooKeeperConnectionException("Failed connect after waiting " + - stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + - zookeeper); + throw new ZooKeeperConnectionException("Failed connect after waiting " + + stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + zookeeper); } } return zookeeper; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java index e1b67857404..fc05d9eec52 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,28 +40,25 @@ import org.junit.rules.ExpectedException; /** * Tests the HColumnDescriptor with appropriate arguments. - * * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 together with - * {@link HColumnDescriptor}. + * {@link HColumnDescriptor}. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) @Deprecated public class TestHColumnDescriptor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHColumnDescriptor.class); + HBaseClassTestRule.forClass(TestHColumnDescriptor.class); @Rule public ExpectedException expectedEx = ExpectedException.none(); + @Test public void testPb() throws DeserializationException { - HColumnDescriptor hcd = new HColumnDescriptor( - new HColumnDescriptor(HConstants.CATALOG_FAMILY) - .setInMemory(true) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setBloomFilterType(BloomType.NONE) - .setCacheDataInL1(true)); + HColumnDescriptor hcd = new HColumnDescriptor(new HColumnDescriptor(HConstants.CATALOG_FAMILY) + .setInMemory(true).setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setBloomFilterType(BloomType.NONE).setCacheDataInL1(true)); final int v = 123; hcd.setBlocksize(v); hcd.setTimeToLive(v); @@ -82,7 +79,7 @@ public class TestHColumnDescriptor { hcd.setMobThreshold(1000L); hcd.setDFSReplication((short) v); - byte [] bytes = hcd.toByteArray(); + byte[] bytes = hcd.toByteArray(); HColumnDescriptor deserializedHcd = HColumnDescriptor.parseFrom(bytes); assertTrue(hcd.equals(deserializedHcd)); assertEquals(v, hcd.getBlocksize()); @@ -134,11 +131,11 @@ public class TestHColumnDescriptor { // We unify the format of all values saved in the descriptor. // Each value is stored as bytes of string. String isMobString = PrettyPrinter.format(String.valueOf(isMob), - HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); + HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); String thresholdString = PrettyPrinter.format(String.valueOf(threshold), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); String policyString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(policy)), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); assertEquals(String.valueOf(isMob), isMobString); assertEquals(String.valueOf(threshold), thresholdString); assertEquals(String.valueOf(policy), policyString); @@ -146,16 +143,11 @@ public class TestHColumnDescriptor { @Test public void testClassMethodsAreBuilderStyle() { - /* HColumnDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * HColumnDescriptor hcd - * = new HColumnDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * HColumnDescriptor should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . 
For example: HColumnDescriptor hcd = new HColumnDescriptor() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(HColumnDescriptor.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index 94d05f77283..2bfaa2de788 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,24 +41,23 @@ import org.slf4j.LoggerFactory; /** * Test setting values in the descriptor - * * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 together with * {@link HTableDescriptor}. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) @Deprecated public class TestHTableDescriptor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHTableDescriptor.class); + HBaseClassTestRule.forClass(TestHTableDescriptor.class); private static final Logger LOG = LoggerFactory.getLogger(TestHTableDescriptor.class); @Rule public TestName name = new TestName(); - @Test (expected=IOException.class) + @Test(expected = IOException.class) public void testAddCoprocessorTwice() throws IOException { HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME); String cpName = "a.b.c.d"; @@ -110,7 +109,7 @@ public class TestHTableDescriptor { htd.setDurability(Durability.ASYNC_WAL); htd.setReadOnly(true); htd.setRegionReplication(2); - byte [] bytes = htd.toByteArray(); + byte[] bytes = htd.toByteArray(); HTableDescriptor deserializedHtd = HTableDescriptor.parseFrom(bytes); assertEquals(htd, deserializedHtd); assertEquals(v, deserializedHtd.getMaxFileSize()); @@ -121,7 +120,6 @@ public class TestHTableDescriptor { /** * Test cps in the table description. - * * @throws Exception if adding a coprocessor fails */ @Test @@ -139,7 +137,6 @@ public class TestHTableDescriptor { /** * Test cps in the table description. 
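A small sketch of the behaviour testAddCoprocessorTwice exercises above: registering the same coprocessor class twice on a (deprecated) HTableDescriptor fails with an IOException. The table name here is illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class CoprocessorTwiceSketch {
  @SuppressWarnings("deprecation")
  public static void main(String[] args) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    htd.addCoprocessor("a.b.c.d");       // first registration is fine
    try {
      htd.addCoprocessor("a.b.c.d");     // the same class again is rejected
    } catch (IOException expected) {
      System.out.println("duplicate coprocessor rejected: " + expected.getMessage());
    }
  }
}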
- * * @throws Exception if adding a coprocessor fails */ @Test @@ -198,13 +195,13 @@ public class TestHTableDescriptor { String[] legalTableNames = { "foo", "with-dash_under.dot", "_under_start_ok", "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02", "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", - "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02", - "汉", "汉:字", "_字_", "foo:字", "foo.字", "字.foo"}; + "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02", "汉", + "汉:字", "_字_", "foo:字", "foo.字", "字.foo" }; // Avoiding "zookeeper" in here as it's tough to encode in regex String[] illegalTableNames = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok", "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", - "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2", String.valueOf((char)130), - String.valueOf((char)5), String.valueOf((char)65530)}; + "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2", String.valueOf((char) 130), + String.valueOf((char) 5), String.valueOf((char) 65530) }; @Test public void testLegalHTableNames() { @@ -241,8 +238,8 @@ public class TestHTableDescriptor { public void testLegalHTableNamesRegex() { for (String tn : legalTableNames) { TableName tName = TableName.valueOf(tn); - assertTrue("Testing: '" + tn + "'", Pattern.matches(TableName.VALID_USER_TABLE_REGEX, - tName.getNameAsString())); + assertTrue("Testing: '" + tn + "'", + Pattern.matches(TableName.VALID_USER_TABLE_REGEX, tName.getNameAsString())); } } @@ -254,7 +251,7 @@ public class TestHTableDescriptor { } } - /** + /** * Test default value handling for maxFileSize */ @Test @@ -292,16 +289,11 @@ public class TestHTableDescriptor { @Test public void testClassMethodsAreBuilderStyle() { - /* HTableDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * HTableDescriptor htd - * = new HTableDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * HTableDescriptor should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . 
For example: HTableDescriptor htd = new HTableDescriptor() .setFoo(foo) + * .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" returns the + * declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(HTableDescriptor.class); @@ -325,7 +317,7 @@ public class TestHTableDescriptor { assertEquals(1, htd.getFamily(familyName).getDFSReplication()); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testModifyInexistentFamily() { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); byte[] familyName = Bytes.toBytes("cf"); @@ -333,7 +325,7 @@ public class TestHTableDescriptor { htd.modifyFamily(hcd); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testAddDuplicateFamilies() { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); byte[] familyName = Bytes.toBytes("cf"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java index f43ce4a52bb..d87f3b3cb04 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestRegionLocations { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionLocations.class); + HBaseClassTestRule.forClass(TestRegionLocations.class); ServerName sn0 = ServerName.valueOf("host0", 10, 10); ServerName sn1 = ServerName.valueOf("host1", 10, 10); @@ -56,7 +56,7 @@ public class TestRegionLocations { assertEquals(0, list.size()); assertEquals(0, list.numNonNullElements()); - list = hrll((HRegionLocation)null); + list = hrll((HRegionLocation) null); assertTrue(list.isEmpty()); assertEquals(1, list.size()); assertEquals(0, list.numNonNullElements()); @@ -99,7 +99,7 @@ public class TestRegionLocations { return new HRegionLocation(hri, sn, seqNum); } - private RegionLocations hrll(HRegionLocation ... locations) { + private RegionLocations hrll(HRegionLocation... locations) { return new RegionLocations(locations); } @@ -168,7 +168,6 @@ public class TestRegionLocations { assertNull(list.getRegionLocation(5)); assertNull(list.getRegionLocation(9)); - // test multi-element remove from multi element list list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0)); list = list.remove(hrl(info9, sn0)); @@ -334,12 +333,11 @@ public class TestRegionLocations { assertEquals(3, list.size()); } - @Test public void testConstructWithNullElements() { // RegionLocations can contain null elements as well. 
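A sketch mirroring the assertions of testConstructWithNullElements, plus a non-null counterpart; RegionInfoBuilder and the HRegionLocation constructor are standard client classes, and the table/server names are made up:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class RegionLocationsSketch {
  public static void main(String[] args) {
    // Mirrors the test: a lone null slot keeps size() at 1 while
    // numNonNullElements() stays 0 and the list reports empty.
    RegionLocations list = new RegionLocations((HRegionLocation) null);
    System.out.println(list.size());               // 1
    System.out.println(list.numNonNullElements()); // 0
    System.out.println(list.isEmpty());            // true

    // A real location makes it non-empty.
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf("t")).build();
    ServerName sn = ServerName.valueOf("host0", 10, 10);
    RegionLocations single = new RegionLocations(new HRegionLocation(info, sn));
    System.out.println(single.numNonNullElements()); // 1
  }
}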
These null elements can - RegionLocations list = new RegionLocations((HRegionLocation)null); + RegionLocations list = new RegionLocations((HRegionLocation) null); assertTrue(list.isEmpty()); assertEquals(1, list.size()); assertEquals(0, list.numNonNullElements()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java index 0291bb79122..965259983e6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java @@ -1,19 +1,19 @@ /* - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.client; @@ -27,9 +27,10 @@ import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; -@Category({ ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class RpcRetryingCallerImplTest { @ClassRule @@ -43,8 +44,7 @@ public class RpcRetryingCallerImplTest { new RemoteWithExtrasException(CallDroppedException.class.getName(), message, false)); Throwable result = RpcRetryingCallerImpl.translateException(exception); - Assert.assertTrue("Expect unwrap CallDroppedException", - result instanceof CallDroppedException); + Assert.assertTrue("Expect unwrap CallDroppedException", result instanceof CallDroppedException); Assert.assertEquals(message, result.getMessage()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java index 44670dd9538..d0fcbbbddbc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java index d5a2b5b2486..f4c9fc059bd 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java index 14bbc49f348..9377898482d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,8 +69,8 @@ public class TestAsyncConnectionTracing { return CompletableFuture.completedFuture(masterServer); } }; - conn = new AsyncConnectionImpl(CONF, registry, "test", - UserProvider.instantiate(CONF).getCurrent()); + conn = + new AsyncConnectionImpl(CONF, registry, "test", UserProvider.instantiate(CONF).getCurrent()); } @After @@ -81,14 +81,13 @@ public class TestAsyncConnectionTracing { private void assertTrace(String methodName, ServerName serverName) { Waiter.waitFor(CONF, 1000, () -> traceRule.getSpans().stream() - .anyMatch(span -> span.getName().equals("AsyncConnection." + methodName) && - span.getKind() == SpanKind.INTERNAL && span.hasEnded())); + .anyMatch(span -> span.getName().equals("AsyncConnection." + methodName) + && span.getKind() == SpanKind.INTERNAL && span.hasEnded())); SpanData data = traceRule.getSpans().stream() .filter(s -> s.getName().equals("AsyncConnection." + methodName)).findFirst().get(); assertEquals(StatusCode.OK, data.getStatus().getStatusCode()); if (serverName != null) { - assertEquals( - serverName.getServerName(), + assertEquals(serverName.getServerName(), data.getAttributes().get(HBaseSemanticAttributes.SERVER_NAME_KEY)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 32e5a03ecbc..89634924e57 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -79,18 +79,18 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -@Category({ClientTests.class, LargeTests.class}) +@Category({ ClientTests.class, LargeTests.class }) public class TestAsyncProcess { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncProcess.class); + HBaseClassTestRule.forClass(TestAsyncProcess.class); private static final Logger LOG = LoggerFactory.getLogger(TestAsyncProcess.class); - private static final TableName DUMMY_TABLE = - TableName.valueOf("DUMMY_TABLE"); + private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1"); private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2"); private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3"); @@ -101,11 +101,11 @@ public class TestAsyncProcess { private static final ServerName sn2 = ServerName.valueOf("s2,2,2"); private static final ServerName sn3 = ServerName.valueOf("s3,3,3"); private static final HRegionInfo hri1 = - new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1); + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1); private static final HRegionInfo hri2 = - new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2); + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2); private static final HRegionInfo hri3 = - new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3); + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3); private static final HRegionLocation loc1 = new HRegionLocation(hri1, sn); private static final HRegionLocation loc2 = new HRegionLocation(hri2, sn); private static final HRegionLocation loc3 = new HRegionLocation(hri3, sn2); @@ -115,11 +115,11 @@ public class TestAsyncProcess { private static final RegionInfo hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2); private static final RegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1); private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn), - new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3)); - private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2), - new HRegionLocation(hri2r1, sn3)); + new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3)); + private static final RegionLocations hrls2 = + new RegionLocations(new HRegionLocation(hri2, sn2), new HRegionLocation(hri2r1, sn3)); private static final RegionLocations hrls3 = - new RegionLocations(new HRegionLocation(hri3, sn3), null); + new RegionLocations(new HRegionLocation(hri3, sn3), null); private static final String success = "success"; private static Exception failure = new Exception("failure"); @@ -134,10 +134,10 @@ public class TestAsyncProcess { this.CONF = new Configuration(); CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, NB_RETRIES); this.CONNECTION_CONFIG = new ConnectionConfiguration(CONF); - this.RPC_TIMEOUT = CONF.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.RPC_TIMEOUT = + CONF.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 
HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.OPERATION_TIMEOUT = CONF.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); } static class CountingThreadFactory implements ThreadFactory { @@ -145,13 +145,14 @@ public class TestAsyncProcess { ThreadFactory realFactory = new ThreadFactoryBuilder().setNameFormat("test-TestAsyncProcess-pool-%d") .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(); + @Override public Thread newThread(Runnable r) { nbThreads.incrementAndGet(); return realFactory.newThread(r); } - CountingThreadFactory(AtomicInteger nbThreads){ + CountingThreadFactory(AtomicInteger nbThreads) { this.nbThreads = nbThreads; } } @@ -165,61 +166,59 @@ public class TestAsyncProcess { private long previousTimeout = -1; final ExecutorService service; + @Override - protected AsyncRequestFutureImpl createAsyncRequestFuture( - AsyncProcessTask task, List actions, long nonceGroup) { + protected AsyncRequestFutureImpl createAsyncRequestFuture(AsyncProcessTask task, + List actions, long nonceGroup) { // Test HTable has tableName of null, so pass DUMMY_TABLE - AsyncProcessTask wrap = new AsyncProcessTask(task){ + AsyncProcessTask wrap = new AsyncProcessTask(task) { @Override public TableName getTableName() { return DUMMY_TABLE; } }; - AsyncRequestFutureImpl r = new MyAsyncRequestFutureImpl<>( - wrap, actions, nonceGroup, this); + AsyncRequestFutureImpl r = + new MyAsyncRequestFutureImpl<>(wrap, actions, nonceGroup, this); allReqs.add(r); return r; } public MyAsyncProcess(ClusterConnection hc, Configuration conf) { - super(hc, conf, - new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); + super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); service = Executors.newFixedThreadPool(5); this.conf = conf; } public MyAsyncProcess(ClusterConnection hc, Configuration conf, AtomicInteger nbThreads) { super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); - service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new CountingThreadFactory(nbThreads)); + service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new CountingThreadFactory(nbThreads)); } public AsyncRequestFuture submit(ExecutorService pool, TableName tableName, - List rows, boolean atLeastOne, Batch.Callback callback, - boolean needResults) throws InterruptedIOException { + List rows, boolean atLeastOne, Batch.Callback callback, + boolean needResults) throws InterruptedIOException { AsyncProcessTask task = AsyncProcessTask.newBuilder(callback) - .setPool(pool == null ? service : pool) - .setTableName(tableName) - .setRowAccess(rows) - .setSubmittedRows(atLeastOne ? SubmittedRows.AT_LEAST_ONE : SubmittedRows.NORMAL) - .setNeedResults(needResults) - .setRpcTimeout(conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT)) - .setOperationTimeout(conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT)) - .build(); + .setPool(pool == null ? service : pool).setTableName(tableName).setRowAccess(rows) + .setSubmittedRows(atLeastOne ? 
SubmittedRows.AT_LEAST_ONE : SubmittedRows.NORMAL) + .setNeedResults(needResults) + .setRpcTimeout( + conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)) + .setOperationTimeout(conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT)) + .build(); return submit(task); } - public AsyncRequestFuture submit(TableName tableName, - final List rows, boolean atLeastOne, Batch.Callback callback, - boolean needResults) throws InterruptedIOException { + public AsyncRequestFuture submit(TableName tableName, final List rows, + boolean atLeastOne, Batch.Callback callback, boolean needResults) + throws InterruptedIOException { return submit(null, tableName, rows, atLeastOne, callback, needResults); } @Override public AsyncRequestFuture submit(AsyncProcessTask task) - throws InterruptedIOException { + throws InterruptedIOException { previousTimeout = task.getRpcTimeout(); // We use results in tests to check things, so override to always save them. AsyncProcessTask wrap = new AsyncProcessTask(task) { @@ -232,28 +231,26 @@ public class TestAsyncProcess { } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { callsCt.incrementAndGet(); MultiServerCallable callable1 = (MultiServerCallable) callable; - final MultiResponse mr = createMultiResponse( - callable1.getMulti(), nbMultiResponse, nbActions, - new ResponseGenerator() { - @Override - public void addResponse(MultiResponse mr, byte[] regionName, Action a) { - if (Arrays.equals(FAILS, a.getAction().getRow())) { - mr.add(regionName, a.getOriginalIndex(), failure); - } else { - mr.add(regionName, a.getOriginalIndex(), success); - } + final MultiResponse mr = createMultiResponse(callable1.getMulti(), nbMultiResponse, nbActions, + new ResponseGenerator() { + @Override + public void addResponse(MultiResponse mr, byte[] regionName, Action a) { + if (Arrays.equals(FAILS, a.getAction().getRow())) { + mr.add(regionName, a.getOriginalIndex(), failure); + } else { + mr.add(regionName, a.getOriginalIndex(), success); } - }); + } + }); return new RpcRetryingCallerImpl(100, 500, 10, 9) { @Override public AbstractResponse callWithoutRetries(RetryingCallable callable, - int callTimeout) - throws IOException, RuntimeException { + int callTimeout) throws IOException, RuntimeException { try { // sleep one second in order for threadpool to start another thread instead of reusing // existing one. 
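The hunks above and below are representative of what spotless:apply does throughout this file: wrapped expressions now lead their continuation lines with the operator ('&&', '||', '+') instead of leaving it dangling at the end of the previous line, annotation value arrays gain spaces inside the braces ('{ ClientTests.class, LargeTests.class }'), '){' becomes ') {', license headers open with '/*' rather than '/**', and the org.apache.hbase.thirdparty imports are set off by a blank line. A minimal, hypothetical sketch of those wrapping rules follows — the class and names are invented for illustration and are not part of the patch:

import java.util.Arrays;
import java.util.List;

// Hypothetical example formatted the way the surrounding hunks come out after spotless:apply.
public class WrappingExample {

  // Annotation value arrays carry a space inside the braces: { ... } rather than {...}.
  @SuppressWarnings({ "unused", "rawtypes" })
  private static final List<String> NAMES = Arrays.asList("alpha", "beta", "gamma");

  // When an expression wraps, the operator ('&&', '||', '+') starts the continuation line.
  static String describe(List<String> names, int limit, boolean includeTotal) {
    if (names.isEmpty()
      || limit <= 0) {
      return "empty";
    }
    return "showing " + Math.min(names.size(), limit) + " entries"
      + (includeTotal ? " of " + names.size() : "");
  }

  public static void main(String[] args) {
    System.out.println(describe(NAMES, 2, true));
  }
}

Most of the churn in the remaining hunks comes from exactly these wrapping rules; the patch is formatting-only and does not change test behavior.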
@@ -266,13 +263,13 @@ public class TestAsyncProcess { }; } - } static class MyAsyncRequestFutureImpl extends AsyncRequestFutureImpl { private final Map> heapSizesByServer = new HashMap<>(); - public MyAsyncRequestFutureImpl(AsyncProcessTask task, List actions, - long nonceGroup, AsyncProcess asyncProcess) { + + public MyAsyncRequestFutureImpl(AsyncProcessTask task, List actions, long nonceGroup, + AsyncProcess asyncProcess) { super(task, actions, nonceGroup, asyncProcess); } @@ -286,11 +283,10 @@ public class TestAsyncProcess { } @Override - SingleServerRequestRunnable createSingleServerRequest( - MultiAction multiAction, int numAttempt, ServerName server, - Set callsInProgress) { - SingleServerRequestRunnable rq = new SingleServerRequestRunnable( - multiAction, numAttempt, server, callsInProgress); + SingleServerRequestRunnable createSingleServerRequest(MultiAction multiAction, int numAttempt, + ServerName server, Set callsInProgress) { + SingleServerRequestRunnable rq = + new SingleServerRequestRunnable(multiAction, numAttempt, server, callsInProgress); List heapCount = heapSizesByServer.get(server); if (heapCount == null) { heapCount = new ArrayList<>(); @@ -301,16 +297,13 @@ public class TestAsyncProcess { } private long heapSizeOf(MultiAction multiAction) { - return multiAction.actions.values().stream() - .flatMap(v -> v.stream()) - .map(action -> action.getAction()) - .filter(row -> row instanceof Mutation) - .mapToLong(row -> ((Mutation) row).heapSize()) - .sum(); + return multiAction.actions.values().stream().flatMap(v -> v.stream()) + .map(action -> action.getAction()).filter(row -> row instanceof Mutation) + .mapToLong(row -> ((Mutation) row).heapSize()).sum(); } } - static class CallerWithFailure extends RpcRetryingCallerImpl{ + static class CallerWithFailure extends RpcRetryingCallerImpl { private final IOException e; @@ -321,13 +314,11 @@ public class TestAsyncProcess { @Override public AbstractResponse callWithoutRetries(RetryingCallable callable, - int callTimeout) - throws IOException, RuntimeException { + int callTimeout) throws IOException, RuntimeException { throw e; } } - static class AsyncProcessWithFailure extends MyAsyncProcess { private final IOException ioe; @@ -339,8 +330,8 @@ public class TestAsyncProcess { } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { callsCt.incrementAndGet(); return new CallerWithFailure(ioe); } @@ -351,6 +342,7 @@ public class TestAsyncProcess { */ static class MyClientBackoffPolicy implements ClientBackoffPolicy { private final Map count = new HashMap<>(); + @Override public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats) { AtomicInteger inc = count.get(serverName); @@ -392,28 +384,27 @@ public class TestAsyncProcess { } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable payloadCallable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable payloadCallable, int rpcTimeout) { MultiServerCallable callable = (MultiServerCallable) payloadCallable; - final MultiResponse mr = createMultiResponse( - callable.getMulti(), nbMultiResponse, nbActions, new ResponseGenerator() { - @Override - public void addResponse(MultiResponse mr, byte[] regionName, Action a) { - if (failures.contains(regionName)) { - mr.add(regionName, a.getOriginalIndex(), failure); - } else { - 
boolean isStale = !RegionReplicaUtil.isDefaultReplica(a.getReplicaId()); - mr.add(regionName, a.getOriginalIndex(), - Result.create(new Cell[0], null, isStale)); - } + final MultiResponse mr = createMultiResponse(callable.getMulti(), nbMultiResponse, nbActions, + new ResponseGenerator() { + @Override + public void addResponse(MultiResponse mr, byte[] regionName, Action a) { + if (failures.contains(regionName)) { + mr.add(regionName, a.getOriginalIndex(), failure); + } else { + boolean isStale = !RegionReplicaUtil.isDefaultReplica(a.getReplicaId()); + mr.add(regionName, a.getOriginalIndex(), Result.create(new Cell[0], null, isStale)); } - }); + } + }); // Currently AsyncProcess either sends all-replica, or all-primary request. final boolean isDefault = RegionReplicaUtil.isDefaultReplica( - callable.getMulti().actions.values().iterator().next().iterator().next().getReplicaId()); - final ServerName server = ((MultiServerCallable)callable).getServerName(); + callable.getMulti().actions.values().iterator().next().iterator().next().getReplicaId()); + final ServerName server = ((MultiServerCallable) callable).getServerName(); String debugMsg = "Call to " + server + ", primary=" + isDefault + " with " - + callable.getMulti().actions.size() + " entries: "; + + callable.getMulti().actions.size() + " entries: "; for (byte[] region : callable.getMulti().actions.keySet()) { debugMsg += "[" + Bytes.toStringBinary(region) + "], "; } @@ -425,8 +416,7 @@ public class TestAsyncProcess { return new RpcRetryingCallerImpl(100, 500, 10, 9) { @Override public MultiResponse callWithoutRetries(RetryingCallable callable, - int callTimeout) - throws IOException, RuntimeException { + int callTimeout) throws IOException, RuntimeException { long sleep = -1; if (isDefault) { Long customSleep = customPrimarySleepMs.get(server); @@ -446,8 +436,8 @@ public class TestAsyncProcess { } } - static MultiResponse createMultiResponse(final MultiAction multi, - AtomicInteger nbMultiResponse, AtomicInteger nbActions, ResponseGenerator gen) { + static MultiResponse createMultiResponse(final MultiAction multi, AtomicInteger nbMultiResponse, + AtomicInteger nbActions, ResponseGenerator gen) { final MultiResponse mr = new MultiResponse(); nbMultiResponse.incrementAndGet(); for (Map.Entry> entry : multi.actions.entrySet()) { @@ -487,14 +477,14 @@ public class TestAsyncProcess { } private static Configuration setupConf(Configuration conf) { - conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - TestRegistry.class, ConnectionRegistry.class); + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, TestRegistry.class, + ConnectionRegistry.class); return conf; } @Override - public RegionLocations locateRegion(TableName tableName, - byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException { + public RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, + boolean retry, int replicaId) throws IOException { return new RegionLocations(loc1); } @@ -518,10 +508,10 @@ public class TestAsyncProcess { } @Override - public RegionLocations locateRegion(TableName tableName, - byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException { + public RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, + boolean retry, int replicaId) throws IOException { int i = 0; - for (HRegionLocation hr : hrl){ + for (HRegionLocation hr : hrl) { if (Arrays.equals(row, hr.getRegionInfo().getStartKey())) { usedRegions[i] = true; return new 
RegionLocations(hr); @@ -531,6 +521,7 @@ public class TestAsyncProcess { return null; } } + @Test public void testListRowAccess() { int count = 10; @@ -566,6 +557,7 @@ public class TestAsyncProcess { assertEquals(0, taker.size()); assertEquals(count, takeCount); } + private static long calculateRequestCount(long putSizePerServer, long maxHeapSizePerRequest) { if (putSizePerServer <= maxHeapSizePerRequest) { return 1; @@ -603,8 +595,8 @@ public class TestAsyncProcess { } long putsHeapSize = n % limit; long maxHeapSizePerRequest = putsHeapSize / requestCount; - LOG.info("[testSubmitRandomSizeRequest] maxHeapSizePerRequest=" + maxHeapSizePerRequest + - ", putsHeapSize=" + putsHeapSize); + LOG.info("[testSubmitRandomSizeRequest] maxHeapSizePerRequest=" + maxHeapSizePerRequest + + ", putsHeapSize=" + putsHeapSize); doSubmitRequest(maxHeapSizePerRequest, putsHeapSize); } @@ -625,14 +617,14 @@ public class TestAsyncProcess { private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) throws Exception { ClusterConnection conn = createHConnection(); final String defaultClazz = - conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); - final long defaultHeapSizePerRequest = conn.getConfiguration().getLong( - SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, - SimpleRequestController.DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE); + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + final long defaultHeapSizePerRequest = + conn.getConfiguration().getLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, + SimpleRequestController.DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, - maxHeapSizePerRequest); + maxHeapSizePerRequest); // sn has two regions long putSizeSN = 0; @@ -654,11 +646,9 @@ public class TestAsyncProcess { int minCountSnRequest = (int) calculateRequestCount(putSizeSN, maxHeapSizePerRequest); int minCountSn2Request = (int) calculateRequestCount(putSizeSN2, maxHeapSizePerRequest); - LOG.info("Total put count:" + puts.size() + ", putSizeSN:"+ putSizeSN - + ", putSizeSN2:" + putSizeSN2 - + ", maxHeapSizePerRequest:" + maxHeapSizePerRequest - + ", minCountSnRequest:" + minCountSnRequest - + ", minCountSn2Request:" + minCountSn2Request); + LOG.info("Total put count:" + puts.size() + ", putSizeSN:" + putSizeSN + ", putSizeSN2:" + + putSizeSN2 + ", maxHeapSizePerRequest:" + maxHeapSizePerRequest + ", minCountSnRequest:" + + minCountSnRequest + ", minCountSn2Request:" + minCountSn2Request); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); @@ -710,7 +700,7 @@ public class TestAsyncProcess { } // restore config. 
conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, - defaultHeapSizePerRequest); + defaultHeapSizePerRequest); if (defaultClazz != null) { conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, defaultClazz); @@ -754,7 +744,7 @@ public class TestAsyncProcess { public void testSubmitBusyRegion() throws Exception { ClusterConnection conn = createHConnection(); final String defaultClazz = - conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); @@ -777,18 +767,17 @@ public class TestAsyncProcess { } } - @Test public void testSubmitBusyRegionServer() throws Exception { ClusterConnection conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); final String defaultClazz = - conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); SimpleRequestController controller = (SimpleRequestController) ap.requestController; controller.taskCounterPerServer.put(sn2, - new AtomicInteger(controller.maxConcurrentTasksPerServer)); + new AtomicInteger(controller.maxConcurrentTasksPerServer)); List puts = new ArrayList<>(4); puts.add(createPut(1, true)); @@ -800,7 +789,7 @@ public class TestAsyncProcess { Assert.assertEquals(" puts=" + puts, 1, puts.size()); controller.taskCounterPerServer.put(sn2, - new AtomicInteger(controller.maxConcurrentTasksPerServer - 1)); + new AtomicInteger(controller.maxConcurrentTasksPerServer - 1)); ap.submit(null, DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); if (defaultClazz != null) { @@ -825,22 +814,21 @@ public class TestAsyncProcess { Assert.assertEquals(1, ars.getErrors().exceptions.size()); Assert.assertTrue("was: " + ars.getErrors().exceptions.get(0), - failure.equals(ars.getErrors().exceptions.get(0))); + failure.equals(ars.getErrors().exceptions.get(0))); Assert.assertTrue("was: " + ars.getErrors().exceptions.get(0), - failure.equals(ars.getErrors().exceptions.get(0))); + failure.equals(ars.getErrors().exceptions.get(0))); Assert.assertEquals(1, ars.getFailedOperations().size()); Assert.assertTrue("was: " + ars.getFailedOperations().get(0), - p.equals(ars.getFailedOperations().get(0))); + p.equals(ars.getFailedOperations().get(0))); } - @Test public void testSubmitTrue() throws IOException { ClusterConnection conn = createHConnection(); final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); final String defaultClazz = - conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); SimpleRequestController controller = (SimpleRequestController) ap.requestController; @@ -851,9 +839,9 @@ public class TestAsyncProcess { final AtomicBoolean checkPoint = new AtomicBoolean(false); final AtomicBoolean checkPoint2 = new AtomicBoolean(false); - Thread t = new Thread(){ + 
Thread t = new Thread() { @Override - public void run(){ + public void run() { Threads.sleep(1000); Assert.assertFalse(checkPoint.get()); // TODO: this is timing-dependent ai.decrementAndGet(); @@ -875,7 +863,7 @@ public class TestAsyncProcess { Assert.assertTrue(puts.isEmpty()); checkPoint.set(true); - while (!checkPoint2.get()){ + while (!checkPoint2.get()) { Threads.sleep(1); } if (defaultClazz != null) { @@ -945,7 +933,7 @@ public class TestAsyncProcess { Mockito.when(conn.getStatisticsTracker()).thenReturn(ServerStatisticTracker.create(copyConf)); Mockito.when(conn.getBackoffPolicy()).thenReturn(bp); final String defaultClazz = - conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf); @@ -957,7 +945,7 @@ public class TestAsyncProcess { } private void testTaskCount(MyAsyncProcess ap) - throws InterruptedIOException, InterruptedException { + throws InterruptedIOException, InterruptedException { SimpleRequestController controller = (SimpleRequestController) ap.requestController; List puts = new ArrayList<>(); for (int i = 0; i != 3; ++i) { @@ -982,13 +970,12 @@ public class TestAsyncProcess { public void testMaxTask() throws Exception { ClusterConnection conn = createHConnection(); final String defaultClazz = - conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); SimpleRequestController controller = (SimpleRequestController) ap.requestController; - for (int i = 0; i < 1000; i++) { ap.incTaskCounters(Collections.singleton(Bytes.toBytes("dummy")), sn); } @@ -1031,7 +1018,7 @@ public class TestAsyncProcess { ap.submit(null, DUMMY_TABLE, new ArrayList<>(), false, null, false); long end = EnvironmentEdgeManager.currentTime(); - //Adds 100 to secure us against approximate timing. + // Adds 100 to secure us against approximate timing. 
Assert.assertTrue(start + 100L + sleepTime > end); if (defaultClazz != null) { conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, @@ -1049,8 +1036,9 @@ public class TestAsyncProcess { setMockLocation(hc, DUMMY_BYTES_1, new RegionLocations(loc1)); setMockLocation(hc, DUMMY_BYTES_2, new RegionLocations(loc2)); setMockLocation(hc, DUMMY_BYTES_3, new RegionLocations(loc3)); - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), - Mockito.anyBoolean())).thenReturn(Arrays.asList(loc1, loc2, loc3)); + Mockito + .when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) + .thenReturn(Arrays.asList(loc1, loc2, loc3)); setMockLocation(hc, FAILS, new RegionLocations(loc2)); return hc; } @@ -1071,21 +1059,22 @@ public class TestAsyncProcess { for (HRegionLocation loc : hrls3.getRegionLocations()) { locations.add(loc); } - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), - Mockito.anyBoolean())).thenReturn(locations); + Mockito + .when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) + .thenReturn(locations); return hc; } - private static void setMockLocation(ClusterConnection hc, byte[] row, - RegionLocations result) throws IOException { - Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), - Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result); - Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), - Mockito.anyBoolean(), Mockito.anyBoolean())).thenReturn(result); + private static void setMockLocation(ClusterConnection hc, byte[] row, RegionLocations result) + throws IOException { + Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(), + Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result); + Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(), + Mockito.anyBoolean())).thenReturn(result); } - private ClusterConnection createHConnectionCommon( - ConnectionConfiguration connectionConfiguration) { + private ClusterConnection + createHConnectionCommon(ConnectionConfiguration connectionConfiguration) { ClusterConnection hc = Mockito.mock(ClusterConnection.class); NonceGenerator ng = Mockito.mock(NonceGenerator.class); Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE); @@ -1105,7 +1094,7 @@ public class TestAsyncProcess { Put put = createPut(1, true); Assert.assertEquals(conn.getConnectionConfiguration().getWriteBufferSize(), - ht.getWriteBufferSize()); + ht.getWriteBufferSize()); Assert.assertEquals(0, ht.getCurrentWriteBufferSize()); ht.mutate(put); ht.flush(); @@ -1117,25 +1106,17 @@ public class TestAsyncProcess { ClusterConnection conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); - checkPeriodicFlushParameters(conn, ap, - 1234, 1234, - 1234, 1234); - checkPeriodicFlushParameters(conn, ap, - 0, 0, - 0, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); - checkPeriodicFlushParameters(conn, ap, - -1234, 0, - -1234, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); - checkPeriodicFlushParameters(conn, ap, - 1, 1, - 1, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + checkPeriodicFlushParameters(conn, ap, 1234, 1234, 1234, 1234); + checkPeriodicFlushParameters(conn, ap, 0, 0, 0, + BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + checkPeriodicFlushParameters(conn, ap, -1234, 0, -1234, + 
BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + checkPeriodicFlushParameters(conn, ap, 1, 1, 1, + BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); } - private void checkPeriodicFlushParameters(ClusterConnection conn, - MyAsyncProcess ap, - long setTO, long expectTO, - long setTT, long expectTT - ) { + private void checkPeriodicFlushParameters(ClusterConnection conn, MyAsyncProcess ap, long setTO, + long expectTO, long setTT, long expectTT) { BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); // The BufferedMutatorParams does nothing with the value @@ -1151,7 +1132,7 @@ public class TestAsyncProcess { // The BufferedMutatorImpl corrects illegal values (direct via setter) BufferedMutatorImpl ht2 = - new BufferedMutatorImpl(conn, createBufferedMutatorParams(ap, DUMMY_TABLE), ap); + new BufferedMutatorImpl(conn, createBufferedMutatorParams(ap, DUMMY_TABLE), ap); ht2.setWriteBufferPeriodicFlush(setTO, setTT); Assert.assertEquals(expectTO, ht2.getWriteBufferPeriodicFlushTimeoutMs()); Assert.assertEquals(expectTT, ht2.getWriteBufferPeriodicFlushTimerTickMs()); @@ -1164,9 +1145,9 @@ public class TestAsyncProcess { MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); - bufferParam.setWriteBufferPeriodicFlushTimeoutMs(1); // Flush ASAP + bufferParam.setWriteBufferPeriodicFlushTimeoutMs(1); // Flush ASAP bufferParam.setWriteBufferPeriodicFlushTimerTickMs(1); // Check every 100ms - bufferParam.writeBufferSize(10000); // Write buffer set to much larger than the single record + bufferParam.writeBufferSize(10000); // Write buffer set to much larger than the single record BufferedMutatorImpl ht = new BufferedMutatorImpl(conn, bufferParam, ap); @@ -1174,7 +1155,7 @@ public class TestAsyncProcess { Assert.assertEquals(10000, ht.getWriteBufferSize()); Assert.assertEquals(1, ht.getWriteBufferPeriodicFlushTimeoutMs()); Assert.assertEquals(BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, - ht.getWriteBufferPeriodicFlushTimerTickMs()); + ht.getWriteBufferPeriodicFlushTimerTickMs()); Put put = createPut(1, true); @@ -1224,7 +1205,6 @@ public class TestAsyncProcess { Assert.assertEquals(0, ht.getCurrentWriteBufferSize()); } - @Test public void testBufferedMutatorImplWithSharedPool() throws Exception { ClusterConnection conn = createHConnection(); @@ -1240,8 +1220,8 @@ public class TestAsyncProcess { public void testFailedPutAndNewPut() throws Exception { ClusterConnection conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); - BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE) - .writeBufferSize(0); + BufferedMutatorParams bufferParam = + createBufferedMutatorParams(ap, DUMMY_TABLE).writeBufferSize(0); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap); Put p = createPut(1, false); @@ -1311,6 +1291,7 @@ public class TestAsyncProcess { Assert.assertEquals(success, res[5]); Assert.assertEquals(failure, res[6]); } + @Test public void testErrorsServers() throws IOException { Configuration configuration = new Configuration(CONF); @@ -1399,12 +1380,11 @@ public class TestAsyncProcess { Assert.assertEquals(NB_RETRIES + 1, ap.callsCt.get()); } - @Test public void testCallQueueTooLarge() throws IOException { ClusterConnection conn = new MyConnectionImpl(CONF); AsyncProcessWithFailure ap = - new AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException()); + new 
AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException()); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap); Assert.assertNotNull(ap.createServerErrorTracker()); @@ -1421,9 +1401,10 @@ public class TestAsyncProcess { // Checking that the ErrorsServers came into play and didn't make us stop immediately Assert.assertEquals(NB_RETRIES + 1, ap.callsCt.get()); } + /** - * This test simulates multiple regions on 2 servers. We should have 2 multi requests and - * 2 threads: 1 per server, this whatever the number of regions. + * This test simulates multiple regions on 2 servers. We should have 2 multi requests and 2 + * threads: 1 per server, this whatever the number of regions. */ @Test public void testThreadCreation() throws Exception { @@ -1431,8 +1412,8 @@ public class TestAsyncProcess { List hrls = new ArrayList<>(NB_REGS); List gets = new ArrayList<>(NB_REGS); for (int i = 0; i < NB_REGS; i++) { - HRegionInfo hri = new HRegionInfo( - DUMMY_TABLE, Bytes.toBytes(i * 10L), Bytes.toBytes(i * 10L + 9L), false, i); + HRegionInfo hri = + new HRegionInfo(DUMMY_TABLE, Bytes.toBytes(i * 10L), Bytes.toBytes(i * 10L + 9L), false, i); HRegionLocation hrl = new HRegionLocation(hri, i % 2 == 0 ? sn : sn2); hrls.add(hrl); @@ -1451,7 +1432,7 @@ public class TestAsyncProcess { Assert.assertEquals("1 thread per server", 2, con.nbThreads.get()); int nbReg = 0; - for (int i =0; i rows) throws InterruptedIOException { - return submit(AsyncProcessTask.newBuilder() - .setPool(service) - .setTableName(tableName) - .setRowAccess(rows) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.NORMAL) - .setNeedResults(true) - .setRpcTimeout(HConstants.DEFAULT_HBASE_RPC_TIMEOUT) - .setOperationTimeout(HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT) - .build()); + return submit(AsyncProcessTask.newBuilder().setPool(service).setTableName(tableName) + .setRowAccess(rows).setSubmittedRows(AsyncProcessTask.SubmittedRows.NORMAL) + .setNeedResults(true).setRpcTimeout(HConstants.DEFAULT_HBASE_RPC_TIMEOUT) + .setOperationTimeout(HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT).build()); } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { MultiServerCallable callable1 = (MultiServerCallable) callable; MultiResponse mr = new MultiResponse(); callable1.getMulti().actions.forEach((regionName, actions) -> { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index 47e09f8113f..bc9e971488d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -30,6 +30,7 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasItem; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -60,6 +61,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + 
import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, MediumTests.class }) @@ -110,18 +112,14 @@ public class TestAsyncRegionLocatorTracing { private SpanData waitSpan(Matcher matcher) { Matcher spanLocator = allOf(matcher, hasEnded()); try { - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - "waiting for span", + Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span", () -> traceRule.getSpans(), hasItem(spanLocator))); } catch (AssertionError e) { LOG.error("AssertionError while waiting for matching span. Span reservoir contains: {}", traceRule.getSpans()); throw e; } - return traceRule.getSpans() - .stream() - .filter(spanLocator::matches) - .findFirst() + return traceRule.getSpans().stream().filter(spanLocator::matches).findFirst() .orElseThrow(AssertionError::new); } @@ -129,9 +127,7 @@ public class TestAsyncRegionLocatorTracing { public void testClearCache() { conn.getLocator().clearCache(); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn))); } @@ -140,22 +136,20 @@ public class TestAsyncRegionLocatorTracing { ServerName sn = ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis()); conn.getLocator().clearCache(sn); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); } @Test public void testClearCacheTableName() { conn.getLocator().clearCache(TableName.META_TABLE_NAME); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME))); } @Test @@ -163,13 +157,11 @@ public class TestAsyncRegionLocatorTracing { conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( - containsEntryWithStringValuesOf("db.hbase.regions", + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME), + hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", locs.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @@ -178,16 +170,12 @@ public class TestAsyncRegionLocatorTracing { conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, false, 
TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations"); - String[] expectedRegions = Arrays.stream(locs.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(locs.getRegionLocations()).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index 1e6a4b345e7..7d6ef3f0c4e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell.Type; @@ -68,7 +67,9 @@ import org.junit.rules.TestName; import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -84,7 +85,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrEx import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; - /** * Confirm that we will set the priority in {@link HBaseRpcController} for several table operations. 
*/ @@ -171,7 +171,7 @@ public class TestAsyncTableRpcPriority { @Override public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { + throws Throwable { TableName tableName = invocation.getArgument(0); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); ServerName serverName = ServerName.valueOf("rs", 16010, 12345); @@ -486,26 +486,24 @@ public class TestAsyncTableRpcPriority { ScanRequest req = invocation.getArgument(1); RpcCallback done = invocation.getArgument(2); if (!req.hasScannerId()) { - done.run( - ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true).build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true).build()); } else { if (req.hasRenew() && req.getRenew()) { future.complete(null); } assertFalse("close scanner should not come in with scan priority " + scanPriority, - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) - .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) - .setValue(Bytes.toBytes("v")).build(); + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) + .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) + .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); - done.run( - ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true) - .addResults(ProtobufUtil.toResult(result)).build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true) + .addResults(ProtobufUtil.toResult(result)).build()); } }); return null; @@ -521,10 +519,9 @@ public class TestAsyncTableRpcPriority { ScanRequest req = invocation.getArgument(1); RpcCallback done = invocation.getArgument(2); assertTrue("close request should have scannerId", req.hasScannerId()); - assertEquals("close request's scannerId should match", scannerId, - req.getScannerId()); + assertEquals("close request's scannerId should match", scannerId, req.getScannerId()); assertTrue("close request should have closerScanner set", - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); done.run(ScanResponse.getDefaultInstance()); }); @@ -549,8 +546,8 @@ public class TestAsyncTableRpcPriority { @Test public void testScanSystemTable() throws Exception { CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS); - testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), - renewFuture, Optional.empty()); + testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), renewFuture, + Optional.empty()); } @Test @@ -560,7 +557,7 @@ public class TestAsyncTableRpcPriority { } private void testForTable(TableName tableName, CompletableFuture renewFuture, - Optional priority) throws Exception { + Optional priority) throws Exception { Scan scan = new Scan().setCaching(1).setMaxResultSize(1); priority.ifPresent(scan::setPriority); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java index 31d154f03a4..a5ea94e7cae 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java @@ -104,7 +104,7 @@ public class TestAsyncTableTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableTracing.class); + HBaseClassTestRule.forClass(TestAsyncTableTracing.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -129,18 +129,18 @@ public class TestAsyncTableTracing { RpcCallback done = invocation.getArgument(2); if (!req.hasScannerId()) { done.run(ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true).build()); + .setMoreResultsInRegion(true).setMoreResults(true).build()); } else { if (req.hasCloseScanner() && req.getCloseScanner()) { done.run(ScanResponse.getDefaultInstance()); } else { Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) - .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) - .setValue(Bytes.toBytes("v")).build(); + .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) + .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); ScanResponse.Builder builder = ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .addResults(ProtobufUtil.toResult(result)); + .addResults(ProtobufUtil.toResult(result)); if (req.getLimitOfRows() == 1) { builder.setMoreResultsInRegion(false).setMoreResults(false); } else { @@ -183,12 +183,12 @@ public class TestAsyncTableTracing { ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); Cell cell = - CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) - .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()).build(); + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() - .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); + .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; default: resp = MutateResponse.getDefaultInstance(); @@ -215,18 +215,18 @@ public class TestAsyncTableTracing { AsyncRegionLocator getLocator() { AsyncRegionLocator locator = mock(AsyncRegionLocator.class); Answer> answer = - new Answer>() { + new Answer>() { - @Override - public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { - TableName tableName = invocation.getArgument(0); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = ServerName.valueOf("rs", 16010, 12345); - HRegionLocation loc = new HRegionLocation(info, serverName); - return CompletableFuture.completedFuture(loc); - } - }; + @Override + public CompletableFuture answer(InvocationOnMock invocation) + throws Throwable { + TableName tableName = invocation.getArgument(0); + 
RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = ServerName.valueOf("rs", 16010, 12345); + HRegionLocation loc = new HRegionLocation(info, serverName); + return CompletableFuture.completedFuture(loc); + } + }; doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), any(RegionLocateType.class), anyLong()); doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), @@ -256,13 +256,13 @@ public class TestAsyncTableTracing { // TestHTableTracing final TableName tableName = table.getName(); final Matcher spanLocator = - allOf(hasName(containsString(tableOperation)), hasEnded()); + allOf(hasName(containsString(tableOperation)), hasEnded()); final String expectedName = tableOperation + " " + tableName.getNameWithNamespaceInclAsString(); Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span to emit", - () -> traceRule.getSpans(), hasItem(spanLocator))); + () -> traceRule.getSpans(), hasItem(spanLocator))); List candidateSpans = - traceRule.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList()); + traceRule.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList()); assertThat(candidateSpans, hasSize(1)); SpanData data = candidateSpans.iterator().next(); assertThat(data, @@ -305,16 +305,16 @@ public class TestAsyncTableTracing { @Test public void testIncrement() { table - .increment( - new Increment(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1)) - .join(); + .increment( + new Increment(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1)) + .join(); assertTrace("INCREMENT"); } @Test public void testIncrementColumnValue1() { table.incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1) - .join(); + .join(); assertTrace("INCREMENT"); } @@ -328,18 +328,18 @@ public class TestAsyncTableTracing { @Test public void testCheckAndMutate() { table.checkAndMutate(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0)))).join(); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0)))).join(); assertTrace("CHECK_AND_MUTATE"); } @Test public void testCheckAndMutateList() { CompletableFuture - .allOf(table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0])) - .join(); + .allOf(table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0])) + .join(); assertTrace("BATCH", hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); @@ -348,8 +348,8 @@ public class TestAsyncTableTracing { @Test public void testCheckAndMutateAll() { table.checkAndMutateAll(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))).join(); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))).join(); assertTrace("BATCH", 
hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); @@ -357,8 +357,8 @@ public class TestAsyncTableTracing { private void testCheckAndMutateBuilder(Row op) { AsyncTable.CheckAndMutateBuilder builder = - table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")); + table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")); if (op instanceof Put) { Put put = (Put) op; builder.thenPut(put).join(); @@ -388,18 +388,17 @@ public class TestAsyncTableTracing { @Test public void testCheckAndMutateBuilderThenMutations() throws IOException { - RowMutations mutations = - new RowMutations(Bytes.toBytes(0)) - .add((Mutation) (new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), - Bytes.toBytes("cq"), Bytes.toBytes("v")))) - .add((Mutation) new Delete(Bytes.toBytes(0))); + RowMutations mutations = new RowMutations(Bytes.toBytes(0)) + .add((Mutation) (new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v")))) + .add((Mutation) new Delete(Bytes.toBytes(0))); testCheckAndMutateBuilder(mutations); } private void testCheckAndMutateWithFilterBuilder(Row op) { // use of `PrefixFilter` is completely arbitrary here. AsyncTable.CheckAndMutateWithFilterBuilder builder = - table.checkAndMutate(Bytes.toBytes(0), new PrefixFilter(Bytes.toBytes(0))); + table.checkAndMutate(Bytes.toBytes(0), new PrefixFilter(Bytes.toBytes(0))); if (op instanceof Put) { Put put = (Put) op; builder.thenPut(put).join(); @@ -429,21 +428,19 @@ public class TestAsyncTableTracing { @Test public void testCheckAndMutateWithFilterBuilderThenMutations() throws IOException { - RowMutations mutations = - new RowMutations(Bytes.toBytes(0)) - .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), - Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add((Mutation) new Delete(Bytes.toBytes(0))); + RowMutations mutations = new RowMutations(Bytes.toBytes(0)) + .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))) + .add((Mutation) new Delete(Bytes.toBytes(0))); testCheckAndMutateWithFilterBuilder(mutations); } @Test public void testMutateRow() throws IOException { - final RowMutations mutations = - new RowMutations(Bytes.toBytes(0)) - .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add((Mutation) new Delete(Bytes.toBytes(0))); + final RowMutations mutations = new RowMutations(Bytes.toBytes(0)) + .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))) + .add((Mutation) new Delete(Bytes.toBytes(0))); table.mutateRow(mutations).join(); assertTrace("BATCH", hasAttributes( containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE", "PUT"))); @@ -508,9 +505,9 @@ public class TestAsyncTableTracing { @Test public void testExistsList() { CompletableFuture - .allOf( - table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); + .allOf( + table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); assertTrace("BATCH", hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @@ -525,9 +522,8 @@ public class TestAsyncTableTracing { @Test public void testGetList() { CompletableFuture - .allOf( - 
table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); + .allOf(table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); assertTrace("BATCH", hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @@ -542,9 +538,9 @@ public class TestAsyncTableTracing { @Test public void testPutList() { CompletableFuture - .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0])) - .join(); + .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0])) + .join(); assertTrace("BATCH", hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @@ -559,9 +555,10 @@ public class TestAsyncTableTracing { @Test public void testDeleteList() { - CompletableFuture.allOf( - table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); + CompletableFuture + .allOf( + table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); assertTrace("BATCH", hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @@ -575,9 +572,10 @@ public class TestAsyncTableTracing { @Test public void testBatch() { - CompletableFuture.allOf( - table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); + CompletableFuture + .allOf( + table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); assertTrace("BATCH", hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java index 15d5104730a..efb993561a5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,13 +27,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestAttributes { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAttributes.class); + HBaseClassTestRule.forClass(TestAttributes.class); + + private static final byte[] ROW = new byte[] { 'r' }; - private static final byte [] ROW = new byte [] {'r'}; @Test public void testPutAttributes() { Put put = new Put(ROW); @@ -48,22 +49,22 @@ public class TestAttributes { put.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - put.getAttributesMap().get("attribute1"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1"))); // overriding attribute value put.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - put.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1"))); // adding another attribute put.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2"))); Assert.assertEquals(2, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - put.getAttributesMap().get("attribute2"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2"))); // removing attribute put.setAttribute("attribute2", null); @@ -86,7 +87,7 @@ public class TestAttributes { @Test public void testDeleteAttributes() { - Delete del = new Delete(new byte [] {'r'}); + Delete del = new Delete(new byte[] { 'r' }); Assert.assertTrue(del.getAttributesMap().isEmpty()); Assert.assertNull(del.getAttribute("absent")); @@ -98,22 +99,22 @@ public class TestAttributes { del.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - del.getAttributesMap().get("attribute1"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttributesMap().get("attribute1"))); // overriding attribute value del.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - del.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), del.getAttributesMap().get("attribute1"))); // adding another attribute del.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttribute("attribute2"))); Assert.assertEquals(2, del.getAttributesMap().size()); - 
Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - del.getAttributesMap().get("attribute2"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttributesMap().get("attribute2"))); // removing attribute del.setAttribute("attribute2", null); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java index c9993ee2dd1..2d83c29bae9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,33 +34,33 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestBufferedMutator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBufferedMutator.class); + HBaseClassTestRule.forClass(TestBufferedMutator.class); @Rule public TestName name = new TestName(); /** - * My BufferedMutator. - * Just to prove that I can insert a BM other than default. + * My BufferedMutator. Just to prove that I can insert a BM other than default. */ public static class MyBufferedMutator extends BufferedMutatorImpl { MyBufferedMutator(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory, - RpcControllerFactory rpcFactory, BufferedMutatorParams params) { + RpcControllerFactory rpcFactory, BufferedMutatorParams params) { super(conn, rpcCallerFactory, rpcFactory, params); } } @Test public void testAlternateBufferedMutatorImpl() throws IOException { - BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf(name.getMethodName())); + BufferedMutatorParams params = + new BufferedMutatorParams(TableName.valueOf(name.getMethodName())); Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - DoNothingConnectionRegistry.class.getName()); + DoNothingConnectionRegistry.class.getName()); try (Connection connection = ConnectionFactory.createConnection(conf)) { BufferedMutator bm = connection.getBufferedMutator(params); // Assert we get default BM if nothing specified. diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java index 73953d0db75..ba23d105393 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestBufferedMutatorParams { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBufferedMutatorParams.class); + HBaseClassTestRule.forClass(TestBufferedMutatorParams.class); @Rule public TestName name = new TestName(); @@ -98,28 +98,26 @@ public class TestBufferedMutatorParams { } @Override - public List> invokeAll( - Collection> tasks) throws InterruptedException { + public List> invokeAll(Collection> tasks) + throws InterruptedException { return null; } @Override - public List> invokeAll( - Collection> tasks, long timeout, TimeUnit unit) - throws InterruptedException { + public List> invokeAll(Collection> tasks, long timeout, + TimeUnit unit) throws InterruptedException { return null; } @Override public T invokeAny(Collection> tasks) - throws InterruptedException, ExecutionException { + throws InterruptedException, ExecutionException { return null; } @Override - public T invokeAny(Collection> tasks, - long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { + public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { return null; } } @@ -129,8 +127,8 @@ public class TestBufferedMutatorParams { */ private static class MockExceptionListener implements BufferedMutator.ExceptionListener { @Override - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException { + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException { } } @@ -141,13 +139,9 @@ public class TestBufferedMutatorParams { BufferedMutatorParams bmp = new BufferedMutatorParams(TableName.valueOf(tableName)); BufferedMutator.ExceptionListener listener = new MockExceptionListener(); - bmp - .writeBufferSize(17) - .setWriteBufferPeriodicFlushTimeoutMs(123) - .setWriteBufferPeriodicFlushTimerTickMs(456) - .maxKeyValueSize(13) - .pool(pool) - .listener(listener); + bmp.writeBufferSize(17).setWriteBufferPeriodicFlushTimeoutMs(123) + .setWriteBufferPeriodicFlushTimerTickMs(456).maxKeyValueSize(13).pool(pool) + .listener(listener); bmp.implementationClassName("someClassName"); BufferedMutatorParams clone = bmp.clone(); @@ -172,19 +166,17 @@ public class TestBufferedMutatorParams { /** * Confirm all fields are equal. - * @param some some instance + * @param some some instance * @param clone a clone of that instance, but not the same instance. 
*/ - private void cloneTest(BufferedMutatorParams some, - BufferedMutatorParams clone) { + private void cloneTest(BufferedMutatorParams some, BufferedMutatorParams clone) { assertFalse(some == clone); - assertEquals(some.getTableName().toString(), - clone.getTableName().toString()); + assertEquals(some.getTableName().toString(), clone.getTableName().toString()); assertEquals(some.getWriteBufferSize(), clone.getWriteBufferSize()); assertEquals(some.getWriteBufferPeriodicFlushTimeoutMs(), - clone.getWriteBufferPeriodicFlushTimeoutMs()); + clone.getWriteBufferPeriodicFlushTimeoutMs()); assertEquals(some.getWriteBufferPeriodicFlushTimerTickMs(), - clone.getWriteBufferPeriodicFlushTimerTickMs()); + clone.getWriteBufferPeriodicFlushTimerTickMs()); assertEquals(some.getMaxKeyValueSize(), clone.getMaxKeyValueSize()); assertTrue(some.getListener() == clone.getListener()); assertTrue(some.getPool() == clone.getPool()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java index 0df04b8043f..6ae602aaadb 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,11 +36,11 @@ import org.mockito.Mockito; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestClientExponentialBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientExponentialBackoff.class); + HBaseClassTestRule.forClass(TestClientExponentialBackoff.class); ServerName server = Mockito.mock(ServerName.class); byte[] regionname = Bytes.toBytes("region"); @@ -67,8 +67,8 @@ public class TestClientExponentialBackoff { ServerStatistics stats = new ServerStatistics(); update(stats, 100); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); // another policy with a different max timeout long max = 100; @@ -78,20 +78,20 @@ public class TestClientExponentialBackoff { // test beyond 100 still doesn't exceed the max update(stats, 101); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); assertEquals(max, backoffShortTimeout.getBackoffTime(server, regionname, stats)); // and that when we are below 100, its less than the max timeout update(stats, 99); - assertTrue(backoff.getBackoffTime(server, - regionname, stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); + assertTrue(backoff.getBackoffTime(server, regionname, stats) + < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); assertTrue(backoffShortTimeout.getBackoffTime(server, regionname, stats) < max); } /** - * Make sure that we get results in the order that we expect - backoff 
for a load of 1 should - * less than backoff for 10, which should be less than that for 50. + * Make sure that we get results in the order that we expect - backoff for a load of 1 should less + * than backoff for 10, which should be less than that for 50. */ @Test public void testResultOrdering() { @@ -105,9 +105,8 @@ public class TestClientExponentialBackoff { for (int i = 1; i <= 100; i++) { update(stats, i); long next = backoff.getBackoffTime(server, regionname, stats); - assertTrue( - "Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + - "load " + i, previous < next); + assertTrue("Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + + "load " + i, previous < next); previous = next; } } @@ -151,8 +150,7 @@ public class TestClientExponentialBackoff { long previous = backoffTime; update(stats, 0, 0, 50); backoffTime = backoff.getBackoffTime(server, regionname, stats); - assertTrue("Compaction pressure should be bigger", - backoffTime > previous); + assertTrue("Compaction pressure should be bigger", backoffTime > previous); update(stats, 0, 0, 100); backoffTime = backoff.getBackoffTime(server, regionname, stats); @@ -161,18 +159,16 @@ public class TestClientExponentialBackoff { } private void update(ServerStatistics stats, int load) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(load).build(); + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(load).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy, - int compactionPressure) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(memstoreLoad) - .setHeapOccupancy(heapOccupancy) - .setCompactionPressure(compactionPressure) - .build(); + int compactionPressure) { + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(memstoreLoad) + .setHeapOccupancy(heapOccupancy).setCompactionPressure(compactionPressure).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 6c987322cf1..e4ce5ca4a3a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -104,32 +104,31 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRespon import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; /** - * Test client behavior w/o setting up a cluster. - * Mock up cluster emissions. - * See below for a method that tests retries/timeouts currently commented out. + * Test client behavior w/o setting up a cluster. Mock up cluster emissions. See below for a method + * that tests retries/timeouts currently commented out. 
*/ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestClientNoCluster extends Configured implements Tool { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientNoCluster.class); + HBaseClassTestRule.forClass(TestClientNoCluster.class); private static final Logger LOG = LoggerFactory.getLogger(TestClientNoCluster.class); private Configuration conf; /** - * A server that does not exist. I've changed the server in the below to 'localhost' so we - * have a servername that resolves -- otherwise, we just fail on server name lookup with - * UnknownHost... With localhost, was able to reproduce stack traces that looked like production - * stack traces. Was useful figuring out how retry/timeouts are functioning. + * A server that does not exist. I've changed the server in the below to 'localhost' so we have a + * servername that resolves -- otherwise, we just fail on server name lookup with UnknownHost... + * With localhost, was able to reproduce stack traces that looked like production stack traces. + * Was useful figuring out how retry/timeouts are functioning. */ public static final ServerName META_SERVERNAME = - ServerName.valueOf("meta.example.org", 16010, 12345); + ServerName.valueOf("meta.example.org", 16010, 12345); @Before public void setUp() throws Exception { this.conf = HBaseConfiguration.create(); - // Run my Connection overrides. Use my little ConnectionImplementation below which + // Run my Connection overrides. Use my little ConnectionImplementation below which // allows me insert mocks and also use my Registry below rather than the default zk based // one so tests run faster and don't have zk dependency. this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName()); @@ -148,7 +147,7 @@ public class TestClientNoCluster extends Configured implements Tool { @Override public CompletableFuture getMetaRegionLocations() { return CompletableFuture.completedFuture(new RegionLocations( - new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, META_HOST))); + new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, META_HOST))); } @Override @@ -179,7 +178,7 @@ public class TestClientNoCluster extends Configured implements Tool { LOG.info("Got expected exception", e); t = e; } catch (RetriesExhaustedException e) { - // This is the old, unwanted behavior. If we get here FAIL!!! + // This is the old, unwanted behavior. If we get here FAIL!!! fail(); } finally { table.close(); @@ -195,7 +194,7 @@ public class TestClientNoCluster extends Configured implements Tool { // @Ignore @Test public void testAsyncTimeoutAndRetries() - throws IOException, ExecutionException, InterruptedException { + throws IOException, ExecutionException, InterruptedException { Configuration localConfig = HBaseConfiguration.create(this.conf); localConfig.set(ConnectionFactory.HBASE_CLIENT_ASYNC_CONNECTION_IMPL, RpcTimeoutAsyncConnection.class.getName()); @@ -218,8 +217,7 @@ public class TestClientNoCluster extends Configured implements Tool { } /** - * Test that operation timeout prevails over rpc default timeout and retries, etc. - * @throws IOException + * Test that operation timeout prevails over rpc default timeout and retries, etc. 
n */ @Test public void testRpcTimeout() throws IOException { @@ -229,9 +227,9 @@ public class TestClientNoCluster extends Configured implements Tool { int pause = 10; localConfig.setInt("hbase.client.pause", pause); localConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10); - // Set the operation timeout to be < the pause. Expectation is that after first pause, we will + // Set the operation timeout to be < the pause. Expectation is that after first pause, we will // fail out of the rpc because the rpc timeout will have been set to the operation tiemout - // and it has expired. Otherwise, if this functionality is broke, all retries will be run -- + // and it has expired. Otherwise, if this functionality is broke, all retries will be run -- // all ten of them -- and we'll get the RetriesExhaustedException exception. localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1); Connection connection = ConnectionFactory.createConnection(localConfig); @@ -245,7 +243,7 @@ public class TestClientNoCluster extends Configured implements Tool { LOG.info("Got expected exception", e); t = e; } catch (RetriesExhaustedException e) { - // This is the old, unwanted behavior. If we get here FAIL!!! + // This is the old, unwanted behavior. If we get here FAIL!!! fail(); } finally { table.close(); @@ -268,7 +266,7 @@ public class TestClientNoCluster extends Configured implements Tool { this.conf.set("hbase.client.connection.impl", RegionServerStoppedOnScannerOpenConnection.class.getName()); // Go against meta else we will try to find first region for the table on construction which - // means we'll have to do a bunch more mocking. Tests that go against meta only should be + // means we'll have to do a bunch more mocking. Tests that go against meta only should be // good for a bit of testing. Connection connection = ConnectionFactory.createConnection(this.conf); Table table = connection.getTable(TableName.META_TABLE_NAME); @@ -290,7 +288,7 @@ public class TestClientNoCluster extends Configured implements Tool { this.conf.set("hbase.client.connection.impl", RegionServerStoppedOnScannerOpenConnection.class.getName()); // Go against meta else we will try to find first region for the table on construction which - // means we'll have to do a bunch more mocking. Tests that go against meta only should be + // means we'll have to do a bunch more mocking. Tests that go against meta only should be // good for a bit of testing. Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(TableName.META_TABLE_NAME); @@ -324,7 +322,7 @@ public class TestClientNoCluster extends Configured implements Tool { } catch (Exception e) { if (!(e instanceof DoNotRetryIOException)) { String errMsg = - "Should have thrown DoNotRetryException but actually " + e.getClass().getSimpleName(); + "Should have thrown DoNotRetryException but actually " + e.getClass().getSimpleName(); LOG.error(errMsg, e); fail(errMsg); } @@ -336,24 +334,23 @@ public class TestClientNoCluster extends Configured implements Tool { /** * Override to shutdown going to zookeeper for cluster id and meta location. 
*/ - static class RegionServerStoppedOnScannerOpenConnection - extends ConnectionImplementation { + static class RegionServerStoppedOnScannerOpenConnection extends ConnectionImplementation { final ClientService.BlockingInterface stub; - RegionServerStoppedOnScannerOpenConnection(Configuration conf, - ExecutorService pool, User user) throws IOException { + RegionServerStoppedOnScannerOpenConnection(Configuration conf, ExecutorService pool, User user) + throws IOException { super(conf, pool, user); // Mock up my stub so open scanner returns a scanner id and then on next, we throw // exceptions for three times and then after that, we return no more to scan. this.stub = Mockito.mock(ClientService.BlockingInterface.class); long sid = 12345L; try { - Mockito.when(stub.scan((RpcController)Mockito.any(), - (ClientProtos.ScanRequest)Mockito.any())). - thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid).build()). - thenThrow(new ServiceException(new RegionServerStoppedException("From Mockito"))). - thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid). - setMoreResults(false).build()); + Mockito + .when(stub.scan((RpcController) Mockito.any(), (ClientProtos.ScanRequest) Mockito.any())) + .thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid).build()) + .thenThrow(new ServiceException(new RegionServerStoppedException("From Mockito"))) + .thenReturn( + ClientProtos.ScanResponse.newBuilder().setScannerId(sid).setMoreResults(false).build()); } catch (ServiceException e) { throw new IOException(e); } @@ -371,15 +368,14 @@ public class TestClientNoCluster extends Configured implements Tool { static class RpcTimeoutConnection extends ConnectionImplementation { final ClientService.BlockingInterface stub; - RpcTimeoutConnection(Configuration conf, ExecutorService pool, User user) - throws IOException { + RpcTimeoutConnection(Configuration conf, ExecutorService pool, User user) throws IOException { super(conf, pool, user); // Mock up my stub so an exists call -- which turns into a get -- throws an exception this.stub = Mockito.mock(ClientService.BlockingInterface.class); try { - Mockito.when(stub.get((RpcController)Mockito.any(), - (ClientProtos.GetRequest)Mockito.any())). - thenThrow(new ServiceException(new java.net.ConnectException("Connection refused"))); + Mockito + .when(stub.get((RpcController) Mockito.any(), (ClientProtos.GetRequest) Mockito.any())) + .thenThrow(new ServiceException(new java.net.ConnectException("Connection refused"))); } catch (ServiceException e) { throw new IOException(e); } @@ -396,7 +392,7 @@ public class TestClientNoCluster extends Configured implements Tool { */ static class RpcTimeoutAsyncConnection extends AsyncConnectionImpl { RpcTimeoutAsyncConnection(Configuration configuration, ConnectionRegistry registry, - String clusterId, User user) { + String clusterId, User user) { super(configuration, registry, clusterId, user); } } @@ -404,29 +400,26 @@ public class TestClientNoCluster extends Configured implements Tool { /** * Fake many regionservers and many regions on a connection implementation. */ - static class ManyServersManyRegionsConnection - extends ConnectionImplementation { + static class ManyServersManyRegionsConnection extends ConnectionImplementation { // All access should be synchronized final Map serversByClient; /** * Map of faked-up rows of a 'meta table'. 
*/ - final SortedMap> meta; + final SortedMap> meta; final AtomicLong sequenceids = new AtomicLong(0); private final Configuration conf; - ManyServersManyRegionsConnection(Configuration conf, - ExecutorService pool, User user) - throws IOException { + ManyServersManyRegionsConnection(Configuration conf, ExecutorService pool, User user) + throws IOException { super(conf, pool, user); int serverCount = conf.getInt("hbase.test.servers", 10); this.serversByClient = new HashMap<>(serverCount); - this.meta = makeMeta(Bytes.toBytes( - conf.get("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE))), - conf.getInt("hbase.test.regions", 100), - conf.getLong("hbase.test.namespace.span", 1000), - serverCount); + this.meta = + makeMeta(Bytes.toBytes(conf.get("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE))), + conf.getInt("hbase.test.regions", 100), conf.getLong("hbase.test.namespace.span", 1000), + serverCount); this.conf = conf; } @@ -445,18 +438,17 @@ public class TestClientNoCluster extends Configured implements Tool { } } - static MultiResponse doMultiResponse(final SortedMap> meta, - final AtomicLong sequenceids, final MultiRequest request) { - // Make a response to match the request. Act like there were no failures. + static MultiResponse doMultiResponse(final SortedMap> meta, + final AtomicLong sequenceids, final MultiRequest request) { + // Make a response to match the request. Act like there were no failures. ClientProtos.MultiResponse.Builder builder = ClientProtos.MultiResponse.newBuilder(); // Per Region. - RegionActionResult.Builder regionActionResultBuilder = - RegionActionResult.newBuilder(); + RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder(); ResultOrException.Builder roeBuilder = ResultOrException.newBuilder(); - for (RegionAction regionAction: request.getRegionActionList()) { + for (RegionAction regionAction : request.getRegionActionList()) { regionActionResultBuilder.clear(); // Per Action in a Region. - for (ClientProtos.Action action: regionAction.getActionList()) { + for (ClientProtos.Action action : regionAction.getActionList()) { roeBuilder.clear(); // Return empty Result and proper index as result. roeBuilder.setResult(ClientProtos.Result.getDefaultInstance()); @@ -469,33 +461,31 @@ public class TestClientNoCluster extends Configured implements Tool { } /** - * Fake 'server'. - * Implements the ClientService responding as though it were a 'server' (presumes a new - * ClientService.BlockingInterface made per server). + * Fake 'server'. Implements the ClientService responding as though it were a 'server' (presumes a + * new ClientService.BlockingInterface made per server). */ static class FakeServer implements ClientService.BlockingInterface { private AtomicInteger multiInvocationsCount = new AtomicInteger(0); - private final SortedMap> meta; + private final SortedMap> meta; private final AtomicLong sequenceids; private final long multiPause; private final int tooManyMultiRequests; - FakeServer(final Configuration c, final SortedMap> meta, - final AtomicLong sequenceids) { + FakeServer(final Configuration c, final SortedMap> meta, + final AtomicLong sequenceids) { this.meta = meta; this.sequenceids = sequenceids; - // Pause to simulate the server taking time applying the edits. This will drive up the + // Pause to simulate the server taking time applying the edits. This will drive up the // number of threads used over in client. 
this.multiPause = c.getLong("hbase.test.multi.pause.when.done", 0); this.tooManyMultiRequests = c.getInt("hbase.test.multi.too.many", 3); } @Override - public GetResponse get(RpcController controller, GetRequest request) - throws ServiceException { - boolean metaRegion = isMetaRegion(request.getRegion().getValue().toByteArray(), - request.getRegion().getType()); + public GetResponse get(RpcController controller, GetRequest request) throws ServiceException { + boolean metaRegion = + isMetaRegion(request.getRegion().getValue().toByteArray(), request.getRegion().getType()); if (!metaRegion) { return doGetResponse(request); } @@ -512,41 +502,39 @@ public class TestClientNoCluster extends Configured implements Tool { } @Override - public MutateResponse mutate(RpcController controller, - MutateRequest request) throws ServiceException { + public MutateResponse mutate(RpcController controller, MutateRequest request) + throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override - public ScanResponse scan(RpcController controller, - ScanRequest request) throws ServiceException { + public ScanResponse scan(RpcController controller, ScanRequest request) + throws ServiceException { // Presume it is a scan of meta for now. Not all scans provide a region spec expecting - // the server to keep reference by scannerid. TODO. + // the server to keep reference by scannerid. TODO. return doMetaScanResponse(meta, sequenceids, request); } @Override - public BulkLoadHFileResponse bulkLoadHFile( - RpcController controller, BulkLoadHFileRequest request) - throws ServiceException { + public BulkLoadHFileResponse bulkLoadHFile(RpcController controller, + BulkLoadHFileRequest request) throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override - public CoprocessorServiceResponse execService( - RpcController controller, CoprocessorServiceRequest request) - throws ServiceException { + public CoprocessorServiceResponse execService(RpcController controller, + CoprocessorServiceRequest request) throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override public MultiResponse multi(RpcController controller, MultiRequest request) - throws ServiceException { + throws ServiceException { int concurrentInvocations = this.multiInvocationsCount.incrementAndGet(); try { if (concurrentInvocations >= tooManyMultiRequests) { - throw new ServiceException(new RegionTooBusyException("concurrentInvocations=" + - concurrentInvocations)); + throw new ServiceException( + new RegionTooBusyException("concurrentInvocations=" + concurrentInvocations)); } Threads.sleep(multiPause); return doMultiResponse(meta, sequenceids, request); @@ -557,32 +545,33 @@ public class TestClientNoCluster extends Configured implements Tool { @Override public CoprocessorServiceResponse execRegionServerService(RpcController controller, - CoprocessorServiceRequest request) throws ServiceException { + CoprocessorServiceRequest request) throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller, - PrepareBulkLoadRequest request) throws ServiceException { + PrepareBulkLoadRequest request) throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller, - CleanupBulkLoadRequest request) throws ServiceException { + 
CleanupBulkLoadRequest request) throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } } - static ScanResponse doMetaScanResponse(final SortedMap> meta, - final AtomicLong sequenceids, final ScanRequest request) { + static ScanResponse doMetaScanResponse( + final SortedMap> meta, final AtomicLong sequenceids, + final ScanRequest request) { ScanResponse.Builder builder = ScanResponse.newBuilder(); int max = request.getNumberOfRows(); int count = 0; - Map> tail = - request.hasScan()? meta.tailMap(request.getScan().getStartRow().toByteArray()): meta; - ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); - for (Map.Entry> e: tail.entrySet()) { + Map> tail = + request.hasScan() ? meta.tailMap(request.getScan().getStartRow().toByteArray()) : meta; + ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); + for (Map.Entry> e : tail.entrySet()) { // Can be 0 on open of a scanner -- i.e. rpc to setup scannerid only. if (max <= 0) break; if (++count > max) break; @@ -598,13 +587,13 @@ public class TestClientNoCluster extends Configured implements Tool { else builder.setMoreResults(true); } // If no scannerid, set one. - builder.setScannerId(request.hasScannerId()? - request.getScannerId(): sequenceids.incrementAndGet()); + builder.setScannerId( + request.hasScannerId() ? request.getScannerId() : sequenceids.incrementAndGet()); return builder.build(); } - static GetResponse doMetaGetResponse(final SortedMap> meta, - final GetRequest request) { + static GetResponse doMetaGetResponse(final SortedMap> meta, + final GetRequest request) { ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); ByteString row = request.getGet().getRow(); Pair p = meta.get(row.toByteArray()); @@ -619,26 +608,26 @@ public class TestClientNoCluster extends Configured implements Tool { } /** - * @param name region name or encoded region name. - * @param type - * @return True if we are dealing with a hbase:meta region. + * @param name region name or encoded region name. n * @return True if we are dealing with a + * hbase:meta region. 
*/ - static boolean isMetaRegion(final byte [] name, final RegionSpecifierType type) { + static boolean isMetaRegion(final byte[] name, final RegionSpecifierType type) { switch (type) { - case REGION_NAME: - return Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), name); - case ENCODED_REGION_NAME: - return Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), name); - default: throw new UnsupportedOperationException(); + case REGION_NAME: + return Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), name); + case ENCODED_REGION_NAME: + return Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), name); + default: + throw new UnsupportedOperationException(); } } private final static ByteString CATALOG_FAMILY_BYTESTRING = - UnsafeByteOperations.unsafeWrap(HConstants.CATALOG_FAMILY); + UnsafeByteOperations.unsafeWrap(HConstants.CATALOG_FAMILY); private final static ByteString REGIONINFO_QUALIFIER_BYTESTRING = - UnsafeByteOperations.unsafeWrap(HConstants.REGIONINFO_QUALIFIER); + UnsafeByteOperations.unsafeWrap(HConstants.REGIONINFO_QUALIFIER); private final static ByteString SERVER_QUALIFIER_BYTESTRING = - UnsafeByteOperations.unsafeWrap(HConstants.SERVER_QUALIFIER); + UnsafeByteOperations.unsafeWrap(HConstants.SERVER_QUALIFIER); static CellProtos.Cell.Builder getBaseCellBuilder(final ByteString row) { CellProtos.Cell.Builder cellBuilder = CellProtos.Cell.newBuilder(); @@ -666,41 +655,37 @@ public class TestClientNoCluster extends Configured implements Tool { CellProtos.Cell.Builder cellBuilder = getBaseCellBuilder(row); cellBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(HConstants.STARTCODE_QUALIFIER)); // TODO: - cellBuilder.setValue(UnsafeByteOperations.unsafeWrap( - Bytes.toBytes(META_SERVERNAME.getStartcode()))); + cellBuilder + .setValue(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(META_SERVERNAME.getStartcode()))); return cellBuilder.build(); } - private static final byte [] BIG_USER_TABLE = Bytes.toBytes("t"); + private static final byte[] BIG_USER_TABLE = Bytes.toBytes("t"); /** - * Format passed integer. Zero-pad. - * Copied from hbase-server PE class and small amendment. Make them share. - * @param number - * @return Returns zero-prefixed 10-byte wide decimal version of passed - * number (Does absolute in case number is negative). + * Format passed integer. Zero-pad. Copied from hbase-server PE class and small amendment. Make + * them share. n * @return Returns zero-prefixed 10-byte wide decimal version of passed number + * (Does absolute in case number is negative). 
*/ - private static byte [] format(final long number) { - byte [] b = new byte[10]; + private static byte[] format(final long number) { + byte[] b = new byte[10]; long d = number; for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } /** - * @param count - * @param namespaceSpan - * @return count regions + * nn * @return count regions */ - private static HRegionInfo [] makeHRegionInfos(final byte [] tableName, final int count, - final long namespaceSpan) { - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; + private static HRegionInfo[] makeHRegionInfos(final byte[] tableName, final int count, + final long namespaceSpan) { + byte[] startKey = HConstants.EMPTY_BYTE_ARRAY; + byte[] endKey = HConstants.EMPTY_BYTE_ARRAY; long interval = namespaceSpan / count; - HRegionInfo [] hris = new HRegionInfo[count]; + HRegionInfo[] hris = new HRegionInfo[count]; for (int i = 0; i < count; i++) { if (i == 0) { endKey = format(interval); @@ -715,11 +700,10 @@ public class TestClientNoCluster extends Configured implements Tool { } /** - * @param count - * @return Return count servernames. + * n * @return Return count servernames. */ - private static ServerName [] makeServerNames(final int count) { - ServerName [] sns = new ServerName[count]; + private static ServerName[] makeServerNames(final int count) { + ServerName[] sns = new ServerName[count]; for (int i = 0; i < count; i++) { sns[i] = ServerName.valueOf("" + i + ".example.org", 16010, i); } @@ -729,8 +713,9 @@ public class TestClientNoCluster extends Configured implements Tool { /** * Comparator for meta row keys. */ - private static class MetaRowsComparator implements Comparator { + private static class MetaRowsComparator implements Comparator { private final CellComparatorImpl delegate = MetaCellComparator.META_COMPARATOR; + @Override public int compare(byte[] left, byte[] right) { return delegate.compareRows(new KeyValue.KeyOnlyKeyValue(left), right, 0, right.length); @@ -742,16 +727,16 @@ public class TestClientNoCluster extends Configured implements Tool { * ServerName to return for this row. * @return Map with faked hbase:meta content in it. */ - static SortedMap> makeMeta(final byte [] tableName, - final int regionCount, final long namespaceSpan, final int serverCount) { + static SortedMap> makeMeta(final byte[] tableName, + final int regionCount, final long namespaceSpan, final int serverCount) { // I need a comparator for meta rows so we sort properly. - SortedMap> meta = + SortedMap> meta = new ConcurrentSkipListMap<>(new MetaRowsComparator()); - HRegionInfo [] hris = makeHRegionInfos(tableName, regionCount, namespaceSpan); - ServerName [] serverNames = makeServerNames(serverCount); + HRegionInfo[] hris = makeHRegionInfos(tableName, regionCount, namespaceSpan); + ServerName[] serverNames = makeServerNames(serverCount); int per = regionCount / serverCount; int count = 0; - for (HRegionInfo hri: hris) { + for (HRegionInfo hri : hris) { Pair p = new Pair<>(hri, serverNames[count++ / per]); meta.put(hri.getRegionName(), p); } @@ -759,14 +744,10 @@ public class TestClientNoCluster extends Configured implements Tool { } /** - * Code for each 'client' to run. - * - * @param id - * @param c - * @param sharedConnection - * @throws IOException + * Code for each 'client' to run. 
nnnn */ - static void cycle(int id, final Configuration c, final Connection sharedConnection) throws IOException { + static void cycle(int id, final Configuration c, final Connection sharedConnection) + throws IOException { long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000); long startTime = EnvironmentEdgeManager.currentTime(); final int printInterval = 100000; @@ -774,38 +755,40 @@ public class TestClientNoCluster extends Configured implements Tool { boolean get = c.getBoolean("hbase.test.do.gets", false); TableName tableName = TableName.valueOf(BIG_USER_TABLE); if (get) { - try (Table table = sharedConnection.getTable(tableName)){ + try (Table table = sharedConnection.getTable(tableName)) { Stopwatch stopWatch = Stopwatch.createStarted(); for (int i = 0; i < namespaceSpan; i++) { - byte [] b = format(rd.nextLong()); + byte[] b = format(rd.nextLong()); Get g = new Get(b); table.get(g); if (i % printInterval == 0) { - LOG.info("Get " + printInterval + "/" + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); + LOG.info("Get " + printInterval + "/" + + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); stopWatch.reset(); stopWatch.start(); } } - LOG.info("Finished a cycle putting " + namespaceSpan + " in " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); + LOG.info("Finished a cycle putting " + namespaceSpan + " in " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); } } else { try (BufferedMutator mutator = sharedConnection.getBufferedMutator(tableName)) { Stopwatch stopWatch = Stopwatch.createStarted(); for (int i = 0; i < namespaceSpan; i++) { - byte [] b = format(rd.nextLong()); + byte[] b = format(rd.nextLong()); Put p = new Put(b); p.addColumn(HConstants.CATALOG_FAMILY, b, b); mutator.mutate(p); if (i % printInterval == 0) { - LOG.info("Put " + printInterval + "/" + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); + LOG.info("Put " + printInterval + "/" + + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); stopWatch.reset(); stopWatch.start(); } } - LOG.info("Finished a cycle putting " + namespaceSpan + " in " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); - } + LOG.info("Finished a cycle putting " + namespaceSpan + " in " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); + } } } @@ -824,16 +807,15 @@ public class TestClientNoCluster extends Configured implements Tool { final long multiPause = 0; // Check args make basic sense. if ((namespaceSpan < regions) || (regions < servers)) { - throw new IllegalArgumentException("namespaceSpan=" + namespaceSpan + " must be > regions=" + - regions + " which must be > servers=" + servers); + throw new IllegalArgumentException("namespaceSpan=" + namespaceSpan + " must be > regions=" + + regions + " which must be > servers=" + servers); } // Set my many servers and many regions faking connection in place. - getConf().set("hbase.client.connection.impl", - ManyServersManyRegionsConnection.class.getName()); + getConf().set("hbase.client.connection.impl", ManyServersManyRegionsConnection.class.getName()); // Use simple kv registry rather than zk getConf().set("hbase.client.registry.impl", SimpleRegistry.class.getName()); - // When to report fails. Default is we report the 10th. This means we'll see log everytime + // When to report fails. Default is we report the 10th. 
This means we'll see log everytime // an exception is thrown -- usually RegionTooBusyException when we have more than // hbase.test.multi.too.many requests outstanding at any time. getConf().setInt("hbase.client.start.log.errors.counter", 0); @@ -850,14 +832,14 @@ public class TestClientNoCluster extends Configured implements Tool { // Have them all share the same connection so they all share the same instance of // ManyServersManyRegionsConnection so I can keep an eye on how many requests by server. - final ExecutorService pool = Executors.newCachedThreadPool( - new ThreadFactoryBuilder().setNameFormat("p-pool-%d") + final ExecutorService pool = + Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("p-pool-%d") .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); - // Executors.newFixedThreadPool(servers * 10, Threads.getNamedThreadFactory("p")); + // Executors.newFixedThreadPool(servers * 10, Threads.getNamedThreadFactory("p")); // Share a connection so I can keep counts in the 'server' on concurrency. - final Connection sharedConnection = ConnectionFactory.createConnection(getConf()/*, pool*/); + final Connection sharedConnection = ConnectionFactory.createConnection(getConf()/* , pool */); try { - Thread [] ts = new Thread[clients]; + Thread[] ts = new Thread[clients]; for (int j = 0; j < ts.length; j++) { final int id = j; ts[j] = new Thread("" + j) { @@ -885,8 +867,7 @@ public class TestClientNoCluster extends Configured implements Tool { /** * Run a client instance against a faked up server. - * @param args TODO - * @throws Exception + * @param args TODO n */ public static void main(String[] args) throws Exception { System.exit(ToolRunner.run(HBaseConfiguration.create(), new TestClientNoCluster(), args)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java index 244abe01139..c2c2bb3f98d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ public class TestClientScanner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientScanner.class); + HBaseClassTestRule.forClass(TestClientScanner.class); Scan scan; ExecutorService pool; @@ -103,11 +103,11 @@ public class TestClientScanner { private boolean initialized = false; public MockClientScanner(final Configuration conf, final Scan scan, final TableName tableName, - ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, - RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) - throws IOException { + ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) + throws IOException { super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, - primaryOperationTimeout); + primaryOperationTimeout); } @Override @@ -121,8 +121,8 @@ public class TestClientScanner { } // Enforce that we don't short-circuit more than once if (rpcFinishedFired) { - throw new RuntimeException("Expected nextScanner to only be called once after " + - " short-circuit was triggered."); + throw new RuntimeException( + "Expected nextScanner to only be called once after " + " short-circuit was triggered."); } rpcFinishedFired = true; return false; @@ -137,19 +137,20 @@ public class TestClientScanner { @SuppressWarnings("unchecked") public void testNoResultsHint() throws IOException { final Result[] results = new Result[1]; - KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); - results[0] = Result.create(new Cell[] {kv1}); + KeyValue kv1 = + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); + results[0] = Result.create(new Cell[] { kv1 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { private int count = 0; + @Override public Result[] answer(InvocationOnMock invocation) throws Throwable { - ScannerCallableWithReplicas callable = invocation.getArgument(0); + ScannerCallableWithReplicas callable = invocation.getArgument(0); switch (count) { case 0: // initialize count++; @@ -163,14 +164,15 @@ public class TestClientScanner { throw new RuntimeException("Expected only 2 invocations"); } } - }); + }); // Set a much larger cache and buffer size than we'll provide scan.setCaching(100); - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { scanner.setRpcFinished(true); @@ -180,8 +182,7 @@ public class TestClientScanner { // One for fetching the results // One for fetching empty results and quit as we do not have moreResults hint. 
- inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(1, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -197,16 +198,17 @@ public class TestClientScanner { @SuppressWarnings("unchecked") public void testSizeLimit() throws IOException { final Result[] results = new Result[1]; - KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); - results[0] = Result.create(new Cell[] {kv1}); + KeyValue kv1 = + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); + results[0] = Result.create(new Cell[] { kv1 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { private int count = 0; + @Override public Result[] answer(InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable = invocation.getArgument(0); @@ -223,7 +225,7 @@ public class TestClientScanner { throw new RuntimeException("Expected only 2 invocations"); } } - }); + }); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); @@ -232,14 +234,14 @@ public class TestClientScanner { // The single key-value will exit the loop scan.setMaxResultSize(1); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { InOrder inOrder = Mockito.inOrder(caller); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(1)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(1)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(1, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -255,20 +257,21 @@ public class TestClientScanner { @SuppressWarnings("unchecked") public void testCacheLimit() throws IOException { KeyValue kv1 = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); + Type.Maximum); KeyValue kv2 = new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); + Type.Maximum); KeyValue kv3 = new KeyValue(Bytes.toBytes("row3"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); - final Result[] results = new Result[] {Result.create(new Cell[] {kv1}), - Result.create(new Cell[] {kv2}), Result.create(new Cell[] {kv3})}; + Type.Maximum); + final Result[] results = new Result[] { Result.create(new Cell[] { kv1 }), + Result.create(new Cell[] { kv2 }), Result.create(new Cell[] { kv3 }) }; RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { private int count = 0; + @Override public Result[] answer(InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable = invocation.getArgument(0); @@ -285,23 +288,23 @@ public class TestClientScanner { throw new RuntimeException("Expected only 2 invocations"); } } - }); + }); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); // Set a small cache scan.setCaching(1); // Set a very large size - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { InOrder inOrder = Mockito.inOrder(caller); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(1)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(1)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(3, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -331,16 +334,17 @@ public class TestClientScanner { @SuppressWarnings("unchecked") public void testNoMoreResults() throws IOException { final Result[] results = new Result[1]; - KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); - results[0] = Result.create(new Cell[] {kv1}); + KeyValue kv1 = + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); + results[0] = Result.create(new Cell[] { kv1 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { private int count = 0; + @Override public Result[] answer(InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable = invocation.getArgument(0); @@ -356,24 +360,24 @@ public class TestClientScanner { throw new RuntimeException("Expected only 2 invocations"); } } - }); + }); Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); // Set a much larger cache and buffer size than we'll provide scan.setCaching(100); - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { scanner.setRpcFinished(true); InOrder inOrder = Mockito.inOrder(caller); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(1)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(1)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(1, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -389,57 +393,57 @@ public class TestClientScanner { @SuppressWarnings("unchecked") public void testMoreResults() throws IOException { final Result[] results1 = new Result[1]; - KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); - results1[0] = Result.create(new Cell[] {kv1}); + KeyValue kv1 = + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); + results1[0] = Result.create(new Cell[] { kv1 }); final Result[] results2 = new Result[1]; KeyValue kv2 = new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, - Type.Maximum); - results2[0] = Result.create(new Cell[] {kv2}); - + Type.Maximum); + results2[0] = Result.create(new Cell[] { kv2 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { - private int count = 0; - @Override - public Result[] answer(InvocationOnMock invocation) throws Throwable { - ScannerCallableWithReplicas callable = invocation.getArgument(0); - switch (count) { - case 0: // initialize - count++; - callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.YES); - return results1; - case 1: - count++; - // The server reports back false WRT more results - callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.NO); - return results2; - case 2: // close - count++; - return null; - default: - throw new RuntimeException("Expected only 3 invocations"); - } + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { + private int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgument(0); + switch (count) { + case 0: // initialize + count++; + callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.YES); + return results1; + case 1: + count++; + // The server reports back false WRT more results + callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.NO); + return results2; + case 2: // close + count++; + return null; + default: + throw new RuntimeException("Expected only 3 invocations"); } + } }); // Set a much larger cache and buffer size than we'll provide scan.setCaching(100); - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { InOrder inOrder = Mockito.inOrder(caller); scanner.setRpcFinished(true); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(2, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -473,11 +477,12 @@ public class TestClientScanner { MockRpcRetryingCallerFactory.class.getName()); // mock 3 replica locations - when(clusterConn.locateRegion((TableName)any(), (byte[])any(), anyBoolean(), - anyBoolean(), anyInt())).thenReturn(new RegionLocations(null, null, null)); + when(clusterConn.locateRegion((TableName) any(), (byte[]) any(), anyBoolean(), anyBoolean(), + anyInt())).thenReturn(new RegionLocations(null, null, null)); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, new RpcControllerFactory(conf), pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, new RpcControllerFactory(conf), pool, Integer.MAX_VALUE)) { Iterator iter = scanner.iterator(); while (iter.hasNext()) { iter.next(); @@ -500,15 +505,16 @@ public class TestClientScanner { @Override public void cancel() { } + @Override public T callWithRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException { + throws IOException, RuntimeException { throw new 
IOException("Scanner exception"); } @Override public T callWithoutRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException { + throws IOException, RuntimeException { try { return callable.call(callTimeout); } catch (IOException e) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index 15772324c81..c3ff379077e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -42,24 +43,21 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; -import java.util.Map; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestColumnFamilyDescriptorBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestColumnFamilyDescriptorBuilder.class); + HBaseClassTestRule.forClass(TestColumnFamilyDescriptorBuilder.class); @Rule public ExpectedException expectedEx = ExpectedException.none(); @Test public void testBuilder() throws DeserializationException { - ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) - .setInMemory(true) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setBloomFilterType(BloomType.NONE); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY).setInMemory(true) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.NONE); final int v = 123; builder.setBlocksize(v); builder.setTimeToLive(v); @@ -81,13 +79,13 @@ public class TestColumnFamilyDescriptorBuilder { builder.setDFSReplication((short) v); ColumnFamilyDescriptor hcd = builder.build(); - byte [] bytes = ColumnFamilyDescriptorBuilder.toByteArray(hcd); + byte[] bytes = ColumnFamilyDescriptorBuilder.toByteArray(hcd); ColumnFamilyDescriptor deserializedHcd = ColumnFamilyDescriptorBuilder.parseFrom(bytes); assertTrue(hcd.equals(deserializedHcd)); assertEquals(v, hcd.getBlocksize()); assertEquals(v, hcd.getTimeToLive()); - assertTrue(Bytes.equals(hcd.getValue(Bytes.toBytes("a")), - deserializedHcd.getValue(Bytes.toBytes("a")))); + assertTrue( + Bytes.equals(hcd.getValue(Bytes.toBytes("a")), deserializedHcd.getValue(Bytes.toBytes("a")))); assertEquals(hcd.getMaxVersions(), deserializedHcd.getMaxVersions()); assertEquals(hcd.getMinVersions(), deserializedHcd.getMinVersions()); assertEquals(hcd.getKeepDeletedCells(), deserializedHcd.getKeepDeletedCells()); @@ -116,8 +114,8 @@ public class TestColumnFamilyDescriptorBuilder { */ @Test public void testAddGetRemoveConfiguration() { - ColumnFamilyDescriptorBuilder builder - = 
ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); String key = "Some"; String value = "value"; builder.setConfiguration(key, value); @@ -134,11 +132,11 @@ public class TestColumnFamilyDescriptorBuilder { // We unify the format of all values saved in the descriptor. // Each value is stored as bytes of string. String isMobString = PrettyPrinter.format(String.valueOf(isMob), - HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); + HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); String thresholdString = PrettyPrinter.format(String.valueOf(threshold), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); String policyString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(policy)), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); assertEquals(String.valueOf(isMob), isMobString); assertEquals(String.valueOf(threshold), thresholdString); assertEquals(String.valueOf(policy), policyString); @@ -146,16 +144,11 @@ public class TestColumnFamilyDescriptorBuilder { @Test public void testClassMethodsAreBuilderStyle() { - /* HColumnDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * HColumnDescriptor hcd - * = new HColumnDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * HColumnDescriptor should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: HColumnDescriptor hcd = new HColumnDescriptor() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(ColumnFamilyDescriptorBuilder.class); @@ -164,8 +157,8 @@ public class TestColumnFamilyDescriptorBuilder { @Test public void testSetTimeToLive() throws HBaseException { String ttl; - ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); ttl = "50000"; builder.setTimeToLive(ttl); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java index ac8aed866e6..900f4093d1b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ public class TestColumnFamilyDescriptorLowerCaseEnum { @Test public void test() - throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { + throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { Map> getMethod2Value = new HashMap<>(); ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionConfiguration.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionConfiguration.java index 5f1b4879e89..d3031fe85c7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionConfiguration.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionConfiguration.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +19,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; @@ -29,7 +29,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestConnectionConfiguration { @ClassRule diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java index ed2eb80fe87..0e61dfc9db2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY; + import java.io.Closeable; import java.io.IOException; import java.util.concurrent.ExecutionException; @@ -31,6 +32,7 @@ import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, SmallTests.class }) diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java index 9cbd0240e9a..fba41ecbe5b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java @@ -28,6 +28,7 @@ import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, SmallTests.class }) diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java index 561b1f5715f..a2df7e93239 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,7 @@ public class TestConnectionRegistryLeak { @BeforeClass public static void setUp() { CONF.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ConnectionRegistryForTest.class, ConnectionRegistry.class); + ConnectionRegistryForTest.class, ConnectionRegistry.class); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java index b288f98f1f9..f1a8e000136 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,9 +55,8 @@ public class TestCoprocessorDescriptor { int priority = 100; String propertyKey = "propertyKey"; String propertyValue = "propertyValue"; - CoprocessorDescriptor cp = - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build(); + CoprocessorDescriptor cp = CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build(); assertEquals(className, cp.getClassName()); assertEquals(path, cp.getJarPath().get()); assertEquals(priority, cp.getPriority()); @@ -73,13 +72,11 @@ public class TestCoprocessorDescriptor { String path = "path"; int priority = Math.abs(className.hashCode()); String propertyValue = "propertyValue"; - cps.add( - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build()); + cps.add(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build()); } - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setCoprocessors(cps).build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessors(cps).build(); for (CoprocessorDescriptor cp : cps) { boolean match = false; for (CoprocessorDescriptor that : tableDescriptor.getCoprocessorDescriptors()) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java index 22e718bf95f..30d7c2cb2eb 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
@@ -36,18 +36,17 @@ public class TestDelayingRunner {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestDelayingRunner.class);
+    HBaseClassTestRule.forClass(TestDelayingRunner.class);

-  private static final TableName DUMMY_TABLE =
-      TableName.valueOf("DUMMY_TABLE");
+  private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE");
   private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
   private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
   private static HRegionInfo hri1 =
-      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
+    new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);

   @SuppressWarnings({ "rawtypes", "unchecked" })
   @Test
-  public void testDelayingRunner() throws Exception{
+  public void testDelayingRunner() throws Exception {
     MultiAction ma = new MultiAction();
     ma.add(hri1.getRegionName(), new Action(new Put(DUMMY_BYTES_1), 0));
     final AtomicLong endTime = new AtomicLong();
@@ -63,7 +62,7 @@ public class TestDelayingRunner {
     runner.run();
     long delay = endTime.get() - startTime;
     assertTrue("DelayingRunner did not delay long enough", delay >= sleepTime);
-    assertFalse("DelayingRunner delayed too long", delay > sleepTime + sleepTime*0.2);
+    assertFalse("DelayingRunner delayed too long", delay > sleepTime + sleepTime * 0.2);
   }
 }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
index e855055fd88..cc329cd3d03 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -30,20 +30,19 @@ import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

-@Category({ClientTests.class, SmallTests.class})
+@Category({ ClientTests.class, SmallTests.class })
 public class TestDeleteTimeStamp {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestDeleteTimeStamp.class);
+    HBaseClassTestRule.forClass(TestDeleteTimeStamp.class);

   private static final byte[] ROW = Bytes.toBytes("testRow");
   private static final byte[] FAMILY = Bytes.toBytes("testFamily");
   private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");

   /*
-   * Test for verifying that the timestamp in delete object is being honored.
-   * @throws Exception
+   * Test for verifying that the timestamp in delete object is being honored. n
    */
   @Test
   public void testTimeStamp() {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
index 38e11c9b457..95e05911008 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -52,43 +52,42 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestGet { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGet.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGet.class); - private static final byte [] ROW = new byte [] {'r'}; + private static final byte[] ROW = new byte[] { 'r' }; private static final String PB_GET = "CgNyb3ciEwoPdGVzdC5Nb2NrRmlsdGVyEgAwATgB"; private static final String PB_GET_WITH_FILTER_LIST = - "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + - "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + - "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; + "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + + "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + + "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; private static final String MOCK_FILTER_JAR = - "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + - "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + - "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + - "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + - "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + - "0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + - "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + - "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + - "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + - "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + - "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + - "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + - "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + - "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + - "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + - "kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + - "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + - "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + - "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + - "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + - "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + - "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; + "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + + "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + + "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + + "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + + "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + + 
"0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + + "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + + "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + + "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + + "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + + "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + + "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + + "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + + "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + + "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + + "kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + + "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + + "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + + "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + + "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + + "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + + "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; @Test public void testAttributesSerialization() throws IOException { @@ -121,22 +120,22 @@ public class TestGet { get.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - get.getAttributesMap().get("attribute1"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttributesMap().get("attribute1"))); // overriding attribute value get.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - get.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), get.getAttributesMap().get("attribute1"))); // adding another attribute get.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttribute("attribute2"))); Assert.assertEquals(2, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - get.getAttributesMap().get("attribute2"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttributesMap().get("attribute2"))); // removing attribute get.setAttribute("attribute2", null); @@ -209,14 +208,12 @@ public class TestGet { @Test public void testDynamicFilter() throws Exception { Configuration conf = HBaseConfiguration.create(); - String localPath = conf.get("hbase.local.dir") - + File.separator + "jars" + File.separator; + String localPath = conf.get("hbase.local.dir") + File.separator + "jars" + File.separator; File jarFile = new File(localPath, "MockFilter.jar"); jarFile.delete(); assertFalse("Should be deleted: " + jarFile.getPath(), jarFile.exists()); - ClientProtos.Get getProto1 = - ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); + ClientProtos.Get getProto1 = ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); ClientProtos.Get 
getProto2 = ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET_WITH_FILTER_LIST)); try { @@ -230,9 +227,8 @@ public class TestGet { fail("Should not be able to load the filter class"); } catch (IOException ioe) { assertTrue(ioe.getCause() instanceof InvocationTargetException); - InvocationTargetException ite = (InvocationTargetException)ioe.getCause(); - assertTrue(ite.getTargetException() - instanceof DeserializationException); + InvocationTargetException ite = (InvocationTargetException) ioe.getCause(); + assertTrue(ite.getTargetException() instanceof DeserializationException); } FileOutputStream fos = new FileOutputStream(jarFile); fos.write(Base64.getDecoder().decode(MOCK_FILTER_JAR)); @@ -243,7 +239,7 @@ public class TestGet { Get get2 = ProtobufUtil.toGet(getProto2); assertTrue(get2.getFilter() instanceof FilterList); - List filters = ((FilterList)get2.getFilter()).getFilters(); + List filters = ((FilterList) get2.getFilter()).getFilters(); assertEquals(3, filters.size()); assertEquals("test.MockFilter", filters.get(0).getClass().getName()); assertEquals("my.MockFilter", filters.get(1).getClass().getName()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java index cce4939279d..3b9bc271c35 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestHTableMultiplexerViaMocks { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHTableMultiplexerViaMocks.class); + HBaseClassTestRule.forClass(TestHTableMultiplexerViaMocks.class); private HTableMultiplexer mockMultiplexer; private ClusterConnection mockConnection; @@ -57,7 +57,8 @@ public class TestHTableMultiplexerViaMocks { } @SuppressWarnings("deprecation") - @Test public void testConnectionClosing() throws IOException { + @Test + public void testConnectionClosing() throws IOException { doCallRealMethod().when(mockMultiplexer).close(); // If the connection is not closed when(mockConnection.isClosed()).thenReturn(false); @@ -69,7 +70,8 @@ public class TestHTableMultiplexerViaMocks { } @SuppressWarnings("deprecation") - @Test public void testClosingAlreadyClosedConnection() throws IOException { + @Test + public void testClosingAlreadyClosedConnection() throws IOException { doCallRealMethod().when(mockMultiplexer).close(); // If the connection is already closed when(mockConnection.isClosed()).thenReturn(true); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java index a4adfe5988a..a2193553a65 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java @@ -38,6 +38,7 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; + import io.opentelemetry.api.trace.SpanKind; import 
io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; @@ -70,8 +71,10 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -112,19 +115,16 @@ public class TestHTableTracing extends TestTracingBase { public ScanResponse answer(InvocationOnMock invocation) throws Throwable { ScanRequest req = invocation.getArgument(1); if (!req.hasScannerId()) { - return ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true).build(); + return ScanResponse.newBuilder().setScannerId(1).setTtl(800).setMoreResultsInRegion(true) + .setMoreResults(true).build(); } else { if (req.hasCloseScanner() && req.getCloseScanner()) { return ScanResponse.getDefaultInstance(); } else { Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Cell.Type.Put) - .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) - .setFamily(Bytes.toBytes("cf")) - .setQualifier(Bytes.toBytes("cq")) - .setValue(Bytes.toBytes("v")) - .build(); + .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) + .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); ScanResponse.Builder builder = ScanResponse.newBuilder().setScannerId(1).setTtl(800) .addResults(ProtobufUtil.toResult(result)); @@ -142,7 +142,8 @@ public class TestHTableTracing extends TestTracingBase { doAnswer(new Answer() { @Override public MultiResponse answer(InvocationOnMock invocation) throws Throwable { - MultiResponse resp = MultiResponse.newBuilder() + MultiResponse resp = + MultiResponse.newBuilder() .addRegionActionResult(RegionActionResult.newBuilder().addResultOrException( ResultOrException.newBuilder().setResult(ProtobufUtil.toResult(new Result())))) .build(); @@ -159,13 +160,11 @@ public class TestHTableTracing extends TestTracingBase { case INCREMENT: ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Cell.Type.Put) - .setRow(req.getRow().toByteArray()) - .setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()) - .build(); + Cell cell = + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; @@ -183,13 +182,10 @@ public class TestHTableTracing extends TestTracingBase { ClientProtos.Get req = ((GetRequest) invocation.getArgument(1)).getGet(); ColumnValue value = ColumnValue.getDefaultInstance(); QualifierValue qvalue = QualifierValue.getDefaultInstance(); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - 
.setType(Cell.Type.Put) - .setRow(req.getRow().toByteArray()) - .setFamily(value.getFamily().toByteArray()) + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()) - .build(); + .setValue(qvalue.getValue().toByteArray()).build(); return GetResponse.newBuilder() .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell), true))).build(); } @@ -202,7 +198,8 @@ public class TestHTableTracing extends TestTracingBase { RegionLocator locator = mock(HRegionLocator.class); Answer answer = new Answer() { - @Override public HRegionLocation answer(InvocationOnMock invocation) throws Throwable { + @Override + public HRegionLocation answer(InvocationOnMock invocation) throws Throwable { TableName tableName = TableName.META_TABLE_NAME; RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); ServerName serverName = MASTER_HOST; @@ -210,8 +207,8 @@ public class TestHTableTracing extends TestTracingBase { return loc; } }; - doAnswer(answer).when(locator) - .getRegionLocation(any(byte[].class), anyInt(), anyBoolean()); + doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyInt(), + anyBoolean()); doAnswer(answer).when(locator).getRegionLocation(any(byte[].class)); doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyInt()); doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyBoolean()); @@ -219,8 +216,7 @@ public class TestHTableTracing extends TestTracingBase { } @Override - public ClientService.BlockingInterface getClient(ServerName serverName) - throws IOException { + public ClientService.BlockingInterface getClient(ServerName serverName) throws IOException { return stub; } }); @@ -247,32 +243,25 @@ public class TestHTableTracing extends TestTracingBase { // n.b. 
this method implementation must match the one of the same name found in // TestAsyncTableTracing final TableName tableName = table.getName(); - final Matcher spanLocator = allOf( - hasName(containsString(tableOperation)), hasEnded()); + final Matcher spanLocator = + allOf(hasName(containsString(tableOperation)), hasEnded()); final String expectedName = tableOperation + " " + tableName.getNameWithNamespaceInclAsString(); - Waiter.waitFor(conf, 1000, new MatcherPredicate<>( - "waiting for span to emit", + Waiter.waitFor(conf, 1000, new MatcherPredicate<>("waiting for span to emit", () -> TRACE_RULE.getSpans(), hasItem(spanLocator))); - List candidateSpans = TRACE_RULE.getSpans() - .stream() - .filter(spanLocator::matches) - .collect(Collectors.toList()); + List candidateSpans = + TRACE_RULE.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList()); assertThat(candidateSpans, hasSize(1)); SpanData data = candidateSpans.iterator().next(); - assertThat(data, allOf( - hasName(expectedName), - hasKind(SpanKind.CLIENT), - hasStatusWithCode(StatusCode.OK), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(tableName), - matcher)); + assertThat(data, + allOf(hasName(expectedName), hasKind(SpanKind.CLIENT), hasStatusWithCode(StatusCode.OK), + buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(tableName), matcher)); } @Test public void testPut() throws IOException { - table.put(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))); + table.put(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))); assertTrace("PUT"); } @@ -326,9 +315,9 @@ public class TestHTableTracing extends TestTracingBase { table.checkAndMutate(CheckAndMutate.newBuilder(Bytes.toBytes(0)) .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) .build(new Delete(Bytes.toBytes(0)))); - assertTrace("CHECK_AND_MUTATE", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + assertTrace("CHECK_AND_MUTATE", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test @@ -336,9 +325,9 @@ public class TestHTableTracing extends TestTracingBase { table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) .build(new Delete(Bytes.toBytes(0))))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test @@ -346,67 +335,60 @@ public class TestHTableTracing extends TestTracingBase { table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) .build(new Delete(Bytes.toBytes(0))))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test public void testMutateRow() throws Exception { byte[] row = Bytes.toBytes(0); table.mutateRow(RowMutations.of(Arrays.asList(new Delete(row)))); - assertTrace("BATCH", hasAttributes( - 
containsEntryWithStringValuesOf( - "db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testExistsList() throws IOException { table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testExistsAll() throws IOException { table.existsAll(Arrays.asList(new Get(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testGetList() throws IOException { table.get(Arrays.asList(new Get(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testPutList() throws IOException { table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "PUT"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test public void testDeleteList() throws IOException { table.delete(Lists.newArrayList(new Delete(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testBatchList() throws IOException, InterruptedException { table.batch(Arrays.asList(new Delete(Bytes.toBytes(0))), null); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java index 1ac483da91f..f7c2e3b5226 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,54 +39,41 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestImmutableHColumnDescriptor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImmutableHColumnDescriptor.class); + HBaseClassTestRule.forClass(TestImmutableHColumnDescriptor.class); @Rule public TestName name = new TestName(); private static final List> TEST_FUNCTION = Arrays.asList( - hcd -> hcd.setValue("a", "a"), - hcd -> hcd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), - hcd -> hcd.setConfiguration("aaa", "ccc"), - hcd -> hcd.remove(Bytes.toBytes("aaa")), - hcd -> hcd.removeConfiguration("xxx"), - hcd -> hcd.setBlockCacheEnabled(false), - hcd -> hcd.setBlocksize(10), - hcd -> hcd.setBloomFilterType(BloomType.NONE), - hcd -> hcd.setCacheBloomsOnWrite(false), - hcd -> hcd.setCacheDataOnWrite(true), + hcd -> hcd.setValue("a", "a"), hcd -> hcd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), + hcd -> hcd.setConfiguration("aaa", "ccc"), hcd -> hcd.remove(Bytes.toBytes("aaa")), + hcd -> hcd.removeConfiguration("xxx"), hcd -> hcd.setBlockCacheEnabled(false), + hcd -> hcd.setBlocksize(10), hcd -> hcd.setBloomFilterType(BloomType.NONE), + hcd -> hcd.setCacheBloomsOnWrite(false), hcd -> hcd.setCacheDataOnWrite(true), hcd -> hcd.setCacheIndexesOnWrite(true), hcd -> hcd.setCompactionCompressionType(Compression.Algorithm.LZO), - hcd -> hcd.setCompressTags(true), - hcd -> hcd.setCompressionType(Compression.Algorithm.LZO), + hcd -> hcd.setCompressTags(true), hcd -> hcd.setCompressionType(Compression.Algorithm.LZO), hcd -> hcd.setDFSReplication((short) 10), hcd -> hcd.setDataBlockEncoding(DataBlockEncoding.NONE), - hcd -> hcd.setEncryptionKey(Bytes.toBytes("xxx")), - hcd -> hcd.setEncryptionType("xxx"), - hcd -> hcd.setEvictBlocksOnClose(true), - hcd -> hcd.setInMemory(true), + hcd -> hcd.setEncryptionKey(Bytes.toBytes("xxx")), hcd -> hcd.setEncryptionType("xxx"), + hcd -> hcd.setEvictBlocksOnClose(true), hcd -> hcd.setInMemory(true), hcd -> hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE), - hcd -> hcd.setKeepDeletedCells(KeepDeletedCells.FALSE), - hcd -> hcd.setMaxVersions(1000), + hcd -> hcd.setKeepDeletedCells(KeepDeletedCells.FALSE), hcd -> hcd.setMaxVersions(1000), hcd -> hcd.setMinVersions(10), hcd -> hcd.setMobCompactPartitionPolicy(MobCompactPartitionPolicy.DAILY), - hcd -> hcd.setMobEnabled(true), - hcd -> hcd.setMobThreshold(10), - hcd -> hcd.setPrefetchBlocksOnOpen(true), - hcd -> hcd.setScope(0), - hcd -> hcd.setStoragePolicy("aaa"), - hcd -> hcd.setTimeToLive(100), - hcd -> hcd.setVersions(1, 10) - ); + hcd -> hcd.setMobEnabled(true), hcd -> hcd.setMobThreshold(10), + hcd -> hcd.setPrefetchBlocksOnOpen(true), hcd -> hcd.setScope(0), + hcd -> hcd.setStoragePolicy("aaa"), hcd -> hcd.setTimeToLive(100), + hcd -> hcd.setVersions(1, 10)); @Test public void testImmutable() { - ImmutableHColumnDescriptor hcd = new ImmutableHColumnDescriptor( - new HColumnDescriptor(Bytes.toBytes(name.getMethodName()))); + ImmutableHColumnDescriptor hcd = + new ImmutableHColumnDescriptor(new HColumnDescriptor(Bytes.toBytes(name.getMethodName()))); for (int i = 0; i != TEST_FUNCTION.size(); ++i) { try { TEST_FUNCTION.get(i).accept(hcd); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java index 68afeec2663..341f77f3c2d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,20 +36,18 @@ import org.junit.rules.TestName; /** * Test ImmutableHRegionInfo */ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestImmutableHRegionInfo { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImmutableHRegionInfo.class); + HBaseClassTestRule.forClass(TestImmutableHRegionInfo.class); @Rule public TestName name = new TestName(); - private final List> TEST_FUNCTIONS = Arrays.asList( - hri -> hri.setOffline(true), - hri -> hri.setSplit(true) - ); + private final List> TEST_FUNCTIONS = + Arrays.asList(hri -> hri.setOffline(true), hri -> hri.setSplit(true)); @Test public void testImmutable() { @@ -60,7 +58,7 @@ public class TestImmutableHRegionInfo { try { f.accept(immutableHri); fail("ImmutableHRegionInfo can't be modified !!!"); - } catch(UnsupportedOperationException e) { + } catch (UnsupportedOperationException e) { } }); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java index b83c01ab29a..a121219443d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,45 +37,34 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestImmutableHTableDescriptor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImmutableHTableDescriptor.class); + HBaseClassTestRule.forClass(TestImmutableHTableDescriptor.class); @Rule public TestName name = new TestName(); private static final List> TEST_FUNCTION = Arrays.asList( - htd -> htd.setValue("a", "a"), - htd -> htd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), + htd -> htd.setValue("a", "a"), htd -> htd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), htd -> htd.setValue(new Bytes(Bytes.toBytes("a")), new Bytes(Bytes.toBytes("a"))), - htd -> htd.setCompactionEnabled(false), - htd -> htd.setConfiguration("aaa", "ccc"), - htd -> htd.setDurability(Durability.USE_DEFAULT), - htd -> htd.setFlushPolicyClassName("class"), - htd -> htd.setMaxFileSize(123), - htd -> htd.setMemStoreFlushSize(123123123), - htd -> htd.setNormalizationEnabled(false), - htd -> htd.setPriority(123), - htd -> htd.setReadOnly(true), - htd -> htd.setRegionMemstoreReplication(true), - htd -> htd.setRegionReplication(123), - htd -> htd.setRegionSplitPolicyClassName("class"), + htd -> htd.setCompactionEnabled(false), htd -> htd.setConfiguration("aaa", "ccc"), + htd -> htd.setDurability(Durability.USE_DEFAULT), htd -> htd.setFlushPolicyClassName("class"), + htd -> htd.setMaxFileSize(123), htd -> htd.setMemStoreFlushSize(123123123), + htd -> htd.setNormalizationEnabled(false), htd -> htd.setPriority(123), + htd -> htd.setReadOnly(true), htd -> htd.setRegionMemstoreReplication(true), + htd -> htd.setRegionReplication(123), htd -> htd.setRegionSplitPolicyClassName("class"), htd -> htd.addFamily(new HColumnDescriptor(Bytes.toBytes("fm"))), - htd -> htd.remove(new Bytes(Bytes.toBytes("aaa"))), - htd -> htd.remove("aaa"), - htd -> htd.remove(Bytes.toBytes("aaa")), - htd -> htd.removeConfiguration("xxx"), - htd -> htd.removeFamily(Bytes.toBytes("fm")), - htd -> { + htd -> htd.remove(new Bytes(Bytes.toBytes("aaa"))), htd -> htd.remove("aaa"), + htd -> htd.remove(Bytes.toBytes("aaa")), htd -> htd.removeConfiguration("xxx"), + htd -> htd.removeFamily(Bytes.toBytes("fm")), htd -> { try { htd.addCoprocessor("xxx"); } catch (IOException e) { throw new RuntimeException(e); } - } - ); + }); @Test public void testImmutable() { @@ -113,18 +102,13 @@ public class TestImmutableHTableDescriptor { @Test public void testClassMethodsAreBuilderStyle() { - /* ImmutableHTableDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * ImmutableHTableDescriptor d - * = new ImmutableHTableDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object - */ + /* + * ImmutableHTableDescriptor should have a builder style setup where setXXX/addXXX methods can + * be chainable together: . 
For example: ImmutableHTableDescriptor d = new + * ImmutableHTableDescriptor() .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all + * methods starting with "set" returns the declaring object + */ - BuilderStyleTest.assertClassesAreBuilderStyle(ImmutableHTableDescriptor.class); + BuilderStyleTest.assertClassesAreBuilderStyle(ImmutableHTableDescriptor.class); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java index 09b9add8ca0..aa05f5a3c7d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -39,8 +40,6 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; /** * Small tests for ImmutableScan @@ -59,37 +58,17 @@ public class TestImmutableScan { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user2", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setSmall(true) - .setReadType(Scan.ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) - .setTimeRange(0, 13); + .setACL("test_user2", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED).setLimit(100) + .setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100).setMaxResultsPerColumnFamily(1000) + .readVersions(9999).setMvccReadPoint(5).setNeedCursorResult(true).setPriority(1).setRaw(true) + .setReplicaId(3).setReversed(true).setRowOffsetPerColumnFamily(5) + .setStartStopRowForPrefixScan(Bytes.toBytes("row_")).setScanMetricsEnabled(true) 
+ .setSmall(true).setReadType(Scan.ReadType.STREAM).withStartRow(Bytes.toBytes("row_1")) + .withStopRow(Bytes.toBytes("row_2")).setTimeRange(0, 13); // create a copy of existing scan object Scan scanCopy = new ImmutableScan(scan); @@ -210,8 +189,7 @@ public class TestImmutableScan { scanCopy.setCaching(1); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setCaching", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setCaching", e.getMessage()); } try { scanCopy.setLoadColumnFamiliesOnDemand(true); @@ -302,8 +280,7 @@ public class TestImmutableScan { scanCopy.setAllowPartialResults(true); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setAllowPartialResults", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setAllowPartialResults", e.getMessage()); } try { scanCopy.setId("id"); @@ -382,12 +359,13 @@ public class TestImmutableScan { } private static boolean isGetter(Method method) { - if ("hashCode".equals(method.getName()) || "equals".equals(method.getName()) - || method.getName().startsWith("set")) { + if ( + "hashCode".equals(method.getName()) || "equals".equals(method.getName()) + || method.getName().startsWith("set") + ) { return false; } - return !void.class.equals(method.getReturnType()) - && !Scan.class.equals(method.getReturnType()); + return !void.class.equals(method.getReturnType()) && !Scan.class.equals(method.getReturnType()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java index 75bad5ea416..309c1007ae0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
@@ -29,27 +29,27 @@ import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

-@Category({ClientTests.class, SmallTests.class})
+@Category({ ClientTests.class, SmallTests.class })
 public class TestIncrement {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestIncrement.class);
+    HBaseClassTestRule.forClass(TestIncrement.class);

   @Test
   public void testIncrementInstance() {
     final long expected = 13;
-    Increment inc = new Increment(new byte [] {'r'});
+    Increment inc = new Increment(new byte[] { 'r' });
     int total = 0;
     for (int i = 0; i < 2; i++) {
-      byte [] bytes = Bytes.toBytes(i);
+      byte[] bytes = Bytes.toBytes(i);
       inc.addColumn(bytes, bytes, expected);
       total++;
     }
-    Map<byte [], NavigableMap<byte [], Long>> familyMapOfLongs = inc.getFamilyMapOfLongs();
+    Map<byte[], NavigableMap<byte[], Long>> familyMapOfLongs = inc.getFamilyMapOfLongs();
     int found = 0;
-    for (Map.Entry<byte [], NavigableMap<byte [], Long>> entry: familyMapOfLongs.entrySet()) {
-      for (Map.Entry<byte [], Long> e: entry.getValue().entrySet()) {
+    for (Map.Entry<byte[], NavigableMap<byte[], Long>> entry : familyMapOfLongs.entrySet()) {
+      for (Map.Entry<byte[], Long> e : entry.getValue().entrySet()) {
         assertEquals(expected, e.getValue().longValue());
         found++;
       }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
index 953fba777eb..ebdb7adc588 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
index 69d0389eb94..cf86bb40287 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import com.codahale.metrics.RatioGauge; import com.codahale.metrics.RatioGauge.Ratio; import java.io.IOException; @@ -40,7 +41,9 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; + import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest; @@ -51,15 +54,16 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanReques import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -@Category({ClientTests.class, MetricsTests.class, SmallTests.class}) +@Category({ ClientTests.class, MetricsTests.class, SmallTests.class }) public class TestMetricsConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsConnection.class); + HBaseClassTestRule.forClass(TestMetricsConnection.class); private static MetricsConnection METRICS; private static final ThreadPoolExecutor BATCH_POOL = (ThreadPoolExecutor) Executors.newFixedThreadPool(2); + @BeforeClass public static void beforeClass() { METRICS = new MetricsConnection("mocked-connection", () -> BATCH_POOL, () -> null); @@ -100,8 +104,8 @@ public class TestMetricsConnection { Mockito.when(mockRegistry.getClusterId()) .thenReturn(CompletableFuture.completedFuture(clusterId)); - ConnectionImplementation impl = new ConnectionImplementation(conf, null, - User.getCurrent(), mockRegistry); + ConnectionImplementation impl = + new ConnectionImplementation(conf, null, User.getCurrent(), mockRegistry); MetricsConnection metrics = impl.getConnectionMetrics(); assertNotNull("Metrics should be present", metrics); assertEquals(clusterId + "@" + Integer.toHexString(impl.hashCode()), metrics.scope); @@ -116,71 +120,54 @@ public class TestMetricsConnection { @Test public void testStaticMetrics() throws IOException { final byte[] foo = Bytes.toBytes("foo"); - final RegionSpecifier region = RegionSpecifier.newBuilder() - .setValue(ByteString.EMPTY) - .setType(RegionSpecifierType.REGION_NAME) - .build(); + final RegionSpecifier region = RegionSpecifier.newBuilder().setValue(ByteString.EMPTY) + .setType(RegionSpecifierType.REGION_NAME).build(); final int loop = 5; for (int i = 0; i < loop; i++) { - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Get"), - GetRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Scan"), - ScanRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Multi"), - MultiRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new Append(foo))) - .setRegion(region) - .build(), - 
MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Get"), + GetRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Scan"), + ScanRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Multi"), + MultiRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new Append(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))).setRegion(region) + .build(), + MetricsConnection.newCallStats()); } - for (String method: new String[]{"Get", "Scan", "Mutate"}) { + for (String method : new String[] { "Get", "Scan", "Mutate" }) { final String metricKey = "rpcCount_" + ClientService.getDescriptor().getName() + "_" + method; final long metricVal = METRICS.rpcCounters.get(metricKey).getCount(); assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal >= loop); } - for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { - METRICS.getTracker, METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, - METRICS.deleteTracker, METRICS.incrementTracker, METRICS.putTracker - }) { + for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { METRICS.getTracker, + METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, METRICS.deleteTracker, + METRICS.incrementTracker, METRICS.putTracker }) { assertEquals("Failed to invoke callTimer on " + t, loop, t.callTimer.getCount()); assertEquals("Failed to invoke reqHist on " + t, loop, t.reqHist.getCount()); assertEquals("Failed to invoke respHist on " + t, loop, t.respHist.getCount()); } - RatioGauge executorMetrics = (RatioGauge) METRICS.getMetricRegistry() - .getMetrics().get(METRICS.getExecutorPoolName()); - RatioGauge metaMetrics = (RatioGauge) METRICS.getMetricRegistry() 
- .getMetrics().get(METRICS.getMetaPoolName()); + RatioGauge executorMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getExecutorPoolName()); + RatioGauge metaMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getMetaPoolName()); assertEquals(Ratio.of(0, 3).getValue(), executorMetrics.getValue(), 0); assertEquals(Double.NaN, metaMetrics.getValue(), 0); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java index 99699a4fea6..dcb4d6eb88a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestMutation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMutation.class); + HBaseClassTestRule.forClass(TestMutation.class); @Test public void testAppendCopyConstructor() throws IOException { @@ -50,20 +50,16 @@ public class TestMutation { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("value")); origin.setTimeRange(100, 1000); Append clone = new Append(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("value")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -73,20 +69,16 @@ public class TestMutation { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), 4); origin.setTimeRange(100, 1000); Increment clone = new Increment(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), 3); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -96,12 +88,8 @@ public class TestMutation { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Delete) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + 
.setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Delete).build()); origin.addColumn(family, Bytes.toBytes("q0")); origin.addColumns(family, Bytes.toBytes("q1")); origin.addFamily(family); @@ -111,7 +99,7 @@ public class TestMutation { assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q3")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -121,20 +109,16 @@ public class TestMutation { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes("value")) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes("value")).build()); origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("V-01")); origin.addColumn(family, Bytes.toBytes("q1"), 100, Bytes.toBytes("V-01")); Put clone = new Put(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q2"), Bytes.toBytes("V-02")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -160,10 +144,10 @@ public class TestMutation { Assert.assertEquals(origin.getTimestamp(), clone.getTimestamp()); Assert.assertEquals(origin.getPriority(), clone.getPriority()); if (origin instanceof Append) { - assertEquals(((Append)origin).getTimeRange(), ((Append)clone).getTimeRange()); + assertEquals(((Append) origin).getTimeRange(), ((Append) clone).getTimeRange()); } if (origin instanceof Increment) { - assertEquals(((Increment)origin).getTimeRange(), ((Increment)clone).getTimeRange()); + assertEquals(((Increment) origin).getTimeRange(), ((Increment) clone).getTimeRange()); } } @@ -179,65 +163,55 @@ public class TestMutation { // Test when row key is immutable Put putRowIsImmutable = new Put(rowKey, true); - assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made + assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made // Test when row key is not immutable Put putRowIsNotImmutable = new Put(rowKey, 1000L, false); - assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made + assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made } // HBASE-14882 @Test public void testAddImmutableToPut() throws IOException { - byte[] row = Bytes.toBytes("immutable-row"); - byte[] family = Bytes.toBytes("immutable-family"); + byte[] row = Bytes.toBytes("immutable-row"); + byte[] family = Bytes.toBytes("immutable-family"); byte[] qualifier0 = Bytes.toBytes("immutable-qualifier-0"); - byte[] value0 = Bytes.toBytes("immutable-value-0"); + byte[] value0 = Bytes.toBytes("immutable-value-0"); byte[] qualifier1 = Bytes.toBytes("immutable-qualifier-1"); - byte[] value1 = Bytes.toBytes("immutable-value-1"); - long ts1 = 5000L; + byte[] value1 = Bytes.toBytes("immutable-value-1"); + long ts1 = 5000L; // "true" indicates that the input row is immutable Put put = new Put(row, true); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier0) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(value0) 
- .build()) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier1) - .setTimestamp(ts1) - .setType(Type.Put) - .setValue(value1) - .build()); + put + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier0).setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(value0).build()) + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier1).setTimestamp(ts1).setType(Type.Put).setValue(value1).build()); // Verify the cell of family:qualifier0 Cell cell0 = put.get(family, qualifier0).get(0); // Verify no local copy is made for family, qualifier or value - assertTrue(cell0.getFamilyArray() == family); + assertTrue(cell0.getFamilyArray() == family); assertTrue(cell0.getQualifierArray() == qualifier0); - assertTrue(cell0.getValueArray() == value0); + assertTrue(cell0.getValueArray() == value0); // Verify timestamp - assertTrue(cell0.getTimestamp() == put.getTimestamp()); + assertTrue(cell0.getTimestamp() == put.getTimestamp()); // Verify the cell of family:qualifier1 Cell cell1 = put.get(family, qualifier1).get(0); // Verify no local copy is made for family, qualifier or value - assertTrue(cell1.getFamilyArray() == family); + assertTrue(cell1.getFamilyArray() == family); assertTrue(cell1.getQualifierArray() == qualifier1); - assertTrue(cell1.getValueArray() == value1); + assertTrue(cell1.getValueArray() == value1); // Verify timestamp - assertTrue(cell1.getTimestamp() == ts1); + assertTrue(cell1.getTimestamp() == ts1); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index a56bb2c4d0f..a4552f1a407 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,19 +71,19 @@ import org.apache.hbase.thirdparty.com.google.common.reflect.TypeToken; import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Run tests that use the functionality of the Operation superclass for - * Puts, Gets, Deletes, Scans, and MultiPuts. + * Run tests that use the functionality of the Operation superclass for Puts, Gets, Deletes, Scans, + * and MultiPuts. 
*/ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestOperation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOperation.class); + HBaseClassTestRule.forClass(TestOperation.class); - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte [] VALUE = Bytes.toBytes("testValue"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] VALUE = Bytes.toBytes("testValue"); private static Gson GSON = GsonUtil.createGson().create(); @@ -94,19 +94,19 @@ public class TestOperation { private static List L_TS_LIST = Arrays.asList(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L); private static TimestampsFilter L_TS_FILTER = new TimestampsFilter(L_TS_LIST); private static String STR_L_TS_FILTER = - L_TS_FILTER.getClass().getSimpleName() + " (5/11): [0, 1, 2, 3, 4]"; + L_TS_FILTER.getClass().getSimpleName() + " (5/11): [0, 1, 2, 3, 4]"; private static String COL_NAME_1 = "col1"; private static ColumnPrefixFilter COL_PRE_FILTER = - new ColumnPrefixFilter(Bytes.toBytes(COL_NAME_1)); + new ColumnPrefixFilter(Bytes.toBytes(COL_NAME_1)); private static String STR_COL_PRE_FILTER = - COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1; + COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1; private static String COL_NAME_2 = "col2"; private static ColumnRangeFilter CR_FILTER = - new ColumnRangeFilter(Bytes.toBytes(COL_NAME_1), true, Bytes.toBytes(COL_NAME_2), false); - private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName() - + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; + new ColumnRangeFilter(Bytes.toBytes(COL_NAME_1), true, Bytes.toBytes(COL_NAME_2), false); + private static String STR_CR_FILTER = + CR_FILTER.getClass().getSimpleName() + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; private static int COL_COUNT = 9; private static ColumnCountGetFilter CCG_FILTER = new ColumnCountGetFilter(COL_COUNT); @@ -115,14 +115,13 @@ public class TestOperation { private static int LIMIT = 3; private static int OFFSET = 4; private static ColumnPaginationFilter CP_FILTER = new ColumnPaginationFilter(LIMIT, OFFSET); - private static String STR_CP_FILTER = CP_FILTER.getClass().getSimpleName() - + " (" + LIMIT + ", " + OFFSET + ")"; + private static String STR_CP_FILTER = + CP_FILTER.getClass().getSimpleName() + " (" + LIMIT + ", " + OFFSET + ")"; private static String STOP_ROW_KEY = "stop"; private static InclusiveStopFilter IS_FILTER = - new InclusiveStopFilter(Bytes.toBytes(STOP_ROW_KEY)); - private static String STR_IS_FILTER = - IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; + new InclusiveStopFilter(Bytes.toBytes(STOP_ROW_KEY)); + private static String STR_IS_FILTER = IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; private static String PREFIX = "prefix"; private static PrefixFilter PREFIX_FILTER = new PrefixFilter(Bytes.toBytes(PREFIX)); @@ -131,15 +130,15 @@ public class TestOperation { private static byte[][] PREFIXES = { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2") }; private static MultipleColumnPrefixFilter MCP_FILTER = new MultipleColumnPrefixFilter(PREFIXES); private static String STR_MCP_FILTER = - 
MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; + MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; - private static byte[][] L_PREFIXES = { - Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), - Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; + private static byte[][] L_PREFIXES = + { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), + Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; private static MultipleColumnPrefixFilter L_MCP_FILTER = - new MultipleColumnPrefixFilter(L_PREFIXES); + new MultipleColumnPrefixFilter(L_PREFIXES); private static String STR_L_MCP_FILTER = - L_MCP_FILTER.getClass().getSimpleName() + " (5/8): [0, 1, 2, 3, 4]"; + L_MCP_FILTER.getClass().getSimpleName() + " (5/8): [0, 1, 2, 3, 4]"; private static int PAGE_SIZE = 9; private static PageFilter PAGE_FILTER = new PageFilter(PAGE_SIZE); @@ -147,143 +146,133 @@ public class TestOperation { private static SkipFilter SKIP_FILTER = new SkipFilter(L_TS_FILTER); private static String STR_SKIP_FILTER = - SKIP_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; + SKIP_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; private static WhileMatchFilter WHILE_FILTER = new WhileMatchFilter(L_TS_FILTER); private static String STR_WHILE_FILTER = - WHILE_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; + WHILE_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; private static KeyOnlyFilter KEY_ONLY_FILTER = new KeyOnlyFilter(); private static String STR_KEY_ONLY_FILTER = KEY_ONLY_FILTER.getClass().getSimpleName(); private static FirstKeyOnlyFilter FIRST_KEY_ONLY_FILTER = new FirstKeyOnlyFilter(); private static String STR_FIRST_KEY_ONLY_FILTER = - FIRST_KEY_ONLY_FILTER.getClass().getSimpleName(); + FIRST_KEY_ONLY_FILTER.getClass().getSimpleName(); private static CompareOp CMP_OP = CompareOp.EQUAL; private static byte[] CMP_VALUE = Bytes.toBytes("value"); private static BinaryComparator BC = new BinaryComparator(CMP_VALUE); private static DependentColumnFilter DC_FILTER = - new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); - private static String STR_DC_FILTER = String.format( - "%s (%s, %s, %s, %s, %s)", DC_FILTER.getClass().getSimpleName(), - Bytes.toStringBinary(FAMILY), Bytes.toStringBinary(QUALIFIER), true, - CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); + new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); + private static String STR_DC_FILTER = String.format("%s (%s, %s, %s, %s, %s)", + DC_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), true, CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); private static FamilyFilter FAMILY_FILTER = new FamilyFilter(CMP_OP, BC); private static String STR_FAMILY_FILTER = - FAMILY_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + FAMILY_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static QualifierFilter QUALIFIER_FILTER = new QualifierFilter(CMP_OP, BC); private static String STR_QUALIFIER_FILTER = - QUALIFIER_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + QUALIFIER_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static RowFilter ROW_FILTER = new RowFilter(CMP_OP, BC); private static String STR_ROW_FILTER = ROW_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static ValueFilter VALUE_FILTER = new ValueFilter(CMP_OP, BC); private static String STR_VALUE_FILTER = - 
VALUE_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + VALUE_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static SingleColumnValueFilter SCV_FILTER = - new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); + new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCV_FILTER = String.format("%s (%s, %s, %s, %s)", - SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), - Bytes.toStringBinary(CMP_VALUE)); + SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); private static SingleColumnValueExcludeFilter SCVE_FILTER = - new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); + new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCVE_FILTER = String.format("%s (%s, %s, %s, %s)", - SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); + SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); - private static FilterList AND_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ALL, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_AND_FILTER_LIST = String.format( - "%s AND (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + private static FilterList AND_FILTER_LIST = new FilterList(Operator.MUST_PASS_ALL, + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_AND_FILTER_LIST = String.format("%s AND (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - private static FilterList OR_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ONE, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_OR_FILTER_LIST = String.format( - "%s OR (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + private static FilterList OR_FILTER_LIST = new FilterList(Operator.MUST_PASS_ONE, + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_OR_FILTER_LIST = String.format("%s OR (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - private static FilterList L_FILTER_LIST = new FilterList( - Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, - CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); - private static String STR_L_FILTER_LIST = String.format( - "%s AND (5/8): [%s, %s, %s, %s, %s, %s]", - L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, - STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); + private static FilterList L_FILTER_LIST = new FilterList(Arrays.asList((Filter) TS_FILTER, + L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); + private static String STR_L_FILTER_LIST = String.format("%s AND (5/8): [%s, %s, %s, %s, %s, %s]", + L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER, + STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); - private static Filter[] FILTERS = { - TS_FILTER, // 
TimestampsFilter - L_TS_FILTER, // TimestampsFilter - COL_PRE_FILTER, // ColumnPrefixFilter - CP_FILTER, // ColumnPaginationFilter - CR_FILTER, // ColumnRangeFilter - CCG_FILTER, // ColumnCountGetFilter - IS_FILTER, // InclusiveStopFilter - PREFIX_FILTER, // PrefixFilter - PAGE_FILTER, // PageFilter - SKIP_FILTER, // SkipFilter - WHILE_FILTER, // WhileMatchFilter - KEY_ONLY_FILTER, // KeyOnlyFilter + private static Filter[] FILTERS = { TS_FILTER, // TimestampsFilter + L_TS_FILTER, // TimestampsFilter + COL_PRE_FILTER, // ColumnPrefixFilter + CP_FILTER, // ColumnPaginationFilter + CR_FILTER, // ColumnRangeFilter + CCG_FILTER, // ColumnCountGetFilter + IS_FILTER, // InclusiveStopFilter + PREFIX_FILTER, // PrefixFilter + PAGE_FILTER, // PageFilter + SKIP_FILTER, // SkipFilter + WHILE_FILTER, // WhileMatchFilter + KEY_ONLY_FILTER, // KeyOnlyFilter FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - MCP_FILTER, // MultipleColumnPrefixFilter - L_MCP_FILTER, // MultipleColumnPrefixFilter - DC_FILTER, // DependentColumnFilter - FAMILY_FILTER, // FamilyFilter - QUALIFIER_FILTER, // QualifierFilter - ROW_FILTER, // RowFilter - VALUE_FILTER, // ValueFilter - SCV_FILTER, // SingleColumnValueFilter - SCVE_FILTER, // SingleColumnValueExcludeFilter - AND_FILTER_LIST, // FilterList - OR_FILTER_LIST, // FilterList - L_FILTER_LIST, // FilterList + MCP_FILTER, // MultipleColumnPrefixFilter + L_MCP_FILTER, // MultipleColumnPrefixFilter + DC_FILTER, // DependentColumnFilter + FAMILY_FILTER, // FamilyFilter + QUALIFIER_FILTER, // QualifierFilter + ROW_FILTER, // RowFilter + VALUE_FILTER, // ValueFilter + SCV_FILTER, // SingleColumnValueFilter + SCVE_FILTER, // SingleColumnValueExcludeFilter + AND_FILTER_LIST, // FilterList + OR_FILTER_LIST, // FilterList + L_FILTER_LIST, // FilterList }; - private static String[] FILTERS_INFO = { - STR_TS_FILTER, // TimestampsFilter - STR_L_TS_FILTER, // TimestampsFilter - STR_COL_PRE_FILTER, // ColumnPrefixFilter - STR_CP_FILTER, // ColumnPaginationFilter - STR_CR_FILTER, // ColumnRangeFilter - STR_CCG_FILTER, // ColumnCountGetFilter - STR_IS_FILTER, // InclusiveStopFilter - STR_PREFIX_FILTER, // PrefixFilter - STR_PAGE_FILTER, // PageFilter - STR_SKIP_FILTER, // SkipFilter - STR_WHILE_FILTER, // WhileMatchFilter - STR_KEY_ONLY_FILTER, // KeyOnlyFilter + private static String[] FILTERS_INFO = { STR_TS_FILTER, // TimestampsFilter + STR_L_TS_FILTER, // TimestampsFilter + STR_COL_PRE_FILTER, // ColumnPrefixFilter + STR_CP_FILTER, // ColumnPaginationFilter + STR_CR_FILTER, // ColumnRangeFilter + STR_CCG_FILTER, // ColumnCountGetFilter + STR_IS_FILTER, // InclusiveStopFilter + STR_PREFIX_FILTER, // PrefixFilter + STR_PAGE_FILTER, // PageFilter + STR_SKIP_FILTER, // SkipFilter + STR_WHILE_FILTER, // WhileMatchFilter + STR_KEY_ONLY_FILTER, // KeyOnlyFilter STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - STR_MCP_FILTER, // MultipleColumnPrefixFilter - STR_L_MCP_FILTER, // MultipleColumnPrefixFilter - STR_DC_FILTER, // DependentColumnFilter - STR_FAMILY_FILTER, // FamilyFilter - STR_QUALIFIER_FILTER, // QualifierFilter - STR_ROW_FILTER, // RowFilter - STR_VALUE_FILTER, // ValueFilter - STR_SCV_FILTER, // SingleColumnValueFilter - STR_SCVE_FILTER, // SingleColumnValueExcludeFilter - STR_AND_FILTER_LIST, // FilterList - STR_OR_FILTER_LIST, // FilterList - STR_L_FILTER_LIST, // FilterList + STR_MCP_FILTER, // MultipleColumnPrefixFilter + STR_L_MCP_FILTER, // MultipleColumnPrefixFilter + STR_DC_FILTER, // DependentColumnFilter + STR_FAMILY_FILTER, // FamilyFilter + STR_QUALIFIER_FILTER, // 
QualifierFilter + STR_ROW_FILTER, // RowFilter + STR_VALUE_FILTER, // ValueFilter + STR_SCV_FILTER, // SingleColumnValueFilter + STR_SCVE_FILTER, // SingleColumnValueExcludeFilter + STR_AND_FILTER_LIST, // FilterList + STR_OR_FILTER_LIST, // FilterList + STR_L_FILTER_LIST, // FilterList }; static { - assertEquals("The sizes of static arrays do not match: " - + "[FILTERS: %d <=> FILTERS_INFO: %d]", - FILTERS.length, FILTERS_INFO.length); + assertEquals("The sizes of static arrays do not match: " + "[FILTERS: %d <=> FILTERS_INFO: %d]", + FILTERS.length, FILTERS_INFO.length); } /** - * Test the client Operations' JSON encoding to ensure that produced JSON is - * parseable and that the details are present and not corrupted. - * + * Test the client Operations' JSON encoding to ensure that produced JSON is parseable and that + * the details are present and not corrupted. * @throws IOException if the JSON conversion fails */ @Test @@ -297,16 +286,14 @@ public class TestOperation { }.getType(); Map parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("startRow incorrect in Scan.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); + assertEquals("startRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), + parsedJSON.get("startRow")); // check for the family and the qualifier. - List familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + List familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Scan.toJSON()", familyInfo); assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Scan.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Scan.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Get Operation Get get = new Get(ROW); @@ -315,16 +302,13 @@ public class TestOperation { json = get.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row incorrect in Get.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row incorrect in Get.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Get.toJSON()", familyInfo); assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Get.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Get.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Put operation Put put = new Put(ROW); @@ -333,17 +317,14 @@ public class TestOperation { json = put.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Put.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Put.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. 
- familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Put.toJSON()", familyInfo); assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size()); Map kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Put.toJSON()", - Bytes.toStringBinary(QUALIFIER), - kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Put.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); assertEquals("Value length incorrect in Put.toJSON()", VALUE.length, ((Number) kvMap.get("vlen")).intValue()); @@ -354,16 +335,14 @@ public class TestOperation { json = delete.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Delete.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Delete.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Delete.toJSON()", familyInfo); assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size()); kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Delete.toJSON()", - Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Delete.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); } @Test @@ -386,7 +365,7 @@ public class TestOperation { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2013L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -395,7 +374,7 @@ public class TestOperation { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -405,7 +384,7 @@ public class TestOperation { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(1970L, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -414,29 +393,16 @@ public class TestOperation { @Test @SuppressWarnings("rawtypes") public void testOperationSubClassMethodsAreBuilderStyle() { - /* All Operation subclasses should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . 
For example: - * Scan scan = new Scan() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * All Operation subclasses should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: Scan scan = new Scan() .setFoo(foo) .setBar(bar) + * .setBuz(buz) This test ensures that all methods starting with "set" returns the declaring + * object */ // TODO: We should ensure all subclasses of Operation is checked. - Class[] classes = new Class[] { - Operation.class, - OperationWithAttributes.class, - Mutation.class, - Query.class, - Delete.class, - Increment.class, - Append.class, - Put.class, - Get.class, - Scan.class}; + Class[] classes = new Class[] { Operation.class, OperationWithAttributes.class, Mutation.class, + Query.class, Delete.class, Increment.class, Append.class, Put.class, Get.class, Scan.class }; BuilderStyleTest.assertClassesAreBuilderStyle(classes); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java index 01740e98461..d066a72c60c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,12 +37,12 @@ import org.mockito.Mockito; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestProcedureFuture { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureFuture.class); + HBaseClassTestRule.forClass(TestProcedureFuture.class); private static class TestFuture extends HBaseAdmin.ProcedureFuture { private boolean postOperationResultCalled = false; @@ -71,12 +71,11 @@ public class TestProcedureFuture { } @Override - protected GetProcedureResultResponse getProcedureResult( - final GetProcedureResultRequest request) throws IOException { + protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request) + throws IOException { getProcedureResultCalled = true; return GetProcedureResultResponse.newBuilder() - .setState(GetProcedureResultResponse.State.FINISHED) - .build(); + .setState(GetProcedureResultResponse.State.FINISHED).build(); } @Override @@ -86,23 +85,21 @@ public class TestProcedureFuture { } @Override - protected Void waitOperationResult(final long deadlineTs) - throws IOException, TimeoutException { + protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { waitOperationResultCalled = true; return null; } @Override protected Void postOperationResult(final Void result, final long deadlineTs) - throws IOException, TimeoutException { + throws IOException, TimeoutException { postOperationResultCalled = true; return result; } } /** - * When a master return a result with procId, - * we are skipping the waitOperationResult() call, + * When a master return a result with procId, we are skipping the 
waitOperationResult() call, * since we are getting the procedure result. */ @Test @@ -126,13 +123,14 @@ public class TestProcedureFuture { HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); TestFuture f = new TestFuture(admin, 100L) { @Override - protected GetProcedureResultResponse getProcedureResult( - final GetProcedureResultRequest request) throws IOException { + protected GetProcedureResultResponse + getProcedureResult(final GetProcedureResultRequest request) throws IOException { boolean done = spinCount.incrementAndGet() >= 10; return GetProcedureResultResponse.newBuilder() - .setState(done ? GetProcedureResultResponse.State.FINISHED : - GetProcedureResultResponse.State.RUNNING) - .build(); + .setState(done + ? GetProcedureResultResponse.State.FINISHED + : GetProcedureResultResponse.State.RUNNING) + .build(); } }; f.get(1, TimeUnit.MINUTES); @@ -144,8 +142,7 @@ public class TestProcedureFuture { } /** - * When a master return a result without procId, - * we are skipping the getProcedureResult() call. + * When a master return a result without procId, we are skipping the getProcedureResult() call. */ @Test public void testWithoutProcId() throws Exception { @@ -160,20 +157,19 @@ public class TestProcedureFuture { } /** - * When a new client with procedure support tries to ask an old-master without proc-support - * the procedure result we get a DoNotRetryIOException (which is an UnsupportedOperationException) - * The future should trap that and fallback to the waitOperationResult(). - * - * This happens when the operation calls happens on a "new master" but while we are waiting - * the operation to be completed, we failover on an "old master". + * When a new client with procedure support tries to ask an old-master without proc-support the + * procedure result we get a DoNotRetryIOException (which is an UnsupportedOperationException) The + * future should trap that and fallback to the waitOperationResult(). This happens when the + * operation calls happens on a "new master" but while we are waiting the operation to be + * completed, we failover on an "old master". */ @Test public void testOnServerWithNoProcedureSupport() throws Exception { HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); TestFuture f = new TestFuture(admin, 100L) { @Override - protected GetProcedureResultResponse getProcedureResult( - final GetProcedureResultRequest request) throws IOException { + protected GetProcedureResultResponse + getProcedureResult(final GetProcedureResultRequest request) throws IOException { super.getProcedureResult(request); throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult")); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java index ef9d4c96d28..1e533633c76 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,16 +27,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) /** - * Addresses HBASE-6047 - * We test put.has call with all of its polymorphic magic + * Addresses HBASE-6047 We test put.has call with all of its polymorphic magic */ public class TestPutDotHas { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPutDotHas.class); + HBaseClassTestRule.forClass(TestPutDotHas.class); public static final byte[] ROW_01 = Bytes.toBytes("row-01"); public static final byte[] QUALIFIER_01 = Bytes.toBytes("qualifier-01"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java index 8572c0b47a1..f57145cdb1b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java index 3b66f7eb2e6..f74b79a0672 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -141,7 +141,7 @@ public class TestRegionInfoBuilder { @Test public void testContainsRangeForMetaTable() { TableDescriptor tableDesc = - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build(); byte[] startRow = HConstants.EMPTY_START_ROW; byte[] row1 = Bytes.toBytes("a,a,0"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java index f72fb66ef73..922b5fdce99 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,40 +35,38 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionInfoDisplay { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionInfoDisplay.class); + HBaseClassTestRule.forClass(TestRegionInfoDisplay.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @Test public void testRegionDetailsForDisplay() throws IOException { - byte[] startKey = new byte[] {0x01, 0x01, 0x02, 0x03}; - byte[] endKey = new byte[] {0x01, 0x01, 0x02, 0x04}; + byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 }; + byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 }; Configuration conf = new Configuration(); conf.setBoolean("hbase.display.keys", false); RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey(startKey).setEndKey(endKey).build(); checkEquality(ri, conf); // check HRIs with non-default replicaId - ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(startKey) - .setEndKey(endKey) - .setSplit(false) - .setRegionId(EnvironmentEdgeManager.currentTime()) - .setReplicaId(1).build(); + ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setStartKey(startKey) + .setEndKey(endKey).setSplit(false).setRegionId(EnvironmentEdgeManager.currentTime()) + .setReplicaId(1).build(); checkEquality(ri, conf); Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY, - RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); + RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_START_KEY, - RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); + RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); RegionState state = RegionState.createForTesting(convert(ri), RegionState.State.OPEN); String descriptiveNameForDisplay = - RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf); + RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf); String originalDescriptive = state.toDescriptiveString(); checkDescriptiveNameEquality(descriptiveNameForDisplay, originalDescriptive, startKey); @@ -76,25 +74,22 @@ public class TestRegionInfoDisplay { 
Assert.assertArrayEquals(endKey, RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); Assert.assertArrayEquals(startKey, RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); Assert.assertEquals(originalDescriptive, - RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); + RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); } private void checkDescriptiveNameEquality(String descriptiveNameForDisplay, String origDesc, - byte[] startKey) { + byte[] startKey) { // except for the "hidden-start-key" substring everything else should exactly match - String firstPart = descriptiveNameForDisplay.substring(0, - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); - String secondPart = descriptiveNameForDisplay.substring( - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + - RegionInfoDisplay.HIDDEN_START_KEY.length); + String firstPart = descriptiveNameForDisplay.substring(0, descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); + String secondPart = descriptiveNameForDisplay.substring(descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + + RegionInfoDisplay.HIDDEN_START_KEY.length); String firstPartOrig = origDesc.substring(0, origDesc.indexOf(Bytes.toStringBinary(startKey))); String secondPartOrig = origDesc.substring( - origDesc.indexOf(Bytes.toStringBinary(startKey)) + - Bytes.toStringBinary(startKey).length()); - assert(firstPart.equals(firstPartOrig)); - assert(secondPart.equals(secondPartOrig)); + origDesc.indexOf(Bytes.toStringBinary(startKey)) + Bytes.toStringBinary(startKey).length()); + assert (firstPart.equals(firstPartOrig)); + assert (secondPart.equals(secondPartOrig)); } private void checkEquality(RegionInfo ri, Configuration conf) throws IOException { @@ -103,18 +98,18 @@ public class TestRegionInfoDisplay { byte[][] modifiedRegionNameParts = RegionInfo.parseRegionName(modifiedRegionName); byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); - //same number of parts - assert(modifiedRegionNameParts.length == regionNameParts.length); + // same number of parts + assert (modifiedRegionNameParts.length == regionNameParts.length); for (int i = 0; i < regionNameParts.length; i++) { // all parts should match except for [1] where in the modified one, // we should have "hidden_start_key" if (i != 1) { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertArrayEquals(regionNameParts[i], modifiedRegionNameParts[i]); } else { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertNotEquals(regionNameParts[i], modifiedRegionNameParts[i]); Assert.assertArrayEquals(modifiedRegionNameParts[1], RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); @@ -123,8 +118,8 @@ public class TestRegionInfoDisplay { } private HRegionInfo convert(RegionInfo ri) { - HRegionInfo hri =new HRegionInfo(ri.getTable(), ri.getStartKey(), ri.getEndKey(), - ri.isSplit(), ri.getRegionId()); + HRegionInfo hri = new 
HRegionInfo(ri.getTable(), ri.getStartKey(), ri.getEndKey(), ri.isSplit(), + ri.getRegionId()); hri.setOffline(ri.isOffline()); return hri; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java index a0415da28d0..1a9f2b3497c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java @@ -26,6 +26,7 @@ import static org.apache.hadoop.hbase.client.trace.hamcrest.TraceTestUtil.buildT import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; @@ -43,6 +44,7 @@ import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, MediumTests.class }) @@ -69,13 +71,11 @@ public class TestRegionLocatorTracing extends TestTracingBase { public void testGetRegionLocation() throws IOException { conn.getRegionLocator(TableName.META_TABLE_NAME).getRegionLocation(HConstants.EMPTY_START_ROW); SpanData span = waitSpan("HRegionLocator.getRegionLocation"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( - containsEntryWithStringValuesOf("db.hbase.regions", + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME), + hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", META_REGION_LOCATION.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @@ -84,16 +84,12 @@ public class TestRegionLocatorTracing extends TestTracingBase { conn.getRegionLocator(TableName.META_TABLE_NAME).getRegionLocations(HConstants.EMPTY_START_ROW); SpanData span = waitSpan("HRegionLocator.getRegionLocations"); // TODO: Use a value of `META_REGION_LOCATION` that contains multiple region locations. 
- String[] expectedRegions = Arrays.stream(META_REGION_LOCATION.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(META_REGION_LOCATION.getRegionLocations()).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } @@ -102,16 +98,12 @@ public class TestRegionLocatorTracing extends TestTracingBase { conn.getRegionLocator(TableName.META_TABLE_NAME).getAllRegionLocations(); SpanData span = waitSpan("HRegionLocator.getAllRegionLocations"); // TODO: Use a value of `META_REGION_LOCATION` that contains multiple region locations. - String[] expectedRegions = Arrays.stream(META_REGION_LOCATION.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(META_REGION_LOCATION.getRegionLocations()).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } @@ -119,11 +111,10 @@ public class TestRegionLocatorTracing extends TestTracingBase { public void testClearRegionLocationCache() throws IOException { conn.getRegionLocator(TableName.META_TABLE_NAME).clearRegionLocationCache(); SpanData span = waitSpan("HRegionLocator.clearRegionLocationCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java index 5b591030c96..7ee074b84b9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestResultStatsUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResultStatsUtil.class); + HBaseClassTestRule.forClass(TestResultStatsUtil.class); - private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, - 10,90); - private static final byte[] regionName = {80}; + private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, 10, 90); + private static final byte[] regionName = { 80 }; private static final ServerName server = ServerName.parseServerName("3.1.yg.n,50,1"); @Test @@ -51,12 +50,12 @@ public class TestResultStatsUtil { // Check that the tracker was updated as expected ServerStatistics stats = serverStatisticTracker.getStats(server); - assertEquals(regionLoadStats.memstoreLoad, stats.getStatsForRegion(regionName) - .getMemStoreLoadPercent()); - assertEquals(regionLoadStats.compactionPressure, stats.getStatsForRegion(regionName) - .getCompactionPressure()); - assertEquals(regionLoadStats.heapOccupancy, stats.getStatsForRegion(regionName) - .getHeapOccupancyPercent()); + assertEquals(regionLoadStats.memstoreLoad, + stats.getStatsForRegion(regionName).getMemStoreLoadPercent()); + assertEquals(regionLoadStats.compactionPressure, + stats.getStatsForRegion(regionName).getCompactionPressure()); + assertEquals(regionLoadStats.heapOccupancy, + stats.getStatsForRegion(regionName).getHeapOccupancyPercent()); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java index 7b584e94861..f380be0048f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,15 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import org.mockito.Mockito; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRetriesExhaustedWithDetailsException { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRetriesExhaustedWithDetailsException.class); + HBaseClassTestRule.forClass(TestRetriesExhaustedWithDetailsException.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); /** * Assert that a RetriesExhaustedException that has RegionTooBusyException outputs region name. 
@@ -53,7 +54,7 @@ public class TestRetriesExhaustedWithDetailsException { List hostAndPorts = new ArrayList<>(1); hostAndPorts.add("example.com:1234"); RetriesExhaustedException ree = - new RetriesExhaustedWithDetailsException(ts, rows, hostAndPorts); + new RetriesExhaustedWithDetailsException(ts, rows, hostAndPorts); assertTrue(ree.toString().contains(regionName)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java index 67976b8233a..b7a16b198ed 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; @@ -52,7 +51,7 @@ public class TestReversedScannerCallable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReversedScannerCallable.class); + HBaseClassTestRule.forClass(TestReversedScannerCallable.class); private static final TableName TABLE_NAME = TableName.valueOf("TestReversedScannerCallable"); @@ -81,8 +80,7 @@ public class TestReversedScannerCallable { @Test public void testPrepareAlwaysUsesCache() throws Exception { - when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)) - .thenReturn(regionLocations); + when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)).thenReturn(regionLocations); ReversedScannerCallable callable = new ReversedScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); @@ -97,7 +95,7 @@ public class TestReversedScannerCallable { when(connection.isTableDisabled(TABLE_NAME)).thenReturn(true); ReversedScannerCallable callable = - new ReversedScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); + new ReversedScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); assertThrows(TableNotEnabledException.class, () -> callable.prepare(true)); } @@ -105,7 +103,7 @@ public class TestReversedScannerCallable { @Test public void testUpdateSearchKeyCacheLocation() throws IOException { byte[] regionName = RegionInfo.createRegionName(TABLE_NAME, - ConnectionUtils.createCloseRowBefore(ConnectionUtils.MAX_BYTE_ARRAY), "123", false); + ConnectionUtils.createCloseRowBefore(ConnectionUtils.MAX_BYTE_ARRAY), "123", false); HRegionInfo mockRegionInfo = mock(HRegionInfo.class); when(mockRegionInfo.containsRow(ConnectionUtils.MAX_BYTE_ARRAY)).thenReturn(true); when(mockRegionInfo.getEndKey()).thenReturn(HConstants.EMPTY_END_ROW); @@ -115,17 +113,17 @@ public class TestReversedScannerCallable { IOException testThrowable = new IOException("test throwable"); when(connection.locateRegion(TABLE_NAME, ConnectionUtils.MAX_BYTE_ARRAY, true, true, 0)) - .thenReturn(regionLocations); + .thenReturn(regionLocations); Scan scan = new Scan().setReversed(true); ReversedScannerCallable callable = - new ReversedScannerCallable(connection, TABLE_NAME, scan, null, rpcFactory, 0); + new ReversedScannerCallable(connection, TABLE_NAME, 
scan, null, rpcFactory, 0); callable.prepare(false); callable.throwable(testThrowable, true); - verify(connection).updateCachedLocations(TABLE_NAME, regionName, - ConnectionUtils.MAX_BYTE_ARRAY, testThrowable, SERVERNAME); + verify(connection).updateCachedLocations(TABLE_NAME, regionName, ConnectionUtils.MAX_BYTE_ARRAY, + testThrowable, SERVERNAME); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java index 64983089ae0..9b2c598eaaa 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestRowComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowComparator.class); + HBaseClassTestRule.forClass(TestRowComparator.class); - private static final List DEFAULT_ROWS = IntStream.range(1, 9) - .mapToObj(String::valueOf).map(Bytes::toBytes).collect(Collectors.toList()); + private static final List DEFAULT_ROWS = IntStream.range(1, 9).mapToObj(String::valueOf) + .map(Bytes::toBytes).collect(Collectors.toList()); @Test public void testPut() { @@ -71,8 +71,7 @@ public class TestRowComparator { } private static void test(Function f) { - List rows = new ArrayList(DEFAULT_ROWS.stream() - .map(f).collect(Collectors.toList())); + List rows = new ArrayList(DEFAULT_ROWS.stream().map(f).collect(Collectors.toList())); do { Collections.shuffle(rows); } while (needShuffle(rows)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java index e534ab094d9..43737a71ca0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -168,7 +168,8 @@ public class TestRpcBasedRegistryHedgedReads { return CompletableFuture.completedFuture(BOOTSTRAP_NODES); } - @Override public String getConnectionString() { + @Override + public String getConnectionString() { return "unimplemented"; } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcRetryingCallerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcRetryingCallerImpl.java index cc6d6f4229a..3d3d64f4c21 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcRetryingCallerImpl.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcRetryingCallerImpl.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +20,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CallQueueTooBigException; @@ -32,7 +32,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestRpcRetryingCallerImpl { @ClassRule @@ -61,8 +61,8 @@ public class TestRpcRetryingCallerImpl { RpcRetryingCallerImpl caller = new RpcRetryingCallerImpl<>(pauseMillis, specialPauseMillis, 2, 0); - RetryingCallable callable = new ThrowingCallable( - CallQueueTooBigException.class, specialPauseMillis); + RetryingCallable callable = + new ThrowingCallable(CallQueueTooBigException.class, specialPauseMillis); try { caller.callWithRetries(callable, 5000); fail("Expected " + exceptionClass.getSimpleName()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java index fe538054d9f..8fe447e7fbd 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.Arrays; import java.util.Set; - import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; @@ -40,15 +39,15 @@ import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestScan { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScan.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScan.class); @Test public void testAttributesSerialization() throws IOException { @@ -71,22 +70,14 @@ public class TestScan { @Test public void testGetToScan() throws Exception { Get get = new Get(Bytes.toBytes(1)); - get.setCacheBlocks(true) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("get") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultsPerColumnFamily(1000) - .setMaxVersions(9999) - .setRowOffsetPerColumnFamily(5) - .setTimeRange(0, 13) - .setAttribute("att_v0", Bytes.toBytes("att_v0")) - .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123) - .setReplicaId(3) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAuthorizations(new Authorizations("test_label")) - .setPriority(3); + get.setCacheBlocks(true).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + 
.setId("get").setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLoadColumnFamiliesOnDemand(false).setMaxResultsPerColumnFamily(1000).setMaxVersions(9999) + .setRowOffsetPerColumnFamily(5).setTimeRange(0, 13) + .setAttribute("att_v0", Bytes.toBytes("att_v0")) + .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123).setReplicaId(3) + .setACL("test_user", new Permission(Permission.Action.READ)) + .setAuthorizations(new Authorizations("test_label")).setPriority(3); Scan scan = new Scan(get); assertEquals(get.getCacheBlocks(), scan.getCacheBlocks()); @@ -95,7 +86,7 @@ public class TestScan { assertEquals(get.getId(), scan.getId()); assertEquals(get.getIsolationLevel(), scan.getIsolationLevel()); assertEquals(get.getLoadColumnFamiliesOnDemandValue(), - scan.getLoadColumnFamiliesOnDemandValue()); + scan.getLoadColumnFamiliesOnDemandValue()); assertEquals(get.getMaxResultsPerColumnFamily(), scan.getMaxResultsPerColumnFamily()); assertEquals(get.getMaxVersions(), scan.getMaxVersions()); assertEquals(get.getRowOffsetPerColumnFamily(), scan.getRowOffsetPerColumnFamily()); @@ -103,9 +94,9 @@ public class TestScan { assertEquals(get.getTimeRange().getMax(), scan.getTimeRange().getMax()); assertTrue(Bytes.equals(get.getAttribute("att_v0"), scan.getAttribute("att_v0"))); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin(), - scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax(), - scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); assertEquals(get.getReplicaId(), scan.getReplicaId()); assertEquals(get.getACL(), scan.getACL()); assertEquals(get.getAuthorizations().getLabels(), scan.getAuthorizations().getLabels()); @@ -126,22 +117,22 @@ public class TestScan { scan.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), scan.getAttributesMap().get("attribute1"))); // overriding attribute value scan.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), scan.getAttributesMap().get("attribute1"))); // adding another attribute scan.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2"))); Assert.assertEquals(2, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - scan.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), scan.getAttributesMap().get("attribute2"))); // removing attribute scan.setAttribute("attribute2", null); @@ -199,7 +190,7 @@ public class TestScan { scan.setStartRow(new byte[1]); scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.setStartRow(new 
byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -210,7 +201,7 @@ public class TestScan { scan.setStopRow(new byte[1]); scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -223,37 +214,17 @@ public class TestScan { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setSmall(true) - .setReadType(ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) - .setTimeRange(0, 13); + .setACL("test_user", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED).setLimit(100) + .setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100).setMaxResultsPerColumnFamily(1000) + .readVersions(9999).setMvccReadPoint(5).setNeedCursorResult(true).setPriority(1).setRaw(true) + .setReplicaId(3).setReversed(true).setRowOffsetPerColumnFamily(5) + .setStartStopRowForPrefixScan(Bytes.toBytes("row_")).setScanMetricsEnabled(true) + .setSmall(true).setReadType(ReadType.STREAM).withStartRow(Bytes.toBytes("row_1")) + .withStopRow(Bytes.toBytes("row_2")).setTimeRange(0, 13); // create a copy of existing scan object Scan scanCopy = new Scan(scan); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java index 6c48fd26fb1..4fa747be57b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HRegionLocation; @@ -48,7 +47,7 @@ import org.mockito.runners.MockitoJUnitRunner; public class TestScannerCallable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerCallable.class); + HBaseClassTestRule.forClass(TestScannerCallable.class); private static final TableName TABLE_NAME = TableName.valueOf("TestScannerCallable"); @@ -77,11 +76,10 @@ public class TestScannerCallable { @Test public void testPrepareAlwaysUsesCache() throws Exception { - when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)) - .thenReturn(regionLocations); + when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)).thenReturn(regionLocations); ScannerCallable callable = - new ScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); + new ScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); callable.prepare(false); callable.prepare(true); @@ -93,7 +91,7 @@ public class TestScannerCallable { when(connection.isTableDisabled(TABLE_NAME)).thenReturn(true); ScannerCallable callable = - new ScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); + new ScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); assertThrows(TableNotEnabledException.class, () -> callable.prepare(true)); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java index 3dc86de2f35..36da042628a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,26 +48,25 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestSimpleRequestController { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSimpleRequestController.class); + HBaseClassTestRule.forClass(TestSimpleRequestController.class); - private static final TableName DUMMY_TABLE - = TableName.valueOf("DUMMY_TABLE"); + private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1"); private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2"); private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3"); private static final ServerName SN = ServerName.valueOf("s1,1,1"); private static final ServerName SN2 = ServerName.valueOf("s2,2,2"); - private static final HRegionInfo HRI1 - = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1); - private static final HRegionInfo HRI2 - = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2); - private static final HRegionInfo HRI3 - = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3); + private static final HRegionInfo HRI1 = + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1); + private static final HRegionInfo HRI2 = + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2); + private static final HRegionInfo HRI3 = + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3); private static final HRegionLocation LOC1 = new HRegionLocation(HRI1, SN); private static final HRegionLocation LOC2 = new HRegionLocation(HRI2, SN); private static final HRegionLocation LOC3 = new HRegionLocation(HRI3, SN2); @@ -125,17 +124,15 @@ public class TestSimpleRequestController { final Map taskCounterPerServer = new HashMap<>(); final Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker countChecker = - new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, taskCounterPerRegion); + new SimpleRequestController.TaskCountChecker(maxTotalConcurrentTasks, + maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, tasksInProgress, + taskCounterPerServer, taskCounterPerRegion); final long maxHeapSizePerRequest = 2 * 1024 * 1024; // unlimiited SimpleRequestController.RequestHeapSizeChecker sizeChecker = - new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); + new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); RequestController.Checker checker = - SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker)); + SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker)); ReturnCode loc1Code = checker.canTakeRow(LOC1, createPut(maxHeapSizePerRequest)); assertEquals(ReturnCode.INCLUDE, loc1Code); @@ -166,8 +163,8 @@ public class TestSimpleRequestController { @Test public void testRequestHeapSizeChecker() throws IOException { final long maxHeapSizePerRequest = 2 * 1024 * 1024; - SimpleRequestController.RequestHeapSizeChecker checker - = new 
SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); + SimpleRequestController.RequestHeapSizeChecker checker = + new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); // inner state is unchanged. for (int i = 0; i != 10; ++i) { @@ -208,10 +205,10 @@ public class TestSimpleRequestController { @Test public void testRequestRowsChecker() throws IOException { final long maxRowCount = 100; - SimpleRequestController.RequestRowsChecker checker - = new SimpleRequestController.RequestRowsChecker(maxRowCount); + SimpleRequestController.RequestRowsChecker checker = + new SimpleRequestController.RequestRowsChecker(maxRowCount); - final long heapSizeOfRow = 100; //unused + final long heapSizeOfRow = 100; // unused // inner state is unchanged. for (int i = 0; i != 10; ++i) { ReturnCode code = checker.canTakeOperation(LOC1, heapSizeOfRow); @@ -253,8 +250,8 @@ public class TestSimpleRequestController { @Test public void testSubmittedSizeChecker() { final long maxHeapSizeSubmit = 2 * 1024 * 1024; - SimpleRequestController.SubmittedSizeChecker checker - = new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); + SimpleRequestController.SubmittedSizeChecker checker = + new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); for (int i = 0; i != 10; ++i) { ReturnCode include = checker.canTakeOperation(LOC1, 100000); @@ -290,10 +287,8 @@ public class TestSimpleRequestController { Map taskCounterPerServer = new HashMap<>(); Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker checker = new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, taskCounterPerRegion); + maxTotalConcurrentTasks, maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, + tasksInProgress, taskCounterPerServer, taskCounterPerRegion); // inner state is unchanged. for (int i = 0; i != 10; ++i) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java index ec2d29d1fa4..34b38e77cc3 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,12 +48,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRe /** * Test snapshot logic from the client */ -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestSnapshotFromAdmin { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotFromAdmin.class); + HBaseClassTestRule.forClass(TestSnapshotFromAdmin.class); private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotFromAdmin.class); @@ -62,8 +62,7 @@ public class TestSnapshotFromAdmin { /** * Test that the logic for doing 'correct' back-off based on exponential increase and the max-time - * passed from the server ensures the correct overall waiting for the snapshot to finish. - * @throws Exception + * passed from the server ensures the correct overall waiting for the snapshot to finish. 
n */ @Test public void testBackoffLogic() throws Exception { @@ -80,11 +79,10 @@ public class TestSnapshotFromAdmin { // the correct wait time, capping at the maxTime/tries + fudge room final long time = pauseTime * 3L + ((maxWaitTime / numRetries) * 3) + 300L; assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time " - + "- further testing won't prove anything.", time < ignoreExpectedTime); + + "- further testing won't prove anything.", time < ignoreExpectedTime); // setup the mocks - ConnectionImplementation mockConnection = Mockito - .mock(ConnectionImplementation.class); + ConnectionImplementation mockConnection = Mockito.mock(ConnectionImplementation.class); Configuration conf = HBaseConfiguration.create(); // setup the conf to match the expected properties conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries); @@ -97,26 +95,22 @@ public class TestSnapshotFromAdmin { // we need a real retrying caller RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf); RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class); - Mockito.when(controllerFactory.newController()).thenReturn( - Mockito.mock(HBaseRpcController.class)); + Mockito.when(controllerFactory.newController()) + .thenReturn(Mockito.mock(HBaseRpcController.class)); Mockito.when(mockConnection.getRpcRetryingCallerFactory()).thenReturn(callerFactory); Mockito.when(mockConnection.getRpcControllerFactory()).thenReturn(controllerFactory); // set the max wait time for the snapshot to complete - SnapshotResponse response = SnapshotResponse.newBuilder() - .setExpectedTimeout(maxWaitTime) - .build(); - Mockito - .when( - mockMaster.snapshot((RpcController) Mockito.any(), - Mockito.any())).thenReturn(response); + SnapshotResponse response = + SnapshotResponse.newBuilder().setExpectedTimeout(maxWaitTime).build(); + Mockito.when(mockMaster.snapshot((RpcController) Mockito.any(), Mockito.any())) + .thenReturn(response); // setup the response IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder(); builder.setDone(false); // first five times, we return false, last we get success - Mockito.when( - mockMaster.isSnapshotDone((RpcController) Mockito.any(), - Mockito.any())).thenReturn(builder.build(), builder.build(), - builder.build(), builder.build(), builder.build(), builder.setDone(true).build()); + Mockito.when(mockMaster.isSnapshotDone((RpcController) Mockito.any(), Mockito.any())) + .thenReturn(builder.build(), builder.build(), builder.build(), builder.build(), + builder.build(), builder.setDone(true).build()); // setup the admin and run the test Admin admin = new HBaseAdmin(mockConnection); @@ -138,15 +132,14 @@ public class TestSnapshotFromAdmin { */ @Test public void testValidateSnapshotName() throws Exception { - ConnectionImplementation mockConnection = Mockito - .mock(ConnectionImplementation.class); + ConnectionImplementation mockConnection = Mockito.mock(ConnectionImplementation.class); Configuration conf = HBaseConfiguration.create(); Mockito.when(mockConnection.getConfiguration()).thenReturn(conf); // we need a real retrying caller RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf); RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class); - Mockito.when(controllerFactory.newController()).thenReturn( - Mockito.mock(HBaseRpcController.class)); + Mockito.when(controllerFactory.newController()) + .thenReturn(Mockito.mock(HBaseRpcController.class)); 
Mockito.when(mockConnection.getRpcRetryingCallerFactory()).thenReturn(callerFactory); Mockito.when(mockConnection.getRpcControllerFactory()).thenReturn(controllerFactory); Admin admin = new HBaseAdmin(mockConnection); @@ -166,20 +159,17 @@ public class TestSnapshotFromAdmin { MasterKeepAliveConnection master = Mockito.mock(MasterKeepAliveConnection.class); Mockito.when(mockConnection.getMaster()).thenReturn(master); SnapshotResponse response = SnapshotResponse.newBuilder().setExpectedTimeout(0).build(); - Mockito.when( - master.snapshot((RpcController) Mockito.any(), Mockito.any())) - .thenReturn(response); + Mockito.when(master.snapshot((RpcController) Mockito.any(), Mockito.any())) + .thenReturn(response); IsSnapshotDoneResponse doneResponse = IsSnapshotDoneResponse.newBuilder().setDone(true).build(); - Mockito.when( - master.isSnapshotDone((RpcController) Mockito.any(), - Mockito.any())).thenReturn(doneResponse); + Mockito.when(master.isSnapshotDone((RpcController) Mockito.any(), Mockito.any())) + .thenReturn(doneResponse); - // make sure that we can use valid names + // make sure that we can use valid names admin.snapshot(new SnapshotDescription("snapshot", TableName.valueOf(name.getMethodName()))); } - private void failSnapshotStart(Admin admin, SnapshotDescription snapshot) - throws IOException { + private void failSnapshotStart(Admin admin, SnapshotDescription snapshot) throws IOException { try { admin.snapshot(snapshot); fail("Snapshot should not have succeed with name:" + snapshot.getName()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 658ad0641a5..6aa4e6765e4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,39 +45,32 @@ import org.slf4j.LoggerFactory; /** * Test setting values in the descriptor. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestTableDescriptorBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableDescriptorBuilder.class); + HBaseClassTestRule.forClass(TestTableDescriptorBuilder.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableDescriptorBuilder.class); @Rule public TestName name = new TestName(); - @Test (expected=IOException.class) + @Test(expected = IOException.class) public void testAddCoprocessorTwice() throws IOException { String cpName = "a.b.c.d"; - TableDescriptor htd - = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .setCoprocessor(cpName) - .setCoprocessor(cpName) - .build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) + .setCoprocessor(cpName).setCoprocessor(cpName).build(); } @Test public void testPb() throws DeserializationException, IOException { final int v = 123; - TableDescriptor htd - = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .setMaxFileSize(v) - .setDurability(Durability.ASYNC_WAL) - .setReadOnly(true) - .setRegionReplication(2) - .build(); + TableDescriptor htd = + TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setMaxFileSize(v) + .setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build(); - byte [] bytes = TableDescriptorBuilder.toByteArray(htd); + byte[] bytes = TableDescriptorBuilder.toByteArray(htd); TableDescriptor deserializedHtd = TableDescriptorBuilder.parseFrom(bytes); assertEquals(htd, deserializedHtd); assertEquals(v, deserializedHtd.getMaxFileSize()); @@ -88,59 +81,59 @@ public class TestTableDescriptorBuilder { /** * Test cps in the table description. - * * @throws Exception if setting a coprocessor fails */ @Test public void testGetSetRemoveCP() throws Exception { // simple CP String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; - TableDescriptor desc - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setCoprocessor(className) // add and check that it is present - .build(); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessor(className) // add and + // check that + // it is + // present + .build(); assertTrue(desc.hasCoprocessor(className)); - desc = TableDescriptorBuilder.newBuilder(desc) - .removeCoprocessor(className) // remove it and check that it is gone - .build(); + desc = TableDescriptorBuilder.newBuilder(desc).removeCoprocessor(className) // remove it and + // check that it is + // gone + .build(); assertFalse(desc.hasCoprocessor(className)); } /** * Test cps in the table description. - * * @throws Exception if setting a coprocessor fails */ @Test public void testSetListRemoveCP() throws Exception { - TableDescriptor desc - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); // Check that any coprocessor is present. assertTrue(desc.getCoprocessorDescriptors().isEmpty()); // simple CP String className1 = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; String className2 = "org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver"; - desc = TableDescriptorBuilder.newBuilder(desc) - .setCoprocessor(className1) // Add the 1 coprocessor and check if present. 
- .build(); + desc = TableDescriptorBuilder.newBuilder(desc).setCoprocessor(className1) // Add the 1 + // coprocessor and + // check if present. + .build(); assertTrue(desc.getCoprocessorDescriptors().size() == 1); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) .anyMatch(name -> name.equals(className1))); desc = TableDescriptorBuilder.newBuilder(desc) - // Add the 2nd coprocessor and check if present. - // remove it and check that it is gone - .setCoprocessor(className2) - .build(); + // Add the 2nd coprocessor and check if present. + // remove it and check that it is gone + .setCoprocessor(className2).build(); assertTrue(desc.getCoprocessorDescriptors().size() == 2); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) .anyMatch(name -> name.equals(className2))); desc = TableDescriptorBuilder.newBuilder(desc) - // Remove one and check - .removeCoprocessor(className1) - .build(); + // Remove one and check + .removeCoprocessor(className1).build(); assertTrue(desc.getCoprocessorDescriptors().size() == 1); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) .anyMatch(name -> name.equals(className1))); @@ -148,9 +141,8 @@ public class TestTableDescriptorBuilder { .anyMatch(name -> name.equals(className2))); desc = TableDescriptorBuilder.newBuilder(desc) - // Remove the last and check - .removeCoprocessor(className2) - .build(); + // Remove the last and check + .removeCoprocessor(className2).build(); assertTrue(desc.getCoprocessorDescriptors().isEmpty()); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) .anyMatch(name -> name.equals(className1))); @@ -164,9 +156,8 @@ public class TestTableDescriptorBuilder { @Test public void testRemoveNonExistingCoprocessor() throws Exception { String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertFalse(desc.hasCoprocessor(className)); desc = TableDescriptorBuilder.newBuilder(desc).removeCoprocessor(className).build(); assertFalse(desc.hasCoprocessor(className)); @@ -179,24 +170,20 @@ public class TestTableDescriptorBuilder { public void testRemoveString() { byte[] key = Bytes.toBytes("Some"); byte[] value = Bytes.toBytes("value"); - TableDescriptor desc - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setValue(key, value) - .build(); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setValue(key, value).build(); assertTrue(Bytes.equals(value, desc.getValue(key))); - desc = TableDescriptorBuilder.newBuilder(desc) - .removeValue(key) - .build(); + desc = TableDescriptorBuilder.newBuilder(desc).removeValue(key).build(); assertTrue(desc.getValue(key) == null); } String[] legalTableNames = { "foo", "with-dash_under.dot", "_under_start_ok", "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02", "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", - "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02"}; + "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02" }; String[] illegalTableNames = { ".dot_start_illegal", 
"-dash_start_illegal", "spaces not ok", "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", - "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2"}; + "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2" }; @Test public void testLegalTableNames() { @@ -221,8 +208,8 @@ public class TestTableDescriptorBuilder { public void testLegalTableNamesRegex() { for (String tn : legalTableNames) { TableName tName = TableName.valueOf(tn); - assertTrue("Testing: '" + tn + "'", Pattern.matches(TableName.VALID_USER_TABLE_REGEX, - tName.getNameAsString())); + assertTrue("Testing: '" + tn + "'", + Pattern.matches(TableName.VALID_USER_TABLE_REGEX, tName.getNameAsString())); } } @@ -234,17 +221,16 @@ public class TestTableDescriptorBuilder { } } - /** + /** * Test default value handling for maxFileSize */ @Test public void testGetMaxFileSize() { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertEquals(-1, desc.getMaxFileSize()); - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setMaxFileSize(1111L).build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setMaxFileSize(1111L).build(); assertEquals(1111L, desc.getMaxFileSize()); } @@ -280,12 +266,11 @@ public class TestTableDescriptorBuilder { */ @Test public void testGetMemStoreFlushSize() { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertEquals(-1, desc.getMemStoreFlushSize()); - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setMemStoreFlushSize(1111L).build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setMemStoreFlushSize(1111L).build(); assertEquals(1111L, desc.getMemStoreFlushSize()); } @@ -325,50 +310,36 @@ public class TestTableDescriptorBuilder { public void testModifyFamily() { byte[] familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(1000) - .setDFSReplication((short) 3) - .build(); - TableDescriptor htd - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd) - .build(); + .setBlocksize(1000).setDFSReplication((short) 3).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(hcd).build(); assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); assertEquals(3, htd.getColumnFamily(familyName).getDFSReplication()); - hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(2000) - .setDFSReplication((short) 1) - .build(); - htd = TableDescriptorBuilder.newBuilder(htd) - .modifyColumnFamily(hcd) - .build(); + hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(2000) + .setDFSReplication((short) 1).build(); + htd = TableDescriptorBuilder.newBuilder(htd).modifyColumnFamily(hcd).build(); assertEquals(2000, htd.getColumnFamily(familyName).getBlocksize()); assertEquals(1, htd.getColumnFamily(familyName).getDFSReplication()); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void 
testModifyInexistentFamily() { byte[] familyName = Bytes.toBytes("cf"); HColumnDescriptor hcd = new HColumnDescriptor(familyName); - TableDescriptor htd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .modifyColumnFamily(hcd) - .build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .modifyColumnFamily(hcd).build(); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testAddDuplicateFamilies() { byte[] familyName = Bytes.toBytes("cf"); - ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(1000) - .build(); + ColumnFamilyDescriptor hcd = + ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(1000).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd) - .build(); + .setColumnFamily(hcd).build(); assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); - hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(2000) - .build(); + hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(2000).build(); // add duplicate column TableDescriptorBuilder.newBuilder(htd).setColumnFamily(hcd).build(); } @@ -376,35 +347,27 @@ public class TestTableDescriptorBuilder { @Test public void testPriority() { TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setPriority(42) - .build(); + .setPriority(42).build(); assertEquals(42, htd.getPriority()); } @Test public void testStringCustomizedValues() throws HBaseException { byte[] familyName = Bytes.toBytes("cf"); - ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(131072) - .build(); + ColumnFamilyDescriptor hcd = + ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(131072).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd) - .setDurability(Durability.ASYNC_WAL) - .build(); + .setColumnFamily(hcd).setDurability(Durability.ASYNC_WAL).build(); assertEquals( - "'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", htd.toStringCustomizedValues()); - htd = TableDescriptorBuilder.newBuilder(htd) - .setMaxFileSize("10737942528") - .setMemStoreFlushSize("256MB") - .build(); + htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528") + .setMemStoreFlushSize("256MB").build(); assertEquals( - "'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, " + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java index 44d199764c5..8ba350b03b1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed 
to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptorUtils.TableDescriptorDelta; @@ -35,7 +33,7 @@ import org.junit.experimental.categories.Category; public class TestTableDescriptorUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableDescriptorUtils.class); + HBaseClassTestRule.forClass(TestTableDescriptorUtils.class); @Test public void testDelta() { @@ -43,32 +41,25 @@ public class TestTableDescriptorUtils { ColumnFamilyDescriptor cf2 = ColumnFamilyDescriptorBuilder.of("cf2"); ColumnFamilyDescriptor cf3 = ColumnFamilyDescriptorBuilder.of("cf3"); ColumnFamilyDescriptor cf4 = ColumnFamilyDescriptorBuilder.of("cf4"); - TableDescriptor td = TableDescriptorBuilder - .newBuilder(TableName.valueOf("test")) - .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)) - .build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test")) + .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)).build(); TableDescriptorDelta selfCompare = TableDescriptorUtils.computeDelta(td, td); assertEquals(0, selfCompare.getColumnsAdded().size()); assertEquals(0, selfCompare.getColumnsDeleted().size()); assertEquals(0, selfCompare.getColumnsModified().size()); - ColumnFamilyDescriptor modCf2 = ColumnFamilyDescriptorBuilder - .newBuilder(cf2).setMaxVersions(5).build(); - ColumnFamilyDescriptor modCf3 = ColumnFamilyDescriptorBuilder - .newBuilder(cf3).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf2 = + ColumnFamilyDescriptorBuilder.newBuilder(cf2).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf3 = + ColumnFamilyDescriptorBuilder.newBuilder(cf3).setMaxVersions(5).build(); ColumnFamilyDescriptor cf5 = ColumnFamilyDescriptorBuilder.of("cf5"); ColumnFamilyDescriptor cf6 = ColumnFamilyDescriptorBuilder.of("cf6"); ColumnFamilyDescriptor cf7 = ColumnFamilyDescriptorBuilder.of("cf7"); - TableDescriptor newTd = TableDescriptorBuilder - .newBuilder(td) - .removeColumnFamily(Bytes.toBytes("cf1")) - .modifyColumnFamily(modCf2) - .modifyColumnFamily(modCf3) - .setColumnFamily(cf5) - .setColumnFamily(cf6) - .setColumnFamily(cf7) - .build(); + TableDescriptor newTd = + TableDescriptorBuilder.newBuilder(td).removeColumnFamily(Bytes.toBytes("cf1")) + .modifyColumnFamily(modCf2).modifyColumnFamily(modCf3).setColumnFamily(cf5) + .setColumnFamily(cf6).setColumnFamily(cf7).build(); TableDescriptorDelta delta = TableDescriptorUtils.computeDelta(td, newTd); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java index 882381bb067..2ae3daa6cb4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; + import java.io.IOException; import java.util.Arrays; import java.util.Optional; @@ -62,13 +63,15 @@ import org.junit.rules.TestName; import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** - * Test that correct rpc priority is sent to server from blocking Table calls. Currently - * only implements checks for scans, but more could be added here. + * Test that correct rpc priority is sent to server from blocking Table calls. Currently only + * implements checks for scans, but more could be added here. */ @Category({ ClientTests.class, MediumTests.class }) public class TestTableRpcPriority { @@ -123,8 +126,8 @@ public class TestTableRpcPriority { } /** - * This test verifies that our closeScanner request honors the original - * priority of the scan if it's greater than our expected HIGH_QOS for close calls. + * This test verifies that our closeScanner request honors the original priority of the scan if + * it's greater than our expected HIGH_QOS for close calls. */ @Test public void testScanSuperHighPriority() throws Exception { @@ -163,19 +166,19 @@ public class TestTableRpcPriority { // just verify that the calls happened. verification of priority occurred in the mocking // open, next, then several renew lease verify(stub, atLeast(3)).scan(any(), any(ClientProtos.ScanRequest.class)); - verify(stub, times(1)).scan( - assertControllerArgs(Math.max(priority.orElse(0), HIGH_QOS)), assertScannerCloseRequest()); + verify(stub, times(1)).scan(assertControllerArgs(Math.max(priority.orElse(0), HIGH_QOS)), + assertScannerCloseRequest()); } private void mockScan(int scanPriority) throws ServiceException { int scannerId = 1; doAnswer(new Answer() { - @Override public ClientProtos.ScanResponse answer(InvocationOnMock invocation) - throws Throwable { + @Override + public ClientProtos.ScanResponse answer(InvocationOnMock invocation) throws Throwable { throw new IllegalArgumentException( - "Call not covered by explicit mock for arguments controller=" - + invocation.getArgument(0) + ", request=" + invocation.getArgument(1)); + "Call not covered by explicit mock for arguments controller=" + invocation.getArgument(0) + + ", request=" + invocation.getArgument(1)); } }).when(stub).scan(any(), any()); @@ -183,8 +186,7 @@ public class TestTableRpcPriority { doAnswer(new Answer() { @Override - public ClientProtos.ScanResponse answer(InvocationOnMock invocation) - throws Throwable { + public ClientProtos.ScanResponse answer(InvocationOnMock invocation) throws Throwable { ClientProtos.ScanRequest req = invocation.getArgument(1); assertFalse("close scanner should not come in with scan priority " + scanPriority, req.hasCloseScanner() && req.getCloseScanner()); @@ -208,8 +210,7 @@ public class TestTableRpcPriority { doAnswer(new Answer() { @Override - public ClientProtos.ScanResponse answer(InvocationOnMock invocation) - throws Throwable { + public ClientProtos.ScanResponse answer(InvocationOnMock invocation) throws Throwable { ClientProtos.ScanRequest req = invocation.getArgument(1); 
assertTrue("close request should have scannerId", req.hasScannerId()); assertEquals("close request's scannerId should match", scannerId, req.getScannerId()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java index 2a10d3b9e8c..513206a10e1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java @@ -23,6 +23,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.hasItem; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -70,19 +71,16 @@ public class TestTracingBase { TableName tableName) { String expectedSpanName = String.format("%s.%s", className, methodName); Waiter.waitFor(conf, 1000, - () -> TRACE_RULE.getSpans().stream() - .anyMatch(span -> span.getName().equals(expectedSpanName) && - span.getKind() == SpanKind.INTERNAL && span.hasEnded())); - SpanData data = TRACE_RULE.getSpans().stream() - .filter(s -> s.getName().equals(expectedSpanName)).findFirst().get(); + () -> TRACE_RULE.getSpans().stream().anyMatch(span -> span.getName().equals(expectedSpanName) + && span.getKind() == SpanKind.INTERNAL && span.hasEnded())); + SpanData data = TRACE_RULE.getSpans().stream().filter(s -> s.getName().equals(expectedSpanName)) + .findFirst().get(); assertEquals(StatusCode.OK, data.getStatus().getStatusCode()); if (serverName != null) { Optional foundServerName = - TRACE_RULE.getSpans().stream() - .filter(s -> s.getName().equals(expectedSpanName)) - .filter(s -> Objects.equals( - serverName.getServerName(), + TRACE_RULE.getSpans().stream().filter(s -> s.getName().equals(expectedSpanName)) + .filter(s -> Objects.equals(serverName.getServerName(), s.getAttributes().get(HBaseSemanticAttributes.SERVER_NAME_KEY))) .findAny(); assertTrue(foundServerName.isPresent()); @@ -103,18 +101,14 @@ public class TestTracingBase { protected SpanData waitSpan(Matcher matcher) { Matcher spanLocator = allOf(matcher, hasEnded()); try { - Waiter.waitFor(conf, 1000, new MatcherPredicate<>( - "waiting for span", + Waiter.waitFor(conf, 1000, new MatcherPredicate<>("waiting for span", () -> TRACE_RULE.getSpans(), hasItem(spanLocator))); } catch (AssertionError e) { LOG.error("AssertionError while waiting for matching span. 
Span reservoir contains: {}", TRACE_RULE.getSpans()); throw e; } - return TRACE_RULE.getSpans() - .stream() - .filter(spanLocator::matches) - .findFirst() + return TRACE_RULE.getSpans().stream().filter(spanLocator::matches).findFirst() .orElseThrow(AssertionError::new); } @@ -138,11 +132,13 @@ public class TestTracingBase { return CompletableFuture.completedFuture(MASTER_HOST); } - @Override public String getConnectionString() { + @Override + public String getConnectionString() { return "nothing"; } - @Override public void close() { + @Override + public void close() { } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java index 2c7061259f9..57f62148004 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java @@ -75,56 +75,43 @@ public class StringTraceRenderer { } private static List findRoots(final Map spansById) { - return spansById.values() - .stream() + return spansById.values().stream() .filter(node -> Objects.equals(node.spanData.getParentSpanId(), SpanId.getInvalid())) .collect(Collectors.toList()); } public void render(final Consumer writer) { - for (ListIterator iter = graphs.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = graphs.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node node = iter.next(); render(writer, node, 0, idx == 0); } } - private static void render( - final Consumer writer, - final Node node, - final int indent, - final boolean isFirst - ) { + private static void render(final Consumer writer, final Node node, final int indent, + final boolean isFirst) { writer.accept(render(node.spanData, indent, isFirst)); final List children = new ArrayList<>(node.children.values()); - for (ListIterator iter = children.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = children.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node child = iter.next(); render(writer, child, indent + 2, idx == 0); } } - private static String render( - final SpanData spanData, - final int indent, - final boolean isFirst - ) { + private static String render(final SpanData spanData, final int indent, final boolean isFirst) { final StringBuilder sb = new StringBuilder(); for (int i = 0; i < indent; i++) { sb.append(' '); } - return sb.append(isFirst ? "└─ " : "├─ ") - .append(render(spanData)) - .toString(); + return sb.append(isFirst ? 
"└─ " : "├─ ").append(render(spanData)).toString(); } private static String render(final SpanData spanData) { return new ToStringBuilder(spanData, ToStringStyle.NO_CLASS_NAME_STYLE) - .append("spanId", spanData.getSpanId()) - .append("name", spanData.getName()) - .append("hasEnded", spanData.hasEnded()) - .toString(); + .append("spanId", spanData.getSpanId()).append("name", spanData.getName()) + .append("hasEnded", spanData.hasEnded()).toString(); } private static class Node { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java index c7bb205076c..d73abba6907 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasProperty; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import java.util.Arrays; @@ -32,12 +33,11 @@ import org.hamcrest.TypeSafeMatcher; */ public final class AttributesMatchers { - private AttributesMatchers() { } + private AttributesMatchers() { + } - public static Matcher containsEntry( - Matcher> keyMatcher, - Matcher valueMatcher - ) { + public static Matcher containsEntry(Matcher> keyMatcher, + Matcher valueMatcher) { return new IsAttributesContaining<>(keyMatcher, valueMatcher); } @@ -53,10 +53,8 @@ public final class AttributesMatchers { return containsEntry(AttributeKey.stringArrayKey(key), Arrays.asList(values)); } - public static Matcher containsEntryWithStringValuesOf( - String key, - Matcher> matcher - ) { + public static Matcher containsEntryWithStringValuesOf(String key, + Matcher> matcher) { return new IsAttributesContaining<>(equalTo(AttributeKey.stringArrayKey(key)), matcher); } @@ -64,37 +62,28 @@ public final class AttributesMatchers { private final Matcher> keyMatcher; private final Matcher valueMatcher; - private IsAttributesContaining( - final Matcher> keyMatcher, - final Matcher valueMatcher - ) { + private IsAttributesContaining(final Matcher> keyMatcher, + final Matcher valueMatcher) { this.keyMatcher = keyMatcher; this.valueMatcher = valueMatcher; } @Override protected boolean matchesSafely(Attributes item) { - return item.asMap().entrySet().stream().anyMatch(e -> allOf( - hasProperty("key", keyMatcher), - hasProperty("value", valueMatcher)) - .matches(e)); + return item.asMap().entrySet().stream().anyMatch( + e -> allOf(hasProperty("key", keyMatcher), hasProperty("value", valueMatcher)).matches(e)); } @Override public void describeMismatchSafely(Attributes item, Description mismatchDescription) { - mismatchDescription - .appendText("Attributes was ") - .appendValueList("[", ", ", "]", item.asMap().entrySet()); + mismatchDescription.appendText("Attributes was ").appendValueList("[", ", ", "]", + item.asMap().entrySet()); } @Override public void describeTo(Description description) { - description - .appendText("Attributes containing [") - .appendDescriptionOf(keyMatcher) - .appendText("->") - .appendDescriptionOf(valueMatcher) - .appendText("]"); + description.appendText("Attributes containing [").appendDescriptionOf(keyMatcher) + .appendText("->").appendDescriptionOf(valueMatcher).appendText("]"); 
} } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java index e24245fb4c6..ec2110b9a34 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.hamcrest.Matchers.equalTo; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.sdk.trace.data.EventData; import org.hamcrest.FeatureMatcher; @@ -28,12 +29,14 @@ import org.hamcrest.Matcher; */ public final class EventMatchers { - private EventMatchers() { } + private EventMatchers() { + } public static Matcher hasAttributes(Matcher matcher) { - return new FeatureMatcher( - matcher, "EventData having attributes that ", "attributes") { - @Override protected Attributes featureValueOf(EventData actual) { + return new FeatureMatcher(matcher, "EventData having attributes that ", + "attributes") { + @Override + protected Attributes featureValueOf(EventData actual) { return actual.getAttributes(); } }; @@ -45,7 +48,8 @@ public final class EventMatchers { public static Matcher hasName(Matcher matcher) { return new FeatureMatcher(matcher, "EventData with a name that ", "name") { - @Override protected String featureValueOf(EventData actual) { + @Override + protected String featureValueOf(EventData actual) { return actual.getName(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java index c7a9d9029fc..6d0468c32ed 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java @@ -45,7 +45,7 @@ public final class SpanDataMatchers { public static Matcher hasAttributes(Matcher matcher) { return new FeatureMatcher(matcher, "SpanData having attributes that ", - "attributes") { + "attributes") { @Override protected Attributes featureValueOf(SpanData item) { return item.getAttributes(); @@ -55,7 +55,7 @@ public final class SpanDataMatchers { public static Matcher hasDuration(Matcher matcher) { return new FeatureMatcher(matcher, "SpanData having duration that ", - "duration") { + "duration") { @Override protected Duration featureValueOf(SpanData item) { return Duration.ofNanos(item.getEndEpochNanos() - item.getStartEpochNanos()); @@ -79,7 +79,7 @@ public final class SpanDataMatchers { public static Matcher hasEvents(Matcher> matcher) { return new FeatureMatcher>(matcher, - "SpanData having events that", "events") { + "SpanData having events that", "events") { @Override protected Iterable featureValueOf(SpanData item) { return item.getEvents(); @@ -93,19 +93,19 @@ public final class SpanDataMatchers { public static Matcher hasException(Matcher matcher) { return new FeatureMatcher(matcher, - "SpanData having Exception with Attributes that", "exception attributes") { + "SpanData having Exception with Attributes that", "exception attributes") { @Override protected Attributes featureValueOf(SpanData actual) { return actual.getEvents().stream() - .filter(e -> Objects.equals(SemanticAttributes.EXCEPTION_EVENT_NAME, e.getName())) - 
.map(EventData::getAttributes).findFirst().orElse(null); + .filter(e -> Objects.equals(SemanticAttributes.EXCEPTION_EVENT_NAME, e.getName())) + .map(EventData::getAttributes).findFirst().orElse(null); } }; } public static Matcher hasKind(SpanKind kind) { return new FeatureMatcher(equalTo(kind), "SpanData with kind that", - "SpanKind") { + "SpanKind") { @Override protected SpanKind featureValueOf(SpanData item) { return item.getKind(); @@ -136,7 +136,7 @@ public final class SpanDataMatchers { public static Matcher hasParentSpanId(Matcher matcher) { return new FeatureMatcher(matcher, "SpanKind with a parentSpanId that", - "parentSpanId") { + "parentSpanId") { @Override protected String featureValueOf(SpanData item) { return item.getParentSpanId(); @@ -151,7 +151,7 @@ public final class SpanDataMatchers { protected boolean matchesSafely(SpanData item) { final StatusData statusData = item.getStatus(); return statusData != null && statusData.getStatusCode() != null - && matcher.matches(statusData.getStatusCode()); + && matcher.matches(statusData.getStatusCode()); } @Override @@ -167,7 +167,7 @@ public final class SpanDataMatchers { public static Matcher hasTraceId(Matcher matcher) { return new FeatureMatcher(matcher, "SpanData with a traceId that ", - "traceId") { + "traceId") { @Override protected String featureValueOf(SpanData item) { return item.getTraceId(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java index 71aedbde649..d2b39bceaa6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.apache.hadoop.hbase.client.trace.hamcrest.AttributesMatchers.containsEntry; import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasAttributes; import static org.hamcrest.Matchers.allOf; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.sdk.trace.data.SpanData; import org.apache.hadoop.hbase.TableName; @@ -30,16 +30,16 @@ import org.hamcrest.Matcher; public final class TraceTestUtil { - private TraceTestUtil() { } + private TraceTestUtil() { + } /** * All {@link Span}s involving {@code conn} should include these attributes. 
*/ public static Matcher buildConnectionAttributesMatcher(AsyncConnectionImpl conn) { - return hasAttributes(allOf( - containsEntry("db.system", "hbase"), - containsEntry("db.connection_string", "nothing"), - containsEntry("db.user", conn.getUser().toString()))); + return hasAttributes( + allOf(containsEntry("db.system", "hbase"), containsEntry("db.connection_string", "nothing"), + containsEntry("db.user", conn.getUser().toString()))); } /** @@ -47,18 +47,16 @@ public final class TraceTestUtil { * @see #buildConnectionAttributesMatcher(AsyncConnectionImpl) */ public static Matcher buildConnectionAttributesMatcher(ConnectionImplementation conn) { - return hasAttributes(allOf( - containsEntry("db.system", "hbase"), - containsEntry("db.connection_string", "nothing"), - containsEntry("db.user", conn.getUser().toString()))); + return hasAttributes( + allOf(containsEntry("db.system", "hbase"), containsEntry("db.connection_string", "nothing"), + containsEntry("db.user", conn.getUser().toString()))); } /** * All {@link Span}s involving {@code tableName} should include these attributes. */ public static Matcher buildTableAttributesMatcher(TableName tableName) { - return hasAttributes(allOf( - containsEntry("db.name", tableName.getNamespaceAsString()), + return hasAttributes(allOf(containsEntry("db.name", tableName.getNamespaceAsString()), containsEntry("db.hbase.table", tableName.getNameAsString()))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java index 275fb0931ae..401d38d66cd 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; public class TestClientExceptionsUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientExceptionsUtil.class); + HBaseClassTestRule.forClass(TestClientExceptionsUtil.class); @Test public void testFindException() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java index 868f3b7fda4..721c258fb30 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestComparators { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestComparators.class); + HBaseClassTestRule.forClass(TestComparators.class); @Test public void testCellFieldsCompare() throws Exception { @@ -105,55 +105,55 @@ public class TestComparators { assertFalse(PrivateCellUtil.qualifierStartsWith(kv, q2)); assertFalse(PrivateCellUtil.qualifierStartsWith(kv, Bytes.toBytes("longerthanthequalifier"))); - //Binary component comparisons + // Binary component comparisons byte[] val = Bytes.toBytes("abcd"); kv = new KeyValue(r0, f, q1, val); buffer = ByteBuffer.wrap(kv.getBuffer()); bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); - //equality check - //row comparison - //row is "row0"(set by variable r0) - //and we are checking for equality to 'o' at position 1 - //'r' is at position 0. + // equality check + // row comparison + // row is "row0"(set by variable r0) + // and we are checking for equality to 'o' at position 1 + // 'r' is at position 0. byte[] component = Bytes.toBytes("o"); comparable = new BinaryComponentComparator(component, 1); assertEquals(0, PrivateCellUtil.compareRow(bbCell, comparable)); assertEquals(0, PrivateCellUtil.compareRow(kv, comparable)); - //value comparison - //value is "abcd"(set by variable val). - //and we are checking for equality to 'c' at position 2. - //'a' is at position 0. + // value comparison + // value is "abcd"(set by variable val). + // and we are checking for equality to 'c' at position 2. + // 'a' is at position 0. component = Bytes.toBytes("c"); comparable = new BinaryComponentComparator(component, 2); - assertEquals(0,PrivateCellUtil.compareValue(bbCell, comparable)); - assertEquals(0,PrivateCellUtil.compareValue(kv, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(bbCell, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(kv, comparable)); - //greater than + // greater than component = Bytes.toBytes("z"); - //checking for greater than at position 1. - //for both row("row0") and value("abcd") - //'z' > 'r' + // checking for greater than at position 1. 
+ // for both row("row0") and value("abcd") + // 'z' > 'r' comparable = new BinaryComponentComparator(component, 1); - //row comparison + // row comparison assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) > 0); - //value comparison - //'z' > 'a' + // value comparison + // 'z' > 'a' assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) > 0); - //less than + // less than component = Bytes.toBytes("a"); - //checking for less than at position 1 for row ("row0") + // checking for less than at position 1 for row ("row0") comparable = new BinaryComponentComparator(component, 1); - //row comparison - //'a' < 'r' + // row comparison + // 'a' < 'r' assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) < 0); - //value comparison - //checking for less than at position 2 for value("abcd") - //'a' < 'c' + // value comparison + // checking for less than at position 2 for value("abcd") + // 'a' < 'c' comparable = new BinaryComponentComparator(component, 2); assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) < 0); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java index 3e03a0796a5..df17570a80a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestKeyOnlyFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestKeyOnlyFilter.class); + HBaseClassTestRule.forClass(TestKeyOnlyFilter.class); @Parameterized.Parameter public boolean lenAsVal; @@ -64,12 +64,10 @@ public class TestKeyOnlyFilter { byte[] q = Bytes.toBytes("qual1"); byte[] v = Bytes.toBytes("val1"); byte[] tags = Bytes.toBytes("tag1"); - KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, - v.length, tags); + KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, v.length, tags); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); - ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, - buffer.remaining()); + ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); // KV format: // Rebuild as: <0:4> @@ -86,41 +84,34 @@ public class TestKeyOnlyFilter { KeyValue KeyOnlyKeyValue = new KeyValue(newBuffer); KeyOnlyCell keyOnlyCell = new KeyOnlyCell(kv, lenAsVal); - KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = new KeyOnlyByteBufferExtendedCell( - bbCell, lenAsVal); + KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = + new KeyOnlyByteBufferExtendedCell(bbCell, lenAsVal); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyCell)); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil - .matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell - .getValueLength()); + assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell.getValueLength()); assertEquals(8 + keyLen + (lenAsVal ? 4 : 0), KeyOnlyKeyValue.getSerializedSize()); assertEquals(8 + keyLen + (lenAsVal ? 
4 : 0), keyOnlyCell.getSerializedSize()); if (keyOnlyByteBufferedCell.getValueLength() > 0) { - assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); } assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyCell.getTimestamp()); - assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell - .getTimestamp()); + assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell.getTimestamp()); assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyCell.getTypeByte()); - assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell - .getTypeByte()); + assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell.getTypeByte()); assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyCell.getTagsLength()); - assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell - .getTagsLength()); + assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell.getTagsLength()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java index 60c8cd08499..ae5fb23161a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,14 @@ import org.junit.experimental.categories.Category; public class TestLongComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLongComparator.class); + HBaseClassTestRule.forClass(TestLongComparator.class); - private long[] values = { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, - Long.MAX_VALUE }; + private long[] values = + { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, Long.MAX_VALUE }; @Test public void testSimple() { - for (int i = 1; i < values.length ; i++) { + for (int i = 1; i < values.length; i++) { for (int j = 0; j < i; j++) { LongComparator cp = new LongComparator(values[i]); assertEquals(1, cp.compareTo(Bytes.toBytes(values[j]))); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java index 62eba1ecea5..f9c93811b4e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory; public class TestCellBlockBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellBlockBuilder.class); + HBaseClassTestRule.forClass(TestCellBlockBuilder.class); private static final Logger LOG = LoggerFactory.getLogger(TestCellBlockBuilder.class); @@ -71,19 +71,20 @@ public class TestCellBlockBuilder { } static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final Codec codec, - final CompressionCodec compressor) throws IOException { + final CompressionCodec compressor) throws IOException { doBuildCellBlockUndoCellBlock(builder, codec, compressor, 10, 1, false); } static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final Codec codec, - final CompressionCodec compressor, final int count, final int size, final boolean sized) - throws IOException { + final CompressionCodec compressor, final int count, final int size, final boolean sized) + throws IOException { Cell[] cells = getCells(count, size); - CellScanner cellScanner = sized ? getSizedCellScanner(cells) - : CellUtil.createCellScanner(Arrays.asList(cells).iterator()); + CellScanner cellScanner = sized + ? getSizedCellScanner(cells) + : CellUtil.createCellScanner(Arrays.asList(cells).iterator()); ByteBuffer bb = builder.buildCellBlock(codec, compressor, cellScanner); - cellScanner = builder.createCellScannerReusingBuffers(codec, compressor, - new SingleByteBuff(bb)); + cellScanner = + builder.createCellScannerReusingBuffers(codec, compressor, new SingleByteBuff(bb)); int i = 0; while (cellScanner.advance()) { i++; @@ -148,7 +149,7 @@ public class TestCellBlockBuilder { } private static void timerTests(final CellBlockBuilder builder, final int count, final int size, - final Codec codec, final CompressionCodec compressor) throws IOException { + final Codec codec, final CompressionCodec compressor) throws IOException { final int cycles = 1000; StopWatch timer = new StopWatch(); timer.start(); @@ -157,7 +158,7 @@ public class TestCellBlockBuilder { } timer.stop(); LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count=" - + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); + + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); timer.reset(); timer.start(); for (int i = 0; i < cycles; i++) { @@ -165,18 +166,17 @@ public class TestCellBlockBuilder { } timer.stop(); LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count=" - + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); + + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); } private static void timerTest(final CellBlockBuilder builder, final StopWatch timer, - final int count, final int size, final Codec codec, final CompressionCodec compressor, - final boolean sized) throws IOException { + final int count, final int size, final Codec codec, final CompressionCodec compressor, + final boolean sized) throws IOException { doBuildCellBlockUndoCellBlock(builder, codec, compressor, count, size, sized); } /** * For running a few tests of methods herein. 
- * * @param args the arguments to use for the timer test * @throws IOException if creating the build fails */ diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java index 48a079d3e75..da962cac0d3 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,15 +33,17 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestConnectionId { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionId.class); + HBaseClassTestRule.forClass(TestConnectionId.class); private Configuration testConfig = HBaseConfiguration.create(); - private User testUser1 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); - private User testUser2 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); + private User testUser1 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); + private User testUser2 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); private String serviceName = "test"; private Address address = Address.fromParts("localhost", 999); private ConnectionId connectionId1 = new ConnectionId(testUser1, serviceName, address); @@ -71,9 +73,8 @@ public class TestConnectionId { } /** - * Test if the over-ridden equals method satisfies all the properties - * (reflexive, symmetry, transitive and null) - * along with their hashcode + * Test if the over-ridden equals method satisfies all the properties (reflexive, symmetry, + * transitive and null) along with their hashcode */ @Test public void testEqualsWithHashCode() { @@ -87,8 +88,8 @@ public class TestConnectionId { // Test the Transitive Property ConnectionId connectionId3 = new ConnectionId(testUser1, serviceName, address); - assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) && - connectionId1.equals(connectionId3)); + assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) + && connectionId1.equals(connectionId3)); assertEquals(connectionId.hashCode(), connectionId3.hashCode()); // Test For null @@ -99,8 +100,8 @@ public class TestConnectionId { } /** - * Test the hashcode for same object and different object with both hashcode - * function and static hashcode function + * Test the hashcode for same object and different object with both hashcode function and static + * hashcode function */ @Test public void testHashCode() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java index dc94e91f4fd..0dafef0b764 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -92,7 +92,7 @@ public class TestFailedServersLog { verify(mockAppender, times(1)).append(any(org.apache.logging.log4j.core.LogEvent.class)); assertEquals(org.apache.logging.log4j.Level.DEBUG, level.get()); - assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + - nullException.toString(), msg.get()); + assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + + nullException.toString(), msg.get()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java index d829b4bfd65..a0b68646b14 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.junit.experimental.categories.Category; public class TestHBaseRpcControllerImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseRpcControllerImpl.class); + HBaseClassTestRule.forClass(TestHBaseRpcControllerImpl.class); @Test public void testListOfCellScannerables() throws IOException { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java index 45da1e8560d..c327896f72a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -104,15 +104,16 @@ public class TestIPCUtil { Address addr = Address.fromParts("127.0.0.1", 12345); for (Throwable exception : exceptions) { if (exception instanceof TimeoutException) { - assertThat(IPCUtil.wrapException(addr, null, exception), instanceOf(TimeoutIOException.class)); + assertThat(IPCUtil.wrapException(addr, null, exception), + instanceOf(TimeoutIOException.class)); } else { - IOException ioe = IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, - exception); + IOException ioe = + IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, exception); // Assert that the exception contains the Region name if supplied. HBASE-25735. // Not all exceptions get the region stuffed into it. if (ioe.getMessage() != null) { - assertTrue(ioe.getMessage(). 
- contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); + assertTrue(ioe.getMessage() + .contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); } assertThat(ioe, instanceOf(exception.getClass())); } @@ -135,8 +136,8 @@ public class TestIPCUtil { if (depth <= IPCUtil.MAX_DEPTH) { if (numElements <= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call run directly but stack trace decreased from " + - numStackTraceElements.intValue() + " to " + numElements)); + new AssertionError("should call run directly but stack trace decreased from " + + numStackTraceElements.intValue() + " to " + numElements)); return; } numStackTraceElements.setValue(numElements); @@ -144,9 +145,9 @@ public class TestIPCUtil { } else { if (numElements >= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call eventLoop.execute to prevent stack overflow but" + - " stack trace increased from " + numStackTraceElements.intValue() + " to " + - numElements)); + new AssertionError("should call eventLoop.execute to prevent stack overflow but" + + " stack trace increased from " + numStackTraceElements.intValue() + " to " + + numElements)); } else { future.complete(null); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java index 8782fe116b0..a9c40fd3bb7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java index df6e6f2045a..dfa3450b74a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.ipc; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseServerException; @@ -37,18 +38,17 @@ public class TestRemoteWithExtrasException { HBaseClassTestRule.forClass(TestRemoteWithExtrasException.class); /** - * test verifies that we honor the inherent value of an exception for isServerOverloaded. - * We don't want a false value passed into RemoteWithExtrasExceptions to override the - * inherent value of an exception if it's already true. This could be due to an out of date - * server not sending the proto field we expect. + * test verifies that we honor the inherent value of an exception for isServerOverloaded. We don't + * want a false value passed into RemoteWithExtrasExceptions to override the inherent value of an + * exception if it's already true. This could be due to an out of date server not sending the + * proto field we expect. 
*/ @Test public void itUsesExceptionDefaultValueForServerOverloaded() { // pass false for server overloaded, we still expect the exception to be true due to // the exception type - RemoteWithExtrasException ex = - new RemoteWithExtrasException(ServerOverloadedException.class.getName(), - "server is overloaded", false, false); + RemoteWithExtrasException ex = new RemoteWithExtrasException( + ServerOverloadedException.class.getName(), "server is overloaded", false, false); IOException result = ex.unwrapRemoteException(); assertEquals(result.getClass(), ServerOverloadedException.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java index ba1e27258d2..48bd5498cd4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestRpcClientDeprecatedNameMapping { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcClientDeprecatedNameMapping.class); + HBaseClassTestRule.forClass(TestRpcClientDeprecatedNameMapping.class); @Test public void test() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java index 62e204a65a2..25010d190f6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,24 +25,19 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestQuotaFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestQuotaFilter.class); + HBaseClassTestRule.forClass(TestQuotaFilter.class); @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * QuotaFilter qf - * = new QuotaFilter() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . 
For example: QuotaFilter qf = new QuotaFilter() .setFoo(foo) + * .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" returns the + * declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(QuotaFilter.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java index 37a21dc2b18..e2843180938 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +22,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory.QuotaGlobalsSettingsBypass; @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestQuotaGlobalsSettingsBypass { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestQuotaGlobalsSettingsBypass.class); + HBaseClassTestRule.forClass(TestQuotaGlobalsSettingsBypass.class); @Test public void testMerge() throws IOException { @@ -47,19 +47,19 @@ public class TestQuotaGlobalsSettingsBypass { @Test public void testInvalidMerges() throws IOException { QuotaGlobalsSettingsBypass userBypass = - new QuotaGlobalsSettingsBypass("joe", null, null, null, true); + new QuotaGlobalsSettingsBypass("joe", null, null, null, true); QuotaGlobalsSettingsBypass tableBypass = - new QuotaGlobalsSettingsBypass(null, TableName.valueOf("table"), null, null, true); + new QuotaGlobalsSettingsBypass(null, TableName.valueOf("table"), null, null, true); QuotaGlobalsSettingsBypass namespaceBypass = - new QuotaGlobalsSettingsBypass(null, null, "ns", null, true); + new QuotaGlobalsSettingsBypass(null, null, "ns", null, true); QuotaGlobalsSettingsBypass regionServerBypass = - new QuotaGlobalsSettingsBypass(null, null, null, "all", true); + new QuotaGlobalsSettingsBypass(null, null, null, "all", true); QuotaGlobalsSettingsBypass userOnTableBypass = - new QuotaGlobalsSettingsBypass("joe", 
TableName.valueOf("table"), null, null, true); + new QuotaGlobalsSettingsBypass("joe", TableName.valueOf("table"), null, null, true); QuotaGlobalsSettingsBypass userOnNamespaceBypass = - new QuotaGlobalsSettingsBypass("joe", null, "ns", null, true); + new QuotaGlobalsSettingsBypass("joe", null, "ns", null, true); QuotaGlobalsSettingsBypass userOnRegionServerBypass = - new QuotaGlobalsSettingsBypass("joe", null, null, "all", true); + new QuotaGlobalsSettingsBypass("joe", null, null, "all", true); assertTrue(userBypass.merge(userBypass).getBypass()); expectFailure(userBypass, new QuotaGlobalsSettingsBypass("frank", null, null, null, false)); @@ -142,6 +142,7 @@ public class TestQuotaGlobalsSettingsBypass { try { one.merge(two); fail("Expected to see an Exception merging " + two + " into " + one); - } catch (IllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java index 6b9212f6260..be659bc202d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,28 +49,26 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota; public class TestQuotaSettingsFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestQuotaSettingsFactory.class); + HBaseClassTestRule.forClass(TestQuotaSettingsFactory.class); @Test public void testAllQuotasAddedToList() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table + .build(); final long readLimit = 1000; final long writeLimit = 500; final Throttle throttle = Throttle.newBuilder() - // 1000 read reqs/min - .setReadNum(TimedQuota.newBuilder().setSoftLimit(readLimit) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) - // 500 write reqs/min - .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) - .build(); - final Quotas quotas = Quotas.newBuilder() - .setSpace(spaceQuota) // Set the FS quotas - .setThrottle(throttle) // Set some RPC limits - .build(); + // 1000 read reqs/min + .setReadNum(TimedQuota.newBuilder().setSoftLimit(readLimit) + .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) + // 500 write reqs/min + .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit) + .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) + .build(); + final Quotas quotas = Quotas.newBuilder().setSpace(spaceQuota) // Set the FS quotas + .setThrottle(throttle) // Set some RPC limits + .build(); final TableName tn = TableName.valueOf("my_table"); List settings = QuotaSettingsFactory.fromTableQuotas(tn, quotas); assertEquals(3, settings.size()); @@ -125,19 +123,15 @@ public class TestQuotaSettingsFactory { @Test(expected = IllegalArgumentException.class) 
public void testNeitherTableNorNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(null, null, spaceQuota); } @Test(expected = IllegalArgumentException.class) public void testBothTableAndNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(TableName.valueOf("foo"), "bar", spaceQuota); } @@ -147,10 +141,10 @@ public class TestQuotaSettingsFactory { final long sizeLimit = 1024L * 1024L * 1024L * 75; // 75GB final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_INSERTS; QuotaSettings settings = - QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy); + QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy); assertNotNull("QuotaSettings should not be null", settings); assertTrue("Should be an instance of SpaceLimitSettings", - settings instanceof SpaceLimitSettings); + settings instanceof SpaceLimitSettings); SpaceLimitSettings spaceLimitSettings = (SpaceLimitSettings) settings; SpaceLimitRequest protoRequest = spaceLimitSettings.getProto(); assertTrue("Request should have a SpaceQuota", protoRequest.hasQuota()); @@ -167,7 +161,7 @@ public class TestQuotaSettingsFactory { QuotaSettings nsSettings = QuotaSettingsFactory.removeNamespaceSpaceLimit(ns); assertNotNull("QuotaSettings should not be null", nsSettings); assertTrue("Should be an instance of SpaceLimitSettings", - nsSettings instanceof SpaceLimitSettings); + nsSettings instanceof SpaceLimitSettings); SpaceLimitRequest nsProto = ((SpaceLimitSettings) nsSettings).getProto(); assertTrue("Request should have a SpaceQuota", nsProto.hasQuota()); assertTrue("The remove attribute should be true", nsProto.getQuota().getRemove()); @@ -175,7 +169,7 @@ public class TestQuotaSettingsFactory { QuotaSettings tableSettings = QuotaSettingsFactory.removeTableSpaceLimit(tn); assertNotNull("QuotaSettings should not be null", tableSettings); assertTrue("Should be an instance of SpaceLimitSettings", - tableSettings instanceof SpaceLimitSettings); + tableSettings instanceof SpaceLimitSettings); SpaceLimitRequest tableProto = ((SpaceLimitSettings) tableSettings).getProto(); assertTrue("Request should have a SpaceQuota", tableProto.hasQuota()); assertTrue("The remove attribute should be true", tableProto.getQuota().getRemove()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java index 2406d10ed0a..a3fc235c2e8 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,12 +38,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; /** * Test class for {@link SpaceLimitSettings}. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestSpaceLimitSettings { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSpaceLimitSettings.class); + HBaseClassTestRule.forClass(TestSpaceLimitSettings.class); @Test(expected = IllegalArgumentException.class) public void testInvalidTableQuotaSizeLimit() { @@ -130,14 +130,14 @@ public class TestSpaceLimitSettings { @Test public void testQuotaMerging() throws IOException { TableName tn = TableName.valueOf("foo"); - QuotaSettings originalSettings = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings largerSizeLimit = QuotaSettingsFactory.limitTableSpace( - tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings differentPolicy = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); - QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace( - "ns1", 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings originalSettings = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings largerSizeLimit = + QuotaSettingsFactory.limitTableSpace(tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings differentPolicy = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace("ns1", + 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); assertEquals(originalSettings.merge(largerSizeLimit), largerSizeLimit); assertEquals(originalSettings.merge(differentPolicy), differentPolicy); @@ -145,7 +145,7 @@ public class TestSpaceLimitSettings { originalSettings.merge(incompatibleSettings); fail("Should not be able to merge a Table space quota with a namespace space quota."); } catch (IllegalArgumentException e) { - //pass + // pass } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java index 53fb9bd3e92..11e2f737b31 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,27 +34,25 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestThrottleSettings { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestThrottleSettings.class); + HBaseClassTestRule.forClass(TestThrottleSettings.class); @Test public void testMerge() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1); TimedQuota tq2 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest tr2 = ThrottleRequest.newBuilder().setTimedQuota(tq2) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings merged = orig.merge(new ThrottleSettings("joe", null, null, null, tr2)); @@ -66,17 +64,15 @@ public class TestThrottleSettings { @Test public void testIncompatibleThrottleTypes() throws IOException { TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, requestsQuotaReq); TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota) - .setType(QuotaProtos.ThrottleType.READ_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.READ_NUMBER).build(); try { orig.merge(new ThrottleSettings("joe", null, null, null, readsQuotaReq)); @@ -89,17 +85,15 @@ public class TestThrottleSettings { @Test public void testNoThrottleReturnsOriginal() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new 
ThrottleSettings("joe", null, null, null, tr1); - ThrottleRequest tr2 = ThrottleRequest.newBuilder() - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + ThrottleRequest tr2 = + ThrottleRequest.newBuilder().setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); - assertTrue( - "The same object should be returned by merge, but it wasn't", + assertTrue("The same object should be returned by merge, but it wasn't", orig == orig.merge(new ThrottleSettings("joe", null, null, null, tr2))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java index ae2d4262e64..fb74df39473 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -37,12 +36,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestReplicationPeerConfig { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationPeerConfig.class); + HBaseClassTestRule.forClass(TestReplicationPeerConfig.class); private static final String NAMESPACE_REPLICATE = "replicate"; private static final String NAMESPACE_OTHER = "other"; @@ -53,16 +52,11 @@ public class TestReplicationPeerConfig { @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * ReplicationPeerConfig htd - * = new ReplicationPeerConfig() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: ReplicationPeerConfig htd = new ReplicationPeerConfig() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(ReplicationPeerConfig.class); @@ -72,48 +66,39 @@ public class TestReplicationPeerConfig { public void testNeedToReplicateWithReplicatingAll() { // 1. replication_all flag is true, no namespaces and table-cfs config ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .build(); + .setReplicateAllUserTables(true).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 2. 
replicate_all flag is true, and config in excludedTableCfs // Exclude empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(Maps.newHashMap()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude table B Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); // 3. replicate_all flag is true, and config in excludeNamespaces // Exclude empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace other - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace replication - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 4. 
replicate_all flag is true, and config excludeNamespaces and excludedTableCfs both @@ -121,30 +106,24 @@ public class TestReplicationPeerConfig { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .setExcludeTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); } @@ -156,78 +135,61 @@ public class TestReplicationPeerConfig { // 1. replication_all flag is false, no namespaces and table-cfs config peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .build(); + .setReplicateAllUserTables(false).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 2. replicate_all flag is false, and only config table-cfs in peer // Set empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(Maps.newHashMap()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set table B tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); // 3. 
replication_all flag is false, and only config namespace in peer // Set empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace other peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace replication peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 4. replicate_all flag is false, and config namespaces and table-cfs both // Namespaces config doesn't conflict with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(false) + .setTableCFsMap(tableCfs).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(false) + .setTableCFsMap(tableCfs).setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setTableCFsMap(tableCfs) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(false) + .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); } @@ -236,9 +198,7 @@ public class TestReplicationPeerConfig { Map> excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -246,9 +206,7 @@ public class TestReplicationPeerConfig { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList()); peerConfig = new 
ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -256,9 +214,7 @@ public class TestReplicationPeerConfig { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -269,9 +225,7 @@ public class TestReplicationPeerConfig { Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -279,9 +233,7 @@ public class TestReplicationPeerConfig { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList()); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -289,9 +241,7 @@ public class TestReplicationPeerConfig { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java index cf5939031b0..6f7be831585 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import static org.junit.Assert.fail; import java.security.Key; import java.security.KeyException; - import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -38,7 +37,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestEncryptionUtil { private static final String INVALID_HASH_ALG = "this-hash-algorithm-not-exists hopefully... :)"; @@ -46,11 +45,11 @@ public class TestEncryptionUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEncryptionUtil.class); + HBaseClassTestRule.forClass(TestEncryptionUtil.class); // There does not seem to be a ready way to test either getKeyFromBytesOrMasterKey // or createEncryptionContext, and the existing code under MobUtils appeared to be - // untested. Not ideal! + // untested. Not ideal! @Test public void testKeyWrappingUsingHashAlgDefault() throws Exception { @@ -146,15 +145,14 @@ public class TestEncryptionUtil { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key @@ -168,7 +166,7 @@ public class TestEncryptionUtil { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); // unwrap with an incorrect key try { @@ -183,7 +181,7 @@ public class TestEncryptionUtil { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } @@ -204,7 +202,7 @@ public class TestEncryptionUtil { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? 
assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); } private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws Exception { @@ -215,8 +213,7 @@ public class TestEncryptionUtil { // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java index 538a9b91c3c..8d82ba538bd 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,12 +65,12 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Strings; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestHBaseSaslRpcClient { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseSaslRpcClient.class); + HBaseClassTestRule.forClass(TestHBaseSaslRpcClient.class); static { System.setProperty("java.security.krb5.realm", "DOMAIN.COM"); @@ -82,19 +82,18 @@ public class TestHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(TestHBaseSaslRpcClient.class); - @Rule public ExpectedException exception = ExpectedException.none(); @Test public void testSaslClientUsesGivenRpcProtection() throws Exception { - Token token = createTokenMockWithCredentials(DEFAULT_USER_NAME, - DEFAULT_USER_PASSWORD); + Token token = + createTokenMockWithCredentials(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD); DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider(); for (SaslUtil.QualityOfProtection qop : SaslUtil.QualityOfProtection.values()) { String negotiatedQop = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, token, - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false, qop.name(), - false) { + Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false, qop.name(), + false) { public String getQop() { return saslProps.get(Sasl.QOP); } @@ -114,9 +113,9 @@ public class TestHBaseSaslRpcClient { final RealmCallback realmCallback = mock(RealmCallback.class); // We can provide a realmCallback, but HBase presently does nothing with it. 
- Callback[] callbackArray = {nameCallback, passwordCallback, realmCallback}; + Callback[] callbackArray = { nameCallback, passwordCallback, realmCallback }; final DigestSaslClientCallbackHandler saslClCallbackHandler = - new DigestSaslClientCallbackHandler(token); + new DigestSaslClientCallbackHandler(token); saslClCallbackHandler.handle(callbackArray); verify(nameCallback).setName(anyString()); verify(passwordCallback).setPassword(any()); @@ -128,11 +127,11 @@ public class TestHBaseSaslRpcClient { when(token.getIdentifier()).thenReturn(Bytes.toBytes(DEFAULT_USER_NAME)); when(token.getPassword()).thenReturn(Bytes.toBytes(DEFAULT_USER_PASSWORD)); final DigestSaslClientCallbackHandler saslClCallbackHandler = - new DigestSaslClientCallbackHandler(token); + new DigestSaslClientCallbackHandler(token); try { saslClCallbackHandler.handle(new Callback[] { mock(TextOutputCallback.class) }); } catch (UnsupportedCallbackException expEx) { - //expected + // expected } catch (Exception ex) { fail("testDigestSaslClientCallbackHandlerWithException error : " + ex.getMessage()); } @@ -140,7 +139,7 @@ public class TestHBaseSaslRpcClient { @Test public void testHBaseSaslRpcClientCreation() throws Exception { - //creation kerberos principal check section + // creation kerberos principal check section assertFalse(assertSuccessCreationKerberosPrincipal(null)); assertFalse(assertSuccessCreationKerberosPrincipal("DOMAIN.COM")); assertFalse(assertSuccessCreationKerberosPrincipal("principal/DOMAIN.COM")); @@ -150,22 +149,22 @@ public class TestHBaseSaslRpcClient { LOG.warn("Could not create a SASL client with valid Kerberos credential"); } - //creation digest principal check section + // creation digest principal check section assertFalse(assertSuccessCreationDigestPrincipal(null, null)); assertFalse(assertSuccessCreationDigestPrincipal("", "")); assertFalse(assertSuccessCreationDigestPrincipal("", null)); assertFalse(assertSuccessCreationDigestPrincipal(null, "")); assertTrue(assertSuccessCreationDigestPrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //creation simple principal check section + // creation simple principal check section assertFalse(assertSuccessCreationSimplePrincipal("", "")); assertFalse(assertSuccessCreationSimplePrincipal(null, null)); assertFalse(assertSuccessCreationSimplePrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //exceptions check section + // exceptions check section assertTrue(assertIOExceptionThenSaslClientIsNull(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - assertTrue(assertIOExceptionWhenGetStreamsBeforeConnectCall( - DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); + assertTrue( + assertIOExceptionWhenGetStreamsBeforeConnectCall(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); } @Test @@ -182,47 +181,46 @@ public class TestHBaseSaslRpcClient { assertAuthMethodWrite(out, AuthMethod.DIGEST); } - private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) - throws IOException { - in.reset(new byte[] {authMethod.code}, 1); + private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) throws IOException { + in.reset(new byte[] { authMethod.code }, 1); assertEquals(authMethod, AuthMethod.read(in)); } private void assertAuthMethodWrite(DataOutputBuffer out, AuthMethod authMethod) - throws IOException { + throws IOException { authMethod.write(out); assertEquals(authMethod.code, out.getData()[0]); out.reset(); } private boolean assertIOExceptionWhenGetStreamsBeforeConnectCall(String principal, - String password) throws IOException { + String 
password) throws IOException { boolean inState = false; boolean outState = false; DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider() { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, Token token, - boolean fallbackAllowed, Map saslProps) { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) { return Mockito.mock(SaslClient.class); } }; HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); try { rpcClient.getInputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet inState = true; } try { rpcClient.getOutputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet outState = true; } @@ -232,18 +230,17 @@ public class TestHBaseSaslRpcClient { private boolean assertIOExceptionThenSaslClientIsNull(String principal, String password) { try { DigestSaslClientAuthenticationProvider provider = - new DigestSaslClientAuthenticationProvider() { - @Override - public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, - Token token, boolean fallbackAllowed, - Map saslProps) { - return null; - } - }; + new DigestSaslClientAuthenticationProvider() { + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddress, + SecurityInfo securityInfo, Token token, + boolean fallbackAllowed, Map saslProps) { + return null; + } + }; new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); return false; } catch (IOException ex) { return true; @@ -254,7 +251,7 @@ public class TestHBaseSaslRpcClient { HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientForKerberos(principal); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -264,10 +261,10 @@ public class TestHBaseSaslRpcClient { HBaseSaslRpcClient rpcClient = null; try { rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), - new DigestSaslClientAuthenticationProvider(), - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); - } catch(Exception ex) { + new DigestSaslClientAuthenticationProvider(), + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -277,22 +274,20 @@ public class TestHBaseSaslRpcClient { HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientSimple(principal, password); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; 
} - private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) - throws IOException { + private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), - new GssSaslClientAuthenticationProvider(), createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + new GssSaslClientAuthenticationProvider(), createTokenMock(), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); } - private Token createTokenMockWithCredentials( - String principal, String password) - throws IOException { + private Token createTokenMockWithCredentials(String principal, + String password) throws IOException { Token token = createTokenMock(); if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(password)) { when(token.getIdentifier()).thenReturn(Bytes.toBytes(DEFAULT_USER_NAME)); @@ -302,10 +297,10 @@ public class TestHBaseSaslRpcClient { } private HBaseSaslRpcClient createSaslRpcClientSimple(String principal, String password) - throws IOException { + throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), - new SimpleSaslClientAuthenticationProvider(), createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + new SimpleSaslClientAuthenticationProvider(), createTokenMock(), + Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); } @SuppressWarnings("unchecked") @@ -314,8 +309,8 @@ public class TestHBaseSaslRpcClient { } @Test(expected = IOException.class) - public void testFailedEvaluateResponse() throws IOException { - //prep mockin the SaslClient + public void testFailedEvaluateResponse() throws IOException { + // prep mockin the SaslClient SimpleSaslClientAuthenticationProvider mockProvider = Mockito.mock(SimpleSaslClientAuthenticationProvider.class); SaslClient mockClient = Mockito.mock(SaslClient.class); @@ -323,11 +318,10 @@ public class TestHBaseSaslRpcClient { Assert.assertNotNull(mockClient); Mockito.when(mockProvider.createClient(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.any())).thenReturn(mockClient); - HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), - mockProvider, createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), mockProvider, + createTokenMock(), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); - //simulate getting an error from a failed saslServer.evaluateResponse + // simulate getting an error from a failed saslServer.evaluateResponse DataOutputBuffer errorBuffer = new DataOutputBuffer(); errorBuffer.writeInt(SaslStatus.ERROR.state); WritableUtils.writeString(errorBuffer, IOException.class.getName()); @@ -337,7 +331,7 @@ public class TestHBaseSaslRpcClient { in.reset(errorBuffer.getData(), 0, errorBuffer.getLength()); DataOutputBuffer out = new DataOutputBuffer(); - //simulate that authentication exchange has completed quickly after sending the token + // simulate that authentication exchange has completed quickly after sending the token Mockito.when(mockClient.isComplete()).thenReturn(true); rpcClient.saslConnect(in, out); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java index 
36f29dec240..ccb23a99e37 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestSaslUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSaslUtil.class); + HBaseClassTestRule.forClass(TestSaslUtil.class); @Rule public ExpectedException exception = ExpectedException.none(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java index eff3b5f8dd0..52ebebc372f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java @@ -23,7 +23,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -32,14 +31,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestDefaultProviderSelector { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDefaultProviderSelector.class); + HBaseClassTestRule.forClass(TestDefaultProviderSelector.class); BuiltInProviderSelector selector; + @Before public void setup() { selector = new BuiltInProviderSelector(); @@ -70,9 +70,9 @@ public class TestDefaultProviderSelector { @Test public void testExpectedProviders() { - HashSet providers = new HashSet<>(Arrays.asList( - new SimpleSaslClientAuthenticationProvider(), new GssSaslClientAuthenticationProvider(), - new DigestSaslClientAuthenticationProvider())); + HashSet providers = + new HashSet<>(Arrays.asList(new SimpleSaslClientAuthenticationProvider(), + new GssSaslClientAuthenticationProvider(), new DigestSaslClientAuthenticationProvider())); selector.configure(new Configuration(false), providers); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java index 2b399593e7c..029c880600b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java @@ -25,9 +25,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.HashMap; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.HBaseConfiguration; @@ -44,16 +42,16 @@ import org.junit.experimental.categories.Category; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; -@Category({SmallTests.class, SecurityTests.class}) +@Category({ SmallTests.class, SecurityTests.class }) public class TestSaslClientAuthenticationProviders { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSaslClientAuthenticationProviders.class); + HBaseClassTestRule.forClass(TestSaslClientAuthenticationProviders.class); @Test public void testCannotAddTheSameProviderTwice() { - HashMap registeredProviders = new HashMap<>(); + HashMap registeredProviders = new HashMap<>(); SaslClientAuthenticationProvider p1 = new SimpleSaslClientAuthenticationProvider(); SaslClientAuthenticationProvider p2 = new SimpleSaslClientAuthenticationProvider(); @@ -62,25 +60,26 @@ public class TestSaslClientAuthenticationProviders { try { SaslClientAuthenticationProviders.addProviderIfNotExists(p2, registeredProviders); - } catch (RuntimeException e) {} + } catch (RuntimeException e) { + } assertSame("Expected the original provider to be present", p1, - registeredProviders.entrySet().iterator().next().getValue()); + registeredProviders.entrySet().iterator().next().getValue()); } @Test public void testInstanceIsCached() { Configuration conf = HBaseConfiguration.create(); SaslClientAuthenticationProviders providers1 = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); SaslClientAuthenticationProviders providers2 = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); assertSame(providers1, providers2); SaslClientAuthenticationProviders.reset(); SaslClientAuthenticationProviders providers3 = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); assertNotSame(providers1, providers3); assertEquals(providers1.getNumRegisteredProviders(), providers3.getNumRegisteredProviders()); } @@ -89,58 +88,66 @@ public class TestSaslClientAuthenticationProviders { public void testDifferentConflictingImplementationsFail() { Configuration conf = HBaseConfiguration.create(); conf.setStrings(SaslClientAuthenticationProviders.EXTRA_PROVIDERS_KEY, - ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); + ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); SaslClientAuthenticationProviders.getInstance(conf); } static class ConflictingProvider1 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD1 = new SaslAuthMethod( - "FOO", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD1 = + new SaslAuthMethod("FOO", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider1() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD1; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return 
null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } static class ConflictingProvider2 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD2 = new SaslAuthMethod( - "BAR", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD2 = + new SaslAuthMethod("BAR", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider2() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD2; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java index 50db3a99d22..a2f26e073e7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,9 @@ import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @Category(SmallTests.class) @@ -66,7 +68,7 @@ public class TestClientTokenUtil { shouldInjectFault.set(null, injected); try { - ClientTokenUtil.obtainToken((Connection)null); + ClientTokenUtil.obtainToken((Connection) null); fail("Should have injected exception."); } catch (IOException e) { Throwable t = e; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 317dff9efeb..b27d832ee8c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,9 +70,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; public class TestProtobufUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProtobufUtil.class); + HBaseClassTestRule.forClass(TestProtobufUtil.class); private static final String TAG_STR = "tag-1"; - private static final byte TAG_TYPE = (byte)10; + private static final byte TAG_TYPE = (byte) 10; + public TestProtobufUtil() { } @@ -93,7 +94,6 @@ public class TestProtobufUtil { /** * Test basic Get conversions. - * * @throws IOException if the conversion to a {@link Get} fails */ @Test @@ -126,7 +126,6 @@ public class TestProtobufUtil { /** * Test Delete Mutate conversions. - * * @throws IOException if the conversion to a {@link Delete} or a * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @@ -161,20 +160,16 @@ public class TestProtobufUtil { // delete always have empty value, // add empty value to the original mutate - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { qualifier.setValue(ByteString.EMPTY); } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.DELETE, delete)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.DELETE, delete)); } /** * Test Put Mutate conversions. - * * @throws IOException if the conversion to a {@link Put} or a * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @@ -210,22 +205,18 @@ public class TestProtobufUtil { // value level timestamp specified, // add the timestamp to the original mutate long timestamp = put.getTimestamp(); - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { if (!qualifier.hasTimestamp()) { qualifier.setTimestamp(timestamp); } } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.PUT, put)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.PUT, put)); } /** * Test basic Scan conversions. 
- * * @throws IOException if the conversion to a {@link org.apache.hadoop.hbase.client.Scan} fails */ @Test @@ -259,19 +250,18 @@ public class TestProtobufUtil { scanBuilder.setIncludeStopRow(false); ClientProtos.Scan expectedProto = scanBuilder.build(); - ClientProtos.Scan actualProto = ProtobufUtil.toScan( - ProtobufUtil.toScan(expectedProto)); + ClientProtos.Scan actualProto = ProtobufUtil.toScan(ProtobufUtil.toScan(expectedProto)); assertEquals(expectedProto, actualProto); } @Test public void testToCell() { KeyValue kv1 = - new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); + new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); KeyValue kv2 = - new KeyValue(Bytes.toBytes("bbb"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); + new KeyValue(Bytes.toBytes("bbb"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); KeyValue kv3 = - new KeyValue(Bytes.toBytes("ccc"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); + new KeyValue(Bytes.toBytes("ccc"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); byte[] arr = new byte[kv1.getLength() + kv2.getLength() + kv3.getLength()]; System.arraycopy(kv1.getBuffer(), kv1.getOffset(), arr, 0, kv1.getLength()); System.arraycopy(kv2.getBuffer(), kv2.getOffset(), arr, kv1.getLength(), kv2.getLength()); @@ -281,15 +271,13 @@ public class TestProtobufUtil { dbb.put(arr); ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength()); CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV, false); - Cell newOffheapKV = - ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, - false); + Cell newOffheapKV = ProtobufUtil + .toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, false); assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0); } /** * Test Increment Mutate conversions. - * * @throws IOException if converting to an {@link Increment} or * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @@ -334,23 +322,20 @@ public class TestProtobufUtil { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. + * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. */ @Test public void testIncrementNoTimestamp() throws IOException { MutationProto mutation = getIncrementMutation(null); Increment increment = ProtobufUtil.toIncrement(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, increment.getTimestamp()); - increment.getFamilyCellMap().values() - .forEach(cells -> - cells.forEach(cell -> - assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + increment.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } /** * Test Append Mutate conversions. - * * @throws IOException if converting to an {@link Append} fails */ @Test @@ -373,15 +358,16 @@ public class TestProtobufUtil { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. + * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. 
*/ @Test public void testAppendNoTimestamp() throws IOException { MutationProto mutation = getAppendMutation(null); Append append = ProtobufUtil.toAppend(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, append.getTimestamp()); - append.getFamilyCellMap().values().forEach(cells -> cells.forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + append.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } private MutationProto getAppendMutation(Long timestamp) { @@ -424,9 +410,9 @@ public class TestProtobufUtil { } private static LockServiceProtos.LockedResource createLockedResource( - LockServiceProtos.LockedResourceType resourceType, String resourceName, - LockServiceProtos.LockType lockType, - ProcedureProtos.Procedure exclusiveLockOwnerProcedure, int sharedLockCount) { + LockServiceProtos.LockedResourceType resourceType, String resourceName, + LockServiceProtos.LockType lockType, ProcedureProtos.Procedure exclusiveLockOwnerProcedure, + int sharedLockCount) { LockServiceProtos.LockedResource.Builder build = LockServiceProtos.LockedResource.newBuilder(); build.setResourceType(resourceType); build.setResourceName(resourceName); @@ -448,94 +434,65 @@ public class TestProtobufUtil { ProcedureProtos.Procedure procedure = builder.build(); String procJson = ProtobufUtil.toProcedureJson(Lists.newArrayList(procedure)); - assertEquals("[{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"1\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"," - + "\"stateMessage\":[{\"value\":\"QQ==\"}]" - + "}]", procJson); + assertEquals("[{" + "\"className\":\"java.lang.Object\"," + "\"procId\":\"1\"," + + "\"submittedTime\":\"0\"," + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"," + + "\"stateMessage\":[{\"value\":\"QQ==\"}]" + "}]", procJson); } @Test public void testServerLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.SERVER, "server", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.SERVER, "server", LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"SERVER\"," - + "\"resourceName\":\"server\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"SERVER\"," + "\"resourceName\":\"server\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", lockJson); } @Test public void testNamespaceLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.NAMESPACE, "ns", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.NAMESPACE, "ns", LockServiceProtos.LockType.EXCLUSIVE, createProcedure(2), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"NAMESPACE\"," - + "\"resourceName\":\"ns\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"2\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"NAMESPACE\"," + 
"\"resourceName\":\"ns\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"2\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + "}]", + lockJson); } @Test public void testTableLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.TABLE, "table", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.TABLE, "table", LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"TABLE\"," - + "\"resourceName\":\"table\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"TABLE\"," + "\"resourceName\":\"table\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", lockJson); } @Test public void testRegionLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.REGION, "region", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.REGION, "region", LockServiceProtos.LockType.EXCLUSIVE, createProcedure(3), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"REGION\"," - + "\"resourceName\":\"region\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"3\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"REGION\"," + "\"resourceName\":\"region\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"3\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + "}]", + lockJson); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to true. */ @Test public void testCellConversionWithTags() { @@ -546,7 +503,7 @@ public class TestProtobufUtil { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(1, decodedTags.size()); + assertEquals(1, decodedTags.size()); Tag decodedTag = decodedTags.get(0); assertEquals(TAG_TYPE, decodedTag.getType()); assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); @@ -572,8 +529,8 @@ public class TestProtobufUtil { /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to false. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to false. 
*/ @Test public void testCellConversionWithoutTags() { @@ -583,14 +540,13 @@ public class TestProtobufUtil { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to false - * and decoding of tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to false and decoding of tags is set to true. */ @Test public void testTagEncodeFalseDecodeTrue() { @@ -600,14 +556,13 @@ public class TestProtobufUtil { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to true - * and decoding of tags is set to false. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to true and decoding of tags is set to false. */ @Test public void testTagEncodeTrueDecodeFalse() { @@ -617,6 +572,6 @@ public class TestProtobufUtil { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java index 808e245062a..cae296b4d0e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,32 +27,17 @@ import java.util.Map; import java.util.Set; /** - * Utility class to check whether a given class conforms to builder-style: - * Foo foo = - * new Foo() - * .setBar(bar) - * .setBaz(baz) + * Utility class to check whether a given class conforms to builder-style: Foo foo = new Foo() + * .setBar(bar) .setBaz(baz) */ public final class BuilderStyleTest { - private BuilderStyleTest() {} + private BuilderStyleTest() { + } /* * If a base class Foo declares a method setFoo() returning Foo, then the subclass should - * re-declare the methods overriding the return class with the subclass: - * - * class Foo { - * Foo setFoo() { - * .. - * return this; - * } - * } - * - * class Bar { - * Bar setFoo() { - * return (Bar) super.setFoo(); - * } - * } - * + * re-declare the methods overriding the return class with the subclass: class Foo { Foo setFoo() + * { .. return this; } } class Bar { Bar setFoo() { return (Bar) super.setFoo(); } } */ @SuppressWarnings("rawtypes") public static void assertClassesAreBuilderStyle(Class... 
classes) { @@ -66,13 +51,13 @@ public final class BuilderStyleTest { } Class ret = method.getReturnType(); if (method.getName().startsWith("set") || method.getName().startsWith("add")) { - System.out.println(" " + clazz.getSimpleName() + "." + method.getName() + "() : " - + ret.getSimpleName()); + System.out.println( + " " + clazz.getSimpleName() + "." + method.getName() + "() : " + ret.getSimpleName()); // because of subclass / super class method overrides, we group the methods fitting the // same signatures because we get two method definitions from java reflection: // Mutation.setDurability() : Mutation - // Delete.setDurability() : Mutation + // Delete.setDurability() : Mutation // Delete.setDurability() : Delete String sig = method.getName(); for (Class param : method.getParameterTypes()) { @@ -97,8 +82,8 @@ public final class BuilderStyleTest { } } String errorMsg = "All setXXX()|addXX() methods in " + clazz.getSimpleName() - + " should return a " + clazz.getSimpleName() + " object in builder style. " - + "Offending method:" + e.getValue().iterator().next().getName(); + + " should return a " + clazz.getSimpleName() + " object in builder style. " + + "Offending method:" + e.getValue().iterator().next().getName(); assertTrue(errorMsg, found); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java index 314cae9e175..fbf92f64139 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,7 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hbase.util.PoolMap.PoolType; -import org.junit.After; import org.junit.Before; public abstract class PoolMapTestBase { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java index 2fd73caea46..d7ce6265b8c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestRoundRobinPoolMap extends PoolMapTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRoundRobinPoolMap.class); + HBaseClassTestRule.forClass(TestRoundRobinPoolMap.class); @Override protected PoolType getPoolType() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java index 2f497c6fdfb..45f533f1a73 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestThreadLocalPoolMap extends PoolMapTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestThreadLocalPoolMap.class); + HBaseClassTestRule.forClass(TestThreadLocalPoolMap.class); @Override protected PoolType getPoolType() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java index a8b7644c52a..c6d61ca4457 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public class TestZNodePaths { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZNodePaths.class); + HBaseClassTestRule.forClass(TestZNodePaths.class); @Test public void testIsClientReadable() { diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index e4f1a03744f..2d1b33ae15e 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,113 +31,6 @@ Apache HBase - Common Common functionality for HBase - - - - src/main/resources/ - - hbase-default.xml - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - - maven-assembly-plugin - - true - - - - maven-antrun-plugin - - - process-resources - - - - - - - run - - - - - generate-Version-information - generate-sources - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - versionInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - hbase-default.xml - - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -260,6 +153,112 @@ + + + + src/main/resources/ + + hbase-default.xml + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + + maven-assembly-plugin + + true + + + + maven-antrun-plugin + + + + run + + process-resources + + + + + + + + + generate-Version-information + + run + + generate-sources + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + versionInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + hbase-default.xml + + + + + net.revelc.code + warbucks-maven-plugin + + + + @@ -272,10 +271,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -309,14 +308,14 @@ - hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -334,10 +333,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. 
hbase-compression-aircompressor Apache HBase - Compression - Aircompressor Pure Java compression support using Aircompressor codecs - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -165,6 +131,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java index d5fd3cfdd3d..3378c9030b8 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java @@ -1,24 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Compressor; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -26,14 +27,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Compressor; - /** * Hadoop compressor glue for aircompressor compressors. 
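A note on the HadoopCompressor glue whose hunks follow: its reset() method (re-indented below) swaps in a fresh aircompressor compressor via reflection rather than reusing internal state. A small sketch of that pattern, assuming only that the wrapped class has a public no-argument constructor, as io.airlift.compress.lz4.Lz4Compressor does:

import io.airlift.compress.lz4.Lz4Compressor;

public class ResetByReflectionSketch {
  public static void main(String[] args) throws Exception {
    Lz4Compressor compressor = new Lz4Compressor();
    // Same idea as HadoopCompressor.reset(): replace the wrapped object with a brand-new
    // instance of the same concrete class obtained through its no-arg constructor.
    Lz4Compressor fresh = compressor.getClass().getDeclaredConstructor().newInstance();
    System.out.println("reset to a fresh " + fresh.getClass().getSimpleName());
  }
}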
*/ @InterfaceAudience.Private public abstract class HadoopCompressor - implements CanReinit, org.apache.hadoop.io.compress.Compressor { + implements CanReinit, org.apache.hadoop.io.compress.Compressor { protected static final Logger LOG = LoggerFactory.getLogger(HadoopCompressor.class); protected T compressor; @@ -165,7 +164,7 @@ public abstract class HadoopCompressor public void reset() { LOG.trace("reset"); try { - compressor = (T)(compressor.getClass().getDeclaredConstructor().newInstance()); + compressor = (T) (compressor.getClass().getDeclaredConstructor().newInstance()); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java index 868094f32fc..737c802ad68 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java @@ -1,37 +1,36 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Decompressor; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Decompressor; - /** * Hadoop decompressor glue for aircompressor decompressors. 
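The compressor and decompressor glue classes adapt aircompressor's byte-array API to Hadoop's streaming interfaces. A rough round-trip sketch against the io.airlift.compress API as I understand it (the maxCompressedLength/compress/decompress signatures are my recollection of that library, not taken from this patch):

import io.airlift.compress.lz4.Lz4Compressor;
import io.airlift.compress.lz4.Lz4Decompressor;
import java.nio.charset.StandardCharsets;

public class AirliftRoundTripSketch {
  public static void main(String[] args) {
    byte[] input = "hello hello hello hello".getBytes(StandardCharsets.UTF_8);

    // Compress into a buffer sized by the library's worst-case estimate.
    Lz4Compressor compressor = new Lz4Compressor();
    byte[] compressed = new byte[compressor.maxCompressedLength(input.length)];
    int compressedLength =
      compressor.compress(input, 0, input.length, compressed, 0, compressed.length);

    // Decompress back into a buffer of the original size.
    Lz4Decompressor decompressor = new Lz4Decompressor();
    byte[] restored = new byte[input.length];
    int restoredLength =
      decompressor.decompress(compressed, 0, compressedLength, restored, 0, restored.length);

    System.out.println(new String(restored, 0, restoredLength, StandardCharsets.UTF_8));
  }
}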
*/ @InterfaceAudience.Private public class HadoopDecompressor - implements org.apache.hadoop.io.compress.Decompressor { + implements org.apache.hadoop.io.compress.Decompressor { protected static final Logger LOG = LoggerFactory.getLogger(HadoopDecompressor.class); protected T decompressor; @@ -104,7 +103,7 @@ public class HadoopDecompressor public void reset() { LOG.trace("reset"); try { - decompressor = (T)(decompressor.getClass().getDeclaredConstructor().newInstance()); + decompressor = (T) (decompressor.getClass().getDeclaredConstructor().newInstance()); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java index 81199531ad9..70ea7943e8d 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lz4.Lz4Compressor; +import io.airlift.compress.lz4.Lz4Decompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -33,9 +35,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lz4.Lz4Compressor; -import io.airlift.compress.lz4.Lz4Decompressor; - /** * Hadoop Lz4 codec implemented with aircompressor. *
<p>
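The codec classes whose hunks follow (Lz4Codec, LzoCodec, SnappyCodec, ZstdCodec) expose this glue through Hadoop's standard CompressionCodec interface, wrapping it in BlockCompressorStream/BlockDecompressorStream. A hedged usage sketch built only on the public Hadoop API plus the Lz4Codec class name from this patch:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Any codec from this module could be substituted here.
    CompressionCodec codec = ReflectionUtils.newInstance(
      org.apache.hadoop.hbase.io.compress.aircompressor.Lz4Codec.class, conf);

    // Compress a small payload; closing the stream finishes the current block.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (CompressionOutputStream out = codec.createOutputStream(bytes)) {
      out.write("some block of data".getBytes(StandardCharsets.UTF_8));
    }

    // Read it back through the matching decompressor stream.
    try (CompressionInputStream in =
        codec.createInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
      System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
    }
  }
}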
      @@ -79,7 +78,7 @@ public class Lz4Codec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -90,7 +89,7 @@ public class Lz4Codec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java index 57ac8daada7..5395dba5d0d 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lzo.LzoCompressor; +import io.airlift.compress.lzo.LzoDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -33,9 +35,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lzo.LzoCompressor; -import io.airlift.compress.lzo.LzoDecompressor; - /** * Hadoop Lzo codec implemented with aircompressor. *
<p>
      @@ -79,7 +78,7 @@ public class LzoCodec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -90,7 +89,7 @@ public class LzoCodec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java index 3669b1d9d2a..2448404191f 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.snappy.SnappyCompressor; +import io.airlift.compress.snappy.SnappyDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -33,9 +35,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.snappy.SnappyCompressor; -import io.airlift.compress.snappy.SnappyDecompressor; - /** * Hadoop snappy codec implemented with aircompressor. *
<p>
      @@ -79,7 +78,7 @@ public class SnappyCodec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -90,7 +89,7 @@ public class SnappyCodec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java index f653dc0f676..3e8d345c660 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.zstd.ZstdCompressor; +import io.airlift.compress.zstd.ZstdDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -33,21 +35,17 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.zstd.ZstdCompressor; -import io.airlift.compress.zstd.ZstdDecompressor; - /** * Hadoop codec implementation for Zstandard, implemented with aircompressor. *
<p>
      - * Unlike the other codecs this one should be considered as under development and unstable - * (as in changing), reflecting the status of aircompressor's zstandard implementation. + * Unlike the other codecs this one should be considered as under development and unstable (as in + * changing), reflecting the status of aircompressor's zstandard implementation. *
<p>
      - * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. - * There are issues with both framing and limitations of the aircompressor zstandard - * compressor. This codec can be used as an alternative to the native codec, if the native - * codec cannot be made available and/or an eventual migration will never be necessary - * (i.e. this codec's performance meets anticipated requirements). Once you begin using this - * alternative you will be locked into it. + * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. There are + * issues with both framing and limitations of the aircompressor zstandard compressor. This codec + * can be used as an alternative to the native codec, if the native codec cannot be made available + * and/or an eventual migration will never be necessary (i.e. this codec's performance meets + * anticipated requirements). Once you begin using this alternative you will be locked into it. */ @InterfaceAudience.Private public class ZstdCodec implements Configurable, CompressionCodec { @@ -88,7 +86,7 @@ public class ZstdCodec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -99,7 +97,7 @@ public class ZstdCodec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java index 5ddee513cd6..e71b2a4f771 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
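Given the NOTE above about format incompatibility with the Hadoop native zstandard codec, switching implementations after data has been written is not an option. If you do choose the aircompressor implementation, it is typically selected through configuration; the exact key below is an assumption from memory (hbase.io.compress.zstd.codec in HBase 2.5), so verify it against your version before relying on it:

import org.apache.hadoop.conf.Configuration;

public class ZstdCodecSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed key name: the per-algorithm codec class key for ZSTD. Confirm against
    // your HBase release; only the codec class name itself comes from this patch.
    conf.set("hbase.io.compress.zstd.codec",
      "org.apache.hadoop.hbase.io.compress.aircompressor.ZstdCodec");
    // Data written with this implementation is not readable by the Hadoop native zstd
    // codec (and vice versa), so pick one implementation and stay on it.
    System.out.println(conf.get("hbase.io.compress.zstd.codec"));
  }
}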
See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); + HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java index 143db468e58..b0e41d08802 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzo extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLzo.class); + HBaseClassTestRule.forClass(TestHFileCompressionLzo.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java index e9b08cb937f..2f8296ce538 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
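Most of the remaining test hunks re-indent the same two-piece boilerplate: a @Category annotation and a CLASS_RULE constant whose continuation line spotless now aligns differently. For orientation, the recurring shape is roughly the following (TestSomeCodec and the test body are placeholders, not classes in this patch):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ IOTests.class, SmallTests.class })
public class TestSomeCodec {

  // The continuation line of this declaration is what most of these hunks re-indent.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSomeCodec.class);

  @Test
  public void testRoundTrip() throws Exception {
    // placeholder body
  }
}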
See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); + HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java index c3a52d808aa..dc46a40cadf 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); + HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java index db1cc7214fd..0ba491b8465 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestLz4Codec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLz4Codec.class); + HBaseClassTestRule.forClass(TestLz4Codec.class); @Test public void testLz4CodecSmall() throws Exception { @@ -39,7 +40,7 @@ public class TestLz4Codec extends CompressionTestBase { public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // high compressability + codecLargeTest(new Lz4Codec(), 10); // high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java index bd1b75aecc1..15e6700c7bb 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestLzoCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLzoCodec.class); + HBaseClassTestRule.forClass(TestLzoCodec.class); @Test public void testLzoCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public class TestLzoCodec extends CompressionTestBase { public void testLzoCodecLarge() throws Exception { codecLargeTest(new LzoCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new LzoCodec(), 2); - codecLargeTest(new LzoCodec(), 10); // very high compressability + codecLargeTest(new LzoCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java index 98e628121c9..5bcdc0fb749 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestSnappyCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnappyCodec.class); + HBaseClassTestRule.forClass(TestSnappyCodec.class); @Test public void testSnappyCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public class TestSnappyCodec extends CompressionTestBase { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java index 23d7777f07c..34a7dcfedfc 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLz4 extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLz4.class); + HBaseClassTestRule.forClass(TestWALCompressionLz4.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java index 997d6873c61..9c5bc8838c0 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLzo extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLzo.class); + HBaseClassTestRule.forClass(TestWALCompressionLzo.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java index 924e46a77ee..72813bcbd65 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionSnappy extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); + HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java index 0de6de2b027..0f5c80ce269 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionZstd extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionZstd.class); + HBaseClassTestRule.forClass(TestWALCompressionZstd.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java index 707fee2aded..899b441bf13 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestZstdCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdCodec.class); + HBaseClassTestRule.forClass(TestZstdCodec.class); @Test public void testZstdCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public class TestZstdCodec extends CompressionTestBase { public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-brotli/pom.xml b/hbase-compression/hbase-compression-brotli/pom.xml index 4f60bd36711..1f176927d4b 100644 --- a/hbase-compression/hbase-compression-brotli/pom.xml +++ b/hbase-compression/hbase-compression-brotli/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. 
hbase-compression-brotli Apache HBase - Compression - Brotli Compression support using Brotli4j - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -150,6 +116,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java index d052d6a0838..16aa764ba3b 100644 --- a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java +++ b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.brotli; @@ -78,7 +79,7 @@ public class BrotliCodec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -89,7 +90,7 @@ public class BrotliCodec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java index f6989064b76..f52482d5ca5 100644 --- a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java +++ b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.brotli; import com.aayushatharva.brotli4j.Brotli4jLoader; import com.aayushatharva.brotli4j.encoder.Encoder; import com.aayushatharva.brotli4j.encoder.Encoders; - import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; diff --git a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java index 8f167cd3960..9174644a959 100644 --- a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java +++ b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.brotli; @@ -86,7 +87,6 @@ public class BrotliDecompressor implements Decompressor { return 0; } - @Override public void end() { LOG.trace("end"); diff --git a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java index 50de8aae607..f83a4ab728f 100644 --- a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java +++ b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.brotli; @@ -28,7 +29,7 @@ public class TestBrotliCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBrotliCodec.class); + HBaseClassTestRule.forClass(TestBrotliCodec.class); @Test public void testBrotliCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public class TestBrotliCodec extends CompressionTestBase { public void testBrotliCodecLarge() throws Exception { codecLargeTest(new BrotliCodec(), 1.1); // poor compressability codecLargeTest(new BrotliCodec(), 2); - codecLargeTest(new BrotliCodec(), 10); // very high compressability + codecLargeTest(new BrotliCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java index 7feb26ed1f2..3f11132ac9b 100644 --- a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java +++ b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionBrotli extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionBrotli.class); + HBaseClassTestRule.forClass(TestHFileCompressionBrotli.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java index ac25951d2d4..e37276fed6d 100644 --- a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java +++ b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionBrotli extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionBrotli.class); + HBaseClassTestRule.forClass(TestWALCompressionBrotli.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-lz4/pom.xml b/hbase-compression/hbase-compression-lz4/pom.xml index c891a6a2a7c..d4720a37f16 100644 --- a/hbase-compression/hbase-compression-lz4/pom.xml +++ b/hbase-compression/hbase-compression-lz4/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. hbase-compression-lz4 Apache HBase - Compression - LZ4 Pure Java compression support using lz4-java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -154,6 +131,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java index d6b0365d63d..8f0f5dee672 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -76,7 +76,7 @@ public class Lz4Codec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -87,7 +87,7 @@ public class Lz4Codec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java index 61046cd2050..243d227a8be 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java @@ -1,24 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; - +import net.jpountz.lz4.LZ4Compressor; +import net.jpountz.lz4.LZ4Factory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -27,9 +29,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Compressor; -import net.jpountz.lz4.LZ4Factory; - /** * Hadoop compressor glue for lz4-java. */ diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java index 5c46671ab91..3aef246ec7d 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java @@ -1,33 +1,32 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; - +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4SafeDecompressor; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Factory; -import net.jpountz.lz4.LZ4SafeDecompressor; - /** * Hadoop decompressor glue for lz4-java. */ diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java index 8f61829f59a..279b1f074d6 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); + HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java index 0c237e105ba..bd1cebfda7b 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; @@ -28,7 +29,7 @@ public class TestLz4Codec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLz4Codec.class); + HBaseClassTestRule.forClass(TestLz4Codec.class); @Test public void testLz4CodecSmall() throws Exception { @@ -38,8 +39,8 @@ public class TestLz4Codec extends CompressionTestBase { @Test public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec - codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // very high compressability + codecLargeTest(new Lz4Codec(), 2); + codecLargeTest(new Lz4Codec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java index fdf9b0a9cc1..81b5d943dc6 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLz4 extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLz4.class); + HBaseClassTestRule.forClass(TestWALCompressionLz4.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-snappy/pom.xml b/hbase-compression/hbase-compression-snappy/pom.xml index 069896dbf16..8da6b79aa17 100644 --- a/hbase-compression/hbase-compression-snappy/pom.xml +++ b/hbase-compression/hbase-compression-snappy/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. 
hbase-compression-snappy Apache HBase - Compression - Snappy Pure Java compression support using Xerial Snappy - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -154,6 +131,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java index aae07b4d4ed..b8048ac0406 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -76,7 +76,7 @@ public class SnappyCodec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -87,7 +87,7 @@ public class SnappyCodec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, Snappy.maxCompressedLength(bufferSize) - bufferSize); // overhead only diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java index 2a43ca61dca..ba3fe470ca2 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java @@ -1,24 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -26,7 +26,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.xerial.snappy.Snappy; /** diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java index 0bad64971d6..e7934b3e242 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java @@ -1,30 +1,29 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.xerial.snappy.Snappy; /** diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java index 0343e8b0a5a..638713ed740 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); + HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java index e882d79df52..1c879db8925 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; @@ -28,7 +29,7 @@ public class TestSnappyCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnappyCodec.class); + HBaseClassTestRule.forClass(TestSnappyCodec.class); @Test public void testSnappyCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public class TestSnappyCodec extends CompressionTestBase { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java index ba59b652534..dfbb63d0f6c 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionSnappy extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); + HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-xz/pom.xml b/hbase-compression/hbase-compression-xz/pom.xml index 23452af8183..24229f6af3e 100644 --- a/hbase-compression/hbase-compression-xz/pom.xml +++ b/hbase-compression/hbase-compression-xz/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. hbase-compression-xz Apache HBase - Compression - XZ Pure Java compression support using XZ for Java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -138,6 +115,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java index 8509aa05ddc..d4b8ce01148 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -76,7 +76,7 @@ public class LzmaCodec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -87,7 +87,7 @@ public class LzmaCodec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java index 7174942bc7d..08eb33301e8 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java @@ -1,28 +1,28 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; import java.io.IOException; import java.nio.BufferOverflowException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; +import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -112,7 +112,7 @@ public class LzmaCompressor implements Compressor { } }) { try (LZMAOutputStream out = - new LZMAOutputStream(lowerOut, lzOptions, uncompressed, ARRAY_CACHE)) { + new LZMAOutputStream(lowerOut, lzOptions, uncompressed, ARRAY_CACHE)) { out.write(inBuf.array(), inBuf.arrayOffset(), uncompressed); } } diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java index 6c3399dfb26..4da49d913c8 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java @@ -1,24 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.ByteBufferInputStream; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java index 617e02dcbf0..af843c5a019 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzma extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLzma.class); + HBaseClassTestRule.forClass(TestHFileCompressionLzma.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java index 63978abe838..e5320da1677 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; @@ -29,7 +30,7 @@ public class TestLzmaCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLzmaCodec.class); + HBaseClassTestRule.forClass(TestLzmaCodec.class); @Test public void testLzmaCodecSmall() throws Exception { @@ -39,8 +40,8 @@ public class TestLzmaCodec extends CompressionTestBase { @Test public void testLzmaCodecLarge() throws Exception { codecLargeTest(new LzmaCodec(), 1.1); // poor compressability - codecLargeTest(new LzmaCodec(), 2); - codecLargeTest(new LzmaCodec(), 10); // very high compressability + codecLargeTest(new LzmaCodec(), 2); + codecLargeTest(new LzmaCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java index 89ce68b0600..ee937230cd2 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLzma extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLzma.class); + HBaseClassTestRule.forClass(TestWALCompressionLzma.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-zstd/pom.xml b/hbase-compression/hbase-compression-zstd/pom.xml index cb416517c56..f3751d298f1 100644 --- a/hbase-compression/hbase-compression-zstd/pom.xml +++ b/hbase-compression/hbase-compression-zstd/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. 
hbase-compression-zstd Apache HBase - Compression - ZStandard Pure Java compression support using zstd-jni - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -154,6 +131,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java index 521af5b25dd..6848f0dfc48 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.zstd; @@ -81,7 +82,7 @@ public class ZstdCodec implements Configurable, CompressionCodec { @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -92,10 +93,10 @@ public class ZstdCodec implements Configurable, CompressionCodec { @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, - (int)Zstd.compressBound(bufferSize) - bufferSize); // overhead only + (int) Zstd.compressBound(bufferSize) - bufferSize); // overhead only } @Override @@ -117,8 +118,7 @@ public class ZstdCodec implements Configurable, CompressionCodec { static int getLevel(Configuration conf) { return conf.getInt(ZSTD_LEVEL_KEY, - conf.getInt( - CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, + conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT)); } @@ -143,10 +143,8 @@ public class ZstdCodec implements Configurable, CompressionCodec { // Reference: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md static boolean isDictionary(byte[] dictionary) { - return (dictionary[0] == (byte)0x37 && - dictionary[1] == (byte)0xA4 && - dictionary[2] == (byte)0x30 && - dictionary[3] == (byte)0xEC); + return (dictionary[0] == (byte) 0x37 && dictionary[1] == (byte) 0xA4 + && dictionary[2] == (byte) 0x30 && dictionary[3] == (byte) 0xEC); } static int getDictionaryId(byte[] dictionary) { diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index ea45414ccb9..181c8dba06b 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -1,24 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -27,9 +29,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictCompress; - /** * Hadoop compressor glue for zstd-jni. */ diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index 6bfa84e1c59..dd962f72098 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -25,8 +28,6 @@ import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictDecompress; /** * Hadoop decompressor glue for zstd-java. diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java index 55a197b45f6..da8e1ae52bc 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); + HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); private static Configuration conf; diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java index e75de9b9c46..55d61cf83ec 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionZstd extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionZstd.class); + HBaseClassTestRule.forClass(TestWALCompressionZstd.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java index bf1c78cbc17..6a66ac5f0e1 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.zstd; @@ -31,7 +32,7 @@ public class TestZstdCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdCodec.class); + HBaseClassTestRule.forClass(TestZstdCodec.class); @Test public void testZstdCodecSmall() throws Exception { @@ -41,8 +42,8 @@ public class TestZstdCodec extends CompressionTestBase { @Test public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability - codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 2); + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java index 5a76a4531f2..2f5a9784ec4 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.zstd; @@ -53,7 +54,7 @@ public class TestZstdDictionary extends CompressionTestBase { public static void setUp() throws Exception { Configuration conf = new Configuration(); TEST_DATA = DictionaryCache.loadFromResource(conf, - DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024*1024); + DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024 * 1024); assertNotNull("Failed to load test data", TEST_DATA); } @@ -76,7 +77,7 @@ public class TestZstdDictionary extends CompressionTestBase { public static void main(String[] args) throws IOException { // Write 1000 1k blocks for training to the specified file // Train with: - // zstd --train -B1024 -o + // zstd --train -B1024 -o if (args.length < 1) { System.err.println("Usage: TestZstdCodec "); System.exit(-1); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java index dff3848f560..2a1877c77ab 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java @@ -85,8 +85,7 @@ public class TestZstdDictionarySplitMerge { final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName) .setCompressionType(Compression.Algorithm.ZSTD) - .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath) - .build()) + .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath).build()) .build(); final Admin admin = TEST_UTIL.getAdmin(); admin.createTable(td, new byte[][] { Bytes.toBytes(1) }); @@ -108,6 +107,7 @@ public class TestZstdDictionarySplitMerge { public boolean evaluate() throws Exception { return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3; } + @Override public String explainFailure() throws Exception { return "Split has not finished yet"; @@ -120,7 +120,7 @@ public class TestZstdDictionarySplitMerge { RegionInfo regionA = null; RegionInfo regionB = null; - for (RegionInfo region: admin.getRegions(tableName)) { + for (RegionInfo region : admin.getRegions(tableName)) { if (region.getStartKey().length == 0) { regionA = region; } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) { @@ -129,16 +129,14 @@ public class TestZstdDictionarySplitMerge { } assertNotNull(regionA); assertNotNull(regionB); - admin.mergeRegionsAsync(new byte[][] { - regionA.getRegionName(), - regionB.getRegionName() - }, false).get(30, TimeUnit.SECONDS); + admin + .mergeRegionsAsync(new byte[][] { regionA.getRegionName(), regionB.getRegionName() }, false) + .get(30, TimeUnit.SECONDS); assertEquals(2, admin.getRegions(tableName).size()); ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(); assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName) .getRegionLocation(Bytes.toBytes(1), true).getServerName()); - try (AsyncConnection asyncConn = - ConnectionFactory.createAsyncConnection(conf).get()) { + try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) { assertEquals(expected, asyncConn.getRegionLocator(tableName) .getRegionLocation(Bytes.toBytes(1), 
true).get().getServerName()); } diff --git a/hbase-compression/pom.xml b/hbase-compression/pom.xml index 23a0fa091e5..eec94ff38cc 100644 --- a/hbase-compression/pom.xml +++ b/hbase-compression/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-compression + pom Apache HBase - Compression Pure Java compression support parent - pom hbase-compression-aircompressor @@ -81,10 +81,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index 83ae8507b9f..64fd2177264 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -33,51 +33,6 @@ true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - ${basedir}/../hbase-protocol/src/main/protobuf - - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -271,6 +226,51 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + + ${basedir}/../hbase-protocol/src/main/protobuf + + + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 8101654ab83..25510438520 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -24,7 +24,6 @@ import com.google.protobuf.ByteString; import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; - import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; @@ -35,7 +34,6 @@ import java.util.NavigableMap; import java.util.NavigableSet; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -58,29 +56,26 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This client class is for invoking the aggregate functions deployed on the - * Region Server side via the AggregateService. This class will implement the - * supporting functionality for summing/processing the individual results - * obtained from the AggregateService for each region. + * This client class is for invoking the aggregate functions deployed on the Region Server side via + * the AggregateService. This class will implement the supporting functionality for + * summing/processing the individual results obtained from the AggregateService for each region. *

- * This will serve as the client side handler for invoking the aggregate
- * functions.
- * For all aggregate functions,
+ * This will serve as the client side handler for invoking the aggregate functions. For all
+ * aggregate functions,
 * <ul>
 * <li>start row < end row is an essential condition (if they are not
 * {@link HConstants#EMPTY_BYTE_ARRAY})
- * <li>Column family can't be null. In case where multiple families are
- * provided, an IOException will be thrown. An optional column qualifier can
- * also be defined.</li>
- * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
- * parameter type. For average and std, it returns a double value. For row
- * count, it returns a long value.</li>
+ * <li>Column family can't be null. In case where multiple families are provided, an IOException
+ * will be thrown. An optional column qualifier can also be defined.</li>
+ * <li>For methods to find maximum, minimum, sum, rowcount, it returns the parameter type. For
+ * average and std, it returns a double value. For row count, it returns a long value.</li>
 * </ul>
- * <p>Call {@link #close()} when done.
+ * <p>
      + * Call {@link #close()} when done. */ @InterfaceAudience.Public public class AggregationClient implements Closeable { - // TODO: This class is not used. Move to examples? + // TODO: This class is not used. Move to examples? private static final Logger log = LoggerFactory.getLogger(AggregationClient.class); private final Connection connection; @@ -152,38 +147,35 @@ public class AggregationClient implements Closeable { } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return max val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R max( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R + max(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return max(table, ci, scan); } } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return max val <> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
*/ - public - R max(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R max(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MaxCallBack implements Batch.Callback { R max = null; @@ -199,61 +191,58 @@ public class AggregationClient implements Closeable { } MaxCallBack aMaxCallBack = new MaxCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMax(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMax(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, aMaxCallBack); + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); + } + return null; + } + }, aMaxCallBack); return aMaxCallBack.getMax(); } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R min( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R + min(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return min(table, ci, scan); } } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. * @param table table to scan. 
- * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R min(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R min(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MinCallBack implements Batch.Callback { private R min = null; @@ -270,68 +259,66 @@ public class AggregationClient implements Closeable { MinCallBack minCallBack = new MinCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMin(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMin(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, minCallBack); + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); + } + return null; + } + }, minCallBack); log.debug("Min fom all regions is: " + minCallBack.getMinimum()); return minCallBack.getMinimum(); } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. 
* @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public long rowCount( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public long + rowCount(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return rowCount(table, ci, scan); } } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
*/ - public - long rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public long + rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true); class RowNumCallback implements Batch.Callback { private final AtomicLong rowCountL = new AtomicLong(0); @@ -348,57 +335,56 @@ public class AggregationClient implements Closeable { RowNumCallback rowNum = new RowNumCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public Long call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getRowNum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); - ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); - bb.rewind(); - return bb.getLong(); + new Batch.Call() { + @Override + public Long call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getRowNum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, rowNum); + byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); + ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); + bb.rewind(); + return bb.getLong(); + } + }, rowNum); return rowNum.getRowNumCount(); } /** - * It sums up the value returned from various regions. In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public S sum( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S + sum(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return sum(table, ci, scan); } } /** - * It sums up the value returned from various regions. In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param table table to scan. 
- * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - S sum(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S sum(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class SumCallBack implements Batch.Callback { @@ -415,59 +401,59 @@ public class AggregationClient implements Closeable { } SumCallBack sumCallBack = new SumCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public S call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - // Not sure what is going on here why I have to do these casts. TODO. - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getSum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() == 0) { - return null; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - return s; + new Batch.Call() { + @Override + public S call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + // Not sure what is going on here why I have to do these casts. TODO. + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getSum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, sumCallBack); + if (response.getFirstPartCount() == 0) { + return null; + } + ByteString b = response.getFirstPart(0); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + return s; + } + }, sumCallBack); return sumCallBack.getSumResult(); } /** - * It computes average while fetching sum and row count from all the - * corresponding regions. Approach is to compute a global sum of region level - * sum and rowcount and then compute the average. + * It computes average while fetching sum and row count from all the corresponding regions. + * Approach is to compute a global sum of region level sum and rowcount and then compute the + * average. * @param tableName the name of the table to scan - * @param scan the HBase scan object to use to read data from HBase - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @param scan the HBase scan object to use to read data from HBase + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
*/ private Pair getAvgArgs( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return getAvgArgs(table, ci, scan); } } /** - * It computes average while fetching sum and row count from all the - * corresponding regions. Approach is to compute a global sum of region level - * sum and rowcount and then compute the average. + * It computes average while fetching sum and row count from all the corresponding regions. + * Approach is to compute a global sum of region level sum and rowcount and then compute the + * average. * @param table table to scan. - * @param scan the HBase scan object to use to read data from HBase - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @param scan the HBase scan object to use to read data from HBase + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - private - Pair getAvgArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair + getAvgArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class AvgCallBack implements Batch.Callback> { S sum = null; @@ -486,90 +472,85 @@ public class AggregationClient implements Closeable { AvgCallBack avgCallBack = new AvgCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public Pair call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getAvg(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair pair = new Pair<>(null, 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - pair.setFirst(s); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call>() { + @Override + public Pair call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getAvg(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair pair = new Pair<>(null, 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, avgCallBack); + ByteString b = response.getFirstPart(0); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + pair.setFirst(s); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, avgCallBack); return avgCallBack.getAvgArgs(); } /** - * This is the client side interface/handle for 
calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double avg(final TableName tableName, final ColumnInterpreter ci, - Scan scan) throws Throwable { + public double + avg(final TableName tableName, final ColumnInterpreter ci, Scan scan) + throws Throwable { Pair p = getAvgArgs(tableName, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * This is the client side interface/handle for calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public double avg( - final Table table, final ColumnInterpreter ci, Scan scan) - throws Throwable { + public double + avg(final Table table, final ColumnInterpreter ci, Scan scan) throws Throwable { Pair p = getAvgArgs(table, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * It computes a global standard deviation for a given column and its value. - * Standard deviation is square root of (average of squares - - * average*average). From individual regions, it obtains sum, square sum and - * number of rows. With these, the above values are computed to get the global - * std. + * It computes a global standard deviation for a given column and its value. Standard deviation is + * square root of (average of squares - average*average). 
From individual regions, it obtains sum, + * square sum and number of rows. With these, the above values are computed to get the global std. * @param table table to scan. - * @param scan the HBase scan object to use to read data from HBase + * @param scan the HBase scan object to use to read data from HBase * @return standard deviations - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - private - Pair, Long> getStdArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair, Long> + getStdArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class StdCallback implements Batch.Callback, Long>> { long rowCountVal = 0L; @@ -595,75 +576,72 @@ public class AggregationClient implements Closeable { StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call, Long>>() { - @Override - public Pair, Long> call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getStd(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - pair.setFirst(list); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call, Long>>() { + @Override + public Pair, Long> call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getStd(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, stdCallback); + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); + } + pair.setFirst(list); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, stdCallback); return stdCallback.getStdParams(); } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. 
So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double std(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public double std( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return std(table, ci, scan); } } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public double std( - final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public double + std(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { Pair, Long> p = getStdArgs(table, ci, scan); double res = 0d; double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond()); @@ -674,21 +652,19 @@ public class AggregationClient implements Closeable { } /** - * It helps locate the region with median for a given column whose weight - * is specified in an optional column. - * From individual regions, it obtains sum of values and sum of weights. + * It helps locate the region with median for a given column whose weight is specified in an + * optional column. From individual regions, it obtains sum of values and sum of weights. * @param table table to scan. 
- * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase - * @return pair whose first element is a map between start row of the region - * and (sum of values, sum of weights) for the region, the second element is - * (sum of values, sum of weights) for all the regions chosen - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase + * @return pair whose first element is a map between start row of the region and (sum of values, + * sum of weights) for the region, the second element is (sum of values, sum of weights) + * for all the regions chosen + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ private - Pair>, List> - getMedianArgs(final Table table, + Pair>, List> getMedianArgs(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); final NavigableMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -712,64 +688,63 @@ public class AggregationClient implements Closeable { } StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public List call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMedian(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - return list; + new Batch.Call>() { + @Override + public List call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMedian(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, stdCallback); + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); + } + return list; + } + + }, stdCallback); return stdCallback.getMedianParams(); } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. 
* @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R median(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public R median( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return median(table, ci, scan); } } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
*/ - public - R median(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public R median(final Table table, + ColumnInterpreter ci, Scan scan) throws Throwable { Pair>, List> p = getMedianArgs(table, ci, scan); byte[] startRow = null; byte[] colFamily = scan.getFamilies()[0]; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java index 6d804e43d73..a8a87aaa2ff 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java @@ -19,13 +19,11 @@ package org.apache.hadoop.hbase.client.coprocessor; import com.google.protobuf.ByteString; import com.google.protobuf.Message; - import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; @@ -39,18 +37,21 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public final class AggregationHelper { - private AggregationHelper() {} + private AggregationHelper() { + } /** - * @param scan the HBase scan object to use to read data from HBase + * @param scan the HBase scan object to use to read data from HBase * @param canFamilyBeAbsent whether column family can be absent in familyMap of scan */ private static void validateParameters(Scan scan, boolean canFamilyBeAbsent) throws IOException { - if (scan == null + if ( + scan == null || (Bytes.equals(scan.getStartRow(), scan.getStopRow()) - && !Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)) + && !Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)) || ((Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) > 0) - && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW))) { + && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) + ) { throw new IOException("Agg client Exception: Startrow should be smaller than Stoprow"); } else if (!canFamilyBeAbsent) { if (scan.getFamilyMap().size() != 1) { @@ -60,8 +61,8 @@ public final class AggregationHelper { } static AggregateRequest - validateArgAndGetPB(Scan scan, ColumnInterpreter ci, boolean canFamilyBeAbsent) - throws IOException { + validateArgAndGetPB(Scan scan, ColumnInterpreter ci, boolean canFamilyBeAbsent) + throws IOException { validateParameters(scan, canFamilyBeAbsent); final AggregateRequest.Builder requestBuilder = AggregateRequest.newBuilder(); requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName()); @@ -78,15 +79,15 @@ public final class AggregationHelper { * assumed to be a PB Message subclass, and the instance is created using parseFrom method on the * passed ByteString. * @param runtimeClass the runtime type of the class - * @param position the position of the argument in the class declaration - * @param b the ByteString which should be parsed to get the instance created + * @param position the position of the argument in the class declaration + * @param b the ByteString which should be parsed to get the instance created * @return the instance * @throws IOException Either we couldn't instantiate the method object, or "parseFrom" failed. 
*/ @SuppressWarnings("unchecked") // Used server-side too by Aggregation Coprocesor Endpoint. Undo this interdependence. TODO. public static T getParsedGenericInstance(Class runtimeClass, int position, - ByteString b) throws IOException { + ByteString b) throws IOException { Type type = runtimeClass.getGenericSuperclass(); Type argType = ((ParameterizedType) type).getActualTypeArguments()[position]; Class classType = (Class) argType; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java index b3003c4e141..b477fbef988 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,10 +52,11 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Public public final class AsyncAggregationClient { - private AsyncAggregationClient() {} + private AsyncAggregationClient() { + } private static abstract class AbstractAggregationCallback - implements CoprocessorCallback { + implements CoprocessorCallback { private final CompletableFuture future; protected boolean finished = false; @@ -82,8 +83,7 @@ public final class AsyncAggregationClient { completeExceptionally(error); } - protected abstract void aggregate(RegionInfo region, AggregateResponse resp) - throws IOException; + protected abstract void aggregate(RegionInfo region, AggregateResponse resp) throws IOException; @Override public synchronized void onRegionComplete(RegionInfo region, AggregateResponse resp) { @@ -107,15 +107,15 @@ public final class AsyncAggregationClient { } private static R - getCellValueFromProto(ColumnInterpreter ci, AggregateResponse resp, - int firstPartIndex) throws IOException { + getCellValueFromProto(ColumnInterpreter ci, AggregateResponse resp, + int firstPartIndex) throws IOException { Q q = getParsedGenericInstance(ci.getClass(), 3, resp.getFirstPart(firstPartIndex)); return ci.getCellValueFromProto(q); } private static S - getPromotedValueFromProto(ColumnInterpreter ci, AggregateResponse resp, - int firstPartIndex) throws IOException { + getPromotedValueFromProto(ColumnInterpreter ci, AggregateResponse resp, + int firstPartIndex) throws IOException { T t = getParsedGenericInstance(ci.getClass(), 4, resp.getFirstPart(firstPartIndex)); return ci.getPromotedValueFromProto(t); } @@ -125,7 +125,7 @@ public final class AsyncAggregationClient { } public static CompletableFuture - max(AsyncTable table, ColumnInterpreter ci, Scan scan) { + max(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -154,15 +154,15 @@ public final class AsyncAggregationClient { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getMax(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . 
coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getMax(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static CompletableFuture - min(AsyncTable table, ColumnInterpreter ci, Scan scan) { + min(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -192,16 +192,16 @@ public final class AsyncAggregationClient { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getMin(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getMin(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static - CompletableFuture rowCount(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + rowCount(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -225,15 +225,15 @@ public final class AsyncAggregationClient { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getRowNum(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getRowNum(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static CompletableFuture - sum(AsyncTable table, ColumnInterpreter ci, Scan scan) { + sum(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -259,16 +259,16 @@ public final class AsyncAggregationClient { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getSum(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getSum(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static - CompletableFuture avg(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + avg(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -296,16 +296,16 @@ public final class AsyncAggregationClient { } }; table - . 
coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getAvg(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getAvg(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static - CompletableFuture std(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + std(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -339,19 +339,19 @@ public final class AsyncAggregationClient { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getStd(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getStd(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } // the map key is the startRow of the region private static - CompletableFuture> - sumByRegion(AsyncTable table, ColumnInterpreter ci, Scan scan) { + CompletableFuture> + sumByRegion(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture> future = - new CompletableFuture>(); + new CompletableFuture>(); AggregateRequest req; try { req = validateArgAndGetPB(scan, ci, false); @@ -361,9 +361,9 @@ public final class AsyncAggregationClient { } int firstPartIndex = scan.getFamilyMap().get(scan.getFamilies()[0]).size() - 1; AbstractAggregationCallback> callback = - new AbstractAggregationCallback>(future) { + new AbstractAggregationCallback>(future) { - private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); @Override protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { @@ -378,16 +378,16 @@ public final class AsyncAggregationClient { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . 
coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } private static void findMedian( - CompletableFuture future, AsyncTable table, - ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { + CompletableFuture future, AsyncTable table, + ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { double halfSum = ci.divideForAvg(sumByRegion.values().stream().reduce(ci::add).get(), 2L); S movingSum = null; byte[] startRow = null; @@ -453,9 +453,9 @@ public final class AsyncAggregationClient { }); } - public static - CompletableFuture median(AsyncTable table, - ColumnInterpreter ci, Scan scan) { + public static CompletableFuture + median(AsyncTable table, ColumnInterpreter ci, + Scan scan) { CompletableFuture future = new CompletableFuture<>(); addListener(sumByRegion(table, ci, scan), (sumByRegion, error) -> { if (error != null) { diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 5571e1b14cb..d04be6e0ae1 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -24,7 +24,6 @@ import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; @@ -32,7 +31,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Scan; @@ -48,11 +46,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A concrete AggregateProtocol implementation. Its system level coprocessor - * that computes the aggregate function at a region level. - * {@link ColumnInterpreter} is used to interpret column value. This class is - * parameterized with the following (these are the types with which the {@link ColumnInterpreter} - * is parameterized, and for more description on these, refer to {@link ColumnInterpreter}): + * A concrete AggregateProtocol implementation. Its system level coprocessor that computes the + * aggregate function at a region level. {@link ColumnInterpreter} is used to interpret column + * value. This class is parameterized with the following (these are the types with which the + * {@link ColumnInterpreter} is parameterized, and for more description on these, refer to + * {@link ColumnInterpreter}): * @param Cell value data type * @param Promoted data type * @param

      PB message that is used to transport initializer specific bytes @@ -66,15 +64,14 @@ public class AggregateImplementation done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; T max = null; @@ -112,24 +109,24 @@ public class AggregateImplementation done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; T min = null; @@ -156,8 +153,8 @@ public class AggregateImplementation done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; long sum = 0L; @@ -212,8 +209,8 @@ public class AggregateImplementation done) { + RpcCallback done) { AggregateResponse response = null; long counter = 0L; List results = new ArrayList<>(); @@ -244,8 +242,8 @@ public class AggregateImplementation qualifiers = colFamilies != null ? - scan.getFamilyMap().get(colFamily) : null; + NavigableSet qualifiers = + colFamilies != null ? scan.getFamilyMap().get(colFamily) : null; byte[] qualifier = null; if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); @@ -264,38 +262,35 @@ public class AggregateImplementation - * The average is computed in - * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by - * processing results from all regions, so its "ok" to pass sum and a Long - * type. + * The average is computed in AggregationClient#avg(byte[], ColumnInterpreter, Scan) by processing + * results from all regions, so its "ok" to pass sum and a Long type. */ @Override public void getAvg(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -318,8 +313,8 @@ public class AggregateImplementation done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; try { @@ -379,8 +374,8 @@ public class AggregateImplementation done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -447,11 +441,10 @@ public class AggregateImplementation constructColumnInterpreterFromRequest( - AggregateRequest request) throws IOException { + ColumnInterpreter constructColumnInterpreterFromRequest(AggregateRequest request) + throws IOException { String className = request.getInterpreterClassName(); try { - ColumnInterpreter ci; + ColumnInterpreter ci; Class cls = Class.forName(className); ci = (ColumnInterpreter) cls.getDeclaredConstructor().newInstance(); @@ -493,8 +487,8 @@ public class AggregateImplementation DEFAULT_CODEC = DefaultCodec.class; private static final SequenceFile.CompressionType DEFAULT_TYPE = - SequenceFile.CompressionType.RECORD; + SequenceFile.CompressionType.RECORD; private RegionCoprocessorEnvironment env = null; private UserProvider userProvider; @@ -110,31 +105,29 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces return null; } Triple arguments = - ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); + ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); } - public static Map run(final Configuration conf, TableName tableName, - Scan scan, Path dir) throws Throwable { + public static Map run(final Configuration conf, TableName tableName, Scan scan, + Path dir) throws Throwable { FileSystem fs = dir.getFileSystem(conf); UserProvider userProvider = UserProvider.instantiate(conf); checkDir(fs, dir); FsDelegationToken fsDelegationToken = new 
FsDelegationToken(userProvider, "renewer"); fsDelegationToken.acquireDelegationToken(fs); try { - final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, - scan, fsDelegationToken.getUserToken()); + final ExportProtos.ExportRequest request = + getConfiguredRequest(conf, dir, scan, fsDelegationToken.getUserToken()); try (Connection con = ConnectionFactory.createConnection(conf); - Table table = con.getTable(tableName)) { + Table table = con.getTable(tableName)) { Map result = new TreeMap<>(Bytes.BYTES_COMPARATOR); - table.coprocessorService(ExportProtos.ExportService.class, - scan.getStartRow(), - scan.getStopRow(), - (ExportProtos.ExportService service) -> { + table.coprocessorService(ExportProtos.ExportService.class, scan.getStartRow(), + scan.getStopRow(), (ExportProtos.ExportService service) -> { ServerRpcController controller = new ServerRpcController(); Map rval = new TreeMap<>(Bytes.BYTES_COMPARATOR); - CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); service.export(controller, request, rpcCallback); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -159,8 +152,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - private static SequenceFile.CompressionType getCompressionType( - final ExportProtos.ExportRequest request) { + private static SequenceFile.CompressionType + getCompressionType(final ExportProtos.ExportRequest request) { if (request.hasCompressType()) { return SequenceFile.CompressionType.valueOf(request.getCompressType()); } else { @@ -169,24 +162,24 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } private static CompressionCodec getCompressionCodec(final Configuration conf, - final ExportProtos.ExportRequest request) { + final ExportProtos.ExportRequest request) { try { Class codecClass; if (request.hasCompressCodec()) { - codecClass = conf.getClassByName(request.getCompressCodec()) - .asSubclass(CompressionCodec.class); + codecClass = + conf.getClassByName(request.getCompressCodec()).asSubclass(CompressionCodec.class); } else { codecClass = DEFAULT_CODEC; } return ReflectionUtils.newInstance(codecClass, conf); } catch (ClassNotFoundException e) { - throw new IllegalArgumentException("Compression codec " - + request.getCompressCodec() + " was not found.", e); + throw new IllegalArgumentException( + "Compression codec " + request.getCompressCodec() + " was not found.", e); } } private static SequenceFile.Writer.Option getOutputPath(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { Path file = new Path(request.getOutputPath(), "export-" + info.getEncodedName()); FileSystem fs = file.getFileSystem(conf); if (fs.exists(file)) { @@ -196,14 +189,14 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } private static List getWriterOptions(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { List rval = new LinkedList<>(); rval.add(SequenceFile.Writer.keyClass(ImmutableBytesWritable.class)); rval.add(SequenceFile.Writer.valueClass(Result.class)); rval.add(getOutputPath(conf, 
info, request)); if (getCompression(request)) { rval.add(SequenceFile.Writer.compression(getCompressionType(request), - getCompressionCodec(conf, request))); + getCompressionCodec(conf, request))); } else { rval.add(SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); } @@ -211,12 +204,12 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } private static ExportProtos.ExportResponse processData(final Region region, - final Configuration conf, final UserProvider userProvider, final Scan scan, - final Token userToken, final List opts) throws IOException { + final Configuration conf, final UserProvider userProvider, final Scan scan, + final Token userToken, final List opts) throws IOException { ScanCoprocessor cp = new ScanCoprocessor(region); RegionScanner scanner = null; try (RegionOp regionOp = new RegionOp(region); - SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { + SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { scanner = cp.checkScannerOpen(scan); ImmutableBytesWritable key = new ImmutableBytesWritable(); long rowCount = 0; @@ -235,11 +228,13 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } Cell firstCell = cells.get(0); for (Cell cell : cells) { - if (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), + if ( + Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength(), cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()) != 0) { - throw new IOException("Why the RegionScanner#nextRaw returns the data of different" - + " rows?? first row=" + cell.getRowLength()) != 0 + ) { + throw new IOException( + "Why the RegionScanner#nextRaw returns the data of different" + " rows?? 
first row=" + Bytes.toHex(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength()) + ", current row=" @@ -258,10 +253,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } results.clear(); } while (hasMore); - return ExportProtos.ExportResponse.newBuilder() - .setRowCount(rowCount) - .setCellCount(cellCount) - .build(); + return ExportProtos.ExportResponse.newBuilder().setRowCount(rowCount).setCellCount(cellCount) + .build(); } finally { cp.checkScannerClose(scanner); } @@ -276,31 +269,24 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, - Path dir, final Scan scan, final Token userToken) throws IOException { + private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, Path dir, + final Scan scan, final Token userToken) throws IOException { boolean compressed = conf.getBoolean(FileOutputFormat.COMPRESS, false); - String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, - DEFAULT_TYPE.toString()); - String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, - DEFAULT_CODEC.getName()); + String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, DEFAULT_TYPE.toString()); + String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, DEFAULT_CODEC.getName()); DelegationToken protoToken = null; if (userToken != null) { - protoToken = DelegationToken.newBuilder() - .setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) - .setPassword(ByteStringer.wrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + protoToken = + DelegationToken.newBuilder().setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) + .setPassword(ByteStringer.wrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); } - LOG.info("compressed=" + compressed - + ", compression type=" + compressionType - + ", compression codec=" + compressionCodec - + ", userToken=" + userToken); + LOG.info("compressed=" + compressed + ", compression type=" + compressionType + + ", compression codec=" + compressionCodec + ", userToken=" + userToken); ExportProtos.ExportRequest.Builder builder = ExportProtos.ExportRequest.newBuilder() - .setScan(ProtobufUtil.toScan(scan)) - .setOutputPath(dir.toString()) - .setCompressed(compressed) - .setCompressCodec(compressionCodec) - .setCompressType(compressionType); + .setScan(ProtobufUtil.toScan(scan)).setOutputPath(dir.toString()).setCompressed(compressed) + .setCompressCodec(compressionCodec).setCompressType(compressionType); if (protoToken != null) { builder.setFsToken(protoToken); } @@ -328,11 +314,11 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces @Override public void export(RpcController controller, ExportProtos.ExportRequest request, - RpcCallback done) { + RpcCallback done) { Region region = env.getRegion(); Configuration conf = HBaseConfiguration.create(env.getConfiguration()); conf.setStrings("io.serializations", conf.get("io.serializations"), - ResultSerialization.class.getName()); + ResultSerialization.class.getName()); try { Scan scan = validateKey(region.getRegionInfo(), request); Token userToken = null; @@ -340,12 +326,11 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces LOG.warn("Hadoop security is enable, but no found of user 
token"); } else if (userProvider.isHadoopSecurityEnabled()) { userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), - request.getFsToken().getPassword().toByteArray(), - new Text(request.getFsToken().getKind()), - new Text(request.getFsToken().getService())); + request.getFsToken().getPassword().toByteArray(), + new Text(request.getFsToken().getKind()), new Text(request.getFsToken().getService())); } - ExportProtos.ExportResponse response = processData(region, conf, userProvider, - scan, userToken, getWriterOptions(conf, region.getRegionInfo(), request)); + ExportProtos.ExportResponse response = processData(region, conf, userProvider, scan, + userToken, getWriterOptions(conf, region.getRegionInfo(), request)); done.run(response); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -354,18 +339,16 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } private Scan validateKey(final RegionInfo region, final ExportProtos.ExportRequest request) - throws IOException { + throws IOException { Scan scan = ProtobufUtil.toScan(request.getScan()); byte[] regionStartKey = region.getStartKey(); byte[] originStartKey = scan.getStartRow(); - if (originStartKey == null - || Bytes.compareTo(originStartKey, regionStartKey) < 0) { + if (originStartKey == null || Bytes.compareTo(originStartKey, regionStartKey) < 0) { scan.setStartRow(regionStartKey); } byte[] regionEndKey = region.getEndKey(); byte[] originEndKey = scan.getStopRow(); - if (originEndKey == null - || Bytes.compareTo(originEndKey, regionEndKey) > 0) { + if (originEndKey == null || Bytes.compareTo(originEndKey, regionEndKey) > 0) { scan.setStartRow(regionEndKey); } return scan; @@ -423,8 +406,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - boolean preScannerNext(final InternalScanner s, - final List results, final int limit) throws IOException { + boolean preScannerNext(final InternalScanner s, final List results, final int limit) + throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -433,9 +416,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - boolean postScannerNext(final InternalScanner s, - final List results, final int limit, boolean hasMore) - throws IOException { + boolean postScannerNext(final InternalScanner s, final List results, final int limit, + boolean hasMore) throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -447,15 +429,13 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces private static class SecureWriter implements Closeable { private final PrivilegedWriter privilegedWriter; - SecureWriter(final Configuration conf, final UserProvider userProvider, - final Token userToken, final List opts) - throws IOException { + SecureWriter(final Configuration conf, final UserProvider userProvider, final Token userToken, + final List opts) throws IOException { User user = getActiveUser(userProvider, userToken); try { SequenceFile.Writer sequenceFileWriter = - user.runAs((PrivilegedExceptionAction) () -> - SequenceFile.createWriter(conf, - opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); + user.runAs((PrivilegedExceptionAction) () -> SequenceFile + .createWriter(conf, opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); privilegedWriter = new PrivilegedWriter(user, sequenceFileWriter); } catch (InterruptedException e) { throw new IOException(e); 
@@ -467,7 +447,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } private static User getActiveUser(final UserProvider userProvider, final Token userToken) - throws IOException { + throws IOException { User user = RpcServer.getRequestUser().orElse(userProvider.getCurrent()); if (user == null && userToken != null) { LOG.warn("No found of user credentials, but a token was got from user request"); @@ -483,8 +463,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - private static class PrivilegedWriter implements PrivilegedExceptionAction, - Closeable { + private static class PrivilegedWriter implements PrivilegedExceptionAction, Closeable { private final User user; private final SequenceFile.Writer out; private Object key; @@ -541,11 +520,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces @Override public String toString() { StringBuilder builder = new StringBuilder(35); - return builder.append("rowCount=") - .append(rowCount) - .append(", cellCount=") - .append(cellCount) - .toString(); + return builder.append("rowCount=").append(rowCount).append(", cellCount=").append(cellCount) + .toString(); } } } diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index fb161d94661..52b74910076 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; @@ -48,7 +45,6 @@ import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBul import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,8 +66,8 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg @Override public void start(CoprocessorEnvironment env) { - this.env = (RegionCoprocessorEnvironment)env; - rsServices = ((HasRegionServerServices)this.env).getRegionServerServices(); + this.env = (RegionCoprocessorEnvironment) env; + rsServices = ((HasRegionServerServices) this.env).getRegionServerServices(); LOG.warn("SecureBulkLoadEndpoint is deprecated. 
It will be removed in future releases."); LOG.warn("Secure bulk load has been integrated into HBase core."); } @@ -82,12 +78,12 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg @Override public void prepareBulkLoad(RpcController controller, PrepareBulkLoadRequest request, - RpcCallback done) { + RpcCallback done) { try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); - String bulkToken = secureBulkLoadManager.prepareBulkLoad((HRegion) this.env.getRegion(), - convert(request)); + String bulkToken = + secureBulkLoadManager.prepareBulkLoad((HRegion) this.env.getRegion(), convert(request)); done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build()); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -96,23 +92,22 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg } /** - * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. + * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. */ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest convert(PrepareBulkLoadRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { - byte [] bytes = request.toByteArray(); - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.Builder - builder = - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest. - newBuilder(); + throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { + byte[] bytes = request.toByteArray(); + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.Builder builder = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest + .newBuilder(); builder.mergeFrom(bytes); return builder.build(); } @Override public void cleanupBulkLoad(RpcController controller, CleanupBulkLoadRequest request, - RpcCallback done) { + RpcCallback done) { try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); secureBulkLoadManager.cleanupBulkLoad((HRegion) this.env.getRegion(), convert(request)); @@ -124,30 +119,29 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg } /** - * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. + * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. */ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest convert(CleanupBulkLoadRequest request) throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { - byte [] bytes = request.toByteArray(); - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.Builder - builder = - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest. 
- newBuilder(); + byte[] bytes = request.toByteArray(); + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.Builder builder = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest + .newBuilder(); builder.mergeFrom(bytes); return builder.build(); } @Override public void secureBulkLoadHFiles(RpcController controller, SecureBulkLoadHFilesRequest request, - RpcCallback done) { + RpcCallback done) { boolean loaded = false; Map> map = null; try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); BulkLoadHFileRequest bulkLoadHFileRequest = ConvertSecureBulkLoadHFilesRequest(request); map = secureBulkLoadManager.secureBulkLoadHFiles((HRegion) this.env.getRegion(), - convert(bulkLoadHFileRequest)); + convert(bulkLoadHFileRequest)); loaded = map != null && !map.isEmpty(); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -156,29 +150,27 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg } /** - * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. + * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. */ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest convert(BulkLoadHFileRequest request) throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { - byte [] bytes = request.toByteArray(); - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder - builder = - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest. - newBuilder(); + byte[] bytes = request.toByteArray(); + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder builder = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest + .newBuilder(); builder.mergeFrom(bytes); return builder.build(); } - private BulkLoadHFileRequest ConvertSecureBulkLoadHFilesRequest( - SecureBulkLoadHFilesRequest request) { + private BulkLoadHFileRequest + ConvertSecureBulkLoadHFilesRequest(SecureBulkLoadHFilesRequest request) { BulkLoadHFileRequest.Builder bulkLoadHFileRequest = BulkLoadHFileRequest.newBuilder(); - RegionSpecifier region = - ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, this.env - .getRegionInfo().getRegionName()); + RegionSpecifier region = ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, + this.env.getRegionInfo().getRegionName()); bulkLoadHFileRequest.setRegion(region).setFsToken(request.getFsToken()) - .setBulkToken(request.getBulkToken()).setAssignSeqNum(request.getAssignSeqNum()) - .addAllFamilyPath(request.getFamilyPathList()); + .setBulkToken(request.getBulkToken()).setAssignSeqNum(request.getAssignSeqNum()) + .addAllFamilyPath(request.getFamilyPathList()); return bulkLoadHFileRequest.build(); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java index d50ceb9c1c3..7cc4d0712b9 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestAsyncAggregationClient { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAggregationClient.class); + HBaseClassTestRule.forClass(TestAsyncAggregationClient.class); private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -76,9 +76,9 @@ public class TestAsyncAggregationClient { CONN = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); TABLE = CONN.getTable(TABLE_NAME); TABLE.putAll(LongStream.range(0, COUNT) - .mapToObj(l -> new Put(Bytes.toBytes(String.format("%03d", l))) - .addColumn(CF, CQ, Bytes.toBytes(l)).addColumn(CF, CQ2, Bytes.toBytes(l * l))) - .collect(Collectors.toList())).get(); + .mapToObj(l -> new Put(Bytes.toBytes(String.format("%03d", l))) + .addColumn(CF, CQ, Bytes.toBytes(l)).addColumn(CF, CQ2, Bytes.toBytes(l * l))) + .collect(Collectors.toList())).get(); } @AfterClass @@ -90,34 +90,35 @@ public class TestAsyncAggregationClient { @Test public void testMax() throws InterruptedException, ExecutionException { assertEquals(COUNT - 1, AsyncAggregationClient - .max(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + .max(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } @Test public void testMin() throws InterruptedException, ExecutionException { assertEquals(0, AsyncAggregationClient - .min(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + .min(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } @Test public void testRowCount() throws InterruptedException, ExecutionException { assertEquals(COUNT, AsyncAggregationClient - .rowCount(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get() - .longValue()); + .rowCount(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get() + .longValue()); } @Test public void testSum() throws InterruptedException, ExecutionException { assertEquals(COUNT * (COUNT - 1) / 2, AsyncAggregationClient - .sum(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + .sum(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } private static final double DELTA = 1E-3; @Test public void testAvg() throws InterruptedException, ExecutionException { - assertEquals((COUNT - 1) / 2.0, AsyncAggregationClient + assertEquals( + (COUNT - 1) / 2.0, AsyncAggregationClient .avg(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().doubleValue(), DELTA); } @@ -125,11 +126,12 @@ public class TestAsyncAggregationClient { @Test public void testStd() throws InterruptedException, ExecutionException { double avgSq = - LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() - / (double) COUNT; + LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() + / (double) COUNT; double avg = (COUNT - 1) / 2.0; double std = Math.sqrt(avgSq - avg * avg); - assertEquals(std, AsyncAggregationClient + assertEquals( + std, AsyncAggregationClient .std(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().doubleValue(), DELTA); } @@ -146,16 +148,14 @@ public class TestAsyncAggregationClient { break; } } - assertEquals(median, - AsyncAggregationClient - .median(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get() - .longValue()); + assertEquals(median, 
AsyncAggregationClient + .median(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } @Test public void testMedianWithWeight() throws InterruptedException, ExecutionException { long halfSum = - LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() / 2; + LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() / 2; long median = 0L; long sum = 0L; for (int i = 0; i < COUNT; i++) { @@ -165,7 +165,8 @@ public class TestAsyncAggregationClient { break; } } - assertEquals(median, AsyncAggregationClient + assertEquals(median, + AsyncAggregationClient .median(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ).addColumn(CF, CQ2)) .get().longValue()); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java index 30fce3c7870..d96085f5269 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,12 +52,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ConcurrentHashMulti import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Multiset; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestRpcControllerFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcControllerFactory.class); + HBaseClassTestRule.forClass(TestRpcControllerFactory.class); public static class StaticRpcControllerFactory extends RpcControllerFactory { @@ -77,7 +77,7 @@ public class TestRpcControllerFactory { @Override public HBaseRpcController newController(RegionInfo regionInfo, - List cellIterables) { + List cellIterables) { return new CountingRpcController(super.newController(regionInfo, cellIterables)); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index 7b315f9f367..39f88f10190 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ -20,12 +20,10 @@ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -43,7 +41,7 @@ import org.slf4j.LoggerFactory; * The aggregation implementation at a region. 
*/ public class ColumnAggregationEndpoint extends ColumnAggregationService - implements RegionCoprocessor { + implements RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(ColumnAggregationEndpoint.class); private RegionCoprocessorEnvironment env = null; @@ -55,7 +53,7 @@ public class ColumnAggregationEndpoint extends ColumnAggregationService @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -71,8 +69,8 @@ public class ColumnAggregationEndpoint extends ColumnAggregationService // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. - byte [] family = request.getFamily().toByteArray(); - byte [] qualifier = request.hasQualifier()? request.getQualifier().toByteArray(): null; + byte[] family = request.getFamily().toByteArray(); + byte[] qualifier = request.hasQualifier() ? request.getQualifier().toByteArray() : null; if (request.hasQualifier()) { scan.addColumn(family, qualifier); } else { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java index 8ed137a2e7b..5b5c3f0ec40 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java @@ -20,12 +20,10 @@ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -42,14 +40,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Test coprocessor endpoint that always returns {@code null} for requests to the last region - * in the table. This allows tests to provide assurance of correct {@code null} handling for - * response values. + * Test coprocessor endpoint that always returns {@code null} for requests to the last region in the + * table. This allows tests to provide assurance of correct {@code null} handling for response + * values. 
*/ public class ColumnAggregationEndpointNullResponse extends ColumnAggregationServiceNullResponse - implements RegionCoprocessor { + implements RegionCoprocessor { private static final Logger LOG = - LoggerFactory.getLogger(ColumnAggregationEndpointNullResponse.class); + LoggerFactory.getLogger(ColumnAggregationEndpointNullResponse.class); private RegionCoprocessorEnvironment env = null; @@ -61,7 +59,7 @@ public class ColumnAggregationEndpointNullResponse extends ColumnAggregationServ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -74,7 +72,7 @@ public class ColumnAggregationEndpointNullResponse extends ColumnAggregationServ @Override public void sum(RpcController controller, ColumnAggregationNullResponseSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. @@ -122,9 +120,8 @@ public class ColumnAggregationEndpointNullResponse extends ColumnAggregationServ } } } - done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult) - .build()); - LOG.info("Returning sum " + sumResult + " for region " + - Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); + done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult).build()); + LOG.info("Returning sum " + sumResult + " for region " + + Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index 4aaaea268e3..5bfe270ce05 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -20,12 +20,10 @@ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -43,15 +41,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on - * the last region in the table. This allows tests to ensure correct error handling of - * coprocessor endpoints throwing exceptions. + * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on the + * last region in the table. This allows tests to ensure correct error handling of coprocessor + * endpoints throwing exceptions. 
*/ public class ColumnAggregationEndpointWithErrors - extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors - implements RegionCoprocessor { + extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors + implements RegionCoprocessor { private static final Logger LOG = - LoggerFactory.getLogger(ColumnAggregationEndpointWithErrors.class); + LoggerFactory.getLogger(ColumnAggregationEndpointWithErrors.class); private RegionCoprocessorEnvironment env = null; @@ -63,7 +61,7 @@ public class ColumnAggregationEndpointWithErrors @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -76,7 +74,7 @@ public class ColumnAggregationEndpointWithErrors @Override public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java index 63af9ca7c48..8406e27779e 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,10 +20,8 @@ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.Collections; - import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -41,8 +39,9 @@ import org.apache.hadoop.hbase.util.Threads; * service methods. For internal use by unit tests only. 
*/ public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto - implements MasterCoprocessor, RegionCoprocessor { - public ProtobufCoprocessorService() {} + implements MasterCoprocessor, RegionCoprocessor { + public ProtobufCoprocessorService() { + } @Override public Iterable getServices() { @@ -51,36 +50,36 @@ public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobu @Override public void ping(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(TestProtos.EmptyResponseProto.getDefaultInstance()); } @Override public void echo(RpcController controller, TestProtos.EchoRequestProto request, - RpcCallback done) { + RpcCallback done) { String message = request.getMessage(); done.run(TestProtos.EchoResponseProto.newBuilder().setMessage(message).build()); } @Override public void error(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, new IOException("Test exception")); done.run(null); } @Override public void pause(RpcController controller, PauseRequestProto request, - RpcCallback done) { + RpcCallback done) { Threads.sleepWithoutInterrupt(request.getMs()); done.run(EmptyResponseProto.getDefaultInstance()); } @Override public void addr(RpcController controller, EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(AddrResponseProto.newBuilder() - .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); + .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java index 1ea3b1ed53c..acecfe2c146 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java @@ -55,7 +55,7 @@ import org.junit.runners.Parameterized; public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestAsyncCoprocessorEndpoint.class); private static final FileNotFoundException WHAT_TO_THROW = new FileNotFoundException("/file.txt"); private static final String DUMMY_VALUE = "val"; @@ -76,12 +76,12 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { @Test public void testMasterCoprocessorService() throws Exception { TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); - TestProtos.EchoResponseProto response = - admin - . - coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.echo(c, request, done)).get(); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + TestProtos.EchoResponseProto response = admin. 
coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.echo(c, request, done)) + .get(); assertEquals("hello", response.getMessage()); } @@ -89,10 +89,11 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { public void testMasterCoprocessorError() throws Exception { TestProtos.EmptyRequestProto emptyRequest = TestProtos.EmptyRequestProto.getDefaultInstance(); try { - admin - . - coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.error(c, emptyRequest, done)).get(); + admin. coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.error(c, emptyRequest, done)) + .get(); fail("Should have thrown an exception"); } catch (Exception e) { } @@ -102,13 +103,13 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { public void testRegionServerCoprocessorService() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); DummyRegionServerEndpointProtos.DummyRequest request = - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); DummyRegionServerEndpointProtos.DummyResponse response = - admin - . coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyCall(c, request, done), serverName).get(); + admin. coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyCall(c, request, done), serverName) + .get(); assertEquals(DUMMY_VALUE, response.getValue()); } @@ -116,13 +117,13 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { public void testRegionServerCoprocessorServiceError() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); DummyRegionServerEndpointProtos.DummyRequest request = - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); try { - admin - . coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyThrow(c, request, done), serverName).get(); + admin. 
coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyThrow(c, request, done), serverName) + .get(); fail("Should have thrown an exception"); } catch (Exception e) { assertTrue(e.getCause() instanceof RetriesExhaustedException); @@ -131,8 +132,9 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { } public static class DummyRegionServerEndpoint extends DummyService - implements RegionServerCoprocessor { - public DummyRegionServerEndpoint() {} + implements RegionServerCoprocessor { + public DummyRegionServerEndpoint() { + } @Override public Iterable getServices() { @@ -149,14 +151,13 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { @Override public void dummyCall(RpcController controller, DummyRequest request, - RpcCallback callback) { + RpcCallback callback) { callback.run(DummyResponse.newBuilder().setValue(DUMMY_VALUE).build()); } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, - RpcCallback done) { + public void dummyThrow(RpcController controller, DummyRequest request, + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java index c108db28a2a..8ec19153596 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,17 +57,16 @@ import org.slf4j.LoggerFactory; /** * TestEndpoint: test cases to verify the batch execution of coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestBatchCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBatchCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestBatchCoprocessorEndpoint.class); private static final Logger LOG = LoggerFactory.getLogger(TestBatchCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestTable"); + private static final TableName TEST_TABLE = TableName.valueOf("TestTable"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -84,17 +83,17 @@ public class TestBatchCoprocessorEndpoint { // set configure to indicate which cp should be loaded Configuration conf = util.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName(), - ColumnAggregationEndpointWithErrors.class.getName(), - ColumnAggregationEndpointNullResponse.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName(), + ColumnAggregationEndpointWithErrors.class.getName(), + ColumnAggregationEndpointNullResponse.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); HTableDescriptor desc = new HTableDescriptor(TEST_TABLE); desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); - admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); + admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); util.waitUntilAllRegionsAssigned(TEST_TABLE); admin.close(); @@ -116,24 +115,22 @@ public class TestBatchCoprocessorEndpoint { public void testAggregationNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); ColumnAggregationNullResponseSumRequest.Builder builder = - ColumnAggregationNullResponseSumRequest - .newBuilder(); + ColumnAggregationNullResponseSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); } - Map results = - table.batchCoprocessorService( - ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationNullResponseSumResponse.getDefaultInstance()); + Map results = table.batchCoprocessorService( + ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), + builder.build(), ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationNullResponseSumResponse.getDefaultInstance()); int sumResult = 0; int expectedResult = 0; - for (Map.Entry e : - results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + for (Map.Entry e : results.entrySet()) { + LOG.info( + 
"Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < rowSeperator2; i++) { @@ -152,29 +149,29 @@ public class TestBatchCoprocessorEndpoint { } private Map sum(final Table table, final byte[] family, - final byte[] qualifier, final byte[] start, final byte[] end) throws ServiceException, - Throwable { - ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest - .newBuilder(); + final byte[] qualifier, final byte[] start, final byte[] end) + throws ServiceException, Throwable { + ColumnAggregationProtos.SumRequest.Builder builder = + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(ByteString.copyFrom(qualifier)); } return table.batchCoprocessorService( - ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), - builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); + ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), + builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); } @Test public void testAggregationWithReturnValue() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -185,13 +182,12 @@ public class TestBatchCoprocessorEndpoint { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], - ROWS[ROWS.length - 1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -204,13 +200,13 @@ public class TestBatchCoprocessorEndpoint { @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -223,8 +219,8 @@ public class TestBatchCoprocessorEndpoint { sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for 
region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -237,14 +233,10 @@ public class TestBatchCoprocessorEndpoint { @Test public void testAggregationWithErrors() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - final Map results = - Collections.synchronizedMap( - new TreeMap( - Bytes.BYTES_COMPARATOR - )); + final Map results = Collections.synchronizedMap( + new TreeMap(Bytes.BYTES_COMPARATOR)); ColumnAggregationWithErrorsSumRequest.Builder builder = - ColumnAggregationWithErrorsSumRequest - .newBuilder(); + ColumnAggregationWithErrorsSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); @@ -253,18 +245,18 @@ public class TestBatchCoprocessorEndpoint { boolean hasError = false; try { table.batchCoprocessorService( - ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() - .findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), - new Batch.Callback() { + ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() + .findMethodByName("sum"), + builder.build(), ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), + new Batch.Callback() { - @Override - public void update(byte[] region, byte[] row, - ColumnAggregationWithErrorsSumResponse result) { - results.put(region, result); - } - }); + @Override + public void update(byte[] region, byte[] row, + ColumnAggregationWithErrorsSumResponse result) { + results.put(region, result); + } + }); } catch (Throwable t) { LOG.info("Exceptions in coprocessor service", t); hasError = true; @@ -273,8 +265,8 @@ public class TestBatchCoprocessorEndpoint { int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < rowSeperator2; i++) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 5c3e5363922..60498b237b7 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -64,11 +64,11 @@ import org.slf4j.LoggerFactory; /** * Test coprocessors class loading. 
*/ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestClassLoading { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClassLoading.class); + HBaseClassTestRule.forClass(TestClassLoading.class); private static final Logger LOG = LoggerFactory.getLogger(TestClassLoading.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -91,17 +91,17 @@ public class TestClassLoading { static final String cpName6 = "TestCP6"; private static Class regionCoprocessor1 = ColumnAggregationEndpoint.class; - // TOOD: Fix the import of this handler. It is coming in from a package that is far away. + // TOOD: Fix the import of this handler. It is coming in from a package that is far away. private static Class regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class; private static Class regionServerCoprocessor = SampleRegionWALCoprocessor.class; private static Class masterCoprocessor = TestMasterCoprocessor.class; private static final String[] regionServerSystemCoprocessors = - new String[]{ regionServerCoprocessor.getSimpleName() }; + new String[] { regionServerCoprocessor.getSimpleName() }; - private static final String[] masterRegionServerSystemCoprocessors = new String[] { - regionCoprocessor1.getSimpleName(), MultiRowMutationEndpoint.class.getSimpleName(), - regionServerCoprocessor.getSimpleName() }; + private static final String[] masterRegionServerSystemCoprocessors = + new String[] { regionCoprocessor1.getSimpleName(), + MultiRowMutationEndpoint.class.getSimpleName(), regionServerCoprocessor.getSimpleName() }; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -109,19 +109,15 @@ public class TestClassLoading { // regionCoprocessor1 will be loaded on all regionservers, since it is // loaded for any tables (user or meta). - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor1.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, regionCoprocessor1.getName()); // regionCoprocessor2 will be loaded only on regionservers that serve a // user table region. Therefore, if there are no user tables loaded, // this coprocessor will not be loaded on any regionserver. 
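// A hedged, condensed sketch of the system-coprocessor wiring exercised by this setup
// hunk: the CoprocessorHost keys are the real configuration constants used above, while
// the endpoint class names passed to them are hypothetical placeholders, not classes
// from this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class CoprocessorWiringSketch {
  public static Configuration wire() {
    Configuration conf = HBaseConfiguration.create();
    // Loaded on every region-hosting regionserver, for user and meta tables alike.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "org.example.MyRegionEndpoint");
    // Loaded only on regionservers that actually serve a user-table region.
    conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      "org.example.MyUserRegionEndpoint");
    // Attached to the write-ahead log pipeline on each regionserver.
    conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, "org.example.MyWALObserver");
    // Loaded by the active master process.
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, "org.example.MyMasterObserver");
    return conf;
  }
}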
- conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor2.getName()); + conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, regionCoprocessor2.getName()); - conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - regionServerCoprocessor.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - masterCoprocessor.getName()); + conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, regionServerCoprocessor.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, masterCoprocessor.getName()); TEST_UTIL.startMiniCluster(1); cluster = TEST_UTIL.getDFSCluster(); } @@ -132,11 +128,9 @@ public class TestClassLoading { } static File buildCoprocessorJar(String className) throws Exception { - String code = - "import org.apache.hadoop.hbase.coprocessor.*;" + - "public class " + className + " implements RegionCoprocessor {}"; - return ClassLoaderTestHelper.buildJar( - TEST_UTIL.getDataTestDir().toString(), className, code); + String code = "import org.apache.hadoop.hbase.coprocessor.*;" + "public class " + className + + " implements RegionCoprocessor {}"; + return ClassLoaderTestHelper.buildJar(TEST_UTIL.getDataTestDir().toString(), className, code); } @Test @@ -150,31 +144,27 @@ public class TestClassLoading { // copy the jars into dfs fs.copyFromLocalFile(new Path(jarFile1.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + - jarFile1.getName(); + String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName(); Path pathOnHDFS1 = new Path(jarFileOnHDFS1); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS1)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1); fs.copyFromLocalFile(new Path(jarFile2.getPath()), - new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + - jarFile2.getName(); + new Path(fs.getUri().toString() + Path.SEPARATOR)); + String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName(); Path pathOnHDFS2 = new Path(jarFileOnHDFS2); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS2)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS2)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2); // create a table that references the coprocessors HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("test")); - // without configuration values - htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + cpName1 + - "|" + Coprocessor.PRIORITY_USER); - // with configuration values - htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 + - "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + // without configuration values + htd.setValue("COPROCESSOR$1", + jarFileOnHDFS1.toString() + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); + // with configuration values + htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 + "|" + + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -183,18 +173,17 @@ public class TestClassLoading { admin.deleteTable(tableName); } CoprocessorClassLoader.clearCache(); - byte[] startKey = {10, 63}; - byte[] endKey = {12, 43}; + byte[] startKey = { 10, 63 }; + byte[] 
endKey = { 12, 43 }; admin.createTable(htd, startKey, endKey, 4); waitForTable(htd.getTableName()); // verify that the coprocessors were loaded - boolean foundTableRegion=false; + boolean foundTableRegion = false; boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true; Map> regionsActiveClassLoaders = new HashMap<>(); MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { foundTableRegion = true; CoprocessorEnvironment env; @@ -212,8 +201,8 @@ public class TestClassLoading { found2_k2 = false; found2_k3 = false; } - regionsActiveClassLoaders - .put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); + regionsActiveClassLoaders.put(region, + ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); } } @@ -228,18 +217,16 @@ public class TestClassLoading { CoprocessorClassLoader.getIfCached(pathOnHDFS1)); assertNotNull(jarFileOnHDFS2 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS2)); - //two external jar used, should be one classloader per jar - assertEquals("The number of cached classloaders should be equal to the number" + - " of external jar files", + // two external jar used, should be one classloader per jar + assertEquals( + "The number of cached classloaders should be equal to the number" + " of external jar files", 2, CoprocessorClassLoader.getAllCached().size()); - //check if region active classloaders are shared across all RS regions - Set externalClassLoaders = new HashSet<>( - CoprocessorClassLoader.getAllCached()); + // check if region active classloaders are shared across all RS regions + Set externalClassLoaders = new HashSet<>(CoprocessorClassLoader.getAllCached()); for (Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached." 
- + " ClassLoader Cache:" + externalClassLoaders - + " Region ClassLoaders:" + regionCP.getValue(), - externalClassLoaders.containsAll(regionCP.getValue())); + + " ClassLoader Cache:" + externalClassLoaders + " Region ClassLoaders:" + + regionCP.getValue(), externalClassLoaders.containsAll(regionCP.getValue())); } } @@ -255,8 +242,8 @@ public class TestClassLoading { // create a table that references the jar HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName3)); htd.addFamily(new HColumnDescriptor("test")); - htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + - Coprocessor.PRIORITY_USER); + htd.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(htd); waitForTable(htd.getTableName()); @@ -264,7 +251,7 @@ public class TestClassLoading { // verify that the coprocessor was loaded boolean found = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) { found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null); } @@ -280,8 +267,8 @@ public class TestClassLoading { // create a table that references the jar HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName4)); htd.addFamily(new HColumnDescriptor("test")); - htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + - Coprocessor.PRIORITY_USER); + htd.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(htd); waitForTable(htd.getTableName()); @@ -289,7 +276,7 @@ public class TestClassLoading { // verify that the coprocessor was loaded correctly boolean found = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) { Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4); if (cp != null) { @@ -317,12 +304,10 @@ public class TestClassLoading { String cpKey2 = " Coprocessor$2 "; String cpKey3 = " coprocessor$03 "; - String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + - Coprocessor.PRIORITY_USER; + String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER; String cpValue2 = getLocalPath(jarFile2) + " | " + cpName2 + " | "; // load from default class loader - String cpValue3 = - " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; + String cpValue3 = " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; // create a table that references the jar HTableDescriptor htd = new HTableDescriptor(tableName); @@ -334,14 +319,12 @@ public class TestClassLoading { htd.setValue(cpKey3, cpValue3); // add 2 coprocessor by using new htd.setCoprocessor() api - htd.addCoprocessor(cpName5, new Path(getLocalPath(jarFile5)), - Coprocessor.PRIORITY_USER, null); + htd.addCoprocessor(cpName5, new Path(getLocalPath(jarFile5)), Coprocessor.PRIORITY_USER, null); Map kvs = new HashMap<>(); kvs.put("k1", "v1"); kvs.put("k2", "v2"); kvs.put("k3", "v3"); - htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)), 
- Coprocessor.PRIORITY_USER, kvs); + htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)), Coprocessor.PRIORITY_USER, kvs); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { @@ -354,26 +337,20 @@ public class TestClassLoading { waitForTable(htd.getTableName()); // verify that the coprocessor was loaded - boolean found_2 = false, found_1 = false, found_3 = false, - found_5 = false, found_6 = false; - boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, - found6_k4 = false; + boolean found_2 = false, found_1 = false, found_3 = false, found_5 = false, found_6 = false; + boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, found6_k4 = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { - found_1 = found_1 || - (region.getCoprocessorHost().findCoprocessor(cpName1) != null); - found_2 = found_2 || - (region.getCoprocessorHost().findCoprocessor(cpName2) != null); - found_3 = found_3 || - (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") - != null); - found_5 = found_5 || - (region.getCoprocessorHost().findCoprocessor(cpName5) != null); + found_1 = found_1 || (region.getCoprocessorHost().findCoprocessor(cpName1) != null); + found_2 = found_2 || (region.getCoprocessorHost().findCoprocessor(cpName2) != null); + found_3 = + found_3 || (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") != null); + found_5 = found_5 || (region.getCoprocessorHost().findCoprocessor(cpName5) != null); CoprocessorEnvironment env = - region.getCoprocessorHost().findCoprocessorEnvironment(cpName6); + region.getCoprocessorHost().findCoprocessorEnvironment(cpName6); if (env != null) { found_6 = true; Configuration conf = env.getConfiguration(); @@ -413,27 +390,24 @@ public class TestClassLoading { File innerJarFile2 = buildCoprocessorJar(cpName2); File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar"); - ClassLoaderTestHelper.addJarFilesToJar( - outerJarFile, libPrefix, innerJarFile1, innerJarFile2); + ClassLoaderTestHelper.addJarFilesToJar(outerJarFile, libPrefix, innerJarFile1, innerJarFile2); // copy the jars into dfs fs.copyFromLocalFile(new Path(outerJarFile.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + - outerJarFile.getName(); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(new Path(jarFileOnHDFS))); + String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName(); + assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS))); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS); // create a table that references the coprocessors HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("test")); - // without configuration values - htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 + - "|" + Coprocessor.PRIORITY_USER); - // with configuration values - htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 + - "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + // without configuration values + htd.setValue("COPROCESSOR$1", + jarFileOnHDFS.toString() + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); + // with configuration 
values + htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 + "|" + + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -445,10 +419,9 @@ public class TestClassLoading { waitForTable(htd.getTableName()); // verify that the coprocessors were loaded - boolean found1 = false, found2 = false, found2_k1 = false, - found2_k2 = false, found2_k3 = false; + boolean found1 = false, found2 = false, found2_k1 = false, found2_k2 = false, found2_k3 = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { CoprocessorEnvironment env; env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); @@ -481,24 +454,21 @@ public class TestClassLoading { } /** - * return the subset of all regionservers - * (actually returns set of ServerLoads) - * which host some region in a given table. - * used by assertAllRegionServers() below to - * test reporting of loaded coprocessors. + * return the subset of all regionservers (actually returns set of ServerLoads) which host some + * region in a given table. used by assertAllRegionServers() below to test reporting of loaded + * coprocessors. * @param tableName : given table. * @return subset of all servers. */ Map serversForTable(String tableName) { Map serverLoadHashMap = new HashMap<>(); - for(Map.Entry server: - TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager(). - getOnlineServers().entrySet()) { - for(Map.Entry region: - server.getValue().getRegionMetrics().entrySet()) { + for (Map.Entry server : TEST_UTIL.getMiniHBaseCluster().getMaster() + .getServerManager().getOnlineServers().entrySet()) { + for (Map.Entry region : server.getValue().getRegionMetrics() + .entrySet()) { if (region.getValue().getNameAsString().equals(tableName)) { // this server hosts a region of tableName: add this server.. - serverLoadHashMap.put(server.getKey(),server.getValue()); + serverLoadHashMap.put(server.getKey(), server.getValue()); // .. and skip the rest of the regions that it hosts. break; } @@ -519,13 +489,12 @@ public class TestClassLoading { } for (int i = 0; i < 5; i++) { boolean any_failed = false; - for(Map.Entry server: servers.entrySet()) { + for (Map.Entry server : servers.entrySet()) { String[] actualCoprocessors = server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]); if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) { - LOG.debug("failed comparison: actual: " + - Arrays.toString(actualCoprocessors) + - " ; expected: " + Arrays.toString(expectedCoprocessors)); + LOG.debug("failed comparison: actual: " + Arrays.toString(actualCoprocessors) + + " ; expected: " + Arrays.toString(expectedCoprocessors)); any_failed = true; expectedCoprocessors = switchExpectedCoprocessors(expectedCoprocessors); break; @@ -556,11 +525,9 @@ public class TestClassLoading { // HBASE 4070: Improve region server metrics to report loaded coprocessors // to master: verify that the master is reporting the correct set of // loaded coprocessors. 
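// A hedged sketch of checking the master-reported coprocessor list from a plain client,
// using the public ClusterMetrics API rather than the test's direct HMaster handle; the
// connection boilerplate below is assumed, not taken from this patch.
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MasterCoprocessorReportSketch {
  public static List<String> loadedMasterCoprocessors() throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics =
        admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.MASTER_COPROCESSORS));
      // The master reports the simple class names of its loaded coprocessors.
      return metrics.getMasterCoprocessorNames();
    }
  }
}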
- final String loadedMasterCoprocessorsVerify = - "[" + masterCoprocessor.getSimpleName() + "]"; + final String loadedMasterCoprocessorsVerify = "[" + masterCoprocessor.getSimpleName() + "]"; String loadedMasterCoprocessors = - java.util.Arrays.toString( - TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); + java.util.Arrays.toString(TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index 4b8f6c7e8be..c07188df580 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -65,16 +65,15 @@ import org.slf4j.LoggerFactory; /** * TestEndpoint: test cases to verify coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestCoprocessorEndpoint.class); private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestCoprocessorEndpoint"); + private static final TableName TEST_TABLE = TableName.valueOf("TestCoprocessorEndpoint"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -92,15 +91,15 @@ public class TestCoprocessorEndpoint { Configuration conf = util.getConfiguration(); conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); HTableDescriptor desc = new HTableDescriptor(TEST_TABLE); desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); - admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); + admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); util.waitUntilAllRegionsAssigned(TEST_TABLE); Table table = util.getConnection().getTable(TEST_TABLE); @@ -117,17 +116,15 @@ public class TestCoprocessorEndpoint { util.shutdownMiniCluster(); } - private Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) - throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + private Map sum(final Table table, final byte[] family, final byte[] qualifier, + final byte[] start, final byte[] end) throws ServiceException, Throwable { + return 
table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public Long call(ColumnAggregationProtos.ColumnAggregationService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteStringer.wrap(family)); @@ -143,12 +140,12 @@ public class TestCoprocessorEndpoint { @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = 0; i < ROWSIZE; i++) { @@ -159,12 +156,11 @@ public class TestCoprocessorEndpoint { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -179,77 +175,73 @@ public class TestCoprocessorEndpoint { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); - final Map results = Collections.synchronizedMap( - new TreeMap(Bytes.BYTES_COMPARATOR)); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + final Map results = + Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); try { // scan: for all regions final RpcController controller = new ServerRpcController(); - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[0], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call returning result " + response); return response; } - }, - new 
Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation info : regions) { - LOG.info("Region info is "+info.getRegionInfo().getRegionNameAsString()); + LOG.info("Region info is " + info.getRegionInfo().getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionInfo().getRegionName())); } results.clear(); // scan: for region 2 and region 3 - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[rowSeperator1], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[rowSeperator1], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call returning result " + response); return response; } - }, - new Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(2, results.size()); } finally { @@ -261,38 +253,37 @@ public class TestCoprocessorEndpoint { public void testCoprocessorServiceNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); try { // scan: for all regions final RpcController controller = new ServerRpcController(); // test that null results are supported - Map results = - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, + Map results = table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], ROWS[ROWS.length - 1], new Batch.Call() { public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) - throws IOException { + throws IOException { CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); 
instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call got result " + response); return null; } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation region : regions) { HRegionInfo info = region.getRegionInfo(); - LOG.info("Region info is "+info.getRegionNameAsString()); + LOG.info("Region info is " + info.getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionName())); assertNull(results.get(info.getRegionName())); } @@ -305,9 +296,9 @@ public class TestCoprocessorEndpoint { public void testMasterCoprocessorService() throws Throwable { Admin admin = util.getAdmin(); final TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service = - TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); + TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); assertEquals("hello", service.echo(null, request).getMessage()); } @@ -322,7 +313,7 @@ public class TestCoprocessorEndpoint { CoprocessorRpcChannel protocol = table.coprocessorService(ROWS[0]); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service = - TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol); + TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol); service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance()); fail("Should have thrown an exception"); @@ -336,7 +327,7 @@ public class TestCoprocessorEndpoint { public void testMasterCoprocessorError() throws Throwable { Admin admin = util.getAdmin(); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service = - TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); + TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); try { service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance()); fail("Should have thrown an exception"); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java index 1e79afd43b8..007efbf5651 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java @@ -33,6 +33,7 @@ import static org.hamcrest.Matchers.hasProperty; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import com.google.protobuf.Descriptors; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -90,12 +91,13 @@ import org.junit.rules.TestName; import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; /** * Test cases to verify tracing coprocessor Endpoint execution */ -@Category({ CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, 
MediumTests.class }) public class TestCoprocessorEndpointTracing { private static final Logger logger = LoggerFactory.getLogger(TestCoprocessorEndpointTracing.class); @@ -105,8 +107,8 @@ public class TestCoprocessorEndpointTracing { HBaseClassTestRule.forClass(TestCoprocessorEndpointTracing.class); private static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create(); - private static final MiniClusterRule miniclusterRule = MiniClusterRule.newBuilder() - .setConfiguration(() -> { + private static final MiniClusterRule miniclusterRule = + MiniClusterRule.newBuilder().setConfiguration(() -> { final Configuration conf = HBaseConfiguration.create(); conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, @@ -114,8 +116,7 @@ public class TestCoprocessorEndpointTracing { conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, ProtobufCoprocessorService.class.getName()); return conf; - }) - .build(); + }).build(); private static final ConnectionRule connectionRule = ConnectionRule.createConnectionRule( miniclusterRule::createConnection, miniclusterRule::createAsyncConnection); @@ -133,10 +134,8 @@ public class TestCoprocessorEndpointTracing { } @ClassRule - public static final TestRule testRule = RuleChain.outerRule(otelClassRule) - .around(miniclusterRule) - .around(connectionRule) - .around(new Setup()); + public static final TestRule testRule = RuleChain.outerRule(otelClassRule).around(miniclusterRule) + .around(connectionRule).around(new Setup()); private static final TableName TEST_TABLE = TableName.valueOf(TestCoprocessorEndpointTracing.class.getSimpleName()); @@ -189,8 +188,7 @@ public class TestCoprocessorEndpointTracing { final Map results = TraceUtil.trace(() -> { table.coprocessorService(TestProtobufRpcProto::newStub, - (stub, controller, cb) -> stub.echo(controller, request, cb), callback) - .execute(); + (stub, controller, cb) -> stub.echo(controller, request, cb), callback).execute(); try { return future.get(); } catch (InterruptedException | ExecutionException e) { @@ -199,31 +197,21 @@ public class TestCoprocessorEndpointTracing { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - equalTo("hello")))); + assertThat(results.values(), everyItem(allOf(notNullValue(), equalTo("hello")))); final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + 
spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -237,45 +225,33 @@ public class TestCoprocessorEndpointTracing { new CoprocessorRpcUtils.BlockingRpcCallback<>(); final Map results = TraceUtil.trace(() -> { try { - return table.coprocessorService(TestProtobufRpcProto.class, null, null, - t -> { - t.echo(controller, request, callback); - return callback.get(); - }); + return table.coprocessorService(TestProtobufRpcProto.class, null, null, t -> { + t.echo(controller, request, callback); + return callback.get(); + }); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -300,34 +276,23 @@ public class TestCoprocessorEndpointTracing { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - 
hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -350,9 +315,7 @@ public class TestCoprocessorEndpointTracing { assertEquals("hello", response.getMessage()); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); @@ -361,13 +324,10 @@ public class TestCoprocessorEndpointTracing { * The Table instance isn't issuing a command here, it's not a table operation, so don't expect * there to be a span like `COPROC_EXEC table`. */ - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = + allOf(hasName(containsString("COPROC_EXEC")), hasParentSpanId(testSpan)); assertThat(spans, not(hasItem(tableOpMatcher))); } @@ -380,41 +340,30 @@ public class TestCoprocessorEndpointTracing { final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final Map response = TraceUtil.trace(() -> { try { - return table.batchCoprocessorService( - descriptor, request, null, null, EchoResponseProto.getDefaultInstance()); + return table.batchCoprocessorService(descriptor, request, null, null, + EchoResponseProto.getDefaultInstance()); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(response); - assertThat(response.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(response.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = 
allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/Multi"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/Multi"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -436,34 +385,23 @@ public class TestCoprocessorEndpointTracing { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/Multi"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/Multi"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -475,27 +413,20 @@ public class TestCoprocessorEndpointTracing { final ServiceCaller callback = (stub, controller, cb) -> stub.echo(controller, request, cb); - final String response = TraceUtil.tracedFuture( - () -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), - testName.getMethodName()) - .get() - .getMessage(); + final String response = TraceUtil + .tracedFuture(() -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), + testName.getMethodName()) + .get().getMessage(); assertEquals("hello", response); - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher 
rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -516,27 +447,21 @@ public class TestCoprocessorEndpointTracing { assertEquals("hello", response); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } private void waitForAndLog(Matcher spanMatcher) { final Configuration conf = connectionRule.getAsyncConnection().getConfiguration(); - Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - otelClassRule::getSpans, hasItem(spanMatcher))); + Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), + new MatcherPredicate<>(otelClassRule::getSpans, hasItem(spanMatcher))); final List spans = otelClassRule.getSpans(); if (logger.isDebugEnabled()) { StringTraceRenderer renderer = new StringTraceRenderer(spans); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java index d1e848dbc62..e280be92515 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java @@ -40,11 +40,11 @@ import org.junit.experimental.categories.Category; /** * Tests to ensure that 2.0 is backward compatible in loading CoprocessorService. 
*/ -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestCoprocessorServiceBackwardCompatibility { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorServiceBackwardCompatibility.class); + HBaseClassTestRule.forClass(TestCoprocessorServiceBackwardCompatibility.class); private static HBaseTestingUtility TEST_UTIL = null; private static Configuration CONF = null; @@ -54,7 +54,7 @@ public class TestCoprocessorServiceBackwardCompatibility { private static final long REGION = 3; public static class DummyCoprocessorService extends DummyService - implements CoprocessorService, SingletonCoprocessorService { + implements CoprocessorService, SingletonCoprocessorService { // depending on the value passed thru DummyRequest, the following fields would be incremented // value == MASTER static int numMaster = 0; @@ -70,7 +70,7 @@ public class TestCoprocessorServiceBackwardCompatibility { @Override public void dummyCall(RpcController controller, DummyRequest request, - RpcCallback callback) { + RpcCallback callback) { callback.run(DummyResponse.newBuilder().setValue("").build()); if (request.getValue() == MASTER) { numMaster += request.getValue(); @@ -83,7 +83,7 @@ public class TestCoprocessorServiceBackwardCompatibility { @Override public void dummyThrow(RpcController controller, DummyRequest request, - RpcCallback callback) { + RpcCallback callback) { } } @@ -92,11 +92,11 @@ public class TestCoprocessorServiceBackwardCompatibility { TEST_UTIL = new HBaseTestingUtility(); CONF = TEST_UTIL.getConfiguration(); CONF.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - DummyCoprocessorService.class.getName()); + DummyCoprocessorService.class.getName()); CONF.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - DummyCoprocessorService.class.getName()); + DummyCoprocessorService.class.getName()); CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - DummyCoprocessorService.class.getName()); + DummyCoprocessorService.class.getName()); TEST_UTIL.startMiniCluster(); } @@ -108,21 +108,21 @@ public class TestCoprocessorServiceBackwardCompatibility { @Test public void testCoprocessorServiceLoadedByMaster() throws Throwable { TEST_UTIL.getAdmin().coprocessorService().callBlockingMethod( - DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, - DummyRequest.newBuilder().setValue(MASTER).build(), DummyResponse.getDefaultInstance()); + DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, + DummyRequest.newBuilder().setValue(MASTER).build(), DummyResponse.getDefaultInstance()); assertEquals(MASTER, DummyCoprocessorService.numMaster); - TEST_UTIL.getAdmin().coprocessorService( - TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName()).callBlockingMethod( - DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, - DummyRequest.newBuilder().setValue(REGIONSERVER).build(), - DummyResponse.getDefaultInstance()); + TEST_UTIL.getAdmin() + .coprocessorService(TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName()) + .callBlockingMethod(DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), + null, DummyRequest.newBuilder().setValue(REGIONSERVER).build(), + DummyResponse.getDefaultInstance()); assertEquals(REGIONSERVER, DummyCoprocessorService.numRegionServer); TEST_UTIL.getConnection().getTable(TableName.valueOf("hbase:meta")).batchCoprocessorService( - DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), 
- DummyRequest.newBuilder().setValue(REGION).build(), Bytes.toBytes(""), Bytes.toBytes(""), - DummyResponse.getDefaultInstance()); + DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), + DummyRequest.newBuilder().setValue(REGION).build(), Bytes.toBytes(""), Bytes.toBytes(""), + DummyResponse.getDefaultInstance()); assertEquals(REGION, DummyCoprocessorService.numRegion); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java index 3bec2034a12..5913da591ac 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java @@ -45,11 +45,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorTableEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorTableEndpoint.class); + HBaseClassTestRule.forClass(TestCoprocessorTableEndpoint.class); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); @@ -109,17 +109,16 @@ public class TestCoprocessorTableEndpoint { return ret; } - private static Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) - throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + private static Map sum(final Table table, final byte[] family, + final byte[] qualifier, final byte[] start, final byte[] end) + throws ServiceException, Throwable { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public Long call(ColumnAggregationProtos.ColumnAggregationService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); @@ -134,7 +133,7 @@ public class TestCoprocessorTableEndpoint { private static final void createTable(HTableDescriptor desc) throws Exception { Admin admin = TEST_UTIL.getAdmin(); - admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); + admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); TEST_UTIL.waitUntilAllRegionsAssigned(desc.getTableName()); Table table = TEST_UTIL.getConnection().getTable(desc.getTableName()); try { @@ -158,8 +157,8 @@ public class TestCoprocessorTableEndpoint { private static final void verifyTable(TableName tableName) throws Throwable { Table table = TEST_UTIL.getConnection().getTable(tableName); try { - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { @@ -172,7 +171,7 @@ public 
class TestCoprocessorTableEndpoint { // scan: for region 2 and region 3 results.clear(); - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java index 8a4c7b21b55..cd9b474cf3e 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,12 +26,12 @@ import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestImportExport extends org.apache.hadoop.hbase.mapreduce.TestImportExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportExport.class); + HBaseClassTestRule.forClass(TestImportExport.class); @BeforeClass public static void beforeClass() throws Throwable { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java index f1808845d0a..d9a7aec4670 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,12 +45,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionServerCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionServerCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestRegionServerCoprocessorEndpoint.class); public static final FileNotFoundException WHAT_TO_THROW = new FileNotFoundException("/file.txt"); private static HBaseTestingUtility TEST_UTIL = null; @@ -75,13 +75,14 @@ public class TestRegionServerCoprocessorEndpoint { public void testEndpoint() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback< + DummyRegionServerEndpointProtos.DummyResponse> rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = - ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, - TEST_UTIL.getAdmin().coprocessorService(serverName)); - service.dummyCall(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, + TEST_UTIL.getAdmin().coprocessorService(serverName)); + service.dummyCall(controller, DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), + rpcCallback); assertEquals(DUMMY_VALUE, rpcCallback.get().getValue()); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -92,21 +93,22 @@ public class TestRegionServerCoprocessorEndpoint { public void testEndpointExceptions() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback< + DummyRegionServerEndpointProtos.DummyResponse> rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = - ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, - TEST_UTIL.getAdmin().coprocessorService(serverName)); + ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, + TEST_UTIL.getAdmin().coprocessorService(serverName)); service.dummyThrow(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); assertEquals(null, rpcCallback.get()); assertTrue(controller.failedOnException()); assertEquals(WHAT_TO_THROW.getClass().getName().trim(), - ((RemoteWithExtrasException) controller.getFailedOn().getCause()).getClassName().trim()); + ((RemoteWithExtrasException) controller.getFailedOn().getCause()).getClassName().trim()); } public static class DummyRegionServerEndpoint extends DummyService - implements RegionServerCoprocessor { + implements RegionServerCoprocessor { @Override public Iterable 
getServices() { @@ -115,14 +117,13 @@ public class TestRegionServerCoprocessorEndpoint { @Override public void dummyCall(RpcController controller, DummyRequest request, - RpcCallback callback) { + RpcCallback callback) { callback.run(DummyResponse.newBuilder().setValue(DUMMY_VALUE).build()); } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, - RpcCallback done) { + public void dummyThrow(RpcController controller, DummyRequest request, + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java index e788e5d11f5..5f133b79a50 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java @@ -79,14 +79,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Verifies ProcessEndpoint works. - * The tested RowProcessor performs two scans and a read-modify-write. + * Verifies ProcessEndpoint works. The tested RowProcessor performs two scans and a + * read-modify-write. */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRowProcessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowProcessorEndpoint.class); + HBaseClassTestRule.forClass(TestRowProcessorEndpoint.class); private static final Logger LOG = LoggerFactory.getLogger(TestRowProcessorEndpoint.class); @@ -120,7 +120,7 @@ public class TestRowProcessorEndpoint { public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - RowProcessorEndpoint.class.getName()); + RowProcessorEndpoint.class.getName()); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); conf.setLong("hbase.hregion.row.processor.timeout", 1000L); conf.setLong(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, 2048); @@ -142,9 +142,9 @@ public class TestRowProcessorEndpoint { table = util.createTable(TABLE, FAM); { Put put = new Put(ROW); - put.addColumn(FAM, A, Bytes.add(B, C)); // B, C are friends of A + put.addColumn(FAM, A, Bytes.add(B, C)); // B, C are friends of A put.addColumn(FAM, B, Bytes.add(D, E, F)); // D, E, F are friends of B - put.addColumn(FAM, C, G); // G is a friend of C + put.addColumn(FAM, C, G); // G is a friend of C table.put(put); rowSize = put.size(); } @@ -161,16 +161,15 @@ public class TestRowProcessorEndpoint { CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.FriendsOfFriendsProcessor processor = - new RowProcessorEndpoint.FriendsOfFriendsProcessor(ROW, A); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + new RowProcessorEndpoint.FriendsOfFriendsProcessor(ROW, A); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); ProcessResponse protoResult = service.process(null, request); FriendsOfFriendsProcessorResponse response = - FriendsOfFriendsProcessorResponse.parseFrom(protoResult.getRowProcessorResult()); + 
FriendsOfFriendsProcessorResponse.parseFrom(protoResult.getRowProcessorResult()); Set result = new HashSet<>(); result.addAll(response.getResultList()); - Set expected = new HashSet<>(Arrays.asList(new String[]{"d", "e", "f", "g"})); + Set expected = new HashSet<>(Arrays.asList(new String[] { "d", "e", "f", "g" })); Get get = new Get(ROW); LOG.debug("row keyvalues:" + stringifyKvs(table.get(get).listCells())); assertEquals(expected, result); @@ -207,13 +206,12 @@ public class TestRowProcessorEndpoint { private int incrementCounter(Table table) throws Throwable { CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.IncrementCounterProcessor processor = - new RowProcessorEndpoint.IncrementCounterProcessor(ROW); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + new RowProcessorEndpoint.IncrementCounterProcessor(ROW); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); ProcessResponse protoResult = service.process(null, request); - IncCounterProcessorResponse response = IncCounterProcessorResponse - .parseFrom(protoResult.getRowProcessorResult()); + IncCounterProcessorResponse response = + IncCounterProcessorResponse.parseFrom(protoResult.getRowProcessorResult()); Integer result = response.getResponse(); return result; } @@ -246,10 +244,8 @@ public class TestRowProcessorEndpoint { failures.set(0); int numThreads = 100; concurrentExec(new SwapRowsRunner(), numThreads); - LOG.debug("row keyvalues:" + - stringifyKvs(table.get(new Get(ROW)).listCells())); - LOG.debug("row2 keyvalues:" + - stringifyKvs(table.get(new Get(ROW2)).listCells())); + LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells())); + LOG.debug("row2 keyvalues:" + stringifyKvs(table.get(new Get(ROW2)).listCells())); int failureNumber = failures.get(); if (failureNumber > 0) { LOG.debug("We failed " + failureNumber + " times during test"); @@ -278,9 +274,8 @@ public class TestRowProcessorEndpoint { private void swapRows(Table table) throws Throwable { CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.RowSwapProcessor processor = - new RowProcessorEndpoint.RowSwapProcessor(ROW, ROW2); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + new RowProcessorEndpoint.RowSwapProcessor(ROW, ROW2); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); service.process(null, request); } @@ -290,9 +285,8 @@ public class TestRowProcessorEndpoint { prepareTestData(); CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.TimeoutProcessor processor = - new RowProcessorEndpoint.TimeoutProcessor(ROW); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + new RowProcessorEndpoint.TimeoutProcessor(ROW); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); boolean exceptionCaught = false; try { @@ -304,17 +298,15 @@ public class TestRowProcessorEndpoint { } /** - * This class defines two RowProcessors: - * IncrementCounterProcessor and FriendsOfFriendsProcessor. - * - * We define the RowProcessors as the inner class of the endpoint. 
- * So they can be loaded with the endpoint on the coprocessor. + * This class defines two RowProcessors: IncrementCounterProcessor and FriendsOfFriendsProcessor. + * We define the RowProcessors as the inner class of the endpoint. So they can be loaded with the + * endpoint on the coprocessor. */ - public static class RowProcessorEndpoint - extends BaseRowProcessorEndpoint { - public static class IncrementCounterProcessor extends - BaseRowProcessor { + public static class RowProcessorEndpoint + extends BaseRowProcessorEndpoint { + public static class IncrementCounterProcessor + extends BaseRowProcessor { int counter = 0; byte[] row = new byte[0]; @@ -346,15 +338,14 @@ public class TestRowProcessorEndpoint { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { // Scan current counter List kvs = new ArrayList<>(); Scan scan = new Scan(row, row); scan.addColumn(FAM, COUNTER); doScan(region, scan, kvs); - counter = kvs.isEmpty() ? 0 : - Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next())); + counter = kvs.isEmpty() ? 0 : Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next())); // Assert counter value assertEquals(expectedCounter, counter); @@ -363,19 +354,15 @@ public class TestRowProcessorEndpoint { counter += 1; expectedCounter += 1; - Put p = new Put(row); - KeyValue kv = - new KeyValue(row, FAM, COUNTER, now, Bytes.toBytes(counter)); + KeyValue kv = new KeyValue(row, FAM, COUNTER, now, Bytes.toBytes(counter)); p.add(kv); mutations.add(p); walEdit.add(kv); // We can also inject some meta data to the walEdit - KeyValue metaKv = new KeyValue( - row, WALEdit.METAFAMILY, - Bytes.toBytes("I just increment counter"), - Bytes.toBytes(counter)); + KeyValue metaKv = new KeyValue(row, WALEdit.METAFAMILY, + Bytes.toBytes("I just increment counter"), Bytes.toBytes(counter)); walEdit.add(metaKv); } @@ -395,7 +382,7 @@ public class TestRowProcessorEndpoint { } public static class FriendsOfFriendsProcessor extends - BaseRowProcessor { + BaseRowProcessor { byte[] row = null; byte[] person = null; final Set result = new HashSet<>(); @@ -419,7 +406,7 @@ public class TestRowProcessorEndpoint { @Override public FriendsOfFriendsProcessorResponse getResult() { FriendsOfFriendsProcessorResponse.Builder builder = - FriendsOfFriendsProcessorResponse.newBuilder(); + FriendsOfFriendsProcessorResponse.newBuilder(); builder.addAllResult(result); return builder.build(); } @@ -430,8 +417,8 @@ public class TestRowProcessorEndpoint { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { List kvs = new ArrayList<>(); { // First scan to get friends of the person Scan scan = new Scan(row, row); @@ -444,7 +431,7 @@ public class TestRowProcessorEndpoint { for (Cell kv : kvs) { byte[] friends = CellUtil.cloneValue(kv); for (byte f : friends) { - scan.addColumn(FAM, new byte[]{f}); + scan.addColumn(FAM, new byte[] { f }); } } doScan(region, scan, kvs); @@ -453,7 +440,7 @@ public class TestRowProcessorEndpoint { result.clear(); for (Cell kv : kvs) { for (byte b : CellUtil.cloneValue(kv)) { - result.add((char)b + ""); + result.add((char) b + ""); } } } @@ -461,7 +448,7 @@ public class TestRowProcessorEndpoint { @Override public FriendsOfFriendsProcessorRequest getRequestData() throws IOException { 
FriendsOfFriendsProcessorRequest.Builder builder = - FriendsOfFriendsProcessorRequest.newBuilder(); + FriendsOfFriendsProcessorRequest.newBuilder(); builder.setPerson(ByteStringer.wrap(person)); builder.setRow(ByteStringer.wrap(row)); builder.addAllResult(result); @@ -470,8 +457,7 @@ public class TestRowProcessorEndpoint { } @Override - public void initialize(FriendsOfFriendsProcessorRequest request) - throws IOException { + public void initialize(FriendsOfFriendsProcessorRequest request) throws IOException { this.person = request.getPerson().toByteArray(); this.row = request.getRow().toByteArray(); result.clear(); @@ -479,8 +465,8 @@ public class TestRowProcessorEndpoint { } } - public static class RowSwapProcessor extends - BaseRowProcessor { + public static class RowSwapProcessor + extends BaseRowProcessor { byte[] row1 = new byte[0]; byte[] row2 = new byte[0]; @@ -514,8 +500,8 @@ public class TestRowProcessorEndpoint { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { // Override the time to avoid race-condition in the unit test caused by // inacurate timer on some machines @@ -541,19 +527,17 @@ public class TestRowProcessorEndpoint { List> kvs = new ArrayList<>(2); kvs.add(kvs1); kvs.add(kvs2); - byte[][] rows = new byte[][]{row1, row2}; + byte[][] rows = new byte[][] { row1, row2 }; for (int i = 0; i < kvs.size(); ++i) { for (Cell kv : kvs.get(i)) { // Delete from the current row and add to the other row Delete d = new Delete(rows[i]); - KeyValue kvDelete = - new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - kv.getTimestamp(), KeyValue.Type.Delete); + KeyValue kvDelete = new KeyValue(rows[i], CellUtil.cloneFamily(kv), + CellUtil.cloneQualifier(kv), kv.getTimestamp(), KeyValue.Type.Delete); d.add(kvDelete); Put p = new Put(rows[1 - i]); - KeyValue kvAdd = - new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - now, CellUtil.cloneValue(kv)); + KeyValue kvAdd = new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv), + CellUtil.cloneQualifier(kv), now, CellUtil.cloneValue(kv)); p.add(kvAdd); mutations.add(d); walEdit.add(kvDelete); @@ -583,8 +567,8 @@ public class TestRowProcessorEndpoint { } } - public static class TimeoutProcessor extends - BaseRowProcessor { + public static class TimeoutProcessor + extends BaseRowProcessor { byte[] row = new byte[0]; /** @@ -607,8 +591,8 @@ public class TestRowProcessorEndpoint { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { try { // Sleep for a long time so it timeout Thread.sleep(100 * 1000L); @@ -663,11 +647,9 @@ public class TestRowProcessorEndpoint { byte[] col = CellUtil.cloneQualifier(kv); byte[] val = CellUtil.cloneValue(kv); if (Bytes.equals(col, COUNTER)) { - out.append(Bytes.toStringBinary(col) + ":" + - Bytes.toInt(val) + " "); + out.append(Bytes.toStringBinary(col) + ":" + Bytes.toInt(val) + " "); } else { - out.append(Bytes.toStringBinary(col) + ":" + - Bytes.toStringBinary(val) + " "); + out.append(Bytes.toStringBinary(col) + ":" + Bytes.toStringBinary(val) + " "); } } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 08e127fdab9..ef7e87ed32d 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -82,11 +82,11 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestSecureExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureExport.class); + HBaseClassTestRule.forClass(TestSecureExport.class); private static final Logger LOG = LoggerFactory.getLogger(TestSecureExport.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -123,25 +123,20 @@ public class TestSecureExport { private static final String TOPSECRET = "topsecret"; @Rule public final TestName name = new TestName(); + private static void setUpKdcServer() throws Exception { KDC = UTIL.setupMiniKdc(KEYTAB_FILE); USERNAME = UserGroupInformation.getLoginUser().getShortUserName(); SERVER_PRINCIPAL = USERNAME + "/" + LOCALHOST; HTTP_PRINCIPAL = "HTTP/" + LOCALHOST; - KDC.createPrincipal(KEYTAB_FILE, - SERVER_PRINCIPAL, - HTTP_PRINCIPAL, - USER_ADMIN + "/" + LOCALHOST, - USER_OWNER + "/" + LOCALHOST, - USER_RX + "/" + LOCALHOST, - USER_RO + "/" + LOCALHOST, - USER_XO + "/" + LOCALHOST, - USER_NONE + "/" + LOCALHOST); + KDC.createPrincipal(KEYTAB_FILE, SERVER_PRINCIPAL, HTTP_PRINCIPAL, USER_ADMIN + "/" + LOCALHOST, + USER_OWNER + "/" + LOCALHOST, USER_RX + "/" + LOCALHOST, USER_RO + "/" + LOCALHOST, + USER_XO + "/" + LOCALHOST, USER_NONE + "/" + LOCALHOST); } private static User getUserByLogin(final String user) throws IOException { - return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI( - getPrinciple(user), KEYTAB_FILE.getAbsolutePath())); + return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI(getPrinciple(user), + KEYTAB_FILE.getAbsolutePath())); } private static String getPrinciple(final String user) { @@ -150,28 +145,27 @@ public class TestSecureExport { private static void setUpClusterKdc() throws Exception { HBaseKerberosUtils.setSecuredConfiguration(UTIL.getConfiguration(), - SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); + SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); HBaseKerberosUtils.setSSLConfiguration(UTIL, TestSecureExport.class); UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - UTIL.getConfiguration().get( - CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + Export.class.getName()); + UTIL.getConfiguration().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + + Export.class.getName()); } private static void addLabels(final Configuration conf, final List users, - final List labels) throws Exception { - PrivilegedExceptionAction action - = () -> { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels.toArray(new String[labels.size()])); - for (String user : users) { - VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); - } - } catch (Throwable t) { - throw new IOException(t); + final List labels) throws Exception { + PrivilegedExceptionAction action = () -> { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels.toArray(new 
String[labels.size()])); + for (String user : users) { + VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); } - return null; - }; + } catch (Throwable t) { + throw new IOException(t); + } + return null; + }; getUserByLogin(USER_ADMIN).runAs(action); } @@ -197,7 +191,7 @@ public class TestSecureExport { @BeforeClass public static void beforeClass() throws Exception { UserProvider.setUserProviderForTesting(UTIL.getConfiguration(), - HadoopSecurityEnabledUserProviderForTesting.class); + HadoopSecurityEnabledUserProviderForTesting.class); setUpKdcServer(); SecureTestUtil.enableSecurity(UTIL.getConfiguration()); UTIL.getConfiguration().setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); @@ -209,14 +203,10 @@ public class TestSecureExport { UTIL.waitUntilAllRegionsAssigned(VisibilityConstants.LABELS_TABLE_NAME); UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME, 50000); UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME, 50000); - SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.EXEC, - Permission.Action.READ, - Permission.Action.WRITE); + SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action.EXEC, Permission.Action.READ, Permission.Action.WRITE); addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER), - Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); + Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); } @AfterClass @@ -228,49 +218,39 @@ public class TestSecureExport { } /** - * Test the ExportEndpoint's access levels. The {@link Export} test is ignored - * since the access exceptions cannot be collected from the mappers. + * Test the ExportEndpoint's access levels. The {@link Export} test is ignored since the access + * exceptions cannot be collected from the mappers. 
*/ @Test public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); - TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) - .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); - SecureTestUtil.grantOnTable(UTIL, USER_RO, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ); - SecureTestUtil.grantOnTable(UTIL, USER_RX, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ, - Permission.Action.EXEC); - SecureTestUtil.grantOnTable(UTIL, USER_XO, - TableName.valueOf(exportTable), null, null, - Permission.Action.EXEC); + TableDescriptor exportHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).setOwnerString(USER_OWNER) + .build(); + SecureTestUtil.createTable(UTIL, exportHtd, new byte[][] { Bytes.toBytes("s") }); + SecureTestUtil.grantOnTable(UTIL, USER_RO, TableName.valueOf(exportTable), null, null, + Permission.Action.READ); + SecureTestUtil.grantOnTable(UTIL, USER_RX, TableName.valueOf(exportTable), null, null, + Permission.Action.READ, Permission.Action.EXEC); + SecureTestUtil.grantOnTable(UTIL, USER_XO, TableName.valueOf(exportTable), null, null, + Permission.Action.EXEC); assertEquals(4, PermissionStorage - .getTablePermissions(UTIL.getConfiguration(), TableName.valueOf(exportTable)).size()); + .getTablePermissions(UTIL.getConfiguration(), TableName.valueOf(exportTable)).size()); AccessTestAction putAction = () -> { Put p = new Put(ROW1); p.addColumn(FAMILYA, Bytes.toBytes("qual_0"), NOW, QUAL); p.addColumn(FAMILYA, Bytes.toBytes("qual_1"), NOW, QUAL); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p); } return null; }; // no hdfs access. 
- SecureTestUtil.verifyAllowed(putAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER)); - SecureTestUtil.verifyDenied(putAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), - getUserByLogin(USER_RX), - getUserByLogin(USER_NONE)); + SecureTestUtil.verifyAllowed(putAction, getUserByLogin(USER_ADMIN), getUserByLogin(USER_OWNER)); + SecureTestUtil.verifyDenied(putAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), + getUserByLogin(USER_RX), getUserByLogin(USER_NONE)); final FileSystem fs = UTIL.getDFSCluster().getFileSystem(); final Path openDir = fs.makeQualified(new Path("testAccessCase")); @@ -279,9 +259,9 @@ public class TestSecureExport { final Path output = fs.makeQualified(new Path(openDir, "output")); AccessTestAction exportAction = () -> { try { - String[] args = new String[]{exportTable, output.toString()}; - Map result - = Export.run(new Configuration(UTIL.getConfiguration()), args); + String[] args = new String[] { exportTable, output.toString() }; + Map result = + Export.run(new Configuration(UTIL.getConfiguration()), args); long rowCount = 0; long cellCount = 0; for (Export.Response r : result.values()) { @@ -305,7 +285,7 @@ public class TestSecureExport { assertEquals("Unexpected file owner", currentUserName, outputDirFileStatus.getOwner()); FileStatus[] outputFileStatus = fs.listStatus(new Path(openDir, "output")); - for (FileStatus fileStatus: outputFileStatus) { + for (FileStatus fileStatus : outputFileStatus) { assertEquals("Unexpected file owner", currentUserName, fileStatus.getOwner()); } } else { @@ -315,14 +295,10 @@ public class TestSecureExport { clearOutput(output); } }; - SecureTestUtil.verifyDenied(exportAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), + SecureTestUtil.verifyDenied(exportAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), getUserByLogin(USER_NONE)); - SecureTestUtil.verifyAllowed(exportAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER), - getUserByLogin(USER_RX)); + SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_ADMIN), + getUserByLogin(USER_OWNER), getUserByLogin(USER_RX)); AccessTestAction deleteAction = () -> { UTIL.deleteTable(TableName.valueOf(exportTable)); return null; @@ -336,12 +312,11 @@ public class TestSecureExport { public void testVisibilityLabels() throws IOException, Throwable { final String exportTable = name.getMethodName() + "_export"; final String importTable = name.getMethodName() + "_import"; - final TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(exportTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) - .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + final TableDescriptor exportHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(exportTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).setOwnerString(USER_OWNER) + .build(); + SecureTestUtil.createTable(UTIL, exportHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction putAction = () -> { Put p1 = new Put(ROW1); p1.addColumn(FAMILYA, QUAL, NOW, QUAL); @@ -353,7 +328,7 @@ public class TestSecureExport { p3.addColumn(FAMILYA, QUAL, NOW, QUAL); p3.setCellVisibility(new CellVisibility("!" 
+ CONFIDENTIAL + " & " + TOPSECRET)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p1); t.put(p2); t.put(p3); @@ -370,7 +345,7 @@ public class TestSecureExport { for (final Pair, Integer> labelsAndRowCount : labelsAndRowCounts) { final List labels = labelsAndRowCount.getFirst(); final int rowCount = labelsAndRowCount.getSecond(); - //create a open permission directory. + // create a open permission directory. final Path openDir = new Path("testAccessCase"); final FileSystem fs = openDir.getFileSystem(UTIL.getConfiguration()); fs.mkdirs(openDir); @@ -381,10 +356,9 @@ public class TestSecureExport { labels.forEach(v -> buf.append(v).append(",")); buf.deleteCharAt(buf.length() - 1); try { - String[] args = new String[]{ - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), - exportTable, - output.toString(),}; + String[] args = + new String[] { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), + exportTable, output.toString(), }; Export.run(new Configuration(UTIL.getConfiguration()), args); return null; } catch (ServiceException | IOException ex) { @@ -394,20 +368,17 @@ public class TestSecureExport { } }; SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_OWNER)); - final TableDescriptor importHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) - .setOwnerString(USER_OWNER) - .build(); - SecureTestUtil.createTable(UTIL, importHtd, new byte[][]{Bytes.toBytes("s")}); + final TableDescriptor importHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)).setOwnerString(USER_OWNER) + .build(); + SecureTestUtil.createTable(UTIL, importHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction importAction = () -> { - String[] args = new String[]{ - "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, - importTable, - output.toString() - }; - assertEquals(0, ToolRunner.run( - new Configuration(UTIL.getConfiguration()), new Import(), args)); + String[] args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + importTable, output.toString() }; + assertEquals(0, + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new Import(), args)); return null; }; SecureTestUtil.verifyAllowed(importAction, getUserByLogin(USER_OWNER)); @@ -415,8 +386,8 @@ public class TestSecureExport { Scan scan = new Scan(); scan.setAuthorizations(new Authorizations(labels)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table table = conn.getTable(importHtd.getTableName()); - ResultScanner scanner = table.getScanner(scan)) { + Table table = conn.getTable(importHtd.getTableName()); + ResultScanner scanner = table.getScanner(scan)) { int count = 0; for (Result r : scanner) { ++count; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java index 170a303845b..0d75f6e625a 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,18 +33,18 @@ public class TestCoprocessorRpcUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorRpcUtils.class); + HBaseClassTestRule.forClass(TestCoprocessorRpcUtils.class); @Test public void testServiceName() throws Exception { // verify that we de-namespace build in HBase rpc services Descriptors.ServiceDescriptor authService = - AuthenticationProtos.AuthenticationService.getDescriptor(); + AuthenticationProtos.AuthenticationService.getDescriptor(); assertEquals(authService.getName(), CoprocessorRpcUtils.getServiceName(authService)); // non-hbase rpc services should remain fully qualified Descriptors.ServiceDescriptor dummyService = - DummyRegionServerEndpointProtos.DummyService.getDescriptor(); + DummyRegionServerEndpointProtos.DummyService.getDescriptor(); assertEquals(dummyService.getFullName(), CoprocessorRpcUtils.getServiceName(dummyService)); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java index 0d15f93d9f5..84596b4b431 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Table; @@ -58,16 +57,15 @@ public class SecureBulkLoadEndpointClient { try { CoprocessorRpcChannel channel = table.coprocessorService(HConstants.EMPTY_START_ROW); SecureBulkLoadProtos.SecureBulkLoadService instance = - ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel); + ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel); ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PrepareBulkLoadRequest request = - PrepareBulkLoadRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); + PrepareBulkLoadRequest request = PrepareBulkLoadRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); instance.prepareBulkLoad(controller, request, rpcCallback); @@ -86,20 +84,17 @@ public class SecureBulkLoadEndpointClient { try { CoprocessorRpcChannel channel = table.coprocessorService(HConstants.EMPTY_START_ROW); SecureBulkLoadProtos.SecureBulkLoadService instance = - ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel); + ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel); ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); CleanupBulkLoadRequest request = - CleanupBulkLoadRequest.newBuilder() - .setBulkToken(bulkToken).build(); + CleanupBulkLoadRequest.newBuilder().setBulkToken(bulkToken).build(); - 
instance.cleanupBulkLoad(controller, - request, - rpcCallback); + instance.cleanupBulkLoad(controller, request, rpcCallback); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -110,46 +105,39 @@ public class SecureBulkLoadEndpointClient { } public boolean bulkLoadHFiles(final List> familyPaths, - final Token userToken, final String bulkToken, final byte[] startRow) - throws IOException { + final Token userToken, final String bulkToken, final byte[] startRow) throws IOException { // we never want to send a batch of HFiles to all regions, thus cannot call // HTable#coprocessorService methods that take start and end rowkeys; see HBASE-9639 try { CoprocessorRpcChannel channel = table.coprocessorService(startRow); SecureBulkLoadProtos.SecureBulkLoadService instance = - ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel); + ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel); - DelegationToken protoDT = - DelegationToken.newBuilder().build(); - if(userToken != null) { + DelegationToken protoDT = DelegationToken.newBuilder().build(); + if (userToken != null) { protoDT = - DelegationToken.newBuilder() - .setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) - .setPassword(ByteStringer.wrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + DelegationToken.newBuilder().setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) + .setPassword(ByteStringer.wrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); } List protoFamilyPaths = - new ArrayList<>(familyPaths.size()); - for(Pair el: familyPaths) { + new ArrayList<>(familyPaths.size()); + for (Pair el : familyPaths) { protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder() - .setFamily(ByteStringer.wrap(el.getFirst())) - .setPath(el.getSecond()).build()); + .setFamily(ByteStringer.wrap(el.getFirst())).setPath(el.getSecond()).build()); } SecureBulkLoadProtos.SecureBulkLoadHFilesRequest request = - SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder() - .setFsToken(protoDT) - .addAllFamilyPath(protoFamilyPaths) - .setBulkToken(bulkToken).build(); + SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder().setFsToken(protoDT) + .addAllFamilyPath(protoFamilyPaths).setBulkToken(bulkToken).build(); ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.secureBulkLoadHFiles(controller, - request, - rpcCallback); + CoprocessorRpcUtils.BlockingRpcCallback< + SecureBulkLoadProtos.SecureBulkLoadHFilesResponse> rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.secureBulkLoadHFiles(controller, request, rpcCallback); SecureBulkLoadProtos.SecureBulkLoadHFilesResponse response = rpcCallback.get(); if (controller.failedOnException()) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java index 49697b83150..8e3a69b87b0 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java +++ 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java @@ -59,19 +59,19 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegi * removed when old non-secure client for backward compatibility is not supported. */ @RunWith(Parameterized.class) -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) @Ignore // BROKEN. FIX OR REMOVE. public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionServerBulkLoad { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHRegionServerBulkLoadWithOldSecureEndpoint.class); + HBaseClassTestRule.forClass(TestHRegionServerBulkLoadWithOldSecureEndpoint.class); public TestHRegionServerBulkLoadWithOldSecureEndpoint(int duration) { super(duration); } private static final Logger LOG = - LoggerFactory.getLogger(TestHRegionServerBulkLoadWithOldSecureEndpoint.class); + LoggerFactory.getLogger(TestHRegionServerBulkLoadWithOldSecureEndpoint.class); @BeforeClass public static void setUpBeforeClass() throws IOException { @@ -86,15 +86,14 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS private TableName tableName; public AtomicHFileLoader(TableName tableName, TestContext ctx, byte[][] targetFamilies) - throws IOException { + throws IOException { super(ctx); this.tableName = tableName; } public void doAnAction() throws Exception { long iteration = numBulkLoads.getAndIncrement(); - Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", - iteration)); + Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", iteration)); // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); @@ -112,20 +111,19 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS Table table = conn.getTable(tableName); final String bulkToken = new SecureBulkLoadEndpointClient(table).prepareBulkLoad(tableName); RpcControllerFactory rpcControllerFactory = new RpcControllerFactory(UTIL.getConfiguration()); - ClientServiceCallable callable = - new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug("Going to connect to server " + getLocation() + " for row " + - Bytes.toStringBinary(getRow())); - try (Table table = conn.getTable(getTableName())) { - boolean loaded = new SecureBulkLoadEndpointClient(table).bulkLoadHFiles(famPaths, - null, bulkToken, getLocation().getRegionInfo().getStartKey()); - } - return null; + ClientServiceCallable callable = new ClientServiceCallable(conn, tableName, + Bytes.toBytes("aaa"), rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { + @Override + protected Void rpcCall() throws Exception { + LOG.debug("Going to connect to server " + getLocation() + " for row " + + Bytes.toStringBinary(getRow())); + try (Table table = conn.getTable(getTableName())) { + boolean loaded = new SecureBulkLoadEndpointClient(table).bulkLoadHFiles(famPaths, null, + bulkToken, getLocation().getRegionInfo().getStartKey()); } - }; + return null; + } + }; RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf); RpcRetryingCaller caller = factory. 
newCaller(); caller.callWithRetries(callable, Integer.MAX_VALUE); @@ -134,16 +132,14 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS if (numBulkLoads.get() % 5 == 0) { // 5 * 50 = 250 open file handles! callable = new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { + rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { @Override protected Void rpcCall() throws Exception { - LOG.debug("compacting " + getLocation() + " for row " - + Bytes.toStringBinary(getRow())); + LOG.debug("compacting " + getLocation() + " for row " + Bytes.toStringBinary(getRow())); AdminProtos.AdminService.BlockingInterface server = conn.getAdmin(getLocation().getServerName()); - CompactRegionRequest request = - RequestConverter.buildCompactRegionRequest( - getLocation().getRegionInfo().getRegionName(), true, null); + CompactRegionRequest request = RequestConverter + .buildCompactRegionRequest(getLocation().getRegionInfo().getRegionName(), true, null); server.compactRegion(null, request); numCompactions.incrementAndGet(); return null; @@ -155,7 +151,7 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS } void runAtomicBulkloadTest(TableName tableName, int millisToRun, int numScanners) - throws Exception { + throws Exception { setupTable(tableName, 10); TestContext ctx = new TestContext(UTIL.getConfiguration()); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java index bc368e3c5a9..6cfeaf89a5f 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java @@ -66,11 +66,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestServerCustomProtocol { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServerCustomProtocol.class); + HBaseClassTestRule.forClass(TestServerCustomProtocol.class); private static final Logger LOG = LoggerFactory.getLogger(TestServerCustomProtocol.class); static final String WHOAREYOU = "Who are you?"; @@ -96,27 +96,27 @@ public class TestServerCustomProtocol { @Override public void ping(RpcController controller, PingRequest request, - RpcCallback done) { + RpcCallback done) { this.counter++; done.run(PingResponse.newBuilder().setPong("pong").build()); } @Override public void count(RpcController controller, CountRequest request, - RpcCallback done) { + RpcCallback done) { done.run(CountResponse.newBuilder().setCount(this.counter).build()); } @Override - public void increment(RpcController controller, - IncrementCountRequest request, RpcCallback done) { + public void increment(RpcController controller, IncrementCountRequest request, + RpcCallback done) { this.counter += request.getDiff(); done.run(IncrementCountResponse.newBuilder().setCount(this.counter).build()); } @Override public void hello(RpcController controller, HelloRequest request, - RpcCallback done) { + RpcCallback done) { if (!request.hasName()) { done.run(HelloResponse.newBuilder().setResponse(WHOAREYOU).build()); } else if 
(request.getName().equals(NOBODY)) { @@ -128,7 +128,7 @@ public class TestServerCustomProtocol { @Override public void noop(RpcController controller, NoopRequest request, - RpcCallback done) { + RpcCallback done) { done.run(NoopResponse.newBuilder().build()); } @@ -188,10 +188,10 @@ public class TestServerCustomProtocol { @Test public void testSingleProxy() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = ping(table, null, null); + Map results = ping(table, null, null); // There are three regions so should get back three results. assertEquals(3, results.size()); - for (Map.Entry e: results.entrySet()) { + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", "pong", e.getValue()); } hello(table, "George", HELLO + "George"); @@ -200,9 +200,8 @@ public class TestServerCustomProtocol { LOG.info("Who are you"); hello(table, NOBODY, null); LOG.info(NOBODY); - Map intResults = table.coprocessorService(PingProtos.PingService.class, - null, null, - new Batch.Call() { + Map intResults = table.coprocessorService(PingProtos.PingService.class, null, + null, new Batch.Call() { @Override public Integer call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = @@ -212,109 +211,104 @@ public class TestServerCustomProtocol { } }); int count = -1; - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertTrue(e.getValue() > 0); count = e.getValue(); } final int diff = 5; - intResults = table.coprocessorService(PingProtos.PingService.class, - null, null, + intResults = table.coprocessorService(PingProtos.PingService.class, null, null, new Batch.Call() { @Override public Integer call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.increment(null, - PingProtos.IncrementCountRequest.newBuilder().setDiff(diff).build(), - rpcCallback); + PingProtos.IncrementCountRequest.newBuilder().setDiff(diff).build(), rpcCallback); return rpcCallback.get().getCount(); } }); // There are three regions so should get back three results. 
assertEquals(3, results.size()); - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertEquals(e.getValue().intValue(), count + diff); } table.close(); } - private Map hello(final Table table, final String send, final String response) - throws ServiceException, Throwable { - Map results = hello(table, send); - for (Map.Entry e: results.entrySet()) { + private Map hello(final Table table, final String send, final String response) + throws ServiceException, Throwable { + Map results = hello(table, send); + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", response, e.getValue()); } return results; } - private Map hello(final Table table, final String send) - throws ServiceException, Throwable { + private Map hello(final Table table, final String send) + throws ServiceException, Throwable { return hello(table, send, null, null); } - private Map hello(final Table table, final String send, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingProtos.PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingProtos.PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); - if (send != null) { - builder.setName(send); - } - instance.hello(null, builder.build(), rpcCallback); - PingProtos.HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? r.getResponse(): null; - } - }); - } - - private Map compoundOfHelloAndPing(final Table table, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingProtos.PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingProtos.PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); - // Call ping on same instance. Use result calling hello on same instance. - builder.setName(doPing(instance)); - instance.hello(null, builder.build(), rpcCallback); - PingProtos.HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? r.getResponse(): null; - } - }); - } - - private Map noop(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map hello(final Table table, final String send, final byte[] start, + final byte[] end) throws ServiceException, Throwable { return table.coprocessorService(PingProtos.PingService.class, start, end, - new Batch.Call() { - @Override - public String call(PingProtos.PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PingProtos.NoopRequest.Builder builder = PingProtos.NoopRequest.newBuilder(); - instance.noop(null, builder.build(), rpcCallback); - rpcCallback.get(); - // Looks like null is expected when void. 
That is what the test below is looking for - return null; + new Batch.Call() { + @Override + public String call(PingProtos.PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); + if (send != null) { + builder.setName(send); } - }); + instance.hello(null, builder.build(), rpcCallback); + PingProtos.HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); + } + + private Map compoundOfHelloAndPing(final Table table, final byte[] start, + final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(PingProtos.PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingProtos.PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); + // Call ping on same instance. Use result calling hello on same instance. + builder.setName(doPing(instance)); + instance.hello(null, builder.build(), rpcCallback); + PingProtos.HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); + } + + private Map noop(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { + return table.coprocessorService(PingProtos.PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingProtos.PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + PingProtos.NoopRequest.Builder builder = PingProtos.NoopRequest.newBuilder(); + instance.noop(null, builder.build(), rpcCallback); + rpcCallback.get(); + // Looks like null is expected when void. That is what the test below is looking for + return null; + } + }); } @Test public void testSingleMethod() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = table.coprocessorService(PingProtos.PingService.class, - null, ROW_A, - new Batch.Call() { + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = table.coprocessorService(PingProtos.PingService.class, null, + ROW_A, new Batch.Call() { @Override public String call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = @@ -340,10 +334,10 @@ public class TestServerCustomProtocol { @Test public void testRowRange() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - for (HRegionLocation e: locator.getAllRegionLocations()) { - LOG.info("Region " + e.getRegionInfo().getRegionNameAsString() - + ", servername=" + e.getServerName()); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + for (HRegionLocation e : locator.getAllRegionLocations()) { + LOG.info("Region " + e.getRegionInfo().getRegionNameAsString() + ", servername=" + + e.getServerName()); } // Here are what regions looked like on a run: // @@ -351,7 +345,7 @@ public class TestServerCustomProtocol { // test,bbb,1355943549661.110393b070dd1ed93441e0bc9b3ffb7e. 
// test,ccc,1355943549665.c3d6d125141359cbbd2a43eaff3cdf74. - Map results = ping(table, null, ROW_A); + Map results = ping(table, null, ROW_A); // Should contain first region only. assertEquals(1, results.size()); verifyRegionResults(locator, results, ROW_A); @@ -374,7 +368,7 @@ public class TestServerCustomProtocol { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); // test explicit start + end results = ping(table, ROW_AB, ROW_BC); @@ -384,7 +378,7 @@ public class TestServerCustomProtocol { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); // test single region results = ping(table, ROW_B, ROW_BC); @@ -393,15 +387,15 @@ public class TestServerCustomProtocol { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_A, true); assertNull("Should be missing region for row aaa (prior to start)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); } } - private Map ping(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map ping(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { return table.coprocessorService(PingProtos.PingService.class, start, end, new Batch.Call() { @Override @@ -413,7 +407,7 @@ public class TestServerCustomProtocol { private static String doPing(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getPong(); } @@ -421,8 +415,8 @@ public class TestServerCustomProtocol { @Test public void testCompoundCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); verifyRegionResults(locator, results, "Hello, pong", ROW_A); verifyRegionResults(locator, results, "Hello, pong", ROW_B); verifyRegionResults(locator, results, "Hello, pong", ROW_C); @@ -432,8 +426,8 @@ public class TestServerCustomProtocol { @Test public void testNullCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, null, ROW_A, ROW_C); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = hello(table, null, ROW_A, ROW_C); verifyRegionResults(locator, results, "Who are you?", ROW_A); verifyRegionResults(locator, results, "Who are you?", ROW_B); 
verifyRegionResults(locator, results, "Who are you?", ROW_C); @@ -443,8 +437,8 @@ public class TestServerCustomProtocol { @Test public void testNullReturn() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, "nobody", ROW_A, ROW_C); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = hello(table, "nobody", ROW_A, ROW_C); verifyRegionResults(locator, results, null, ROW_A); verifyRegionResults(locator, results, null, ROW_B); verifyRegionResults(locator, results, null, ROW_C); @@ -454,7 +448,7 @@ public class TestServerCustomProtocol { @Test public void testEmptyReturnType() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE)) { - Map results = noop(table, ROW_A, ROW_C); + Map results = noop(table, ROW_A, ROW_C); assertEquals("Should have results from three regions", 3, results.size()); // all results should be null for (Object v : results.values()) { @@ -463,24 +457,22 @@ public class TestServerCustomProtocol { } } - private void verifyRegionResults(RegionLocator table, Map results, byte[] row) - throws Exception { + private void verifyRegionResults(RegionLocator table, Map results, byte[] row) + throws Exception { verifyRegionResults(table, results, "pong", row); } private void verifyRegionResults(RegionLocator regionLocator, Map results, - String expected, byte[] row) throws Exception { - for (Map.Entry e: results.entrySet()) { - LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + - ", result key=" + Bytes.toString(e.getKey()) + - ", value=" + e.getValue()); + String expected, byte[] row) throws Exception { + for (Map.Entry e : results.entrySet()) { + LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + ", result key=" + + Bytes.toString(e.getKey()) + ", value=" + e.getValue()); } HRegionLocation loc = regionLocator.getRegionLocation(row, true); byte[] region = loc.getRegionInfo().getRegionName(); - assertTrue("Results should contain region " + - Bytes.toStringBinary(region) + " for row '" + Bytes.toStringBinary(row)+ "'", - results.containsKey(region)); - assertEquals("Invalid result for row '"+Bytes.toStringBinary(row)+"'", - expected, results.get(region)); + assertTrue("Results should contain region " + Bytes.toStringBinary(region) + " for row '" + + Bytes.toStringBinary(row) + "'", results.containsKey(region)); + assertEquals("Invalid result for row '" + Bytes.toStringBinary(row) + "'", expected, + results.get(region)); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java index d66ab4cb9ea..b194d705ad1 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -112,7 +112,7 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication } private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListIterator) - throws Exception { + throws Exception { LOG.debug("mimicSyncUpAfterBulkLoad"); shutDownTargetHBaseCluster(); @@ -163,16 +163,16 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication LOG.info("SyncUpAfterBulkLoad succeeded at retry = " + i); break; } else { - LOG.debug("SyncUpAfterBulkLoad failed at retry = " + i + - ", with rowCount_ht1TargetPeer1 =" + rowCountHt1TargetAtPeer1 + - " and rowCount_ht2TargetAtPeer1 =" + rowCountHt2TargetAtPeer1); + LOG.debug("SyncUpAfterBulkLoad failed at retry = " + i + ", with rowCount_ht1TargetPeer1 =" + + rowCountHt1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 =" + + rowCountHt2TargetAtPeer1); } Thread.sleep(SLEEP_TIME); } } private void loadAndReplicateHFiles(boolean verifyReplicationOnSlave, - Iterator randomHFileRangeListIterator) throws Exception { + Iterator randomHFileRangeListIterator) throws Exception { LOG.debug("loadAndReplicateHFiles"); // Load 100 + 3 hfiles to t1_syncup. @@ -210,7 +210,7 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication } private void loadAndValidateHFileReplication(String testName, byte[] row, byte[] fam, - Table source, byte[][][] hfileRanges, int numOfRows) throws Exception { + Table source, byte[][][] hfileRanges, int numOfRows) throws Exception { Path dir = UTIL1.getDataTestDirOnTestFS(testName); FileSystem fs = UTIL1.getTestFileSystem(); dir = dir.makeQualified(fs); @@ -230,7 +230,7 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication } private void wait(Table target, int expectedCount, String msg) - throws IOException, InterruptedException { + throws IOException, InterruptedException { for (int i = 0; i < NB_RETRIES; i++) { int rowCountHt2TargetAtPeer1 = UTIL2.countRows(target); if (i == NB_RETRIES - 1) { diff --git a/hbase-examples/README.txt b/hbase-examples/README.txt index 7ac2d13bd9c..894586615c0 100644 --- a/hbase-examples/README.txt +++ b/hbase-examples/README.txt @@ -64,7 +64,7 @@ Example code. ON PROTOBUFS This maven module has core protobuf definition files ('.protos') used by hbase -examples. +examples. Generation of java files from protobuf .proto files included here is done as part of the build. 
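The example protos referenced above generate Java service stubs that clients invoke through Table.coprocessorService(). For orientation, here is a minimal client-side sketch of that pattern, modelled on the doPing() helper reformatted in the TestServerCustomProtocol hunks earlier in this patch; it is a sketch only, and the PingProtos package name below is an assumption that may differ from the actual generated code.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
// Generated from the Ping example .proto; the package name is assumed here.
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos;

public final class PingClientSketch {
  /** Calls PingService.ping() on every region of the table; returns region name -> pong. */
  static Map<byte[], String> pingAllRegions(Table table) throws Throwable {
    return table.coprocessorService(PingProtos.PingService.class, null, null,
      new Batch.Call<PingProtos.PingService, String>() {
        @Override
        public String call(PingProtos.PingService instance) throws IOException {
          // Blocking callback collects the single response from this region's endpoint.
          CoprocessorRpcUtils.BlockingRpcCallback<PingProtos.PingResponse> rpcCallback =
            new CoprocessorRpcUtils.BlockingRpcCallback<>();
          instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback);
          return rpcCallback.get().getPong();
        }
      });
  }
}

Each entry in the returned map keys the responding region's name bytes to the value produced by the Batch.Call, which is how the tests above assert one "pong" result per region.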
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index b6b3dfc4dc1..d5d193c50fc 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-examples Apache HBase - Examples Examples of HBase usage - - - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - ${surefire.firstPartGroups} - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -211,8 +162,8 @@ org.apache.hbase hbase-http - test test-jar + test org.slf4j @@ -245,6 +196,55 @@ test + + + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + ${surefire.firstPartGroups} + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -289,10 +289,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-external-blockcache Apache HBase - External Block Cache - - HBase module that provides out of process block cache. + HBase module that provides out of process block cache. Currently Memcached is the reference implementation for external block cache. External block caches allow HBase to take advantage of other more complex caches that can live longer than the HBase regionserver process and are not necessarily tied to a single computer - life time. However external block caches add in extra operational overhead. - - + life time. However external block caches add in extra operational overhead. 
+ + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-server + + + net.spy + spymemcached + true + + + org.slf4j + slf4j-api + + + junit + junit + test + + + @@ -60,10 +81,10 @@ versionInfo-source - generate-sources add-source + generate-sources ${project.build.directory}/generated-sources/java @@ -91,31 +112,6 @@ - - - org.apache.hbase - hbase-common - - - org.apache.hbase - hbase-server - - - net.spy - spymemcached - true - - - org.slf4j - slf4j-api - - - junit - junit - test - - - @@ -128,10 +124,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -170,7 +166,8 @@ - !hadoop.profile + + !hadoop.profile @@ -192,10 +189,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-hadoop-compat Apache HBase - Hadoop Compatibility - - Interfaces to be implemented in order to smooth - over hadoop version differences - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - + Interfaces to be implemented in order to smooth + over hadoop version differences @@ -125,8 +100,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + - + skipHadoopCompatTests @@ -149,15 +145,14 @@ - org.eclipse.m2e lifecycle-mapping - - + diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java index d29e7bc1d3b..0aef77b7370 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Iterator; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +36,8 @@ public class CompatibilityFactory { /** * This is a static only class don't let any instance be created. 
*/ - protected CompatibilityFactory() {} + protected CompatibilityFactory() { + } public static synchronized T getInstance(Class klass) { T instance = null; @@ -48,10 +47,9 @@ public class CompatibilityFactory { instance = it.next(); if (it.hasNext()) { StringBuilder msg = new StringBuilder(); - msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) - .append(", other implementations: {"); + msg.append("ServiceLoader provided more than one implementation for class: ").append(klass) + .append(", using implementation: ").append(instance.getClass()) + .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java index 0e633b8b15f..ccd4d4f97ad 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,39 +15,39 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be - * created. + * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be + * created. */ @InterfaceAudience.Private public class CompatibilitySingletonFactory extends CompatibilityFactory { public static enum SingletonStorage { INSTANCE; + private final Object lock = new Object(); private final Map instances = new HashMap<>(); } + private static final Logger LOG = LoggerFactory.getLogger(CompatibilitySingletonFactory.class); /** * This is a static only class don't let anyone create an instance. 
*/ - protected CompatibilitySingletonFactory() { } + protected CompatibilitySingletonFactory() { + } /** * Get the singleton instance of Any classes defined by compatibiliy jar's - * * @return the singleton */ @SuppressWarnings("unchecked") @@ -62,9 +62,8 @@ public class CompatibilitySingletonFactory extends CompatibilityFactory { if (it.hasNext()) { StringBuilder msg = new StringBuilder(); msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) - .append(", other implementations: {"); + .append(klass).append(", using implementation: ").append(instance.getClass()) + .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java index c0a8519c10c..af7e87483d1 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -44,23 +43,21 @@ public interface MetricsIOSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String FS_READ_TIME_HISTO_KEY = "fsReadTime"; String FS_PREAD_TIME_HISTO_KEY = "fsPReadTime"; String FS_WRITE_HISTO_KEY = "fsWriteTime"; String CHECKSUM_FAILURES_KEY = "fsChecksumFailureCount"; - String FS_READ_TIME_HISTO_DESC - = "Latency of HFile's sequential reads on this region server in milliseconds"; - String FS_PREAD_TIME_HISTO_DESC - = "Latency of HFile's positional reads on this region server in milliseconds"; - String FS_WRITE_TIME_HISTO_DESC - = "Latency of HFile's writes on this region server in milliseconds"; + String FS_READ_TIME_HISTO_DESC = + "Latency of HFile's sequential reads on this region server in milliseconds"; + String FS_PREAD_TIME_HISTO_DESC = + "Latency of HFile's positional reads on this region server in milliseconds"; + String FS_WRITE_TIME_HISTO_DESC = + "Latency of HFile's writes on this region server in milliseconds"; String CHECKSUM_FAILURES_DESC = "Number of checksum failures for the HBase HFile checksums at the" - + " HBase level (separate from HDFS checksums)"; - + + " HBase level (separate from HDFS checksums)"; /** * Update the fs sequential read time histogram diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java index 3ba8cd5d0ae..e3dc724d8b7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 69bd040e7f9..a1ec313f97a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; @@ -25,20 +23,15 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses"; - String AUTHORIZATION_SUCCESSES_DESC = - "Number of authorization successes."; + String AUTHORIZATION_SUCCESSES_DESC = "Number of authorization successes."; String AUTHORIZATION_FAILURES_NAME = "authorizationFailures"; - String AUTHORIZATION_FAILURES_DESC = - "Number of authorization failures."; + String AUTHORIZATION_FAILURES_DESC = "Number of authorization failures."; String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses"; - String AUTHENTICATION_SUCCESSES_DESC = - "Number of authentication successes."; + String AUTHENTICATION_SUCCESSES_DESC = "Number of authentication successes."; String AUTHENTICATION_FAILURES_NAME = "authenticationFailures"; - String AUTHENTICATION_FAILURES_DESC = - "Number of authentication failures."; + String AUTHENTICATION_FAILURES_DESC = "Number of authentication failures."; String AUTHENTICATION_FALLBACKS_NAME = "authenticationFallbacks"; - String AUTHENTICATION_FALLBACKS_DESC = - "Number of fallbacks to insecure authentication."; + String AUTHENTICATION_FALLBACKS_DESC = "Number of fallbacks to insecure authentication."; String SENT_BYTES_NAME = "sentBytes"; String SENT_BYTES_DESC = "Number of bytes sent."; String RECEIVED_BYTES_NAME = "receivedBytes"; @@ -54,27 +47,26 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String TOTAL_CALL_TIME_NAME = "totalCallTime"; String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time."; String QUEUE_SIZE_NAME = "queueSize"; - String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + - "parsed and is waiting to run or is currently being executed."; + String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + + "parsed and is waiting to run or is currently being executed."; String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; - String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + - "parsed requests waiting in scheduler to be executed"; + String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + + "parsed requests waiting in scheduler to be executed"; String PRIORITY_QUEUE_NAME = 
"numCallsInPriorityQueue"; String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue"; String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue"; - String REPLICATION_QUEUE_DESC = - "Number of calls in the replication call queue waiting to be run"; + String REPLICATION_QUEUE_DESC = "Number of calls in the replication call queue waiting to be run"; String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String METAPRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String WRITE_QUEUE_NAME = "numCallsInWriteQueue"; - String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + - "parsed requests waiting in scheduler to be executed"; + String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + + "parsed requests waiting in scheduler to be executed"; String READ_QUEUE_NAME = "numCallsInReadQueue"; - String READ_QUEUE_DESC = "Number of calls in the read call queue; " + - "parsed requests waiting in scheduler to be executed"; + String READ_QUEUE_DESC = "Number of calls in the read call queue; " + + "parsed requests waiting in scheduler to be executed"; String SCAN_QUEUE_NAME = "numCallsInScanQueue"; - String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + - "parsed requests waiting in scheduler to be executed"; + String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + + "parsed requests waiting in scheduler to be executed"; String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections"; String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections."; String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler"; @@ -92,17 +84,16 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String NUM_ACTIVE_SCAN_HANDLER_NAME = "numActiveScanHandler"; String NUM_ACTIVE_SCAN_HANDLER_DESC = "Number of active scan rpc handlers."; String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped"; - String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general queue which " + - "were dropped by CoDel RPC executor"; + String NUM_GENERAL_CALLS_DROPPED_DESC = + "Total number of calls in general queue which " + "were dropped by CoDel RPC executor"; String NUM_LIFO_MODE_SWITCHES_NAME = "numLifoModeSwitches"; - String NUM_LIFO_MODE_SWITCHES_DESC = "Total number of calls in general queue which " + - "were served from the tail of the queue"; + String NUM_LIFO_MODE_SWITCHES_DESC = + "Total number of calls in general queue which " + "were served from the tail of the queue"; // Direct Memory Usage metrics String NETTY_DM_USAGE_NAME = "nettyDirectMemoryUsage"; String NETTY_DM_USAGE_DESC = "Current Netty direct memory usage."; - void authorizationSuccess(); void authorizationFailure(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java index 7f1415ae86f..027c197333a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; @@ -34,18 +32,16 @@ public abstract class MetricsHBaseServerSourceFactory { static final String METRICS_DESCRIPTION = "Metrics about HBase Server IPC"; /** - * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. - * - * JMX_CONTEXT will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX + * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. JMX_CONTEXT + * will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX */ static final String METRICS_JMX_CONTEXT_SUFFIX = ",sub=" + METRICS_NAME; abstract MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper); /** - * From the name of the class that's starting up create the - * context that an IPC source should register itself. - * + * From the name of the class that's starting up create the context that an IPC source should + * register itself. * @param serverName The name of the class that's starting up. * @return The Camel Cased context name. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java index db30c0348c3..136294883b6 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java index 279454245a6..76a70edbddf 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -55,10 +54,11 @@ public interface MetricsAssignmentManagerSource extends BaseSource { String RIT_COUNT_DESC = "Current number of Regions In Transition (Gauge)."; String RIT_COUNT_OVER_THRESHOLD_DESC = - "Current number of Regions In Transition over threshold time (Gauge)."; - String RIT_OLDEST_AGE_DESC = "Timestamp in milliseconds of the oldest Region In Transition (Gauge)."; + "Current number of Regions In Transition over threshold time (Gauge)."; + String RIT_OLDEST_AGE_DESC = + "Timestamp in milliseconds of the oldest Region In Transition (Gauge)."; String RIT_DURATION_DESC = - "Total durations in milliseconds for all Regions in Transition (Histogram)."; + "Total durations in milliseconds for all Regions in Transition (Histogram)."; // HBCK report metrics String ORPHAN_REGIONS_ON_RS = "orphanRegionsOnRS"; @@ -79,7 +79,7 @@ public interface MetricsAssignmentManagerSource extends BaseSource { String OVERLAPS_DESC = "Current number of Overlaps (Gauge)."; String UNKNOWN_SERVER_REGIONS_DESC = "Current number of Unknown Server Regions (Gauge)."; String EMPTY_REGION_INFO_REGIONS_DESC = - "Current number of Regions with Empty Region Info (Gauge)."; + "Current number of Regions with Empty Region Info (Gauge)."; String ASSIGN_METRIC_PREFIX = "assign"; String UNASSIGN_METRIC_PREFIX = "unassign"; @@ -94,21 +94,18 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of regions in transition. - * * @param ritCount count of the regions in transition. */ void setRIT(int ritCount); /** * Set the count of the number of regions that have been in transition over the threshold time. - * * @param ritCountOverThreshold number of regions in transition for longer than threshold. */ void setRITCountOverThreshold(int ritCountOverThreshold); /** * Set the oldest region in transition. - * * @param age age of the oldest RIT. */ void setRITOldestAge(long age); @@ -121,35 +118,30 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of orphan regions on RS. - * * @param orphanRegionsOnRs count of the orphan regions on RS in HBCK chore report. */ void setOrphanRegionsOnRs(int orphanRegionsOnRs); /** * Set the number of orphan regions on FS. - * * @param orphanRegionsOnFs count of the orphan regions on FS in HBCK chore report. */ void setOrphanRegionsOnFs(int orphanRegionsOnFs); /** * Set the number of inconsistent regions. - * * @param inconsistentRegions count of the inconsistent regions in HBCK chore report. */ void setInconsistentRegions(int inconsistentRegions); /** * Set the number of holes. - * * @param holes count of the holes in CatalogJanitor Consistency report. */ void setHoles(int holes); /** * Set the number of overlaps. - * * @param overlaps count of the overlaps in CatalogJanitor Consistency report. */ void setOverlaps(int overlaps); @@ -157,14 +149,14 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of unknown server regions. * @param unknownServerRegions count of the unknown server regions in CatalogJanitor Consistency - * report. + * report. */ void setUnknownServerRegions(int unknownServerRegions); /** * Set the number of regions with empty region info. * @param emptyRegionInfoRegions count of the regions with empty region info in CatalogJanitor - * Consistency report. + * Consistency report. 
*/ void setEmptyRegionInfoRegions(int emptyRegionInfoRegions); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java index 91dc71a034c..53ed8a25ed0 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -54,7 +53,6 @@ public interface MetricsMasterFileSystemSource extends BaseSource { String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()"; String SPLIT_SIZE_DESC = "Size of WAL files being split"; - void updateMetaWALSplitTime(long time); void updateMetaWALSplitSize(long size); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java index db4f25ec03e..07ceaaf2e24 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java index 197f9f9fe75..a399e53b4fb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java index 8450432ade6..c0b4c73cc61 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,16 +41,16 @@ public interface MetricsMasterQuotaSource extends BaseSource { String NUM_REGION_SIZE_REPORTS_DESC = "Number of Region sizes reported"; String QUOTA_OBSERVER_CHORE_TIME_NAME = "quotaObserverChoreTime"; String QUOTA_OBSERVER_CHORE_TIME_DESC = - "Histogram for the time in millis for the QuotaObserverChore"; + "Histogram for the time in millis for the QuotaObserverChore"; String SNAPSHOT_OBSERVER_CHORE_TIME_NAME = "snapshotQuotaObserverChoreTime"; String SNAPSHOT_OBSERVER_CHORE_TIME_DESC = - "Histogram for the time in millis for the SnapshotQuotaObserverChore"; + "Histogram for the time in millis for the SnapshotQuotaObserverChore"; String SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME = "snapshotObserverSizeComputationTime"; String SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC = - "Histogram for the time in millis to compute the size of each snapshot"; + "Histogram for the time in millis to compute the size of each snapshot"; String SNAPSHOT_OBSERVER_FETCH_TIME_NAME = "snapshotObserverSnapshotFetchTime"; String SNAPSHOT_OBSERVER_FETCH_TIME_DESC = - "Histogram for the time in millis to fetch all snapshots from HBase"; + "Histogram for the time in millis to fetch all snapshots from HBase"; String TABLE_QUOTA_USAGE_NAME = "tableSpaceQuotaOverview"; String TABLE_QUOTA_USAGE_DESC = "A JSON summary of the usage of all tables with space quotas"; String NS_QUOTA_USAGE_NAME = "namespaceSpaceQuotaOverview"; @@ -57,40 +58,35 @@ public interface MetricsMasterQuotaSource extends BaseSource { /** * Updates the metric tracking the number of space quotas defined in the system. - * * @param numSpaceQuotas The number of space quotas defined */ void updateNumSpaceQuotas(long numSpaceQuotas); /** - * Updates the metric tracking the number of tables the master has computed to be in - * violation of their space quota. 
- * + * Updates the metric tracking the number of tables the master has computed to be in violation of + * their space quota. * @param numTablesInViolation The number of tables violating a space quota */ void updateNumTablesInSpaceQuotaViolation(long numTablesInViolation); /** - * Updates the metric tracking the number of namespaces the master has computed to be in - * violation of their space quota. - * + * Updates the metric tracking the number of namespaces the master has computed to be in violation + * of their space quota. * @param numNamespacesInViolation The number of namespaces violating a space quota */ void updateNumNamespacesInSpaceQuotaViolation(long numNamespacesInViolation); /** - * Updates the metric tracking the number of region size reports the master is currently - * retaining in memory. - * + * Updates the metric tracking the number of region size reports the master is currently retaining + * in memory. * @param numCurrentRegionSizeReports The number of region size reports the master is holding in - * memory + * memory */ void updateNumCurrentSpaceQuotaRegionSizeReports(long numCurrentRegionSizeReports); /** - * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} - * which runs periodically. - * + * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} which + * runs periodically. * @param time The execution time of the chore in milliseconds */ void incrementSpaceQuotaObserverChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java index 2dcd945ea81..a53652b0f3d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index 3bf2fddbc21..36322fd9258 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -69,7 +68,8 @@ public interface MetricsMasterSource extends BaseSource { String CLUSTER_REQUESTS_NAME = "clusterRequests"; String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; String MASTER_START_TIME_DESC = "Master Start Time"; - String MASTER_FINISHED_INITIALIZATION_TIME_DESC = "Timestamp when Master has finished initializing"; + String MASTER_FINISHED_INITIALIZATION_TIME_DESC = + "Timestamp when Master has finished initializing"; String AVERAGE_LOAD_DESC = "AverageLoad"; String LIVE_REGION_SERVERS_DESC = "Names of live RegionServers"; String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers"; @@ -88,7 +88,6 @@ public interface MetricsMasterSource extends BaseSource { /** * Increment the number of requests the cluster has seen. - * * @param inc Ammount to increment the total by. */ void incRequests(final long inc); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java index fce574a2cf0..bfdf348b34f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java index fc95be8f42a..051ad4335c2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import java.util.Map; @@ -41,63 +40,54 @@ public interface MetricsMasterWrapper { /** * Get Average Load - * * @return Average Load */ double getAverageLoad(); /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String[] getCoprocessors(); /** * Get hbase master start time - * * @return Start time of master in milliseconds */ long getStartTime(); /** * Get the hbase master active time - * * @return Time in milliseconds when master became active */ long getActiveTime(); /** * Whether this master is the active master - * * @return True if this is the active master */ boolean getIsActiveMaster(); /** * Get the live region servers - * * @return Live region servers */ String getRegionServers(); /** * Get the number of live region servers - * * @return number of Live region servers */ @@ -105,28 +95,24 @@ public interface MetricsMasterWrapper { /** * Get the dead region servers - * * @return Dead region Servers */ String getDeadRegionServers(); /** * Get the number of dead region servers - * * @return number of Dead region Servers */ int getNumDeadRegionServers(); /** * Get the draining region servers - * * @return Draining region server */ String getDrainingRegionServers(); /** * Get the number of draining region servers - * * @return number of draining region servers */ int getNumDrainingRegionServers(); @@ -149,12 +135,12 @@ public interface MetricsMasterWrapper { /** * Gets the space usage and limit for each table. */ - Map> getTableSpaceUtilization(); + Map> getTableSpaceUtilization(); /** * Gets the space usage and limit for each namespace. */ - Map> getNamespaceSpaceUtilization(); + Map> getNamespaceSpaceUtilization(); /** * Get the time in Millis when the master finished initializing/becoming the active master diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java index 15315b6c3ef..88e21621f10 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java index 6b8c40ba512..502de8859ae 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public interface MetricsBalancerSource extends BaseSource { +public interface MetricsBalancerSource extends BaseSource { /** * The name of the metrics diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java index 6eecc1233fd..f3318c40ab5 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface extends the basic metrics balancer source to add a function - * to report metrics that related to stochastic load balancer. The purpose is to - * offer an insight to the internal cost calculations that can be useful to tune - * the balancer. For details, refer to HBASE-13965 + * This interface extends the basic metrics balancer source to add a function to report metrics that + * related to stochastic load balancer. The purpose is to offer an insight to the internal cost + * calculations that can be useful to tune the balancer. 
For details, refer to HBASE-13965 */ @InterfaceAudience.Private public interface MetricsStochasticBalancerSource extends MetricsBalancerSource { @@ -38,5 +36,5 @@ public interface MetricsStochasticBalancerSource extends MetricsBalancerSource { * Reports stochastic load balancer costs to JMX */ public void updateStochasticCost(String tableName, String costFunctionName, - String costFunctionDesc, Double value); + String costFunctionDesc, Double value); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java index 76391bb8d7b..3ed8cce6385 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; /** - * BaseSource for dynamic metrics to announce to Metrics2. - * In hbase-hadoop{1|2}-compat there is an implementation of this interface. + * BaseSource for dynamic metrics to announce to Metrics2. In hbase-hadoop{1|2}-compat there is an + * implementation of this interface. */ @InterfaceAudience.Private public interface BaseSource { @@ -36,7 +35,6 @@ public interface BaseSource { /** * Set a gauge to a specific value. - * * @param gaugeName the name of the gauge * @param value the value */ @@ -44,7 +42,6 @@ public interface BaseSource { /** * Add some amount to a gauge. - * * @param gaugeName the name of the gauge * @param delta the amount to change the gauge by. */ @@ -52,7 +49,6 @@ public interface BaseSource { /** * Subtract some amount from a gauge. - * * @param gaugeName the name of the gauge * @param delta the amount to change the gauge by. */ @@ -60,14 +56,12 @@ public interface BaseSource { /** * Remove a metric and no longer announce it. - * * @param key Name of the gauge to remove. */ void removeMetric(String key); /** * Add some amount to a counter. - * * @param counterName the name of the counter * @param delta the amount to change the counter by. */ @@ -75,17 +69,14 @@ public interface BaseSource { /** * Add some value to a histogram. - * - * @param name the name of the histogram + * @param name the name of the histogram * @param value the value to add to the histogram */ void updateHistogram(String name, long value); - /** - * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ String getMetricsContext(); @@ -96,20 +87,19 @@ public interface BaseSource { String getMetricsDescription(); /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. 
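Editor's aside (not part of the patch): a minimal sketch of how a balancer component might publish a cost-function reading through the MetricsStochasticBalancerSource.updateStochasticCost method reflowed above. The helper class, field, and description string are illustrative assumptions; only the interface method comes from the patch.

import org.apache.hadoop.hbase.master.balancer.MetricsStochasticBalancerSource;

// Hypothetical reporter; assumes a MetricsStochasticBalancerSource instance is injected.
class StochasticCostReporter {
  private final MetricsStochasticBalancerSource source;

  StochasticCostReporter(MetricsStochasticBalancerSource source) {
    this.source = source;
  }

  // Publish one per-table cost-function value so it is visible over JMX (see HBASE-13965).
  void report(String tableName, String costFunctionName, double cost) {
    source.updateStochasticCost(tableName, costFunctionName,
      "Cost reported by " + costFunctionName, cost); // double auto-boxes to the Double parameter
  }
}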
With the default context being Hadoop -> HBase */ String getMetricsJmxContext(); /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ String getMetricsName(); default MetricRegistryInfo getMetricRegistryInfo() { - return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), - getMetricsContext(), getMetricsJmxContext(), true); + return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), getMetricsContext(), + getMetricsJmxContext(), true); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java index 3c5f898fc29..afa5f17361a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -26,19 +25,20 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public interface ExceptionTrackingSource extends BaseSource { - String EXCEPTIONS_NAME="exceptions"; - String EXCEPTIONS_DESC="Exceptions caused by requests"; - String EXCEPTIONS_TYPE_DESC="Number of requests that resulted in the specified type of Exception"; - String EXCEPTIONS_OOO_NAME="exceptions.OutOfOrderScannerNextException"; - String EXCEPTIONS_BUSY_NAME="exceptions.RegionTooBusyException"; - String EXCEPTIONS_UNKNOWN_NAME="exceptions.UnknownScannerException"; - String EXCEPTIONS_SCANNER_RESET_NAME="exceptions.ScannerResetException"; - String EXCEPTIONS_SANITY_NAME="exceptions.FailedSanityCheckException"; - String EXCEPTIONS_MOVED_NAME="exceptions.RegionMovedException"; - String EXCEPTIONS_NSRE_NAME="exceptions.NotServingRegionException"; + String EXCEPTIONS_NAME = "exceptions"; + String EXCEPTIONS_DESC = "Exceptions caused by requests"; + String EXCEPTIONS_TYPE_DESC = + "Number of requests that resulted in the specified type of Exception"; + String EXCEPTIONS_OOO_NAME = "exceptions.OutOfOrderScannerNextException"; + String EXCEPTIONS_BUSY_NAME = "exceptions.RegionTooBusyException"; + String EXCEPTIONS_UNKNOWN_NAME = "exceptions.UnknownScannerException"; + String EXCEPTIONS_SCANNER_RESET_NAME = "exceptions.ScannerResetException"; + String EXCEPTIONS_SANITY_NAME = "exceptions.FailedSanityCheckException"; + String EXCEPTIONS_MOVED_NAME = "exceptions.RegionMovedException"; + String EXCEPTIONS_NSRE_NAME = "exceptions.NotServingRegionException"; String EXCEPTIONS_MULTI_TOO_LARGE_NAME = "exceptions.multiResponseTooLarge"; - String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + - "rest of the requests will have to be retried."; + String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + + "rest of the requests will have to be retried."; String EXCEPTIONS_CALL_QUEUE_TOO_BIG = "exceptions.callQueueTooBig"; String EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC = "Call queue is full"; String EXCEPTIONS_QUOTA_EXCEEDED = "exceptions.quotaExceeded"; @@ -54,18 +54,32 @@ public interface ExceptionTrackingSource extends BaseSource { * Different types of exceptions */ void 
outOfOrderException(); + void failedSanityException(); + void movedRegionException(); + void notServingRegionException(); + void unknownScannerException(); + void scannerResetException(); + void tooBusyException(); + void multiActionTooLargeException(); + void callQueueTooBigException(); + void quotaExceededException(); + void rpcThrottlingException(); + void callDroppedException(); + void callTimedOut(); + void requestTooBigException(); + void otherExceptions(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java index 6cb542586c9..d488eeb0512 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -24,7 +23,7 @@ import org.apache.yetus.audience.InterfaceAudience; * Interface for sources that will export JvmPauseMonitor metrics */ @InterfaceAudience.Private -public interface JvmPauseMonitorSource { +public interface JvmPauseMonitorSource { String INFO_THRESHOLD_COUNT_KEY = "pauseInfoThresholdExceeded"; String INFO_THRESHOLD_COUNT_DESC = "Count of INFO level pause threshold alerts"; @@ -36,7 +35,7 @@ public interface JvmPauseMonitorSource { String PAUSE_TIME_WITHOUT_GC_KEY = "pauseTimeWithoutGc"; String PAUSE_TIME_WITHOUT_GC_DESC = - "Histogram for excessive pause times without GC activity detected"; + "Histogram for excessive pause times without GC activity detected"; /** * Increment the INFO level threshold exceeded count @@ -52,14 +51,12 @@ public interface JvmPauseMonitorSource { /** * Update the pause time histogram where GC activity was detected. - * * @param t time it took */ void updatePauseTimeWithGc(long t); /** * Update the pause time histogram where GC activity was not detected. - * * @param t time it took */ void updatePauseTimeWithoutGc(long t); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java index 575ca31c644..3cd7613fb21 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,26 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; import org.apache.yetus.audience.InterfaceAudience; /** - * Object that will register an mbean with the underlying metrics implementation. + * Object that will register an mbean with the underlying metrics implementation. 
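Editor's aside (not part of the patch): a small usage sketch for the BaseSource methods whose Javadoc is cleaned up above. The helper class and the metric names ("totalRequests", "requestTime", "activeHandlers") are illustrative assumptions; only the setGauge/incCounters/updateHistogram calls come from the interface.

import org.apache.hadoop.hbase.metrics.BaseSource;

// Hypothetical instrumentation helper wrapping any BaseSource implementation.
class RequestInstrumentation {
  private final BaseSource metrics;

  RequestInstrumentation(BaseSource metrics) {
    this.metrics = metrics;
  }

  void onRequest(long elapsedMillis, long activeHandlers) {
    metrics.incCounters("totalRequests", 1);               // bump a counter by a delta
    metrics.updateHistogram("requestTime", elapsedMillis); // record a latency sample
    metrics.setGauge("activeHandlers", activeHandlers);    // set a point-in-time gauge
  }
}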
*/ @InterfaceAudience.Private -public interface MBeanSource { +public interface MBeanSource { /** * Register an mbean with the underlying metrics system * @param serviceName Metrics service/system name * @param metricsName name of the metrics object to expose - * @param theMbean the actual MBean + * @param theMbean the actual MBean * @return ObjectName from jmx */ - ObjectName register(String serviceName, String metricsName, - Object theMbean); + ObjectName register(String serviceName, String metricsName, Object theMbean); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java index 064c9ca3f9a..b90b6a3c674 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -41,9 +40,9 @@ public class OperationMetrics { Preconditions.checkNotNull(metricNamePrefix); /** - * TODO: As of now, Metrics description cannot be added/ registered with - * {@link MetricRegistry}. As metric names are unambiguous but concise, descriptions of - * metrics need to be made available someplace for users. + * TODO: As of now, Metrics description cannot be added/ registered with {@link MetricRegistry}. + * As metric names are unambiguous but concise, descriptions of metrics need to be made + * available someplace for users. */ submittedCounter = registry.counter(metricNamePrefix + SUBMITTED_COUNT); timeHisto = registry.histogram(metricNamePrefix + TIME); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java index 868acd84f7e..451fc3b800b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -101,16 +100,16 @@ public interface MetricsHeapMemoryManagerSource extends BaseSource { String UNBLOCKED_FLUSH_DESC = "Histogram for the number of unblocked flushes in the memstore"; String INC_MEMSTORE_TUNING_NAME = "increaseMemStoreSize"; String INC_MEMSTORE_TUNING_DESC = - "Histogram for the heap memory tuner expanding memstore global size limit in bytes"; + "Histogram for the heap memory tuner expanding memstore global size limit in bytes"; String DEC_MEMSTORE_TUNING_NAME = "decreaseMemStoreSize"; String DEC_MEMSTORE_TUNING_DESC = - "Histogram for the heap memory tuner shrinking memstore global size limit in bytes"; + "Histogram for the heap memory tuner shrinking memstore global size limit in bytes"; String INC_BLOCKCACHE_TUNING_NAME = "increaseBlockCacheSize"; String INC_BLOCKCACHE_TUNING_DESC = - "Histogram for the heap memory tuner expanding blockcache max heap size in bytes"; + "Histogram for the heap memory tuner expanding blockcache max heap size in bytes"; String DEC_BLOCKCACHE_TUNING_NAME = "decreaseBlockCacheSize"; String DEC_BLOCKCACHE_TUNING_DESC = - "Histogram for the heap memory tuner shrinking blockcache max heap size in bytes"; + "Histogram for the heap memory tuner shrinking blockcache max heap size in bytes"; // Gauges String BLOCKED_FLUSH_GAUGE_NAME = "blockedFlushGauge"; @@ -125,8 +124,8 @@ public interface MetricsHeapMemoryManagerSource extends BaseSource { // Counters String DO_NOTHING_COUNTER_NAME = "tunerDoNothingCounter"; String DO_NOTHING_COUNTER_DESC = - "The number of times that tuner neither expands memstore global size limit nor expands blockcache max size"; + "The number of times that tuner neither expands memstore global size limit nor expands blockcache max size"; String ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME = "aboveHeapOccupancyLowWaterMarkCounter"; String ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC = - "The number of times that heap occupancy percent is above low watermark"; + "The number of times that heap occupancy percent is above low watermark"; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java index 1d9e1ac1965..733ff60df71 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsRegionAggregateSource extends BaseSource { @@ -52,14 +51,12 @@ public interface MetricsRegionAggregateSource extends BaseSource { /** * Register a MetricsRegionSource as being open. - * * @param source the source for the region being opened. */ void register(MetricsRegionSource source); /** * Remove a region's source. This is called when a region is closed. - * * @param source The region to remove. */ void deregister(MetricsRegionSource source); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java index 93990ef1bd4..991187bc98e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,31 +40,28 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { String REGION_SIZE_REPORTING_CHORE_TIME_NAME = "regionSizeReportingChoreTime"; /** - * Updates the metric tracking how many tables this RegionServer has marked as in violation - * of their space quota. + * Updates the metric tracking how many tables this RegionServer has marked as in violation of + * their space quota. */ void updateNumTablesInSpaceQuotaViolation(long tablesInViolation); /** * Updates the metric tracking how many tables this RegionServer has received * {@code SpaceQuotaSnapshot}s for. - * * @param numSnapshots The number of {@code SpaceQuotaSnapshot}s received from the Master. 
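Editor's aside (not part of the patch): a sketch of how a quota chore might feed the MetricsRegionServerQuotaSource methods whose Javadoc is reflowed in this hunk. The helper class and the way the counts are obtained are assumptions; the three metric calls are taken from the interface as shown.

import org.apache.hadoop.hbase.regionserver.MetricsRegionServerQuotaSource;

// Hypothetical end-of-chore reporting for the space quota refresher.
class QuotaRefresherReporting {
  private final MetricsRegionServerQuotaSource quotaMetrics;

  QuotaRefresherReporting(MetricsRegionServerQuotaSource quotaMetrics) {
    this.quotaMetrics = quotaMetrics;
  }

  void afterRefresh(long startMillis, long snapshotsReceived, long tablesInViolation) {
    // How long the hbase:quota refresh took, and what it found.
    quotaMetrics.incrementSpaceQuotaRefresherChoreTime(System.currentTimeMillis() - startMillis);
    quotaMetrics.updateNumTableSpaceQuotaSnapshots(snapshotsReceived);
    quotaMetrics.updateNumTablesInSpaceQuotaViolation(tablesInViolation);
  }
}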
*/ void updateNumTableSpaceQuotaSnapshots(long numSnapshots); /** - * Updates the metric tracking how much time was spent scanning the filesystem to compute - * the size of each region hosted by this RegionServer. - * + * Updates the metric tracking how much time was spent scanning the filesystem to compute the size + * of each region hosted by this RegionServer. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaFileSystemScannerChoreTime(long time); /** - * Updates the metric tracking how much time was spent updating the RegionServer with the - * latest information on space quotas from the {@code hbase:quota} table. - * + * Updates the metric tracking how much time was spent updating the RegionServer with the latest + * information on space quotas from the {@code hbase:quota} table. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaRefresherChoreTime(long time); @@ -71,7 +69,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how many region size reports were sent from this RegionServer to * the Master. These reports contain information on the size of each Region hosted locally. - * * @param numReportsSent The number of region size reports sent */ void incrementNumRegionSizeReportsSent(long numReportsSent); @@ -79,7 +76,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how much time was spent sending region size reports to the Master * by the RegionSizeReportingChore. - * * @param time The execution time in milliseconds. */ void incrementRegionSizeReportingChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 179e53f0199..decdbae20fd 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -50,20 +49,17 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Put time histogram - * * @param t time it took */ void updatePut(long t); /** - * Update the PutBatch time histogram if a batch contains a Put op - * @param t + * Update the PutBatch time histogram if a batch contains a Put op n */ void updatePutBatch(long t); /** * Update the Delete time histogram - * * @param t time it took */ void updateDelete(long t); @@ -94,42 +90,37 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Get time histogram . - * * @param t time it took */ void updateGet(long t); /** * Update the Increment time histogram. - * * @param t time it took */ void updateIncrement(long t); /** * Update the Append time histogram. - * * @param t time it took */ void updateAppend(long t); /** * Update the Replay time histogram. 
- * * @param t time it took */ void updateReplay(long t); /** * Update the scan size. - * * @param scanSize size of the scan */ void updateScanSize(long scanSize); /** * Update the scan time. - * */ + */ void updateScanTime(long t); /** @@ -194,35 +185,35 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the compaction time histogram, both major and minor * @param isMajor whether compaction is a major compaction - * @param t time it took, in milliseconds + * @param t time it took, in milliseconds */ void updateCompactionTime(boolean isMajor, long t); /** * Update the compaction input number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionInputFileCount(boolean isMajor, long c); /** * Update the compaction total input file size histogram * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionInputSize(boolean isMajor, long bytes); /** * Update the compaction output number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionOutputFileCount(boolean isMajor, long c); /** * Update the compaction total output file size * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionOutputSize(boolean isMajor, long bytes); @@ -256,51 +247,49 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String STOREFILE_SIZE_DESC = "Size of storefiles being served."; String TOTAL_REQUEST_COUNT = "totalRequestCount"; String TOTAL_REQUEST_COUNT_DESC = - "Total number of requests this RegionServer has answered; increments the count once for " + - "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + - "of a non-existent row"; + "Total number of requests this RegionServer has answered; increments the count once for " + + "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + + "of a non-existent row"; String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount"; String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC = - "Total number of region requests this RegionServer has answered; counts by row-level " + - "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + - "once per access whether a Put of 1M rows or a Get that returns 1M Results"; + "Total number of region requests this RegionServer has answered; counts by row-level " + + "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + + "once per access whether a Put of 1M rows or a Get that returns 1M Results"; String READ_REQUEST_COUNT = "readRequestCount"; String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount"; String FILTERED_READ_REQUEST_COUNT_DESC = - "Number of read requests this region server has answered."; + "Number of read requests this region server has answered."; String READ_REQUEST_COUNT_DESC = - "Number of read requests with non-empty Results that this RegionServer has answered."; + "Number of read requests with non-empty Results that this RegionServer has answered."; String READ_REQUEST_RATE_PER_SECOND = 
"readRequestRatePerSecond"; String READ_REQUEST_RATE_DESC = - "Rate of answering the read requests by this region server per second."; + "Rate of answering the read requests by this region server per second."; String WRITE_REQUEST_COUNT = "writeRequestCount"; - String WRITE_REQUEST_COUNT_DESC = - "Number of mutation requests this RegionServer has answered."; + String WRITE_REQUEST_COUNT_DESC = "Number of mutation requests this RegionServer has answered."; String WRITE_REQUEST_RATE_PER_SECOND = "writeRequestRatePerSecond"; String WRITE_REQUEST_RATE_DESC = - "Rate of answering the mutation requests by this region server per second."; + "Rate of answering the mutation requests by this region server per second."; String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount"; String CHECK_MUTATE_FAILED_COUNT_DESC = - "Number of Check and Mutate calls that failed the checks."; + "Number of Check and Mutate calls that failed the checks."; String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount"; String CHECK_MUTATE_PASSED_COUNT_DESC = - "Number of Check and Mutate calls that passed the checks."; + "Number of Check and Mutate calls that passed the checks."; String STOREFILE_INDEX_SIZE = "storeFileIndexSize"; String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk."; String STATIC_INDEX_SIZE = "staticIndexSize"; String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; String STATIC_BLOOM_SIZE = "staticBloomSize"; - String STATIC_BLOOM_SIZE_DESC = - "Uncompressed size of the static bloom filters."; + String STATIC_BLOOM_SIZE_DESC = "Uncompressed size of the static bloom filters."; String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount"; String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC = - "Number of mutations that have been sent by clients with the write ahead logging turned off."; + "Number of mutations that have been sent by clients with the write ahead logging turned off."; String DATA_SIZE_WITHOUT_WAL = "mutationsWithoutWALSize"; String DATA_SIZE_WITHOUT_WAL_DESC = - "Size of data that has been sent by clients with the write ahead logging turned off."; + "Size of data that has been sent by clients with the write ahead logging turned off."; String PERCENT_FILES_LOCAL = "percentFilesLocal"; String PERCENT_FILES_LOCAL_DESC = - "The percent of HFiles that are stored on the local hdfs data node."; + "The percent of HFiles that are stored on the local hdfs data node."; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS = "percentFilesLocalSecondaryRegions"; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC = "The percent of HFiles used by secondary regions that are stored on the local hdfs data node."; @@ -311,14 +300,13 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String SMALL_COMPACTION_QUEUE_LENGTH = "smallCompactionQueueLength"; String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions."; String LARGE_COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions with input size " - + "larger than throttle threshold (2.5GB by default)"; + + "larger than throttle threshold (2.5GB by default)"; String SMALL_COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions with input size " - + "smaller than throttle threshold (2.5GB by default)"; + + "smaller than throttle threshold (2.5GB by default)"; String FLUSH_QUEUE_LENGTH = "flushQueueLength"; String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; - String 
BLOCK_CACHE_FREE_DESC = - "Size of the block cache that is not occupied."; + String BLOCK_CACHE_FREE_DESC = "Size of the block cache that is not occupied."; String BLOCK_CACHE_COUNT = "blockCacheCount"; String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache."; String BLOCK_CACHE_SIZE = "blockCacheSize"; @@ -329,26 +317,25 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC = "Count of hit on primary replica in the block cache."; String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount"; String BLOCK_COUNT_MISS_COUNT_DESC = - "Number of requests for a block that missed the block cache."; + "Number of requests for a block that missed the block cache."; String BLOCK_CACHE_PRIMARY_MISS_COUNT = "blockCacheMissCountPrimary"; String BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC = - "Number of requests for a block of primary replica that missed the block cache."; + "Number of requests for a block of primary replica that missed the block cache."; String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; String BLOCK_CACHE_EVICTION_COUNT_DESC = - "Count of the number of blocks evicted from the block cache." + "Count of the number of blocks evicted from the block cache." + "(Not including blocks evicted because of HFile removal)"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT = "blockCacheEvictionCountPrimary"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC = - "Count of the number of blocks evicted from primary replica in the block cache."; + "Count of the number of blocks evicted from primary replica in the block cache."; String BLOCK_CACHE_HIT_PERCENT = "blockCacheCountHitPercent"; - String BLOCK_CACHE_HIT_PERCENT_DESC = - "Percent of block cache requests that are hits"; + String BLOCK_CACHE_HIT_PERCENT_DESC = "Percent of block cache requests that are hits"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = - "The percent of the time that requests with the cache turned on hit the cache."; + "The percent of the time that requests with the cache turned on hit the cache."; String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount"; - String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a block cache " + - "insertion failed. Usually due to size restrictions."; + String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = + "Number of times that a block cache " + "insertion failed. 
Usually due to size restrictions."; String BLOCK_CACHE_DATA_MISS_COUNT = "blockCacheDataMissCount"; String BLOCK_CACHE_ENCODED_DATA_MISS_COUNT = "blockCacheEncodedDataMissCount"; String BLOCK_CACHE_LEAF_INDEX_MISS_COUNT = "blockCacheLeafIndexMissCount"; @@ -397,7 +384,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String CLUSTER_ID_DESC = "Cluster Id"; String UPDATES_BLOCKED_TIME = "updatesBlockedTime"; String UPDATES_BLOCKED_DESC = - "Number of MS updates have been blocked so that the memstore can be flushed."; + "Number of MS updates have been blocked so that the memstore can be flushed."; String DELETE_KEY = "delete"; String CHECK_AND_DELETE_KEY = "checkAndDelete"; String CHECK_AND_PUT_KEY = "checkAndPut"; @@ -419,15 +406,12 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String SLOW_DELETE_KEY = "slowDeleteCount"; String SLOW_INCREMENT_KEY = "slowIncrementCount"; String SLOW_APPEND_KEY = "slowAppendCount"; - String SLOW_PUT_DESC = - "The number of batches containing puts that took over 1000ms to complete"; + String SLOW_PUT_DESC = "The number of batches containing puts that took over 1000ms to complete"; String SLOW_DELETE_DESC = - "The number of batches containing delete(s) that took over 1000ms to complete"; + "The number of batches containing delete(s) that took over 1000ms to complete"; String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete"; - String SLOW_INCREMENT_DESC = - "The number of Increments that took over 1000ms to complete"; - String SLOW_APPEND_DESC = - "The number of Appends that took over 1000ms to complete"; + String SLOW_INCREMENT_DESC = "The number of Increments that took over 1000ms to complete"; + String SLOW_APPEND_DESC = "The number of Appends that took over 1000ms to complete"; String FLUSHED_CELLS = "flushedCellsCount"; String FLUSHED_CELLS_DESC = "The number of cells flushed to disk"; @@ -437,25 +421,23 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String COMPACTED_CELLS_DESC = "The number of cells processed during minor compactions"; String COMPACTED_CELLS_SIZE = "compactedCellsSize"; String COMPACTED_CELLS_SIZE_DESC = - "The total amount of data processed during minor compactions, in bytes"; + "The total amount of data processed during minor compactions, in bytes"; String MAJOR_COMPACTED_CELLS = "majorCompactedCellsCount"; - String MAJOR_COMPACTED_CELLS_DESC = - "The number of cells processed during major compactions"; + String MAJOR_COMPACTED_CELLS_DESC = "The number of cells processed during major compactions"; String MAJOR_COMPACTED_CELLS_SIZE = "majorCompactedCellsSize"; String MAJOR_COMPACTED_CELLS_SIZE_DESC = - "The total amount of data processed during major compactions, in bytes"; + "The total amount of data processed during major compactions, in bytes"; String CELLS_COUNT_COMPACTED_TO_MOB = "cellsCountCompactedToMob"; - String CELLS_COUNT_COMPACTED_TO_MOB_DESC = - "The number of cells moved to mob during compaction"; + String CELLS_COUNT_COMPACTED_TO_MOB_DESC = "The number of cells moved to mob during compaction"; String CELLS_COUNT_COMPACTED_FROM_MOB = "cellsCountCompactedFromMob"; String CELLS_COUNT_COMPACTED_FROM_MOB_DESC = - "The number of cells moved from mob during compaction"; + "The number of cells moved from mob during compaction"; String CELLS_SIZE_COMPACTED_TO_MOB = "cellsSizeCompactedToMob"; String CELLS_SIZE_COMPACTED_TO_MOB_DESC = - "The total amount of cells move to mob during compaction, in bytes"; 
+ "The total amount of cells move to mob during compaction, in bytes"; String CELLS_SIZE_COMPACTED_FROM_MOB = "cellsSizeCompactedFromMob"; String CELLS_SIZE_COMPACTED_FROM_MOB_DESC = - "The total amount of cells move from mob during compaction, in bytes"; + "The total amount of cells move from mob during compaction, in bytes"; String MOB_FLUSH_COUNT = "mobFlushCount"; String MOB_FLUSH_COUNT_DESC = "The number of the flushes in mob-enabled stores"; String MOB_FLUSHED_CELLS_COUNT = "mobFlushedCellsCount"; @@ -481,7 +463,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String HEDGED_READS_DESC = "The number of times we started a hedged read"; String HEDGED_READ_WINS = "hedgedReadWins"; String HEDGED_READ_WINS_DESC = - "The number of times we started a hedged read and a hedged read won"; + "The number of times we started a hedged read and a hedged read won"; String HEDGED_READ_IN_CUR_THREAD = "hedgedReadOpsInCurThread"; String HEDGED_READ_IN_CUR_THREAD_DESC = "The number of times we execute a hedged read" + " in current thread as a fallback for task rejection"; @@ -489,17 +471,15 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String TOTAL_BYTES_READ = "totalBytesRead"; String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS"; String LOCAL_BYTES_READ = "localBytesRead"; - String LOCAL_BYTES_READ_DESC = - "The number of bytes read from the local HDFS DataNode"; + String LOCAL_BYTES_READ_DESC = "The number of bytes read from the local HDFS DataNode"; String SHORTCIRCUIT_BYTES_READ = "shortCircuitBytesRead"; String SHORTCIRCUIT_BYTES_READ_DESC = "The number of bytes read through HDFS short circuit read"; String ZEROCOPY_BYTES_READ = "zeroCopyBytesRead"; - String ZEROCOPY_BYTES_READ_DESC = - "The number of bytes read through HDFS zero copy"; + String ZEROCOPY_BYTES_READ_DESC = "The number of bytes read through HDFS zero copy"; String BLOCKED_REQUESTS_COUNT = "blockedRequestCount"; String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " - + "larger than blockingMemStoreSize"; + + "larger than blockingMemStoreSize"; String SPLIT_KEY = "splitTime"; String SPLIT_REQUEST_KEY = "splitRequestCount"; @@ -519,77 +499,76 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String FLUSHED_MEMSTORE_BYTES_DESC = "Total number of bytes of cells in memstore from flush"; String COMPACTION_TIME = "compactionTime"; - String COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, both major and minor"; + String COMPACTION_TIME_DESC = + "Histogram for the time in millis for compaction, both major and minor"; String COMPACTION_INPUT_FILE_COUNT = "compactionInputFileCount"; - String COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, both major and minor"; + String COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, both major and minor"; String COMPACTION_INPUT_SIZE = "compactionInputSize"; - String COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, both major and minor"; + String COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, both major and minor"; String COMPACTION_OUTPUT_FILE_COUNT = "compactionOutputFileCount"; - String COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, both major and minor"; + String COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram 
for the compaction output number of files, both major and minor"; String COMPACTION_OUTPUT_SIZE = "compactionOutputSize"; - String COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, both major and minor"; + String COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, both major and minor"; String COMPACTED_INPUT_BYTES = "compactedInputBytes"; - String COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, both major and minor"; + String COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, both major and minor"; String COMPACTED_OUTPUT_BYTES = "compactedOutputBytes"; - String COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, both major and minor"; + String COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, both major and minor"; String MAJOR_COMPACTION_TIME = "majorCompactionTime"; - String MAJOR_COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, major only"; + String MAJOR_COMPACTION_TIME_DESC = "Histogram for the time in millis for compaction, major only"; String MAJOR_COMPACTION_INPUT_FILE_COUNT = "majorCompactionInputFileCount"; - String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, major only"; + String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, major only"; String MAJOR_COMPACTION_INPUT_SIZE = "majorCompactionInputSize"; - String MAJOR_COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, major only"; + String MAJOR_COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, major only"; String MAJOR_COMPACTION_OUTPUT_FILE_COUNT = "majorCompactionOutputFileCount"; - String MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, major only"; + String MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram for the compaction output number of files, major only"; String MAJOR_COMPACTION_OUTPUT_SIZE = "majorCompactionOutputSize"; - String MAJOR_COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, major only"; + String MAJOR_COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, major only"; String MAJOR_COMPACTED_INPUT_BYTES = "majorCompactedInputBytes"; - String MAJOR_COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, major only"; + String MAJOR_COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, major only"; String MAJOR_COMPACTED_OUTPUT_BYTES = "majorCompactedOutputBytes"; - String MAJOR_COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, major only"; + String MAJOR_COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, major only"; String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount"; String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this RegionServer has answered."; String RPC_SCAN_REQUEST_COUNT = "rpcScanRequestCount"; String RPC_SCAN_REQUEST_COUNT_DESC = - "Number of rpc scan requests this RegionServer has answered."; + "Number of rpc scan requests this RegionServer has answered."; String RPC_FULL_SCAN_REQUEST_COUNT = "rpcFullScanRequestCount"; String RPC_FULL_SCAN_REQUEST_COUNT_DESC = - "Number of rpc scan requests that were possible full region scans."; + 
"Number of rpc scan requests that were possible full region scans."; String RPC_MULTI_REQUEST_COUNT = "rpcMultiRequestCount"; String RPC_MULTI_REQUEST_COUNT_DESC = - "Number of rpc multi requests this RegionServer has answered."; + "Number of rpc multi requests this RegionServer has answered."; String RPC_MUTATE_REQUEST_COUNT = "rpcMutateRequestCount"; String RPC_MUTATE_REQUEST_COUNT_DESC = - "Number of rpc mutation requests this RegionServer has answered."; + "Number of rpc mutation requests this RegionServer has answered."; String AVERAGE_REGION_SIZE = "averageRegionSize"; String AVERAGE_REGION_SIZE_DESC = - "Average region size over the RegionServer including memstore and storefile sizes."; + "Average region size over the RegionServer including memstore and storefile sizes."; /** Metrics for {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} **/ String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES = "ByteBuffAllocatorHeapAllocationBytes"; String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC = - "Bytes of heap allocation from ByteBuffAllocator"; + "Bytes of heap allocation from ByteBuffAllocator"; String BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES = "ByteBuffAllocatorPoolAllocationBytes"; String BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC = - "Bytes of pool allocation from ByteBuffAllocator"; + "Bytes of pool allocation from ByteBuffAllocator"; String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO = "ByteBuffAllocatorHeapAllocationRatio"; String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC = - "Ratio of heap allocation from ByteBuffAllocator, means heapAllocation/totalAllocation"; + "Ratio of heap allocation from ByteBuffAllocator, means heapAllocation/totalAllocation"; String BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT = "ByteBuffAllocatorTotalBufferCount"; String BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC = "Total buffer count in ByteBuffAllocator"; String BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT = "ByteBuffAllocatorUsedBufferCount"; @@ -599,5 +578,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String ACTIVE_SCANNERS_DESC = "Gauge of currently active scanners"; String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount"; - String SCANNER_LEASE_EXPIRED_COUNT_DESC = "Count of scanners which were expired due to scanner lease timeout"; + String SCANNER_LEASE_EXPIRED_COUNT_DESC = + "Count of scanners which were expired due to scanner lease timeout"; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java index ef33909839c..9a12d75373f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.io.MetricsIOSource; @@ -30,7 +29,6 @@ public interface MetricsRegionServerSourceFactory { /** * Given a wrapper create a MetricsRegionServerSource. 
- * * @param regionServerWrapper The wrapped region server * @return a Metrics Source. */ @@ -38,7 +36,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsRegionSource from a MetricsRegionWrapper. - * * @param wrapper The wrapped region * @return A metrics region source */ @@ -58,8 +55,7 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsTableSource from a MetricsTableWrapper. - * - * @param table The table name + * @param table The table name * @param wrapper The wrapped table aggregate * @return A metrics table source */ @@ -67,7 +63,6 @@ public interface MetricsRegionServerSourceFactory { /** * Get a MetricsTableAggregateSource - * * @return A metrics table aggregate source */ MetricsTableAggregateSource getTableAggregate(); @@ -80,7 +75,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsIOSource from a MetricsIOWrapper. - * * @return A metrics IO source */ MetricsIOSource createIO(MetricsIOWrapper wrapper); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index ca0c6d9a31d..876d49280cb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.List; @@ -35,28 +34,24 @@ public interface MetricsRegionServerWrapper { /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String getCoprocessors(); /** * Get HRegionServer start time - * * @return Start time of RegionServer in milliseconds */ long getStartCode(); @@ -91,9 +86,9 @@ public interface MetricsRegionServerWrapper { */ long getNumWALSlowAppend(); - /** - * Get the number of store files hosted on this region server. - */ + /** + * Get the number of store files hosted on this region server. + */ long getNumStoreFiles(); /** @@ -127,12 +122,12 @@ public interface MetricsRegionServerWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files hosted on this region server + * @return Average age of store files hosted on this region server */ long getAvgStoreFileAge(); /** - * @return Number of reference files on this region server + * @return Number of reference files on this region server */ long getNumReferenceFiles(); @@ -202,8 +197,8 @@ public interface MetricsRegionServerWrapper { long getNumMutationsWithoutWAL(); /** - * Ammount of data in the memstore but not in the WAL because mutations explicitly had their - * WAL turned off. + * Ammount of data in the memstore but not in the WAL because mutations explicitly had their WAL + * turned off. 
*/ long getDataInMemoryWithoutWAL(); @@ -237,8 +232,8 @@ public interface MetricsRegionServerWrapper { int getFlushQueueSize(); /** - * Get the limit size of the off heap memstore (if enabled), otherwise - * get the limit size of the on heap memstore. + * Get the limit size of the off heap memstore (if enabled), otherwise get the limit size of the + * on heap memstore. */ long getMemStoreLimit(); @@ -297,7 +292,6 @@ public interface MetricsRegionServerWrapper { */ long getBlockCachePrimaryEvictedCount(); - /** * Get the percent of all requests that hit the block cache. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index b3a556e3d9f..c3d955592d6 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -45,10 +44,10 @@ public interface MetricsRegionSource extends Comparable { String MAX_COMPACTION_QUEUE_DESC = "Max number of compactions queued for this region"; String FLUSHES_QUEUED_DESC = "Number flushes requested/queued for this region"; String MAX_FLUSH_QUEUE_DESC = "Max number of flushes queued for this region"; - String NUM_BYTES_COMPACTED_DESC = - "Sum of filesize on all files entering a finished, successful or aborted, compaction"; + String NUM_BYTES_COMPACTED_DESC = + "Sum of filesize on all files entering a finished, successful or aborted, compaction"; String NUM_FILES_COMPACTED_DESC = - "Number of files that were input for finished, successful or aborted, compactions"; + "Number of files that were input for finished, successful or aborted, compactions"; String COPROCESSOR_EXECUTION_STATISTICS = "coprocessorExecutionStatistics"; String COPROCESSOR_EXECUTION_STATISTICS_DESC = "Statistics for coprocessor execution times"; String REPLICA_ID = "replicaid"; @@ -81,7 +80,7 @@ public interface MetricsRegionSource extends Comparable { /** * Update time used of resultScanner.next(). - * */ + */ void updateScanTime(long mills); /** @@ -99,5 +98,4 @@ public interface MetricsRegionSource extends Comparable { */ MetricsRegionAggregateSource getAggregateSource(); - } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index 28f8832e749..b20d1c1ec80 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,36 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap an HRegion and export numbers so they can be - * used in MetricsRegionSource + * Interface of class that will wrap an HRegion and export numbers so they can be used in + * MetricsRegionSource */ @InterfaceAudience.Private public interface MetricsRegionWrapper { /** * Get the name of the table the region belongs to. - * * @return The string version of the table name. */ String getTableName(); /** * Get the name of the namespace this table is in. - * @return String version of the namespace. Can't be empty. + * @return String version of the namespace. Can't be empty. */ String getNamespace(); /** * Get the name of the region. - * * @return The encoded name of the region. */ String getRegionName(); @@ -90,12 +86,12 @@ public interface MetricsRegionWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files under this region + * @return Average age of store files under this region */ long getAvgStoreFileAge(); /** - * @return Number of reference files under this region + * @return Number of reference files under this region */ long getNumReferenceFiles(); @@ -113,38 +109,38 @@ public interface MetricsRegionWrapper { long getNumCompactionsCompleted(); /** - * @return Age of the last major compaction + * @return Age of the last major compaction */ long getLastMajorCompactionAge(); /** - * Returns the total number of compactions that have been reported as failed on this region. - * Note that a given compaction can be reported as both completed and failed if an exception - * is thrown in the processing after {@code HRegion.compact()}. + * Returns the total number of compactions that have been reported as failed on this region. Note + * that a given compaction can be reported as both completed and failed if an exception is thrown + * in the processing after {@code HRegion.compact()}. 
*/ long getNumCompactionsFailed(); /** - * @return the total number of compactions that are currently queued(or being executed) at point in - * time + * @return the total number of compactions that are currently queued(or being executed) at point + * in time */ long getNumCompactionsQueued(); /** - * @return the total number of flushes currently queued(being executed) for this region at point in - * time + * @return the total number of flushes currently queued(being executed) for this region at point + * in time */ long getNumFlushesQueued(); /** - * @return the max number of compactions queued for this region - * Note that this metric is updated periodically and hence might miss some data points + * @return the max number of compactions queued for this region Note that this metric is updated + * periodically and hence might miss some data points */ long getMaxCompactionQueueSize(); /** - * @return the max number of flushes queued for this region - * Note that this metric is updated periodically and hence might miss some data points + * @return the max number of flushes queued for this region Note that this metric is updated + * periodically and hence might miss some data points */ long getMaxFlushQueueSize(); @@ -161,8 +157,8 @@ public interface MetricsRegionWrapper { long getStoreRefCount(); /** - * @return the max number of references active on any store file among - * all compacted store files that belong to this region + * @return the max number of references active on any store file among all compacted store files + * that belong to this region */ long getMaxCompactedStoreFileRefCount(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java index f746c98c545..e11f1864f48 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions of a table into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions of a table into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsTableAggregateSource extends BaseSource { @@ -59,7 +58,6 @@ public interface MetricsTableAggregateSource extends BaseSource { /** * Remove a table's source. This is called when regions of a table are closed. 
- * * @param table The table name */ void deleteTableSource(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 2aeb82b0d64..e7d447aef49 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,94 +60,85 @@ public interface MetricsTableLatencies { /** * Update the Put time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updatePut(String tableName, long t); /** * Update the batch Put time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updatePutBatch(String tableName, long t); /** * Update the Delete time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateDelete(String tableName, long t); /** * Update the batch Delete time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateDeleteBatch(String tableName, long t); /** * Update the Get time histogram . - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateGet(String tableName, long t); /** * Update the Increment time histogram. - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateIncrement(String tableName, long t); /** * Update the Append time histogram. - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateAppend(String tableName, long t); /** * Update the scan size. - * * @param tableName The table the metric is for - * @param scanSize size of the scan + * @param scanSize size of the scan */ void updateScanSize(String tableName, long scanSize); /** * Update the scan time. - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateScanTime(String tableName, long t); /** * Update the CheckAndDelete time histogram. 
* @param nameAsString The table the metric is for - * @param time time it took + * @param time time it took */ void updateCheckAndDelete(String nameAsString, long time); /** * Update the CheckAndPut time histogram. * @param nameAsString The table the metric is for - * @param time time it took + * @param time time it took */ void updateCheckAndPut(String nameAsString, long time); /** * Update the CheckAndMutate time histogram. * @param nameAsString The table the metric is for - * @param time time it took + * @param time time it took */ void updateCheckAndMutate(String nameAsString, long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java index c3b819228fe..1ff86de67ec 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +32,7 @@ public interface MetricsTableQueryMeter { /** * Update table read QPS * @param tableName The table the metric is for - * @param count Number of occurrences to record + * @param count Number of occurrences to record */ void updateTableReadQueryMeter(TableName tableName, long count); @@ -44,7 +45,7 @@ public interface MetricsTableQueryMeter { /** * Update table write QPS * @param tableName The table the metric is for - * @param count Number of occurrences to record + * @param count Number of occurrences to record */ void updateTableWriteQueryMeter(TableName tableName, long count); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java index 9fc606257e0..b65457a8714 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.Closeable; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -83,35 +82,35 @@ public interface MetricsTableSource extends Comparable, Clos /** * Update the compaction time histogram, both major and minor * @param isMajor whether compaction is a major compaction - * @param t time it took, in milliseconds + * @param t time it took, in milliseconds */ void updateCompactionTime(boolean isMajor, long t); /** * Update the compaction input number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionInputFileCount(boolean isMajor, long c); /** * Update the compaction total input file size histogram * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionInputSize(boolean isMajor, long bytes); /** * Update the compaction output number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionOutputFileCount(boolean isMajor, long c); /** * Update the compaction total output file size * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionOutputSize(boolean isMajor, long bytes); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java index e8eae0d7716..ad72876f02e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap a MetricsTableSource and export numbers so they can be - * used in MetricsTableSource + * Interface of class that will wrap a MetricsTableSource and export numbers so they can be used in + * MetricsTableSource */ @InterfaceAudience.Private public interface MetricsTableWrapperAggregate { public String HASH = "#"; + /** * Get the number of read requests that have been issued against this table */ @@ -38,6 +37,7 @@ public interface MetricsTableWrapperAggregate { * Get the number of write requests that have been issued against this table */ long getFilteredReadRequestCount(String table); + /** * Get the number of write requests that have been issued for this table */ @@ -63,7 +63,6 @@ public interface MetricsTableWrapperAggregate { */ long getTableSize(String table); - /** * Get the average region size for this table */ @@ -95,12 +94,12 @@ public interface MetricsTableWrapperAggregate { long getMinStoreFileAge(String table); /** - * @return Average age of store files for this table + * @return Average age of store files for this table */ long getAvgStoreFileAge(String table); /** - * @return Number of reference files for this table + * @return Number of reference files for this table */ long getNumReferenceFiles(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java index ee570f00d99..fe5b2ab4753 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** -* This interface will be implemented by a MetricsSource that will export metrics from -* multiple users into the hadoop metrics system. -*/ + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * users into the hadoop metrics system. + */ @InterfaceAudience.Private public interface MetricsUserAggregateSource extends BaseSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java index 96173669bbc..2d75c9246ba 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface MetricsUserSource extends Comparable { - //These client metrics will be reported through clusterStatus and hbtop only + // These client metrics will be reported through clusterStatus and hbtop only interface ClientMetrics { void incrementReadRequest(); @@ -66,15 +64,14 @@ public interface MetricsUserSource extends Comparable { void getMetrics(MetricsCollector metricsCollector, boolean all); /** - * Metrics collected at client level for a user(needed for reporting through clusterStatus - * and hbtop currently) + * Metrics collected at client level for a user(needed for reporting through clusterStatus and + * hbtop currently) * @return metrics per hostname */ Map getClientMetrics(); /** * Create a instance of ClientMetrics if not present otherwise return the previous one - * * @param hostName hostname of the client * @return Instance of ClientMetrics */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java index 4a430cdc434..ce4cd8f1619 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import org.apache.hadoop.hbase.TableName; @@ -28,7 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface MetricsWALSource extends BaseSource { - /** * The name of the metrics */ @@ -49,7 +47,6 @@ public interface MetricsWALSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String APPEND_TIME = "appendTime"; String APPEND_TIME_DESC = "Time an append to the log took."; String APPEND_COUNT = "appendCount"; @@ -64,16 +61,16 @@ public interface MetricsWALSource extends BaseSource { String ROLL_REQUESTED_DESC = "How many times a roll has been requested total"; String ERROR_ROLL_REQUESTED = "errorRollRequest"; String ERROR_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to I/O or other errors."; + "How many times a roll was requested due to I/O or other errors."; String LOW_REPLICA_ROLL_REQUESTED = "lowReplicaRollRequest"; String LOW_REPLICA_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to too few datanodes in the write pipeline."; + "How many times a roll was requested due to too few datanodes in the write pipeline."; String SLOW_SYNC_ROLL_REQUESTED = "slowSyncRollRequest"; String SLOW_SYNC_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to sync too slow on the write pipeline."; + "How many times a roll was requested due to sync too slow on the write pipeline."; String SIZE_ROLL_REQUESTED = "sizeRollRequest"; String SIZE_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to file size roll threshold."; + "How many times a roll was requested due to file size roll threshold."; String WRITTEN_BYTES = "writtenBytes"; String WRITTEN_BYTES_DESC = "Size (in bytes) of the data written to the WAL."; String SUCCESSFUL_LOG_ROLLS = "successfulLogRolls"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java index fe11c1049ce..73ef29d86c8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -29,11 +28,18 @@ public interface MetricsReplicationSinkSource { public static final String SINK_APPLIED_HFILES = "sink.appliedHFiles"; void setLastAppliedOpAge(long age); + void incrAppliedBatches(long batches); + void incrAppliedOps(long batchsize); + void incrFailedBatches(); + long getLastAppliedOpAge(); + void incrAppliedHFiles(long hfileSize); + long getSinkAppliedOps(); + long getFailedBatches(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java index 6fb5d71ef02..a891b773288 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java index 5e4ad27e091..9c567d8bec2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -23,7 +22,10 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface MetricsReplicationSourceFactory { public MetricsReplicationSinkSource getSink(); + public MetricsReplicationSourceSource getSource(String id); + public MetricsReplicationTableSource getTableSource(String tableName); + public MetricsReplicationGlobalSourceSource getGlobalSource(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java index d37dc133e2c..c9d109e0232 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -44,10 +43,10 @@ public interface MetricsReplicationSourceSource extends BaseSource { public static final String SOURCE_SIZE_OF_HFILE_REFS_QUEUE = "source.sizeOfHFileRefsQueue"; public static final String SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH = - "source.closedLogsWithUnknownFileLength"; + "source.closedLogsWithUnknownFileLength"; public static final String SOURCE_UNCLEANLY_CLOSED_LOGS = "source.uncleanlyClosedLogs"; public static final String SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES = - "source.ignoredUncleanlyClosedLogContentsInBytes"; + "source.ignoredUncleanlyClosedLogContentsInBytes"; public static final String SOURCE_RESTARTED_LOG_READING = "source.restartedLogReading"; public static final String SOURCE_REPEATED_LOG_FILE_BYTES = "source.repeatedLogFileBytes"; public static final String SOURCE_COMPLETED_LOGS = "source.completedLogs"; @@ -57,36 +56,68 @@ public interface MetricsReplicationSourceSource extends BaseSource { public static final String SOURCE_INITIALIZING = "source.numInitializing"; void setLastShippedAge(long age); + void incrSizeOfLogQueue(int size); + void decrSizeOfLogQueue(int size); + void incrLogEditsFiltered(long size); + void incrBatchesShipped(int batches); + void incrFailedBatches(); + void incrOpsShipped(long ops); + void incrShippedBytes(long size); + void incrLogReadInBytes(long size); + void incrLogReadInEdits(long size); + void clear(); + long getLastShippedAge(); + int getSizeOfLogQueue(); + void incrHFilesShipped(long hfiles); + void incrSizeOfHFileRefsQueue(long size); + void decrSizeOfHFileRefsQueue(long size); + void incrUnknownFileLengthForClosedWAL(); + void incrUncleanlyClosedWALs(); + long getUncleanlyClosedWALs(); + void incrBytesSkippedInUncleanlyClosedWALs(final long bytes); + void incrRestartedWALReading(); + void incrRepeatedFileBytes(final long bytes); + void incrCompletedWAL(); + void incrCompletedRecoveryQueue(); + void incrFailedRecoveryQueue(); + long getWALEditsRead(); + long getShippedOps(); + long getEditsFiltered(); + void setOldestWalAge(long age); + long getOldestWalAge(); + void incrSourceInitializing(); + void decrSourceInitializing(); + int getSourceInitializing(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java index faa944a6870..c4550abb6e8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -25,8 +24,12 @@ import org.apache.yetus.audience.InterfaceAudience; public interface MetricsReplicationTableSource extends BaseSource { void setLastShippedAge(long age); + void incrShippedBytes(long size); + long getShippedBytes(); + void clear(); + long getLastShippedAge(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java index 22c8753b8e3..1de3a34bfeb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -64,91 +63,77 @@ public interface MetricsRESTSource extends BaseSource, JvmPauseMonitorSource { /** * Increment the number of requests - * * @param inc Ammount to increment by */ void incrementRequests(int inc); /** * Increment the number of successful Get requests. - * * @param inc Number of successful get requests. */ void incrementSucessfulGetRequests(int inc); /** * Increment the number of successful Put requests. - * * @param inc Number of successful put requests. */ void incrementSucessfulPutRequests(int inc); /** - * Increment the number of successful Delete requests. - * - * @param inc + * Increment the number of successful Delete requests. n */ void incrementSucessfulDeleteRequests(int inc); /** * Increment the number of failed Put Requests. - * * @param inc Number of failed Put requests. */ void incrementFailedPutRequests(int inc); /** * Increment the number of failed Get requests. - * * @param inc The number of failed Get Requests. */ void incrementFailedGetRequests(int inc); /** * Increment the number of failed Delete requests. - * * @param inc The number of failed delete requests. */ void incrementFailedDeleteRequests(int inc); /** * Increment the number of successful scan requests. - * * @param inc Number of successful scan requests. */ void incrementSucessfulScanRequests(final int inc); /** * Increment the number failed scan requests. - * * @param inc Number of failed scan requests. */ void incrementFailedScanRequests(final int inc); /** * Increment the number of successful append requests. - * * @param inc Number of successful append requests. */ void incrementSucessfulAppendRequests(final int inc); /** * Increment the number failed append requests. - * * @param inc Number of failed append requests. */ void incrementFailedAppendRequests(final int inc); /** * Increment the number of successful increment requests. - * * @param inc Number of successful increment requests. */ void incrementSucessfulIncrementRequests(final int inc); /** * Increment the number failed increment requests. - * * @param inc Number of failed increment requests. 
*/ void incrementFailedIncrementRequests(final int inc); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java index 1e95782c2ba..214626204e7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; @@ -37,8 +36,7 @@ public interface MetricsThriftServerSource extends ExceptionTrackingSource, JvmP String ACTIVE_WORKER_COUNT_KEY = "numActiveWorkers"; /** - * Add how long an operation was in the queue. - * @param time + * Add how long an operation was in the queue. n */ void incTimeInQueue(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java index 7918d7360d8..1dd7d177aad 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; -/** Factory that will be used to create metrics sources for the two diffent types of thrift servers. */ +/** + * Factory that will be used to create metrics sources for the two diffent types of thrift servers. + */ @InterfaceAudience.Private public interface MetricsThriftServerSourceFactory { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java index 3e971243d09..5e920a25c12 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,15 +51,18 @@ public interface MetricsZooKeeperSource extends BaseSource { String EXCEPTION_CONNECTIONLOSS = "CONNECTIONLOSS Exception"; String EXCEPTION_CONNECTIONLOSS_DESC = "Number of failed ops due to a CONNECTIONLOSS exception."; String EXCEPTION_DATAINCONSISTENCY = "DATAINCONSISTENCY Exception"; - String EXCEPTION_DATAINCONSISTENCY_DESC = "Number of failed ops due to a DATAINCONSISTENCY exception."; + String EXCEPTION_DATAINCONSISTENCY_DESC = + "Number of failed ops due to a DATAINCONSISTENCY exception."; String EXCEPTION_INVALIDACL = "INVALIDACL Exception"; String EXCEPTION_INVALIDACL_DESC = "Number of failed ops due to an INVALIDACL exception"; String EXCEPTION_NOAUTH = "NOAUTH Exception"; String EXCEPTION_NOAUTH_DESC = "Number of failed ops due to a NOAUTH exception."; String EXCEPTION_OPERATIONTIMEOUT = "OPERATIONTIMEOUT Exception"; - String EXCEPTION_OPERATIONTIMEOUT_DESC = "Number of failed ops due to an OPERATIONTIMEOUT exception."; + String EXCEPTION_OPERATIONTIMEOUT_DESC = + "Number of failed ops due to an OPERATIONTIMEOUT exception."; String EXCEPTION_RUNTIMEINCONSISTENCY = "RUNTIMEINCONSISTENCY Exception"; - String EXCEPTION_RUNTIMEINCONSISTENCY_DESC = "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; + String EXCEPTION_RUNTIMEINCONSISTENCY_DESC = + "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; String EXCEPTION_SESSIONEXPIRED = "SESSIONEXPIRED Exception"; String EXCEPTION_SESSIONEXPIRED_DESC = "Number of failed ops due to a SESSIONEXPIRED exception."; String EXCEPTION_SYSTEMERROR = "SYSTEMERROR Exception"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java index bc1e8cb242f..66d5e3d87a4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,19 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import org.apache.yetus.audience.InterfaceAudience; /** - * Metrics Histogram interface. Implementing classes will expose computed - * quartile values through the metrics system. + * Metrics Histogram interface. Implementing classes will expose computed quartile values through + * the metrics system. */ @InterfaceAudience.Private public interface MetricHistogram { - //Strings used to create metrics names. + // Strings used to create metrics names. String NUM_OPS_METRIC_NAME = "_num_ops"; String MIN_METRIC_NAME = "_min"; String MAX_METRIC_NAME = "_max"; @@ -42,8 +41,7 @@ public interface MetricHistogram { String NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME = "_99.9th_percentile"; /** - * Add a single value to a histogram's stream of values. - * @param value + * Add a single value to a histogram's stream of values. 
n */ void add(long value); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java index 1366fd0b920..33b6c0d9a93 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import java.util.concurrent.ScheduledExecutorService; diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java index 157327babb2..92442de2b3c 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; - /** * A compatibility shim layer for interacting with different versions of Hadoop. */ -//NOTE: we can move this under src/main if main code wants to use this shim layer +// NOTE: we can move this under src/main if main code wants to use this shim layer public interface HadoopShims { /** * Returns a TaskAttemptContext instance created from the given parameters. - * @param job an instance of o.a.h.mapreduce.Job + * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by - * TaskAttemptId.forName() + * TaskAttemptId.forName() * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext */ - T createTestTaskAttemptContext(final J job, final String taskId); + T createTestTaskAttemptContext(final J job, final String taskId); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java index f72843cc4b0..8ea3da856af 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; public interface RandomStringGenerator { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java index 91cd19ef009..8f8fbd66f98 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; - import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; @@ -27,8 +25,8 @@ public class RandomStringGeneratorImpl implements RandomStringGenerator { private final String s; public RandomStringGeneratorImpl() { - s = new UUID(ThreadLocalRandom.current().nextLong(), - ThreadLocalRandom.current().nextLong()).toString(); + s = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()) + .toString(); } @Override diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java index 27888db0f6d..e963a439efa 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestCompatibilitySingletonFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompatibilitySingletonFactory.class); + HBaseClassTestRule.forClass(TestCompatibilitySingletonFactory.class); private static final int ITERATIONS = 100000; @@ -47,9 +47,8 @@ public class TestCompatibilitySingletonFactory { public String call() throws Exception { // XXX: Why is this sleep here? Thread.sleep(10); - RandomStringGenerator - instance = - CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class); + RandomStringGenerator instance = + CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class); return instance.getRandString(); } } @@ -59,7 +58,6 @@ public class TestCompatibilitySingletonFactory { List callables = new ArrayList<>(ITERATIONS); List resultStrings = new ArrayList<>(ITERATIONS); - // Create the callables. for (int i = 0; i < ITERATIONS; i++) { callables.add(new TestCompatibilitySingletonFactoryCallable()); @@ -77,7 +75,6 @@ public class TestCompatibilitySingletonFactory { // Get the first string. String firstString = resultStrings.get(0); - // Assert that all the strings are equal to the fist. 
for (String s : resultStrings) { assertEquals(firstString, s); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java index a49c8a7b892..6071a7e8200 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test for the CompatibilitySingletonFactory and building MetricsMasterSource + * Test for the CompatibilitySingletonFactory and building MetricsMasterSource */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsMasterSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsMasterSourceFactory.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java index 3c2a21d1533..598995d846f 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource + * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionServerSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRegionServerSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsRegionServerSourceFactory.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. 
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java index 2a804158e9e..10de587db0a 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,16 +25,16 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsWALSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsWALSource.class); + HBaseClassTestRule.forClass(TestMetricsWALSource.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java index 19e041193ee..2bdd8e754e5 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource + * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactory.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. 
CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java index fc4caae0b20..e81aaec77d0 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test of Rest Metrics Source interface. + * Test of Rest Metrics Source interface. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRESTSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRESTSource.class); + HBaseClassTestRule.forClass(TestMetricsRESTSource.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java index 49d25723b88..eccbecacb31 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.test; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -24,157 +23,141 @@ import org.apache.hadoop.hbase.metrics.BaseSource; public interface MetricsAssertHelper { /** - * Init helper. This method will make sure that the metrics system is set - * up for tests. + * Init helper. This method will make sure that the metrics system is set up for tests. */ void init(); /** * Assert that a tag exists and has a given value. - * * @param name The name of the tag. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertTag(String name, String expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. 
*/ void assertGauge(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGauge(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, double expected, BaseSource source); /** * Assert that a counter exists and that it's value is equal to the expected value. - * * @param name The name of the counter. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounter(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is greater than the given value. - * * @param name The name of the counter. * @param expected The value the counter is expected to be greater than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounterGt(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is less than the given value. - * * @param name The name of the counter. * @param expected The value the counter is expected to be less than. 
- * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounterLt(String name, long expected, BaseSource source); /** * Get the value of a counter. - * * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the counter. */ long getCounter(String name, BaseSource source); /** * Check if a dynamic counter exists. - * * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if counter metric exists. */ boolean checkCounterExists(String name, BaseSource source); /** * Check if a gauge exists. - * * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if gauge metric exists. */ boolean checkGaugeExists(String name, BaseSource source); /** * Get the value of a gauge as a double. - * * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return double value of the gauge. */ double getGaugeDouble(String name, BaseSource source); /** * Get the value of a gauge as a long. - * * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the gauge. */ long getGaugeLong(String name, BaseSource source); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java index c49a2a51ee5..23a4ef8e742 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,17 +28,16 @@ import org.junit.experimental.categories.Category; /** * Test for the interface of MetricsThriftServerSourceFactory */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsThriftServerSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactory.class); - - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws RuntimeException { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java index ca7ba2402e6..2ed502922e4 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,16 +25,16 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsZooKeeperSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsZooKeeperSource.class); + HBaseClassTestRule.forClass(TestMetricsZooKeeperSource.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. 
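Both tests above share the same small-test shape: a category annotation, an HBaseClassTestRule class rule, and a CompatibilitySingletonFactory lookup that is expected to fail when no compat implementation is on the class path. A new metrics small test written to that pattern would look like this sketch (the test class and its ExampleMetricsSource interface are hypothetical):

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MetricsTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MetricsTests.class, SmallTests.class })
public class TestExampleMetricsSource {

  // Hypothetical source interface with no registered compat implementation.
  interface ExampleMetricsSource {
  }

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestExampleMetricsSource.class);

  @Test(expected = RuntimeException.class)
  public void testGetInstanceNoHadoopCompat() throws Exception {
    // Should throw because there is no compat lib on the class path.
    CompatibilitySingletonFactory.getInstance(ExampleMetricsSource.class);
  }
}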
CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); } } diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml index db0b47fb9a5..3d6351af7e3 100644 --- a/hbase-hadoop2-compat/pom.xml +++ b/hbase-hadoop2-compat/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-hadoop2-compat Apache HBase - Hadoop Two Compatibility - - Interfaces to be implemented in order to smooth - over hadoop version differences - - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-dependency-plugin - - - create-mrapp-generated-classpath - generate-test-resources - - build-classpath - - - - - ${project.build.directory}/test-classes/mrapp-generated-classpath - - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - + Interfaces to be implemented in order to smooth + over hadoop version differences @@ -200,21 +149,68 @@ limitations under the License. test + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-dependency-plugin + + + create-mrapp-generated-classpath + + build-classpath + + generate-test-resources + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + - - - skipHadoopTwoCompatTests - - - skipHadoopTwoCompatTests - - - - true - true - - + + + skipHadoopTwoCompatTests + + + skipHadoopTwoCompatTests + + + + true + true + + eclipse-specific diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java index edbb9257c28..2807766da20 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -38,21 +37,18 @@ public class MetricsIOSourceImpl extends BaseSourceImpl implements MetricsIOSour this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsIOSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsIOWrapper wrapper) { + public MetricsIOSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext, MetricsIOWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - fsReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); - fsPReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); - fsWriteTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); + fsReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); + fsPReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); + fsWriteTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java index 67325c0728e..bdb87a727a5 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.util.HashMap; import java.util.Locale; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourceFactory { private enum SourceStorage { INSTANCE; + HashMap<String, MetricsHBaseServerSource> sources = new HashMap<>(); } @@ -37,19 +35,16 @@ public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourc } private static synchronized MetricsHBaseServerSource getSource(String serverName, - MetricsHBaseServerWrapper wrap) { + MetricsHBaseServerWrapper wrap) { String context = createContextName(serverName); MetricsHBaseServerSource source = SourceStorage.INSTANCE.sources.get(context); if (source == null) { - //Create the source. - source = new MetricsHBaseServerSourceImpl( - context, - METRICS_DESCRIPTION, - context.toLowerCase(Locale.ROOT), - context + METRICS_JMX_CONTEXT_SUFFIX, wrap); + // Create the source.
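As background for the histogram fields being re-wrapped above: a histogram created through getMetricsRegistry().newTimeHistogram(...) is fed one observation at a time with add(). A minimal sketch under that assumption follows; the class, metric key, and description strings are illustrative, not part of this patch.

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;

class ExampleIOSource extends BaseSourceImpl {
  private final MetricHistogram fsReadTimeHisto;

  ExampleIOSource() {
    super("ExampleIO", "Example IO metrics", "exampleio", "ExampleIO,sub=IO");
    fsReadTimeHisto = getMetricsRegistry().newTimeHistogram("fsReadTime", "Latency of reads");
  }

  void updateFsReadTime(long millis) {
    fsReadTimeHisto.add(millis); // record one read latency sample
  }
}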
+ source = new MetricsHBaseServerSourceImpl(context, METRICS_DESCRIPTION, + context.toLowerCase(Locale.ROOT), context + METRICS_JMX_CONTEXT_SUFFIX, wrap); - //Store back in storage + // Store back in storage SourceStorage.INSTANCE.sources.put(context, source); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java index e4fee95e2c4..440ebc6f5a6 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -29,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl - implements MetricsHBaseServerSource { + implements MetricsHBaseServerSource { private final MetricsHBaseServerWrapper wrapper; private final MutableFastCounter authorizationSuccesses; private final MutableFastCounter authorizationFailures; @@ -39,45 +37,40 @@ public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl private final MutableFastCounter sentBytes; private final MutableFastCounter receivedBytes; - private MetricHistogram queueCallTime; private MetricHistogram processCallTime; private MetricHistogram totalCallTime; private MetricHistogram requestSize; private MetricHistogram responseSize; - public MetricsHBaseServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsHBaseServerWrapper wrapper) { + public MetricsHBaseServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsHBaseServerWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; this.authorizationSuccesses = this.getMetricsRegistry().newCounter(AUTHORIZATION_SUCCESSES_NAME, - AUTHORIZATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_SUCCESSES_DESC, 0L); this.authorizationFailures = this.getMetricsRegistry().newCounter(AUTHORIZATION_FAILURES_NAME, - AUTHORIZATION_FAILURES_DESC, 0L); - this.authenticationSuccesses = this.getMetricsRegistry().newCounter( - AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_FAILURES_DESC, 0L); + this.authenticationSuccesses = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME, - AUTHENTICATION_FAILURES_DESC, 0L); - this.authenticationFallbacks = this.getMetricsRegistry().newCounter( - AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); - this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, - SENT_BYTES_DESC, 0L); - this.receivedBytes = this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, - RECEIVED_BYTES_DESC, 0L); - this.queueCallTime = 
this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, - QUEUE_CALL_TIME_DESC); - this.processCallTime = this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, - PROCESS_CALL_TIME_DESC); - this.totalCallTime = this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, - TOTAL_CALL_TIME_DESC); - this.requestSize = this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, - REQUEST_SIZE_DESC); - this.responseSize = this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, - RESPONSE_SIZE_DESC); + AUTHENTICATION_FAILURES_DESC, 0L); + this.authenticationFallbacks = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); + this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, SENT_BYTES_DESC, 0L); + this.receivedBytes = + this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, RECEIVED_BYTES_DESC, 0L); + this.queueCallTime = + this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, QUEUE_CALL_TIME_DESC); + this.processCallTime = + this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, PROCESS_CALL_TIME_DESC); + this.totalCallTime = + this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, TOTAL_CALL_TIME_DESC); + this.requestSize = + this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, REQUEST_SIZE_DESC); + this.responseSize = + this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, RESPONSE_SIZE_DESC); } @Override @@ -146,44 +139,40 @@ public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl if (wrapper != null) { mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize()) - .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC), - wrapper.getGeneralQueueLength()) - .addGauge(Interns.info(REPLICATION_QUEUE_NAME, - REPLICATION_QUEUE_DESC), wrapper.getReplicationQueueLength()) - .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC), - wrapper.getPriorityQueueLength()) - .addGauge(Interns.info(METAPRIORITY_QUEUE_NAME, METAPRIORITY_QUEUE_DESC), - wrapper.getMetaPriorityQueueLength()) - .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, - NUM_OPEN_CONNECTIONS_DESC), wrapper.getNumOpenConnections()) - .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, - NUM_ACTIVE_HANDLER_DESC), wrapper.getActiveRpcHandlerCount()) - .addGauge(Interns.info(NUM_ACTIVE_GENERAL_HANDLER_NAME, NUM_ACTIVE_GENERAL_HANDLER_DESC), - wrapper.getActiveGeneralRpcHandlerCount()) - .addGauge( - Interns.info(NUM_ACTIVE_PRIORITY_HANDLER_NAME, NUM_ACTIVE_PRIORITY_HANDLER_DESC), - wrapper.getActivePriorityRpcHandlerCount()) - .addGauge( - Interns.info(NUM_ACTIVE_REPLICATION_HANDLER_NAME, NUM_ACTIVE_REPLICATION_HANDLER_DESC), - wrapper.getActiveReplicationRpcHandlerCount()) - .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, - NUM_GENERAL_CALLS_DROPPED_DESC), wrapper.getNumGeneralCallsDropped()) - .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, - NUM_LIFO_MODE_SWITCHES_DESC), wrapper.getNumLifoModeSwitches()) - .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), - wrapper.getWriteQueueLength()) - .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), - wrapper.getReadQueueLength()) - .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), - wrapper.getScanQueueLength()) - .addGauge(Interns.info(NUM_ACTIVE_WRITE_HANDLER_NAME, NUM_ACTIVE_WRITE_HANDLER_DESC), - wrapper.getActiveWriteRpcHandlerCount()) - .addGauge(Interns.info(NUM_ACTIVE_READ_HANDLER_NAME, NUM_ACTIVE_READ_HANDLER_DESC), - 
wrapper.getActiveReadRpcHandlerCount()) - .addGauge(Interns.info(NUM_ACTIVE_SCAN_HANDLER_NAME, NUM_ACTIVE_SCAN_HANDLER_DESC), - wrapper.getActiveScanRpcHandlerCount()) - .addGauge(Interns.info(NETTY_DM_USAGE_NAME, NETTY_DM_USAGE_DESC), - wrapper.getNettyDmUsage()); + .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC), + wrapper.getGeneralQueueLength()) + .addGauge(Interns.info(REPLICATION_QUEUE_NAME, REPLICATION_QUEUE_DESC), + wrapper.getReplicationQueueLength()) + .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC), + wrapper.getPriorityQueueLength()) + .addGauge(Interns.info(METAPRIORITY_QUEUE_NAME, METAPRIORITY_QUEUE_DESC), + wrapper.getMetaPriorityQueueLength()) + .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, NUM_OPEN_CONNECTIONS_DESC), + wrapper.getNumOpenConnections()) + .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, NUM_ACTIVE_HANDLER_DESC), + wrapper.getActiveRpcHandlerCount()) + .addGauge(Interns.info(NUM_ACTIVE_GENERAL_HANDLER_NAME, NUM_ACTIVE_GENERAL_HANDLER_DESC), + wrapper.getActiveGeneralRpcHandlerCount()) + .addGauge(Interns.info(NUM_ACTIVE_PRIORITY_HANDLER_NAME, NUM_ACTIVE_PRIORITY_HANDLER_DESC), + wrapper.getActivePriorityRpcHandlerCount()) + .addGauge( + Interns.info(NUM_ACTIVE_REPLICATION_HANDLER_NAME, NUM_ACTIVE_REPLICATION_HANDLER_DESC), + wrapper.getActiveReplicationRpcHandlerCount()) + .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, NUM_GENERAL_CALLS_DROPPED_DESC), + wrapper.getNumGeneralCallsDropped()) + .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, NUM_LIFO_MODE_SWITCHES_DESC), + wrapper.getNumLifoModeSwitches()) + .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), wrapper.getWriteQueueLength()) + .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), wrapper.getReadQueueLength()) + .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), wrapper.getScanQueueLength()) + .addGauge(Interns.info(NUM_ACTIVE_WRITE_HANDLER_NAME, NUM_ACTIVE_WRITE_HANDLER_DESC), + wrapper.getActiveWriteRpcHandlerCount()) + .addGauge(Interns.info(NUM_ACTIVE_READ_HANDLER_NAME, NUM_ACTIVE_READ_HANDLER_DESC), + wrapper.getActiveReadRpcHandlerCount()) + .addGauge(Interns.info(NUM_ACTIVE_SCAN_HANDLER_NAME, NUM_ACTIVE_SCAN_HANDLER_DESC), + wrapper.getActiveScanRpcHandlerCount()) + .addGauge(Interns.info(NETTY_DM_USAGE_NAME, NETTY_DM_USAGE_DESC), + wrapper.getNettyDmUsage()); } metricsRegistry.snapshot(mrb, all); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java index b4f62b3970b..f1555f660be 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.Cluster; @@ -43,23 +41,20 @@ public abstract class JobUtil { /** * Initializes the staging directory and returns the path. 
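The long builder chain that ends above is the standard metrics2 snapshot shape: per-snapshot values are pulled from a wrapper into the record builder, then the dynamic registry is snapshotted. A stripped-down, self-contained sketch of that shape (the wrapper interface, class, and metric names are hypothetical stand-ins):

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.hbase.metrics.Interns;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

class ExampleServerSource extends BaseSourceImpl {

  interface ExampleWrapper {
    long getTotalQueueSize();
  }

  private final ExampleWrapper wrapper;

  ExampleServerSource(ExampleWrapper wrapper) {
    super("ExampleServer", "Example server metrics", "exampleserver", "ExampleServer,sub=IPC");
    this.wrapper = wrapper;
  }

  @Override
  public void getMetrics(MetricsCollector metricsCollector, boolean all) {
    MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);
    if (wrapper != null) {
      // Gauge values are re-read from the wrapper on every snapshot.
      mrb.addGauge(Interns.info("queueSize", "Total size of the call queues"),
        wrapper.getTotalQueueSize());
    }
    metricsRegistry.snapshot(mrb, all);
  }
}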
- * * @param conf system configuration * @return staging directory path - * @throws IOException if the ownership on the staging directory is not as expected + * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ - public static Path getStagingDir(Configuration conf) - throws IOException, InterruptedException { + public static Path getStagingDir(Configuration conf) throws IOException, InterruptedException { return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf); } /** * Initializes the staging directory and returns the qualified path. - * * @param conf conf system configuration * @return qualified staging directory path - * @throws IOException if the ownership on the staging directory is not as expected + * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ public static Path getQualifiedStagingDir(Configuration conf) diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java index c78464da372..5d88638df52 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
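The JobUtil helpers above only need a Configuration; a caller sketch follows, with the wrapper class name being hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.JobUtil;

public class StagingDirExample {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    // Throws IOException if the ownership on the staging directory is not as expected.
    Path stagingDir = JobUtil.getStagingDir(conf);
    System.out.println("staging dir: " + stagingDir);
  }
}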
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -28,9 +27,8 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsAssignmentManagerSourceImpl - extends BaseSourceImpl - implements MetricsAssignmentManagerSource { +public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl + implements MetricsAssignmentManagerSource { private MutableGaugeLong ritGauge; private MutableGaugeLong ritCountOverThresholdGauge; @@ -63,16 +61,15 @@ public class MetricsAssignmentManagerSourceImpl this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsAssignmentManagerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsAssignmentManagerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } public void init() { ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, RIT_COUNT_DESC, 0L); - ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, - RIT_COUNT_OVER_THRESHOLD_DESC,0L); + ritCountOverThresholdGauge = + metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, RIT_COUNT_OVER_THRESHOLD_DESC, 0L); ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, RIT_OLDEST_AGE_DESC, 0L); ritDurationHisto = metricsRegistry.newTimeHistogram(RIT_DURATION_NAME, RIT_DURATION_DESC); operationCounter = metricsRegistry.getCounter(OPERATION_COUNT_NAME, 0L); @@ -80,22 +77,22 @@ public class MetricsAssignmentManagerSourceImpl unknownServerOpenRegions = metricsRegistry.newGauge(UNKNOWN_SERVER_OPEN_REGIONS, "", 0); orphanRegionsOnRsGauge = - metricsRegistry.newGauge(ORPHAN_REGIONS_ON_RS, ORPHAN_REGIONS_ON_RS_DESC, 0L); + metricsRegistry.newGauge(ORPHAN_REGIONS_ON_RS, ORPHAN_REGIONS_ON_RS_DESC, 0L); orphanRegionsOnFsGauge = - metricsRegistry.newGauge(ORPHAN_REGIONS_ON_FS, ORPHAN_REGIONS_ON_FS_DESC, 0L); + metricsRegistry.newGauge(ORPHAN_REGIONS_ON_FS, ORPHAN_REGIONS_ON_FS_DESC, 0L); inconsistentRegionsGauge = - metricsRegistry.newGauge(INCONSISTENT_REGIONS, INCONSISTENT_REGIONS_DESC, 0L); + metricsRegistry.newGauge(INCONSISTENT_REGIONS, INCONSISTENT_REGIONS_DESC, 0L); holesGauge = metricsRegistry.newGauge(HOLES, HOLES_DESC, 0L); overlapsGauge = metricsRegistry.newGauge(OVERLAPS, OVERLAPS_DESC, 0L); unknownServerRegionsGauge = - metricsRegistry.newGauge(UNKNOWN_SERVER_REGIONS, UNKNOWN_SERVER_REGIONS_DESC, 0L); + metricsRegistry.newGauge(UNKNOWN_SERVER_REGIONS, UNKNOWN_SERVER_REGIONS_DESC, 0L); emptyRegionInfoRegionsGauge = - metricsRegistry.newGauge(EMPTY_REGION_INFO_REGIONS, EMPTY_REGION_INFO_REGIONS_DESC, 0L); + metricsRegistry.newGauge(EMPTY_REGION_INFO_REGIONS, EMPTY_REGION_INFO_REGIONS_DESC, 0L); /** - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use * {@link BaseSourceImpl#registry} to register the new metrics. 
*/ assignMetrics = new OperationMetrics(registry, ASSIGN_METRIC_PREFIX); @@ -222,7 +219,7 @@ public class MetricsAssignmentManagerSourceImpl public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java index d78efce2add..dc2a2824269 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,9 +22,8 @@ import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsMasterFilesystemSourceImpl - extends BaseSourceImpl - implements MetricsMasterFileSystemSource { +public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl + implements MetricsMasterFileSystemSource { private MetricHistogram splitSizeHisto; private MetricHistogram splitTimeHisto; @@ -36,9 +34,8 @@ public class MetricsMasterFilesystemSourceImpl this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsMasterFilesystemSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsMasterFilesystemSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -47,9 +44,9 @@ public class MetricsMasterFilesystemSourceImpl splitSizeHisto = metricsRegistry.newSizeHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC); splitTimeHisto = metricsRegistry.newTimeHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC); metaSplitTimeHisto = - metricsRegistry.newTimeHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC); + metricsRegistry.newTimeHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC); metaSplitSizeHisto = - metricsRegistry.newSizeHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC); + metricsRegistry.newSizeHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC); } @Override @@ -62,7 +59,6 @@ public class MetricsMasterFilesystemSourceImpl splitSizeHisto.add(size); } - @Override public void updateMetaWALSplitTime(long time) { metaSplitTimeHisto.add(time); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java index 6fd254e9a69..dc5773cb904 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java +++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java index 69e7d7958fa..001ab016814 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,29 +24,20 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterProcSourceImpl - extends BaseSourceImpl implements MetricsMasterProcSource { +public class MetricsMasterProcSourceImpl extends BaseSourceImpl implements MetricsMasterProcSource { private final MetricsMasterWrapper masterWrapper; public MetricsMasterProcSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterProcSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterProcSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -64,13 +54,12 @@ public class MetricsMasterProcSourceImpl // masterWrapper can be null because this function is called inside of init. 
if (masterWrapper != null) { - metricsRecordBuilder - .addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), - masterWrapper.getNumWALFiles()); + metricsRecordBuilder.addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), + masterWrapper.getNumWALFiles()); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java index 0fae0e74405..6a489eb7001 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java index 750c1c959fc..71c0ea63ac2 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +19,6 @@ package org.apache.hadoop.hbase.master; import java.util.Map; import java.util.Map.Entry; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; @@ -33,7 +33,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl - implements MetricsMasterQuotaSource { + implements MetricsMasterQuotaSource { private final MetricsMasterWrapper wrapper; private final MutableGaugeLong spaceQuotasGauge; private final MutableGaugeLong tablesViolatingQuotasGauge; @@ -48,30 +48,29 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsMasterQuotaSourceImpl( - String metricsName, String metricsDescription, String metricsContext, - String metricsJmxContext, MetricsMasterWrapper wrapper) { + public MetricsMasterQuotaSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - spaceQuotasGauge = getMetricsRegistry().newGauge( - NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); - tablesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_TABLES_QUOTA_VIOLATIONS_NAME, NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); - namespacesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_NS_QUOTA_VIOLATIONS_NAME, NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); - regionSpaceReportsGauge = getMetricsRegistry().newGauge( - NUM_REGION_SIZE_REPORTS_NAME, NUM_REGION_SIZE_REPORTS_DESC, 0L); + spaceQuotasGauge = + getMetricsRegistry().newGauge(NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); + tablesViolatingQuotasGauge = getMetricsRegistry().newGauge(NUM_TABLES_QUOTA_VIOLATIONS_NAME, + NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); + namespacesViolatingQuotasGauge = + getMetricsRegistry().newGauge(NUM_NS_QUOTA_VIOLATIONS_NAME, NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); + regionSpaceReportsGauge = + getMetricsRegistry().newGauge(NUM_REGION_SIZE_REPORTS_NAME, NUM_REGION_SIZE_REPORTS_DESC, 0L); - quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - QUOTA_OBSERVER_CHORE_TIME_NAME, QUOTA_OBSERVER_CHORE_TIME_DESC); - snapshotObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); + quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram(QUOTA_OBSERVER_CHORE_TIME_NAME, + QUOTA_OBSERVER_CHORE_TIME_DESC); + snapshotObserverTimeHisto = getMetricsRegistry() + .newTimeHistogram(SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); snapshotObserverSizeComputationTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); - snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); + SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); + 
snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry() + .newTimeHistogram(SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); } @Override @@ -109,7 +108,7 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl MetricsRecordBuilder record = metricsCollector.addRecord(metricsRegistry.info()); if (wrapper != null) { // Summarize the tables - Map<String, Entry<Long, Long>> tableUsages = wrapper.getTableSpaceUtilization(); + Map<String, Entry<Long, Long>> tableUsages = wrapper.getTableSpaceUtilization(); String tableSummary = "[]"; if (tableUsages != null && !tableUsages.isEmpty()) { tableSummary = generateJsonQuotaSummary(tableUsages.entrySet(), "table"); @@ -118,7 +117,7 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl // Summarize the namespaces String nsSummary = "[]"; - Map<String, Entry<Long, Long>> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); + Map<String, Entry<Long, Long>> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); if (namespaceUsages != null && !namespaceUsages.isEmpty()) { nsSummary = generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace"); } @@ -130,10 +129,10 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl /** * Summarizes the usage and limit for many targets (table or namespace) into JSON. */ - private String generateJsonQuotaSummary( - Iterable<Entry<String, Entry<Long, Long>>> data, String target) { + private String generateJsonQuotaSummary(Iterable<Entry<String, Entry<Long, Long>>> data, + String target) { StringBuilder sb = new StringBuilder(); - for (Entry<String, Entry<Long, Long>> tableUsage : data) { + for (Entry<String, Entry<Long, Long>> tableUsage : data) { String tableName = tableUsage.getKey(); long usage = tableUsage.getValue().getKey(); long limit = tableUsage.getValue().getValue(); @@ -141,7 +140,7 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl sb.append(", "); } sb.append("{").append(target).append("=").append(tableName).append(", usage=").append(usage) - .append(", limit=").append(limit).append("}"); + .append(", limit=").append(limit).append("}"); } sb.insert(0, "[").append("]"); return sb.toString(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java index a4b3fa194f9..84c49062f03 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License.
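To make the summary format above concrete, here is a standalone sketch that mirrors the string building in generateJsonQuotaSummary; it is not code from this patch, and the table names and numbers are made up.

import java.util.AbstractMap.SimpleEntry;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;

public class QuotaSummaryExample {

  // Mirrors the concatenation done above: one {target=name, usage=u, limit=l} element per entry.
  static String summarize(Map<String, Entry<Long, Long>> data, String target) {
    StringBuilder sb = new StringBuilder();
    for (Entry<String, Entry<Long, Long>> e : data.entrySet()) {
      if (sb.length() > 0) {
        sb.append(", ");
      }
      sb.append("{").append(target).append("=").append(e.getKey()).append(", usage=")
        .append(e.getValue().getKey()).append(", limit=").append(e.getValue().getValue())
        .append("}");
    }
    return sb.insert(0, "[").append("]").toString();
  }

  public static void main(String[] args) {
    Map<String, Entry<Long, Long>> usages = new LinkedHashMap<>();
    usages.put("t1", new SimpleEntry<>(1024L, 2048L));
    usages.put("t2", new SimpleEntry<>(0L, 512L));
    // Prints: [{table=t1, usage=1024, limit=2048}, {table=t2, usage=0, limit=512}]
    System.out.println(summarize(usages, "table"));
  }
}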
*/ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsMasterSource when given a MetricsMasterWrapper + * Factory to create MetricsMasterSource when given a MetricsMasterWrapper */ @InterfaceAudience.Private public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory { private static enum FactoryStorage { INSTANCE; + MetricsMasterSource masterSource; } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index ca94cdb61e7..5e06f61082d 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -27,13 +26,11 @@ import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterSourceImpl - extends BaseSourceImpl implements MetricsMasterSource { +public class MetricsMasterSourceImpl extends BaseSourceImpl implements MetricsMasterSource { private final MetricsMasterWrapper masterWrapper; private MutableFastCounter clusterRequestsCounter; @@ -41,18 +38,11 @@ public class MetricsMasterSourceImpl private OperationMetrics serverCrashMetrics; public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -64,9 +54,9 @@ public class MetricsMasterSourceImpl clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0L); /* - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use - * {@link BaseSourceImpl#registry} to register the new metrics. + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use {@link + * BaseSourceImpl#registry} to register the new metrics. 
*/ serverCrashMetrics = new OperationMetrics(registry, SERVER_CRASH_METRIC_PREFIX); } @@ -86,43 +76,40 @@ public class MetricsMasterSourceImpl // metrics again. if (masterWrapper != null && masterWrapper.isRunning()) { metricsRecordBuilder - .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC), - masterWrapper.getMergePlanCount()) - .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC), - masterWrapper.getSplitPlanCount()) - .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, - MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime()) - .addGauge(Interns.info(MASTER_START_TIME_NAME, - MASTER_START_TIME_DESC), masterWrapper.getStartTime()) - .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, - MASTER_FINISHED_INITIALIZATION_TIME_DESC), - masterWrapper.getMasterInitializationTime()) - .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), - masterWrapper.getAverageLoad()) - .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), - masterWrapper.getRegionServers()) - .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, - NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers()) - .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), - masterWrapper.getDeadRegionServers()) - .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, - NUMBER_OF_DEAD_REGION_SERVERS_DESC), - masterWrapper.getNumDeadRegionServers()) - .tag(Interns.info(DRAINING_REGION_SERVER_NAME, DRAINING_REGION_SERVER_DESC), - masterWrapper.getDrainingRegionServers()) - .addGauge(Interns.info(NUM_DRAINING_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), - masterWrapper.getNumDrainingRegionServers()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - masterWrapper.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) - .tag(Interns.info(IS_ACTIVE_MASTER_NAME, - IS_ACTIVE_MASTER_DESC), - String.valueOf(masterWrapper.getIsActiveMaster())); + .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC), + masterWrapper.getMergePlanCount()) + .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC), + masterWrapper.getSplitPlanCount()) + .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, MASTER_ACTIVE_TIME_DESC), + masterWrapper.getActiveTime()) + .addGauge(Interns.info(MASTER_START_TIME_NAME, MASTER_START_TIME_DESC), + masterWrapper.getStartTime()) + .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, + MASTER_FINISHED_INITIALIZATION_TIME_DESC), masterWrapper.getMasterInitializationTime()) + .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), + masterWrapper.getAverageLoad()) + .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), + masterWrapper.getRegionServers()) + .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), + masterWrapper.getNumRegionServers()) + .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), + masterWrapper.getDeadRegionServers()) + .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, NUMBER_OF_DEAD_REGION_SERVERS_DESC), + masterWrapper.getNumDeadRegionServers()) + .tag(Interns.info(DRAINING_REGION_SERVER_NAME, DRAINING_REGION_SERVER_DESC), + masterWrapper.getDrainingRegionServers()) + .addGauge(Interns.info(NUM_DRAINING_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), + masterWrapper.getNumDrainingRegionServers()) + .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, 
ZOOKEEPER_QUORUM_DESC), + masterWrapper.getZookeeperQuorum()) + .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) + .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) + .tag(Interns.info(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC), + String.valueOf(masterWrapper.getIsActiveMaster())); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java index 7077f73ea47..52311218734 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -33,20 +32,18 @@ public class MetricsSnapshotSourceImpl extends BaseSourceImpl implements Metrics this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsSnapshotSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsSnapshotSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @Override public void init() { - snapshotTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); - snapshotCloneTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); - snapshotRestoreTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); + snapshotTimeHisto = metricsRegistry.newTimeHistogram(SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); + snapshotCloneTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); + snapshotRestoreTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java index 7bccbb70d58..9cd07dbb2bf 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -32,9 +31,8 @@ public class MetricsBalancerSourceImpl extends BaseSourceImpl implements Metrics this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsBalancerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsBalancerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); updateBalancerStatus(true); } @@ -57,6 +55,6 @@ public class MetricsBalancerSourceImpl extends BaseSourceImpl implements Metrics @Override public void updateBalancerStatus(boolean status) { - metricsRegistry.tag(BALANCER_STATUS,"", String.valueOf(status), true); + metricsRegistry.tag(BALANCER_STATUS, "", String.valueOf(status), true); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java index de1dd81b17f..358e4a79515 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl implements - MetricsStochasticBalancerSource { +public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl + implements MetricsStochasticBalancerSource { private static final String TABLE_FUNCTION_SEP = "_"; // Most Recently Used(MRU) cache @@ -38,14 +36,14 @@ public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceIm private int mruCap = calcMruCap(metricsSize); private final Map<String, Map<String, Double>> stochasticCosts = - new LinkedHashMap<String, Map<String, Double>>(mruCap, MRU_LOAD_FACTOR, true) { - private static final long serialVersionUID = 8204713453436906599L; + new LinkedHashMap<String, Map<String, Double>>(mruCap, MRU_LOAD_FACTOR, true) { + private static final long serialVersionUID = 8204713453436906599L; - @Override - protected boolean removeEldestEntry(Map.Entry<String, Map<String, Double>> eldest) { - return size() > mruCap; - } - }; + @Override + protected boolean removeEldestEntry(Map.Entry<String, Map<String, Double>> eldest) { + return size() > mruCap; + } + }; private Map<String, String> costFunctionDescs = new ConcurrentHashMap<>(); /** @@ -67,7 +65,7 @@ public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceIm * Reports stochastic load balancer costs to JMX */ public void updateStochasticCost(String tableName, String costFunctionName, String functionDesc, - Double cost) { + Double cost) { if (tableName == null || costFunctionName == null || cost == null) { return; } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java index a90d810701c..9f11ff97128 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.hbase.metrics.impl.GlobalMetricRegistriesAdapter; @@ -33,16 +32,16 @@ import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to - * DefaultMetricsSystem and creation of the metrics registry. - * - * All MetricsSource's in hbase-hadoop2-compat should derive from this class. + * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to + * DefaultMetricsSystem and creation of the metrics registry. All MetricsSource's in + * hbase-hadoop2-compat should derive from this class.
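The stochasticCosts field above uses an access-ordered LinkedHashMap as a bounded most-recently-used cache: the third constructor argument turns on access ordering, and removeEldestEntry evicts once the cap is exceeded. A generic, self-contained sketch of that idiom (the cap and load factor here are arbitrary):

import java.util.LinkedHashMap;
import java.util.Map;

public class MruCache<K, V> extends LinkedHashMap<K, V> {
  private static final long serialVersionUID = 1L;

  private final int cap;

  public MruCache(int cap) {
    super(cap, 0.75f, true); // accessOrder = true, so get() refreshes an entry's recency
    this.cap = cap;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > cap; // evict the least recently used entry once over capacity
  }
}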
*/ @InterfaceAudience.Private public class BaseSourceImpl implements BaseSource, MetricsSource { private static enum DefaultMetricsSystemInitializer { INSTANCE; + private boolean inited = false; synchronized void init(String name) { @@ -62,10 +61,10 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { } /** - * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. - * Defining BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, - * however, for existing {@link BaseSource} implementations, please use the field - * named "registry" which is a {@link MetricRegistry} instance together with the + * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. Defining + * BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, however, + * for existing {@link BaseSource} implementations, please use the field named + * "registry" which is a {@link MetricRegistry} instance together with the * {@link HBaseMetrics2HadoopMetricsAdapter}. */ @Deprecated @@ -77,17 +76,16 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { /** * Note that there are at least 4 MetricRegistry definitions in the source code. The first one is - * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork - * of the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of + * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork of + * the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of * MetricRegistry, and finally a new API abstraction in HBase that is the * o.a.h.h.metrics.MetricRegistry class. This last one is the new way to use metrics within the - * HBase code. However, the others are in play because of existing metrics2 based code still - * needs to coexists until we get rid of all of our BaseSource and convert them to the new - * framework. Until that happens, new metrics can use the new API, but will be collected - * through the HBaseMetrics2HadoopMetricsAdapter class. - * - * BaseSourceImpl has two MetricRegistries. metricRegistry is for hadoop Metrics2 based - * metrics, while the registry is for hbase-metrics based metrics. + * HBase code. However, the others are in play because of existing metrics2 based code still needs + * to coexists until we get rid of all of our BaseSource and convert them to the new framework. + * Until that happens, new metrics can use the new API, but will be collected through the + * HBaseMetrics2HadoopMetricsAdapter class. BaseSourceImpl has two MetricRegistries. + * metricRegistry is for hadoop Metrics2 based metrics, while the registry is for hbase-metrics + * based metrics. 
*/ protected final MetricRegistry registry; @@ -101,11 +99,8 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { */ protected final HBaseMetrics2HadoopMetricsAdapter metricsAdapter; - public BaseSourceImpl( - String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public BaseSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { this.metricsName = metricsName; this.metricsDescription = metricsDescription; @@ -115,7 +110,7 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext); DefaultMetricsSystemInitializer.INSTANCE.init(metricsName); - //Register this instance. + // Register this instance. DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this); // hbase-metrics module based metrics are registered in the hbase MetricsRegistry. @@ -132,7 +127,6 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { /** * Set a single gauge to a value. - * * @param gaugeName gauge name * @param value the new value of the gauge. */ @@ -143,7 +137,6 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { /** * Add some amount to a gauge. - * * @param gaugeName The name of the gauge to increment. * @param delta The amount to increment the gauge by. */ @@ -154,7 +147,6 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { /** * Decrease the value of a named gauge. - * * @param gaugeName The name of the gauge. * @param delta the ammount to subtract from a gauge value. */ @@ -165,7 +157,6 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { /** * Increment a named counter by some value. - * * @param key the name of the counter * @param delta the ammount to increment */ @@ -183,7 +174,6 @@ public class BaseSourceImpl implements BaseSource, MetricsSource { /** * Remove a named gauge. - * * @param key the key of the gauge to remove */ public void removeMetric(String key) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java index a4e75ba0137..58abc166bf7 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java @@ -15,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; /** - * Common base implementation for metrics sources which need to track exceptions thrown or - * received. + * Common base implementation for metrics sources which need to track exceptions thrown or received. 
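The setGauge/incGauge/decGauge/incCounters/removeMetric helpers reflowed above all delegate to the dynamic registry. A toy, map-backed registry that mirrors their semantics (ToyMetricsRegistry is purely illustrative and is not HBase's DynamicMetricsRegistry):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class ToyMetricsRegistry {
  private final ConcurrentHashMap<String, AtomicLong> gauges = new ConcurrentHashMap<>();
  private final ConcurrentHashMap<String, AtomicLong> counters = new ConcurrentHashMap<>();

  public void setGauge(String gaugeName, long value) {
    gauges.computeIfAbsent(gaugeName, k -> new AtomicLong()).set(value);
  }

  public void incGauge(String gaugeName, long delta) {
    gauges.computeIfAbsent(gaugeName, k -> new AtomicLong()).addAndGet(delta);
  }

  public void decGauge(String gaugeName, long delta) {
    incGauge(gaugeName, -delta);
  }

  public void incCounters(String key, long delta) {
    counters.computeIfAbsent(key, k -> new AtomicLong()).addAndGet(delta);
  }

  public void removeMetric(String key) {
    gauges.remove(key);
    counters.remove(key);
  }

  public static void main(String[] args) {
    ToyMetricsRegistry registry = new ToyMetricsRegistry();
    registry.setGauge("memStoreSize", 128);
    registry.incGauge("memStoreSize", 64);
    registry.decGauge("memStoreSize", 32);
    registry.incCounters("totalRequestCount", 1);
    System.out.println(registry.gauges.get("memStoreSize")); // 160
  }
}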
*/ @InterfaceAudience.Private -public class ExceptionTrackingSourceImpl extends BaseSourceImpl - implements ExceptionTrackingSource { +public class ExceptionTrackingSourceImpl extends BaseSourceImpl implements ExceptionTrackingSource { protected MutableFastCounter exceptions; protected MutableFastCounter exceptionsOOO; protected MutableFastCounter exceptionsBusy; @@ -46,7 +43,7 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl protected MutableFastCounter otherExceptions; public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -54,36 +51,36 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl public void init() { super.init(); this.exceptions = this.getMetricsRegistry().newCounter(EXCEPTIONS_NAME, EXCEPTIONS_DESC, 0L); - this.exceptionsOOO = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsBusy = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsUnknown = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsScannerReset = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_SCANNER_RESET_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsSanity = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsMoved = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsNSRE = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsOOO = + this.getMetricsRegistry().newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsBusy = + this.getMetricsRegistry().newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsUnknown = + this.getMetricsRegistry().newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsScannerReset = + this.getMetricsRegistry().newCounter(EXCEPTIONS_SCANNER_RESET_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsSanity = + this.getMetricsRegistry().newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsMoved = + this.getMetricsRegistry().newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsNSRE = + this.getMetricsRegistry().newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); this.exceptionsMultiTooLarge = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); + .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); this.exceptionsCallQueueTooBig = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_QUEUE_TOO_BIG, EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC, 0L); - this.exceptionsQuotaExceeded = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsRpcThrottling = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallDropped = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallTimedOut = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionRequestTooBig = this.getMetricsRegistry() - 
.newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); - this.otherExceptions = this.getMetricsRegistry() - .newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); + .newCounter(EXCEPTIONS_CALL_QUEUE_TOO_BIG, EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC, 0L); + this.exceptionsQuotaExceeded = + this.getMetricsRegistry().newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsRpcThrottling = + this.getMetricsRegistry().newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallDropped = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallTimedOut = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionRequestTooBig = + this.getMetricsRegistry().newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); + this.otherExceptions = + this.getMetricsRegistry().newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java index 254d3b4a971..5952a60a0a5 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.yetus.audience.InterfaceAudience; @@ -39,25 +37,25 @@ import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; public final class Interns { private static LoadingCache> infoCache = - CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) - .build(new CacheLoader>() { - public ConcurrentHashMap load(String key) { - return new ConcurrentHashMap<>(); - } - }); + CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) + .build(new CacheLoader>() { + public ConcurrentHashMap load(String key) { + return new ConcurrentHashMap<>(); + } + }); private static LoadingCache> tagCache = - CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) - .build(new CacheLoader>() { - public ConcurrentHashMap load(MetricsInfo key) { - return new ConcurrentHashMap<>(); - } - }); + CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) + .build(new CacheLoader>() { + public ConcurrentHashMap load(MetricsInfo key) { + return new ConcurrentHashMap<>(); + } + }); - private Interns(){} + private Interns() { + } /** * Get a metric info object - * * @return an interned metric info object */ public static MetricsInfo info(String name, String description) { @@ -72,7 +70,6 @@ public final class Interns { /** * Get a metrics tag - * * @param info of the tag * @param value of the tag * @return an interned metrics tag @@ -89,7 +86,6 @@ public final class Interns { /** * Get a metrics tag - * * @param name of the tag * @param description of the tag * @param value of the tag diff --git 
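Interns keeps a two-level cache (a Guava LoadingCache of ConcurrentHashMaps) so the same name/description pair always resolves to one shared object. A self-contained sketch of the same interning shape, using plain Guava rather than HBase's shaded thirdparty packages, and plain Strings in place of MetricsInfo:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public final class StringInterns {

  private static final LoadingCache<String, ConcurrentHashMap<String, String>> INFO_CACHE =
    CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS)
      .build(new CacheLoader<String, ConcurrentHashMap<String, String>>() {
        @Override
        public ConcurrentHashMap<String, String> load(String name) {
          return new ConcurrentHashMap<>();
        }
      });

  private StringInterns() {
  }

  public static String info(String name, String description) {
    // First level keyed by metric name, second level keyed by description,
    // so repeated lookups return the exact same interned instance.
    ConcurrentHashMap<String, String> byDescription = INFO_CACHE.getUnchecked(name);
    return byDescription.computeIfAbsent(description, d -> name + ": " + d);
  }

  public static void main(String[] args) {
    String a = info("regionCount", "Number of regions");
    String b = info("regionCount", "Number of regions");
    System.out.println(a == b); // true: same interned instance
  }
}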
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java index a5ffe8fb5e2..f54d26bdb54 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; - import org.apache.hadoop.metrics2.util.MBeans; import org.apache.yetus.audience.InterfaceAudience; @@ -33,7 +31,7 @@ public class MBeanSourceImpl implements MBeanSource { * Register an mbean with the underlying metrics system * @param serviceName Metrics service/system name * @param metricsName name of the metrics obejct to expose - * @param theMbean the actual MBean + * @param theMbean the actual MBean * @return ObjectName from jmx */ @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java index 42d139cb4e5..c36a592682e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
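MBeanSourceImpl boils down to registering an object under a JMX ObjectName. For readers unfamiliar with those mechanics, here is a hedged, standalone example using the standard javax.management API; the service and bean names are made up, and the patched class goes through Hadoop's MBeans helper rather than the platform server directly:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxRegistrationSketch {

  public interface DemoMXBean {
    long getValue();
  }

  public static class Demo implements DemoMXBean {
    @Override
    public long getValue() {
      return 42L;
    }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Domain and key are illustrative; HBase derives them from the metrics names.
    ObjectName name = new ObjectName("HypotheticalService:name=Demo");
    server.registerMBean(new Demo(), name);
    System.out.println("registered " + name);
  }
}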
*/ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.MetricsInfo; @@ -37,30 +36,33 @@ class MetricsInfoImpl implements MetricsInfo { this.description = Preconditions.checkNotNull(description, "description"); } - @Override public String name() { + @Override + public String name() { return name; } - @Override public String description() { + @Override + public String description() { return description; } - @Override public boolean equals(Object obj) { + @Override + public boolean equals(Object obj) { if (obj instanceof MetricsInfo) { MetricsInfo other = (MetricsInfo) obj; - return Objects.equal(name, other.name()) && - Objects.equal(description, other.description()); + return Objects.equal(name, other.name()) && Objects.equal(description, other.description()); } return false; } - @Override public int hashCode() { + @Override + public int hashCode() { return Objects.hashCode(name, description); } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name).add("description", description) - .toString(); + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("description", description) + .toString(); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java index a816d497044..9afa094524b 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,13 +40,11 @@ import org.slf4j.LoggerFactory; /** * This class acts as an adapter to export the MetricRegistry's in the global registry. Each - * MetricRegistry will be registered or unregistered from the metric2 system. The collection will - * be performed via the MetricsSourceAdapter and the MetricRegistry will collected like a - * BaseSource instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's - * JMX context. - * - *
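MetricsInfoImpl, reflowed above, is a small immutable value type whose equals/hashCode/toString follow the usual name-plus-description contract. A sketch of the same shape using java.util.Objects instead of the Guava helpers so it compiles on its own (InfoSketch is an illustrative name):

import java.util.Objects;

public final class InfoSketch {
  private final String name;
  private final String description;

  public InfoSketch(String name, String description) {
    this.name = Objects.requireNonNull(name, "name");
    this.description = Objects.requireNonNull(description, "description");
  }

  public String name() {
    return name;
  }

  public String description() {
    return description;
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof InfoSketch)) {
      return false;
    }
    InfoSketch other = (InfoSketch) obj;
    return Objects.equals(name, other.name) && Objects.equals(description, other.description);
  }

  @Override
  public int hashCode() {
    return Objects.hash(name, description);
  }

  @Override
  public String toString() {
    return "InfoSketch{name=" + name + ", description=" + description + "}";
  }
}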

      Developer note: - * Unlike the current metrics2 based approach, the new metrics approach + * MetricRegistry will be registered or unregistered from the metric2 system. The collection will be + * performed via the MetricsSourceAdapter and the MetricRegistry will collected like a BaseSource + * instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's JMX context. + *

      + * Developer note: Unlike the current metrics2 based approach, the new metrics approach * (hbase-metrics-api and hbase-metrics modules) work by having different MetricRegistries that are * initialized and used from the code that lives in their respective modules (hbase-server, etc). * There is no need to define BaseSource classes and do a lot of indirection. The MetricRegistry'es @@ -54,7 +52,6 @@ import org.slf4j.LoggerFactory; * MetricRegistries.global() and register adapters to the metrics2 subsystem. These adapters then * report the actual values by delegating to * {@link HBaseMetrics2HadoopMetricsAdapter#snapshotAllMetrics(MetricRegistry, MetricsCollector)}. - * * We do not initialize the Hadoop Metrics2 system assuming that other BaseSources already do so * (see BaseSourceImpl). Once the last BaseSource is moved to the new system, the metric2 * initialization should be moved here. @@ -67,6 +64,7 @@ public final class GlobalMetricRegistriesAdapter { private class MetricsSourceAdapter implements MetricsSource { private final MetricRegistry registry; + MetricsSourceAdapter(MetricRegistry registry) { this.registry = registry; } @@ -135,7 +133,7 @@ public final class GlobalMetricRegistriesAdapter { MetricsSourceAdapter adapter = new MetricsSourceAdapter(registry); LOG.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription()); DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), - info.getMetricsDescription(), adapter); + info.getMetricsDescription(), adapter); registeredSources.put(info, adapter); // next collection will collect the newly registered MetricSource. Doing this here leads to // ConcurrentModificationException. @@ -145,7 +143,7 @@ public final class GlobalMetricRegistriesAdapter { boolean removed = false; // Remove registered sources if it is removed from the global registry for (Iterator> it = - registeredSources.entrySet().iterator(); it.hasNext();) { + registeredSources.entrySet().iterator(); it.hasNext();) { Entry entry = it.next(); MetricRegistryInfo info = entry.getKey(); Optional found = MetricRegistries.global().get(info); @@ -153,7 +151,7 @@ public final class GlobalMetricRegistriesAdapter { if (LOG.isDebugEnabled()) { LOG.debug("Removing adapter for the MetricRegistry: " + info.getMetricsJmxContext()); } - synchronized(DefaultMetricsSystem.instance()) { + synchronized (DefaultMetricsSystem.instance()) { DefaultMetricsSystem.instance().unregisterSource(info.getMetricsJmxContext()); helper.removeSourceName(info.getMetricsJmxContext()); helper.removeObjectName(info.getMetricsJmxContext()); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java index 5fc2450cdb5..8e8fcf736bf 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
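GlobalMetricRegistriesAdapter registers an adapter for every registry that appears in MetricRegistries.global() and walks its own map with an explicit Iterator so stale adapters can be removed in place. A simplified sketch of that reconciliation loop, with Strings standing in for MetricRegistryInfo and the adapters:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

public class RegistrySyncSketch {
  private final Map<String, String> registeredAdapters = new HashMap<>();

  public void sync(Set<String> globalRegistries) {
    // Register adapters for registries that appeared since the last pass.
    for (String name : globalRegistries) {
      registeredAdapters.computeIfAbsent(name, n -> "adapter-for-" + n);
    }
    // Unregister adapters whose registry is gone; iterator.remove() avoids the
    // ConcurrentModificationException a plain for-each removal would risk.
    for (Iterator<Entry<String, String>> it = registeredAdapters.entrySet().iterator();
        it.hasNext();) {
      Entry<String, String> entry = it.next();
      if (!globalRegistries.contains(entry.getKey())) {
        it.remove();
      }
    }
  }

  public static void main(String[] args) {
    RegistrySyncSketch sync = new RegistrySyncSketch();
    sync.sync(Set.of("WAL", "RPC"));
    sync.sync(Set.of("RPC"));
    System.out.println(sync.registeredAdapters.keySet()); // [RPC]
  }
}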
*/ -/* - * Copyright 2016 Josh Elser - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ package org.apache.hadoop.hbase.metrics.impl; import java.util.Map; @@ -53,16 +38,15 @@ import org.slf4j.LoggerFactory; /** * This is the adapter from "HBase Metrics Framework", implemented in hbase-metrics-api and - * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, - * but a helper to be able to collect all of the Metric's in the MetricRegistry using the - * MetricsCollector and MetricsRecordBuilder. - * - * Some of the code is forked from https://github.com/joshelser/dropwizard-hadoop-metrics2. + * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, but + * a helper to be able to collect all of the Metric's in the MetricRegistry using the + * MetricsCollector and MetricsRecordBuilder. Some of the code is forked from + * https://github.com/joshelser/dropwizard-hadoop-metrics2. */ @InterfaceAudience.Private public class HBaseMetrics2HadoopMetricsAdapter { - private static final Logger LOG - = LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); + private static final Logger LOG = + LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); private static final String EMPTY_STRING = ""; public HBaseMetrics2HadoopMetricsAdapter() { @@ -70,14 +54,12 @@ public class HBaseMetrics2HadoopMetricsAdapter { /** * Iterates over the MetricRegistry and adds them to the {@code collector}. - * * @param collector A metrics collector */ - public void snapshotAllMetrics(MetricRegistry metricRegistry, - MetricsCollector collector) { + public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) { MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo(); - MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(), - info.getMetricsDescription())); + MetricsRecordBuilder builder = + collector.addRecord(Interns.info(info.getMetricsName(), info.getMetricsDescription())); builder.setContext(info.getMetricsContext()); snapshotAllMetrics(metricRegistry, builder); @@ -85,13 +67,12 @@ public class HBaseMetrics2HadoopMetricsAdapter { /** * Iterates over the MetricRegistry and adds them to the {@code builder}. 
- * * @param builder A record builder */ public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) { Map metrics = metricRegistry.getMetrics(); - for (Map.Entry e: metrics.entrySet()) { + for (Map.Entry e : metrics.entrySet()) { // Always capitalize the name String name = StringUtils.capitalize(e.getKey()); Metric metric = e.getValue(); @@ -99,13 +80,13 @@ public class HBaseMetrics2HadoopMetricsAdapter { if (metric instanceof Gauge) { addGauge(name, (Gauge) metric, builder); } else if (metric instanceof Counter) { - addCounter(name, (Counter)metric, builder); + addCounter(name, (Counter) metric, builder); } else if (metric instanceof Histogram) { - addHistogram(name, (Histogram)metric, builder); + addHistogram(name, (Histogram) metric, builder); } else if (metric instanceof Meter) { - addMeter(name, (Meter)metric, builder); + addMeter(name, (Meter) metric, builder); } else if (metric instanceof Timer) { - addTimer(name, (Timer)metric, builder); + addTimer(name, (Timer) metric, builder); } else { LOG.info("Ignoring unknown Metric class " + metric.getClass().getName()); } @@ -137,10 +118,9 @@ public class HBaseMetrics2HadoopMetricsAdapter { /** * Add Histogram value-distribution data to a Hadoop-Metrics2 record building. - * - * @param name A base name for this record. + * @param name A base name for this record. * @param histogram A histogram to measure distribution of values. - * @param builder A Hadoop-Metrics2 record builder. + * @param builder A Hadoop-Metrics2 record builder. */ private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder builder) { MutableHistogram.snapshot(name, EMPTY_STRING, histogram, builder, true); @@ -149,9 +129,8 @@ public class HBaseMetrics2HadoopMetricsAdapter { /** * Add Dropwizard-Metrics rate information to a Hadoop-Metrics2 record builder, converting the * rates to the appropriate unit. - * * @param builder A Hadoop-Metrics2 record builder. - * @param name A base name for this record. + * @param name A base name for this record. */ private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) { builder.addGauge(Interns.info(name + "_count", EMPTY_STRING), meter.getCount()); @@ -159,7 +138,7 @@ public class HBaseMetrics2HadoopMetricsAdapter { builder.addGauge(Interns.info(name + "_1min_rate", EMPTY_STRING), meter.getOneMinuteRate()); builder.addGauge(Interns.info(name + "_5min_rate", EMPTY_STRING), meter.getFiveMinuteRate()); builder.addGauge(Interns.info(name + "_15min_rate", EMPTY_STRING), - meter.getFifteenMinuteRate()); + meter.getFifteenMinuteRate()); } private void addTimer(String name, Timer timer, MetricsRecordBuilder builder) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java index 047f8e13b1e..c2e8d329143 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
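snapshotAllMetrics in the hunk above dispatches on the concrete metric type (Gauge, Counter, Histogram, Meter, Timer) and logs anything it does not recognize. A toy model of that dispatch; the Gauge/Counter classes here are stand-ins, not the hbase-metrics-api types:

import java.util.LinkedHashMap;
import java.util.Map;

public class MetricDispatchSketch {
  interface Metric {
  }

  static class Gauge implements Metric {
    final long value;
    Gauge(long value) {
      this.value = value;
    }
  }

  static class Counter implements Metric {
    final long count;
    Counter(long count) {
      this.count = count;
    }
  }

  public static void main(String[] args) {
    Map<String, Metric> metrics = new LinkedHashMap<>();
    metrics.put("memStoreSize", new Gauge(512));
    metrics.put("totalRequestCount", new Counter(10_000));
    metrics.put("mystery", new Metric() {
    });

    for (Map.Entry<String, Metric> e : metrics.entrySet()) {
      Metric metric = e.getValue();
      if (metric instanceof Gauge) {
        System.out.println(e.getKey() + " gauge=" + ((Gauge) metric).value);
      } else if (metric instanceof Counter) {
        System.out.println(e.getKey() + " counter=" + ((Counter) metric).count);
      } else {
        // Mirrors the adapter: unknown metric classes are logged and skipped.
        System.out.println("Ignoring unknown Metric class " + metric.getClass().getName());
      }
    }
  }
}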
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -29,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl implements - MetricsHeapMemoryManagerSource { +public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl + implements MetricsHeapMemoryManagerSource { private final MetricHistogram blockedFlushHistogram; private final MetricHistogram unblockedFlushHistogram; @@ -52,39 +51,38 @@ public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl implement } public MetricsHeapMemoryManagerSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Histograms - blockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); - unblockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); - incMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); - decMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); - incBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, INC_BLOCKCACHE_TUNING_DESC); - decBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, DEC_BLOCKCACHE_TUNING_DESC); + blockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); + unblockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); + incMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); + decMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); + incBlockCacheSizeHistogram = + getMetricsRegistry().newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, INC_BLOCKCACHE_TUNING_DESC); + 
decBlockCacheSizeHistogram = + getMetricsRegistry().newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, DEC_BLOCKCACHE_TUNING_DESC); // Gauges - blockedFlushGauge = getMetricsRegistry() - .newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); - unblockedFlushGauge = getMetricsRegistry() - .newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); - memStoreSizeGauge = getMetricsRegistry() - .newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); - blockCacheSizeGauge = getMetricsRegistry() - .newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); + blockedFlushGauge = + getMetricsRegistry().newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); + unblockedFlushGauge = + getMetricsRegistry().newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); + memStoreSizeGauge = + getMetricsRegistry().newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); + blockCacheSizeGauge = + getMetricsRegistry().newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); // Counters - doNothingCounter = getMetricsRegistry() - .newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); + doNothingCounter = + getMetricsRegistry().newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); aboveHeapOccupancyLowWatermarkCounter = getMetricsRegistry() - .newCounter(ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, - ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); + .newCounter(ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java index 44ce40b16c2..a1f46182c17 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -34,24 +32,21 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl - implements MetricsRegionAggregateSource { + implements MetricsRegionAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsRegionAggregateSourceImpl.class); private final MetricsExecutorImpl executor = new MetricsExecutorImpl(); private final Set regionSources = - Collections.newSetFromMap(new ConcurrentHashMap()); + Collections.newSetFromMap(new ConcurrentHashMap()); public MetricsRegionAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - - public MetricsRegionAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRegionAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Every few mins clean the JMX cache. @@ -75,9 +70,8 @@ public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl } catch (Exception e) { // Ignored. If this errors out it means that someone is double // closing the region source and the region is already nulled out. - LOG.info( - "Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), - e); + LOG.info("Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), + e); } clearCache(); } @@ -87,10 +81,9 @@ public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector * @param all get all the metrics regardless of when they last changed. */ diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java index 3a796ddf0c5..b13a0508391 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
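MetricsRegionAggregateSourceImpl tracks its per-region sources in a concurrent Set built with Collections.newSetFromMap and clears the JMX cache on a schedule. A standalone sketch of that bookkeeping; the five-minute period, method names, and String sources are illustrative:

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class AggregateSourceSketch {
  private final Set<String> regionSources =
    Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
  private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();

  public AggregateSourceSketch() {
    // Periodically refresh the cached view, like the JMX cache cleanup above.
    executor.scheduleWithFixedDelay(this::clearCache, 5, 5, TimeUnit.MINUTES);
  }

  public void register(String source) {
    regionSources.add(source);
  }

  public void deregister(String source) {
    regionSources.remove(source);
    clearCache();
  }

  private void clearCache() {
    System.out.println("clearing cached view of " + regionSources.size() + " sources");
  }

  public static void main(String[] args) {
    AggregateSourceSketch agg = new AggregateSourceSketch();
    agg.register("region-1");
    agg.deregister("region-1");
    agg.executor.shutdownNow();
  }
}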
You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Meter; @@ -28,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; * Implementation of {@link MetricsRegionServerQuotaSource}. */ @InterfaceAudience.Private -public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl implements - MetricsRegionServerQuotaSource { +public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl + implements MetricsRegionServerQuotaSource { private final Meter tablesInViolationCounter; private final Meter spaceQuotaSnapshotsReceived; @@ -43,7 +43,7 @@ public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl implement } public MetricsRegionServerQuotaSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); tablesInViolationCounter = this.registry.meter(NUM_TABLES_IN_VIOLATION_NAME); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java index ccc17492dba..c2a5e163f0f 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,12 +23,13 @@ import org.apache.hadoop.hbase.io.MetricsIOWrapper; import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper + * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper */ @InterfaceAudience.Private public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory { public static enum FactoryStorage { INSTANCE; + private Object aggLock = new Object(); private MetricsRegionAggregateSourceImpl regionAggImpl; private MetricsUserAggregateSourceImpl userAggImpl; @@ -75,8 +76,8 @@ public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServer } @Override - public synchronized MetricsRegionServerSource createServer( - MetricsRegionServerWrapper regionServerWrapper) { + public synchronized MetricsRegionServerSource + createServer(MetricsRegionServerWrapper regionServerWrapper) { return new MetricsRegionServerSourceImpl(regionServerWrapper); } @@ -97,6 +98,6 @@ public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServer @Override public org.apache.hadoop.hbase.regionserver.MetricsUserSource createUser(String shortUserName) { return new org.apache.hadoop.hbase.regionserver.MetricsUserSourceImpl(shortUserName, - getUserAggregate()); + getUserAggregate()); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index e9ff31f29d4..705b5f3033b 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -27,13 +26,12 @@ import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsRegionServerSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsRegionServerSource. 
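The factory above keeps its shared aggregate sources inside an enum (FactoryStorage) and creates them lazily under a lock. A compact sketch of that storage pattern; the String field stands in for the real aggregate source instance:

public enum FactoryStorageSketch {
  INSTANCE;

  private final Object aggLock = new Object();
  private String aggregate; // stand-in for the shared aggregate source

  public String getAggregate() {
    synchronized (aggLock) {
      if (aggregate == null) {
        // Created at most once, then handed out to every caller.
        aggregate = "aggregate-source";
      }
      return aggregate;
    }
  }

  public static void main(String[] args) {
    System.out.println(
      FactoryStorageSketch.INSTANCE.getAggregate() == FactoryStorageSketch.INSTANCE.getAggregate());
  }
}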
Implements BaseSource through + * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsRegionServerSourceImpl - extends BaseSourceImpl implements MetricsRegionServerSource { +public class MetricsRegionServerSourceImpl extends BaseSourceImpl + implements MetricsRegionServerSource { final MetricsRegionServerWrapper rsWrap; private final MetricHistogram putHisto; @@ -97,11 +95,8 @@ public class MetricsRegionServerSourceImpl this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); } - public MetricsRegionServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsRegionServerWrapper rsWrap) { + public MetricsRegionServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsRegionServerWrapper rsWrap) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.rsWrap = rsWrap; @@ -131,58 +126,59 @@ public class MetricsRegionServerSourceImpl scanTimeHisto = getMetricsRegistry().newTimeHistogram(SCAN_TIME_KEY); flushTimeHisto = getMetricsRegistry().newTimeHistogram(FLUSH_TIME, FLUSH_TIME_DESC); - flushMemstoreSizeHisto = getMetricsRegistry() - .newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); - flushOutputSizeHisto = getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, - FLUSH_OUTPUT_SIZE_DESC); - flushedOutputBytes = getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, - FLUSHED_OUTPUT_BYTES_DESC, 0L); - flushedMemstoreBytes = getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, - FLUSHED_MEMSTORE_BYTES_DESC, 0L); + flushMemstoreSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); + flushOutputSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); + flushedOutputBytes = + getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); + flushedMemstoreBytes = + getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, FLUSHED_MEMSTORE_BYTES_DESC, 0L); - compactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); - compactionInputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_INPUT_FILE_COUNT, COMPACTION_INPUT_FILE_COUNT_DESC); - compactionInputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); - compactionOutputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_OUTPUT_FILE_COUNT, COMPACTION_OUTPUT_FILE_COUNT_DESC); - compactionOutputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); - compactedInputBytes = getMetricsRegistry() - .newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); - compactedOutputBytes = getMetricsRegistry() - .newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); + compactionTimeHisto = + getMetricsRegistry().newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); + compactionInputFileCountHisto = getMetricsRegistry().newHistogram(COMPACTION_INPUT_FILE_COUNT, + COMPACTION_INPUT_FILE_COUNT_DESC); + compactionInputSizeHisto = + getMetricsRegistry().newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); + compactionOutputFileCountHisto = getMetricsRegistry().newHistogram(COMPACTION_OUTPUT_FILE_COUNT, + COMPACTION_OUTPUT_FILE_COUNT_DESC); + compactionOutputSizeHisto = + 
getMetricsRegistry().newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); + compactedInputBytes = + getMetricsRegistry().newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); + compactedOutputBytes = + getMetricsRegistry().newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); - majorCompactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); + majorCompactionTimeHisto = + getMetricsRegistry().newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); majorCompactionInputFileCountHisto = getMetricsRegistry() .newHistogram(MAJOR_COMPACTION_INPUT_FILE_COUNT, MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC); majorCompactionInputSizeHisto = getMetricsRegistry() - .newSizeHistogram(MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); + .newSizeHistogram(MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); majorCompactionOutputFileCountHisto = getMetricsRegistry() - .newHistogram(MAJOR_COMPACTION_OUTPUT_FILE_COUNT, MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); + .newHistogram(MAJOR_COMPACTION_OUTPUT_FILE_COUNT, MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); majorCompactionOutputSizeHisto = getMetricsRegistry() .newSizeHistogram(MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); - majorCompactedInputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_INPUT_BYTES, MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); - majorCompactedOutputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); + majorCompactedInputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_INPUT_BYTES, + MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); + majorCompactedOutputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, + MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); splitTimeHisto = getMetricsRegistry().newTimeHistogram(SPLIT_KEY); splitRequest = getMetricsRegistry().newCounter(SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L); splitSuccess = getMetricsRegistry().newCounter(SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); - scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); + scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, + SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); } @Override @@ -332,10 +328,9 @@ public class MetricsRegionServerSourceImpl } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. 
- * * @param metricsCollector Collector to accept metrics * @param all push all or only changed? */ @@ -346,135 +341,129 @@ public class MetricsRegionServerSourceImpl // rsWrap can be null because this function is called inside of init. if (rsWrap != null) { addGaugesToMetricsRecordBuilder(mrb) - .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), - rsWrap.getTotalRequestCount()) - .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, - TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), rsWrap.getTotalRowActionRequestCount()) - .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), - rsWrap.getReadRequestsCount()) - .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, - FILTERED_READ_REQUEST_COUNT_DESC), rsWrap.getFilteredReadRequestsCount()) - .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), - rsWrap.getWriteRequestsCount()) - .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), - rsWrap.getRpcGetRequestsCount()) - .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcFullScanRequestsCount()) - .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcScanRequestsCount()) - .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), - rsWrap.getRpcMultiRequestsCount()) - .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), - rsWrap.getRpcMutateRequestsCount()) - .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksFailed()) - .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksPassed()) - .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), - rsWrap.getBlockCacheHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, - BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), rsWrap.getBlockCachePrimaryHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), - rsWrap.getBlockCacheMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, - BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), rsWrap.getBlockCachePrimaryMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), - rsWrap.getBlockCacheEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, - BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), - rsWrap.getBlockCachePrimaryEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, - BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), - rsWrap.getBlockCacheFailedInsertions()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), - rsWrap.getDataMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), - rsWrap.getLeafIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), - rsWrap.getBloomChunkMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), - rsWrap.getMetaMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), - rsWrap.getRootIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), - rsWrap.getIntermediateIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), - rsWrap.getFileInfoMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), - rsWrap.getGeneralBloomMetaMissCount()) - 
.addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), - rsWrap.getDeleteFamilyBloomMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), - rsWrap.getTrailerMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), - rsWrap.getDataHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), - rsWrap.getLeafIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), - rsWrap.getBloomChunkHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), - rsWrap.getMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), - rsWrap.getRootIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), - rsWrap.getIntermediateIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), - rsWrap.getFileInfoHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), - rsWrap.getGeneralBloomMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), - rsWrap.getDeleteFamilyBloomHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), - rsWrap.getTrailerHitCount()) - .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), - rsWrap.getUpdatesBlockedTime()) - .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), - rsWrap.getFlushedCellsCount()) - .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), - rsWrap.getCompactedCellsCount()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), - rsWrap.getMajorCompactedCellsCount()) - .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), - rsWrap.getFlushedCellsSize()) - .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), - rsWrap.getCompactedCellsSize()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), - rsWrap.getMajorCompactedCellsSize()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, - CELLS_COUNT_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsCountCompactedFromMob()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, - CELLS_COUNT_COMPACTED_TO_MOB_DESC), rsWrap.getCellsCountCompactedToMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, - CELLS_SIZE_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsSizeCompactedFromMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, - CELLS_SIZE_COMPACTED_TO_MOB_DESC), rsWrap.getCellsSizeCompactedToMob()) - .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), - rsWrap.getMobFlushCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), - rsWrap.getMobFlushedCellsCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), - rsWrap.getMobFlushedCellsSize()) - .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), - rsWrap.getMobScanCellsCount()) - .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), - rsWrap.getMobScanCellsSize()) - .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, - MOB_FILE_CACHE_ACCESS_COUNT_DESC), rsWrap.getMobFileCacheAccessCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), - rsWrap.getMobFileCacheMissCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, - MOB_FILE_CACHE_EVICTED_COUNT_DESC), rsWrap.getMobFileCacheEvictedCount()) - .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) - 
.addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), - rsWrap.getHedgedReadWins()) - .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), - rsWrap.getHedgedReadOpsInCurThread()) - .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), - rsWrap.getBlockedRequestsCount()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - rsWrap.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); + .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), + rsWrap.getTotalRequestCount()) + .addCounter( + Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), + rsWrap.getTotalRowActionRequestCount()) + .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), + rsWrap.getReadRequestsCount()) + .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC), + rsWrap.getFilteredReadRequestsCount()) + .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), + rsWrap.getWriteRequestsCount()) + .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), + rsWrap.getRpcGetRequestsCount()) + .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcFullScanRequestsCount()) + .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcScanRequestsCount()) + .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), + rsWrap.getRpcMultiRequestsCount()) + .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), + rsWrap.getRpcMutateRequestsCount()) + .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksFailed()) + .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksPassed()) + .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), + rsWrap.getBlockCacheHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), + rsWrap.getBlockCachePrimaryHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), + rsWrap.getBlockCacheMissCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), + rsWrap.getBlockCachePrimaryMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), + rsWrap.getBlockCacheEvictedCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), + rsWrap.getBlockCachePrimaryEvictedCount()) + .addCounter( + Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), + rsWrap.getBlockCacheFailedInsertions()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), rsWrap.getDataMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), + rsWrap.getLeafIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), + rsWrap.getBloomChunkMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), rsWrap.getMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), + rsWrap.getRootIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), + 
rsWrap.getIntermediateIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), + rsWrap.getFileInfoMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), + rsWrap.getGeneralBloomMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), + rsWrap.getDeleteFamilyBloomMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), rsWrap.getTrailerMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), rsWrap.getDataHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), + rsWrap.getLeafIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), + rsWrap.getBloomChunkHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), rsWrap.getMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), + rsWrap.getRootIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), + rsWrap.getIntermediateIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), rsWrap.getFileInfoHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), + rsWrap.getGeneralBloomMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), + rsWrap.getDeleteFamilyBloomHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), rsWrap.getTrailerHitCount()) + .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), + rsWrap.getUpdatesBlockedTime()) + .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), rsWrap.getFlushedCellsCount()) + .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), + rsWrap.getCompactedCellsCount()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), + rsWrap.getMajorCompactedCellsCount()) + .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), + rsWrap.getFlushedCellsSize()) + .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), + rsWrap.getCompactedCellsSize()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), + rsWrap.getMajorCompactedCellsSize()) + .addCounter( + Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, CELLS_COUNT_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsCountCompactedFromMob()) + .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, CELLS_COUNT_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsCountCompactedToMob()) + .addCounter(Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, CELLS_SIZE_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsSizeCompactedFromMob()) + .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, CELLS_SIZE_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsSizeCompactedToMob()) + .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), rsWrap.getMobFlushCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), + rsWrap.getMobFlushedCellsCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), + rsWrap.getMobFlushedCellsSize()) + .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), + rsWrap.getMobScanCellsCount()) + .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), + rsWrap.getMobScanCellsSize()) + .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, MOB_FILE_CACHE_ACCESS_COUNT_DESC), + rsWrap.getMobFileCacheAccessCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), + 
rsWrap.getMobFileCacheMissCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, MOB_FILE_CACHE_EVICTED_COUNT_DESC), + rsWrap.getMobFileCacheEvictedCount()) + .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) + .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), + rsWrap.getHedgedReadWins()) + .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), + rsWrap.getHedgedReadOpsInCurThread()) + .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), + rsWrap.getBlockedRequestsCount()) + .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), + rsWrap.getZookeeperQuorum()) + .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) + .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); } metricsRegistry.snapshot(mrb, all); @@ -488,112 +477,105 @@ public class MetricsRegionServerSourceImpl private MetricsRecordBuilder addGaugesToMetricsRecordBuilder(MetricsRecordBuilder mrb) { return mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions()) - .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) - .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) - .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) - .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), - rsWrap.getNumStoreFiles()) - .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) - .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) - .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), - rsWrap.getMaxStoreFileAge()) - .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), - rsWrap.getMinStoreFileAge()) - .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), - rsWrap.getAvgStoreFileAge()) - .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), - rsWrap.getNumReferenceFiles()) - .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) - .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), - rsWrap.getAverageRegionSize()) - .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), - rsWrap.getStoreFileIndexSize()) - .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), - rsWrap.getTotalStaticIndexSize()) - .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), - rsWrap.getTotalStaticBloomSize()) - .addGauge(Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, - NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), rsWrap.getNumMutationsWithoutWAL()) - .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), - rsWrap.getDataInMemoryWithoutWAL()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), - rsWrap.getPercentFileLocal()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, - PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), - rsWrap.getPercentFileLocalSecondaryRegions()) - .addGauge(Interns.info(TOTAL_BYTES_READ, - TOTAL_BYTES_READ_DESC), - rsWrap.getTotalBytesRead()) - .addGauge(Interns.info(LOCAL_BYTES_READ, - LOCAL_BYTES_READ_DESC), - rsWrap.getLocalBytesRead()) - .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, - SHORTCIRCUIT_BYTES_READ_DESC), - rsWrap.getShortCircuitBytesRead()) - .addGauge(Interns.info(ZEROCOPY_BYTES_READ, - ZEROCOPY_BYTES_READ_DESC), - rsWrap.getZeroCopyBytesRead()) - 
.addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), - rsWrap.getSplitQueueSize()) - .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), - rsWrap.getCompactionQueueSize()) - .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, - SMALL_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getSmallCompactionQueueSize()) - .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, - LARGE_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getLargeCompactionQueueSize()) - .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), - rsWrap.getFlushQueueSize()) - .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), - rsWrap.getBlockCacheFreeSize()) - .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), - rsWrap.getBlockCacheCount()) - .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), - rsWrap.getBlockCacheSize()) - .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), - rsWrap.getBlockCacheHitPercent()) - .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, - BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent()) - .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), - rsWrap.getL1CacheHitCount()) - .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), - rsWrap.getL1CacheMissCount()) - .addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), - rsWrap.getL1CacheHitRatio()) - .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), - rsWrap.getL1CacheMissRatio()) - .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), - rsWrap.getL2CacheHitCount()) - .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), - rsWrap.getL2CacheMissCount()) - .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), - rsWrap.getL2CacheHitRatio()) - .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), - rsWrap.getL2CacheMissRatio()) - .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), - rsWrap.getMobFileCacheCount()) - .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), - rsWrap.getMobFileCacheHitPercent()) - .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), - rsWrap.getReadRequestsRatePerSecond()) - .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), - rsWrap.getWriteRequestsRatePerSecond()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorHeapAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorPoolAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), - rsWrap.getByteBuffAllocatorHeapAllocRatio()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorTotalBufferCount()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorUsedBufferCount()) - .addGauge(Interns.info(ACTIVE_SCANNERS, ACTIVE_SCANNERS_DESC), - rsWrap.getActiveScanners()); + .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) + .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) + 
.addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) + .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles()) + .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) + .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) + .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), + rsWrap.getMaxStoreFileAge()) + .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), + rsWrap.getMinStoreFileAge()) + .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), + rsWrap.getAvgStoreFileAge()) + .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), + rsWrap.getNumReferenceFiles()) + .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) + .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), + rsWrap.getAverageRegionSize()) + .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), + rsWrap.getStoreFileIndexSize()) + .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), + rsWrap.getTotalStaticIndexSize()) + .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), + rsWrap.getTotalStaticBloomSize()) + .addGauge(Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), + rsWrap.getNumMutationsWithoutWAL()) + .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), + rsWrap.getDataInMemoryWithoutWAL()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), + rsWrap.getPercentFileLocal()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, + PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), rsWrap.getPercentFileLocalSecondaryRegions()) + .addGauge(Interns.info(TOTAL_BYTES_READ, TOTAL_BYTES_READ_DESC), rsWrap.getTotalBytesRead()) + .addGauge(Interns.info(LOCAL_BYTES_READ, LOCAL_BYTES_READ_DESC), rsWrap.getLocalBytesRead()) + .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, SHORTCIRCUIT_BYTES_READ_DESC), + rsWrap.getShortCircuitBytesRead()) + .addGauge(Interns.info(ZEROCOPY_BYTES_READ, ZEROCOPY_BYTES_READ_DESC), + rsWrap.getZeroCopyBytesRead()) + .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), + rsWrap.getSplitQueueSize()) + .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getCompactionQueueSize()) + .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, SMALL_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getSmallCompactionQueueSize()) + .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, LARGE_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getLargeCompactionQueueSize()) + .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), + rsWrap.getFlushQueueSize()) + .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), + rsWrap.getBlockCacheFreeSize()) + .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), + rsWrap.getBlockCacheCount()) + .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), rsWrap.getBlockCacheSize()) + .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitPercent()) + .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitCachingPercent()) + .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), + rsWrap.getL1CacheHitCount()) + .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), + rsWrap.getL1CacheMissCount()) + 
.addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), + rsWrap.getL1CacheHitRatio()) + .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), + rsWrap.getL1CacheMissRatio()) + .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), + rsWrap.getL2CacheHitCount()) + .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), + rsWrap.getL2CacheMissCount()) + .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), + rsWrap.getL2CacheHitRatio()) + .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), + rsWrap.getL2CacheMissRatio()) + .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), + rsWrap.getMobFileCacheCount()) + .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), + rsWrap.getMobFileCacheHitPercent()) + .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), + rsWrap.getReadRequestsRatePerSecond()) + .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), + rsWrap.getWriteRequestsRatePerSecond()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorHeapAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorPoolAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), + rsWrap.getByteBuffAllocatorHeapAllocRatio()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), + rsWrap.getByteBuffAllocatorTotalBufferCount()) + .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), rsWrap.getByteBuffAllocatorUsedBufferCount()) + .addGauge(Interns.info(ACTIVE_SCANNERS, ACTIVE_SCANNERS_DESC), rsWrap.getActiveScanners()); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index 09d600f26ff..0c20456e8cb 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; @@ -72,19 +70,19 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { private final int hashCode; public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, - MetricsRegionAggregateSourceImpl aggregate) { + MetricsRegionAggregateSourceImpl aggregate) { this.regionWrapper = regionWrapper; agg = aggregate; hashCode = regionWrapper.getRegionHashCode(); agg.register(this); - LOG.debug("Creating new MetricsRegionSourceImpl for table " + - regionWrapper.getTableName() + " " + regionWrapper.getRegionName()); + LOG.debug("Creating new MetricsRegionSourceImpl for table " + regionWrapper.getTableName() + " " + + regionWrapper.getRegionName()); registry = agg.getMetricsRegistry(); regionNamePrefix1 = "Namespace_" + regionWrapper.getNamespace() + "_table_" - + regionWrapper.getTableName() + "_region_" + regionWrapper.getRegionName(); + + regionWrapper.getTableName() + "_region_" + regionWrapper.getRegionName(); regionNamePrefix2 = "_metric_"; regionNamePrefix = regionNamePrefix1 + regionNamePrefix2; @@ -204,111 +202,89 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { return; } + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, + MetricsRegionServerSource.STORE_COUNT_DESC), this.regionWrapper.getNumStores()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + MetricsRegionServerSource.STOREFILE_COUNT_DESC), this.regionWrapper.getNumStoreFiles()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, + MetricsRegionServerSource.STORE_REF_COUNT), this.regionWrapper.getStoreRefCount()); mrb.addGauge( - Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, - MetricsRegionServerSource.STORE_COUNT_DESC), - this.regionWrapper.getNumStores()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, - MetricsRegionServerSource.STOREFILE_COUNT_DESC), - this.regionWrapper.getNumStoreFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, - MetricsRegionServerSource.STORE_REF_COUNT), - this.regionWrapper.getStoreRefCount()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, - MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), - this.regionWrapper.getMaxCompactedStoreFileRefCount() - ); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, - MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - this.regionWrapper.getMemStoreSize()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, - MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), + Interns.info( + regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), + this.regionWrapper.getMaxCompactedStoreFileRefCount()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + MetricsRegionServerSource.MEMSTORE_SIZE_DESC), this.regionWrapper.getMemStoreSize()); + mrb.addGauge( + Interns.info(regionNamePrefix + 
MetricsRegionServerSource.MAX_STORE_FILE_AGE, + MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), this.regionWrapper.getMaxStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, - MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, + MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), this.regionWrapper.getMinStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, - MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), this.regionWrapper.getAvgStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, - MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), this.regionWrapper.getNumReferenceFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, - MetricsRegionServerSource.STOREFILE_SIZE_DESC), - this.regionWrapper.getStoreFileSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, - MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), - this.regionWrapper.getNumCompactionsCompleted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + MetricsRegionServerSource.STOREFILE_SIZE_DESC), this.regionWrapper.getStoreFileSize()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, + MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), + this.regionWrapper.getNumCompactionsCompleted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, MetricsRegionSource.COMPACTIONS_FAILED_DESC), - this.regionWrapper.getNumCompactionsFailed()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, - MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), - this.regionWrapper.getLastMajorCompactionAge()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, - MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), - this.regionWrapper.getNumBytesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, - MetricsRegionSource.NUM_FILES_COMPACTED_DESC), - this.regionWrapper.getNumFilesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, - MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - this.regionWrapper.getReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - this.regionWrapper.getFilteredReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, - MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - this.regionWrapper.getWriteRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.REPLICA_ID, - 
MetricsRegionSource.REPLICA_ID_DESC), - this.regionWrapper.getReplicaId()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, - MetricsRegionSource.COMPACTIONS_QUEUED_DESC), - this.regionWrapper.getNumCompactionsQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, - MetricsRegionSource.FLUSHES_QUEUED_DESC), - this.regionWrapper.getNumFlushesQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, - MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), - this.regionWrapper.getMaxCompactionQueueSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, - MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), - this.regionWrapper.getMaxFlushQueueSize()); + this.regionWrapper.getNumCompactionsFailed()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, + MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), + this.regionWrapper.getLastMajorCompactionAge()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, + MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), this.regionWrapper.getNumBytesCompacted()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, + MetricsRegionSource.NUM_FILES_COMPACTED_DESC), this.regionWrapper.getNumFilesCompacted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), + this.regionWrapper.getReadRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + this.regionWrapper.getFilteredReadRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), + this.regionWrapper.getWriteRequestCount()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.REPLICA_ID, + MetricsRegionSource.REPLICA_ID_DESC), this.regionWrapper.getReplicaId()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, + MetricsRegionSource.COMPACTIONS_QUEUED_DESC), + this.regionWrapper.getNumCompactionsQueued()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, + MetricsRegionSource.FLUSHES_QUEUED_DESC), this.regionWrapper.getNumFlushesQueued()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, + MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), + this.regionWrapper.getMaxCompactionQueueSize()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, + MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), this.regionWrapper.getMaxFlushQueueSize()); addCounter(mrb, this.regionWrapper.getMemstoreOnlyRowReadsCount(), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); addCounter(mrb, this.regionWrapper.getMixedRowReadsCount(), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } private void addCounter(MetricsRecordBuilder mrb, Map metricMap, String metricName, - String metricDesc) { + String metricDesc) { if 
(metricMap != null) { for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric @@ -326,7 +302,7 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { @Override public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0); + return obj == this + || (obj instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java index 0b13e5c8dfe..9b36d27b99d 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; @@ -31,7 +29,7 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class MetricsTableAggregateSourceImpl extends BaseSourceImpl - implements MetricsTableAggregateSource { + implements MetricsTableAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsTableAggregateSourceImpl.class); private ConcurrentHashMap tableSources = new ConcurrentHashMap<>(); @@ -40,10 +38,8 @@ public class MetricsTableAggregateSourceImpl extends BaseSourceImpl this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsTableAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsTableAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -67,7 +63,7 @@ public class MetricsTableAggregateSourceImpl extends BaseSourceImpl @Override public MetricsTableSource getOrCreateTableSource(String table, - MetricsTableWrapperAggregate wrapper) { + MetricsTableWrapperAggregate wrapper) { MetricsTableSource source = tableSources.get(table); if (source != null) { return source; @@ -82,10 +78,9 @@ public class MetricsTableAggregateSourceImpl extends BaseSourceImpl } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector * @param all get all the metrics regardless of when they last changed. 
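The javadoc just above spells out the push model that Hadoop metrics2 imposes: getMetrics returns nothing and instead writes into a record obtained from the collector. A minimal sketch of that pattern, assuming the same Hadoop/HBase metrics classes this patch already touches; the class name, record name and gauge below are illustrative placeholders, not anything defined in HBase:

import org.apache.hadoop.hbase.metrics.Interns;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;

// Hypothetical source, not part of this patch.
public class ExamplePushMetricsSource implements MetricsSource {

  // The real sources obtain their registry via BaseSourceImpl / getMetricsRegistry().
  private final DynamicMetricsRegistry registry = new DynamicMetricsRegistry("ExampleSource");

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // Nothing is returned; a record is opened on the collector and metrics are pushed into it.
    MetricsRecordBuilder mrb = collector.addRecord("ExampleRecord");
    mrb.addGauge(Interns.info("exampleGauge", "Illustrative gauge, not a real HBase metric"), 42L);
    // Anything registered on the dynamic registry is snapshotted into the same record.
    registry.snapshot(mrb, all);
  }
}

The aggregate, region and table sources in this file follow the same shape: add explicit counters and gauges to the record builder via Interns.info, then let the registry snapshot the rest.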
*/ diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index dd143d4c6f5..1c90b33d5d4 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +33,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsTableLatenciesImpl extends BaseSourceImpl implements MetricsTableLatencies { - private final HashMap histogramsByTable = new HashMap<>(); + private final HashMap histogramsByTable = new HashMap<>(); public static class TableHistograms { final MetricHistogram getTimeHisto; @@ -50,20 +51,17 @@ public class MetricsTableLatenciesImpl extends BaseSourceImpl implements Metrics TableHistograms(DynamicMetricsRegistry registry, TableName tn) { getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME)); - incrementTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, INCREMENT_TIME)); + incrementTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, INCREMENT_TIME)); appendTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, APPEND_TIME)); putTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_TIME)); putBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_BATCH_TIME)); deleteTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_TIME)); - deleteBatchTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, DELETE_BATCH_TIME)); + deleteBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_BATCH_TIME)); scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME)); scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE)); checkAndDeleteTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); - checkAndPutTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); + checkAndPutTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); checkAndMutateTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); } @@ -141,7 +139,7 @@ public class MetricsTableLatenciesImpl extends BaseSourceImpl 
implements Metrics } public MetricsTableLatenciesImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java index 6b1d323dc19..dc53c940166 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,15 +19,14 @@ package org.apache.hadoop.hbase.regionserver; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Meter; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in - * a RegionServer. + * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in a + * RegionServer. 
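Per-table query tracking comes down to one Meter for reads and one for writes, created lazily against a MetricRegistry and marked on every operation. A small sketch of that idea, assuming only the MetricRegistry, Meter and TableName types already imported in this file; the class and metric names below are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

// Hypothetical helper, not part of this patch.
class ExampleTableQueryMeters {
  private final Meter readQueryMeter;
  private final Meter writeQueryMeter;

  ExampleTableQueryMeters(MetricRegistry registry, TableName table) {
    // meter(name) creates the meter on first use and returns the existing one afterwards.
    readQueryMeter = registry.meter(table.getNameAsString() + "_readQueryPerSecond");
    writeQueryMeter = registry.meter(table.getNameAsString() + "_writeQueryPerSecond");
  }

  void markReads(long count) {
    // Each mark feeds the moving rate that backs the "queries per second" view.
    readQueryMeter.mark(count);
  }

  void markWrites(long count) {
    writeQueryMeter.mark(count);
  }
}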
*/ @InterfaceAudience.Private public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter { @@ -42,8 +42,8 @@ public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter { final Meter tableWriteQueryMeter; TableMeters(MetricRegistry metricRegistry, TableName tableName) { - this.tableReadQueryMeter = metricRegistry.meter(qualifyMetricsName(tableName, - TABLE_READ_QUERY_PER_SECOND)); + this.tableReadQueryMeter = + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QUERY_PER_SECOND)); this.tableWriteQueryMeter = metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND)); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index d82cc535bf7..65f8f04102e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,7 +64,6 @@ import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPL import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricHistogram; @@ -126,19 +125,18 @@ public class MetricsTableSourceImpl implements MetricsTableSource { private MutableFastCounter majorCompactedInputBytes; private MutableFastCounter majorCompactedOutputBytes; - public MetricsTableSourceImpl(String tblName, - MetricsTableAggregateSourceImpl aggregate, MetricsTableWrapperAggregate tblWrapperAgg) { + public MetricsTableSourceImpl(String tblName, MetricsTableAggregateSourceImpl aggregate, + MetricsTableWrapperAggregate tblWrapperAgg) { LOG.debug("Creating new MetricsTableSourceImpl for table '{}'", tblName); this.tableName = TableName.valueOf(tblName); this.agg = aggregate; this.tableWrapperAgg = tblWrapperAgg; this.registry = agg.getMetricsRegistry(); - this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + - "_table_" + this.tableName.getQualifierAsString(); + this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + "_table_" + + this.tableName.getQualifierAsString(); this.tableNamePrefixPart2 = "_metric_"; - this.tableNamePrefix = tableNamePrefixPart1 + - tableNamePrefixPart2; + this.tableNamePrefix = tableNamePrefixPart1 + tableNamePrefixPart2; this.hashCode = this.tableName.hashCode(); } @@ -146,16 +144,16 @@ public class MetricsTableSourceImpl implements MetricsTableSource { public synchronized void registerMetrics() { flushTimeHisto = registry.newTimeHistogram(tableNamePrefix + FLUSH_TIME, FLUSH_TIME_DESC); flushMemstoreSizeHisto = - registry.newSizeHistogram(tableNamePrefix + FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); + registry.newSizeHistogram(tableNamePrefix + FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); flushOutputSizeHisto = - registry.newSizeHistogram(tableNamePrefix + FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); + registry.newSizeHistogram(tableNamePrefix + FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); flushedOutputBytes = - registry.newCounter(tableNamePrefix + 
FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); + registry.newCounter(tableNamePrefix + FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); flushedMemstoreBytes = registry.newCounter(tableNamePrefix + FLUSHED_MEMSTORE_BYTES, FLUSHED_MEMSTORE_BYTES_DESC, 0L); compactionTimeHisto = - registry.newTimeHistogram(tableNamePrefix + COMPACTION_TIME, COMPACTION_TIME_DESC); + registry.newTimeHistogram(tableNamePrefix + COMPACTION_TIME, COMPACTION_TIME_DESC); compactionInputFileCountHisto = registry.newHistogram( tableNamePrefix + COMPACTION_INPUT_FILE_COUNT, COMPACTION_INPUT_FILE_COUNT_DESC); compactionInputSizeHisto = registry.newSizeHistogram(tableNamePrefix + COMPACTION_INPUT_SIZE, @@ -164,8 +162,8 @@ public class MetricsTableSourceImpl implements MetricsTableSource { tableNamePrefix + COMPACTION_OUTPUT_FILE_COUNT, COMPACTION_OUTPUT_FILE_COUNT_DESC); compactionOutputSizeHisto = registry.newSizeHistogram(tableNamePrefix + COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); - compactedInputBytes = registry.newCounter(tableNamePrefix + COMPACTED_INPUT_BYTES, - COMPACTED_INPUT_BYTES_DESC, 0L); + compactedInputBytes = + registry.newCounter(tableNamePrefix + COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); compactedOutputBytes = registry.newCounter(tableNamePrefix + COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); @@ -176,8 +174,8 @@ public class MetricsTableSourceImpl implements MetricsTableSource { majorCompactionInputSizeHisto = registry.newSizeHistogram( tableNamePrefix + MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); majorCompactionOutputFileCountHisto = - registry.newHistogram(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_FILE_COUNT, - MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); + registry.newHistogram(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_FILE_COUNT, + MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); majorCompactionOutputSizeHisto = registry.newSizeHistogram( tableNamePrefix + MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); majorCompactedInputBytes = registry.newCounter(tableNamePrefix + MAJOR_COMPACTED_INPUT_BYTES, @@ -238,6 +236,7 @@ public class MetricsTableSourceImpl implements MetricsTableSource { tableWrapperAgg = null; } } + @Override public MetricsTableAggregateSource getAggregateSource() { return agg; @@ -272,71 +271,83 @@ public class MetricsTableSourceImpl implements MetricsTableSource { } if (this.tableWrapperAgg != null) { - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, - MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); mrb.addCounter( - Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), + tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, 
MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, + tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, MetricsRegionServerSource.TOTAL_REQUEST_COUNT_DESC), - tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, MetricsRegionServerSource.STOREFILE_COUNT_DESC), - tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, MetricsRegionServerSource.STOREFILE_SIZE_DESC), - tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, - MetricsTableSource.TABLE_SIZE_DESC), + tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, + MetricsTableSource.TABLE_SIZE_DESC), tableWrapperAgg.getTableSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, MetricsRegionServerSource.AVERAGE_REGION_SIZE_DESC), - tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, + tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, MetricsRegionServerSource.REGION_COUNT_DESC), - tableWrapperAgg.getNumRegions(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, + tableWrapperAgg.getNumRegions(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, MetricsRegionServerSource.STORE_COUNT_DESC), - tableWrapperAgg.getNumStores(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, + tableWrapperAgg.getNumStores(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, + tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + 
MetricsRegionServerSource.MIN_STORE_FILE_AGE, MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), - tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), - tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); + tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); addGauge(mrb, tableWrapperAgg.getMemstoreOnlyRowReadsCount(tableName.getNameAsString()), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); addGauge(mrb, tableWrapperAgg.getMixedRowReadsCount(tableName.getNameAsString()), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } } private void addGauge(MetricsRecordBuilder mrb, Map metricMap, String metricName, - String metricDesc) { + String metricDesc) { if (metricMap != null) { for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY - + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] - + this.tableNamePrefixPart2 + metricName, - metricDesc), entry.getValue()); + + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] + this.tableNamePrefixPart2 + + metricName, metricDesc), entry.getValue()); } } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java index 28726c4ee1f..6a2e9713eb9 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -37,16 +35,14 @@ public class MetricsUserAggregateSourceImpl extends BaseSourceImpl private static final Logger LOG = LoggerFactory.getLogger(MetricsUserAggregateSourceImpl.class); private final ConcurrentHashMap userSources = - new ConcurrentHashMap(); + new ConcurrentHashMap(); public MetricsUserAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsUserAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsUserAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java index ef0eb7bf462..8559e0e4b92 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; @@ -23,7 +22,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -74,32 +72,39 @@ public class MetricsUserSourceImpl implements MetricsUserSource { this.hostName = hostName; } - @Override public void incrementReadRequest() { + @Override + public void incrementReadRequest() { readRequestsCount.increment(); } - @Override public void incrementWriteRequest() { + @Override + public void incrementWriteRequest() { writeRequestsCount.increment(); } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestsCount.sum(); } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestsCount.sum(); } - @Override public void incrementFilteredReadRequests() { + @Override + public void incrementFilteredReadRequests() { filteredRequestsCount.increment(); } - @Override public long getFilteredReadRequests() { + @Override + public long getFilteredReadRequests() { return filteredRequestsCount.sum(); } } @@ -191,8 +196,8 @@ public class MetricsUserSourceImpl implements MetricsUserSource { @Override public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); + return obj == this + || (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); } void snapshot(MetricsRecordBuilder mrb, boolean ignored) { @@ -252,16 +257,19 @@ public class MetricsUserSourceImpl implements MetricsUserSource { scanTimeHisto.add(t); } - @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(this.userNamePrefix); registry.snapshot(mrb, all); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return Collections.unmodifiableMap(clientMetricsMap); } - @Override public ClientMetrics getOrCreateMetricsClient(String client) { + @Override + public ClientMetrics getOrCreateMetricsClient(String client) { ClientMetrics source = clientMetricsMap.get(client); if (source != null) { return source; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java index 4f71681113c..9400eb7d22c 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.util.concurrent.ConcurrentHashMap; @@ -27,9 +26,8 @@ import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsWAL into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsWAL into the metrics subsystem. Implements BaseSource + * through BaseSourceImpl, following the pattern. * @see org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource */ @InterfaceAudience.Private @@ -55,32 +53,30 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsWALSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsWALSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. + // Create and store the metrics that will be used. appendTimeHisto = this.getMetricsRegistry().newTimeHistogram(APPEND_TIME, APPEND_TIME_DESC); appendSizeHisto = this.getMetricsRegistry().newSizeHistogram(APPEND_SIZE, APPEND_SIZE_DESC); appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0L); slowAppendCount = - this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0L); + this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0L); syncTimeHisto = this.getMetricsRegistry().newTimeHistogram(SYNC_TIME, SYNC_TIME_DESC); logRollRequested = - this.getMetricsRegistry().newCounter(ROLL_REQUESTED, ROLL_REQUESTED_DESC, 0L); - errorRollRequested = this.getMetricsRegistry() - .newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); - lowReplicationRollRequested = this.getMetricsRegistry() - .newCounter(LOW_REPLICA_ROLL_REQUESTED, LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); - slowSyncRollRequested = this.getMetricsRegistry() - .newCounter(SLOW_SYNC_ROLL_REQUESTED, SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); - sizeRollRequested = this.getMetricsRegistry() - .newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); + this.getMetricsRegistry().newCounter(ROLL_REQUESTED, ROLL_REQUESTED_DESC, 0L); + errorRollRequested = + this.getMetricsRegistry().newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); + lowReplicationRollRequested = this.getMetricsRegistry().newCounter(LOW_REPLICA_ROLL_REQUESTED, + LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); + slowSyncRollRequested = this.getMetricsRegistry().newCounter(SLOW_SYNC_ROLL_REQUESTED, + SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); + sizeRollRequested = + this.getMetricsRegistry().newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, WRITTEN_BYTES_DESC, 0L); - successfulLogRolls = this.getMetricsRegistry() - .newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); + successfulLogRolls = + this.getMetricsRegistry().newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); perTableAppendCount = new ConcurrentHashMap<>(); perTableAppendSize = new ConcurrentHashMap<>(); } @@ -93,8 +89,8 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo // Ideally putIfAbsent is atomic and we 
don't need a branch check but we still do it to avoid // expensive string construction for every append. String metricsKey = String.format("%s.%s", tableName, APPEND_SIZE); - perTableAppendSize.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); + perTableAppendSize.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); tableAppendSizeCounter = perTableAppendSize.get(tableName); } tableAppendSizeCounter.incr(size); @@ -111,8 +107,8 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo MutableFastCounter tableAppendCounter = perTableAppendCount.get(tableName); if (tableAppendCounter == null) { String metricsKey = String.format("%s.%s", tableName, APPEND_COUNT); - perTableAppendCount.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); + perTableAppendCount.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); tableAppendCounter = perTableAppendCount.get(tableName); } tableAppendCounter.incr(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java index cc97d7491a9..ae64c0f1996 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
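The incrementAppendSize/incrementAppendCount hunks in the MetricsWALSourceImpl diff above check the map before calling putIfAbsent so that the String.format of the metric key, and the counter registration, only happen the first time a table is seen. A hedged sketch of that lazy registration using plain JDK types (the real code registers a MutableFastCounter with the metrics registry; AtomicLong stands in for it here):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Simplified stand-in for the per-table WAL append counters: the cheap get()
// fast path avoids building the metric key string on every call.
public class PerTableCounters {
  private final ConcurrentMap<String, AtomicLong> appendSize = new ConcurrentHashMap<>();

  public void incrementAppendSize(String tableName, long size) {
    AtomicLong counter = appendSize.get(tableName); // fast path, no allocation
    if (counter == null) {
      // Slow path, taken roughly once per table: build the key and register the counter.
      String metricsKey = String.format("%s.%s", tableName, "appendSize");
      appendSize.putIfAbsent(tableName, newCounter(metricsKey));
      counter = appendSize.get(tableName);
    }
    counter.addAndGet(size);
  }

  private AtomicLong newCounter(String metricsKey) {
    // In the real source this registers a counter with the metrics registry;
    // printing the key here just shows the work the fast path avoids.
    System.out.println("registering " + metricsKey);
    return new AtomicLong();
  }

  public static void main(String[] args) {
    PerTableCounters c = new PerTableCounters();
    c.incrementAppendSize("t1", 100);
    c.incrementAppendSize("t1", 50); // no "registering" output the second time
  }
}
```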
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -86,59 +85,67 @@ public class MetricsReplicationGlobalSourceSourceImpl shippedHFilesCounter = rms.getMetricsRegistry().getCounter(SOURCE_SHIPPED_HFILES, 0L); sizeOfHFileRefsQueueGauge = - rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); + rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); - unknownFileLengthForClosedWAL = rms.getMetricsRegistry() - .getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); + unknownFileLengthForClosedWAL = + rms.getMetricsRegistry().getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); uncleanlyClosedWAL = rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_LOGS, 0L); - uncleanlyClosedSkippedBytes = rms.getMetricsRegistry() - .getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); + uncleanlyClosedSkippedBytes = + rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); restartWALReading = rms.getMetricsRegistry().getCounter(SOURCE_RESTARTED_LOG_READING, 0L); repeatedFileBytes = rms.getMetricsRegistry().getCounter(SOURCE_REPEATED_LOG_FILE_BYTES, 0L); completedWAL = rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_LOGS, 0L); - completedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); - failedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); + completedRecoveryQueue = + rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); + failedRecoveryQueue = rms.getMetricsRegistry().getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); - walReaderBufferUsageBytes = rms.getMetricsRegistry() - .getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); + walReaderBufferUsageBytes = + rms.getMetricsRegistry().getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); sourceInitializing = rms.getMetricsRegistry().getGaugeInt(SOURCE_INITIALIZING, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrFailedBatches() { + @Override + public void incrFailedBatches() { failedBatchesCounter.incr(); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); // obtained value maybe smaller than 1024. We should make sure that KB count // eventually picks up even from multiple smaller updates. 
@@ -148,7 +155,7 @@ public class MetricsReplicationGlobalSourceSourceImpl static void incrementKBsCounter(MutableFastCounter bytesCounter, MutableFastCounter kbsCounter) { // Following code should be thread-safe. long delta = 0; - while(true) { + while (true) { long bytes = bytesCounter.value(); delta = (bytes / 1024) - kbsCounter.value(); if (delta > 0) { @@ -158,11 +165,14 @@ public class MetricsReplicationGlobalSourceSourceImpl } } } - @Override public void incrLogReadInBytes(long size) { + + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { } @Override @@ -170,7 +180,8 @@ public class MetricsReplicationGlobalSourceSourceImpl return ageOfLastShippedOpHist.getMax(); } - @Override public void incrHFilesShipped(long hfiles) { + @Override + public void incrHFilesShipped(long hfiles) { shippedHFilesCounter.incr(hfiles); } @@ -186,13 +197,14 @@ public class MetricsReplicationGlobalSourceSourceImpl @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override public void incrUnknownFileLengthForClosedWAL() { unknownFileLengthForClosedWAL.incr(1L); } + @Override public void incrUncleanlyClosedWALs() { uncleanlyClosedWAL.incr(1L); @@ -207,22 +219,27 @@ public class MetricsReplicationGlobalSourceSourceImpl public void incrBytesSkippedInUncleanlyClosedWALs(final long bytes) { uncleanlyClosedSkippedBytes.incr(bytes); } + @Override public void incrRestartedWALReading() { restartWALReading.incr(1L); } + @Override public void incrRepeatedFileBytes(final long bytes) { repeatedFileBytes.incr(bytes); } + @Override public void incrCompletedWAL() { completedWAL.incr(1L); } + @Override public void incrCompletedRecoveryQueue() { completedRecoveryQueue.incr(1L); } + @Override public void incrFailedRecoveryQueue() { failedRecoveryQueue.incr(1L); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java index 86bc60577a6..3242f089654 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
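incrementKBsCounter above derives the shipped-kilobytes counter from the shipped-bytes counter by repeatedly adding the current deficit until it has caught up, so that many sub-kilobyte updates still advance the KB metric eventually. A small sketch of the same catch-up loop, with AtomicLong standing in for MutableFastCounter:

```java
import java.util.concurrent.atomic.AtomicLong;

// Simplified stand-in for the bytes -> KB catch-up loop: the KB counter is only
// ever advanced, and the loop re-checks until the deficit is gone.
public class KbCounterCatchUp {
  static void incrementKBsCounter(AtomicLong bytesCounter, AtomicLong kbsCounter) {
    while (true) {
      long delta = (bytesCounter.get() / 1024) - kbsCounter.get();
      if (delta > 0) {
        kbsCounter.addAndGet(delta); // may race with other callers; the loop re-checks
      } else {
        break; // KB counter has caught up, nothing to do
      }
    }
  }

  public static void main(String[] args) {
    AtomicLong bytes = new AtomicLong();
    AtomicLong kbs = new AtomicLong();
    // Many updates smaller than 1 KB still move the KB counter eventually.
    for (int i = 0; i < 10; i++) {
      bytes.addAndGet(300);
      incrementKBsCounter(bytes, kbs);
    }
    System.out.println(bytes.get() + " bytes -> " + kbs.get() + " KB");
  }
}
```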
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -39,20 +38,23 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS hfilesCounter = rms.getMetricsRegistry().getCounter(SINK_APPLIED_HFILES, 0L); } - @Override public void setLastAppliedOpAge(long age) { + @Override + public void setLastAppliedOpAge(long age) { ageHist.add(age); } - @Override public void incrAppliedBatches(long batches) { + @Override + public void incrAppliedBatches(long batches) { batchesCounter.incr(batches); } - @Override public void incrAppliedOps(long batchsize) { + @Override + public void incrAppliedOps(long batchsize) { opsCounter.incr(batchsize); } @Override - public void incrFailedBatches(){ + public void incrFailedBatches() { failedBatchesCounter.incr(); } @@ -71,7 +73,8 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS hfilesCounter.incr(hfiles); } - @Override public long getSinkAppliedOps() { + @Override + public long getSinkAppliedOps() { return opsCounter.value(); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java index c0cd1c73e0c..5b223b42392 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java @@ -24,22 +24,27 @@ public class MetricsReplicationSourceFactoryImpl implements MetricsReplicationSo private static enum SourceHolder { INSTANCE; + final MetricsReplicationSourceImpl source = new MetricsReplicationSourceImpl(); } - @Override public MetricsReplicationSinkSource getSink() { + @Override + public MetricsReplicationSinkSource getSink() { return new MetricsReplicationSinkSourceImpl(SourceHolder.INSTANCE.source); } - @Override public MetricsReplicationSourceSource getSource(String id) { + @Override + public MetricsReplicationSourceSource getSource(String id) { return new MetricsReplicationSourceSourceImpl(SourceHolder.INSTANCE.source, id); } - @Override public MetricsReplicationTableSource getTableSource(String tableName) { + @Override + public MetricsReplicationTableSource getTableSource(String tableName) { return new MetricsReplicationTableSourceImpl(SourceHolder.INSTANCE.source, tableName); } - @Override public MetricsReplicationGlobalSourceSource getGlobalSource() { + @Override + public MetricsReplicationGlobalSourceSource getGlobalSource() { return new MetricsReplicationGlobalSourceSourceImpl(SourceHolder.INSTANCE.source); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java index 02045f8bbd1..0caf9970ce0 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,23 +22,18 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and - * counters. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * counters. Implements BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsReplicationSourceImpl extends BaseSourceImpl implements - MetricsReplicationSource { - +public class MetricsReplicationSourceImpl extends BaseSourceImpl + implements MetricsReplicationSource { public MetricsReplicationSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - MetricsReplicationSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + MetricsReplicationSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java index bf1392ce9b7..93b5bcabecd 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java @@ -148,49 +148,60 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou sourceInitializing = rms.getMetricsRegistry().getGaugeInt(sourceInitializingKey, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrFailedBatches() { + @Override + public void incrFailedBatches() { failedBatchesCounter.incr(); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); - MetricsReplicationGlobalSourceSourceImpl - .incrementKBsCounter(shippedBytesCounter, shippedKBsCounter); + 
MetricsReplicationGlobalSourceSourceImpl.incrementKBsCounter(shippedBytesCounter, + shippedKBsCounter); } - @Override public void incrLogReadInBytes(long size) { + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { rms.removeMetric(ageOfLastShippedOpKey); rms.removeMetric(sizeOfLogQueueKey); @@ -242,7 +253,7 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override @@ -286,13 +297,16 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou } @Override - public void incrFailedRecoveryQueue() {/*no op*/} + public void incrFailedRecoveryQueue() { + /* no op */} - @Override public void setOldestWalAge(long age) { + @Override + public void setOldestWalAge(long age) { oldestWalAge.set(age); } - @Override public long getOldestWalAge() { + @Override + public long getOldestWalAge() { return oldestWalAge.value(); } @@ -306,7 +320,8 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou return sourceInitializing.value(); } - @Override public void decrSourceInitializing() { + @Override + public void decrSourceInitializing() { sourceInitializing.decr(1); } @@ -365,15 +380,18 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou return rms.getMetricsName(); } - @Override public long getWALEditsRead() { + @Override + public long getWALEditsRead() { return this.logReadInEditsCounter.value(); } - @Override public long getShippedOps() { + @Override + public long getShippedOps() { return this.shippedOpsCounter.value(); } - @Override public long getEditsFiltered() { + @Override + public long getEditsFiltered() { return this.walEditsFilteredCounter.value(); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java index 9ca0cd1a94e..244298faff6 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java @@ -22,9 +22,8 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram; import org.apache.yetus.audience.InterfaceAudience; /** - * This is the metric source for table level replication metrics. - * We can easy monitor some useful table level replication metrics such as - * ageOfLastShippedOp and shippedBytes + * This is the metric source for table level replication metrics. 
We can easy monitor some useful + * table level replication metrics such as ageOfLastShippedOp and shippedBytes */ @InterfaceAudience.Private public class MetricsReplicationTableSourceImpl implements MetricsReplicationTableSource { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java index 3474265ee26..e01c428f928 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to - * the hadoop metrics2 subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * the hadoop metrics2 subsystem. Implements BaseSource through BaseSourceImpl, following the + * pattern */ @InterfaceAudience.Private public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource { @@ -55,17 +53,15 @@ public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsREST this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT); } - public MetricsRESTSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRESTSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java index 3ce2d5d1fdc..0ad71fc46c4 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; @@ -32,6 +31,7 @@ public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServer */ private enum FactoryStorage { INSTANCE; + MetricsThriftServerSourceImpl thriftOne; MetricsThriftServerSourceImpl thriftTwo; } @@ -40,9 +40,7 @@ public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServer public MetricsThriftServerSource createThriftOneSource() { if (FactoryStorage.INSTANCE.thriftOne == null) { FactoryStorage.INSTANCE.thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_ONE_METRICS_CONTEXT, - THRIFT_ONE_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_ONE_METRICS_CONTEXT, THRIFT_ONE_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftOne; } @@ -51,9 +49,7 @@ public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServer public MetricsThriftServerSource createThriftTwoSource() { if (FactoryStorage.INSTANCE.thriftTwo == null) { FactoryStorage.INSTANCE.thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_TWO_METRICS_CONTEXT, - THRIFT_TWO_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_TWO_METRICS_CONTEXT, THRIFT_TWO_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftTwo; } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java index 4ed974c95dc..fe50ccb084a 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
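Both MetricsReplicationSourceFactoryImpl (SourceHolder) and MetricsThriftServerSourceFactoryImpl (FactoryStorage) above park their shared source objects inside a single-constant enum. A brief, hedged sketch of that enum-holder idiom with illustrative names; the JVM initializes the constant lazily and exactly once, so no explicit locking is needed for the final field:

```java
// Enum-as-singleton holder: INSTANCE and its final fields are initialized once,
// on first use, with safe publication guaranteed by class initialization rules.
public class FactorySketch {

  static class SharedSource {
    final String name;
    SharedSource(String name) {
      this.name = name;
      System.out.println("created " + name);
    }
  }

  private enum SourceHolder {
    INSTANCE;

    final SharedSource source = new SharedSource("shared-metrics-source");
  }

  public SharedSource getSource() {
    return SourceHolder.INSTANCE.source; // the same instance for every caller
  }

  public static void main(String[] args) {
    FactorySketch f = new FactorySketch();
    System.out.println(f.getSource() == f.getSource()); // true, and "created" prints once
  }
}
```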
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -26,13 +25,12 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram; import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} Implements + * BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl implements - MetricsThriftServerSource { +public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl + implements MetricsThriftServerSource { private MetricHistogram batchGetStat; private MetricHistogram batchMutateStat; @@ -51,17 +49,15 @@ public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl i private final MetricHistogram pausesWithGc; private final MetricHistogram pausesWithoutGc; - public MetricsThriftServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsThriftServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java index 50ebd46b716..9429428d300 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram; import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. Implements + * BaseSource through BaseSourceImpl, following the pattern. 
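The REST and Thrift source constructors above only register the pause-monitor metrics: info/warn threshold-exceeded counters plus time histograms for pauses with and without GC. The detection loop itself is not part of this patch; the following is only a rough, hypothetical illustration of the kind of loop that feeds such counters, with made-up thresholds, and is not the HBase or Hadoop implementation:

```java
import java.util.concurrent.atomic.AtomicLong;

// Conceptual sketch: sleep a fixed interval and treat any large overshoot as a
// process pause (GC, swapping, ...), bumping the info/warn counters past thresholds.
public class PauseSketch {
  static final long SLEEP_MS = 500;
  static final long INFO_THRESHOLD_MS = 1000;  // illustrative values only
  static final long WARN_THRESHOLD_MS = 10000;

  final AtomicLong infoPauses = new AtomicLong();
  final AtomicLong warnPauses = new AtomicLong();

  void runOnce() throws InterruptedException {
    long start = System.nanoTime();
    Thread.sleep(SLEEP_MS);
    long elapsedMs = (System.nanoTime() - start) / 1_000_000;
    long pauseMs = elapsedMs - SLEEP_MS; // extra time we were not scheduled
    if (pauseMs > WARN_THRESHOLD_MS) {
      warnPauses.incrementAndGet();
    } else if (pauseMs > INFO_THRESHOLD_MS) {
      infoPauses.incrementAndGet();
    }
    // A fuller monitor would also record pauseMs into the with/without-GC histograms.
  }

  public static void main(String[] args) throws InterruptedException {
    PauseSketch p = new PauseSketch();
    p.runOnce();
    System.out.println("info=" + p.infoPauses + " warn=" + p.warnPauses);
  }
}
```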
*/ @InterfaceAudience.Private public class MetricsZooKeeperSourceImpl extends BaseSourceImpl implements MetricsZooKeeperSource { @@ -52,37 +50,37 @@ public class MetricsZooKeeperSourceImpl extends BaseSourceImpl implements Metric } public MetricsZooKeeperSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. - authFailedFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); - connectionLossFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_CONNECTIONLOSS, EXCEPTION_CONNECTIONLOSS_DESC, 0L); - dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_DATAINCONSISTENCY, EXCEPTION_DATAINCONSISTENCY_DESC, 0L); - invalidACLFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); - noAuthFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); - operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_OPERATIONTIMEOUT, EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); - runtimeInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); - sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SESSIONEXPIRED, EXCEPTION_SESSIONEXPIRED_DESC, 0L); - systemErrorFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); - totalFailedZKCalls = this.getMetricsRegistry().newGauge( - TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); + // Create and store the metrics that will be used. 
+ authFailedFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); + connectionLossFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_CONNECTIONLOSS, + EXCEPTION_CONNECTIONLOSS_DESC, 0L); + dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_DATAINCONSISTENCY, + EXCEPTION_DATAINCONSISTENCY_DESC, 0L); + invalidACLFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); + noAuthFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); + operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_OPERATIONTIMEOUT, + EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); + runtimeInconsistencyFailedOpCount = this.getMetricsRegistry() + .newGauge(EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); + sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_SESSIONEXPIRED, + EXCEPTION_SESSIONEXPIRED_DESC, 0L); + systemErrorFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); + totalFailedZKCalls = + this.getMetricsRegistry().newGauge(TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); - readOpLatency = this.getMetricsRegistry().newHistogram( - READ_OPERATION_LATENCY_NAME, READ_OPERATION_LATENCY_DESC); - writeOpLatency = this.getMetricsRegistry().newHistogram( - WRITE_OPERATION_LATENCY_NAME, WRITE_OPERATION_LATENCY_DESC); - syncOpLatency = this.getMetricsRegistry().newHistogram( - SYNC_OPERATION_LATENCY_NAME, SYNC_OPERATION_LATENCY_DESC); + readOpLatency = this.getMetricsRegistry().newHistogram(READ_OPERATION_LATENCY_NAME, + READ_OPERATION_LATENCY_DESC); + writeOpLatency = this.getMetricsRegistry().newHistogram(WRITE_OPERATION_LATENCY_NAME, + WRITE_OPERATION_LATENCY_DESC); + syncOpLatency = this.getMetricsRegistry().newHistogram(SYNC_OPERATION_LATENCY_NAME, + SYNC_OPERATION_LATENCY_DESC); } public void getMetrics(MetricsCollector metricsCollector, boolean all) { @@ -91,7 +89,7 @@ public class MetricsZooKeeperSourceImpl extends BaseSourceImpl implements Metric } private void clearZKExceptionMetrics() { - //Reset the exception metrics. + // Reset the exception metrics. clearMetricIfNotNull(authFailedFailedOpCount); clearMetricIfNotNull(connectionLossFailedOpCount); clearMetricIfNotNull(dataInconsistencyFailedOpCount); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java index 88b491ba3ea..9b62cd898f6 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl; @@ -32,11 +31,9 @@ import org.slf4j.LoggerFactory; /** * JMX caches the beans that have been exported; even after the values are removed from hadoop's - * metrics system the keys and old values will still remain. This class stops and restarts the - * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. - * - * This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used - * are package private. + * metrics system the keys and old values will still remain. This class stops and restarts the + * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. This class need to be + * in the o.a.h.metrics2.impl namespace as many of the variables/calls used are package private. */ @InterfaceAudience.Private public final class JmxCacheBuster { @@ -56,7 +53,7 @@ public final class JmxCacheBuster { if (LOG.isTraceEnabled()) { LOG.trace("clearing JMX Cache" + StringUtils.stringifyException(new Exception())); } - //If there are more then 100 ms before the executor will run then everything should be merged. + // If there are more then 100 ms before the executor will run then everything should be merged. ScheduledFuture future = fut.get(); if ((future != null && (!future.isDone() && future.getDelay(TimeUnit.MILLISECONDS) > 100))) { // BAIL OUT @@ -104,9 +101,9 @@ public final class JmxCacheBuster { Thread.sleep(500); DefaultMetricsSystem.instance().start(); } - } catch (Exception exception) { + } catch (Exception exception) { LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", - exception); + exception); } } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java index 723e6d34c1d..09556707648 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,8 +77,8 @@ public class DefaultMetricsSystemHelper { * so far as a Source, thus preventing further re-registration of the source with the same name. * In case of dynamic metrics tied to region-lifecycles, this becomes a problem because we would * like to be able to re-register and remove with the same name. Otherwise, it is resource leak. - * This ugly code manually removes the name from the UniqueNames map. - * TODO: May not be needed for Hadoop versions after YARN-5190. + * This ugly code manually removes the name from the UniqueNames map. TODO: May not be needed for + * Hadoop versions after YARN-5190. 
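The JmxCacheBuster hunk above coalesces bursts of cache-clear requests: if a restart is already scheduled and still more than about 100 ms away, the new request simply bails out and lets the pending run cover it. A hedged, JDK-only sketch of that debounce shape, with the metrics-system restart replaced by a print statement:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

// Debounce sketch: repeated clearCache() calls within the delay window collapse
// into one scheduled run, mirroring the "bail out if a run is pending" check.
public class DebounceSketch {
  private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
  private final AtomicReference<ScheduledFuture<?>> fut = new AtomicReference<>();

  public void clearCache() {
    ScheduledFuture<?> future = fut.get();
    if (future != null && !future.isDone() && future.getDelay(TimeUnit.MILLISECONDS) > 100) {
      return; // a pending run will cover this request
    }
    if (future != null) {
      future.cancel(false);
    }
    fut.set(executor.schedule(
      () -> System.out.println("restart metrics system here"), 5, TimeUnit.SECONDS));
  }

  public static void main(String[] args) throws InterruptedException {
    DebounceSketch d = new DebounceSketch();
    for (int i = 0; i < 100; i++) {
      d.clearCache(); // only one "restart" is printed, about 5 s later
    }
    Thread.sleep(6000);
    d.executor.shutdown();
  }
}
```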
*/ public void removeSourceName(String name) { if (sourceNamesField == null || mapField == null) { @@ -92,8 +92,9 @@ public class DefaultMetricsSystemHelper { } } catch (Exception ex) { if (LOG.isTraceEnabled()) { - LOG.trace("Received exception while trying to access Hadoop Metrics classes via " + - "reflection.", ex); + LOG.trace( + "Received exception while trying to access Hadoop Metrics classes via " + "reflection.", + ex); } } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java index 7a791c92bc1..7177f322210 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.Collection; @@ -29,51 +28,41 @@ import org.apache.hadoop.metrics2.impl.MsInfo; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * An optional metrics registry class for creating and maintaining a - * collection of MetricsMutables, making writing metrics source easier. - * NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with added one - * feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class - * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. - * This implementation also provides handy methods for creating metrics - * dynamically. - * Another difference is that metricsMap implementation is substituted with - * thread-safe map, as we allow dynamic metrics additions/removals. + * An optional metrics registry class for creating and maintaining a collection of MetricsMutables, + * making writing metrics source easier. NOTE: this is a copy of + * org.apache.hadoop.metrics2.lib.MetricsRegistry with added one feature: metrics can be removed. + * When HADOOP-8313 is fixed, usages of this class should be substituted with + * org.apache.hadoop.metrics2.lib.MetricsRegistry. This implementation also provides handy methods + * for creating metrics dynamically. Another difference is that metricsMap implementation is + * substituted with thread-safe map, as we allow dynamic metrics additions/removals. 
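As the class comment directly above notes, DynamicMetricsRegistry is essentially the stock MetricsRegistry with a thread-safe map and the ability to remove metrics again, which matters for short-lived sources such as per-region or per-user metrics. A stripped-down sketch of that idea (RegistrySketch and its AtomicLong values are illustrative stand-ins, not the HBase types):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Minimal dynamic registry: metrics can be registered, looked up, and removed
// at runtime, so a name can be reused after its source goes away.
public class RegistrySketch {
  private final ConcurrentMap<String, AtomicLong> metrics = new ConcurrentHashMap<>();

  public AtomicLong newCounter(String name) {
    AtomicLong counter = new AtomicLong();
    AtomicLong existing = metrics.putIfAbsent(name, counter);
    return existing != null ? existing : counter;
  }

  public AtomicLong get(String name) {
    return metrics.get(name);
  }

  public void removeMetric(String name) {
    metrics.remove(name); // the extra feature compared to the stock registry
  }

  public static void main(String[] args) {
    RegistrySketch r = new RegistrySketch();
    r.newCounter("region_x_readRequestCount").incrementAndGet();
    System.out.println(r.get("region_x_readRequestCount"));
    r.removeMetric("region_x_readRequestCount"); // region closed, free the name
    System.out.println(r.get("region_x_readRequestCount")); // null
  }
}
```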
*/ @InterfaceAudience.Private public class DynamicMetricsRegistry { private static final Logger LOG = LoggerFactory.getLogger(DynamicMetricsRegistry.class); - private final ConcurrentMap metricsMap = - Maps.newConcurrentMap(); - private final ConcurrentMap tagsMap = - Maps.newConcurrentMap(); + private final ConcurrentMap metricsMap = Maps.newConcurrentMap(); + private final ConcurrentMap tagsMap = Maps.newConcurrentMap(); private final MetricsInfo metricsInfo; private final DefaultMetricsSystemHelper helper = new DefaultMetricsSystemHelper(); - private final static String[] histogramSuffixes = new String[]{ - "_num_ops", - "_min", - "_max", - "_median", - "_75th_percentile", - "_90th_percentile", - "_95th_percentile", - "_99th_percentile"}; + private final static String[] histogramSuffixes = new String[] { "_num_ops", "_min", "_max", + "_median", "_75th_percentile", "_90th_percentile", "_95th_percentile", "_99th_percentile" }; /** * Construct the registry with a record name - * @param name of the record of the metrics + * @param name of the record of the metrics */ public DynamicMetricsRegistry(String name) { - this(Interns.info(name,name)); + this(Interns.info(name, name)); } /** * Construct the registry with a metadata object - * @param info the info object for the metrics record/group + * @param info the info object for the metrics record/group */ public DynamicMetricsRegistry(MetricsInfo info) { metricsInfo = info; @@ -88,7 +77,7 @@ public class DynamicMetricsRegistry { /** * Get a metric by name - * @param name of the metric + * @param name of the metric * @return the metric object */ public MutableMetric get(String name) { @@ -97,7 +86,7 @@ public class DynamicMetricsRegistry { /** * Get a tag by name - * @param name of the tag + * @param name of the tag * @return the tag object */ public MetricsTag getTag(String name) { @@ -106,9 +95,9 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(String name, String desc, long iVal) { @@ -117,8 +106,8 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer counter - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { @@ -128,9 +117,9 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer gauge - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(String name, String desc, long iVal) { @@ -139,8 +128,8 @@ public class DynamicMetricsRegistry { /** * Create a mutable long integer gauge - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { @@ -150,36 +139,34 @@ public class DynamicMetricsRegistry { /** * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * 
@param valueName of the metric (e.g., "Time" or "Latency") - * @param extended produce extended stat (stdev, min/max etc.) if true. + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") + * @param extended produce extended stat (stdev, min/max etc.) if true. * @return a new mutable stat metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName, boolean extended) { - MutableStat ret = - new MutableStat(name, desc, sampleName, valueName, extended); + public MutableStat newStat(String name, String desc, String sampleName, String valueName, + boolean extended) { + MutableStat ret = new MutableStat(name, desc, sampleName, valueName, extended); return addNewMetricIfAbsent(name, ret, MutableStat.class); } /** * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") * @return a new mutable metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName) { + public MutableStat newStat(String name, String desc, String sampleName, String valueName) { return newStat(name, desc, sampleName, valueName, false); } /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @return a new mutable metric object */ public MutableRate newRate(String name) { @@ -188,7 +175,7 @@ public class DynamicMetricsRegistry { /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @param description of the metric * @return a new mutable rate metric object */ @@ -198,9 +185,9 @@ public class DynamicMetricsRegistry { /** * Create a mutable rate metric (for throughput measurement) - * @param name of the metric - * @param desc description - * @param extended produce extended stat (stdev/min/max etc.) if true + * @param name of the metric + * @param desc description + * @param extended produce extended stat (stdev/min/max etc.) if true * @return a new mutable rate metric object */ public MutableRate newRate(String name, String desc, boolean extended) { @@ -208,8 +195,7 @@ public class DynamicMetricsRegistry { } @InterfaceAudience.Private - public MutableRate newRate(String name, String desc, - boolean extended, boolean returnExisting) { + public MutableRate newRate(String name, String desc, boolean extended, boolean returnExisting) { if (returnExisting) { MutableMetric rate = metricsMap.get(name); if (rate != null) { @@ -217,8 +203,7 @@ public class DynamicMetricsRegistry { return (MutableRate) rate; } - throw new MetricsException("Unexpected metrics type "+ rate.getClass() - +" for "+ name); + throw new MetricsException("Unexpected metrics type " + rate.getClass() + " for " + name); } } MutableRate ret = new MutableRate(name, desc, extended); @@ -244,7 +229,7 @@ public class DynamicMetricsRegistry { MutableHistogram histo = new MutableHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableHistogram.class); } - + /** * Create a new histogram with time range counts. * @param name Name of the histogram. 
@@ -264,7 +249,7 @@ public class DynamicMetricsRegistry { MutableTimeHistogram histo = new MutableTimeHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableTimeHistogram.class); } - + /** * Create a new histogram with size range counts. * @param name Name of the histogram. @@ -285,7 +270,6 @@ public class DynamicMetricsRegistry { return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class); } - synchronized void add(String name, MutableMetric metric) { addNewMetricIfAbsent(name, metric, MutableMetric.class); } @@ -301,12 +285,10 @@ public class DynamicMetricsRegistry { if (m != null) { if (m instanceof MutableStat) { ((MutableStat) m).add(value); + } else { + throw new MetricsException("Unsupported add(value) for metric " + name); } - else { - throw new MetricsException("Unsupported add(value) for metric "+ name); - } - } - else { + } else { metricsMap.put(name, newRate(name)); // default is a rate metric add(name, value); } @@ -323,9 +305,9 @@ public class DynamicMetricsRegistry { /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag - * @param value of the tag + * @param value of the tag * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value) { @@ -334,21 +316,21 @@ public class DynamicMetricsRegistry { /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag - * @param value of the tag - * @param override existing tag if true + * @param value of the tag + * @param override existing tag if true * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value, - boolean override) { + boolean override) { return tag(new MetricsInfoImpl(name, description), value, override); } /** * Add a tag to the metrics - * @param info metadata of the tag - * @param value of the tag + * @param info metadata of the tag + * @param value of the tag * @param override existing tag if true * @return the registry (for keep adding tags etc.) */ @@ -358,7 +340,7 @@ public class DynamicMetricsRegistry { if (!override) { MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag); if (existing != null) { - throw new MetricsException("Tag "+ info.name() +" already exists!"); + throw new MetricsException("Tag " + info.name() + " already exists!"); } return this; } @@ -383,7 +365,7 @@ public class DynamicMetricsRegistry { /** * Sample all the mutable metrics and put the snapshot in the builder * @param builder to contain the metrics snapshot - * @param all get all the metrics even if the values are not changed. + * @param all get all the metrics even if the values are not changed. 
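newRate(name, desc, extended, returnExisting) above, and the getGauge/getCounter/getHistogram helpers later in this class, all follow one shape: look the name up, putIfAbsent a freshly built metric on a miss, and if something was already registered verify its type before casting, failing loudly on a collision. A hedged generic sketch of that shape over a plain ConcurrentMap (names are illustrative):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

// Get-or-create with a type check: the first registration wins, later callers
// get it back, and a name collision across metric types fails loudly.
public class GetOrCreateSketch {
  private final ConcurrentMap<String, Object> metrics = new ConcurrentHashMap<>();

  <T> T getOrCreate(String name, Class<T> type, Supplier<T> factory) {
    Object metric = metrics.get(name);
    if (metric == null) {
      T created = factory.get();
      metric = metrics.putIfAbsent(name, created);
      if (metric == null) {
        return created; // our put won the race
      }
    }
    if (!type.isInstance(metric)) {
      throw new IllegalStateException(
        "Metric already exists for name: " + name + " and not of type " + type.getName());
    }
    return type.cast(metric);
  }

  public static void main(String[] args) {
    GetOrCreateSketch r = new GetOrCreateSketch();
    AtomicLong gauge = r.getOrCreate("sizeOfLogQueue", AtomicLong.class, AtomicLong::new);
    gauge.incrementAndGet();
    // Same name, different type: throws instead of silently returning the wrong metric.
    try {
      r.getOrCreate("sizeOfLogQueue", AtomicInteger.class, AtomicInteger::new);
    } catch (IllegalStateException expected) {
      System.out.println(expected.getMessage());
    }
  }
}
```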
*/ public void snapshot(MetricsRecordBuilder builder, boolean all) { for (MetricsTag tag : tags()) { @@ -394,10 +376,10 @@ public class DynamicMetricsRegistry { } } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics()) - .toString(); + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("info", metricsInfo).add("tags", tags()) + .add("metrics", metrics()).toString(); } /** @@ -410,131 +392,125 @@ public class DynamicMetricsRegistry { } public void removeHistogramMetrics(String baseName) { - for (String suffix:histogramSuffixes) { - removeMetric(baseName+suffix); + for (String suffix : histogramSuffixes) { + removeMetric(baseName + suffix); } } /** - * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. - * + * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeLong getGauge(String gaugeName, long potentialStartingValue) { - //Try and get the guage. + // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeLong newGauge = new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. + MutableGaugeLong newGauge = + new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeLong should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeLong should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeLong)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeLong"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeLong"); } return (MutableGaugeLong) metric; } /** - * Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it. - * + * Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it. * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeInt getGaugeInt(String gaugeName, int potentialStartingValue) { - //Try and get the guage. + // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeInt newGauge = new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. 
+ MutableGaugeInt newGauge = + new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeInt should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeInt should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeInt)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeInr"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeInr"); } return (MutableGaugeInt) metric; } /** - * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. - * + * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. * @param counterName Name of the counter to get * @param potentialStartingValue starting value if we have to create a new counter */ public MutableFastCounter getCounter(String counterName, long potentialStartingValue) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. MutableMetric counter = metricsMap.get(counterName); if (counter == null) { MutableFastCounter newCounter = - new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); + new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); counter = metricsMap.putIfAbsent(counterName, newCounter); if (counter == null) { return newCounter; } } - if (!(counter instanceof MutableCounter)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - counterName + " and not of type MutableCounter"); + throw new MetricsException("Metric already exists in registry for metric name: " + counterName + + " and not of type MutableCounter"); } return (MutableFastCounter) counter; } public MutableHistogram getHistogram(String histoName) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. MutableMetric histo = metricsMap.get(histoName); if (histo == null) { - MutableHistogram newCounter = - new MutableHistogram(new MetricsInfoImpl(histoName, "")); + MutableHistogram newCounter = new MutableHistogram(new MetricsInfoImpl(histoName, "")); histo = metricsMap.putIfAbsent(histoName, newCounter); if (histo == null) { return newCounter; } } - if (!(histo instanceof MutableHistogram)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - histoName + " and not of type MutableHistogram"); + throw new MetricsException("Metric already exists in registry for metric name: " + histoName + + " and not of type MutableHistogram"); } return (MutableHistogram) histo; } - private T addNewMetricIfAbsent(String name, T ret, - Class metricClass) { - //If the value we get back is null then the put was successful and we will + private T addNewMetricIfAbsent(String name, T ret, + Class metricClass) { + // If the value we get back is null then the put was successful and we will // return that. 
Otherwise metric should contain the thing that was in // before the put could be completed. MutableMetric metric = metricsMap.putIfAbsent(name, ret); @@ -546,19 +522,17 @@ public class DynamicMetricsRegistry { } @SuppressWarnings("unchecked") - private T returnExistingWithCast(MutableMetric metric, - Class metricClass, String name) { + private T returnExistingWithCast(MutableMetric metric, Class metricClass, String name) { if (!metricClass.isAssignableFrom(metric.getClass())) { - throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type " + metricClass + - " but instead of type " + metric.getClass()); + throw new MetricsException("Metric already exists in registry for metric name: " + name + + " and not of type " + metricClass + " but instead of type " + metric.getClass()); } return (T) metric; } public void clearMetrics() { - for (String name:metricsMap.keySet()) { + for (String name : metricsMap.keySet()) { helper.removeObjectName(name); } metricsMap.clear(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java index d24f23f7f35..1b4d9ecc8f1 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.yetus.audience.InterfaceAudience; /** - * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by - * MetricsRegionAggregateSourceImpl, and - * JmxCacheBuster + * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by + * MetricsRegionAggregateSourceImpl, and JmxCacheBuster */ @InterfaceAudience.Private public class MetricsExecutorImpl implements MetricsExecutor { @@ -48,8 +45,9 @@ public class MetricsExecutorImpl implements MetricsExecutor { private enum ExecutorSingleton { INSTANCE; - private final transient ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1, - new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); + + private final transient ScheduledExecutorService scheduler = + new ScheduledThreadPoolExecutor(1, new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); } private final static class ThreadPoolExecutorThreadFactory implements ThreadFactory { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java index 7b5ec024a50..f8f8aee3550 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java index dc86ebe8bf7..d5356aecda3 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
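The MetricsExecutorImpl hunk above parks its ScheduledThreadPoolExecutor inside an enum constant, a common idiom for a lazily initialized, thread-safe singleton. A small sketch of that idiom follows; SchedulerSingletonSketch, DaemonThreadFactory and the thread-name prefix are illustrative assumptions, and Executors.newScheduledThreadPool stands in for the direct ScheduledThreadPoolExecutor construction shown in the patch.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public final class SchedulerSingletonSketch {

  // Enum singletons are initialized once, on first use, by the class loader,
  // so no explicit locking is needed around the executor construction.
  private enum Holder {
    INSTANCE;

    private final ScheduledExecutorService scheduler =
      Executors.newScheduledThreadPool(1, new DaemonThreadFactory("Example-Metrics-"));
  }

  private static final class DaemonThreadFactory implements ThreadFactory {
    private final AtomicInteger count = new AtomicInteger(0);
    private final String prefix;

    DaemonThreadFactory(String prefix) {
      this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
      Thread t = new Thread(r, prefix + count.incrementAndGet());
      t.setDaemon(true); // metrics threads should not keep the JVM alive
      return t;
    }
  }

  public static ScheduledExecutorService getExecutor() {
    return Holder.INSTANCE.scheduler;
  }

  public static void main(String[] args) throws InterruptedException {
    getExecutor().schedule(
      () -> System.out.println(Thread.currentThread().getName()), 10, TimeUnit.MILLISECONDS);
    Thread.sleep(100); // give the daemon thread a moment before the JVM exits
  }
}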
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.commons.lang3.StringUtils; @@ -51,7 +50,8 @@ public class MutableHistogram extends MutableMetric implements MetricHistogram { histogram.update(val); } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } @@ -65,7 +65,7 @@ public class MutableHistogram extends MutableMetric implements MetricHistogram { } public static void snapshot(String name, String desc, Histogram histogram, - MetricsRecordBuilder metricsRecordBuilder, boolean all) { + MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. Snapshot snapshot = histogram.snapshot(); if (snapshot != null) { @@ -74,29 +74,29 @@ public class MutableHistogram extends MutableMetric implements MetricHistogram { } protected static void updateSnapshotMetrics(String name, String desc, Histogram histogram, - Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) { + Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) { metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), - histogram.getCount()); + histogram.getCount()); metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), snapshot.getMin()); metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), snapshot.getMax()); metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), snapshot.getMean()); metricsRecordBuilder.addGauge(Interns.info(name + TWENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get25thPercentile()); + snapshot.get25thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc), - snapshot.getMedian()); + snapshot.getMedian()); metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get75thPercentile()); + snapshot.get75thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get90thPercentile()); + snapshot.get90thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get95thPercentile()); + snapshot.get95thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get98thPercentile()); + snapshot.get98thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get99thPercentile()); + snapshot.get99thPercentile()); metricsRecordBuilder.addGauge( - Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get999thPercentile()); + Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), + snapshot.get999thPercentile()); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java index 507e9540026..a4d316fa9f4 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.hbase.metrics.Interns; @@ -40,16 +39,15 @@ public abstract class MutableRangeHistogram extends MutableHistogram implements } /** - * Returns the type of range histogram size or time + * Returns the type of range histogram size or time */ public abstract String getRangeType(); - + /** - * Returns the ranges to be counted + * Returns the ranges to be counted */ public abstract long[] getRanges(); - @Override public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. @@ -61,7 +59,7 @@ public abstract class MutableRangeHistogram extends MutableHistogram implements } public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder, - Snapshot snapshot) { + Snapshot snapshot) { long priorRange = 0; long cumNum = 0; @@ -71,8 +69,8 @@ public abstract class MutableRangeHistogram extends MutableHistogram implements long val = snapshot.getCountAtOrBelow(ranges[i]); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), + val - cumNum); } priorRange = ranges[i]; cumNum = val; @@ -80,12 +78,12 @@ public abstract class MutableRangeHistogram extends MutableHistogram implements long val = snapshot.getCount(); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), val - cumNum); } } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java index b02efb76f9d..07c29ef636e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
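updateSnapshotRangeMetrics in the MutableRangeHistogram hunk converts cumulative counts from a histogram snapshot into per-range counters by subtracting the running total (val - cumNum) and finishing with an open-ended "-inf" bucket. The sketch below reproduces that bucketing step over a plain sorted array; RangeBucketSketch and its countAtOrBelow helper are hypothetical and only approximate the real Snapshot.getCountAtOrBelow.

import java.util.Arrays;

public class RangeBucketSketch {

  // Upper bounds of each range, mirroring the RANGES constants in the hunks above.
  private static final long[] RANGES = { 10, 100, 1000, 10000 };

  // Given sorted observed values, print how many fall in each (priorRange, range]
  // bucket plus a final "priorRange-inf" bucket, mirroring the val - cumNum loop.
  static void printBuckets(long[] sortedValues) {
    long priorRange = 0;
    long cumNum = 0;
    for (long range : RANGES) {
      long countAtOrBelow = countAtOrBelow(sortedValues, range);
      if (countAtOrBelow - cumNum > 0) {
        System.out.println(priorRange + "-" + range + ": " + (countAtOrBelow - cumNum));
      }
      priorRange = range;
      cumNum = countAtOrBelow;
    }
    long total = sortedValues.length;
    if (total - cumNum > 0) {
      System.out.println(priorRange + "-inf: " + (total - cumNum));
    }
  }

  // Binary search for the number of values <= bound in a sorted array.
  private static long countAtOrBelow(long[] sorted, long bound) {
    int idx = Arrays.binarySearch(sorted, bound);
    if (idx >= 0) {
      // step past duplicates of the bound value
      while (idx + 1 < sorted.length && sorted[idx + 1] == bound) {
        idx++;
      }
      return idx + 1;
    }
    return -(idx + 1); // insertion point == count of smaller elements
  }

  public static void main(String[] args) {
    printBuckets(new long[] { 3, 7, 50, 120, 5000, 99999 });
  }
}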
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; @@ -28,7 +27,8 @@ import org.apache.yetus.audience.InterfaceAudience; public class MutableSizeHistogram extends MutableRangeHistogram { private final static String RANGE_TYPE = "SizeRangeCount"; - private final static long[] RANGES = {10,100,1000,10000,100000,1000000,10000000,100000000}; + private final static long[] RANGES = + { 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000 }; public MutableSizeHistogram(MetricsInfo info) { this(info.name(), info.description()); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java index 7c6dfbbd577..42418de944d 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java index 84a76edf72e..1256dcc9999 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java @@ -20,8 +20,7 @@ package org.apache.hadoop.metrics2.util; import org.apache.yetus.audience.InterfaceAudience; /** - * Specifies a quantile (with error bounds) to be watched by a - * {@link MetricSampleQuantiles} object. + * Specifies a quantile (with error bounds) to be watched by a {@link MetricSampleQuantiles} object. */ @InterfaceAudience.Private public class MetricQuantile { @@ -54,12 +53,11 @@ public class MetricQuantile { long ebits = Double.doubleToLongBits(error); return qbits == Double.doubleToLongBits(that.quantile) - && ebits == Double.doubleToLongBits(that.error); + && ebits == Double.doubleToLongBits(that.error); } @Override public int hashCode() { - return (int) (Double.doubleToLongBits(quantile) ^ Double - .doubleToLongBits(error)); + return (int) (Double.doubleToLongBits(quantile) ^ Double.doubleToLongBits(error)); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java index c1880f8203b..cf01b099bb8 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.util; import java.io.IOException; @@ -24,24 +23,16 @@ import java.util.HashMap; import java.util.LinkedList; import java.util.ListIterator; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm - * for streaming calculation of targeted high-percentile epsilon-approximate - * quantiles. - * - * This is a generalization of the earlier work by Greenwald and Khanna (GK), - * which essentially allows different error bounds on the targeted quantiles, - * which allows for far more efficient calculation of high-percentiles. - * - * See: Cormode, Korn, Muthukrishnan, and Srivastava - * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005 - * - * Greenwald and Khanna, - * "Space-efficient online computation of quantile summaries" in SIGMOD 2001 - * + * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm for streaming + * calculation of targeted high-percentile epsilon-approximate quantiles. This is a generalization + * of the earlier work by Greenwald and Khanna (GK), which essentially allows different error bounds + * on the targeted quantiles, which allows for far more efficient calculation of high-percentiles. + * See: Cormode, Korn, Muthukrishnan, and Srivastava "Effective Computation of Biased Quantiles over + * Data Streams" in ICDE 2005 Greenwald and Khanna, "Space-efficient online computation of quantile + * summaries" in SIGMOD 2001 */ @InterfaceAudience.Private public class MetricSampleQuantiles { @@ -57,9 +48,8 @@ public class MetricSampleQuantiles { private LinkedList samples; /** - * Buffers incoming items to be inserted in batch. Items are inserted into - * the buffer linearly. When the buffer fills, it is flushed into the samples - * array in its entirety. + * Buffers incoming items to be inserted in batch. Items are inserted into the buffer linearly. + * When the buffer fills, it is flushed into the samples array in its entirety. */ private long[] buffer = new long[500]; private int bufferCount = 0; @@ -75,14 +65,9 @@ public class MetricSampleQuantiles { } /** - * Specifies the allowable error for this rank, depending on which quantiles - * are being targeted. - * - * This is the f(r_i, n) function from the CKMS paper. It's basically how wide - * the range of this rank can be. - * - * @param rank - * the index in the list of samples + * Specifies the allowable error for this rank, depending on which quantiles are being targeted. + * This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this + * rank can be. n * the index in the list of samples */ private double allowableError(int rank) { int size = samples.size(); @@ -104,7 +89,6 @@ public class MetricSampleQuantiles { /** * Add a new value from the stream. - * * @param v the value to insert */ synchronized public void insert(long v) { @@ -120,8 +104,8 @@ public class MetricSampleQuantiles { } /** - * Merges items from buffer into the samples array in one pass. - * This is more efficient than doing an insert on every item. + * Merges items from buffer into the samples array in one pass. This is more efficient than doing + * an insert on every item. */ private void insertBatch() { if (bufferCount == 0) { @@ -166,9 +150,8 @@ public class MetricSampleQuantiles { } /** - * Try to remove extraneous items from the set of sampled items. 
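The reflowed javadoc above cites the CKMS targeted-quantile algorithm, and allowableError implements its f(r_i, n) function: each tracked quantile carries its own error bound, and a sample may be merged away only while its rank uncertainty stays inside the tightest of those bounds. The sketch below shows that error function in isolation, following the standard CKMS formulation; the Target class and the concrete quantile/error pairs are assumptions for illustration.

public class AllowableErrorSketch {

  // A tracked quantile with its permitted error, e.g. the 99th percentile +/- 0.1%.
  static final class Target {
    final double quantile;
    final double error;

    Target(double quantile, double error) {
      this.quantile = quantile;
      this.error = error;
    }
  }

  private static final Target[] TARGETS = {
    new Target(0.50, 0.050),
    new Target(0.90, 0.010),
    new Target(0.99, 0.001),
  };

  // f(r, n): below a target's quantile the bound widens with the distance below
  // it, above the quantile it widens with the rank itself; the overall bound is
  // the minimum over all targets.
  static double allowableError(int rank, int n) {
    double minError = n + 1;
    for (Target t : TARGETS) {
      double error;
      if (rank <= t.quantile * n) {
        error = (2.0 * t.error * (n - rank)) / (1.0 - t.quantile);
      } else {
        error = (2.0 * t.error * rank) / t.quantile;
      }
      if (error < minError) {
        minError = error;
      }
    }
    return minError;
  }

  public static void main(String[] args) {
    // Near the 99th percentile of 10,000 samples the bound is tight (about 20 ranks),
    // while in the middle of the distribution it is much looser (1000 ranks here).
    System.out.println(allowableError(9_900, 10_000));
    System.out.println(allowableError(5_000, 10_000));
  }
}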
This checks - * if an item is unnecessary based on the desired error bounds, and merges it - * with the adjacent item if it is. + * Try to remove extraneous items from the set of sampled items. This checks if an item is + * unnecessary based on the desired error bounds, and merges it with the adjacent item if it is. */ private void compress() { if (samples.size() < 2) { @@ -196,7 +179,6 @@ public class MetricSampleQuantiles { /** * Get the estimated value at the specified quantile. - * * @param quantile Queried quantile, e.g. 0.50 or 0.99. * @return Estimated value at that quantile. */ @@ -225,10 +207,7 @@ public class MetricSampleQuantiles { /** * Get a snapshot of the current values of all the tracked quantiles. - * - * @return snapshot of the tracked quantiles - * @throws IOException - * if no items have been added to the estimator + * @return snapshot of the tracked quantiles n * if no items have been added to the estimator */ synchronized public Map snapshot() throws IOException { // flush the buffer first for best results @@ -243,7 +222,6 @@ public class MetricSampleQuantiles { /** * Returns the number of items that the estimator has processed - * * @return count total number of items processed */ synchronized public long getCount() { @@ -252,7 +230,6 @@ public class MetricSampleQuantiles { /** * Returns the number of samples kept by the estimator - * * @return count current number of samples */ synchronized public int getSampleCount() { @@ -269,27 +246,24 @@ public class MetricSampleQuantiles { } /** - * Describes a measured value passed to the estimator, tracking additional - * metadata required by the CKMS algorithm. + * Describes a measured value passed to the estimator, tracking additional metadata required by + * the CKMS algorithm. */ private static class SampleItem { - + /** * Value of the sampled item (e.g. a measured latency value) */ private final long value; - + /** - * Difference between the lowest possible rank of the previous item, and - * the lowest possible rank of this item. - * - * The sum of the g of all previous items yields this item's lower bound. + * Difference between the lowest possible rank of the previous item, and the lowest possible + * rank of this item. The sum of the g of all previous items yields this item's lower bound. */ private int g; - + /** - * Difference between the item's greatest possible rank and lowest possible - * rank. + * Difference between the item's greatest possible rank and lowest possible rank. */ private final int delta; diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java index a022ef3e018..533d2d6d104 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.hadoop.mapreduce.Job; @@ -28,7 +27,7 @@ import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; public class HadoopShimsImpl implements HadoopShims { /** * Returns a TaskAttemptContext instance created from the given parameters. 
- * @param job an instance of o.a.h.mapreduce.Job + * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by * {@link TaskAttemptID#forName(String)} * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext @@ -36,7 +35,7 @@ public class HadoopShimsImpl implements HadoopShims { @Override @SuppressWarnings("unchecked") public T createTestTaskAttemptContext(J job, String taskId) { - Job j = (Job)job; - return (T)new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); + Job j = (Job) job; + return (T) new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java index d95c282ecf9..04f95bf2eb9 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,23 +29,23 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterProcSourceImpl + * Test for MetricsMasterProcSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterProcSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsMasterProcSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsMasterProcSourceImpl.class); @Test public void testGetInstance() throws Exception { - MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterProcSourceFactory.class); + MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class); MetricsMasterProcSource masterProcSource = metricsMasterProcSourceFactory.create(null); assertTrue(masterProcSource instanceof MetricsMasterProcSourceImpl); assertSame(metricsMasterProcSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java index 70ec90ab39a..8dc1ae6e163 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java @@ -29,21 +29,21 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterSourceImpl + * Test for MetricsMasterSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - 
HBaseClassTestRule.forClass(TestMetricsMasterSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsMasterSourceImpl.class); @Test public void testGetInstance() { - MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterSourceFactory.class); + MetricsMasterSourceFactory metricsMasterSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null); assertTrue(masterSource instanceof MetricsMasterSourceImpl); - assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance( - MetricsMasterSourceFactory.class)); + assertSame(metricsMasterSourceFactory, + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java index 063071b4317..63f22143a91 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test of default BaseSource for hadoop 2 + * Test of default BaseSource for hadoop 2 */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestBaseSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBaseSourceImpl.class); + HBaseClassTestRule.forClass(TestBaseSourceImpl.class); private static BaseSourceImpl bmsi; diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index 56199f4d611..9fcbd24f9ea 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
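Several of the test hunks in this area assert the same two things about CompatibilitySingletonFactory: getInstance returns the expected implementation type, and calling it twice returns the very same object. The sketch below illustrates only that "one instance per class" contract with a hypothetical SingletonCacheSketch; it does not reflect how the real factory discovers its implementations.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

// Hypothetical stand-in for a class-keyed singleton cache; the point is the
// contract the tests above verify with assertSame, not the lookup mechanism.
public final class SingletonCacheSketch {

  private static final ConcurrentMap<Class<?>, Object> INSTANCES = new ConcurrentHashMap<>();

  private SingletonCacheSketch() {
  }

  @SuppressWarnings("unchecked")
  public static <T> T getInstance(Class<T> clazz, Supplier<? extends T> factory) {
    // computeIfAbsent guarantees at most one instance is created per key.
    return (T) INSTANCES.computeIfAbsent(clazz, k -> factory.get());
  }

  public static void main(String[] args) {
    Object first = getInstance(StringBuilder.class, StringBuilder::new);
    Object second = getInstance(StringBuilder.class, StringBuilder::new);
    // Mirrors the assertSame(...) checks in the tests above.
    System.out.println("same instance: " + (first == second));
  }
}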
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.HashMap; diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java index 86a94baf72f..3cda99b134e 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java @@ -28,24 +28,22 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionServerSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRegionServerSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsRegionServerSourceImpl.class); @Test public void testGetInstance() { MetricsRegionServerSourceFactory metricsRegionServerSourceFactory = - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsRegionServerSource serverSource = - metricsRegionServerSourceFactory.createServer(null); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSource serverSource = metricsRegionServerSourceFactory.createServer(null); assertTrue(serverSource instanceof MetricsRegionServerSourceImpl); assertSame(metricsRegionServerSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); } - @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() { // This should throw an exception because MetricsRegionServerSourceImpl should only diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java index a802e8321c3..3fe116a11a7 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertNotEquals; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MetricsTests; @@ -31,17 +30,17 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRegionSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsRegionSourceImpl.class); @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() { - MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance( - MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + 
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java index 11177edcafb..f1694801d7b 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java @@ -30,27 +30,26 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test for MetricsTableSourceImpl + * Test for MetricsTableSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsTableSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsTableSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsTableSourceImpl.class); @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCode() throws Exception { MetricsRegionServerSourceFactory metricsFact = - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsTableSource one = metricsFact.createTable( - "ONETABLE", new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource oneClone = metricsFact.createTable( - "ONETABLE", - new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource two = metricsFact.createTable( - "TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); + MetricsTableSource one = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource oneClone = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource two = + metricsFact.createTable("TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); assertEquals(0, one.compareTo(oneClone)); assertEquals(one.hashCode(), oneClone.hashCode()); @@ -72,7 +71,7 @@ public class TestMetricsTableSourceImpl { @Test public void testGetTableMetrics() { MetricsTableSource oneTbl = - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) .createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); assertEquals("ONETABLE", oneTbl.getTableName()); } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java index 8a72961edad..b339dd8cc52 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; @@ -30,18 +29,18 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsUserSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsUserSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsUserSourceImpl.class); @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() throws Exception { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); MetricsUserSource oneClone = fact.createUser("ONE"); @@ -57,8 +56,7 @@ public class TestMetricsUserSourceImpl { assertTrue(two.compareTo(two) == 0); } - - @Test (expected = RuntimeException.class) + @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() throws Exception { // This should throw an exception because MetricsUserSourceImpl should only // be created by a factory. @@ -67,8 +65,8 @@ public class TestMetricsUserSourceImpl { @Test public void testGetUser() { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); assertEquals("ONE", one.getUser()); diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java index d8ec0af92bb..300d536dc5f 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,19 +28,17 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsWALSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsWALSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsWALSourceImpl.class); @Test public void testGetInstance() throws Exception { - MetricsWALSource walSource = - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); + MetricsWALSource walSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); assertTrue(walSource instanceof MetricsWALSourceImpl); - assertSame(walSource, - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); + assertSame(walSource, CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java index 6cc26e2a4dd..c825d01b162 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java @@ -27,16 +27,16 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactoryImpl.class); + HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactoryImpl.class); @Test public void testGetInstance() { - MetricsReplicationSourceFactory rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class); + MetricsReplicationSourceFactory rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class); assertTrue(rms instanceof MetricsReplicationSourceFactoryImpl); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java index faff4b38917..6c228413104 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java @@ -27,16 +27,16 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsReplicationSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsReplicationSourceImpl.class); @Test public void testGetInstance() throws Exception { - 
MetricsReplicationSource rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSource.class); + MetricsReplicationSource rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); assertTrue(rms instanceof MetricsReplicationSourceImpl); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java index 2ac7996485e..d1901f68bcc 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java @@ -31,16 +31,16 @@ import org.junit.experimental.categories.Category; /** * Test for hadoop 2's version of {@link MetricsRESTSource}. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRESTSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRESTSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsRESTSourceImpl.class); @Test public void ensureCompatRegistered() { assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) - instanceof MetricsRESTSourceImpl); + assertTrue(CompatibilitySingletonFactory + .getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index 83e25a636f0..7c7357c4f04 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.test; import static org.junit.Assert.assertEquals; @@ -25,7 +24,6 @@ import static org.junit.Assert.assertTrue; import java.util.HashMap; import java.util.Locale; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsCollector; @@ -36,7 +34,7 @@ import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; /** - * A helper class that will allow tests to get into hadoop2's metrics2 values. + * A helper class that will allow tests to get into hadoop2's metrics2 values. 
*/ public class MetricsAssertHelperImpl implements MetricsAssertHelper { private Map tags = new HashMap<>(); @@ -203,8 +201,8 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { public long getCounter(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get counter "+cName + " but did not",counters.get(cName)); - return counters.get(cName).longValue(); + assertNotNull("Should get counter " + cName + " but did not", counters.get(cName)); + return counters.get(cName).longValue(); } @Override @@ -225,8 +223,8 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { public double getGaugeDouble(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get gauge "+cName + " but did not",gauges.get(cName)); - return gauges.get(cName).doubleValue(); + assertNotNull("Should get gauge " + cName + " but did not", gauges.get(cName)); + return gauges.get(cName).doubleValue(); } @Override diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java index 7206810ab13..3bba46bcfa9 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java @@ -32,32 +32,32 @@ import org.junit.experimental.categories.Category; /** * Test for hadoop 2's version of MetricsThriftServerSourceFactory. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsThriftServerSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactoryImpl.class); + HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactoryImpl.class); @Test public void testCompatabilityRegistered() { - assertNotNull(CompatibilitySingletonFactory.getInstance( - MetricsThriftServerSourceFactory.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) - instanceof MetricsThriftServerSourceFactoryImpl); + assertNotNull( + CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)); + assertTrue(CompatibilitySingletonFactory.getInstance( + MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl); } @Test public void testCreateThriftOneSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); } @Test public void testCreateThriftTwoSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. 
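The getCounter and getGaugeDouble hunks above read a named metric out of a map and fail the test with an explicit message when it is missing. A simplified stand-in for that assert-then-read shape follows; MetricsLookupSketch fills its maps directly instead of snapshotting a metrics source, and its lower-casing canonicalize is only an assumption approximating the helper's real name normalization.

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public class MetricsLookupSketch {

  private final Map<String, Number> counters = new HashMap<>();
  private final Map<String, Number> gauges = new HashMap<>();

  void record(long count, double gauge) {
    counters.put(canonicalize("requestCount"), count);
    gauges.put(canonicalize("usedHeapMB"), gauge);
  }

  long getCounter(String name) {
    String key = canonicalize(name);
    Number value = counters.get(key);
    if (value == null) {
      // The real helper uses assertNotNull with the same message shape.
      throw new AssertionError("Should get counter " + key + " but did not");
    }
    return value.longValue();
  }

  double getGaugeDouble(String name) {
    String key = canonicalize(name);
    Number value = gauges.get(key);
    if (value == null) {
      throw new AssertionError("Should get gauge " + key + " but did not");
    }
    return value.doubleValue();
  }

  // Simplified stand-in for the helper's metric-name canonicalization.
  private static String canonicalize(String name) {
    return name.toLowerCase(Locale.ROOT);
  }

  public static void main(String[] args) {
    MetricsLookupSketch sketch = new MetricsLookupSketch();
    sketch.record(42L, 128.5d);
    System.out.println(sketch.getCounter("RequestCount"));   // 42
    System.out.println(sketch.getGaugeDouble("usedHeapMB")); // 128.5
  }
}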
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java index a199a78938a..bc200fd1e38 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java @@ -28,16 +28,16 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsZooKeeperSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsZooKeeperSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsZooKeeperSourceImpl.class); @Test public void testGetInstance() { MetricsZooKeeperSource zkSource = - CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); + CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); assertTrue(zkSource instanceof MetricsZooKeeperSourceImpl); assertSame(zkSource, CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class)); } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java index 1cf8702b7b1..62595a96bbd 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MetricsTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -30,9 +31,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.ArrayList; -import java.util.List; - @Category({ MetricsTests.class, SmallTests.class }) public class TestMutableRangeHistogram { diff --git a/hbase-hbtop/pom.xml b/hbase-hbtop/pom.xml index 3559382fb91..e0b0d6cc2bd 100644 --- a/hbase-hbtop/pom.xml +++ b/hbase-hbtop/pom.xml @@ -1,7 +1,5 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-hbtop Apache HBase - HBTop A real-time monitoring tool for HBase like Unix's top command - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.hbase @@ -107,4 +96,13 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java index 9c1a000831a..7e6944f73e7 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,6 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; - /** * A real-time monitoring tool for HBase like Unix top command. */ @@ -212,15 +211,12 @@ public class HBTop extends Configured implements Tool { private Options getOptions() { Options opts = new Options(); - opts.addOption("h", "help", false, - "Print usage; for help while the tool is running press 'h'"); - opts.addOption("d", "delay", true, - "The refresh delay (in seconds); default is 3 seconds"); + opts.addOption("h", "help", false, "Print usage; for help while the tool is running press 'h'"); + opts.addOption("d", "delay", true, "The refresh delay (in seconds); default is 3 seconds"); opts.addOption("m", "mode", true, "The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)|u (User)" + "|c (Client), default is r"); - opts.addOption("n", "numberOfIterations", true, - "The number of iterations"); + opts.addOption("n", "numberOfIterations", true, "The number of iterations"); opts.addOption("s", "sortField", true, "The initial sort field. You can prepend a `+' or `-' to the field name to also override" + " the sort direction. A leading `+' will force sorting high to low, whereas a `-' will" diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java index 577172a38cb..3331cd03550 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
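The HBTop#getOptions hunk above declares the tool's command-line flags through the commons-cli Options API. The sketch below wires a few similar flags end to end, from definition through parsing to defaulting; it is written against plain org.apache.commons.cli (HBase itself uses a relocated org.apache.hbase.thirdparty copy of the same API), and the option set shown is a reduced, illustrative subset.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;

public class TopOptionsSketch {

  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption("h", "help", false, "Print usage");
    opts.addOption("d", "delay", true, "The refresh delay (in seconds); default is 3 seconds");
    opts.addOption("m", "mode", true, "The mode; n|t|r|s|u|c, default is r");

    CommandLine cmd = new DefaultParser().parse(opts, args);
    if (cmd.hasOption("help")) {
      new HelpFormatter().printHelp("top-sketch", opts);
      return;
    }

    // Fall back to a default when the option is absent, as the tool does.
    long delaySeconds = Long.parseLong(cmd.getOptionValue("delay", "3"));
    String mode = cmd.getOptionValue("mode", "r");
    System.out.println("delay=" + delaySeconds + "s mode=" + mode);
  }
}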
See the NOTICE file * distributed with this work for additional information @@ -94,7 +94,8 @@ public final class Record implements Map { } public static Record ofEntries(Stream entries) { - return entries.collect(Record::builder, Builder::put, (r1, r2) -> {}).build(); + return entries.collect(Record::builder, Builder::put, (r1, r2) -> { + }).build(); } private Record(ImmutableMap values) { @@ -165,12 +166,11 @@ public final class Record implements Map { } public Record combine(Record o) { - return ofEntries(values.keySet().stream() - .map(k -> { - if (k.getFieldValueType() == FieldValueType.STRING) { - return entry(k, values.get(k)); - } - return entry(k, values.get(k).plus(o.values.get(k))); - })); + return ofEntries(values.keySet().stream().map(k -> { + if (k.getFieldValueType() == FieldValueType.STRING) { + return entry(k, values.get(k)); + } + return entry(k, values.get(k).plus(o.values.get(k))); + })); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java index 78adf7cce00..7d14f5691de 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.field.FieldValue; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a filter that's filtering the metric {@link Record}s. */ @@ -67,8 +66,10 @@ public final class RecordFilter { } StringBuilder fieldString = new StringBuilder(); - while (filterString.length() > index && filterString.charAt(index) != '<' - && filterString.charAt(index) != '>' && filterString.charAt(index) != '=') { + while ( + filterString.length() > index && filterString.charAt(index) != '<' + && filterString.charAt(index) != '>' && filterString.charAt(index) != '=' + ) { fieldString.append(filterString.charAt(index++)); } @@ -82,8 +83,10 @@ public final class RecordFilter { } StringBuilder operatorString = new StringBuilder(); - while (filterString.length() > index && (filterString.charAt(index) == '<' || - filterString.charAt(index) == '>' || filterString.charAt(index) == '=')) { + while ( + filterString.length() > index && (filterString.charAt(index) == '<' + || filterString.charAt(index) == '>' || filterString.charAt(index) == '=') + ) { operatorString.append(filterString.charAt(index++)); } @@ -166,8 +169,7 @@ public final class RecordFilter { return not != ret; } - int compare = ignoreCase ? - fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); + int compare = ignoreCase ? fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); boolean ret; switch (operator) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java index df460dd31cf..ab776cf0336 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
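Record#combine in the hunk above merges two records by keeping STRING-typed fields as they are and summing every other field value. The sketch below shows the same merge rule with plain maps and longs standing in for Record and FieldValue; CombineSketch and its key names are illustrative only.

import java.util.HashMap;
import java.util.Map;

public class CombineSketch {

  static Map<String, Object> combine(Map<String, Object> left, Map<String, Object> right) {
    Map<String, Object> merged = new HashMap<>();
    for (Map.Entry<String, Object> entry : left.entrySet()) {
      Object mine = entry.getValue();
      Object theirs = right.get(entry.getKey());
      if (mine instanceof String) {
        merged.put(entry.getKey(), mine);                         // identity fields are kept as-is
      } else {
        merged.put(entry.getKey(), (Long) mine + (Long) theirs);  // metric fields are summed
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    Map<String, Object> a = new HashMap<>();
    a.put("REGION", "abc123");
    a.put("#REQ/S", 10L);
    Map<String, Object> b = new HashMap<>();
    b.put("REGION", "abc123");
    b.put("#REQ/S", 5L);
    // #REQ/S becomes 15 while REGION stays "abc123".
    System.out.println(combine(a, b));
  }
}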
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.field; import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents fields that are displayed in the top screen. */ @@ -34,8 +33,7 @@ public enum Field { REGION("REGION", "Encoded Region Name", false, true, FieldValueType.STRING), REGION_SERVER("RS", "Short Region Server Name", true, true, FieldValueType.STRING), LONG_REGION_SERVER("LRS", "Long Region Server Name", true, true, FieldValueType.STRING), - REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, - FieldValueType.LONG), + REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, FieldValueType.LONG), READ_REQUEST_COUNT_PER_SECOND("#READ/S", "Read Request Count per second", false, false, FieldValueType.LONG), FILTERED_READ_REQUEST_COUNT_PER_SECOND("#FREAD/S", "Filtered Read Request Count per second", @@ -49,8 +47,7 @@ public enum Field { MEM_STORE_SIZE("MEMSTORE", "MemStore Size", false, false, FieldValueType.SIZE), LOCALITY("LOCALITY", "Block Locality", false, false, FieldValueType.FLOAT), START_KEY("SKEY", "Start Key", true, true, FieldValueType.STRING), - COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, - FieldValueType.LONG), + COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, FieldValueType.LONG), COMPACTED_CELL_COUNT("#COMPedCELL", "Compacted Cell Count", false, false, FieldValueType.LONG), COMPACTION_PROGRESS("%COMP", "Compaction Progress", false, false, FieldValueType.PERCENT), LAST_MAJOR_COMPACTION_TIME("LASTMCOMP", "Last Major Compaction Time", false, true, diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java index 3f0e5f7ad1d..ad153210dd9 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,11 @@ package org.apache.hadoop.hbase.hbtop.field; import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about a field. - * - * This has a {@link Field} itself and additional information (e.g. {@code defaultLength} and - * {@code displayByDefault}). This additional information is different between the - * {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. That's why the - * additional information is separated from {@link Field}. + * Information about a field. This has a {@link Field} itself and additional information (e.g. + * {@code defaultLength} and {@code displayByDefault}). This additional information is different + * between the {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. + * That's why the additional information is separated from {@link Field}. 
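The RecordFilter hunk above parses filter strings such as #REQ/S>100 with two character-scanning loops: one collects the field name up to the first comparison character, the next collects the run of comparison characters that forms the operator, and the remainder is the value. A compact sketch of that scan follows; FilterParseSketch returns null on malformed input, which is a simplification rather than the tool's actual error handling.

public class FilterParseSketch {

  static String[] parse(String filterString) {
    int index = 0;

    // 1. field name: everything before the first comparison character
    StringBuilder field = new StringBuilder();
    while (index < filterString.length() && "<>=".indexOf(filterString.charAt(index)) == -1) {
      field.append(filterString.charAt(index++));
    }

    // 2. operator: the run of comparison characters (e.g. "=", "==", ">=")
    StringBuilder operator = new StringBuilder();
    while (index < filterString.length() && "<>=".indexOf(filterString.charAt(index)) != -1) {
      operator.append(filterString.charAt(index++));
    }

    // 3. value: the remainder of the string
    String value = filterString.substring(index);
    if (field.length() == 0 || operator.length() == 0 || value.isEmpty()) {
      return null;
    }
    return new String[] { field.toString(), operator.toString(), value };
  }

  public static void main(String[] args) {
    String[] parts = parse("#REQ/S>100");
    System.out.println(parts[0] + " | " + parts[1] + " | " + parts[2]); // #REQ/S | > | 100
  }
}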
*/ @InterfaceAudience.Private public class FieldInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java index 086dadc3e29..4c4a29c0bb5 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,11 +22,8 @@ import java.util.Objects; import org.apache.hadoop.hbase.Size; import org.apache.yetus.audience.InterfaceAudience; - /** - * Represents a value of a field. - * - * The type of a value is defined by {@link FieldValue}. + * Represents a value of a field. The type of a value is defined by {@link FieldValue}. */ @InterfaceAudience.Private public final class FieldValue implements Comparable { @@ -103,23 +100,29 @@ public final class FieldValue implements Comparable { private Size optimizeSize(Size size) { if (size.get(Size.Unit.BYTE) < 1024d) { - return size.getUnit() == Size.Unit.BYTE ? - size : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); + return size.getUnit() == Size.Unit.BYTE + ? size + : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); } else if (size.get(Size.Unit.KILOBYTE) < 1024d) { - return size.getUnit() == Size.Unit.KILOBYTE ? - size : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); + return size.getUnit() == Size.Unit.KILOBYTE + ? size + : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); } else if (size.get(Size.Unit.MEGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.MEGABYTE ? - size : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); + return size.getUnit() == Size.Unit.MEGABYTE + ? size + : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); } else if (size.get(Size.Unit.GIGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.GIGABYTE ? - size : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); + return size.getUnit() == Size.Unit.GIGABYTE + ? size + : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); } else if (size.get(Size.Unit.TERABYTE) < 1024d) { - return size.getUnit() == Size.Unit.TERABYTE ? - size : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); + return size.getUnit() == Size.Unit.TERABYTE + ? size + : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); } - return size.getUnit() == Size.Unit.PETABYTE ? - size : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); + return size.getUnit() == Size.Unit.PETABYTE + ? 
size + : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); } private Size parseSizeString(String sizeString) { @@ -133,7 +136,7 @@ public final class FieldValue implements Comparable { } private Size.Unit convertToUnit(String unitSimpleName) { - for (Size.Unit unit: Size.Unit.values()) { + for (Size.Unit unit : Size.Unit.values()) { if (unitSimpleName.equals(unit.getSimpleName())) { return unit; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java index e2edae87b80..e9825d9206a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,15 @@ package org.apache.hadoop.hbase.hbtop.field; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the type of a {@link FieldValue}. */ @InterfaceAudience.Private public enum FieldValueType { - STRING, INTEGER, LONG, FLOAT, SIZE, PERCENT + STRING, + INTEGER, + LONG, + FLOAT, + SIZE, + PERCENT } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java index fe3edd1b254..8327b1425cf 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.UserMetrics; @@ -41,51 +40,53 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Implementation for {@link ModeStrategy} for client Mode. 
*/ -@InterfaceAudience.Private public final class ClientModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class ClientModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.CLIENT, 0, true), - new FieldInfo(Field.USER_COUNT, 5, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.CLIENT, 0, true), new FieldInfo(Field.USER_COUNT, 5, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final Map requestCountPerSecondMap = new HashMap<>(); ClientModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { List records = createRecords(clusterMetrics); return aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, - Field.USER_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, + Field.USER_COUNT); } List createRecords(ClusterMetrics clusterMetrics) { List ret = new ArrayList<>(); for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) { long lastReportTimestamp = serverMetrics.getLastReportTimestamp(); - serverMetrics.getUserMetrics().values().forEach(um -> um.getClientMetrics().values().forEach( - clientMetrics -> ret.add( - createRecord(um.getNameAsString(), clientMetrics, lastReportTimestamp, - serverMetrics.getServerName().getServerName())))); + serverMetrics.getUserMetrics().values() + .forEach(um -> um.getClientMetrics().values() + .forEach(clientMetrics -> ret.add(createRecord(um.getNameAsString(), clientMetrics, + lastReportTimestamp, serverMetrics.getServerName().getServerName())))); } return ret; } /** * Aggregate the records and count the unique values for the given distinctField - * * @param records records to be processed * @param groupBy Field on which group by needs to be done * @param distinctField Field whose unique values needs to be counted @@ -93,40 +94,39 @@ import org.apache.yetus.audience.InterfaceAudience; * @return aggregated records */ List aggregateRecordsAndAddDistinct(List records, Field groupBy, - Field distinctField, Field uniqueCountAssignedTo) { + Field distinctField, Field uniqueCountAssignedTo) { List result = new ArrayList<>(); - records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values() - .forEach(val -> { - Set distinctValues = new HashSet<>(); - Map map = new HashMap<>(); - for (Record record : val) { - for (Map.Entry field : record.entrySet()) { - if (distinctField.equals(field.getKey())) { - //We will not be adding the field in the new record whose distinct count is required - distinctValues.add(record.get(distinctField)); + 
records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values().forEach(val -> { + Set distinctValues = new HashSet<>(); + Map map = new HashMap<>(); + for (Record record : val) { + for (Map.Entry field : record.entrySet()) { + if (distinctField.equals(field.getKey())) { + // We will not be adding the field in the new record whose distinct count is required + distinctValues.add(record.get(distinctField)); + } else { + if (field.getKey().getFieldValueType() == FieldValueType.STRING) { + map.put(field.getKey(), field.getValue()); + } else { + if (map.get(field.getKey()) == null) { + map.put(field.getKey(), field.getValue()); } else { - if (field.getKey().getFieldValueType() == FieldValueType.STRING) { - map.put(field.getKey(), field.getValue()); - } else { - if (map.get(field.getKey()) == null) { - map.put(field.getKey(), field.getValue()); - } else { - map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); - } - } + map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); } } } - // Add unique count field - map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); - result.add(Record.ofEntries(map.entrySet().stream() - .map(k -> Record.entry(k.getKey(), k.getValue())))); - }); + } + } + // Add unique count field + map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); + result.add( + Record.ofEntries(map.entrySet().stream().map(k -> Record.entry(k.getKey(), k.getValue())))); + }); return result; } Record createRecord(String user, UserMetrics.ClientMetrics clientMetrics, - long lastReportTimestamp, String server) { + long lastReportTimestamp, String server) { Record.Builder builder = Record.builder(); String client = clientMetrics.getHostName(); builder.put(Field.CLIENT, clientMetrics.getHostName()); @@ -137,21 +137,22 @@ import org.apache.yetus.audience.InterfaceAudience; requestCountPerSecondMap.put(mapKey, requestCountPerSecond); } requestCountPerSecond.refresh(lastReportTimestamp, clientMetrics.getReadRequestsCount(), - clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); + clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getReadRequestCountPerSecond()); + requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getWriteRequestCountPerSecond()); + requestCountPerSecond.getWriteRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.USER, user); return builder.build(); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { + @Override + public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); + RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); return new DrillDownInfo(Mode.USER, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java index de3d582fb9f..7061d5374e8 100644 --- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +21,12 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about drilling down. - * - * When drilling down, going to next {@link Mode} with initial {@link RecordFilter}s. + * Information about drilling down. When drilling down, going to next {@link Mode} with initial + * {@link RecordFilter}s. */ @InterfaceAudience.Private public class DrillDownInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java index ffd98dfd683..4ae1b4faf33 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a display mode in the top screen. */ @@ -45,7 +44,7 @@ public enum Mode { private final ModeStrategy modeStrategy; Mode(String header, String description, ModeStrategy modeStrategy) { - this.header = Objects.requireNonNull(header); + this.header = Objects.requireNonNull(header); this.description = Objects.requireNonNull(description); this.modeStrategy = Objects.requireNonNull(modeStrategy); } @@ -59,7 +58,7 @@ public enum Mode { } public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + List pushDownFilters) { return modeStrategy.getRecords(clusterMetrics, pushDownFilters); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java index 021cee25810..db58f1facae 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,14 +26,17 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for strategy logic for {@link Mode}. 
*/ @InterfaceAudience.Private interface ModeStrategy { List getFieldInfos(); + Field getDefaultSortField(); + List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters); - @Nullable DrillDownInfo drillDown(Record selectedRecord); + + @Nullable + DrillDownInfo drillDown(Record selectedRecord); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java index 9175820e0ca..6b78be9e206 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.mode; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.hadoop.hbase.hbtop.field.Field; @@ -36,28 +35,26 @@ public final class ModeStrategyUtils { * @param filters List of filters * @return filtered records */ - public static List applyFilterAndGet(List records, - List filters) { + public static List applyFilterAndGet(List records, List filters) { if (filters != null && !filters.isEmpty()) { return records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } return records; } - /** - * Group by records on the basis of supplied groupBy field and - * Aggregate records using {@link Record#combine(Record)} - * + * Group by records on the basis of supplied groupBy field and Aggregate records using + * {@link Record#combine(Record)} * @param records records needs to be processed * @param groupBy Field to be used for group by * @return aggregated records */ public static List aggregateRecords(List records, Field groupBy) { return records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).entrySet().stream() - .flatMap(e -> e.getValue().stream().reduce(Record::combine).map(Stream::of) - .orElse(Stream.empty())).collect(Collectors.toList()); + .flatMap( + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) + .collect(Collectors.toList()); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java index f74d8bf22eb..a4a8a88aca3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.mode; import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -28,15 +27,13 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Namespace Mode. */ @InterfaceAudience.Private public final class NamespaceModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), @@ -44,13 +41,11 @@ public final class NamespaceModeStrategy implements ModeStrategy { new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - NamespaceModeStrategy(){ + NamespaceModeStrategy() { } @Override @@ -63,11 +58,12 @@ public final class NamespaceModeStrategy implements ModeStrategy { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field return ModeStrategyUtils.aggregateRecords(records, Field.NAMESPACE); @@ -75,9 +71,8 @@ public final class NamespaceModeStrategy implements ModeStrategy { @Override public DrillDownInfo drillDown(Record selectedRecord) { - List initialFilters = - Collections.singletonList(RecordFilter.newBuilder(Field.NAMESPACE) - .doubleEquals(selectedRecord.get(Field.NAMESPACE))); + List initialFilters = Collections.singletonList( + RecordFilter.newBuilder(Field.NAMESPACE).doubleEquals(selectedRecord.get(Field.NAMESPACE))); return new DrillDownInfo(Mode.TABLE, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java index 0adbc823bf4..9a70f61005a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.RegionMetrics; @@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Region Mode. */ @@ -47,29 +45,22 @@ import org.apache.yetus.audience.InterfaceAudience; public final class RegionModeStrategy implements ModeStrategy { private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_NAME, 0, false), - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.START_CODE, 13, false), - new FieldInfo(Field.REPLICA_ID, 5, false), - new FieldInfo(Field.REGION, 32, true), - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), + new FieldInfo(Field.REGION_NAME, 0, false), new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.START_CODE, 13, false), + new FieldInfo(Field.REPLICA_ID, 5, false), new FieldInfo(Field.REGION, 32, true), + new FieldInfo(Field.REGION_SERVER, 0, true), new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.STORE_FILE_SIZE, 10, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 12, false), - new FieldInfo(Field.NUM_STORE_FILES,4, true), - new FieldInfo(Field.MEM_STORE_SIZE, 8, true), - new FieldInfo(Field.LOCALITY, 8, true), - new FieldInfo(Field.START_KEY, 0, false), + new FieldInfo(Field.NUM_STORE_FILES, 4, true), new FieldInfo(Field.MEM_STORE_SIZE, 8, true), + new FieldInfo(Field.LOCALITY, 8, true), new FieldInfo(Field.START_KEY, 0, false), new FieldInfo(Field.COMPACTING_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTED_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTION_PROGRESS, 7, false), - new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false) - ); + new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false)); private final Map requestCountPerSecondMap = new HashMap<>(); @@ -86,8 +77,9 @@ public final class RegionModeStrategy implements ModeStrategy { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { List ret = new ArrayList<>(); for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) { long lastReportTimestamp = sm.getLastReportTimestamp(); @@ -119,8 +111,8 @@ public final class RegionModeStrategy implements ModeStrategy { tableName = tn.getQualifierAsString(); startKey = Bytes.toStringBinary(elements[1]); startCode = Bytes.toString(elements[2]); - replicaId = elements.length == 4 ? - Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; + replicaId = + elements.length == 4 ? 
Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; region = RegionInfo.encodeRegionName(regionMetrics.getRegionName()); } catch (IOException ignored) { } @@ -145,11 +137,10 @@ public final class RegionModeStrategy implements ModeStrategy { builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getWriteRequestCountPerSecond()); - builder.put(Field.REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getRequestCountPerSecond()); + builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.STORE_FILE_SIZE, regionMetrics.getStoreFileSize()); builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE, regionMetrics.getUncompressedStoreFileSize()); @@ -160,7 +151,7 @@ public final class RegionModeStrategy implements ModeStrategy { long compactingCellCount = regionMetrics.getCompactingCellCount(); long compactedCellCount = regionMetrics.getCompactedCellCount(); float compactionProgress = 0; - if (compactedCellCount > 0) { + if (compactedCellCount > 0) { compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount); } @@ -178,24 +169,22 @@ public final class RegionModeStrategy implements ModeStrategy { } /** - * Form new record list with records formed by only fields provided through fieldInfo and - * add a count field for each record with value 1 - * We are doing two operation of selecting and adding new field - * because of saving some CPU cycles on rebuilding the record again - * + * Form new record list with records formed by only fields provided through fieldInfo and add a + * count field for each record with value 1 We are doing two operation of selecting and adding new + * field because of saving some CPU cycles on rebuilding the record again * @param fieldInfos List of FieldInfos required in the record * @param records List of records which needs to be processed * @param countField Field which needs to be added with value 1 for each record * @return records after selecting required fields and adding count field */ List selectModeFieldsAndAddCountField(List fieldInfos, List records, - Field countField) { + Field countField) { - return records.stream().map(record -> Record.ofEntries( - fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) - .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) - .map(record -> Record.builder().putAll(record).put(countField, 1).build()) - .collect(Collectors.toList()); + return records.stream().map( + record -> Record.ofEntries(fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) + .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) + .map(record -> Record.builder().putAll(record).put(countField, 1).build()) + .collect(Collectors.toList()); } @Nullable diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java index 44a9a2c8271..d06060bcc09 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.hbtop.Record; @@ -32,32 +31,27 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for RegionServer Mode. */ @InterfaceAudience.Private public final class RegionServerModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), - new FieldInfo(Field.REGION_COUNT, 7, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.STORE_FILE_SIZE, 13, true), - new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true), - new FieldInfo(Field.USED_HEAP_SIZE, 11, true), - new FieldInfo(Field.MAX_HEAP_SIZE, 11, true) - ); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.REGION_SERVER, 0, true), + new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REGION_COUNT, 7, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.STORE_FILE_SIZE, 13, true), + new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true), + new FieldInfo(Field.USED_HEAP_SIZE, 11, true), new FieldInfo(Field.MAX_HEAP_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - RegionServerModeStrategy(){ + RegionServerModeStrategy() { } @Override @@ -70,15 +64,16 @@ public final class RegionServerModeStrategy implements ModeStrategy { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by LONG_REGION_SERVER field Map retMap = - ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream() - .collect(Collectors.toMap(r -> r.get(Field.LONG_REGION_SERVER).asString(), r -> r)); + ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream() + .collect(Collectors.toMap(r -> r.get(Field.LONG_REGION_SERVER).asString(), r -> r)); // Add USED_HEAP_SIZE field and MAX_HEAP_SIZE field for (ServerMetrics sm : 
clusterMetrics.getLiveServerMetrics().values()) { @@ -87,9 +82,9 @@ public final class RegionServerModeStrategy implements ModeStrategy { continue; } - Record newRecord = Record.builder().putAll(record) - .put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) - .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); + Record newRecord = + Record.builder().putAll(record).put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) + .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); retMap.put(sm.getServerName().getServerName(), newRecord); } @@ -100,8 +95,7 @@ public final class RegionServerModeStrategy implements ModeStrategy { @Override public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList(RecordFilter - .newBuilder(Field.REGION_SERVER) - .doubleEquals(selectedRecord.get(Field.REGION_SERVER))); + .newBuilder(Field.REGION_SERVER).doubleEquals(selectedRecord.get(Field.REGION_SERVER))); return new DrillDownInfo(Mode.REGION, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java index d546070db71..72802569750 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.hbtop.mode; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for calculating request counts per second. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java index 4acc3441258..735dfdb4a4c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,26 +29,21 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Table Mode. 
*/ @InterfaceAudience.Private public final class TableModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.REGION_COUNT, 7, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); @@ -65,25 +60,21 @@ public final class TableModeStrategy implements ModeStrategy { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field and TABLE field - return records.stream() - .collect(Collectors.groupingBy(r -> { - String namespace = r.get(Field.NAMESPACE).asString(); - String table = r.get(Field.TABLE).asString(); - return TableName.valueOf(namespace, table); - })) - .entrySet().stream() + return records.stream().collect(Collectors.groupingBy(r -> { + String namespace = r.get(Field.NAMESPACE).asString(); + String table = r.get(Field.TABLE).asString(); + return TableName.valueOf(namespace, table); + })).entrySet().stream() .flatMap( - e -> e.getValue().stream() - .reduce(Record::combine) - .map(Stream::of) - .orElse(Stream.empty())) + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) .collect(Collectors.toList()); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java index 605376e1221..d2c9cf4c8ec 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.mode; import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -31,40 +30,44 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Implementation for {@link ModeStrategy} for User Mode. 
*/ -@InterfaceAudience.Private public final class UserModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class UserModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.USER, 0, true), - new FieldInfo(Field.CLIENT_COUNT, 7, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.USER, 0, true), new FieldInfo(Field.CLIENT_COUNT, 7, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final ClientModeStrategy clientModeStrategy = new ClientModeStrategy(); UserModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { List records = clientModeStrategy.createRecords(clusterMetrics); return clientModeStrategy.aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, - Field.CLIENT_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, + Field.CLIENT_COUNT); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { - //Drill down to client and using selected USER as a filter + @Override + public DrillDownInfo drillDown(Record selectedRecord) { + // Drill down to client and using selected USER as a filter List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); + RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); return new DrillDownInfo(Mode.CLIENT, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java index 8b55d6ec0df..4620d0896c2 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** * An abstract class for {@link ScreenView} that has the common useful methods and the default * implementations for the abstract methods. 
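Note on the mode-strategy classes reformatted above (ClientModeStrategy, UserModeStrategy, NamespaceModeStrategy, TableModeStrategy, RegionServerModeStrategy): they broadly follow the same filter-then-aggregate flow — build per-region or per-client records, drop the ones rejected by the pushed-down RecordFilters, group the rest by a key field, and merge each group's metrics via Record#combine. The short standalone sketch below illustrates only that pattern; it uses plain java.util types and hypothetical names (SimpleRecord, aggregate) rather than the hbtop Record/RecordFilter API, so it is a simplified stand-in, not code from this patch.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Simplified, self-contained stand-in for the hbtop filter-then-aggregate flow.
public final class AggregateSketch {

  // A record here is just a key (e.g. user or namespace) plus one numeric metric.
  record SimpleRecord(String key, long requestCount) {
    SimpleRecord combine(SimpleRecord other) {
      // Mirrors the idea of Record#combine: same key, metrics summed.
      return new SimpleRecord(key, requestCount + other.requestCount);
    }
  }

  // Keep only records that pass every filter, then group by key and reduce each
  // group with combine() — analogous to applyFilterAndGet followed by aggregateRecords.
  static List<SimpleRecord> aggregate(List<SimpleRecord> records,
    List<Predicate<SimpleRecord>> filters) {
    return records.stream()
      .filter(r -> filters.stream().allMatch(f -> f.test(r)))
      .collect(Collectors.groupingBy(SimpleRecord::key, LinkedHashMap::new,
        Collectors.reducing(SimpleRecord::combine)))
      .values().stream()
      .flatMap(Optional::stream)
      .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<SimpleRecord> perRegion = new ArrayList<>(List.of(
      new SimpleRecord("alice", 10),
      new SimpleRecord("alice", 5),
      new SimpleRecord("bob", 7)));
    // A pushed-down filter, analogous in spirit to a RecordFilter: keep only "alice".
    List<Predicate<SimpleRecord>> filters = List.of(r -> r.key().equals("alice"));
    // Prints [SimpleRecord[key=alice, requestCount=15]]
    System.out.println(aggregate(perRegion, filters));
  }
}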
diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java index 2846c25d1cc..da0bd1e97e6 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * This dispatches key presses and timers to the current {@link ScreenView}. */ @@ -58,8 +57,7 @@ public class Screen implements Closeable { public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode, @Nullable List initialFields, @Nullable Field initialSortField, @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, - long numberOfIterations, boolean batchMode) - throws IOException { + long numberOfIterations, boolean batchMode) throws IOException { connection = ConnectionFactory.createConnection(conf); admin = connection.getAdmin(); @@ -69,9 +67,8 @@ public class Screen implements Closeable { } else { terminal = new TerminalImpl("hbtop"); } - currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, - initialMode, initialFields, initialSortField, initialAscendingSort, initialFilters, - numberOfIterations); + currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, initialMode, + initialFields, initialSortField, initialAscendingSort, initialFilters, numberOfIterations); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java index f061bff831d..9291cedb7db 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,16 @@ import edu.umd.cs.findbugs.annotations.Nullable; import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for a screen view that handles key presses and timers. */ @InterfaceAudience.Private public interface ScreenView { void init(); - @Nullable ScreenView handleKeyPress(KeyPress keyPress); - @Nullable ScreenView handleTimer(); + + @Nullable + ScreenView handleKeyPress(KeyPress keyPress); + + @Nullable + ScreenView handleTimer(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java index 45f5fd01efb..16576475419 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,10 @@ import java.util.ArrayList; import java.util.EnumMap; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the field screen. */ @@ -63,7 +61,7 @@ public class FieldScreenPresenter { int headerLength = 0; int descriptionLength = 0; - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); if (field == sortField) { @@ -86,8 +84,8 @@ public class FieldScreenPresenter { public void init() { fieldScreenView.hideCursor(); fieldScreenView.clearTerminal(); - fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, - currentPosition, headerMaxLength, descriptionMaxLength, moveMode); + fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, currentPosition, + headerMaxLength, descriptionMaxLength, moveMode); fieldScreenView.refreshTerminal(); } @@ -132,7 +130,7 @@ public class FieldScreenPresenter { } public void pageDown() { - if (currentPosition < fields.size() - 1 && !moveMode) { + if (currentPosition < fields.size() - 1 && !moveMode) { int previousPosition = currentPosition; currentPosition = fields.size() - 1; showField(previousPosition); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java index 16585014224..954786b476a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can change the displayed fields, the sort key and the order of the fields. 
*/ @@ -122,7 +121,7 @@ public class FieldScreenView extends AbstractScreenView { int descriptionMaxLength, boolean moveMode) { showScreenDescription(sortFieldHeader); - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); showField(i, field, fieldDisplayMap.get(field), i == currentPosition, headerMaxLength, descriptionMaxLength, moveMode); @@ -143,8 +142,8 @@ public class FieldScreenView extends AbstractScreenView { int fieldHeaderMaxLength, int fieldDescriptionMaxLength, boolean moveMode) { String fieldHeader = String.format("%-" + fieldHeaderMaxLength + "s", field.getHeader()); - String fieldDescription = String.format("%-" + fieldDescriptionMaxLength + "s", - field.getDescription()); + String fieldDescription = + String.format("%-" + fieldDescriptionMaxLength + "s", field.getDescription()); int row = FIELD_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); @@ -157,8 +156,8 @@ public class FieldScreenView extends AbstractScreenView { printer.startBold(); } - printer.startHighlight() - .printFormat("%s = %s", fieldHeader, fieldDescription).stopHighlight(); + printer.startHighlight().printFormat("%s = %s", fieldHeader, fieldDescription) + .stopHighlight(); if (display) { printer.stopBold(); @@ -172,8 +171,8 @@ public class FieldScreenView extends AbstractScreenView { printer.startBold(); } - printer.startHighlight().print(fieldHeader).stopHighlight() - .printFormat(" = %s", fieldDescription); + printer.startHighlight().print(fieldHeader).stopHighlight().printFormat(" = %s", + fieldDescription); if (display) { printer.stopBold(); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java index 5002ab8f6c1..218de676d4e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,8 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a description of a command that we can execute in the top screen. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java index f170fc57fde..9534796dfcc 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,35 +19,32 @@ package org.apache.hadoop.hbase.hbtop.screen.help; import java.util.Arrays; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the help screen. 
*/ @InterfaceAudience.Private public class HelpScreenPresenter { - private static final CommandDescription[] COMMAND_DESCRIPTIONS = new CommandDescription[] { - new CommandDescription("f", "Add/Remove/Order/Sort the fields"), - new CommandDescription("R", "Toggle the sort order (ascending/descending)"), - new CommandDescription("m", "Select mode"), - new CommandDescription("o", "Add a filter with ignoring case"), - new CommandDescription("O", "Add a filter with case sensitive"), - new CommandDescription("^o", "Show the current filters"), - new CommandDescription("=", "Clear the current filters"), - new CommandDescription("i", "Drill down"), - new CommandDescription( - Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), - "Scroll the metrics"), - new CommandDescription("d", "Change the refresh delay"), - new CommandDescription("X", "Adjust the field length"), - new CommandDescription("", "Refresh the display"), - new CommandDescription("h", "Display this screen"), - new CommandDescription(Arrays.asList("q", ""), "Quit") - }; + private static final CommandDescription[] COMMAND_DESCRIPTIONS = + new CommandDescription[] { new CommandDescription("f", "Add/Remove/Order/Sort the fields"), + new CommandDescription("R", "Toggle the sort order (ascending/descending)"), + new CommandDescription("m", "Select mode"), + new CommandDescription("o", "Add a filter with ignoring case"), + new CommandDescription("O", "Add a filter with case sensitive"), + new CommandDescription("^o", "Show the current filters"), + new CommandDescription("=", "Clear the current filters"), + new CommandDescription("i", "Drill down"), + new CommandDescription( + Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), + "Scroll the metrics"), + new CommandDescription("d", "Change the refresh delay"), + new CommandDescription("X", "Adjust the field length"), + new CommandDescription("", "Refresh the display"), + new CommandDescription("h", "Display this screen"), + new CommandDescription(Arrays.asList("q", ""), "Quit") }; private final HelpScreenView helpScreenView; private final long refreshDelay; diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java index ccdc15737d1..fc4f75dd966 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The help screen. 
*/ @@ -68,8 +67,8 @@ public class HelpScreenView extends AbstractScreenView { private void showScreenDescription(long refreshDelay) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Help for Interactive Commands").stopBold().endOfLine(); - printer.print("Refresh delay: ").startBold() - .print((double) refreshDelay / 1000).stopBold().endOfLine(); + printer.print("Refresh delay: ").startBold().print((double) refreshDelay / 1000).stopBold() + .endOfLine(); } private void showCommandDescription(TerminalPrinter terminalPrinter, diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java index 8cd9879b0ed..07f9dc7ee43 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.mode.Mode; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the mode screen. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java index bda9853028b..5aa404ef2ef 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can choose the {@link Mode} in the top screen. 
*/ @@ -43,8 +42,8 @@ public class ModeScreenView extends AbstractScreenView { public ModeScreenView(Screen screen, Terminal terminal, Mode currentMode, Consumer resultListener, ScreenView nextScreenView) { super(screen, terminal); - this.modeScreenPresenter = new ModeScreenPresenter(this, currentMode, resultListener, - nextScreenView); + this.modeScreenPresenter = + new ModeScreenPresenter(this, currentMode, resultListener, nextScreenView); } @Override @@ -106,16 +105,16 @@ public class ModeScreenView extends AbstractScreenView { showScreenDescription(currentMode); for (int i = 0; i < modes.size(); i++) { - showMode(i, modes.get(i), i == currentPosition, - modeHeaderMaxLength, modeDescriptionMaxLength); + showMode(i, modes.get(i), i == currentPosition, modeHeaderMaxLength, + modeDescriptionMaxLength); } } private void showScreenDescription(Mode currentMode) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Mode Management").stopBold().endOfLine(); - printer.print("Current mode: ") - .startBold().print(currentMode.getHeader()).stopBold().endOfLine(); + printer.print("Current mode: ").startBold().print(currentMode.getHeader()).stopBold() + .endOfLine(); printer.print("Select mode followed by ").endOfLine(); } @@ -123,8 +122,8 @@ public class ModeScreenView extends AbstractScreenView { int modeDescriptionMaxLength) { String modeHeader = String.format("%-" + modeHeaderMaxLength + "s", mode.getHeader()); - String modeDescription = String.format("%-" + modeDescriptionMaxLength + "s", - mode.getDescription()); + String modeDescription = + String.format("%-" + modeDescriptionMaxLength + "s", mode.getDescription()); int row = MODE_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java index 6c6bf1c1b21..b123deff243 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the filter display mode. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java index e85a4b7df42..86585ad9928 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,8 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** - * The filter display mode in the top screen. - * - * Exit if Enter key is pressed. + * The filter display mode in the top screen. Exit if Enter key is pressed. */ @InterfaceAudience.Private public class FilterDisplayModeScreenView extends AbstractScreenView { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java index df672e9695d..98a059faacc 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Objects; import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents headers for the metrics in the top screen. */ @@ -36,7 +35,7 @@ public class Header { } public String format() { - return "%" + (field.isLeftJustify() ? "-" : "") + length + "s"; + return "%" + (field.isLeftJustify() ? "-" : "") + length + "s"; } public Field getField() { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java index 8ab858b995f..e79c50f845f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.function.Function; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the input mode. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java index ab64a8ade22..7c5cecc8f4a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The input mode in the top screen. 
*/ @@ -40,8 +39,8 @@ public class InputModeScreenView extends AbstractScreenView { List histories, Function resultListener) { super(screen, terminal); this.row = row; - this.inputModeScreenPresenter = new InputModeScreenPresenter(this, message, histories, - resultListener); + this.inputModeScreenPresenter = + new InputModeScreenPresenter(this, message, histories, resultListener); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java index 174a15a4843..8a91891e2c6 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,8 @@ import java.util.Objects; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** - * The presentation logic for the message mode. - * - * Exit after 2 seconds or if any key is pressed. + * The presentation logic for the message mode. Exit after 2 seconds or if any key is pressed. */ @InterfaceAudience.Private public class MessageModeScreenPresenter { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java index 0dfa388fad0..8e8dc35af6a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The message mode in the top screen. */ @@ -38,8 +37,7 @@ public class MessageModeScreenView extends AbstractScreenView { ScreenView nextScreenView) { super(screen, terminal); this.row = row; - this.messageModeScreenPresenter = - new MessageModeScreenPresenter(this, message, nextScreenView); + this.messageModeScreenPresenter = new MessageModeScreenPresenter(this, message, nextScreenView); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java index b95e6f480e6..4f93dda8ec5 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for paging for the metrics. 
*/ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java index 03598f66fb4..635fe07a601 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the summary of the metrics. */ @@ -37,8 +36,8 @@ public class Summary { private final double averageLoad; private final long aggregateRequestPerSecond; - public Summary(String currentTime, String version, String clusterId, int servers, - int liveServers, int deadServers, int regionCount, int ritCount, double averageLoad, + public Summary(String currentTime, String version, String clusterId, int servers, int liveServers, + int deadServers, int regionCount, int ritCount, double averageLoad, long aggregateRequestPerSecond) { this.currentTime = Objects.requireNonNull(currentTime); this.version = Objects.requireNonNull(version); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java index 9cbcd18e885..aca2d0f3a8f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * The data and business logic for the top screen. 
*/ @@ -87,12 +86,11 @@ public class TopScreenModel { if (initialFields != null) { List tmp = new ArrayList<>(initialFields); tmp.addAll(currentMode.getFieldInfos().stream().map(FieldInfo::getField) - .filter(f -> !initialFields.contains(f)) - .collect(Collectors.toList())); + .filter(f -> !initialFields.contains(f)).collect(Collectors.toList())); fields = Collections.unmodifiableList(tmp); } else { - fields = Collections.unmodifiableList(currentMode.getFieldInfos().stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + fields = Collections.unmodifiableList( + currentMode.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList())); } if (keepSortFieldAndSortOrderIfPossible) { @@ -146,8 +144,7 @@ public class TopScreenModel { } private void refreshSummary(ClusterMetrics clusterMetrics) { - String currentTime = ISO_8601_EXTENDED_TIME_FORMAT - .format(EnvironmentEdgeManager.currentTime()); + String currentTime = ISO_8601_EXTENDED_TIME_FORMAT.format(EnvironmentEdgeManager.currentTime()); String version = clusterMetrics.getHBaseVersion(); String clusterId = clusterMetrics.getClusterId(); int liveServers = clusterMetrics.getLiveServerMetrics().size(); @@ -158,16 +155,15 @@ public class TopScreenModel { long aggregateRequestPerSecond = clusterMetrics.getLiveServerMetrics().entrySet().stream() .mapToLong(e -> e.getValue().getRequestCountPerSecond()).sum(); - summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, - liveServers, deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); + summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, liveServers, + deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); } private void refreshRecords(ClusterMetrics clusterMetrics) { List records = currentMode.getRecords(clusterMetrics, pushDownFilters); // Filter and sort - records = records.stream() - .filter(r -> filters.stream().allMatch(f -> f.execute(r))) + records = records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) .sorted((recordLeft, recordRight) -> { FieldValue left = recordLeft.get(currentSortField); FieldValue right = recordRight.get(currentSortField); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java index e4e3caee594..9912e35fd23 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the top screen. 
*/ @@ -132,8 +131,7 @@ public class TopScreenPresenter { for (Field f : topScreenModel.getFields()) { if (f.isAutoAdjust()) { int maxLength = topScreenModel.getRecords().stream() - .map(r -> r.get(f).asString().length()) - .max(Integer::compareTo).orElse(0); + .map(r -> r.get(f).asString().length()).max(Integer::compareTo).orElse(0); fieldLengthMap.put(f, Math.max(maxLength, f.getHeader().length())); } } @@ -142,8 +140,7 @@ public class TopScreenPresenter { private List
<Header>
      getDisplayedHeaders() { List displayFields = - topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).collect(Collectors.toList()); + topScreenModel.getFields().stream().filter(fieldDisplayMap::get).collect(Collectors.toList()); if (displayFields.isEmpty()) { horizontalScroll = 0; @@ -231,8 +228,7 @@ public class TopScreenPresenter { } private int getHeaderSize() { - return (int) topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).count(); + return (int) topScreenModel.getFields().stream().filter(fieldDisplayMap::get).count(); } public void switchSortOrder() { @@ -250,10 +246,8 @@ public class TopScreenPresenter { } public ScreenView transitionToFieldScreen(Screen screen, Terminal terminal) { - return new FieldScreenView(screen, terminal, - topScreenModel.getCurrentSortField(), topScreenModel.getFields(), - fieldDisplayMap, - (sortField, fields, fieldDisplayMap) -> { + return new FieldScreenView(screen, terminal, topScreenModel.getCurrentSortField(), + topScreenModel.getFields(), fieldDisplayMap, (sortField, fields, fieldDisplayMap) -> { topScreenModel.setSortFieldAndFields(sortField, fields); this.fieldDisplayMap.clear(); this.fieldDisplayMap.putAll(fieldDisplayMap); @@ -324,10 +318,9 @@ public class TopScreenPresenter { public ScreenView goToInputModeForFilter(Screen screen, Terminal terminal, int row, boolean ignoreCase) { return new InputModeScreenView(screen, terminal, row, - "add filter #" + (topScreenModel.getFilters().size() + 1) + - " (" + (ignoreCase ? "ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", - topScreenModel.getFilterHistories(), - (inputString) -> { + "add filter #" + (topScreenModel.getFilters().size() + 1) + " (" + + (ignoreCase ? "ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", + topScreenModel.getFilterHistories(), (inputString) -> { if (inputString.isEmpty()) { return topScreenView; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java index da5c88360d1..467201fcc55 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,10 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** - * The screen that provides a dynamic real-time view for the HBase metrics. - * - * This shows the metric {@link Summary} and the metric {@link Record}s. The summary and the - * metrics are updated periodically (3 seconds by default). + * The screen that provides a dynamic real-time view for the HBase metrics. This shows the metric + * {@link Summary} and the metric {@link Record}s. The summary and the metrics are updated + * periodically (3 seconds by default). 
*/ @InterfaceAudience.Private public class TopScreenView extends AbstractScreenView { @@ -59,9 +57,11 @@ public class TopScreenView extends AbstractScreenView { @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, long numberOfIterations) { super(screen, terminal); - this.topScreenPresenter = new TopScreenPresenter(this, initialRefreshDelay, - new TopScreenModel(admin, initialMode, initialFields, initialSortField, - initialAscendingSort, initialFilters), initialFields, numberOfIterations); + this.topScreenPresenter = + new TopScreenPresenter( + this, initialRefreshDelay, new TopScreenModel(admin, initialMode, initialFields, + initialSortField, initialAscendingSort, initialFilters), + initialFields, numberOfIterations); } @Override @@ -235,23 +235,17 @@ public class TopScreenView extends AbstractScreenView { printer.print(String.format("HBase hbtop - %s", summary.getCurrentTime())).endOfLine(); printer.print(String.format("Version: %s", summary.getVersion())).endOfLine(); printer.print(String.format("Cluster ID: %s", summary.getClusterId())).endOfLine(); - printer.print("RegionServer(s): ") - .startBold().print(Integer.toString(summary.getServers())).stopBold() - .print(" total, ") - .startBold().print(Integer.toString(summary.getLiveServers())).stopBold() - .print(" live, ") - .startBold().print(Integer.toString(summary.getDeadServers())).stopBold() - .print(" dead").endOfLine(); - printer.print("RegionCount: ") - .startBold().print(Integer.toString(summary.getRegionCount())).stopBold() - .print(" total, ") - .startBold().print(Integer.toString(summary.getRitCount())).stopBold() - .print(" rit").endOfLine(); - printer.print("Average Cluster Load: ") - .startBold().print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine(); - printer.print("Aggregate Request/s: ") - .startBold().print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold() - .endOfLine(); + printer.print("RegionServer(s): ").startBold().print(Integer.toString(summary.getServers())) + .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getLiveServers())) + .stopBold().print(" live, ").startBold().print(Integer.toString(summary.getDeadServers())) + .stopBold().print(" dead").endOfLine(); + printer.print("RegionCount: ").startBold().print(Integer.toString(summary.getRegionCount())) + .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getRitCount())) + .stopBold().print(" rit").endOfLine(); + printer.print("Average Cluster Load: ").startBold() + .print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine(); + printer.print("Aggregate Request/s: ").startBold() + .print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold().endOfLine(); } private void showRecords(List
      headers, List records, Record selectedRecord) { @@ -264,7 +258,7 @@ public class TopScreenView extends AbstractScreenView { } List buf = new ArrayList<>(headers.size()); for (int i = 0; i < size; i++) { - if(i < records.size()) { + if (i < records.size()) { Record record = records.get(i); buf.clear(); for (Header header : headers) { @@ -293,8 +287,7 @@ public class TopScreenView extends AbstractScreenView { } private void showHeaders(List
      headers) { - String header = headers.stream() - .map(h -> String.format(h.format(), h.getField().getHeader())) + String header = headers.stream().map(h -> String.format(h.format(), h.getField().getHeader())) .collect(Collectors.joining(" ")); if (!header.isEmpty()) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java index 9322aaa8157..331128ba2d3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.terminal; import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * The attributes of text in the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java index 843a315ab71..8747de0c0cc 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,17 @@ package org.apache.hadoop.hbase.hbtop.terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal color definitions. */ @InterfaceAudience.Private public enum Color { - BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE + BLACK, + RED, + GREEN, + YELLOW, + BLUE, + MAGENTA, + CYAN, + WHITE } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java index 775ff3d72e6..11da1b58c6e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.terminal; import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * A 2-d position in 'terminal space'. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java index d0be00c5868..d85b3f05d3c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the user pressing a key on the keyboard. */ @@ -97,13 +96,8 @@ public class KeyPress { @Override public String toString() { - return "KeyPress{" + - "type=" + type + - ", character=" + escape(character) + - ", alt=" + alt + - ", ctrl=" + ctrl + - ", shift=" + shift + - '}'; + return "KeyPress{" + "type=" + type + ", character=" + escape(character) + ", alt=" + alt + + ", ctrl=" + ctrl + ", shift=" + shift + '}'; } private String escape(Character character) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java index c834b7515c2..f34cfc298c6 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,19 +21,29 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.io.Closeable; import org.apache.yetus.audience.InterfaceAudience; - /** * The terminal interface that is an abstraction of terminal screen. */ @InterfaceAudience.Private public interface Terminal extends Closeable { void clear(); + void refresh(); - @Nullable TerminalSize getSize(); - @Nullable TerminalSize doResizeIfNecessary(); - @Nullable KeyPress pollKeyPress(); + + @Nullable + TerminalSize getSize(); + + @Nullable + TerminalSize doResizeIfNecessary(); + + @Nullable + KeyPress pollKeyPress(); + CursorPosition getCursorPosition(); + void setCursorPosition(int column, int row); + void hideCursor(); + TerminalPrinter getTerminalPrinter(int startRow); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java index 66fb55875b0..52818e42a7d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.hbtop.terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The interface responsible for printing to the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java index f7e55dde7b5..7aea3dac115 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.terminal; import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal dimensions in 2-d space, measured in number of rows and columns. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java index de61477ce33..6cd9475c6d0 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Attributes; import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a single text cell of the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java index 52f8e374364..28cc52e4d8a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for escape sequences. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java index a20222c3eb5..15cbb3070c3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - /** - * This generates {@link KeyPress} objects from the given input stream and offers them to the - * given queue. + * This generates {@link KeyPress} objects from the given input stream and offers them to the given + * queue. 
*/ @InterfaceAudience.Private public class KeyPressGenerator { @@ -48,7 +47,10 @@ public class KeyPressGenerator { private static final Logger LOGGER = LoggerFactory.getLogger(KeyPressGenerator.class); private enum ParseState { - START, ESCAPE, ESCAPE_SEQUENCE_PARAM1, ESCAPE_SEQUENCE_PARAM2 + START, + ESCAPE, + ESCAPE_SEQUENCE_PARAM1, + ESCAPE_SEQUENCE_PARAM2 } private final Queue keyPressQueue; @@ -67,9 +69,9 @@ public class KeyPressGenerator { input = new InputStreamReader(inputStream, StandardCharsets.UTF_8); this.keyPressQueue = keyPressQueue; - executorService = Executors.newFixedThreadPool(2, new ThreadFactoryBuilder() - .setNameFormat("KeyPressGenerator-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + executorService = Executors.newFixedThreadPool(2, + new ThreadFactoryBuilder().setNameFormat("KeyPressGenerator-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); initState(); } @@ -469,8 +471,10 @@ public class KeyPressGenerator { private void offer(KeyPress keyPress) { // Handle ctrl + c - if (keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character && - keyPress.getCharacter() == 'c') { + if ( + keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character + && keyPress.getCharacter() == 'c' + ) { System.exit(0); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java index 8752c5fe689..887851f36bb 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Attributes; import org.apache.hadoop.hbase.hbtop.terminal.CursorPosition; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a buffer of the terminal screen for double-buffering. */ @@ -78,8 +77,10 @@ public class ScreenBuffer { flushRow(row, sb, attributes); } - if (cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows && - cursorColumn < columns) { + if ( + cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows + && cursorColumn < columns + ) { sb.append(cursor(true)); sb.append(moveCursor(cursorColumn, cursorRow)); } else { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java index c6b74afcbfa..e579b0ff057 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,10 +43,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * An implementation of the {@link Terminal} interface for normal display mode. - * - * This implementation produces output intended for human viewing. In particular, it only displays - * one screenful of data. 
The output contains some escape sequences for formatting. + * An implementation of the {@link Terminal} interface for normal display mode. This implementation + * produces output intended for human viewing. In particular, it only displays one screenful of + * data. The output contains some escape sequences for formatting. */ @InterfaceAudience.Private public class TerminalImpl implements Terminal { @@ -181,8 +180,8 @@ public class TerminalImpl implements Terminal { } private void sttyRaw() { - doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + - "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); + doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + + "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); } private void sttyCooked() { @@ -190,7 +189,7 @@ public class TerminalImpl implements Terminal { } private String doStty(String sttyOptionsString) { - String [] cmd = {"/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty"}; + String[] cmd = { "/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty" }; try { Process process = Runtime.getRuntime().exec(cmd); @@ -198,14 +197,14 @@ public class TerminalImpl implements Terminal { String ret; // stdout - try (BufferedReader stdout = new BufferedReader(new InputStreamReader( - process.getInputStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stdout = new BufferedReader( + new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { ret = stdout.readLine(); } // stderr - try (BufferedReader stderr = new BufferedReader(new InputStreamReader( - process.getErrorStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stderr = new BufferedReader( + new InputStreamReader(process.getErrorStream(), StandardCharsets.UTF_8))) { String line = stderr.readLine(); if ((line != null) && (line.length() > 0)) { LOGGER.error("Error output from stty: " + line); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java index 788d2679958..05e0b561153 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java index 60f550289e2..ba7a5de40a5 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,11 +25,9 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; /** - * An implementation of the {@link Terminal} interface for batch mode. - * - * This implementation produces output that's more sensible for collecting to a log file or for - * parsing. 
There is no limit on the number of output lines, and the output doesn't contain any - * escape sequences for formatting. + * An implementation of the {@link Terminal} interface for batch mode. This implementation produces + * output that's more sensible for collecting to a log file or for parsing. There is no limit on the + * number of output lines, and the output doesn't contain any escape sequences for formatting. */ public class BatchTerminal implements Terminal { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java index 60316669daa..ed216a16492 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java index 339cc40847d..3790af32ed6 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,20 +28,17 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecord { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecord.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRecord.class); @Test public void testBuilder() { - Record actual1 = Record.builder().put(Field.TABLE, "tableName") - .put(entry(Field.REGION_COUNT, 3)) - .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)) - .build(); + Record actual1 = + Record.builder().put(Field.TABLE, "tableName").put(entry(Field.REGION_COUNT, 3)) + .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)).build(); assertThat(actual1.size(), is(3)); assertThat(actual1.get(Field.TABLE).asString(), is("tableName")); @@ -58,11 +55,8 @@ public class TestRecord { @Test public void testOfEntries() { - Record actual = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record actual = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); assertThat(actual.size(), is(3)); assertThat(actual.get(Field.TABLE).asString(), is("tableName")); @@ -72,17 +66,11 @@ public class TestRecord { @Test public void testCombine() { - Record record1 = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record record1 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); - Record record2 = Record.ofEntries( - entry(Field.TABLE, 
"tableName"), - entry(Field.REGION_COUNT, 5), - entry(Field.REQUEST_COUNT_PER_SECOND, 500L) - ); + Record record2 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 5), + entry(Field.REQUEST_COUNT_PER_SECOND, 500L)); Record actual = record1.combine(record2); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java index 2807fd8ef61..155b7942a66 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecordFilter { @@ -49,8 +48,7 @@ public class TestRecordFilter { testParseAndBuilder("REGION=region1", false, RecordFilter.newBuilder(Field.REGION).equal("region1")); - testParseAndBuilder("REGION=", false, - RecordFilter.newBuilder(Field.REGION).equal("")); + testParseAndBuilder("REGION=", false, RecordFilter.newBuilder(Field.REGION).equal("")); testParseAndBuilder("!REGION=region1", false, RecordFilter.newBuilder(Field.REGION).notEqual("region1")); @@ -132,8 +130,8 @@ public class TestRecordFilter { public void testFilters() { List records = createTestRecords(); - testFilter(records, "REGION=region", false, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=region", false, "region1", "region2", "region3", "region4", + "region5"); testFilter(records, "!REGION=region", false); testFilter(records, "REGION=Region", false); @@ -148,8 +146,7 @@ public class TestRecordFilter { testFilter(records, "LOCALITY<0.5", false, "region5"); testFilter(records, "%COMP<=50%", false, "region2", "region3", "region4", "region5"); - testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, - "region2", "region5"); + testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, "region2", "region5"); testFilters(records, Arrays.asList("%COMP<=50%", "!#SF>=10"), false, "region4"); testFilters(records, Arrays.asList("!REGION==region1", "LOCALITY<0.5", "#REQ/S>100"), false, "region5"); @@ -159,10 +156,10 @@ public class TestRecordFilter { public void testFiltersIgnoreCase() { List records = createTestRecords(); - testFilter(records, "REGION=Region", true, - "region1", "region2", "region3", "region4", "region5"); - testFilter(records, "REGION=REGION", true, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=Region", true, "region1", "region2", "region3", "region4", + "region5"); + testFilter(records, "REGION=REGION", true, "region1", "region2", "region3", "region4", + "region5"); } private List createTestRecords() { @@ -175,8 +172,8 @@ public class TestRecordFilter { return ret; } - private Record createTestRecord(String region, long requestCountPerSecond, - Size storeFileSize, int numStoreFiles, float locality, float compactionProgress) { + private Record createTestRecord(String region, long requestCountPerSecond, Size storeFileSize, + int numStoreFiles, float locality, float compactionProgress) { Record.Builder builder = Record.builder(); builder.put(Field.REGION, region); 
builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond); @@ -194,12 +191,10 @@ public class TestRecordFilter { private void testFilters(List records, List filterStrings, boolean ignoreCase, String... expectedRegions) { - List actual = - records.stream().filter(r -> filterStrings.stream() - .map(f -> RecordFilter.parse(f, ignoreCase)) + List actual = records.stream() + .filter(r -> filterStrings.stream().map(f -> RecordFilter.parse(f, ignoreCase)) .allMatch(f -> f.execute(r))) - .map(r -> r.get(Field.REGION).asString()) - .collect(Collectors.toList()); + .map(r -> r.get(Field.REGION).asString()).collect(Collectors.toList()); assertThat(actual, hasItems(expectedRegions)); assertThat(actual.size(), is(expectedRegions.length)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java index c633e37825e..0f6a02a27c1 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.hbtop.screen.top.Summary; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Bytes; - public final class TestUtils { private TestUtils() { @@ -57,81 +56,62 @@ public final class TestUtils { // host1 List regionMetricsList = new ArrayList<>(); List userMetricsList = new ArrayList<>(); - userMetricsList.add(createUserMetrics("FOO",1,2, 4)); - userMetricsList.add(createUserMetrics("BAR",2,3, 3)); - regionMetricsList.add(createRegionMetrics( - "table1,,1.00000000000000000000000000000000.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 1, 2, 4)); + userMetricsList.add(createUserMetrics("BAR", 2, 3, 3)); + regionMetricsList.add(createRegionMetrics("table1,,1.00000000000000000000000000000000.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00")); - regionMetricsList.add(createRegionMetrics( - "table2,1,2.00000000000000000000000000000001.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,1,2.00000000000000000000000000000001.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,3_0001.00000000000000000000000000000002.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,3_0001.00000000000000000000000000000002.", 300, + 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1); - serverMetricsMap.put(host1, createServerMetrics(host1, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100, - 
regionMetricsList, userMetricsList)); + serverMetricsMap.put(host1, createServerMetrics(host1, 100, new Size(100, Size.Unit.MEGABYTE), + new Size(200, Size.Unit.MEGABYTE), 100, regionMetricsList, userMetricsList)); // host2 regionMetricsList.clear(); userMetricsList.clear(); - userMetricsList.add(createUserMetrics("FOO",5,7, 3)); - userMetricsList.add(createUserMetrics("BAR",4,8, 4)); - regionMetricsList.add(createRegionMetrics( - "table1,1,4.00000000000000000000000000000003.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 5, 7, 3)); + userMetricsList.add(createUserMetrics("BAR", 4, 8, 4)); + regionMetricsList.add(createRegionMetrics("table1,1,4.00000000000000000000000000000003.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03")); - regionMetricsList.add(createRegionMetrics( - "table2,,5.00000000000000000000000000000004.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,,5.00000000000000000000000000000004.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,6.00000000000000000000000000000005.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,6.00000000000000000000000000000005.", 300, 150, + 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2); - serverMetricsMap.put(host2, createServerMetrics(host2, 200, - new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200, - regionMetricsList, userMetricsList)); + serverMetricsMap.put(host2, createServerMetrics(host2, 200, new Size(16, Size.Unit.GIGABYTE), + new Size(32, Size.Unit.GIGABYTE), 200, regionMetricsList, userMetricsList)); ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3); - return ClusterMetricsBuilder.newBuilder() - .setHBaseVersion("3.0.0-SNAPSHOT") - .setClusterId("01234567-89ab-cdef-0123-456789abcdef") - .setLiveServerMetrics(serverMetricsMap) + return ClusterMetricsBuilder.newBuilder().setHBaseVersion("3.0.0-SNAPSHOT") + .setClusterId("01234567-89ab-cdef-0123-456789abcdef").setLiveServerMetrics(serverMetricsMap) .setDeadServerNames(Collections.singletonList(host3)) - .setRegionsInTransition(Collections.singletonList( - new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) - .setStartKey(new byte [0]) - .setEndKey(new byte [0]) - .setOffline(true) - .setReplicaId(0) - .setRegionId(0) - .setSplit(false) - .build(), - RegionState.State.OFFLINE, host3))) + .setRegionsInTransition(Collections + .singletonList(new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) + .setStartKey(new byte[0]).setEndKey(new byte[0]).setOffline(true).setReplicaId(0) + .setRegionId(0).setSplit(false).build(), RegionState.State.OFFLINE, host3))) .build(); } private static UserMetrics createUserMetrics(String user, long readRequestCount, - long writeRequestCount, 
long filteredReadRequestsCount) { - return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)).addClientMetris( - new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).addClientMetris( - new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).build(); + long writeRequestCount, long filteredReadRequestsCount) { + return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .build(); } private static RegionMetrics createRegionMetrics(String regionName, long readRequestCount, @@ -142,8 +122,7 @@ public final class TestUtils { FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss"); try { return RegionMetricsBuilder.newBuilder(Bytes.toBytes(regionName)) - .setReadRequestCount(readRequestCount) - .setFilteredReadRequestCount(filteredReadRequestCount) + .setReadRequestCount(readRequestCount).setFilteredReadRequestCount(filteredReadRequestCount) .setWriteRequestCount(writeRequestCount).setStoreFileSize(storeFileSize) .setUncompressedStoreFileSize(uncompressedStoreFileSize).setStoreFileCount(storeFileCount) .setMemStoreSize(memStoreSize).setDataLocality(locality) @@ -158,12 +137,9 @@ public final class TestUtils { Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond, List regionMetricsList, List userMetricsList) { - return ServerMetricsBuilder.newBuilder(serverName) - .setReportTimestamp(reportTimestamp) - .setUsedHeapSize(usedHeapSize) - .setMaxHeapSize(maxHeapSize) - .setRequestCountPerSecond(requestCountPerSecond) - .setRegionMetrics(regionMetricsList) + return ServerMetricsBuilder.newBuilder(serverName).setReportTimestamp(reportTimestamp) + .setUsedHeapSize(usedHeapSize).setMaxHeapSize(maxHeapSize) + .setRequestCountPerSecond(requestCountPerSecond).setRegionMetrics(regionMetricsList) .setUserMetrics(userMetricsList).build(); } @@ -174,48 +150,44 @@ public final class TestUtils { switch (record.get(Field.REGION_NAME).asString()) { case "table1,,1.00000000000000000000000000000000.": assertRecordInRegionMode(record, "default", "1", "", "table1", - "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, - "2019-07-22 00:00:00"); + "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, "2019-07-22 00:00:00"); break; case "table1,1,4.00000000000000000000000000000003.": assertRecordInRegionMode(record, "default", "4", "", "table1", - "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, - "2019-07-22 00:00:03"); + "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 
100L, 50L, 50f, "2019-07-22 00:00:03"); break; case "table2,,5.00000000000000000000000000000004.": assertRecordInRegionMode(record, "default", "5", "", "table2", - "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, - "2019-07-22 00:00:04"); + "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, "2019-07-22 00:00:04"); break; case "table2,1,2.00000000000000000000000000000001.": assertRecordInRegionMode(record, "default", "2", "", "table2", - "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, - "2019-07-22 00:00:01"); + "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, "2019-07-22 00:00:01"); break; case "namespace:table3,,6.00000000000000000000000000000005.": assertRecordInRegionMode(record, "namespace", "6", "", "table3", - "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.6f, "", 300L, 200L, 66.66667f, "2019-07-22 00:00:05"); break; case "namespace:table3,,3_0001.00000000000000000000000000000002.": assertRecordInRegionMode(record, "namespace", "3", "1", "table3", - "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.3f, "", 300L, 100L, 33.333336f, "2019-07-22 00:00:02"); break; @@ -229,10 +201,10 @@ public final class TestUtils { private static void assertRecordInRegionMode(Record record, String namespace, String startCode, String replicaId, String table, String region, String regionServer, String longRegionServer, long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, float locality, String startKey, long compactingCellCount, - long compactedCellCount, float compactionProgress, String lastMajorCompactionTime) { + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, float locality, + String startKey, long compactingCellCount, long compactedCellCount, float compactionProgress, + String lastMajorCompactionTime) { assertThat(record.size(), is(22)); assertThat(record.get(Field.NAMESPACE).asString(), is(namespace)); assertThat(record.get(Field.START_CODE).asString(), 
is(startCode)); @@ -241,8 +213,7 @@ public final class TestUtils { assertThat(record.get(Field.REGION).asString(), is(region)); assertThat(record.get(Field.REGION_SERVER).asString(), is(regionServer)); assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -289,8 +260,7 @@ public final class TestUtils { long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(10)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -339,7 +309,7 @@ public final class TestUtils { for (Record record : records) { String user = record.get(Field.USER).asString(); switch (user) { - //readRequestPerSecond and writeRequestPerSecond will be zero + // readRequestPerSecond and writeRequestPerSecond will be zero // because there is no change or new metrics during refresh case "FOO": assertRecordInUserMode(record, 0L, 0L, 0L); @@ -358,8 +328,8 @@ public final class TestUtils { for (Record record : records) { String client = record.get(Field.CLIENT).asString(); switch (client) { - //readRequestPerSecond and writeRequestPerSecond will be zero - // because there is no change or new metrics during refresh + // readRequestPerSecond and writeRequestPerSecond will be zero + // because there is no change or new metrics during refresh case "CLIENT_A_FOO": assertRecordInClientMode(record, 0L, 0L, 0L); break; @@ -379,36 +349,35 @@ public final class TestUtils { } private static void assertRecordInUserMode(Record record, long readRequestCountPerSecond, - long writeCountRequestPerSecond, long filteredReadRequestsCount) { + long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.CLIENT_COUNT).asInt(), is(2)); } private static void assertRecordInClientMode(Record record, long readRequestCountPerSecond, - long writeCountRequestPerSecond, long filteredReadRequestsCount) { + long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - 
is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.USER_COUNT).asInt(), is(1)); } private static void assertRecordInTableMode(Record record, long requestCountPerSecond, - long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, - long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, - int numStoreFiles, Size memStoreSize, int regionCount) { + long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, + long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, + int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(11)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -450,14 +419,12 @@ public final class TestUtils { private static void assertRecordInRegionServerMode(Record record, String longRegionServer, long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, int regionCount, Size usedHeapSize, Size maxHeapSize) { + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount, + Size usedHeapSize, Size maxHeapSize) { assertThat(record.size(), is(13)); - assertThat(record.get(Field.LONG_REGION_SERVER).asString(), - is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java index dcbdb6b9b8a..d2af864bdd0 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestFieldValue { @@ -122,8 +121,7 @@ public class TestFieldValue { } // Percent - FieldValue percentFieldValue = - new FieldValue(100f, FieldValueType.PERCENT); + FieldValue percentFieldValue = new FieldValue(100f, FieldValueType.PERCENT); assertThat(percentFieldValue.asString(), is("100.00%")); assertThat(percentFieldValue.asFloat(), is(100f)); @@ -255,44 +253,35 @@ public class TestFieldValue { @Test public void testOptimizeSize() { - FieldValue sizeFieldValue = - new FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); + FieldValue sizeFieldValue = new FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0B")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0MB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0GB")); sizeFieldValue = new FieldValue(new Size(2 * 1024 * 1024, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0TB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); - sizeFieldValue = - new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java index 4f086483853..a5803385140 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,18 +33,22 @@ import org.junit.experimental.categories.Category; @Category(SmallTests.class) public class TestClientMode extends TestModeBase { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientMode.class); + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestClientMode.class); - @Override protected Mode getMode() { + @Override + protected Mode getMode() { return Mode.CLIENT; } - @Override protected void assertRecords(List records) { + @Override + protected void assertRecords(List records) { TestUtils.assertRecordsInClientMode(records); } - @Override protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { + @Override + protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { assertThat(drillDownInfo.getNextMode(), is(Mode.USER)); assertThat(drillDownInfo.getInitialFilters().size(), is(1)); String client = currentRecord.get(Field.CLIENT).asString(); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java index a52b332265b..2d29fc41460 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,23 +22,21 @@ import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.TestUtils; import org.junit.Test; - public abstract class TestModeBase { @Test public void testGetRecords() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); assertRecords(records); } protected abstract Mode getMode(); + protected abstract void assertRecords(List records); @Test public void testDrillDown() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); for (Record record : records) { assertDrillDown(record, getMode().drillDown(record)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java index 6c498e94eb1..ab439fd826d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestNamespaceMode extends TestModeBase { @@ -59,8 +58,7 @@ public class TestNamespaceMode extends TestModeBase { break; case "namespace": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); break; default: diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java index b705531475f..f0756e48a95 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionMode extends TestModeBase { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java index cbfc7283fc6..62cbeea5d13 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionServerMode extends TestModeBase { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java index a73d54ea6bb..25dca63d57e 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRequestCountPerSecond { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java index f718304671c..0f05e484c1b 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestTableMode extends TestModeBase { @@ -68,8 +67,7 @@ public class TestTableMode extends TestModeBase { break; case "namespace:table3": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); assertThat(drillDownInfo.getInitialFilters().get(1).toString(), is("TABLE==table3")); break; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java index f094c85f548..772e24a82c2 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java index cbf740430b0..18211ee2463 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,6 @@ import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFieldScreenPresenter { @@ -71,17 +70,15 @@ public class TestFieldScreenPresenter { @Before public void setup() { Field sortField = Mode.REGION.getDefaultSortField(); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - fieldDisplayMap = Mode.REGION.getFieldInfos().stream() - .collect(() -> new EnumMap<>(Field.class), - (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> {}); + fieldDisplayMap = Mode.REGION.getFieldInfos().stream().collect(() -> new EnumMap<>(Field.class), + (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> { + }); - fieldScreenPresenter = - new FieldScreenPresenter(fieldScreenView, sortField, fields, fieldDisplayMap, resultListener, - topScreenView); + fieldScreenPresenter = new FieldScreenPresenter(fieldScreenView, sortField, fields, + fieldDisplayMap, resultListener, topScreenView); for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); @@ -122,8 +119,8 @@ public class TestFieldScreenPresenter { inOrder.verify(fieldScreenView).showScreenDescription(eq("LRS")); inOrder.verify(fieldScreenView).showScreenDescription(eq("#READ/S")); inOrder.verify(fieldScreenView).showScreenDescription(eq(fields.get(0).getHeader())); - inOrder.verify(fieldScreenView).showScreenDescription( - eq(fields.get(fields.size() - 1).getHeader())); + inOrder.verify(fieldScreenView) + .showScreenDescription(eq(fields.get(fields.size() - 1).getHeader())); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java index 245bf615e73..d6f21749897 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestHelpScreenPresenter { @@ -55,8 +54,8 @@ public class TestHelpScreenPresenter { @Before public void setup() { - helpScreenPresenter = new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, - topScreenView); + helpScreenPresenter = + new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java index 1b7e12a6240..c4984966c79 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestModeScreenPresenter { @@ -69,7 +68,7 @@ public class TestModeScreenPresenter { int modeDescriptionMaxLength = Mode.REGION_SERVER.getDescription().length(); verify(modeScreenView).showModeScreen(eq(Mode.REGION), eq(Arrays.asList(Mode.values())), - eq(Mode.REGION.ordinal()) , eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); + eq(Mode.REGION.ordinal()), eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java index 414b5b0702c..a79bcbd808f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFilterDisplayModeScreenPresenter { @@ -58,24 +57,23 @@ public class TestFilterDisplayModeScreenPresenter { @Before public void setup() { - List fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + List fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - List filters = new ArrayList<>(); + List filters = new ArrayList<>(); filters.add(RecordFilter.parse("NAMESPACE==namespace", fields, true)); filters.add(RecordFilter.parse("TABLE==table", fields, true)); - filterDisplayModeScreenPresenter = new FilterDisplayModeScreenPresenter( - filterDisplayModeScreenView, filters, topScreenView); + filterDisplayModeScreenPresenter = + new FilterDisplayModeScreenPresenter(filterDisplayModeScreenView, filters, topScreenView); } @Test public void testInit() { filterDisplayModeScreenPresenter.init(); - verify(filterDisplayModeScreenView).showFilters(argThat(filters -> filters.size() == 2 - && filters.get(0).toString().equals("NAMESPACE==namespace") - && filters.get(1).toString().equals("TABLE==table"))); + verify(filterDisplayModeScreenView).showFilters(argThat( + filters -> filters.size() == 2 && filters.get(0).toString().equals("NAMESPACE==namespace") + && filters.get(1).toString().equals("TABLE==table"))); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java index b5e9bb9f3ba..e7abefd854a 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestInputModeScreenPresenter { @@ -68,8 +67,8 @@ public class TestInputModeScreenPresenter { histories.add("history1"); histories.add("history2"); - inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, - TEST_INPUT_MESSAGE, histories, resultListener); + inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, TEST_INPUT_MESSAGE, + histories, resultListener); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java index 0acd79c56d2..4b4d10e8323 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestMessageModeScreenPresenter { @@ -53,8 +52,8 @@ public class TestMessageModeScreenPresenter { @Before public void setup() { - messageModeScreenPresenter = new MessageModeScreenPresenter(messageModeScreenView, - TEST_MESSAGE, topScreenView); + messageModeScreenPresenter = + new MessageModeScreenPresenter(messageModeScreenView, TEST_MESSAGE, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java index e0c09dfe167..f5a90cc6071 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,11 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestPaging { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPaging.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPaging.class); @Test public void testArrowUpAndArrowDown() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java index 44a8878407a..177d64002c7 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenModel { @@ -65,9 +64,8 @@ public class TestTopScreenModel { when(admin.getClusterMetrics()).thenReturn(TestUtils.createDummyClusterMetrics()); topScreenModel = new TopScreenModel(admin, Mode.REGION, null, null, null, null); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); } @Test @@ -172,9 +170,9 @@ public class TestTopScreenModel { assertThat(topScreenModel.getCurrentMode(), is(Mode.TABLE)); // Test for initialFilters - List initialFilters = Arrays.asList( - RecordFilter.parse("TABLE==table1", fields, true), - RecordFilter.parse("TABLE==table2", fields, true)); + List initialFilters = + Arrays.asList(RecordFilter.parse("TABLE==table1", fields, true), + RecordFilter.parse("TABLE==table2", fields, true)); topScreenModel.switchMode(Mode.TABLE, false, initialFilters); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java index d218dd52950..6a780b5ff85 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,6 @@ import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenPresenter { @@ -52,29 +51,19 @@ public class TestTopScreenPresenter { HBaseClassTestRule.forClass(TestTopScreenPresenter.class); private static final List TEST_FIELD_INFOS = Arrays.asList( - new FieldInfo(Field.REGION, 10, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.LOCALITY, 10, true) - ); + new FieldInfo(Field.REGION, 10, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.LOCALITY, 10, true)); private static final List TEST_RECORDS = Arrays.asList( - Record.ofEntries( - entry(Field.REGION, "region1"), - entry(Field.REQUEST_COUNT_PER_SECOND, 1L), + Record.ofEntries(entry(Field.REGION, "region1"), entry(Field.REQUEST_COUNT_PER_SECOND, 1L), entry(Field.LOCALITY, 0.3f)), - Record.ofEntries( - entry(Field.REGION, "region2"), - entry(Field.REQUEST_COUNT_PER_SECOND, 2L), + Record.ofEntries(entry(Field.REGION, "region2"), entry(Field.REQUEST_COUNT_PER_SECOND, 2L), entry(Field.LOCALITY, 0.2f)), - Record.ofEntries( - entry(Field.REGION, "region3"), - entry(Field.REQUEST_COUNT_PER_SECOND, 3L), - entry(Field.LOCALITY, 0.1f)) - ); + Record.ofEntries(entry(Field.REGION, "region3"), entry(Field.REQUEST_COUNT_PER_SECOND, 3L), + entry(Field.LOCALITY, 0.1f))); - private static final Summary TEST_SUMMARY = new Summary( - "00:00:01", "3.0.0-SNAPSHOT", "01234567-89ab-cdef-0123-456789abcdef", - 3, 2, 1, 6, 1, 3.0, 300); + private static final Summary TEST_SUMMARY = new Summary("00:00:01", "3.0.0-SNAPSHOT", + "01234567-89ab-cdef-0123-456789abcdef", 3, 2, 1, 6, 1, 3.0, 300); @Mock private TopScreenView topScreenView; @@ -90,13 +79,13 @@ public class TestTopScreenPresenter { when(topScreenView.getPageSize()).thenReturn(100); when(topScreenModel.getFieldInfos()).thenReturn(TEST_FIELD_INFOS); - when(topScreenModel.getFields()).thenReturn(TEST_FIELD_INFOS.stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + when(topScreenModel.getFields()) + .thenReturn(TEST_FIELD_INFOS.stream().map(FieldInfo::getField).collect(Collectors.toList())); when(topScreenModel.getRecords()).thenReturn(TEST_RECORDS); when(topScreenModel.getSummary()).thenReturn(TEST_SUMMARY); - topScreenPresenter = new TopScreenPresenter(topScreenView, 3000, topScreenModel, - null, Long.MAX_VALUE); + topScreenPresenter = + new TopScreenPresenter(topScreenView, 3000, topScreenModel, null, Long.MAX_VALUE); } @Test @@ -104,8 +93,8 @@ public class TestTopScreenPresenter { topScreenPresenter.init(); topScreenPresenter.refresh(true); - verify(topScreenView).showTopScreen(argThat(this::assertSummary), - argThat(this::assertHeaders), argThat(this::assertRecords), + verify(topScreenView).showTopScreen(argThat(this::assertSummary), argThat(this::assertHeaders), + argThat(this::assertRecords), argThat(selectedRecord -> assertSelectedRecord(selectedRecord, 0))); } @@ -211,9 +200,8 @@ public class TestTopScreenPresenter { } private boolean assertHeaders(List
<Header> actual) { - List<Header>
expected = - TEST_FIELD_INFOS.stream().map(fi -> new Header(fi.getField(), fi.getDefaultLength())) - .collect(Collectors.toList()); + List<Header>
      expected = TEST_FIELD_INFOS.stream() + .map(fi -> new Header(fi.getField(), fi.getDefaultLength())).collect(Collectors.toList()); if (actual.size() != expected.size()) { return false; @@ -250,8 +238,9 @@ public class TestTopScreenPresenter { } private boolean assertRecord(Record actual, Record expected) { - return actual.get(Field.REGION).equals(expected.get(Field.REGION)) && actual - .get(Field.REQUEST_COUNT_PER_SECOND).equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) + return actual.get(Field.REGION).equals(expected.get(Field.REGION)) + && actual.get(Field.REQUEST_COUNT_PER_SECOND) + .equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) && actual.get(Field.LOCALITY).equals(expected.get(Field.LOCALITY)); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java index 304c92b8497..3458e7ee31b 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestCursor { private TestCursor() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java index ebfe56981c4..6295cd0166a 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestKeyPress { private TestKeyPress() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java index 212395fecaf..6af4eef609c 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,10 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; - public final class TestTerminalPrinter { private TestTerminalPrinter() { @@ -38,8 +36,8 @@ public final class TestTerminalPrinter { printer.print("Normal string").endOfLine(); printer.startHighlight().print("Highlighted string").stopHighlight().endOfLine(); printer.startBold().print("Bold string").stopBold().endOfLine(); - printer.startHighlight().startBold().print("Highlighted bold string") - .stopBold().stopHighlight().endOfLine(); + printer.startHighlight().startBold().print("Highlighted bold string").stopBold() + .stopHighlight().endOfLine(); printer.endOfLine(); printer.print("Press any key to finish").endOfLine(); diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index 6ad70498419..04063d15ab0 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-http Apache HBase - HTTP HTTP functionality for HBase Servers - - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - default - - false - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - package - - jar - test-jar - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -267,6 +167,106 @@ test + + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + default + + false + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + jar + test-jar + + package + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + build-with-jdk11 @@ -291,10 +291,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -338,8 +338,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -350,6 +351,7 @@ org.apache.hadoop hadoop-minicluster + test com.google.guava @@ -360,7 +362,6 @@ zookeeper - test org.apache.hadoop @@ 
-379,10 +380,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources @@ -506,7 +507,7 @@ - + diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java index 215ff37e3bf..ba72af2e5f4 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -27,7 +26,6 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; @@ -38,16 +36,17 @@ public class AdminAuthorizedFilter implements Filter { private Configuration conf; private AccessControlList adminsAcl; - @Override public void init(FilterConfig filterConfig) throws ServletException { - adminsAcl = (AccessControlList) filterConfig.getServletContext().getAttribute( - HttpServer.ADMINS_ACL); - conf = (Configuration) filterConfig.getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + @Override + public void init(FilterConfig filterConfig) throws ServletException { + adminsAcl = + (AccessControlList) filterConfig.getServletContext().getAttribute(HttpServer.ADMINS_ACL); + conf = (Configuration) filterConfig.getServletContext() + .getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); } @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse)) { throw new UnsupportedOperationException("Only accepts HTTP"); } @@ -61,5 +60,7 @@ public class AdminAuthorizedFilter implements Filter { chain.doFilter(request, response); } - @Override public void destroy() {} + @Override + public void destroy() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java index 10156f43b44..2ad09b5ae5c 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,8 +39,7 @@ public class AdminAuthorizedServlet extends DefaultServlet { protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { // Do the authorization - if (HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { // Authorization is done. Just call super. 
super.doGet(request, response); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java index 0f0c7150c41..3cc58e8cf44 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -28,10 +27,8 @@ import javax.servlet.ServletException; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -46,7 +43,7 @@ public class ClickjackingPreventionFilter implements Filter { @Override public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.addHeader("X-Frame-Options", filterConfig.getInitParameter("xframeoptions")); chain.doFilter(req, res); @@ -58,8 +55,8 @@ public class ClickjackingPreventionFilter implements Filter { public static Map getDefaultParameters(Configuration conf) { Map params = new HashMap<>(); - params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode", - DEFAULT_XFRAMEOPTIONS)); + params.put("xframeoptions", + conf.get("hbase.http.filter.xframeoptions.mode", DEFAULT_XFRAMEOPTIONS)); return params; } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java index 5869ce3f92e..7c98352cf22 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,16 +27,17 @@ import org.apache.yetus.audience.InterfaceAudience; public interface FilterContainer { /** * Add a filter to the container. - * @param name Filter name - * @param classname Filter class name + * @param name Filter name + * @param classname Filter class name * @param parameters a map from parameter names to initial values */ void addFilter(String name, String classname, Map parameters); + /** - * Add a global filter to the container - This global filter will be - * applied to all available web contexts. - * @param name filter name - * @param classname filter class name + * Add a global filter to the container - This global filter will be applied to all available web + * contexts. 
+ * @param name filter name + * @param classname filter class name * @param parameters a map from parameter names to initial values */ void addGlobalFilter(String name, String classname, Map parameters); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java index 7e8595e7d04..135115acb1b 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ public abstract class FilterInitializer { /** * Initialize a Filter to a FilterContainer. * @param container The filter container - * @param conf Configuration for run-time parameters + * @param conf Configuration for run-time parameters */ public abstract void initFilter(FilterContainer container, Configuration conf); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java index 678b397949f..ee83a9a950e 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,17 +36,17 @@ public final class HtmlQuoting { /** * Does the given string need to be quoted? * @param data the string to check - * @param off the starting position - * @param len the number of bytes to check + * @param off the starting position + * @param len the number of bytes to check * @return does the string contain any of the active html characters? */ public static boolean needsQuoting(byte[] data, int off, int len) { - if (off+len > data.length) { - throw new IllegalStateException("off+len=" + off+len + " should be lower" - + " than data length=" + data.length); + if (off + len > data.length) { + throw new IllegalStateException( + "off+len=" + off + len + " should be lower" + " than data length=" + data.length); } - for(int i=off; i< off+len; ++i) { - switch(data[i]) { + for (int i = off; i < off + len; ++i) { + switch (data[i]) { case '&': case '<': case '>': @@ -70,20 +70,19 @@ public final class HtmlQuoting { return false; } byte[] bytes = str.getBytes(); - return needsQuoting(bytes, 0 , bytes.length); + return needsQuoting(bytes, 0, bytes.length); } /** - * Quote all of the active HTML characters in the given string as they - * are added to the buffer. + * Quote all of the active HTML characters in the given string as they are added to the buffer. 
* @param output the stream to write the output to * @param buffer the byte array to take the characters from - * @param off the index of the first byte to quote - * @param len the number of bytes to quote + * @param off the index of the first byte to quote + * @param len the number of bytes to quote */ public static void quoteHtmlChars(OutputStream output, byte[] buffer, int off, int len) - throws IOException { - for(int i=off; i < off+len; i++) { + throws IOException { + for (int i = off; i < off + len; i++) { switch (buffer[i]) { case '&': output.write(ampBytes); @@ -138,6 +137,7 @@ public final class HtmlQuoting { public static OutputStream quoteOutputStream(final OutputStream out) { return new OutputStream() { private byte[] data = new byte[1]; + @Override public void write(byte[] data, int off, int len) throws IOException { quoteHtmlChars(out, data, off, len); @@ -196,12 +196,11 @@ public final class HtmlQuoting { buffer.append('"'); next += 6; } else { - int end = item.indexOf(';', next)+1; + int end = item.indexOf(';', next) + 1; if (end == 0) { end = len; } - throw new IllegalArgumentException("Bad HTML quoting for " + - item.substring(next,end)); + throw new IllegalArgumentException("Bad HTML quoting for " + item.substring(next, end)); } posn = next; next = item.indexOf('&', posn); @@ -214,15 +213,16 @@ public final class HtmlQuoting { if (args.length == 0) { throw new IllegalArgumentException("Please provide some arguments"); } - for(String arg:args) { + for (String arg : args) { System.out.println("Original: " + arg); String quoted = quoteHtmlChars(arg); - System.out.println("Quoted: "+ quoted); + System.out.println("Quoted: " + quoted); String unquoted = unquoteHtmlChars(quoted); System.out.println("Unquoted: " + unquoted); System.out.println(); } } - private HtmlQuoting() {} + private HtmlQuoting() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java index 52c9133dcf6..09de376ea18 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -29,6 +28,7 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceStability.Unstable public class HttpConfig { private Policy policy; + public enum Policy { HTTP_ONLY, HTTPS_ONLY, @@ -53,8 +53,7 @@ public class HttpConfig { } public HttpConfig(final Configuration conf) { - boolean sslEnabled = conf.getBoolean( - ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, + boolean sslEnabled = conf.getBoolean(ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, ServerConfigurationKeys.HBASE_SSL_ENABLED_DEFAULT); policy = sslEnabled ? 
Policy.HTTPS_ONLY : Policy.HTTP_ONLY; if (sslEnabled) { diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java index d3e8005eb9c..5f63dda9f3e 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index f8c04bac971..de2e59befb4 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,6 +68,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion; @@ -96,12 +97,10 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/logs/" -> points to the log directory - * "/static/" -> points to common static files (src/webapps/static) - * "/" -> the jsp server code from (src/webapps/<name>) + * Create a Jetty embedded server to answer http requests. The primary goal is to serve up status + * information for the server. 
There are three contexts: "/logs/" -> points to the log directory + * "/static/" -> points to common static files (src/webapps/static) "/" -> the jsp server code + * from (src/webapps/<name>) */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -111,37 +110,35 @@ public class HttpServer implements FilterContainer { private static final int DEFAULT_MAX_HEADER_SIZE = 64 * 1024; // 64K - static final String FILTER_INITIALIZERS_PROPERTY - = "hbase.http.filter.initializers"; + static final String FILTER_INITIALIZERS_PROPERTY = "hbase.http.filter.initializers"; static final String HTTP_MAX_THREADS = "hbase.http.max.threads"; public static final String HTTP_UI_AUTHENTICATION = "hbase.security.authentication.ui"; static final String HTTP_AUTHENTICATION_PREFIX = "hbase.security.authentication."; - static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX - + "spnego."; + static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX + "spnego."; static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX = "kerberos.principal"; public static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX; static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX = "kerberos.keytab"; public static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX; static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules"; public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX; - static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX; + static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = + "kerberos.proxyuser.enable"; public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX; - public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; - static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = - "signature.secret.file"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX; + public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; + static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = "signature.secret.file"; public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY = - HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX; + HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX; public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.users"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.users"; public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.groups"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.groups"; public static final String HTTP_PRIVILEGED_CONF_KEY = - "hbase.security.authentication.ui.config.protected"; + 
"hbase.security.authentication.ui.config.protected"; public static final boolean HTTP_PRIVILEGED_CONF_DEFAULT = false; // The ServletContext attribute where the daemon Configuration @@ -162,11 +159,11 @@ public class HttpServer implements FilterContainer { private static final class ListenerInfo { /** - * Boolean flag to determine whether the HTTP server should clean up the - * listener in stop(). + * Boolean flag to determine whether the HTTP server should clean up the listener in stop(). */ private final boolean isManaged; private final ServerConnector listener; + private ListenerInfo(boolean isManaged, ServerConnector listener) { this.isManaged = isManaged; this.listener = listener; @@ -240,14 +237,10 @@ public class HttpServer implements FilterContainer { private int port = -1; /** - * Add an endpoint that the HTTP server should listen to. - * - * @param endpoint - * the endpoint of that the HTTP server should listen to. The - * scheme specifies the protocol (i.e. HTTP / HTTPS), the host - * specifies the binding address, and the port specifies the - * listening port. Unspecified or zero port means that the server - * can listen to any port. + * Add an endpoint that the HTTP server should listen to. n * the endpoint of that the HTTP + * server should listen to. The scheme specifies the protocol (i.e. HTTP / HTTPS), the host + * specifies the binding address, and the port specifies the listening port. Unspecified or zero + * port means that the server can listen to any port. */ public Builder addEndpoint(URI endpoint) { endpoints.add(endpoint); @@ -255,9 +248,9 @@ public class HttpServer implements FilterContainer { } /** - * Set the hostname of the http server. The host name is used to resolve the - * _HOST field in Kerberos principals. The hostname of the first listener - * will be used if the name is unspecified. + * Set the hostname of the http server. The host name is used to resolve the _HOST field in + * Kerberos principals. The hostname of the first listener will be used if the name is + * unspecified. */ public Builder hostName(String hostName) { this.hostName = hostName; @@ -284,8 +277,7 @@ public class HttpServer implements FilterContainer { } /** - * Specify whether the server should authorize the client in SSL - * connections. + * Specify whether the server should authorize the client in SSL connections. */ public Builder needsClientAuth(boolean value) { this.needsClientAuth = value; @@ -297,7 +289,7 @@ public class HttpServer implements FilterContainer { * @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead. */ @Deprecated - public Builder setName(String name){ + public Builder setName(String name) { this.name = name; return this; } @@ -307,7 +299,7 @@ public class HttpServer implements FilterContainer { * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. 
*/ @Deprecated - public Builder setBindAddress(String bindAddress){ + public Builder setBindAddress(String bindAddress) { this.bindAddress = bindAddress; return this; } @@ -393,7 +385,7 @@ public class HttpServer implements FilterContainer { try { endpoints.add(0, new URI("http", "", bindAddress, port, "", "", "")); } catch (URISyntaxException e) { - throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e); + throw new HadoopIllegalArgumentException("Invalid endpoint: " + e); } } @@ -447,11 +439,11 @@ public class HttpServer implements FilterContainer { LOG.debug("Excluded SSL Cipher List:" + excludeCiphers); } - listener = new ServerConnector(server.webServer, new SslConnectionFactory(sslCtxFactory, - HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig)); + listener = new ServerConnector(server.webServer, + new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), + new HttpConnectionFactory(httpsConfig)); } else { - throw new HadoopIllegalArgumentException( - "unknown scheme for endpoint:" + ep); + throw new HadoopIllegalArgumentException("unknown scheme for endpoint:" + ep); } // default settings for connector @@ -482,90 +474,83 @@ public class HttpServer implements FilterContainer { */ @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort) - throws IOException { + throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } /** - * Create a status server on the given port. Allows you to specify the - * path specifications that this server will be serving so that they will be - * added to the filters properly. - * - * @param name The name of the server + * Create a status server on the given port. Allows you to specify the path specifications that + * this server will be serving so that they will be added to the filters properly. + * @param name The name of the server * @param bindAddress The address for this server - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. - * @param conf Configuration - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port. + * @param conf Configuration + * @param pathSpecs Path specifications that this httpserver will be serving. These will be + * added to any filters. * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, String[] pathSpecs) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + String[] pathSpecs) throws IOException { this(name, bindAddress, port, findPort, conf, null, pathSpecs); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. - * @param name The name of the server - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. - * @param conf Configuration + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. 
+ * @param name The name of the server + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. + * @param conf Configuration * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) + throws IOException { this(name, bindAddress, port, findPort, conf, null, null); } /** - * Creates a status server on the given port. The JSP scripts are taken - * from src/webapp<name>. - * - * @param name the name of the server + * Creates a status server on the given port. The JSP scripts are taken from + * src/webapp<name>. + * @param name the name of the server * @param bindAddress the address for this server - * @param port the port to use on the server - * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port - * @param conf the configuration to use - * @param adminsAcl {@link AccessControlList} of the admins + * @param port the port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port + * @param conf the configuration to use + * @param adminsAcl {@link AccessControlList} of the admins * @throws IOException when creating the server fails * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl) - throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl) throws IOException { this(name, bindAddress, port, findPort, conf, adminsAcl, null); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. - * @param name The name of the server + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. + * @param name The name of the server * @param bindAddress The address for this server - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. - * @param conf Configuration - * @param adminsAcl {@link AccessControlList} of the admins - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port. + * @param conf Configuration + * @param adminsAcl {@link AccessControlList} of the admins + * @param pathSpecs Path specifications that this httpserver will be serving. These will be + * added to any filters. 
* @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl, - String[] pathSpecs) throws IOException { - this(new Builder().setName(name) - .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) - .setFindPort(findPort).setConf(conf).setACL(adminsAcl) - .setPathSpec(pathSpecs)); + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl, String[] pathSpecs) throws IOException { + this(new Builder().setName(name).addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setFindPort(findPort).setConf(conf).setACL(adminsAcl).setPathSpec(pathSpecs)); } private HttpServer(final Builder b) throws IOException { @@ -573,12 +558,11 @@ public class HttpServer implements FilterContainer { this.logDir = b.logDir; final String appDir = getWebAppsPath(b.name); - int maxThreads = b.conf.getInt(HTTP_MAX_THREADS, 16); // If HTTP_MAX_THREADS is less than or equal to 0, QueueThreadPool() will use the // default value (currently 200). - QueuedThreadPool threadPool = maxThreads <= 0 ? new QueuedThreadPool() - : new QueuedThreadPool(maxThreads); + QueuedThreadPool threadPool = + maxThreads <= 0 ? new QueuedThreadPool() : new QueuedThreadPool(maxThreads); threadPool.setDaemon(true); this.webServer = new Server(threadPool); @@ -590,9 +574,8 @@ public class HttpServer implements FilterContainer { this.webServer.setHandler(buildGzipHandler(this.webServer.getHandler())); } - private void initializeWebServer(String name, String hostName, - Configuration conf, String[] pathSpecs, HttpServer.Builder b) - throws FileNotFoundException, IOException { + private void initializeWebServer(String name, String hostName, Configuration conf, + String[] pathSpecs, HttpServer.Builder b) throws FileNotFoundException, IOException { Preconditions.checkNotNull(webAppContext); @@ -623,20 +606,18 @@ public class HttpServer implements FilterContainer { addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); - addGlobalFilter("clickjackingprevention", - ClickjackingPreventionFilter.class.getName(), - ClickjackingPreventionFilter.getDefaultParameters(conf)); + addGlobalFilter("clickjackingprevention", ClickjackingPreventionFilter.class.getName(), + ClickjackingPreventionFilter.getDefaultParameters(conf)); HttpConfig httpConfig = new HttpConfig(conf); - addGlobalFilter("securityheaders", - SecurityHeadersFilter.class.getName(), - SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); + addGlobalFilter("securityheaders", SecurityHeadersFilter.class.getName(), + SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); // But security needs to be enabled prior to adding the other servlets if (authenticationEnabled) { initSpnego(conf, hostName, b.usernameConfKey, b.keytabConfKey, b.kerberosNameRulesKey, - b.signatureSecretFileKey); + b.signatureSecretFileKey); } final FilterInitializer[] initializers = getFilterInitializers(conf); @@ -662,16 +643,16 @@ public class HttpServer implements FilterContainer { listeners.add(new ListenerInfo(true, connector)); } - private static WebAppContext createWebAppContext(String name, - Configuration conf, AccessControlList adminsAcl, final String appDir) { + private static WebAppContext createWebAppContext(String name, Configuration conf, + AccessControlList adminsAcl, final String appDir) { WebAppContext ctx = new WebAppContext(); ctx.setDisplayName(name); 
ctx.setContextPath("/"); ctx.setWar(appDir + "/" + name); ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); // for org.apache.hadoop.metrics.MetricsServlet - ctx.getServletContext().setAttribute( - org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf); + ctx.getServletContext().setAttribute(org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, + conf); ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); addNoCacheFilter(ctx); return ctx; @@ -681,11 +662,12 @@ public class HttpServer implements FilterContainer { * Construct and configure an instance of {@link GzipHandler}. With complex * multi-{@link WebAppContext} configurations, it's easiest to apply this handler directly to the * instance of {@link Server} near the end of its configuration, something like + * *
      -   *    Server server = new Server();
      -   *    //...
      -   *    server.setHandler(buildGzipHandler(server.getHandler()));
      -   *    server.start();
      +   * Server server = new Server();
      +   * // ...
      +   * server.setHandler(buildGzipHandler(server.getHandler()));
      +   * server.start();
          * 
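For reference, the usage pattern in the reformatted Javadoc above can be written out as a small compilable sketch. The shaded Jetty package names are assumed from the imports touched earlier in this file, and the context handler and port are placeholders rather than anything this patch prescribes:

import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.handler.ContextHandler;

public class GzipWiringSketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server(0);                // placeholder: bind to any free port
    server.setHandler(new ContextHandler("/"));   // stand-in for the real contexts/servlets
    // Wrap the fully configured handler tree last, as the Javadoc above recommends.
    server.setHandler(HttpServer.buildGzipHandler(server.getHandler()));
    server.start();
    server.stop();
  }
}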
      */ public static GzipHandler buildGzipHandler(final Handler wrapped) { @@ -696,7 +678,7 @@ public class HttpServer implements FilterContainer { private static void addNoCacheFilter(WebAppContext ctxt) { defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(), - Collections. emptyMap(), new String[] { "/*" }); + Collections. emptyMap(), new String[] { "/*" }); } /** Get an array of FilterConfiguration specified in the conf */ @@ -711,8 +693,8 @@ public class HttpServer implements FilterContainer { } FilterInitializer[] initializers = new FilterInitializer[classes.length]; - for(int i = 0; i < classes.length; i++) { - initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(classes[i]); + for (int i = 0; i < classes.length; i++) { + initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(classes[i]); } return initializers; } @@ -721,8 +703,8 @@ public class HttpServer implements FilterContainer { * Add default apps. * @param appDir The application directory */ - protected void addDefaultApps(ContextHandlerCollection parent, - final String appDir, Configuration conf) { + protected void addDefaultApps(ContextHandlerCollection parent, final String appDir, + Configuration conf) { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = this.logDir; if (logDir == null) { @@ -733,12 +715,12 @@ public class HttpServer implements FilterContainer { logContext.addServlet(AdminAuthorizedServlet.class, "/*"); logContext.setResourceBase(logDir); - if (conf.getBoolean( - ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, - ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) { + if ( + conf.getBoolean(ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, + ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES) + ) { Map params = logContext.getInitParams(); - params.put( - "org.mortbay.jetty.servlet.Default.aliases", "true"); + params.put("org.mortbay.jetty.servlet.Default.aliases", "true"); } logContext.setDisplayName("logs"); setContextAttributes(logContext, conf); @@ -761,13 +743,13 @@ public class HttpServer implements FilterContainer { /** * Add default servlets. */ - protected void addDefaultServlets( - ContextHandlerCollection contexts, Configuration conf) throws IOException { + protected void addDefaultServlets(ContextHandlerCollection contexts, Configuration conf) + throws IOException { // set up default servlets addPrivilegedServlet("stacks", "/stacks", StackServlet.class); addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class); - // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's - // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. + // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's + // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. // Remove when we drop support for hbase on hadoop2.x. try { Class clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet"); @@ -796,15 +778,15 @@ public class HttpServer implements FilterContainer { genCtx.setDisplayName("prof-output-hbase"); } else { addUnprivilegedServlet("prof", "/prof", ProfileServlet.DisabledServlet.class); - LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + - "not specified. Disabling /prof endpoint."); + LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + + "not specified. 
Disabling /prof endpoint."); } } /** - * Set a value in the webapp context. These values are available to the jsp - * pages as "application.getAttribute(name)". - * @param name The name of the attribute + * Set a value in the webapp context. These values are available to the jsp pages as + * "application.getAttribute(name)". + * @param name The name of the attribute * @param value The value of the attribute */ public void setAttribute(String name, Object value) { @@ -814,12 +796,10 @@ public class HttpServer implements FilterContainer { /** * Add a Jersey resource package. * @param packageName The Java package name containing the Jersey resource. - * @param pathSpec The path spec for the servlet + * @param pathSpec The path spec for the servlet */ - public void addJerseyResourcePackage(final String packageName, - final String pathSpec) { - LOG.info("addJerseyResourcePackage: packageName=" + packageName - + ", pathSpec=" + pathSpec); + public void addJerseyResourcePackage(final String packageName, final String pathSpec) { + LOG.info("addJerseyResourcePackage: packageName=" + packageName + ", pathSpec=" + pathSpec); ResourceConfig application = new ResourceConfig().packages(packageName); final ServletHolder sh = new ServletHolder(new ServletContainer(application)); @@ -828,23 +808,23 @@ public class HttpServer implements FilterContainer { /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user - * can interact with the servlet added by this method. - * @param name The name of the servlet (can be passed as null) + * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user can + * interact with the servlet added by this method. + * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet - * @param clazz The servlet class + * @param clazz The servlet class */ public void addUnprivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { addServletWithAuth(name, pathSpec, clazz, false); } /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user - * can interact with the servlet added by this method. + * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user can + * interact with the servlet added by this method. * @param pathSpec The path spec for the servlet - * @param holder The servlet holder + * @param holder The servlet holder */ public void addUnprivilegedServlet(String pathSpec, ServletHolder holder) { addServletWithAuth(pathSpec, holder, false); @@ -856,15 +836,14 @@ public class HttpServer implements FilterContainer { * who are identified as administrators can interact with the servlet added by this method. */ public void addPrivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { addServletWithAuth(name, pathSpec, clazz, true); } /** * Adds a servlet in the server that only administrators can access. This method differs from - * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those - * authenticated user who are identified as administrators can interact with the servlet added by - * this method. + * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those authenticated user + * who are identified as administrators can interact with the servlet added by this method. 
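The distinction the Javadoc above draws between the two registration methods is easiest to see side by side. A minimal sketch, assuming the Builder's terminal build() call used elsewhere in HBase (it does not appear in these hunks); the servlet is a throwaway placeholder and the named webapp directory must exist on the classpath:

import java.net.URI;
import javax.servlet.http.HttpServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.http.HttpServer;

public class ServletAuthzSketch {
  /** Placeholder servlet; any HttpServlet subclass works here. */
  public static class PingServlet extends HttpServlet {
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HttpServer server = new HttpServer.Builder()
      .setAppDir("master")                          // assumed webapp name on the classpath
      .addEndpoint(URI.create("http://0.0.0.0:0"))  // port 0: listen on any free port
      .setConf(conf)
      .build();                                     // assumed terminal call
    // Reachable by any authenticated user.
    server.addUnprivilegedServlet("ping", "/ping", PingServlet.class);
    // Reachable only by users who pass the admin ACL check.
    server.addPrivilegedServlet("dump", "/dump", PingServlet.class);
    server.start();
    server.stop();
  }
}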
*/ public void addPrivilegedServlet(String pathSpec, ServletHolder holder) { addServletWithAuth(pathSpec, holder, true); @@ -875,8 +854,8 @@ public class HttpServer implements FilterContainer { * directly, but invoke it via {@link #addUnprivilegedServlet(String, String, Class)} or * {@link #addPrivilegedServlet(String, String, Class)}. */ - void addServletWithAuth(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addServletWithAuth(String name, String pathSpec, Class clazz, + boolean requireAuthz) { addInternalServlet(name, pathSpec, clazz, requireAuthz); addFilterPathMapping(pathSpec, webAppContext); } @@ -892,20 +871,17 @@ public class HttpServer implements FilterContainer { } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * - * @param name The name of the {@link Servlet} (can be passed as null) - * @param pathSpec The path spec for the {@link Servlet} - * @param clazz The {@link Servlet} class + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. + * @param name The name of the {@link Servlet} (can be passed as null) + * @param pathSpec The path spec for the {@link Servlet} + * @param clazz The {@link Servlet} class * @param requireAuthz Require Kerberos authenticate to access servlet */ - void addInternalServlet(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addInternalServlet(String name, String pathSpec, Class clazz, + boolean requireAuthz) { ServletHolder holder = new ServletHolder(clazz); if (name != null) { holder.setName(name); @@ -914,15 +890,12 @@ public class HttpServer implements FilterContainer { } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * - * @param pathSpec The path spec for the {@link Servlet} - * @param holder The object providing the {@link Servlet} instance + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. 
+ * @param pathSpec The path spec for the {@link Servlet} + * @param holder The object providing the {@link Servlet} instance * @param requireAuthz Require Kerberos authenticate to access servlet */ void addInternalServlet(String pathSpec, ServletHolder holder, boolean requireAuthz) { @@ -944,15 +917,15 @@ public class HttpServer implements FilterContainer { public void addFilter(String name, String classname, Map parameters) { final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + webAppContext.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + webAppContext.getDisplayName()); final String[] ALL_URLS = { "/*" }; for (Map.Entry e : defaultContexts.entrySet()) { if (e.getValue()) { ServletContextHandler handler = e.getKey(); defineFilter(handler, name, classname, parameters, ALL_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + handler.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + handler.getDisplayName()); } } filterNames.add(name); @@ -971,8 +944,8 @@ public class HttpServer implements FilterContainer { /** * Define a filter for a context and set up default url mappings. */ - public static void defineFilter(ServletContextHandler handler, String name, - String classname, Map parameters, String[] urls) { + public static void defineFilter(ServletContextHandler handler, String name, String classname, + Map parameters, String[] urls) { FilterHolder holder = new FilterHolder(); holder.setName(name); holder.setClassName(classname); @@ -988,12 +961,11 @@ public class HttpServer implements FilterContainer { /** * Add the path spec to the filter path mapping. - * @param pathSpec The path spec + * @param pathSpec The path spec * @param webAppCtx The WebApplicationContext to add to */ - protected void addFilterPathMapping(String pathSpec, - WebAppContext webAppCtx) { - for(String name : filterNames) { + protected void addFilterPathMapping(String pathSpec, WebAppContext webAppCtx) { + for (String name : filterNames) { FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); fmap.setFilterName(name); @@ -1011,7 +983,7 @@ public class HttpServer implements FilterContainer { return webAppContext.getAttribute(name); } - public WebAppContext getWebAppContext(){ + public WebAppContext getWebAppContext() { return this.webAppContext; } @@ -1029,8 +1001,7 @@ public class HttpServer implements FilterContainer { URL url = getClass().getClassLoader().getResource(webapps + "/" + appName); if (url == null) { - throw new FileNotFoundException(webapps + "/" + appName - + " not found in CLASSPATH"); + throw new FileNotFoundException(webapps + "/" + appName + " not found in CLASSPATH"); } String urlString = url.toString(); @@ -1044,14 +1015,13 @@ public class HttpServer implements FilterContainer { */ @Deprecated public int getPort() { - return ((ServerConnector)webServer.getConnectors()[0]).getLocalPort(); + return ((ServerConnector) webServer.getConnectors()[0]).getLocalPort(); } /** * Get the address that corresponds to a particular connector. - * - * @return the corresponding address for the connector, or null if there's no - * such connector or the connector is not bounded. + * @return the corresponding address for the connector, or null if there's no such connector or + * the connector is not bounded. 
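Because getConnectorAddress may return null (no connector at that index, or not bound yet), callers normally read it only after start(); this is also how the actual port is discovered when an endpoint was registered with port 0. A short hedged sketch:

import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.http.HttpServer;

public final class ConnectorAddressSketch {
  /**
   * Returns "host:port" of the first connector, or null in the two cases the Javadoc
   * above calls out: no such connector, or the connector is not bound.
   */
  public static String boundAddress(HttpServer server) {
    InetSocketAddress addr = server.getConnectorAddress(0);
    return addr == null ? null : addr.getHostName() + ":" + addr.getPort();
  }

  private ConnectorAddressSketch() {
  }
}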
*/ public InetSocketAddress getConnectorAddress(int index) { Preconditions.checkArgument(index >= 0); @@ -1060,7 +1030,7 @@ public class HttpServer implements FilterContainer { return null; } - ServerConnector c = (ServerConnector)webServer.getConnectors()[index]; + ServerConnector c = (ServerConnector) webServer.getConnectors()[index]; if (c.getLocalPort() == -1 || c.getLocalPort() == -2) { // -1 if the connector has not been opened // -2 if it has been closed @@ -1079,14 +1049,14 @@ public class HttpServer implements FilterContainer { pool.setMaxThreads(max); } - private void initSpnego(Configuration conf, String hostName, - String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey, - String signatureSecretKeyFileKey) throws IOException { + private void initSpnego(Configuration conf, String hostName, String usernameConfKey, + String keytabConfKey, String kerberosNameRuleKey, String signatureSecretKeyFileKey) + throws IOException { Map params = new HashMap<>(); String principalInConf = getOrEmptyString(conf, usernameConfKey); if (!principalInConf.isEmpty()) { - params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal( - principalInConf, hostName)); + params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, + SecurityUtil.getServerPrincipal(principalInConf, hostName)); } String httpKeytab = getOrEmptyString(conf, keytabConfKey); if (!httpKeytab.isEmpty()) { @@ -1098,30 +1068,34 @@ public class HttpServer implements FilterContainer { } String signatureSecretKeyFile = getOrEmptyString(conf, signatureSecretKeyFileKey); if (!signatureSecretKeyFile.isEmpty()) { - params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, - signatureSecretKeyFile); + params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, signatureSecretKeyFile); } params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); // Verify that the required options were provided - if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) || - isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) { - throw new IllegalArgumentException(usernameConfKey + " and " - + keytabConfKey + " are both required in the configuration " + if ( + isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) + || isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX)) + ) { + throw new IllegalArgumentException( + usernameConfKey + " and " + keytabConfKey + " are both required in the configuration " + "to enable SPNEGO/Kerberos authentication for the Web UI"); } - if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY, - HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) { - //Copy/rename standard hadoop proxyuser settings to filter - for(Map.Entry proxyEntry : - conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { - params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), - proxyEntry.getValue()); - } - addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), params); + if ( + conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY, + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT) + ) { + // Copy/rename standard hadoop proxyuser settings to filter + for (Map.Entry proxyEntry : conf + .getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { + params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), + proxyEntry.getValue()); + } + addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), + params); } else 
{ - addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); + addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); } } @@ -1136,8 +1110,7 @@ public class HttpServer implements FilterContainer { } /** - * Extracts the value for the given key from the configuration of returns a string of - * zero length. + * Extracts the value for the given key from the configuration of returns a string of zero length. */ private String getOrEmptyString(Configuration conf, String key) { if (null == key) { @@ -1166,8 +1139,7 @@ public class HttpServer implements FilterContainer { Handler[] handlers = webServer.getHandlers(); for (int i = 0; i < handlers.length; i++) { if (handlers[i].isFailed()) { - throw new IOException( - "Problem in starting http server. Server handlers failed"); + throw new IOException("Problem in starting http server. Server handlers failed"); } } // Make sure there are no errors initializing the context. @@ -1176,14 +1148,13 @@ public class HttpServer implements FilterContainer { // Have to stop the webserver, or else its non-daemon threads // will hang forever. webServer.stop(); - throw new IOException("Unable to initialize WebAppContext", - unavailableException); + throw new IOException("Unable to initialize WebAppContext", unavailableException); } } catch (IOException e) { throw e; } catch (InterruptedException e) { - throw (IOException) new InterruptedIOException( - "Interrupted while starting HTTP server").initCause(e); + throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server") + .initCause(e); } catch (Exception e) { throw new IOException("Problem starting http server", e); } @@ -1216,12 +1187,12 @@ public class HttpServer implements FilterContainer { LOG.info("Jetty bound to port " + listener.getLocalPort()); break; } catch (IOException ex) { - if(!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { + if (!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { throw ex; } if (port == 0 || !findPort) { - BindException be = new BindException("Port in use: " - + listener.getHost() + ":" + listener.getPort()); + BindException be = + new BindException("Port in use: " + listener.getHost() + ":" + listener.getPort()); be.initCause(ex); throw be; } @@ -1246,9 +1217,7 @@ public class HttpServer implements FilterContainer { try { li.listener.close(); } catch (Exception e) { - LOG.error( - "Error while stopping listener for webapp" - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping listener for webapp" + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } } @@ -1258,16 +1227,15 @@ public class HttpServer implements FilterContainer { webAppContext.clearAttributes(); webAppContext.stop(); } catch (Exception e) { - LOG.error("Error while stopping web app context for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(), + e); exception = addMultiException(exception, e); } try { webServer.stop(); } catch (Exception e) { - LOG.error("Error while stopping web server for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } @@ -1278,7 +1246,7 @@ public class HttpServer implements FilterContainer { } private MultiException addMultiException(MultiException exception, Exception e) { - if(exception 
== null){ + if (exception == null) { exception = new MultiException(); } exception.add(e); @@ -1307,8 +1275,8 @@ public class HttpServer implements FilterContainer { return "Inactive HttpServer"; } else { StringBuilder sb = new StringBuilder("HttpServer (") - .append(isAlive() ? STATE_DESCRIPTION_ALIVE : - STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); + .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE) + .append("), listening at:"); for (ListenerInfo li : listeners) { ServerConnector l = li.listener; sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); @@ -1320,29 +1288,26 @@ public class HttpServer implements FilterContainer { /** * Checks the user has privileges to access to instrumentation servlets. *

      - * If hadoop.security.instrumentation.requires.admin is set to FALSE - * (default value) it always returns TRUE. - *

      - * If hadoop.security.instrumentation.requires.admin is set to TRUE - * it will check that if the current user is in the admin ACLS. If the user is - * in the admin ACLs it returns TRUE, otherwise it returns FALSE. + * If hadoop.security.instrumentation.requires.admin is set to FALSE (default value) + * it always returns TRUE. + *

      + *

      + * If hadoop.security.instrumentation.requires.admin is set to TRUE it will check + * that if the current user is in the admin ACLS. If the user is in the admin ACLs it returns + * TRUE, otherwise it returns FALSE. *

      - * * @param servletContext the servlet context. - * @param request the servlet request. - * @param response the servlet response. + * @param request the servlet request. + * @param response the servlet response. * @return TRUE/FALSE based on the logic decribed above. */ - public static boolean isInstrumentationAccessAllowed( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean isInstrumentationAccessAllowed(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); boolean access = true; - boolean adminAccess = conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - false); + boolean adminAccess = conf + .getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false); if (adminAccess) { access = hasAdministratorAccess(servletContext, request, response); } @@ -1350,44 +1315,39 @@ public class HttpServer implements FilterContainer { } /** - * Does the user sending the HttpServletRequest has the administrator ACLs? If - * it isn't the case, response will be modified to send an error to the user. - * + * Does the user sending the HttpServletRequest has the administrator ACLs? If it isn't the case, + * response will be modified to send an error to the user. * @param servletContext the {@link ServletContext} to use - * @param request the {@link HttpServletRequest} to check - * @param response used to send the error response if user does not have admin access. + * @param request the {@link HttpServletRequest} to check + * @param response used to send the error response if user does not have admin access. * @return true if admin-authorized, false otherwise * @throws IOException if an unauthenticated or unauthorized user tries to access the page */ - public static boolean hasAdministratorAccess( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean hasAdministratorAccess(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); AccessControlList acl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return hasAdministratorAccess(conf, acl, request, response); } public static boolean hasAdministratorAccess(Configuration conf, AccessControlList acl, - HttpServletRequest request, HttpServletResponse response) throws IOException { + HttpServletRequest request, HttpServletResponse response) throws IOException { // If there is no authorization, anybody has administrator access. 
- if (!conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { + if (!conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { return true; } String remoteUser = request.getRemoteUser(); if (remoteUser == null) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, - "Unauthenticated users are not " + - "authorized to access this page."); + "Unauthenticated users are not " + "authorized to access this page."); return false; } if (acl != null && !userHasAdministratorAccess(acl, remoteUser)) { - response.sendError(HttpServletResponse.SC_FORBIDDEN, "User " - + remoteUser + " is unauthorized to access this page."); + response.sendError(HttpServletResponse.SC_FORBIDDEN, + "User " + remoteUser + " is unauthorized to access this page."); return false; } @@ -1395,32 +1355,27 @@ public class HttpServer implements FilterContainer { } /** - * Get the admin ACLs from the given ServletContext and check if the given - * user is in the ACL. - * + * Get the admin ACLs from the given ServletContext and check if the given user is in the ACL. * @param servletContext the context containing the admin ACL. - * @param remoteUser the remote user to check for. - * @return true if the user is present in the ACL, false if no ACL is set or - * the user is not present + * @param remoteUser the remote user to check for. + * @return true if the user is present in the ACL, false if no ACL is set or the user is not + * present */ public static boolean userHasAdministratorAccess(ServletContext servletContext, - String remoteUser) { - AccessControlList adminsAcl = (AccessControlList) servletContext - .getAttribute(ADMINS_ACL); + String remoteUser) { + AccessControlList adminsAcl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return userHasAdministratorAccess(adminsAcl, remoteUser); } public static boolean userHasAdministratorAccess(AccessControlList acl, String remoteUser) { - UserGroupInformation remoteUserUGI = - UserGroupInformation.createRemoteUser(remoteUser); + UserGroupInformation remoteUserUGI = UserGroupInformation.createRemoteUser(remoteUser); return acl != null && acl.isUserAllowed(remoteUserUGI); } /** - * A very simple servlet to serve up a text representation of the current - * stack traces. It both returns the stacks to the caller and logs them. - * Currently the stack traces are done sequentially rather than exactly the - * same data. + * A very simple servlet to serve up a text representation of the current stack traces. It both + * returns the stacks to the caller and logs them. Currently the stack traces are done + * sequentially rather than exactly the same data. 
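The admin-gated check described in the hunks above (driven by hadoop.security.instrumentation.requires.admin) is typically the first statement of an instrumentation servlet's doGet, exactly as the StackServlet immediately below does. The same pattern as a stand-alone hedged sketch, with a hypothetical servlet:

import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.hbase.http.HttpServer;

public class GuardedInstrumentationServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest request, HttpServletResponse response)
    throws IOException {
    // When hadoop.security.instrumentation.requires.admin is true, only users in the
    // admin ACL get past this point; otherwise every caller does.
    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {
      return; // an error response has already been sent
    }
    response.setContentType("text/plain; charset=UTF-8");
    response.getWriter().println("instrumentation output goes here");
  }
}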
*/ public static class StackServlet extends HttpServlet { private static final long serialVersionUID = -6284183679759467039L; @@ -1428,13 +1383,11 @@ public class HttpServer implements FilterContainer { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } response.setContentType("text/plain; charset=UTF-8"); - try (PrintStream out = new PrintStream( - response.getOutputStream(), false, "UTF-8")) { + try (PrintStream out = new PrintStream(response.getOutputStream(), false, "UTF-8")) { Threads.printThreadInfo(out, ""); out.flush(); } @@ -1443,9 +1396,9 @@ public class HttpServer implements FilterContainer { } /** - * A Servlet input filter that quotes all HTML active characters in the - * parameter names and values. The goal is to quote the characters to make - * all of the servlets resistant to cross-site scripting attacks. + * A Servlet input filter that quotes all HTML active characters in the parameter names and + * values. The goal is to quote the characters to make all of the servlets resistant to cross-site + * scripting attacks. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static class QuotingInputFilter implements Filter { @@ -1453,6 +1406,7 @@ public class HttpServer implements FilterContainer { public static class RequestQuoter extends HttpServletRequestWrapper { private final HttpServletRequest rawRequest; + public RequestQuoter(HttpServletRequest rawRequest) { super(rawRequest); this.rawRequest = rawRequest; @@ -1464,8 +1418,8 @@ public class HttpServer implements FilterContainer { @Override public Enumeration getParameterNames() { return new Enumeration() { - private Enumeration rawIterator = - rawRequest.getParameterNames(); + private Enumeration rawIterator = rawRequest.getParameterNames(); + @Override public boolean hasMoreElements() { return rawIterator.hasMoreElements(); @@ -1483,8 +1437,8 @@ public class HttpServer implements FilterContainer { */ @Override public String getParameter(String name) { - return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter( - HtmlQuoting.unquoteHtmlChars(name))); + return HtmlQuoting + .quoteHtmlChars(rawRequest.getParameter(HtmlQuoting.unquoteHtmlChars(name))); } @Override @@ -1495,7 +1449,7 @@ public class HttpServer implements FilterContainer { return null; } String[] result = new String[unquoteValue.length]; - for(int i=0; i < result.length; ++i) { + for (int i = 0; i < result.length; ++i) { result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]); } return result; @@ -1505,10 +1459,10 @@ public class HttpServer implements FilterContainer { public Map getParameterMap() { Map result = new HashMap<>(); Map raw = rawRequest.getParameterMap(); - for (Map.Entry item: raw.entrySet()) { + for (Map.Entry item : raw.entrySet()) { String[] rawValue = item.getValue(); String[] cookedValue = new String[rawValue.length]; - for(int i=0; i< rawValue.length; ++i) { + for (int i = 0; i < rawValue.length; ++i) { cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]); } result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue); @@ -1517,18 +1471,16 @@ public class HttpServer implements FilterContainer { } /** - * Quote the url so that users specifying the HOST HTTP header - * can't inject attacks. 
+ * Quote the url so that users specifying the HOST HTTP header can't inject attacks. */ @Override - public StringBuffer getRequestURL(){ + public StringBuffer getRequestURL() { String url = rawRequest.getRequestURL().toString(); return new StringBuffer(HtmlQuoting.quoteHtmlChars(url)); } /** - * Quote the server name so that users specifying the HOST HTTP header - * can't inject attacks. + * Quote the server name so that users specifying the HOST HTTP header can't inject attacks. */ @Override public String getServerName() { @@ -1546,12 +1498,9 @@ public class HttpServer implements FilterContainer { } @Override - public void doFilter(ServletRequest request, - ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { - HttpServletRequestWrapper quoted = - new RequestQuoter((HttpServletRequest) request); + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + HttpServletRequestWrapper quoted = new RequestQuoter((HttpServletRequest) request); HttpServletResponse httpResponse = (HttpServletResponse) response; String mime = inferMimeType(request); @@ -1570,11 +1519,11 @@ public class HttpServer implements FilterContainer { } /** - * Infer the mime type for the response based on the extension of the request - * URI. Returns null if unknown. + * Infer the mime type for the response based on the extension of the request URI. Returns null + * if unknown. */ private String inferMimeType(ServletRequest request) { - String path = ((HttpServletRequest)request).getRequestURI(); + String path = ((HttpServletRequest) request).getRequestURI(); ServletContext context = config.getServletContext(); return context.getMimeType(path); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java index 94269719aa4..686f0861f25 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,11 +31,11 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.util.security.Constraint; public final class HttpServerUtil { /** * Add constraints to a Jetty Context to disallow undesirable Http methods. 
- * @param ctxHandler The context to modify + * @param ctxHandler The context to modify * @param allowOptionsMethod if true then OPTIONS method will not be set in constraint mapping */ public static void constrainHttpMethods(ServletContextHandler ctxHandler, - boolean allowOptionsMethod) { + boolean allowOptionsMethod) { Constraint c = new Constraint(); c.setAuthenticate(true); @@ -59,5 +59,6 @@ public final class HttpServerUtil { ctxHandler.setSecurityHandler(securityHandler); } - private HttpServerUtil() {} + private HttpServerUtil() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 8b13e2b2205..c44222b8334 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -27,16 +27,15 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/stacks/" -> points to stack trace - * "/static/" -> points to common static files (src/hbase-webapps/static) - * "/" -> the jsp server code from (src/hbase-webapps/<name>) + * Create a Jetty embedded server to answer http requests. The primary goal is to serve up status + * information for the server. There are three contexts: "/stacks/" -> points to stack trace + * "/static/" -> points to common static files (src/hbase-webapps/static) "/" -> the jsp + * server code from (src/hbase-webapps/<name>) */ @InterfaceAudience.Private public class InfoServer { @@ -44,38 +43,38 @@ public class InfoServer { private final org.apache.hadoop.hbase.http.HttpServer httpServer; /** - * Create a status server on the given port. - * The jsp scripts are taken from src/hbase-webapps/name. - * @param name The name of the server + * Create a status server on the given port. The jsp scripts are taken from + * src/hbase-webapps/name. + * @param name The name of the server * @param bindAddress address to bind to - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port. - * @param c the {@link Configuration} to build the server + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port. + * @param c the {@link Configuration} to build the server * @throws IOException if getting one of the password fails or the server cannot be created */ public InfoServer(String name, String bindAddress, int port, boolean findPort, - final Configuration c) throws IOException { + final Configuration c) throws IOException { HttpConfig httpConfig = new HttpConfig(c); - HttpServer.Builder builder = - new org.apache.hadoop.hbase.http.HttpServer.Builder(); + HttpServer.Builder builder = new org.apache.hadoop.hbase.http.HttpServer.Builder(); - builder.setName(name).addEndpoint(URI.create(httpConfig.getSchemePrefix() + - HostAndPort.fromParts(bindAddress,port).toString())). 
- setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); + builder.setName(name) + .addEndpoint(URI + .create(httpConfig.getSchemePrefix() + HostAndPort.fromParts(bindAddress, port).toString())) + .setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); String logDir = System.getProperty("hbase.log.dir"); if (logDir != null) { builder.setLogDir(logDir); } if (httpConfig.isSecure()) { - builder.keyPassword(HBaseConfiguration - .getPassword(c, "ssl.server.keystore.keypassword", null)) + builder + .keyPassword(HBaseConfiguration.getPassword(c, "ssl.server.keystore.keypassword", null)) .keyStore(c.get("ssl.server.keystore.location"), - HBaseConfiguration.getPassword(c,"ssl.server.keystore.password", null), - c.get("ssl.server.keystore.type", "jks")) + HBaseConfiguration.getPassword(c, "ssl.server.keystore.password", null), + c.get("ssl.server.keystore.type", "jks")) .trustStore(c.get("ssl.server.truststore.location"), - HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), - c.get("ssl.server.truststore.type", "jks")); + HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), + c.get("ssl.server.truststore.type", "jks")); builder.excludeCiphers(c.get("ssl.server.exclude.cipher.list")); } // Enable SPNEGO authentication @@ -83,8 +82,7 @@ public class InfoServer { builder.setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY) - .setSignatureSecretFileKey( - HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) + .setSignatureSecretFileKey(HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) .setSecurityEnabled(true); // Set an admin ACL on sensitive webUI endpoints @@ -95,13 +93,13 @@ public class InfoServer { } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. return new AccessControlList("*", null); @@ -111,17 +109,14 @@ public class InfoServer { /** * Explicitly invoke {@link #addPrivilegedServlet(String, String, Class)} or - * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. - * This method will add a servlet which any authenticated user can access. - * + * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. This method will + * add a servlet which any authenticated user can access. * @deprecated Use {@link #addUnprivilegedServlet(String, String, Class)} or - * {@link #addPrivilegedServlet(String, String, Class)} instead of this - * method which does not state outwardly what kind of authz rules will - * be applied to this servlet. + * {@link #addPrivilegedServlet(String, String, Class)} instead of this method which + * does not state outwardly what kind of authz rules will be applied to this servlet. 
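Pulling this hunk together with the SPNEGO keys defined back in HttpServer: the same configuration the builder reads here can be set programmatically before constructing the InfoServer. A hedged sketch; principal, keytab, port and the webapp name are placeholders, and the start() call is assumed from the rest of the class (only stop() appears in these hunks):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.http.InfoServer;

public class InfoServerSpnegoSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Turn on Kerberos for the web UI and point at the SPNEGO credentials.
    conf.set(HttpServer.HTTP_UI_AUTHENTICATION, "kerberos");
    conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY,
      "HTTP/_HOST@EXAMPLE.COM");                      // placeholder principal
    conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY,
      "/etc/security/keytabs/spnego.service.keytab"); // placeholder keytab path
    // Restrict sensitive UI endpoints to an explicit admin user.
    conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, "hbase-admin");

    InfoServer info = new InfoServer("master", "0.0.0.0", 16010, false, conf);
    info.start(); // assumed entry point, not shown in this patch
    info.stop();
  }
}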
*/ @Deprecated - public void addServlet(String name, String pathSpec, - Class clazz) { + public void addServlet(String name, String pathSpec, Class clazz) { addUnprivilegedServlet(name, pathSpec, clazz); } @@ -130,7 +125,7 @@ public class InfoServer { * @see HttpServer#addUnprivilegedServlet(String, String, Class) */ public void addUnprivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addUnprivilegedServlet(name, pathSpec, clazz); } @@ -150,7 +145,7 @@ public class InfoServer { * @see HttpServer#addPrivilegedServlet(String, String, Class) */ public void addPrivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addPrivilegedServlet(name, pathSpec, clazz); } @@ -175,21 +170,22 @@ public class InfoServer { this.httpServer.stop(); } - /** * Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled, * and the requesting user is defined as an administrator. If the UI is set to readonly, this * method always returns false. */ - public static boolean canUserModifyUI( - HttpServletRequest req, ServletContext ctx, Configuration conf) { + public static boolean canUserModifyUI(HttpServletRequest req, ServletContext ctx, + Configuration conf) { if (conf.getBoolean("hbase.master.ui.readonly", false)) { return false; } String remoteUser = req.getRemoteUser(); - if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && - conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && - remoteUser != null) { + if ( + "kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) + && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) + && remoteUser != null + ) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); } return false; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java index cd49f7e16ba..0c6aaa05079 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -26,7 +25,6 @@ import javax.servlet.ServletException; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -38,8 +36,7 @@ public class NoCacheFilter implements Filter { } @Override - public void doFilter(ServletRequest req, ServletResponse res, - FilterChain chain) + public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.setHeader("Cache-Control", "no-cache"); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java index d77ea9b14ce..d92e7d009f6 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java @@ -49,7 +49,7 @@ public class ProfileOutputServlet extends DefaultServlet { // running which gets replaced by final output. If final output is not ready yet, the file size // will be <100 bytes (in all modes). if (requestedFile.length() < 100) { - LOG.info(requestedFile + " is incomplete. Sending auto-refresh header."); + LOG.info(requestedFile + " is incomplete. Sending auto-refresh header."); String refreshUrl = req.getRequestURI(); // Rebuild the query string (if we have one) if (req.getQueryString() != null) { @@ -57,8 +57,8 @@ public class ProfileOutputServlet extends DefaultServlet { } ProfileServlet.setResponseHeader(resp); resp.setHeader("Refresh", REFRESH_PERIOD + ";" + refreshUrl); - resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + - " seconds until the output file is ready. Redirecting to " + refreshUrl); + resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + + " seconds until the output file is ready. Redirecting to " + refreshUrl); } else { super.doGet(req, resp); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java index 3ba59f4b66c..c5be339c0c7 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java @@ -25,63 +25,35 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.util.ProcessUtils; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; /** - * Servlet that runs async-profiler as web-endpoint. - * Following options from async-profiler can be specified as query paramater. 
- * // -e event profiling event: cpu|alloc|lock|cache-misses etc. - * // -d duration run profiling for 'duration' seconds (integer) - * // -i interval sampling interval in nanoseconds (long) - * // -j jstackdepth maximum Java stack depth (integer) - * // -b bufsize frame buffer size (long) - * // -t profile different threads separately - * // -s simple class names instead of FQN - * // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html - * // --width px SVG width pixels (integer) - * // --height px SVG frame height pixels (integer) - * // --minwidth px skip frames smaller than px (double) - * // --reverse generate stack-reversed FlameGraph / Call tree - * Example: - * - To collect 30 second CPU profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof" - * - To collect 1 minute CPU profile of current process and output in tree format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" - * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=alloc" - * - To collect lock contention profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=lock" - * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events) - * // Perf events: - * // cpu - * // page-faults - * // context-switches - * // cycles - * // instructions - * // cache-references - * // cache-misses - * // branches - * // branch-misses - * // bus-cycles - * // L1-dcache-load-misses - * // LLC-load-misses - * // dTLB-load-misses - * // mem:breakpoint - * // trace:tracepoint - * // Java events: - * // alloc - * // lock + * Servlet that runs async-profiler as web-endpoint. Following options from async-profiler can be + * specified as query paramater. // -e event profiling event: cpu|alloc|lock|cache-misses etc. // -d + * duration run profiling for 'duration' seconds (integer) // -i interval sampling interval in + * nanoseconds (long) // -j jstackdepth maximum Java stack depth (integer) // -b bufsize frame + * buffer size (long) // -t profile different threads separately // -s simple class names instead of + * FQN // -o fmt[,fmt...] 
output format: summary|traces|flat|collapsed|svg|tree|jfr|html // --width + * px SVG width pixels (integer) // --height px SVG frame height pixels (integer) // --minwidth px + * skip frames smaller than px (double) // --reverse generate stack-reversed FlameGraph / Call tree + * Example: - To collect 30 second CPU profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof" - To collect 1 minute CPU profile of current process and output in + * tree format (html) curl "http://localhost:10002/prof?output=tree&duration=60" - To collect 30 + * second heap allocation profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof?event=alloc" - To collect lock contention profile of current process + * (returns FlameGraph svg) curl "http://localhost:10002/prof?event=lock" Following event types are + * supported (default is 'cpu') (NOTE: not all OS'es support all events) // Perf events: // cpu // + * page-faults // context-switches // cycles // instructions // cache-references // cache-misses // + * branches // branch-misses // bus-cycles // L1-dcache-load-misses // LLC-load-misses // + * dTLB-load-misses // mem:breakpoint // trace:tracepoint // Java events: // alloc // lock */ @InterfaceAudience.Private public class ProfileServlet extends HttpServlet { @@ -154,7 +126,7 @@ public class ProfileServlet extends HttpServlet { } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED", - justification = "This class is never serialized nor restored.") + justification = "This class is never serialized nor restored.") private transient Lock profilerLock = new ReentrantLock(); private transient volatile Process process; private String asyncProfilerHome; @@ -168,7 +140,7 @@ public class ProfileServlet extends HttpServlet { @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) - throws IOException { + throws IOException { if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), req, resp)) { resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED); setResponseHeader(resp); @@ -180,10 +152,11 @@ public class ProfileServlet extends HttpServlet { if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + - "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter() + .write("ASYNC_PROFILER_HOME env is not set.\n\n" + + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. 
For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } @@ -194,8 +167,8 @@ public class ProfileServlet extends HttpServlet { if (pid == null) { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write( - "'pid' query parameter unspecified or unable to determine PID of current process."); + resp.getWriter() + .write("'pid' query parameter unspecified or unable to determine PID of current process."); return; } @@ -217,9 +190,9 @@ public class ProfileServlet extends HttpServlet { int lockTimeoutSecs = 3; if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) { try { - File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + - event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() + "." + - output.name().toLowerCase()); + File outputFile = + new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + event.name().toLowerCase() + "-" + + ID_GEN.incrementAndGet() + "." + output.name().toLowerCase()); List cmd = new ArrayList<>(); cmd.add(asyncProfilerHome + PROFILER_SCRIPT); cmd.add("-e"); @@ -270,11 +243,10 @@ public class ProfileServlet extends HttpServlet { setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_ACCEPTED); String relativeUrl = "/prof-output-hbase/" + outputFile.getName(); - resp.getWriter().write( - "Started [" + event.getInternalName() + - "] profiling. This page will automatically redirect to " + - relativeUrl + " after " + duration + " seconds.\n\nCommand:\n" + - Joiner.on(" ").join(cmd)); + resp.getWriter() + .write("Started [" + event.getInternalName() + + "] profiling. This page will automatically redirect to " + relativeUrl + " after " + + duration + " seconds.\n\nCommand:\n" + Joiner.on(" ").join(cmd)); // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified // via url param @@ -290,10 +262,10 @@ public class ProfileServlet extends HttpServlet { } else { setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - resp.getWriter().write( - "Unable to acquire lock. Another instance of profiler might be running."); - LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + - " seconds. Another instance of profiler might be running."); + resp.getWriter() + .write("Unable to acquire lock. Another instance of profiler might be running."); + LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + + " seconds. Another instance of profiler might be running."); } } catch (InterruptedException e) { LOG.warn("Interrupted while acquiring profile lock.", e); @@ -307,7 +279,7 @@ public class ProfileServlet extends HttpServlet { } private Integer getInteger(final HttpServletRequest req, final String param, - final Integer defaultValue) { + final Integer defaultValue) { final String value = req.getParameter(param); if (value != null) { try { @@ -386,13 +358,14 @@ public class ProfileServlet extends HttpServlet { @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) - throws IOException { + throws IOException { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" + - "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. 
For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter() + .write("The profiler servlet was disabled at startup.\n\n" + + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java index 182a4e10996..c8456a461bb 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java @@ -17,18 +17,6 @@ */ package org.apache.hadoop.hbase.http; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.util.HttpExceptionUtils; -import org.apache.hadoop.util.StringUtils; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.security.Principal; import java.util.ArrayList; @@ -43,30 +31,32 @@ import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This file has been copied directly (changing only the package name and and the ASF license - * text format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase - * depends on doesn't have it yet - * (as of 2020 Apr 24, there is no Hadoop release that has it either). - * - * Hadoop version: - * unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 - * - * Haddop path: + * This file has been copied directly (changing only the package name and and the ASF license text + * format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase depends + * on doesn't have it yet (as of 2020 Apr 24, there is no Hadoop release that has it either). Hadoop + * version: unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 Haddop path: * hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/ - * server/ProxyUserAuthenticationFilter.java - * - * AuthenticationFilter which adds support to perform operations - * using end user instead of proxy user. Fetches the end user from - * doAs Query Parameter. 
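An illustrative sketch (not part of the patch): the proxy-user flow that ProxyUserAuthenticationFilter.doFilter implements in this file's hunks, pulled out of the servlet plumbing. The user names and address are made-up example values; the UserGroupInformation and ProxyUsers calls are the ones the filter itself uses.

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AuthorizationException;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    class DoAsFlowSketch {
      static UserGroupInformation resolveEffectiveUser(String remoteUser, String doAsUser,
          String remoteAddr) throws AuthorizationException {
        // The authenticated caller (e.g. "oozie") asks to act as the end user (e.g. "alice").
        UserGroupInformation realUser = UserGroupInformation.createRemoteUser(remoteUser);
        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(doAsUser, realUser);
        // Rejects the request unless the configured proxyuser rules allow this impersonation.
        ProxyUsers.authorize(proxyUgi, remoteAddr);
        // Downstream code now sees doAsUser as the remote user.
        return proxyUgi;
      }
    }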
+ * server/ProxyUserAuthenticationFilter.java AuthenticationFilter which adds support to perform + * operations using end user instead of proxy user. Fetches the end user from doAs Query Parameter. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ProxyUserAuthenticationFilter extends AuthenticationFilter { - private static final Logger LOG = LoggerFactory.getLogger( - ProxyUserAuthenticationFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(ProxyUserAuthenticationFilter.class); private static final String DO_AS = "doas"; public static final String PROXYUSER_PREFIX = "proxyuser"; @@ -80,19 +70,18 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter { @Override protected void doFilter(FilterChain filterChain, HttpServletRequest request, - HttpServletResponse response) throws IOException, ServletException { + HttpServletResponse response) throws IOException, ServletException { final HttpServletRequest lowerCaseRequest = toLowerCase(request); String doAsUser = lowerCaseRequest.getParameter(DO_AS); if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) { - LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", - doAsUser, request.getRemoteUser(), request.getRemoteAddr()); - UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ? - UserGroupInformation.createRemoteUser(request.getRemoteUser()) - : null; + LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", doAsUser, + request.getRemoteUser(), request.getRemoteAddr()); + UserGroupInformation requestUgi = (request.getUserPrincipal() != null) + ? UserGroupInformation.createRemoteUser(request.getRemoteUser()) + : null; if (requestUgi != null) { - requestUgi = UserGroupInformation.createProxyUser(doAsUser, - requestUgi); + requestUgi = UserGroupInformation.createProxyUser(doAsUser, requestUgi); try { ProxyUsers.authorize(requestUgi, request.getRemoteAddr()); @@ -116,7 +105,7 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter { LOG.debug("Proxy user Authentication successful"); } catch (AuthorizationException ex) { HttpExceptionUtils.createServletExceptionResponse(response, - HttpServletResponse.SC_FORBIDDEN, ex); + HttpServletResponse.SC_FORBIDDEN, ex); LOG.warn("Proxy user Authentication exception", ex); return; } @@ -126,7 +115,7 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter { } protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) - throws ServletException { + throws ServletException { Configuration conf = new Configuration(false); Enumeration names = filterConfig.getInitParameterNames(); while (names.hasMoreElements()) { @@ -140,8 +129,8 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter { } static boolean containsUpperCase(final Iterable strings) { - for(String s : strings) { - for(int i = 0; i < s.length(); i++) { + for (String s : strings) { + for (int i = 0; i < s.length(); i++) { if (Character.isUpperCase(s.charAt(i))) { return true; } @@ -151,17 +140,15 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter { } /** - * The purpose of this function is to get the doAs parameter of a http request - * case insensitively - * @param request - * @return doAs parameter if exists or null otherwise + * The purpose of this function is to get the doAs parameter of a http request case insensitively + * n * @return doAs parameter if exists or null otherwise */ - public static String getDoasFromHeader(final 
HttpServletRequest request) { + public static String getDoasFromHeader(final HttpServletRequest request) { String doas = null; final Enumeration headers = request.getHeaderNames(); - while (headers.hasMoreElements()){ + while (headers.hasMoreElements()) { String header = headers.nextElement(); - if (header.toLowerCase().equals("doas")){ + if (header.toLowerCase().equals("doas")) { doas = request.getHeader(header); break; } @@ -169,11 +156,9 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter { return doas; } - public static HttpServletRequest toLowerCase( - final HttpServletRequest request) { + public static HttpServletRequest toLowerCase(final HttpServletRequest request) { @SuppressWarnings("unchecked") - final Map original = (Map) - request.getParameterMap(); + final Map original = (Map) request.getParameterMap(); if (!containsUpperCase(original.keySet())) { return request; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java index f00f2a195af..01c8a32c62a 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,16 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

      - * http://www.apache.org/licenses/LICENSE-2.0 - *

      + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http; import java.io.IOException; @@ -37,10 +36,10 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SecurityHeadersFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(SecurityHeadersFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(SecurityHeadersFilter.class); private static final String DEFAULT_HSTS = "max-age=63072000;includeSubDomains;preload"; - private static final String DEFAULT_CSP = "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; + private static final String DEFAULT_CSP = + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; private FilterConfig filterConfig; @Override @@ -51,7 +50,7 @@ public class SecurityHeadersFilter implements Filter { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { HttpServletResponse httpResponse = (HttpServletResponse) response; httpResponse.addHeader("X-Content-Type-Options", "nosniff"); httpResponse.addHeader("X-XSS-Protection", "1; mode=block"); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java index 8f338a7af68..9c99b0ab8dc 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * This interface contains constants for configuration keys used - * in the hbase http server code. + * This interface contains constants for configuration keys used in the hbase http server code. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -34,11 +33,9 @@ public interface ServerConfigurationKeys { public static final boolean HBASE_SSL_ENABLED_DEFAULT = false; /** Enable/Disable aliases serving from jetty */ - public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = - "hbase.jetty.logs.serve.aliases"; + public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = "hbase.jetty.logs.serve.aliases"; - public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = - true; + public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = true; public static final String HBASE_HTTP_STATIC_USER = "hbase.http.staticuser.user"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java index 05ca9a3abd1..992b09191c4 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,22 +19,19 @@ package org.apache.hadoop.hbase.http.conf; import java.io.IOException; import java.io.Writer; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.http.HttpServer; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A servlet to print out the running configuration data. */ -@InterfaceAudience.LimitedPrivate({"HBase"}) +@InterfaceAudience.LimitedPrivate({ "HBase" }) @InterfaceStability.Unstable public class ConfServlet extends HttpServlet { private static final long serialVersionUID = 1L; @@ -44,21 +41,20 @@ public class ConfServlet extends HttpServlet { private static final String FORMAT_PARAM = "format"; /** - * Return the Configuration of the daemon hosting this servlet. - * This is populated when the HttpServer starts. + * Return the Configuration of the daemon hosting this servlet. This is populated when the + * HttpServer starts. 
*/ private Configuration getConfFromContext() { - Configuration conf = (Configuration)getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); assert conf != null; return conf; } @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java index fdcd34783c0..f501e164859 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.http.gson; import java.lang.reflect.Type; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.gson.JsonElement; import org.apache.hbase.thirdparty.com.google.gson.JsonPrimitive; import org.apache.hbase.thirdparty.com.google.gson.JsonSerializationContext; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java index c75113ded73..114a9aa9904 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java @@ -32,6 +32,7 @@ import javax.inject.Inject; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.javax.ws.rs.Produces; import org.apache.hbase.thirdparty.javax.ws.rs.WebApplicationException; @@ -61,15 +62,9 @@ public final class GsonMessageBodyWriter implements MessageBodyWriter { } @Override - public void writeTo( - T t, - Class type, - Type genericType, - Annotation[] annotations, - MediaType mediaType, - MultivaluedMap httpHeaders, - OutputStream entityStream - ) throws IOException, WebApplicationException { + public void writeTo(T t, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType, MultivaluedMap httpHeaders, OutputStream entityStream) + throws IOException, WebApplicationException { final Charset outputCharset = requestedCharset(mediaType); try (Writer writer = new OutputStreamWriter(entityStream, outputCharset)) { gson.toJson(t, writer); @@ -77,10 +72,8 @@ public final class GsonMessageBodyWriter implements MessageBodyWriter { } private static Charset requestedCharset(MediaType mediaType) { - return Optional.ofNullable(mediaType) - .map(MediaType::getParameters) - .map(params -> params.get("charset")) - .map(c -> { + return Optional.ofNullable(mediaType).map(MediaType::getParameters) + .map(params -> params.get("charset")).map(c -> { try { return Charset.forName(c); } catch (IllegalCharsetNameException e) { @@ -93,7 +86,6 @@ public final class GsonMessageBodyWriter implements MessageBodyWriter { logger.debug("Error while resolving Charset '{}'", c, e); return null; } - 
}) - .orElse(StandardCharsets.UTF_8); + }).orElse(StandardCharsets.UTF_8); } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java index dc3f8a7bf43..e617fd7a41a 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.http.jersey; import java.io.IOException; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerRequestContext; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerResponseContext; @@ -34,10 +35,8 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.Response.Status; public class ResponseEntityMapper implements ContainerResponseFilter { @Override - public void filter( - ContainerRequestContext requestContext, - ContainerResponseContext responseContext - ) throws IOException { + public void filter(ContainerRequestContext requestContext, + ContainerResponseContext responseContext) throws IOException { /* * Follows very loosely the top-level document specification described in by JSON API. Only * handles 200 response codes; leaves room for errors and other response types. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java index 57a7e930905..0c7b869fece 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.http.jersey; import java.util.function.Supplier; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.org.glassfish.hk2.api.Factory; /** @@ -34,9 +35,12 @@ public class SupplierFactoryAdapter implements Factory { this.supplier = supplier; } - @Override public T provide() { + @Override + public T provide() { return supplier.get(); } - @Override public void dispose(T instance) { } + @Override + public void dispose(T instance) { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java index 7e3a79df9d3..a48bf045239 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http.jmx; import java.io.IOException; @@ -43,26 +43,21 @@ import org.slf4j.LoggerFactory; /** * Provides Read only web access to JMX. *

      - * This servlet generally will be placed under the /jmx URL for each - * HttpServer. It provides read only - * access to JMX metrics. The optional qry parameter - * may be used to query only a subset of the JMX Beans. This query - * functionality is provided through the - * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)} - * method. + * This servlet generally will be placed under the /jmx URL for each HttpServer. It provides read + * only access to JMX metrics. The optional qry parameter may be used to query only a + * subset of the JMX Beans. This query functionality is provided through the + * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)} method. *

      *

      - * For example http://.../jmx?qry=Hadoop:* will return - * all hadoop metrics exposed through JMX. + * For example http://.../jmx?qry=Hadoop:* will return all hadoop metrics exposed + * through JMX. *

      *

      - * The optional get parameter is used to query an specific - * attribute of a JMX bean. The format of the URL is - * http://.../jmx?get=MXBeanName::AttributeName + * The optional get parameter is used to query a specific attribute of a JMX bean. The + * format of the URL is http://.../jmx?get=MXBeanName::AttributeName *

      *

      - * For example - * + * For example * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId * will return the cluster id of the namenode mxbean. *

      @@ -72,8 +67,7 @@ import org.slf4j.LoggerFactory; * http://.../jmx?get=MXBeanName::*[RegExp1],*[RegExp2] *

      *

      - * For example - * + * For example *

      * http://../jmx?get=Hadoop:service=HBase,name=RegionServer,sub=Tables::[a-zA-z_0-9]*memStoreSize *

      @@ -82,17 +76,19 @@ import org.slf4j.LoggerFactory; *

      *
      *

      - * If the qry or the get parameter is not formatted - * correctly then a 400 BAD REQUEST http response code will be returned. + * If the qry or the get parameter is not formatted correctly then a 400 + * BAD REQUEST http response code will be returned. *

      *

      - * If a resouce such as a mbean or attribute can not be found, - * a 404 SC_NOT_FOUND http response code will be returned. + * If a resource such as an mbean or attribute can not be found, a 404 SC_NOT_FOUND http response code + * will be returned. *

      *

      * The return format is JSON and in the form *

      - *
      
      + *
      + * 
      + * 
        *  {
        *    "beans" : [
        *      {
      @@ -101,28 +97,18 @@ import org.slf4j.LoggerFactory;
        *      }
        *    ]
        *  }
      - *  
      - *

      - * The servlet attempts to convert the the JMXBeans into JSON. Each - * bean's attributes will be converted to a JSON object member. - * - * If the attribute is a boolean, a number, a string, or an array - * it will be converted to the JSON equivalent. - * - * If the value is a {@link CompositeData} then it will be converted - * to a JSON object with the keys as the name of the JSON member and - * the value is converted following these same rules. - * - * If the value is a {@link TabularData} then it will be converted - * to an array of the {@link CompositeData} elements that it contains. - * - * All other objects will be converted to a string and output as such. - * - * The bean's name and modelerType will be returned for all beans. - * - * Optional paramater "callback" should be used to deliver JSONP response. + * + *

      + *

      + * The servlet attempts to convert the JMXBeans into JSON. Each bean's attributes will be + * converted to a JSON object member. If the attribute is a boolean, a number, a string, or an array + * it will be converted to the JSON equivalent. If the value is a {@link CompositeData} then it will + * be converted to a JSON object with the keys as the name of the JSON member and the value is + * converted following these same rules. If the value is a {@link TabularData} then it will be + * converted to an array of the {@link CompositeData} elements that it contains. All other objects + * will be converted to a string and output as such. The bean's name and modelerType will be + * returned for all beans. Optional parameter "callback" should be used to deliver JSONP response. *

      - * */ @InterfaceAudience.Private public class JMXJsonServlet extends HttpServlet { @@ -156,12 +142,8 @@ public class JMXJsonServlet extends HttpServlet { } /** - * Process a GET request for the specified resource. - * - * @param request - * The servlet request we are processing - * @param response - * The servlet response we are creating + * Process a GET request for the specified resource. n * The servlet request we are processing n * + * The servlet response we are creating */ @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { @@ -199,8 +181,10 @@ public class JMXJsonServlet extends HttpServlet { response.setStatus(HttpServletResponse.SC_BAD_REQUEST); return; } - if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), - splitStrings[1], description) != 0) { + if ( + beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), splitStrings[1], + description) != 0 + ) { beanWriter.flush(); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); } @@ -237,10 +221,9 @@ public class JMXJsonServlet extends HttpServlet { } /** - * Verifies that the callback property, if provided, is purely alphanumeric. - * This prevents a malicious callback name (that is javascript code) from being - * returned by the UI to an unsuspecting user. - * + * Verifies that the callback property, if provided, is purely alphanumeric. This prevents a + * malicious callback name (that is javascript code) from being returned by the UI to an + * unsuspecting user. * @param callbackName The callback name, can be null. * @return The callback name * @throws IOException If the name is disallowed. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java index 72cedddd686..a11ad268ec1 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import static org.apache.hadoop.hbase.http.ServerConfigurationKeys.HBASE_HTTP_ST import java.io.IOException; import java.security.Principal; import java.util.HashMap; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -32,7 +31,6 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.http.FilterContainer; @@ -42,8 +40,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) - * so that the web UI is usable for a secure cluster without authentication. + * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) so that the web UI + * is usable for a secure cluster without authentication. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class StaticUserWebFilter extends FilterInitializer { @@ -53,17 +51,21 @@ public class StaticUserWebFilter extends FilterInitializer { static class User implements Principal { private final String name; + public User(String name) { this.name = name; } + @Override public String getName() { return name; } + @Override public int hashCode() { return name.hashCode(); } + @Override public boolean equals(Object other) { if (other == this) { @@ -73,6 +75,7 @@ public class StaticUserWebFilter extends FilterInitializer { } return ((User) other).name.equals(name); } + @Override public String toString() { return name; @@ -90,20 +93,19 @@ public class StaticUserWebFilter extends FilterInitializer { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) request; // if the user is already authenticated, don't override it if (httpRequest.getRemoteUser() != null) { chain.doFilter(request, response); } else { - HttpServletRequestWrapper wrapper = - new HttpServletRequestWrapper(httpRequest) { + HttpServletRequestWrapper wrapper = new HttpServletRequestWrapper(httpRequest) { @Override public Principal getUserPrincipal() { return user; } + @Override public String getRemoteUser() { return username; @@ -128,9 +130,7 @@ public class StaticUserWebFilter extends FilterInitializer { String username = getUsernameFromConf(conf); options.put(HBASE_HTTP_STATIC_USER, username); - container.addFilter("static_user_filter", - StaticUserFilter.class.getName(), - options); + container.addFilter("static_user_filter", StaticUserFilter.class.getName(), options); } /** @@ -141,13 +141,12 @@ public class StaticUserWebFilter extends FilterInitializer { if (oldStyleUgi != null) { // We can't use the normal configuration deprecation mechanism here // since we need to split out the username from the configured UGI. - LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " + - HBASE_HTTP_STATIC_USER + "."); + LOG.warn( + DEPRECATED_UGI_KEY + " should not be used. Instead, use " + HBASE_HTTP_STATIC_USER + "."); String[] parts = oldStyleUgi.split(","); return parts[0]; } else { - return conf.get(HBASE_HTTP_STATIC_USER, - DEFAULT_HBASE_HTTP_STATIC_USER); + return conf.get(HBASE_HTTP_STATIC_USER, DEFAULT_HBASE_HTTP_STATIC_USER); } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java index 611316d9ec6..cec05f53bbc 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,8 +54,8 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public final class LogLevel { private static final String USAGES = "\nUsage: General options are:\n" - + "\t[-getlevel [-protocol (http|https)]\n" - + "\t[-setlevel [-protocol (http|https)]"; + + "\t[-getlevel [-protocol (http|https)]\n" + + "\t[-setlevel [-protocol (http|https)]"; public static final String PROTOCOL_HTTP = "http"; public static final String PROTOCOL_HTTPS = "https"; @@ -85,8 +85,7 @@ public final class LogLevel { } public static boolean isValidProtocol(String protocol) { - return ((protocol.equals(PROTOCOL_HTTP) || - protocol.equals(PROTOCOL_HTTPS))); + return ((protocol.equals(PROTOCOL_HTTP) || protocol.equals(PROTOCOL_HTTPS))); } static class CLI extends Configured implements Tool { @@ -114,10 +113,9 @@ public final class LogLevel { /** * Send HTTP request to the daemon. * @throws HadoopIllegalArgumentException if arguments are invalid. - * @throws Exception if unable to connect + * @throws Exception if unable to connect */ - private void sendLogLevelRequest() - throws HadoopIllegalArgumentException, Exception { + private void sendLogLevelRequest() throws HadoopIllegalArgumentException, Exception { switch (operation) { case GETLEVEL: doGetLevel(); @@ -126,13 +124,11 @@ public final class LogLevel { doSetLevel(); break; default: - throw new HadoopIllegalArgumentException( - "Expect either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Expect either -getlevel or -setlevel"); } } - public void parseArguments(String[] args) throws - HadoopIllegalArgumentException { + public void parseArguments(String[] args) throws HadoopIllegalArgumentException { if (args.length == 0) { throw new HadoopIllegalArgumentException("No arguments specified"); } @@ -149,15 +145,13 @@ public final class LogLevel { nextArgIndex = parseProtocolArgs(args, nextArgIndex); break; default: - throw new HadoopIllegalArgumentException( - "Unexpected argument " + args[nextArgIndex]); + throw new HadoopIllegalArgumentException("Unexpected argument " + args[nextArgIndex]); } } // if operation is never specified in the arguments if (operation == Operations.UNKNOWN) { - throw new HadoopIllegalArgumentException( - "Must specify either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Must specify either -getlevel or -setlevel"); } // if protocol is unspecified, set it as http. 
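An illustrative sketch (not part of the patch): the requests that the LogLevel CLI reformatted in this file's hunks ends up issuing. The host, logger name, and level below are made-up example values; the URL shapes mirror doGetLevel() and doSetLevel().

    class LogLevelUrlSketch {
      public static void main(String[] args) {
        String protocol = "http";                  // default when -protocol is omitted
        String hostName = "rs1.example.com:16030"; // hypothetical region server info port
        String className = "org.apache.hadoop.hbase.regionserver.HRegion";

        // Equivalent of: -getlevel rs1.example.com:16030 <logger>
        String getUrl = protocol + "://" + hostName + "/logLevel?log=" + className;
        // Equivalent of: -setlevel rs1.example.com:16030 <logger> DEBUG
        String setUrl = getUrl + "&level=DEBUG";

        System.out.println(getUrl);
        System.out.println(setUrl);
      }
    }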
@@ -166,8 +160,7 @@ public final class LogLevel { } } - private int parseGetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseGetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -getlevel command"); @@ -182,8 +175,7 @@ public final class LogLevel { return index + 3; } - private int parseSetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseSetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -setlevel command"); @@ -199,32 +191,27 @@ public final class LogLevel { return index + 4; } - private int parseProtocolArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseProtocolArgs(String[] args, int index) throws HadoopIllegalArgumentException { // make sure only -protocol is specified if (protocol != null) { - throw new HadoopIllegalArgumentException( - "Redundant -protocol command"); + throw new HadoopIllegalArgumentException("Redundant -protocol command"); } // check number of arguments is sufficient if (index + 1 >= args.length) { - throw new HadoopIllegalArgumentException( - "-protocol needs one parameter"); + throw new HadoopIllegalArgumentException("-protocol needs one parameter"); } // check protocol is valid protocol = args[index + 1]; if (!isValidProtocol(protocol)) { - throw new HadoopIllegalArgumentException( - "Invalid protocol: " + protocol); + throw new HadoopIllegalArgumentException("Invalid protocol: " + protocol); } return index + 2; } /** * Send HTTP request to get log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. - * @throws Exception if unable to connect + * @throws Exception if unable to connect */ private void doGetLevel() throws Exception { process(protocol + "://" + hostName + "/logLevel?log=" + className); @@ -232,20 +219,16 @@ public final class LogLevel { /** * Send HTTP request to set log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. - * @throws Exception if unable to connect + * @throws Exception if unable to connect */ private void doSetLevel() throws Exception { - process(protocol + "://" + hostName + "/logLevel?log=" + className - + "&level=" + level); + process(protocol + "://" + hostName + "/logLevel?log=" + className + "&level=" + level); } /** - * Connect to the URL. Supports HTTP and supports SPNEGO - * authentication. It falls back to simple authentication if it fails to - * initiate SPNEGO. - * + * Connect to the URL. Supports HTTP and supports SPNEGO authentication. It falls back to simple + * authentication if it fails to initiate SPNEGO. * @param url the URL address of the daemon servlet * @return a connected connection * @throws Exception if it can not establish a connection. @@ -274,8 +257,7 @@ public final class LogLevel { } /** - * Configures the client to send HTTP request to the URL. - * Supports SPENGO for authentication. + * Configures the client to send HTTP request to the URL. Supports SPENGO for authentication. 
* @param urlString URL and query string to the daemon's web UI * @throws Exception if unable to connect */ @@ -289,11 +271,12 @@ public final class LogLevel { // read from the servlet - try (InputStreamReader streamReader = - new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); - BufferedReader bufferedReader = new BufferedReader(streamReader)) { + try ( + InputStreamReader streamReader = + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); + BufferedReader bufferedReader = new BufferedReader(streamReader)) { bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)) - .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); + .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); } catch (IOException ioe) { System.err.println("" + ioe); } @@ -312,19 +295,16 @@ public final class LogLevel { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { + throws ServletException, IOException { // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { return; } // Disallow modification of the LogLevel if explicitly set to readonly - Configuration conf = (Configuration) getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); if (conf.getBoolean("hbase.master.ui.readonly", false)) { - sendError( - response, - HttpServletResponse.SC_FORBIDDEN, + sendError(response, HttpServletResponse.SC_FORBIDDEN, "Modification of HBase via the UI is disallowed in configuration."); return; } @@ -347,17 +327,13 @@ public final class LogLevel { if (logName != null) { out.println("

      Results:

      "); - out.println(MARKER - + "Submitted Log Name: " + logName + "
      "); + out.println(MARKER + "Submitted Log Name: " + logName + "
      "); Logger log = LoggerFactory.getLogger(logName); - out.println(MARKER - + "Log Class: " + log.getClass().getName() +"
      "); + out.println(MARKER + "Log Class: " + log.getClass().getName() + "
      "); if (level != null) { if (!isLogLevelChangeAllowed(logName, readOnlyLogLevels)) { - sendError( - response, - HttpServletResponse.SC_PRECONDITION_FAILED, + sendError(response, HttpServletResponse.SC_PRECONDITION_FAILED, "Modification of logger " + logName + " is disallowed in configuration."); return; } @@ -396,41 +372,41 @@ public final class LogLevel { } static final String FORMS = "
      \n" - + "
      \n" + "\n" + "
      \n" + "Actions:" + "

      " - + "

      \n" + "\n" + "\n" - + "\n" + "\n" + "\n" + "\n" + "\n" - + "\n" + "\n" + "\n" + "\n" - + "\n" + "\n" + "\n" - + "\n" + "
      \n" - + "\n" + "\n" - + "\n" + "" - + "Get the current log level for the specified log name." + "
      \n" - + "\n" + "\n" - + "\n" - + "\n" + "" - + "Set the specified log level for the specified log name." + "
      \n" + "
      \n" + "

      \n" + "
      \n"; + + "
      \n" + "\n" + "
      \n" + "Actions:" + "

      " + "

      \n" + + "\n" + "\n" + "\n" + + "\n" + "\n" + "\n" + "\n" + + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + + "\n" + "
      \n" + + "\n" + "\n" + + "\n" + "" + + "Get the current log level for the specified log name." + "
      \n" + + "\n" + "\n" + + "\n" + + "\n" + "" + + "Set the specified log level for the specified log name." + "
      \n" + "
      \n" + "

      \n" + "
      \n"; private static void process(Logger logger, String levelName, PrintWriter out) { if (levelName != null) { try { Log4jUtils.setLogLevel(logger.getName(), levelName); - out.println(MARKER + "
      " + "Setting Level to " + - levelName + " ...
      " + "
      "); + out.println(MARKER + "
      " + "Setting Level to " + + levelName + " ...
      " + "
      "); } catch (IllegalArgumentException e) { - out.println(MARKER + "
      " + "Bad level : " + levelName + - "
      " + "
      "); + out.println(MARKER + "
      " + "Bad level : " + levelName + + "
      " + "
      "); } } - out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + - "
      "); + out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + + "
      "); } } - private LogLevel() {} + private LogLevel() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java index def2611b3a5..9a9761a90a4 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +27,6 @@ import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.Set; import java.util.regex.Pattern; - import javax.management.AttributeNotFoundException; import javax.management.InstanceNotFoundException; import javax.management.IntrospectionException; @@ -42,13 +42,13 @@ import javax.management.RuntimeMBeanException; import javax.management.openmbean.CompositeData; import javax.management.openmbean.CompositeType; import javax.management.openmbean.TabularData; - -import org.apache.hbase.thirdparty.com.google.gson.Gson; -import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; + /** * Utility for doing JSON and MBeans. */ @@ -67,7 +67,7 @@ public class JSONBean { void write(String key, String value) throws IOException; int write(MBeanServer mBeanServer, ObjectName qry, String attribute, boolean description) - throws IOException; + throws IOException; void flush() throws IOException; } @@ -118,7 +118,7 @@ public class JSONBean { @Override public int write(MBeanServer mBeanServer, ObjectName qry, String attribute, - boolean description) throws IOException { + boolean description) throws IOException { return JSONBean.write(jsonWriter, mBeanServer, qry, attribute, description); } }; @@ -128,7 +128,7 @@ public class JSONBean { * @return Return non-zero if failed to find bean. 
0 */ private static int write(JsonWriter writer, MBeanServer mBeanServer, ObjectName qry, - String attribute, boolean description) throws IOException { + String attribute, boolean description) throws IOException { LOG.debug("Listing beans for {}", qry); Set names = null; names = mBeanServer.queryNames(qry, null); @@ -247,7 +247,7 @@ public class JSONBean { } private static void writeAttribute(JsonWriter writer, MBeanServer mBeanServer, ObjectName oname, - boolean description, Pattern pattern[], MBeanAttributeInfo attr) throws IOException { + boolean description, Pattern pattern[], MBeanAttributeInfo attr) throws IOException { if (!attr.isReadable()) { return; } @@ -324,7 +324,7 @@ public class JSONBean { } private static void writeAttribute(JsonWriter writer, String attName, String descriptionStr, - Object value) throws IOException { + Object value) throws IOException { if (descriptionStr != null && descriptionStr.length() > 0 && !attName.equals(descriptionStr)) { writer.name(attName); writer.beginObject(); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java index 6e155ae3961..760f4c0a2b0 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -14,7 +14,7 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ + */ package org.apache.hadoop.hbase.util; import java.beans.IntrospectionException; @@ -67,8 +67,8 @@ public final class JSONMetricUtil { } public static MBeanAttributeInfo[] getMBeanAttributeInfo(ObjectName bean) - throws IntrospectionException, InstanceNotFoundException, ReflectionException, - IntrospectionException, javax.management.IntrospectionException { + throws IntrospectionException, InstanceNotFoundException, ReflectionException, + IntrospectionException, javax.management.IntrospectionException { MBeanInfo mbinfo = mbServer.getMBeanInfo(bean); return mbinfo.getAttributes(); } @@ -78,8 +78,8 @@ public final class JSONMetricUtil { try { value = mbServer.getAttribute(bean, attribute); } catch (Exception e) { - LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + - attribute + " " + e.getMessage()); + LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + attribute + + " " + e.getMessage()); } return value; } @@ -88,11 +88,11 @@ public final class JSONMetricUtil { * Returns a subset of mbeans defined by qry. Modeled after DumpRegionServerMetrics#dumpMetrics. * Example: String qry= "java.lang:type=Memory" * @throws MalformedObjectNameException if json have bad format - * @throws IOException / + * @throws IOException / * @return String representation of json array. 
*/ public static String dumpBeanToString(String qry) - throws MalformedObjectNameException, IOException { + throws MalformedObjectNameException, IOException { StringWriter sw = new StringWriter(1024 * 100); // Guess this size try (PrintWriter writer = new PrintWriter(sw)) { JSONBean dumper = new JSONBean(); @@ -107,7 +107,7 @@ public final class JSONMetricUtil { /** * Method for building map used for constructing ObjectName. Mapping is done with arrays indices - * @param keys Map keys + * @param keys Map keys * @param values Map values * @return Map or null if arrays are empty * or have different number of elements */ @@ -132,7 +132,7 @@ public final class JSONMetricUtil { } public static ObjectName buildObjectName(String domain, Hashtable keyValueTable) - throws MalformedObjectNameException { + throws MalformedObjectNameException { return new ObjectName(domain, keyValueTable); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java index 2b36c21c616..c5d1a8a47e6 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java index 7ed09468cb6..fc1d523b0ef 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java @@ -20,9 +20,7 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +31,8 @@ import org.slf4j.LoggerFactory; public final class ProcessUtils { private static Logger LOG = LoggerFactory.getLogger(ProcessUtils.class); - private ProcessUtils() { } + private ProcessUtils() { + } public static Integer getPid() { // JVM_PID is exported by bin/hbase run script diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java index 7f1223980e3..a17dbcb3d48 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http; import java.io.BufferedReader; @@ -38,8 +37,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This is a base class for functional tests of the {@link HttpServer}. - * The methods are static for other classes to import statically. + * This is a base class for functional tests of the {@link HttpServer}. The methods are static for + * other classes to import statically. 
*/ public class HttpServerFunctionalTest extends Assert { private static final Logger LOG = LoggerFactory.getLogger(HttpServerFunctionalTest.class); @@ -52,12 +51,10 @@ public class HttpServerFunctionalTest extends Assert { private static final String TEST = "test"; /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. - * + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @return the server instance - * - * @throws IOException if a problem occurs + * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ public static HttpServer createTestServer() throws IOException { @@ -66,76 +63,69 @@ public class HttpServerFunctionalTest extends Assert { } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @param conf the server configuration to use * @return the server instance - * - * @throws IOException if a problem occurs + * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf) - throws IOException { + public static HttpServer createTestServer(Configuration conf) throws IOException { prepareTestWebapp(); return createServer(TEST, conf); } public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl) - throws IOException { + throws IOException { prepareTestWebapp(); return createServer(TEST, conf, adminsAcl); } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. 
* @param conf the server configuration to use * @return the server instance - * - * @throws IOException if a problem occurs + * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createTestServer(Configuration conf, String[] pathSpecs) + throws IOException { prepareTestWebapp(); return createServer(TEST, conf, pathSpecs); } public static HttpServer createTestServerWithSecurity(Configuration conf) throws IOException { - prepareTestWebapp(); - return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) - // InfoServer normally sets these for us - .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .build(); - } - - public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, AccessControlList acl) throws IOException { prepareTestWebapp(); return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) - // InfoServer normally sets these for us - .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .setSecurityEnabled(true) - .setACL(acl) - .build(); + // InfoServer normally sets these for us + .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).build(); + } + + public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, + AccessControlList acl) throws IOException { + prepareTestWebapp(); + return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) + // InfoServer normally sets these for us + .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).setSecurityEnabled(true) + .setACL(acl).build(); } /** - * Prepare the test webapp by creating the directory from the test properties - * fail if the directory cannot be created. + * Prepare the test webapp by creating the directory from the test properties fail if the + * directory cannot be created. 
* @throws AssertionError if a condition was not met */ protected static void prepareTestWebapp() { String webapps = System.getProperty(TEST_BUILD_WEBAPPS, BUILD_WEBAPPS_DIR); - File testWebappDir = new File(webapps + - File.separatorChar + TEST); + File testWebappDir = new File(webapps + File.separatorChar + TEST); try { if (!testWebappDir.exists()) { fail("Test webapp dir " + testWebappDir.getCanonicalPath() + " missing"); } - } - catch (IOException e) { + } catch (IOException e) { } } @@ -146,12 +136,10 @@ public class HttpServerFunctionalTest extends Assert { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String host, int port) - throws IOException { + public static HttpServer createServer(String host, int port) throws IOException { prepareTestWebapp(); return new HttpServer.Builder().setName(TEST) - .addEndpoint(URI.create("http://" + host + ":" + port)) - .setFindPort(true).build(); + .addEndpoint(URI.create("http://" + host + ":" + port)).setFindPort(true).build(); } /** @@ -163,48 +151,45 @@ public class HttpServerFunctionalTest extends Assert { public static HttpServer createServer(String webapp) throws IOException { return localServerBuilder(webapp).setFindPort(true).build(); } + /** * Create an HttpServer instance for the given webapp * @param webapp the webapp to work with - * @param conf the configuration to use for the server + * @param conf the configuration to use for the server * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf) - throws IOException { + public static HttpServer createServer(String webapp, Configuration conf) throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).build(); } public static HttpServer createServer(String webapp, Configuration conf, - AccessControlList adminsAcl) throws IOException { + AccessControlList adminsAcl) throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build(); } private static Builder localServerBuilder(String webapp) { - return new HttpServer.Builder().setName(webapp).addEndpoint( - URI.create("http://localhost:0")); + return new HttpServer.Builder().setName(webapp).addEndpoint(URI.create("http://localhost:0")); } /** * Create an HttpServer instance for the given webapp - * @param webapp the webapp to work with - * @param conf the configuration to use for the server + * @param webapp the webapp to work with + * @param conf the configuration to use for the server * @param pathSpecs the paths specifications the server will service * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createServer(String webapp, Configuration conf, String[] pathSpecs) + throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs) - .build(); + .build(); } /** * Create and start a server with the test webapp - * * @return the newly started server - * - * @throws IOException on any failure + * @throws IOException on any failure * @throws AssertionError if a condition was not met */ public static HttpServer createAndStartTestServer() throws IOException { @@ -230,11 +215,9 @@ public class HttpServerFunctionalTest extends Assert { * @return a URL bonded to the base of the server * @throws 
MalformedURLException if the URL cannot be created. */ - public static URL getServerURL(HttpServer server) - throws MalformedURLException { + public static URL getServerURL(HttpServer server) throws MalformedURLException { assertNotNull("No server", server); - return new URL("http://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + return new URL("http://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); } /** @@ -297,15 +280,14 @@ public class HttpServerFunctionalTest extends Assert { URLConnection connection = url.openConnection(); connection.connect(); - try (BufferedReader in = new BufferedReader(new InputStreamReader( - connection.getInputStream(), StandardCharsets.UTF_8))){ - for(; in.readLine() != null;) { + try (BufferedReader in = new BufferedReader( + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) { + for (; in.readLine() != null;) { continue; } - } catch(IOException ioe) { + } catch (IOException ioe) { LOG.info("Got exception: ", ioe); } } - } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java index 1917655d342..06c62f03fca 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestGlobalFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGlobalFilter.class); + HBaseClassTestRule.forClass(TestGlobalFilter.class); private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class); private static final Set RECORDS = new TreeSet<>(); @@ -63,12 +63,12 @@ public class TestGlobalFilter extends HttpServerFunctionalTest { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public class TestGlobalFilter extends HttpServerFunctionalTest { /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public class TestGlobalFilter extends HttpServerFunctionalTest { public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ 
-106,14 +106,11 @@ public class TestGlobalFilter extends HttpServerFunctionalTest { final String outURL = "/static/a.out"; final String logURL = "/logs/a.log"; - final String[] urls = { - fsckURL, stacksURL, ajspURL, listPathsURL, dataURL, streamFile, rootURL, allURL, - outURL, logURL - }; + final String[] urls = { fsckURL, stacksURL, ajspURL, listPathsURL, dataURL, streamFile, rootURL, + allURL, outURL, logURL }; - //access the urls - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String url : urls) { access(prefix + url); @@ -124,7 +121,7 @@ public class TestGlobalFilter extends HttpServerFunctionalTest { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String url : urls) { assertTrue(RECORDS.remove(url)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java index 0f4c4d5d2a1..e5835fd65b6 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,13 +31,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHtmlQuoting { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHtmlQuoting.class); + HBaseClassTestRule.forClass(TestHtmlQuoting.class); - @Test public void testNeedsQuoting() throws Exception { + @Test + public void testNeedsQuoting() throws Exception { assertTrue(HtmlQuoting.needsQuoting("abcde>")); assertTrue(HtmlQuoting.needsQuoting("")); assertEquals("&&&", HtmlQuoting.quoteHtmlChars("&&&")); @@ -58,18 +60,18 @@ public class TestHtmlQuoting { } private void runRoundTrip(String str) throws Exception { - assertEquals(str, - HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); + assertEquals(str, HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); } - @Test public void testRoundtrip() throws Exception { + @Test + public void testRoundtrip() throws Exception { runRoundTrip(""); runRoundTrip("<>&'\""); runRoundTrip("ab>cd params = request.getParameterMap(); SortedSet keys = new TreeSet<>(params.keySet()); - for(String key: keys) { + for (String key : keys) { out.print(key); out.print(':'); String[] values = params.get(key); if (values.length > 0) { out.print(values[0]); - for(int i=1; i < values.length; ++i) { + for (int i = 1; i < values.length; ++i) { out.print(','); out.print(values[i]); } @@ -120,15 +122,14 @@ public class TestHttpServer extends HttpServerFunctionalTest { @SuppressWarnings("serial") public static class EchoServlet extends HttpServlet { @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException { + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { PrintWriter out = response.getWriter(); SortedSet sortedKeys = new TreeSet<>(); Enumeration keys = request.getParameterNames(); - while(keys.hasMoreElements()) { + 
while (keys.hasMoreElements()) { sortedKeys.add(keys.nextElement()); } - for(String key: sortedKeys) { + for (String key : sortedKeys) { out.print(key); out.print(':'); out.print(request.getParameter(key)); @@ -158,7 +159,8 @@ public class TestHttpServer extends HttpServerFunctionalTest { } } - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { Configuration conf = new Configuration(); conf.setInt(HttpServer.HTTP_MAX_THREADS, MAX_THREADS); server = createTestServer(conf); @@ -166,14 +168,14 @@ public class TestHttpServer extends HttpServerFunctionalTest { server.addUnprivilegedServlet("echomap", "/echomap", EchoMapServlet.class); server.addUnprivilegedServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class); server.addUnprivilegedServlet("longheader", "/longheader", LongHeaderServlet.class); - server.addJerseyResourcePackage( - JerseyResource.class.getPackage().getName(), "/jersey/*"); + server.addJerseyResourcePackage(JerseyResource.class.getPackage().getName(), "/jersey/*"); server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } @@ -192,13 +194,13 @@ public class TestHttpServer extends HttpServerFunctionalTest { ready.countDown(); try { start.await(); - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); int serverThreads = server.webServer.getThreadPool().getThreads(); - assertTrue("More threads are started than expected, Server Threads count: " - + serverThreads, serverThreads <= MAX_THREADS); - LOG.info("Number of threads = " + serverThreads + - " which is less or equal than the max = " + MAX_THREADS); + assertTrue( + "More threads are started than expected, Server Threads count: " + serverThreads, + serverThreads <= MAX_THREADS); + LOG.info("Number of threads = " + serverThreads + + " which is less or equal than the max = " + MAX_THREADS); } catch (Exception e) { // do nothing } @@ -209,31 +211,30 @@ public class TestHttpServer extends HttpServerFunctionalTest { start.countDown(); } - @Test public void testEcho() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", - readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); + @Test + public void testEcho() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc<:d\ne:>\n", readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } /** Test the echo map servlet that uses getParameterMap. */ - @Test public void testEchoMap() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); - assertEquals("a:b,>\nc<:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); + @Test + public void testEchoMap() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); + assertEquals("a:b,>\nc<:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); } /** - * Test that verifies headers can be up to 64K long. - * The test adds a 63K header leaving 1K for other headers. - * This is because the header buffer setting is for ALL headers, - * names and values included. 
*/ - @Test public void testLongHeader() throws Exception { + * Test that verifies headers can be up to 64K long. The test adds a 63K header leaving 1K for + * other headers. This is because the header buffer setting is for ALL headers, names and values + * included. + */ + @Test + public void testLongHeader() throws Exception { URL url = new URL(baseUrl, "/longheader"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); StringBuilder sb = new StringBuilder(); - for (int i = 0 ; i < 63 * 1024; i++) { + for (int i = 0; i < 63 * 1024; i++) { sb.append("a"); } conn.setRequestProperty("longheader", sb.toString()); @@ -244,14 +245,14 @@ public class TestHttpServer extends HttpServerFunctionalTest { public void testContentTypes() throws Exception { // Static CSS files should have text/css URL cssUrl = new URL(baseUrl, "/static/test.css"); - HttpURLConnection conn = (HttpURLConnection)cssUrl.openConnection(); + HttpURLConnection conn = (HttpURLConnection) cssUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/css", conn.getContentType()); // Servlets should have text/plain with proper encoding by default URL servletUrl = new URL(baseUrl, "/echo?a=b"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); @@ -259,14 +260,14 @@ public class TestHttpServer extends HttpServerFunctionalTest { // We should ignore parameters for mime types - ie a parameter // ending in .css should not change mime type servletUrl = new URL(baseUrl, "/echo?a=b.css"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); // Servlets that specify text/html should get that content type servletUrl = new URL(baseUrl, "/htmlcontent"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/html;charset=utf-8", conn.getContentType()); @@ -335,21 +336,20 @@ public class TestHttpServer extends HttpServerFunctionalTest { } /** - * Dummy filter that mimics as an authentication filter. Obtains user identity - * from the request parameter user.name. Wraps around the request so that - * request.getRemoteUser() returns the user identity. - * + * Dummy filter that mimics as an authentication filter. Obtains user identity from the request + * parameter user.name. Wraps around the request so that request.getRemoteUser() returns the user + * identity. 
*/ public static class DummyServletFilter implements Filter { @Override - public void destroy() { } + public void destroy() { + } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain filterChain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) + throws IOException, ServletException { final String userName = request.getParameter("user.name"); - ServletRequest requestModified = - new HttpServletRequestWrapper((HttpServletRequest) request) { + ServletRequest requestModified = new HttpServletRequestWrapper((HttpServletRequest) request) { @Override public String getRemoteUser() { return userName; @@ -359,12 +359,12 @@ public class TestHttpServer extends HttpServerFunctionalTest { } @Override - public void init(FilterConfig arg0) { } + public void init(FilterConfig arg0) { + } } /** * FilterInitializer that initialized the DummyFilter. - * */ public static class DummyFilterInitializer extends FilterInitializer { public DummyFilterInitializer() { @@ -377,19 +377,17 @@ public class TestHttpServer extends HttpServerFunctionalTest { } /** - * Access a URL and get the corresponding return Http status code. The URL - * will be accessed as the passed user, by sending user.name request - * parameter. - * + * Access a URL and get the corresponding return Http status code. The URL will be accessed as the + * passed user, by sending user.name request parameter. * @param urlstring The url to access - * @param userName The user to perform access as + * @param userName The user to perform access as * @return The HTTP response code * @throws IOException if there is a problem communicating with the server */ private static int getHttpStatusCode(String urlstring, String userName) throws IOException { URL url = new URL(urlstring + "?user.name=" + userName); System.out.println("Accessing " + url + " as user " + userName); - HttpURLConnection connection = (HttpURLConnection)url.openConnection(); + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.connect(); return connection.getResponseCode(); } @@ -411,9 +409,8 @@ public class TestHttpServer extends HttpServerFunctionalTest { } /** - * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics - * servlets, when authentication filters are set, but authorization is not - * enabled. + * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics servlets, when + * authentication filters are set, but authorization is not enabled. 
*/ @Test @Ignore @@ -421,46 +418,41 @@ public class TestHttpServer extends HttpServerFunctionalTest { Configuration conf = new Configuration(); // Authorization is disabled by default - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); MyGroupsProvider.mapping.put("userB", Collections.singletonList("groupB")); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } } myServer.stop(); } /** - * Verify the administrator access for /logs, /stacks, /conf, /logLevel and - * /metrics servlets. + * Verify the administrator access for /logs, /stacks, /conf, /logLevel and /metrics servlets. 
*/ @Test @Ignore public void testAuthorizationOfDefaultServlets() throws Exception { Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - true); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - true); - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); @@ -470,20 +462,19 @@ public class TestHttpServer extends HttpServerFunctionalTest { MyGroupsProvider.mapping.put("userE", Collections.singletonList("groupE")); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf) - .setACL(new AccessControlList("userA,userB groupC,groupD")).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf) + .setACL(new AccessControlList("userA,userB groupC,groupD")).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB", "userC", "userD" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, getHttpStatusCode( - serverURL + servlet, "userE")); + assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, + getHttpStatusCode(serverURL + servlet, "userE")); } myServer.stop(); } @@ -494,8 +485,8 @@ public class TestHttpServer extends HttpServerFunctionalTest { Mockito.doReturn(null).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertNull("It should return null " - + "when there are no values for the parameter", parameterValues); + Assert.assertNull("It should return null " + "when there are no values for the parameter", + parameterValues); } @Test @@ -505,16 +496,16 @@ public class TestHttpServer extends HttpServerFunctionalTest { Mockito.doReturn(values).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertTrue("It should return Parameter Values", Arrays.equals( - values, parameterValues)); + Assert.assertTrue("It should return Parameter Values", Arrays.equals(values, parameterValues)); } @SuppressWarnings("unchecked") private static Map parse(String jsonString) { - return (Map)JSON.parse(jsonString); + return (Map) JSON.parse(jsonString); } - @Test public void testJersey() throws Exception { + 
@Test + public void testJersey() throws Exception { LOG.info("BEGIN testJersey()"); final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar")); final Map m = parse(js); @@ -535,33 +526,33 @@ public class TestHttpServer extends HttpServerFunctionalTest { Mockito.when(request.getRemoteUser()).thenReturn(null); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //authorization OFF + // authorization OFF Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NULL + // authorization ON & user NULL response = Mockito.mock(HttpServletResponse.class); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NULL + // authorization ON & user NOT NULL & ACLs NULL response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getRemoteUser()).thenReturn("foo"); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs response = Mockito.mock(HttpServletResponse.class); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs response = Mockito.mock(HttpServletResponse.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(true); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(true); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); @@ -575,14 +566,14 @@ public class TestHttpServer extends HttpServerFunctionalTest { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //requires admin access to instrumentation, FALSE by default + // requires admin access to instrumentation, FALSE by default Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response)); - //requires admin access to instrumentation, TRUE + // requires admin access to instrumentation, TRUE conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. 
any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response)); } @@ -611,8 +602,7 @@ public class TestHttpServer extends HttpServerFunctionalTest { } } - private HttpServer checkBindAddress(String host, int port, boolean findPort) - throws Exception { + private HttpServer checkBindAddress(String host, int port, boolean findPort) throws Exception { HttpServer server = createServer(host, port); try { // not bound, ephemeral should return requested port (0 for ephemeral) @@ -645,14 +635,12 @@ public class TestHttpServer extends HttpServerFunctionalTest { conf.set("hbase.http.filter.xframeoptions.mode", "SAMEORIGIN"); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")) - .setFindPort(true).setConf(conf).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.addUnprivilegedServlet("echo", "/echo", EchoServlet.class); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); + String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); URL url = new URL(new URL(serverURL), "/echo?a=b&c=d"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java index ce0d6d6bc32..e517a5ffedb 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,16 +25,16 @@ import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerLifecycle extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHttpServerLifecycle.class); + HBaseClassTestRule.forClass(TestHttpServerLifecycle.class); /** - * Check that a server is alive by probing the {@link HttpServer#isAlive()} method - * and the text of its toString() description + * Check that a server is alive by probing the {@link HttpServer#isAlive()} method and the text of + * its toString() description * @param server server */ private void assertAlive(HttpServer server) { @@ -49,16 +49,17 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testCreatedServerIsNotAlive() throws Throwable { HttpServer server = createTestServer(); assertNotLive(server); } - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStopUnstartedServer() throws Throwable { HttpServer server = createTestServer(); stop(server); @@ -66,10 +67,10 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStartedServerIsAlive() throws Throwable { HttpServer server = null; server = createTestServer(); @@ -82,20 +83,20 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { /** * Assert that the result of {@link HttpServer#toString()} contains the specific text * @param server server to examine - * @param text text to search for + * @param text text to search for */ private void assertToStringContains(HttpServer server, String text) { String description = server.toString(); assertTrue("Did not find \"" + text + "\" in \"" + description + "\"", - description.contains(text)); + description.contains(text)); } /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppedServerIsNotAlive() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -105,10 +106,10 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppingTwiceServerIsAllowed() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -119,12 +120,10 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { } /** - * Test that the server is alive once started - * - * @throws Throwable - * on failure + * Test that the server is alive once started n * on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on 
occasion; see HBASE-14430") + @Test public void testWepAppContextAfterServerStop() throws Throwable { HttpServer server = null; String key = "test.attribute.key"; diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java index 11a7db2fbf0..a2916cafb3c 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.slf4j.LoggerFactory; /** * Test webapp loading */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerWebapps extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHttpServerWebapps.class); + HBaseClassTestRule.forClass(TestHttpServerWebapps.class); private static final Logger log = LoggerFactory.getLogger(TestHttpServerWebapps.class); @@ -61,8 +61,8 @@ public class TestHttpServerWebapps extends HttpServerFunctionalTest { public void testMissingServerResource() throws Throwable { try { HttpServer server = createServer("NoSuchWebapp"); - //should not have got here. - //close the server + // should not have got here. + // close the server String serverDescription = server.toString(); stop(server); fail("Expected an exception, got " + serverDescription); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java index 7737b298b6a..9cb36db20cd 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPathFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPathFilter.class); + HBaseClassTestRule.forClass(TestPathFilter.class); private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class); private static final Set RECORDS = new TreeSet<>(); @@ -62,13 +62,13 @@ public class TestPathFilter extends HttpServerFunctionalTest { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public class TestPathFilter extends HttpServerFunctionalTest { /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public class TestPathFilter extends HttpServerFunctionalTest { public void testPathSpecFilters() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); String[] pathSpecs = { "/path", "/path/*" }; HttpServer http = createTestServer(conf, pathSpecs); http.start(); @@ -105,12 +105,11 @@ public class TestPathFilter extends HttpServerFunctionalTest { final String allURL = "/*"; final String[] filteredUrls = { baseURL, baseSlashURL, addedURL, addedSlashURL, longURL }; - final String[] notFilteredUrls = {rootURL, allURL}; + final String[] notFilteredUrls = { rootURL, allURL }; // access the urls and verify our paths specs got added to the // filters - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String filteredUrl : filteredUrls) { access(prefix + filteredUrl); @@ -124,7 +123,7 @@ public class TestPathFilter extends HttpServerFunctionalTest { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String filteredUrl : filteredUrls) { assertTrue(RECORDS.remove(filteredUrl)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java index 7723e6e7887..442fc0e37fe 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,23 +22,22 @@ import static org.junit.Assert.fail; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestProfileOutputServlet { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProfileOutputServlet.class); + HBaseClassTestRule.forClass(TestProfileOutputServlet.class); @Test public void testSanitization() { - List good = Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", - "host=host-1.example.com"); + List good = + Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", "host=host-1.example.com"); for (String input : good) { assertEquals(input, ProfileOutputServlet.sanitize(input)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java index ed98a2f5a7f..48de89a6fba 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,10 +23,8 @@ import java.net.URL; import java.security.Principal; import java.security.PrivilegedExceptionAction; import java.util.Set; - import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosTicket; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; @@ -72,11 +70,11 @@ import org.slf4j.LoggerFactory; * HttpComponents to verify that the doas= mechanicsm works, and that the proxyuser settings are * observed. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProxyUserSpnegoHttpServer.class); + HBaseClassTestRule.forClass(TestProxyUserSpnegoHttpServer.class); private static final Logger LOG = LoggerFactory.getLogger(TestProxyUserSpnegoHttpServer.class); private static final String KDC_SERVER_HOST = "localhost"; @@ -94,7 +92,6 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { private static File privilegedKeytab; private static File privileged2Keytab; - @BeforeClass public static void setupServer() throws Exception { Configuration conf = new Configuration(); @@ -132,7 +129,7 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -154,14 +151,13 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { } private static void setupUser(SimpleKdcServer kdc, File keytab, String principal) - throws KrbException { + throws KrbException { kdc.createPrincipal(principal); kdc.exportPrincipal(principal, keytab); } - protected static Configuration buildSpnegoConfiguration(Configuration conf, - String serverPrincipal, File serverKeytab) { + String serverPrincipal, File serverKeytab) { KerberosName.setRules("DEFAULT"); conf.setInt(HttpServer.HTTP_MAX_THREADS, TestHttpServer.MAX_THREADS); @@ -182,13 +178,13 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ public static AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. 
return new AccessControlList("*", null); @@ -198,20 +194,23 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { @Test public void testProxyAllowed() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); } @Test public void testProxyDisallowedForUnprivileged() throws Exception { - testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 User unprivileged is unauthorized to access this page."); + testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 User unprivileged is unauthorized to access this page."); } @Test public void testProxyDisallowedForNotSudoAble() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 Forbidden"); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 Forbidden"); } - public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) throws Exception { + public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) + throws Exception { // Create the subject for the client final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(WHEEL_PRINCIPAL, wheelKeytab); final Set clientPrincipals = clientSubject.getPrincipals(); @@ -221,7 +220,7 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -231,34 +230,32 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); + HttpClientContext context = HttpClientContext.create(); + Lookup authRegistry = RegistryBuilder. 
create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); + URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); @@ -266,8 +263,8 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { if (responseCode == HttpURLConnection.HTTP_OK) { assertTrue(EntityUtils.toString(resp.getEntity()).trim().contains("a:b")); } else { - assertTrue(resp.getStatusLine().toString().contains(statusLine) || - EntityUtils.toString(resp.getEntity()).contains(statusLine)); + assertTrue(resp.getStatusLine().toString().contains(statusLine) + || EntityUtils.toString(resp.getEntity()).contains(statusLine)); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java index 2b72793a690..2271cc7b4d7 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,19 +45,18 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This testcase issues SSL certificates configures the HttpServer to serve - * HTTPS using the created certficates and calls an echo servlet using the - * corresponding HTTPS URL. + * This testcase issues SSL certificates configures the HttpServer to serve HTTPS using the created + * certficates and calls an echo servlet using the corresponding HTTPS URL. 
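// Illustrative sketch, not part of this patch: a plausible shape for the readOut(URL) helper that
// testEcho() below relies on. The method name and the clientSslFactory field come from the test
// shown in this diff; the body itself is an assumption about how the HTTPS read is typically done.
private static String readOut(URL url) throws Exception {
  HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
  // Trust the self-signed test certificate via the client-side SSLFactory.
  conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
  try (InputStream in = conn.getInputStream();
      ByteArrayOutputStream out = new ByteArrayOutputStream()) {
    IOUtils.copyBytes(in, out, 1024);
    return out.toString();
  }
}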
*/ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestSSLHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSSLHttpServer.class); + HBaseClassTestRule.forClass(TestSSLHttpServer.class); - private static final String BASEDIR = System.getProperty("test.build.dir", - "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName(); + private static final String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + + "/" + TestSSLHttpServer.class.getSimpleName(); private static final Logger LOG = LoggerFactory.getLogger(TestSSLHttpServer.class); private static Configuration serverConf; @@ -91,22 +90,20 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest { clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, clientConf); clientSslFactory.init(); - server = new HttpServer.Builder() - .setName("test") - .addEndpoint(new URI("https://localhost")) + server = new HttpServer.Builder().setName("test").addEndpoint(new URI("https://localhost")) .setConf(serverConf) - .keyPassword(HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", - null)) + .keyPassword( + HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", null)) .keyStore(serverConf.get("ssl.server.keystore.location"), HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.password", null), clientConf.get("ssl.server.keystore.type", "jks")) .trustStore(serverConf.get("ssl.server.truststore.location"), HBaseConfiguration.getPassword(serverConf, "ssl.server.truststore.password", null), - serverConf.get("ssl.server.truststore.type", "jks")).build(); + serverConf.get("ssl.server.truststore.type", "jks")) + .build(); server.addUnprivilegedServlet("echo", "/echo", TestHttpServer.EchoServlet.class); server.start(); - baseUrl = new URL("https://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + baseUrl = new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); LOG.info("HTTP server started: " + baseUrl); } @@ -121,8 +118,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest { @Test public void testEcho() throws Exception { assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, - "/echo?a=b&c<=d&e=>"))); + assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } @Test diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java index 6b9d2c341ed..006025e0a97 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,14 +37,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({HttpServerFunctionalTest.class, MediumTests.class}) +@Category({ HttpServerFunctionalTest.class, MediumTests.class }) public class TestSecurityHeadersFilter { private static URL baseUrl; private HttpServer http; @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); + HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); @After public void tearDown() throws Exception { @@ -62,28 +62,27 @@ public class TestSecurityHeadersFilter { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'X-Content-Type-Options' is missing", - conn.getHeaderField("X-Content-Type-Options"), is(not((String)null))); + conn.getHeaderField("X-Content-Type-Options"), is(not((String) null))); assertThat(conn.getHeaderField("X-Content-Type-Options"), equalTo("nosniff")); - assertThat("Header 'X-XSS-Protection' is missing", - conn.getHeaderField("X-XSS-Protection"), is(not((String)null))); + assertThat("Header 'X-XSS-Protection' is missing", conn.getHeaderField("X-XSS-Protection"), + is(not((String) null))); assertThat("Header 'X-XSS-Protection' has invalid value", - conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); + conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); - assertThat("Header 'Strict-Transport-Security' should be missing from response," + - "but it's present", - conn.getHeaderField("Strict-Transport-Security"), is((String)null)); - assertThat("Header 'Content-Security-Policy' should be missing from response," + - "but it's present", - conn.getHeaderField("Content-Security-Policy"), is((String)null)); + assertThat( + "Header 'Strict-Transport-Security' should be missing from response," + "but it's present", + conn.getHeaderField("Strict-Transport-Security"), is((String) null)); + assertThat( + "Header 'Content-Security-Policy' should be missing from response," + "but it's present", + conn.getHeaderField("Content-Security-Policy"), is((String) null)); } @Test public void testHstsAndCspSettings() throws IOException { Configuration conf = new Configuration(); - conf.set("hbase.http.filter.hsts.value", - "max-age=63072000;includeSubDomains;preload"); + conf.set("hbase.http.filter.hsts.value", "max-age=63072000;includeSubDomains;preload"); conf.set("hbase.http.filter.csp.value", - "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); http = createTestServer(conf); http.start(); baseUrl = getServerURL(http); @@ -93,15 +92,15 @@ public class TestSecurityHeadersFilter { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'Strict-Transport-Security' is missing from Rest response", - conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String)null))); + conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String) null))); assertThat("Header 'Strict-Transport-Security' has invalid value", - conn.getHeaderField("Strict-Transport-Security"), - IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); + conn.getHeaderField("Strict-Transport-Security"), + IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); assertThat("Header 'Content-Security-Policy' is missing from Rest response", - conn.getHeaderField("Content-Security-Policy"), 
Is.is(not((String)null))); + conn.getHeaderField("Content-Security-Policy"), Is.is(not((String) null))); assertThat("Header 'Content-Security-Policy' has invalid value", - conn.getHeaderField("Content-Security-Policy"), - IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + conn.getHeaderField("Content-Security-Policy"), + IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java index 1e9a2861c9e..7ea8abe066d 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -42,11 +41,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestServletFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServletFilter.class); + HBaseClassTestRule.forClass(TestServletFilter.class); private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class); private static volatile String uri = null; @@ -66,20 +65,21 @@ public class TestServletFilter extends HttpServerFunctionalTest { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - uri = ((HttpServletRequest)request).getRequestURI(); + uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); chain.doFilter(request, response); } /** Configuration for the filter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -90,22 +90,20 @@ public class TestServletFilter extends HttpServerFunctionalTest { private static void assertExceptionContains(String string, Throwable t) { String msg = t.getMessage(); - Assert.assertTrue( - "Expected to find '" + string + "' but got unexpected exception:" - + StringUtils.stringifyException(t), msg.contains(string)); + Assert.assertTrue("Expected to find '" + string + "' but got unexpected exception:" + + StringUtils.stringifyException(t), msg.contains(string)); } @Test @Ignore - //From stack + // From stack // Its a 'foreign' test, one that came in from hadoop when we copy/pasted http // It's second class. 
Could comment it out if only failing test (as per @nkeywal – sort of) public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - SimpleFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, SimpleFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ -115,23 +113,22 @@ public class TestServletFilter extends HttpServerFunctionalTest { final String logURL = "/logs/a.log"; final String hadooplogoURL = "/static/hadoop-logo.jpg"; - final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL}; + final String[] urls = { fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL }; final Random rand = ThreadLocalRandom.current(); final int[] sequence = new int[50]; - //generate a random sequence and update counts - for(int i = 0; i < sequence.length; i++) { + // generate a random sequence and update counts + for (int i = 0; i < sequence.length; i++) { sequence[i] = rand.nextInt(urls.length); } - //access the urls as the sequence - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls as the sequence + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (int aSequence : sequence) { access(prefix + urls[aSequence]); - //make sure everything except fsck get filtered + // make sure everything except fsck get filtered if (aSequence == 0) { assertNull(uri); } else { @@ -166,8 +163,7 @@ public class TestServletFilter extends HttpServerFunctionalTest { public void testServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); // start an http server with ErrorFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - ErrorFilter.Initializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, ErrorFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); try { http.start(); @@ -178,17 +174,15 @@ public class TestServletFilter extends HttpServerFunctionalTest { } /** - * Similar to the above test case, except that it uses a different API to add the - * filter. Regression test for HADOOP-8786. + * Similar to the above test case, except that it uses a different API to add the filter. + * Regression test for HADOOP-8786. */ @Test - public void testContextSpecificServletFilterWhenInitThrowsException() - throws Exception { + public void testContextSpecificServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); HttpServer http = createTestServer(conf); - HttpServer.defineFilter(http.webAppContext, - "ErrorFilter", ErrorFilter.class.getName(), - null, null); + HttpServer.defineFilter(http.webAppContext, "ErrorFilter", ErrorFilter.class.getName(), null, + null); try { http.start(); fail("expecting exception"); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java index 28e4fcf093b..825396f0d88 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,11 +69,11 @@ import org.slf4j.LoggerFactory; * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSpnegoHttpServer.class); + HBaseClassTestRule.forClass(TestSpnegoHttpServer.class); private static final Logger LOG = LoggerFactory.getLogger(TestSpnegoHttpServer.class); private static final String KDC_SERVER_HOST = "localhost"; @@ -114,7 +114,7 @@ public class TestSpnegoHttpServer extends HttpServerFunctionalTest { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -136,13 +136,13 @@ public class TestSpnegoHttpServer extends HttpServerFunctionalTest { } private static void setupUser(SimpleKdcServer kdc, File keytab, String principal) - throws KrbException { + throws KrbException { kdc.createPrincipal(principal); kdc.exportPrincipal(principal, keytab); } private static Configuration buildSpnegoConfiguration(Configuration conf, String serverPrincipal, - File serverKeytab) { + File serverKeytab) { KerberosName.setRules("DEFAULT"); conf.setInt(HttpServer.HTTP_MAX_THREADS, TestHttpServer.MAX_THREADS); @@ -174,7 +174,7 @@ public class TestSpnegoHttpServer extends HttpServerFunctionalTest { // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -184,34 +184,32 @@ public class TestSpnegoHttpServer extends HttpServerFunctionalTest { // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); + HttpClientContext context = HttpClientContext.create(); + Lookup authRegistry = RegistryBuilder. 
create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - URL url = new URL(getServerURL(server), "/echo?a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); + URL url = new URL(getServerURL(server), "/echo?a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java index ac2ef8f6649..49f5af37f4f 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,15 +44,15 @@ import org.xml.sax.InputSource; import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * Basic test case that the ConfServlet can write configuration - * to its output in XML and JSON format. + * Basic test case that the ConfServlet can write configuration to its output in XML and JSON + * format. 
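// Illustrative usage sketch, not part of this patch: ConfServlet.writeResponse(conf, writer, format)
// is the entry point the tests below exercise. The "xml" format string is taken from the test code
// in this diff; "json" is assumed to be the other accepted format.
Configuration conf = new Configuration();
conf.set("testconfservlet.key", "testval");
StringWriter xmlOut = new StringWriter();
ConfServlet.writeResponse(conf, xmlOut, "xml");   // <configuration><property>...</property>...
StringWriter jsonOut = new StringWriter();
ConfServlet.writeResponse(conf, jsonOut, "json"); // {"properties":[{"key":...,"value":...},...]}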
*/ @Category({ MiscTests.class, SmallTests.class }) public class TestConfServlet { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConfServlet.class); + HBaseClassTestRule.forClass(TestConfServlet.class); private static final String TEST_KEY = "testconfservlet.key"; private static final String TEST_VAL = "testval"; @@ -74,15 +74,14 @@ public class TestConfServlet { programSet.add("programatically"); programSet.add("programmatically"); Object parsed = JSON.parse(json); - Object[] properties = ((Map)parsed).get("properties"); + Object[] properties = ((Map) parsed).get("properties"); for (Object o : properties) { - Map propertyInfo = (Map)o; - String key = (String)propertyInfo.get("key"); - String val = (String)propertyInfo.get("value"); - String resource = (String)propertyInfo.get("resource"); + Map propertyInfo = (Map) o; + String key = (String) propertyInfo.get("key"); + String val = (String) propertyInfo.get("value"); + String resource = (String) propertyInfo.get("resource"); System.err.println("k: " + key + " v: " + val + " r: " + resource); - if (TEST_KEY.equals(key) && TEST_VAL.equals(val) - && programSet.contains(resource)) { + if (TEST_KEY.equals(key) && TEST_VAL.equals(val) && programSet.contains(resource)) { foundSetting = true; } } @@ -95,8 +94,7 @@ public class TestConfServlet { ConfServlet.writeResponse(getTestConf(), sw, "xml"); String xml = sw.toString(); - DocumentBuilderFactory docBuilderFactory - = DocumentBuilderFactory.newInstance(); + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = builder.parse(new InputSource(new StringReader(xml))); NodeList nameNodes = doc.getElementsByTagName("name"); @@ -107,7 +105,7 @@ public class TestConfServlet { System.err.println("xml key: " + key); if (TEST_KEY.equals(key)) { foundSetting = true; - Element propertyElem = (Element)nameNode.getParentNode(); + Element propertyElem = (Element) nameNode.getParentNode(); String val = propertyElem.getElementsByTagName("value").item(0).getTextContent(); assertEquals(TEST_VAL, val); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java index e907a3260b0..99b549b0ff4 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,18 +36,19 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJMXJsonServlet extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJMXJsonServlet.class); + HBaseClassTestRule.forClass(TestJMXJsonServlet.class); private static final Logger LOG = LoggerFactory.getLogger(TestJMXJsonServlet.class); private static HttpServer server; private static URL baseUrl; - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { // Eclipse doesn't pick this up correctly from the plugin // configuration in the pom. System.setProperty(HttpServerFunctionalTest.TEST_BUILD_WEBAPPS, "target/test-classes/webapps"); @@ -56,68 +57,67 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { baseUrl = getServerURL(server); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } public static void assertReFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertTrue("'"+p+"' does not match "+value, m.find()); + assertTrue("'" + p + "' does not match " + value, m.find()); } public static void assertNotFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertFalse("'"+p+"' should not match "+value, m.find()); + assertFalse("'" + p + "' should not match " + value, m.find()); } - @Test public void testQuery() throws Exception { + @Test + public void testQuery() throws Exception { String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime")); - LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory")); - LOG.info("/jmx?qry=java.lang:type=Memory RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Memory RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx")); - LOG.info("/jmx RESULT: "+result); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); // test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); // negative test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"ERROR\"", result); // test to get JSONP result result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory&callback=mycallback1")); - 
LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: " + result); assertReFind("^mycallback1\\(\\{", result); assertReFind("\\}\\);$", result); // negative test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback2\\(\\{", result); assertReFind("\"ERROR\"", result); assertReFind("\\}\\);$", result); // test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); - LOG.info("/jmx RESULT: "+result); + result = readOutput( + new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback3\\(\\{", result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -127,16 +127,15 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { @Test public void testGetPattern() throws Exception { // test to get an attribute of a mbean as JSONP - String result = readOutput( - new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[a-zA-z_]*NonHeapMemoryUsage")); + String result = + readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[a-zA-z_]*NonHeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); assertReFind("\"NonHeapMemoryUsage\"\\s*:", result); assertNotFind("\"HeapMemoryUsage\"\\s*:", result); - result = - readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -144,7 +143,7 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { assertNotFind("\"NonHeapHeapMemoryUsage\"\\s*:", result); result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::[a-zA-z_]*HeapMemoryUsage,[a-zA-z_]*NonHeapMemoryUsage")); + "/jmx?get=java.lang:type=Memory::[a-zA-z_]*HeapMemoryUsage,[a-zA-z_]*NonHeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -160,8 +159,8 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { @Test public void testDisallowedJSONPCallback() throws Exception { String callback = "function(){alert('bigproblems!')};foo"; - URL url = new URL( - baseUrl, "/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) url.openConnection(); assertEquals(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, cnxn.getResponseCode()); } @@ -169,8 +168,8 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { @Test public void testUnderscoresInJSONPCallback() throws Exception { String callback = "my_function"; - URL url = new URL( - baseUrl, 
"/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) url.openConnection(); assertEquals(HttpServletResponse.SC_OK, cnxn.getResponseCode()); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java index 39855ee86ef..eff83edfa6b 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,17 +38,17 @@ import org.junit.experimental.categories.Category; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestStaticUserWebFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStaticUserWebFilter.class); + HBaseClassTestRule.forClass(TestStaticUserWebFilter.class); private FilterConfig mockConfig(String username) { FilterConfig mock = Mockito.mock(FilterConfig.class); - Mockito.doReturn(username).when(mock).getInitParameter( - ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); + Mockito.doReturn(username).when(mock) + .getInitParameter(ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); return mock; } @@ -63,10 +63,9 @@ public class TestStaticUserWebFilter { FilterChain chain = mock(FilterChain.class); - suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), - chain); + suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), chain); - Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito.anyObject()); + Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito. anyObject()); HttpServletRequestWrapper wrapper = wrapperArg.getValue(); assertEquals("myuser", wrapper.getUserPrincipal().getName()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java index d7889ea4a3b..23beacfba74 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -263,23 +263,20 @@ public class TestLogLevel { private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, final boolean isSpnego) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, org.apache.logging.log4j.Level.DEBUG.toString()); } private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, final boolean isSpnego, final String newLevel) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, - newLevel); + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, newLevel); } /** * Run both client and server using the given protocol. - * @param bindProtocol specify either http or https for server + * @param bindProtocol specify either http or https for server * @param connectProtocol specify either http or https for client - * @param isSpnego true if SPNEGO is enabled + * @param isSpnego true if SPNEGO is enabled * @throws Exception if client can't accesss server. */ private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, @@ -334,7 +331,7 @@ public class TestLogLevel { /** * Run LogLevel command line to start a client to get log level of this test class. - * @param protocol specify either http or https + * @param protocol specify either http or https * @param authority daemon's web UI address * @throws Exception if unable to connect */ @@ -346,7 +343,7 @@ public class TestLogLevel { /** * Run LogLevel command line to start a client to set log level of this test class to debug. - * @param protocol specify either http or https + * @param protocol specify either http or https * @param authority daemon's web UI address * @throws Exception if unable to run or log level does not change as expected */ @@ -370,7 +367,8 @@ public class TestLogLevel { fail("Expected IO exception due to protected logger"); } catch (IOException e) { assertTrue(e.getMessage().contains("" + HttpServletResponse.SC_PRECONDITION_FAILED)); - assertTrue(e.getMessage().contains("Modification of logger " + protectedLogName + " is disallowed in configuration.")); + assertTrue(e.getMessage().contains( + "Modification of logger " + protectedLogName + " is disallowed in configuration.")); } } @@ -395,7 +393,7 @@ public class TestLogLevel { /** * Server runs HTTP, no SPNEGO. * @throws Exception if http client can't access http server, or http client can access https - * server. + * server. */ @Test public void testLogLevelByHttp() throws Exception { @@ -411,7 +409,7 @@ public class TestLogLevel { /** * Server runs HTTP + SPNEGO. * @throws Exception if http client can't access http server, or http client can access https - * server. + * server. */ @Test public void testLogLevelByHttpWithSpnego() throws Exception { @@ -427,7 +425,7 @@ public class TestLogLevel { /** * Server runs HTTPS, no SPNEGO. * @throws Exception if https client can't access https server, or https client can access http - * server. + * server. */ @Test public void testLogLevelByHttps() throws Exception { @@ -443,7 +441,7 @@ public class TestLogLevel { /** * Server runs HTTPS + SPNEGO. * @throws Exception if https client can't access https server, or https client can access http - * server. + * server. 
*/ @Test public void testLogLevelByHttpsWithSpnego() throws Exception { @@ -473,7 +471,7 @@ public class TestLogLevel { } t = t.getCause(); } - throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + - StringUtils.stringifyException(throwable), throwable); + throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + + StringUtils.stringifyException(throwable), throwable); } -} \ No newline at end of file +} diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java index ee900db6230..89d71b403af 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,8 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.Response; import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * A simple Jersey resource class TestHttpServer. - * The servlet simply puts the path and the op parameter in a map - * and return it in JSON format in the response. + * A simple Jersey resource class TestHttpServer. The servlet simply puts the path and the op + * parameter in a map and return it in JSON format in the response. */ @Path("") public class JerseyResource { @@ -47,11 +46,9 @@ public class JerseyResource { @GET @Path("{" + PATH + ":.*}") - @Produces({MediaType.APPLICATION_JSON}) - public Response get( - @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, - @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op - ) throws IOException { + @Produces({ MediaType.APPLICATION_JSON }) + public Response get(@PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, + @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op) throws IOException { LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op); final Map m = new TreeMap<>(); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index c201c7a5232..b3b1b3e78f0 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http.ssl; import java.io.File; @@ -63,23 +62,21 @@ public final class KeyStoreTestUtil { /** * Create a self-signed X.509 Certificate. 
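// Illustrative usage sketch, not part of this patch: generating a key pair and a self-signed
// certificate with the two helpers documented here, using the same arguments that
// setupSSLConfig(...) further down in this class passes.
KeyPair pair = KeyStoreTestUtil.generateKeyPair("RSA");
X509Certificate cert =
  KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", pair, 30, "SHA1withRSA");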
- * - * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" - * @param pair the KeyPair - * @param days how many days from now the Certificate is valid for + * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" + * @param pair the KeyPair + * @param days how many days from now the Certificate is valid for * @param algorithm the signing algorithm, eg "SHA1withRSA" * @return the self-signed certificate */ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, - String algorithm) throws CertificateEncodingException, InvalidKeyException, - IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, - SignatureException { + String algorithm) throws CertificateEncodingException, InvalidKeyException, + IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, SignatureException { Date from = new Date(); Date to = new Date(from.getTime() + days * 86400000L); BigInteger sn = new BigInteger(64, new SecureRandom()); KeyPair keyPair = pair; X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); - X500Principal dnName = new X500Principal(dn); + X500Principal dnName = new X500Principal(dn); certGen.setSerialNumber(sn); certGen.setIssuerDN(dnName); @@ -92,15 +89,13 @@ public final class KeyStoreTestUtil { return cert; } - public static KeyPair generateKeyPair(String algorithm) - throws NoSuchAlgorithmException { + public static KeyPair generateKeyPair(String algorithm) throws NoSuchAlgorithmException { KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); keyGen.initialize(1024); return keyGen.genKeyPair(); } - private static KeyStore createEmptyKeyStore() - throws GeneralSecurityException, IOException { + private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { return createEmptyKeyStore("jks"); } @@ -111,8 +106,7 @@ public final class KeyStoreTestUtil { return ks; } - private static void saveKeyStore(KeyStore ks, String filename, - String password) + private static void saveKeyStore(KeyStore ks, String filename, String password) throws GeneralSecurityException, IOException { FileOutputStream out = new FileOutputStream(filename); try { @@ -123,109 +117,93 @@ public final class KeyStoreTestUtil { } /** - * Creates a keystore with a single key and saves it to a file. - * This method will use the same password for the keystore and for the key. - * This method will always generate a keystore file in JKS format. - * - * @param filename String file to save - * @param password String store password to set on keystore - * @param alias String alias to use for the key + * Creates a keystore with a single key and saves it to a file. This method will use the same + * password for the keystore and for the key. This method will always generate a keystore file in + * JKS format. 
+ * @param filename String file to save + * @param password String store password to set on keystore + * @param alias String alias to use for the key * @param privateKey Key to save in keystore - * @param cert Certificate to use as certificate chain associated to key + * @param cert Certificate to use as certificate chain associated to key * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String alias, Key privateKey, + Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, password, alias, privateKey, cert); } /** - * Creates a keystore with a single key and saves it to a file. - * This method will always generate a keystore file in JKS format. - * - * @param filename String file to save - * @param password String store password to set on keystore + * Creates a keystore with a single key and saves it to a file. This method will always generate a + * keystore file in JKS format. + * @param filename String file to save + * @param password String store password to set on keystore * @param keyPassword String key password to set on key - * @param alias String alias to use for the key - * @param privateKey Key to save in keystore - * @param cert Certificate to use as certificate chain associated to key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String keyPassword, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String keyPassword, + String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, keyPassword, alias, privateKey, cert, "JKS"); } - /** * Creates a keystore with a single key and saves it to a file. - * - * @param filename String file to save - * @param password String store password to set on keystore - * @param keyPassword String key password to set on key - * @param alias String alias to use for the key - * @param privateKey Key to save in keystore - * @param cert Certificate to use as certificate chain associated to key + * @param filename String file to save + * @param password String store password to set on keystore + * @param keyPassword String key password to set on key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key * @param keystoreType String keystore file type (e.g. 
"JKS") * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ public static void createKeyStore(String filename, String password, String keyPassword, - String alias, Key privateKey, Certificate cert, - String keystoreType) - throws GeneralSecurityException, IOException { + String alias, Key privateKey, Certificate cert, String keystoreType) + throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(keystoreType); - ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), - new Certificate[]{cert}); + ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), new Certificate[] { cert }); saveKeyStore(ks, filename, password); } /** - * Creates a truststore with a single certificate and saves it to a file. - * This method uses the default JKS truststore type. - * + * Creates a truststore with a single certificate and saves it to a file. This method uses the + * default JKS truststore type. * @param filename String file to save * @param password String store password to set on truststore - * @param alias String alias to use for the certificate - * @param cert Certificate to add + * @param alias String alias to use for the certificate + * @param cert Certificate to add * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ - public static void createTrustStore(String filename, - String password, String alias, - Certificate cert) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, String alias, + Certificate cert) throws GeneralSecurityException, IOException { createTrustStore(filename, password, alias, cert, "JKS"); } /** * Creates a truststore with a single certificate and saves it to a file. - * - * @param filename String file to save - * @param password String store password to set on truststore - * @param alias String alias to use for the certificate - * @param cert Certificate to add + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add * @param trustStoreType String keystore file type (e.g. 
"JKS") * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ public static void createTrustStore(String filename, String password, String alias, - Certificate cert, String trustStoreType) - throws GeneralSecurityException, IOException { + Certificate cert, String trustStoreType) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(trustStoreType); ks.setCertificateEntry(alias, cert); saveKeyStore(ks, filename, password); } - public static void createTrustStore( - String filename, String password, Map certs) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, + Map certs) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(); for (Map.Entry cert : certs.entrySet()) { ks.setCertificateEntry(cert.getKey(), cert.getValue()); @@ -233,46 +211,41 @@ public final class KeyStoreTestUtil { saveKeyStore(ks, filename, password); } - public static void cleanupSSLConfig(Configuration conf) - throws Exception { + public static void cleanupSSLConfig(Configuration conf) throws Exception { File f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); f.delete(); f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); f.delete(); - String clientKeyStore = conf.get(FileBasedKeyStoresFactory - .resolvePropertyName(SSLFactory.Mode.CLIENT, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); + String clientKeyStore = + conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.CLIENT, + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); if (clientKeyStore != null) { f = new File(clientKeyStore); f.delete(); } - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_CLIENT_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); f.delete(); - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_SERVER_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_SERVER_CONF_KEY)); f.delete(); } /** - * Performs complete setup of SSL configuration in preparation for testing an - * SSLFactory. This includes keys, certs, keystores, truststores, the server - * SSL configuration file, the client SSL configuration file, and the master - * configuration file read by the SSLFactory. - * - * @param keystoresDir String directory to save keystores - * @param sslConfDir String directory to save SSL configuration files - * @param conf Configuration master configuration to be used by an SSLFactory, - * which will be mutated by this method - * @param useClientCert boolean true to make the client present a cert in the - * SSL handshake + * Performs complete setup of SSL configuration in preparation for testing an SSLFactory. 
This + * includes keys, certs, keystores, truststores, the server SSL configuration file, the client SSL + * configuration file, and the master configuration file read by the SSLFactory. + * @param keystoresDir String directory to save keystores + * @param sslConfDir String directory to save SSL configuration files + * @param conf Configuration master configuration to be used by an SSLFactory, which will + * be mutated by this method + * @param useClientCert boolean true to make the client present a cert in the SSL handshake */ - public static void setupSSLConfig(String keystoresDir, String sslConfDir, - Configuration conf, boolean useClientCert) - throws Exception { + public static void setupSSLConfig(String keystoresDir, String sslConfDir, Configuration conf, + boolean useClientCert) throws Exception { String clientKS = keystoresDir + "/clientKS.jks"; String clientPassword = "clientP"; String serverKS = keystoresDir + "/serverKS.jks"; @@ -280,39 +253,33 @@ public final class KeyStoreTestUtil { String trustKS = keystoresDir + "/trustKS.jks"; String trustPassword = "trustP"; - File sslClientConfFile = new File( - sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + HBaseCommonTestingUtility - .getRandomUUID() + ".xml"); - File sslServerConfFile = new File( - sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + HBaseCommonTestingUtility - .getRandomUUID() + ".xml"); + File sslClientConfFile = new File(sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + + HBaseCommonTestingUtility.getRandomUUID() + ".xml"); + File sslServerConfFile = new File(sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + + HBaseCommonTestingUtility.getRandomUUID() + ".xml"); Map certs = new HashMap<>(); if (useClientCert) { KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate cCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", - cKP.getPrivate(), cCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", cKP.getPrivate(), cCert); certs.put("client", cCert); } KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate sCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", - sKP.getPrivate(), sCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", sKP.getPrivate(), sCert); certs.put("server", sCert); KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs); - Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword, - clientPassword, trustKS); - Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword, - serverPassword, trustKS); + Configuration clientSSLConf = + createClientSSLConfig(clientKS, clientPassword, clientPassword, trustKS); + Configuration serverSSLConf = + createServerSSLConfig(serverKS, serverPassword, serverPassword, trustKS); saveConfig(sslClientConfFile, clientSSLConf); saveConfig(sslServerConfFile, serverSSLConf); @@ -322,60 +289,50 @@ public final class KeyStoreTestUtil { conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName()); conf.set("dfs.https.server.keystore.resource", sslServerConfFile.getName()); - 
conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert); } /** * Creates SSL configuration for a client. - * - * @param clientKS String client keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password - * @param trustKS String truststore file + * @param clientKS String client keystore file + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password + * @param trustKS String truststore file * @return Configuration for client SSL */ - public static Configuration createClientSSLConfig(String clientKS, - String password, String keyPassword, String trustKS) { - Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT, - clientKS, password, keyPassword, trustKS); + public static Configuration createClientSSLConfig(String clientKS, String password, + String keyPassword, String trustKS) { + Configuration clientSSLConf = + createSSLConfig(SSLFactory.Mode.CLIENT, clientKS, password, keyPassword, trustKS); return clientSSLConf; } /** * Creates SSL configuration for a server. - * - * @param serverKS String server keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password - * @param trustKS String truststore file + * @param serverKS String server keystore file + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password + * @param trustKS String truststore file * @return Configuration for server SSL */ - public static Configuration createServerSSLConfig(String serverKS, - String password, String keyPassword, String trustKS) throws IOException { - Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER, - serverKS, password, keyPassword, trustKS); + public static Configuration createServerSSLConfig(String serverKS, String password, + String keyPassword, String trustKS) throws IOException { + Configuration serverSSLConf = + createSSLConfig(SSLFactory.Mode.SERVER, serverKS, password, keyPassword, trustKS); return serverSSLConf; } /** * Creates SSL configuration. 
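// Illustrative usage sketch, not part of this patch: a test typically prepares the keystores,
// truststores and the ssl-client/ssl-server configuration files in one call before starting an
// HTTPS-enabled HttpServer. The keystores directory name here is an assumption for the example.
Configuration conf = new Configuration();
String keystoresDir = new File("target/test-keystores").getAbsolutePath();
String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);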
- * - * @param mode SSLFactory.Mode mode to configure - * @param keystore String keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password - * @param trustKS String truststore file + * @param mode SSLFactory.Mode mode to configure + * @param keystore String keystore file + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password + * @param trustKS String truststore file * @return Configuration for SSL */ - private static Configuration createSSLConfig(SSLFactory.Mode mode, - String keystore, String password, String keyPassword, String trustKS) { + private static Configuration createSSLConfig(SSLFactory.Mode mode, String keystore, + String password, String keyPassword, String trustKS) { String trustPassword = "trustP"; Configuration sslConf = new Configuration(false); @@ -389,8 +346,7 @@ public final class KeyStoreTestUtil { } if (keyPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), - keyPassword); + FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), keyPassword); } if (trustKS != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, @@ -398,8 +354,7 @@ public final class KeyStoreTestUtil { } if (trustPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), - trustPassword); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword); } sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000"); @@ -409,13 +364,11 @@ public final class KeyStoreTestUtil { /** * Saves configuration to a file. - * * @param file File to save * @param conf Configuration contents to write to file * @throws IOException if there is an I/O error saving the file */ - public static void saveConfig(File file, Configuration conf) - throws IOException { + public static void saveConfig(File file, Configuration conf) throws IOException { Writer writer = new FileWriter(file); try { conf.writeXml(writer); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java index c277cd068da..ef36d19c046 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,6 +22,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Type; @@ -39,13 +39,14 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.reflect.TypeToken; import org.apache.hbase.thirdparty.com.google.gson.Gson; /** * Test {@link JSONBean}. 
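[Sketch, not part of the patch] For reference alongside the KeyStoreTestUtil hunks above, this is a minimal sketch of how a test might drive setupSSLConfig end to end. The scratch directories, the class name, and the import package are illustrative assumptions; createClientSSLConfig and createServerSSLConfig can also be called on their own when only the Configuration objects are needed.

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    // Package assumed from the hbase-http test tree touched by this patch; adjust if it differs.
    import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;

    public class SslTestConfigSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical scratch locations for the generated keystores and ssl-*.xml files.
        File keystoresDir = new File("target/test-keystores");
        File sslConfDir = new File("target/test-classes");
        keystoresDir.mkdirs();
        sslConfDir.mkdirs();

        Configuration conf = new Configuration();
        // Creates client/server keystores plus a shared truststore, writes the ssl-client and
        // ssl-server XML files, and points conf at them; 'true' also issues a client cert so
        // the handshake exercises mutual authentication.
        KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(),
          sslConfDir.getAbsolutePath(), conf, true);
      }
    }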
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJSONBean { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -58,23 +59,17 @@ public class TestJSONBean { when(mbeanServer.queryNames(any(), any())).thenReturn(names); MBeanInfo mbeanInfo = mock(MBeanInfo.class); when(mbeanInfo.getClassName()).thenReturn("testClassName"); - String[] attributeNames = new String[] {"intAttr", "nanAttr", "infinityAttr", - "strAttr", "boolAttr", "test:Attr"}; + String[] attributeNames = + new String[] { "intAttr", "nanAttr", "infinityAttr", "strAttr", "boolAttr", "test:Attr" }; MBeanAttributeInfo[] attributeInfos = new MBeanAttributeInfo[attributeNames.length]; for (int i = 0; i < attributeInfos.length; i++) { - attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], - null, - null, - true, - false, - false); + attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], null, null, true, false, false); } when(mbeanInfo.getAttributes()).thenReturn(attributeInfos); when(mbeanServer.getMBeanInfo(any())).thenReturn(mbeanInfo); when(mbeanServer.getAttribute(any(), eq("intAttr"))).thenReturn(3); when(mbeanServer.getAttribute(any(), eq("nanAttr"))).thenReturn(Double.NaN); - when(mbeanServer.getAttribute(any(), eq("infinityAttr"))). - thenReturn(Double.POSITIVE_INFINITY); + when(mbeanServer.getAttribute(any(), eq("infinityAttr"))).thenReturn(Double.POSITIVE_INFINITY); when(mbeanServer.getAttribute(any(), eq("strAttr"))).thenReturn("aString"); when(mbeanServer.getAttribute(any(), eq("boolAttr"))).thenReturn(true); when(mbeanServer.getAttribute(any(), eq("test:Attr"))).thenReturn("aString"); @@ -105,14 +100,14 @@ public class TestJSONBean { public void testJSONBeanValueTypes() throws Exception { JSONBean bean = new JSONBean(); StringWriter stringWriter = new StringWriter(); - try ( - PrintWriter printWriter = new PrintWriter(stringWriter); + try (PrintWriter printWriter = new PrintWriter(stringWriter); JSONBean.Writer jsonWriter = bean.open(printWriter)) { jsonWriter.write(getMockMBeanServer(), null, null, false); } final Gson gson = GsonUtil.createGson().create(); - Type typeOfHashMap = new TypeToken>() {}.getType(); + Type typeOfHashMap = new TypeToken>() { + }.getType(); Map expectedJson = gson.fromJson(getExpectedJSON(), typeOfHashMap); Map actualJson = gson.fromJson(stringWriter.toString(), typeOfHashMap); assertEquals(expectedJson, actualJson); diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index f1ce681c688..a06a41c833f 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -40,105 +40,6 @@ - - - - - ../hbase-server/src/test/resources - - META-INF/NOTICE - META-INF/LICENSE - - - - src/test/resources - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-failsafe-plugin - ${surefire.version} - - - org.apache.maven.surefire - surefire-junit4 - ${surefire.version} - - - - - ${integrationtest.include} - - - ${unittest.include} - **/*$* - - ${test.output.tofile} - - ${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib - ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib - 4 - - false - false - - - - integration-test - integration-test - - integration-test - - - - verify - verify - - verify - - - - - - - - - - - org.apache.maven.plugins - maven-failsafe-plugin - - false - always - - 
1800 - -enableassertions -Xmx${failsafe.Xmx} - -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled - -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal - - - - net.revelc.code - warbucks-maven-plugin - - - - + org.apache.hbase hbase-annotations @@ -217,8 +118,8 @@ which pulls in the below. It messes up this build at assembly time. See HBASE-22029--> - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -288,8 +189,8 @@ test - javax.servlet-api javax.servlet + javax.servlet-api test @@ -314,6 +215,129 @@ + + + + + ../hbase-server/src/test/resources + + META-INF/NOTICE + META-INF/LICENSE + + + + src/test/resources + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${surefire.version} + + + ${integrationtest.include} + + + ${unittest.include} + **/*$* + + ${test.output.tofile} + + ${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib + ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib + 4 + + false + false + + + + org.apache.maven.surefire + surefire-junit4 + ${surefire.version} + + + + + integration-test + + integration-test + + integration-test + + + verify + + verify + + verify + + + + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + false + always + + 1800 + -enableassertions -Xmx${failsafe.Xmx} + -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled + -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + + org.apache.maven.plugins + maven-surefire-report-plugin + ${surefire.version} + + + integration-tests + + report-only + + + failsafe-report + + ${project.build.directory}/failsafe-reports + + + + + + + + @@ -345,8 +369,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -388,10 +413,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -33,35 +31,6 @@ Apache HBase - Logging Logging Support for HBase - - - - src/test/resources - - log4j2.properties - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -109,4 +78,33 @@ test + + + + + src/test/resources + + log4j2.properties + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java index b0711d7e8f1..18c2c0f6e67 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java index e7b5fdd3935..e389f58aacb 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java index 9b3459194ab..ba136663e09 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java index 6ac1ce053cd..01ac73fde17 100644 --- a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java +++ b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java @@ -81,14 +81,8 @@ public final class HBaseTestAppender extends AbstractOutputStreamAppender layout = getOrCreateLayout(StandardCharsets.UTF_8); OutputStreamManager manager = OutputStreamManager.getManager(target.name(), FACTORY, new FactoryData(target, layout)); - return new HBaseTestAppender(getName(), - layout, - getFilter(), - isIgnoreExceptions(), - isImmediateFlush(), - getPropertyArray(), - manager, - size); + return new HBaseTestAppender(getName(), layout, getFilter(), isIgnoreExceptions(), + isImmediateFlush(), getPropertyArray(), manager, size); } } diff --git a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java index 22b649cd44d..b2dceda619c 100644 --- a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java +++ b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java @@ -32,4 +32,4 @@ public enum Target { public PrintStream output() { return output; } -} \ No newline at end of file +} diff --git a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java index 7b3876ce083..57d8732fb1c 100644 --- a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java +++ b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.log4j; import java.io.BufferedWriter; @@ -222,7 +221,7 @@ public class FileAppender extends WriterAppender { * Do not use this method directly. To configure a FileAppender or one of its subclasses, set * its properties one by one and then call activateOptions. * @param fileName The path to the log file. - * @param append If true will append to fileName. Otherwise will truncate fileName. 
+ * @param append If true will append to fileName. Otherwise will truncate fileName. */ public synchronized void setFile(String fileName, boolean append, boolean bufferedIO, int bufferSize) throws IOException { diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 9d7a6fc4c08..31b68a6cfd5 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-mapreduce Apache HBase - MapReduce - - This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which + This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which are needed for running MR jobs on tables, WALs, HFiles and other HBase specific constructs. It also contains a bunch of tools: RowCounter, ImportTsv, Import, Export, CompactionTool, - ExportSnapshot, WALPlayer, etc - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - - org/apache/hadoop/hbase/mapreduce/Driver - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - + ExportSnapshot, WALPlayer, etc @@ -326,6 +289,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + org/apache/hadoop/hbase/mapreduce/Driver + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -345,8 +342,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -377,8 +375,7 @@ lifecycle-mapping - - + diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java index b1f71f057f2..60e24be5128 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,8 +23,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Driver for hbase mapreduce jobs. Select which to run by passing name of job - * to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable @@ -39,12 +37,11 @@ public class Driver { } /** - * @param args - * @throws Throwable + * nn */ public static void main(String[] args) throws Throwable { pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table"); - ProgramDriver.class.getMethod("driver", new Class[] { String[].class }) - .invoke(pgd, new Object[] { args }); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java index 594816fcf50..3d609ffd73b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; @@ -31,42 +28,37 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; - +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record */ @InterfaceAudience.Public -public class GroupingTableMap -extends MapReduceBase -implements TableMap { +public class GroupingTableMap extends MapReduceBase + implements TableMap { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; - protected byte [][] columns; + protected byte[][] columns; /** - * Use this before submitting a TableMap job. It will appropriately set up the - * JobConf. - * - * @param table table to be processed - * @param columns space separated list of columns to fetch - * @param groupColumns space separated list of columns used to form the key - * used in collect - * @param mapper map class - * @param job job configuration object + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. 
+ * @param table table to be processed + * @param columns space separated list of columns to fetch + * @param groupColumns space separated list of columns used to form the key used in collect + * @param mapper map class + * @param job job configuration object */ @SuppressWarnings("unchecked") public static void initJob(String table, String columns, String groupColumns, Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, + Result.class, job); job.set(GROUP_COLUMNS, groupColumns); } @@ -75,50 +67,38 @@ implements TableMap { super.configure(job); String[] cols = job.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } /** - * Extract the grouping columns from value to construct a new key. - * - * Pass the new key and value to reduce. - * If any of the grouping columns are not found in the value, the record is skipped. - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. nnnnn */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); output.collect(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. - * - * Override this method if you want to deal with nulls differently. - * - * @param r - * @return array of byte values + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. Override this method if you want to deal with nulls differently. n * @return + * array of byte values */ protected byte[][] extractKeyValues(Result r) { byte[][] keyVals = null; ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -126,7 +106,7 @@ implements TableMap { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -134,19 +114,17 @@ implements TableMap { } /** - * Create a key by concatenating multiple column values. - * Override this function in order to produce different types of keys. - * - * @param vals - * @return key generated by concatenating multiple column values + * Create a key by concatenating multiple column values. Override this function in order to + * produce different types of keys. 
n * @return key generated by concatenating multiple column + * values */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index 78062588e82..91b4d071343 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +20,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; @@ -31,18 +27,18 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * + * This is used to partition the output keys into groups of keys. Keys are grouped according to the + * regions that currently exist so that each reducer fills a single region so load is distributed. * @param * @param */ @InterfaceAudience.Public -public class HRegionPartitioner -implements Partitioner { +public class HRegionPartitioner implements Partitioner { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); // Connection and locator are not cleaned up; they just die when partitioner is done. private Connection connection; @@ -70,7 +66,7 @@ implements Partitioner { public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -80,12 +76,11 @@ implements Partitioner { } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. 
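[Sketch, not part of the patch] For the GroupingTableMap reflowed above, a hedged usage sketch of initJob with the classic mapred API; the table name and column lists are placeholders invented for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapred.GroupingTableMap;
    import org.apache.hadoop.mapred.JobConf;

    public class GroupingTableMapSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf(HBaseConfiguration.create(), GroupingTableMapSketch.class);
        // Scan columns "f:a f:b" and build the map-output key from those same two columns;
        // per the javadoc above, rows missing any grouping column are skipped by the mapper.
        GroupingTableMap.initJob("test_table", "f:a f:b", "f:a f:b",
          GroupingTableMap.class, job);
        job.setNumReduceTasks(1);
        // Submitting the JobConf (for example via JobClient.runJob) is omitted here.
      }
    }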
- return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java index c97bcc02523..16256942d72 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,22 +18,20 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to reduce */ @InterfaceAudience.Public -public class IdentityTableMap -extends MapReduceBase -implements TableMap { +public class IdentityTableMap extends MapReduceBase + implements TableMap { /** constructor */ public IdentityTableMap() { @@ -42,33 +39,24 @@ implements TableMap { } /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * - * @param table table name + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. + * @param table table name * @param columns columns to scan - * @param mapper mapper class - * @param job job configuration + * @param mapper mapper class + * @param job job configuration */ @SuppressWarnings("unchecked") - public static void initJob(String table, String columns, - Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, + public static void initJob(String table, String columns, Class mapper, + JobConf job) { + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, Result.class, job); } /** - * Pass the key, value to reduce - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * Pass the key, value to reduce nnnnn */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { // convert output.collect(key, value); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java index ba1df4c3a83..79d5f3dc8c0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,41 +19,31 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; import java.util.Iterator; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Write to table each key, record pair */ @InterfaceAudience.Public -public class IdentityTableReduce -extends MapReduceBase -implements TableReduce { +public class IdentityTableReduce extends MapReduceBase + implements TableReduce { @SuppressWarnings("unused") - private static final Logger LOG = - LoggerFactory.getLogger(IdentityTableReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName()); /** - * No aggregation, output pairs of (key, record) - * @param key - * @param values - * @param output - * @param reporter - * @throws IOException + * No aggregation, output pairs of (key, record) nnnnn */ public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { - while(values.hasNext()) { + while (values.hasNext()) { output.collect(key, values.next()); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java index 7902d1a3b4c..24e9da0f28d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.mapred; import edu.umd.cs.findbugs.annotations.SuppressWarnings; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -32,33 +34,25 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import org.apache.yetus.audience.InterfaceAudience; /** * MultiTableSnapshotInputFormat generalizes - * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * and thus has the same performance advantages; see - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * for more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan + * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} allowing a MapReduce job to run + * over one or more table snapshots, with one or more scans configured for each. Internally, the + * input format delegates to {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} and + * thus has the same performance advantages; see + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more details. Usage is + * similar to TableSnapshotInputFormat, with the following exception: + * initMultiTableSnapshotMapperJob takes in a map from snapshot name to a collection of scans. For + * each snapshot in the map, each corresponding scan will be applied; the overall dataset for the + * job is defined by the concatenation of the regions and tables included in each snapshot/scan * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, - * Class, Class, Class, JobConf, boolean, Path)} + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, Class, Class, Class, JobConf, boolean, Path)} * can be used to configure the job. - *
      {@code
      + *
      + * 
      + * {@code
        * Job job = new Job(conf);
  * Map<String, Collection<Scan>> snapshotScans = ImmutableMap.of(
        *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
      @@ -70,21 +64,18 @@ import java.util.Map;
        *      MyMapOutputValueWritable.class, job, true, restoreDir);
        * }
        * 
      - * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on - * permissioning; the - * same caveats apply here. * + * Internally, this input format restores each snapshot into a subdirectory of the given tmp + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on + * permissioning; the same caveats apply here. * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ @InterfaceAudience.Public public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat - implements InputFormat { + implements InputFormat { private final MultiTableSnapshotInputFormatImpl delegate; @@ -104,25 +95,20 @@ public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat @Override public RecordReader getRecordReader(InputSplit split, JobConf job, - Reporter reporter) throws IOException { + Reporter reporter) throws IOException { return new TableSnapshotRecordReader((TableSnapshotRegionSplit) split, job); } @SuppressWarnings("checkstyle:linelength") /** * Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of - * restoreDir. - * Sets: + * restoreDir. Sets: * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY}, * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY} - * - * @param conf - * @param snapshotScans - * @param restoreDir - * @throws IOException + * nnnn */ public static void setInput(Configuration conf, Map> snapshotScans, - Path restoreDir) throws IOException { + Path restoreDir) throws IOException { new MultiTableSnapshotInputFormatImpl().setInput(conf, snapshotScans, restoreDir); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java index 75b221c5526..4f95950589c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,11 +30,11 @@ import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; /** - * A job with a map to count rows. - * Map outputs table rows IF the input row has columns that have content. - * Uses a org.apache.hadoop.mapred.lib.IdentityReducer + * A job with a map to count rows. 
Map outputs table rows IF the input row has columns that have + * content. Uses a org.apache.hadoop.mapred.lib.IdentityReducer */ @InterfaceAudience.Public public class RowCounter extends Configured implements Tool { @@ -47,16 +44,16 @@ public class RowCounter extends Configured implements Tool { /** * Mapper that runs the count. */ - static class RowCounterMapper - implements TableMap { - private static enum Counters {ROWS} + static class RowCounterMapper implements TableMap { + private static enum Counters { + ROWS + } public void map(ImmutableBytesWritable row, Result values, - OutputCollector output, - Reporter reporter) - throws IOException { - // Count every row containing data, whether it's in qualifiers or values - reporter.incrCounter(Counters.ROWS, 1); + OutputCollector output, Reporter reporter) + throws IOException { + // Count every row containing data, whether it's in qualifiers or values + reporter.incrCounter(Counters.ROWS, 1); } public void configure(JobConf jc) { @@ -69,9 +66,7 @@ public class RowCounter extends Configured implements Tool { } /** - * @param args - * @return the JobConf - * @throws IOException + * n * @return the JobConf n */ public JobConf createSubmittableJob(String[] args) throws IOException { JobConf c = new JobConf(getConf(), getClass()); @@ -86,8 +81,8 @@ public class RowCounter extends Configured implements Tool { sb.append(args[i]); } // Second argument is the table name. - TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, c); + TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, c); c.setNumReduceTasks(0); // First arg is the output directory. FileOutputFormat.setOutputPath(c, new Path(args[0])); @@ -95,8 +90,7 @@ public class RowCounter extends Configured implements Tool { } static int printUsage() { - System.out.println(NAME + - " [...]"); + System.out.println(NAME + " [...]"); return -1; } @@ -111,8 +105,7 @@ public class RowCounter extends Configured implements Tool { } /** - * @param args - * @throws Exception + * nn */ public static void main(String[] args) throws Exception { int errCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), args); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java index d9bb66bdf07..3e38b0172ca 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,10 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -32,13 +27,15 @@ import org.apache.hadoop.mapred.FileInputFormat; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobConfigurable; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. */ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase implements - JobConfigurable { +public class TableInputFormat extends TableInputFormatBase implements JobConfigurable { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); /** @@ -59,7 +56,7 @@ public class TableInputFormat extends TableInputFormatBase implements Path[] tableNames = FileInputFormat.getInputPaths(job); String colArg = job.get(COLUMN_LIST); String[] colNames = colArg.split(" "); - byte [][] m_cols = new byte[colNames.length][]; + byte[][] m_cols = new byte[colNames.length][]; for (int i = 0; i < m_cols.length; i++) { m_cols[i] = Bytes.toBytes(colNames[i]); } @@ -70,15 +67,14 @@ public class TableInputFormat extends TableInputFormatBase implements public void validateInput(JobConf job) throws IOException { // expecting exactly one path - Path [] tableNames = FileInputFormat.getInputPaths(job); + Path[] tableNames = FileInputFormat.getInputPaths(job); if (tableNames == null || tableNames.length > 1) { throw new IOException("expecting one table name"); } // connected to table? if (getTable() == null) { - throw new IOException("could not connect to table '" + - tableNames[0].getName() + "'"); + throw new IOException("could not connect to table '" + tableNames[0].getName() + "'"); } // expecting at least one column diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index cef8e8a8176..34736bd6a3d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.Closeable; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -37,21 +32,23 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A Base for {@link TableInputFormat}s. 
Receives a {@link Table}, a - * byte[] of input columns and optionally a {@link Filter}. - * Subclasses may use other TableRecordReader implementations. + * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a byte[] of input columns and + * optionally a {@link Filter}. Subclasses may use other TableRecordReader implementations. *

      * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to * function properly. Each of the entry points to this class used by the MapReduce framework, * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, - * will call {@link #initialize(JobConf)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. - * + * will call {@link #initialize(JobConf)} as a convenient centralized location to handle retrieving + * the necessary configuration information. If your subclass overrides either of these methods, + * either call the parent version or call initialize yourself. *

      * An example of a subclass: + * *

        *   class ExampleTIF extends TableInputFormatBase {
        *
      @@ -77,32 +74,28 @@ import org.apache.hadoop.mapred.Reporter;
        */
       
       @InterfaceAudience.Public
      -public abstract class TableInputFormatBase
-implements InputFormat<ImmutableBytesWritable, Result> {
+public abstract class TableInputFormatBase implements InputFormat<ImmutableBytesWritable, Result> {
         private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class);
      -  private byte [][] inputColumns;
      +  private byte[][] inputColumns;
         private Table table;
         private RegionLocator regionLocator;
         private Connection connection;
         private TableRecordReader tableRecordReader;
         private Filter rowFilter;
       
      -  private static final String NOT_INITIALIZED = "The input format instance has not been properly " +
      -      "initialized. Ensure you call initializeTable either in your constructor or initialize " +
      -      "method";
      -  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" +
      -            " previous error. Please look at the previous logs lines from" +
      -            " the task's full log for more details.";
      +  private static final String NOT_INITIALIZED = "The input format instance has not been properly "
      +    + "initialized. Ensure you call initializeTable either in your constructor or initialize "
      +    + "method";
      +  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a"
      +    + " previous error. Please look at the previous logs lines from"
      +    + " the task's full log for more details.";
       
         /**
      -   * Builds a TableRecordReader. If no TableRecordReader was provided, uses
      -   * the default.
      -   *
      +   * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default.
          * @see InputFormat#getRecordReader(InputSplit, JobConf, Reporter)
          */
-  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
      -      InputSplit split, JobConf job, Reporter reporter)
      -  throws IOException {
+  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(InputSplit split, JobConf job,
      +    Reporter reporter) throws IOException {
           // In case a subclass uses the deprecated approach or calls initializeTable directly
           if (table == null) {
             initialize(job);
@@ -119,8 +112,8 @@ implements InputFormat<ImmutableBytesWritable, Result> {
       
           TableSplit tSplit = (TableSplit) split;
           // if no table record reader was provided use default
      -    final TableRecordReader trr = this.tableRecordReader == null ? new TableRecordReader() :
      -        this.tableRecordReader;
      +    final TableRecordReader trr =
      +      this.tableRecordReader == null ? new TableRecordReader() : this.tableRecordReader;
           trr.setStartRow(tSplit.getStartRow());
           trr.setEndRow(tSplit.getEndRow());
           trr.setHTable(this.table);
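[Sketch, not part of the patch] The class javadoc in this hunk spells out the subclassing contract (initializeTable must be called, and initialize(JobConf) is the centralized hook), but the ExampleTIF body it references is cut off by the hunk context. The sketch below is a separate minimal subclass following that contract; the class name, table, and column are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.mapred.TableInputFormatBase;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.mapred.JobConf;

    public class SimpleTIF extends TableInputFormatBase {
      @Override
      protected void initialize(JobConf job) throws IOException {
        // getRecordReader/getSplits only call initialize when no table has been set up yet,
        // so creating the Connection here does not leak instances under normal use.
        initializeTable(ConnectionFactory.createConnection(job), TableName.valueOf("test_table"));
        // One "family:qualifier" input column, in the format the mapred input formats expect.
        setInputColumns(new byte[][] { Bytes.toBytes("f:a") });
      }
    }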
@@ -165,20 +158,15 @@ implements InputFormat<ImmutableBytesWritable, Result> {
         /**
          * Calculates the splits that will serve as input for the map tasks.
          * 

      - * Splits are created in number equal to the smallest between numSplits and - * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. - * If the number of splits is smaller than the number of - * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across - * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s - * and are grouped the most evenly possible. In the - * case splits are uneven the bigger splits are placed first in the - * {@link InputSplit} array. - * - * @param job the map task {@link JobConf} + * Splits are created in number equal to the smallest between numSplits and the number of + * {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. If the number of splits is + * smaller than the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits + * are spanned across multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s and are + * grouped the most evenly possible. In the case splits are uneven the bigger splits are placed + * first in the {@link InputSplit} array. + * @param job the map task {@link JobConf} * @param numSplits a hint to calculate the number of splits (mapred.map.tasks). - * * @return the input splits - * * @see InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int) */ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { @@ -195,26 +183,24 @@ implements InputFormat { throw new IOException(INITIALIZATION_ERROR, exception); } - byte [][] startKeys = this.regionLocator.getStartKeys(); + byte[][] startKeys = this.regionLocator.getStartKeys(); if (startKeys == null || startKeys.length == 0) { throw new IOException("Expecting at least one region"); } if (this.inputColumns == null || this.inputColumns.length == 0) { throw new IOException("Expecting at least one column"); } - int realNumSplits = numSplits > startKeys.length? startKeys.length: - numSplits; + int realNumSplits = numSplits > startKeys.length ? startKeys.length : numSplits; InputSplit[] splits = new InputSplit[realNumSplits]; int middle = startKeys.length / realNumSplits; int startPos = 0; for (int i = 0; i < realNumSplits; i++) { int lastPos = startPos + middle; lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos; - String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]). - getHostname(); - splits[i] = new TableSplit(this.table.getName(), - startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]: - HConstants.EMPTY_START_ROW, regionLocation); + String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).getHostname(); + splits[i] = new TableSplit(this.table.getName(), startKeys[startPos], + ((i + 1) < realNumSplits) ? startKeys[lastPos] : HConstants.EMPTY_START_ROW, + regionLocation); LOG.info("split: " + i + "->" + splits[i]); startPos = lastPos; } @@ -223,15 +209,13 @@ implements InputFormat { /** * Allows subclasses to initialize the table information. - * - * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. - * @param tableName The {@link TableName} of the table to process. - * @throws IOException + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. + * @param tableName The {@link TableName} of the table to process. 
n */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { - LOG.warn("initializeTable called multiple times. Overwriting connection and table " + - "reference; TableInputFormatBase will not close these old references when done."); + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); @@ -241,7 +225,7 @@ implements InputFormat { /** * @param inputColumns to be passed in {@link Result} to the map task. */ - protected void setInputColumns(byte [][] inputColumns) { + protected void setInputColumns(byte[][] inputColumns) { this.inputColumns = inputColumns; } @@ -256,27 +240,22 @@ implements InputFormat { } /** - * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader - * to provide other {@link TableRecordReader} implementations. + * Allows subclasses to set the {@link TableRecordReader}. n * to provide other + * {@link TableRecordReader} implementations. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; } /** - * Allows subclasses to set the {@link Filter} to be used. - * - * @param rowFilter + * Allows subclasses to set the {@link Filter} to be used. n */ protected void setRowFilter(Filter rowFilter) { this.rowFilter = rowFilter; } /** - * Handle subclass specific set up. - * Each of the entry points used by the MapReduce framework, + * Handle subclass specific set up. Each of the entry points used by the MapReduce framework, * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, * will call {@link #initialize(JobConf)} as a convenient centralized location to handle * retrieving the necessary configuration information and calling @@ -284,19 +263,16 @@ implements InputFormat { *

      * Subclasses should implement their initialize call such that it is safe to call multiple times. * The current TableInputFormatBase implementation relies on a non-null table reference to decide - * if an initialize call is needed, but this behavior may change in the future. In particular, - * it is critical that initializeTable not be called multiple times since this will leak - * Connection instances. - * + * if an initialize call is needed, but this behavior may change in the future. In particular, it + * is critical that initializeTable not be called multiple times since this will leak Connection + * instances. */ protected void initialize(JobConf job) throws IOException { } /** * Close the Table and related objects that were initialized via - * {@link #initializeTable(Connection, TableName)}. - * - * @throws IOException + * {@link #initializeTable(Connection, TableName)}. n */ protected void closeTable() throws IOException { close(table, connection); @@ -306,7 +282,9 @@ implements InputFormat { private void close(Closeable... closables) throws IOException { for (Closeable c : closables) { - if(c != null) { c.close(); } + if (c != null) { + c.close(); + } } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java index d76572722b6..639ad707208 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,20 @@ */ package org.apache.hadoop.hbase.mapred; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Scan an HBase table to sort by a specified sort column. - * If the column does not exist, the record is not passed to Reduce. - * + * Scan an HBase table to sort by a specified sort column. If the column does not exist, the record + * is not passed to Reduce. * @param WritableComparable key class * @param Writable value class */ @InterfaceAudience.Public public interface TableMap, V> -extends Mapper { + extends Mapper { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java index 754bf2959a6..3668eb2b8cc 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,11 +17,13 @@ */ package org.apache.hadoop.hbase.mapred; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; @@ -41,10 +42,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.OutputFormat; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hadoop.mapred.TextOutputFormat; - -import java.io.IOException; -import java.util.Collection; -import java.util.Map; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility for {@link TableMap} and {@link TableReduce} @@ -54,49 +52,39 @@ import java.util.Map; public class TableMapReduceUtil { /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * - * @param table The table name to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job configuration to adjust. + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. + * @param table The table name to read from. + * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job configuration to adjust. */ - public static void initTableMapJob(String table, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job) { - initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, - true, TableInputFormat.class); + public static void initTableMapJob(String table, String columns, Class mapper, + Class outputKeyClass, Class outputValueClass, JobConf job) { + initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, true, + TableInputFormat.class); } - public static void initTableMapJob(String table, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job, boolean addDependencyJars) { + public static void initTableMapJob(String table, String columns, Class mapper, + Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars) { initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, TableInputFormat.class); } /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * - * @param table The table name to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. + * @param table The table name to read from. + * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job configuration to adjust. 
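
As a companion to the initTableMapJob(...) overloads above, here is what a caller typically looks like. Everything named below is illustrative rather than part of this patch: the table, the space-delimited column list, and the pass-through mapper.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class RowEmittingMap extends MapReduceBase
  implements TableMap<ImmutableBytesWritable, Result> {

  @Override
  public void map(ImmutableBytesWritable key, Result value,
    OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter)
    throws IOException {
    output.collect(key, value); // pass every scanned row straight through
  }

  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), RowEmittingMap.class);
    // "my_table" and the column list are placeholders; columns are space separated.
    TableMapReduceUtil.initTableMapJob("my_table", "cf:qual1 cf:qual2",
      RowEmittingMap.class, ImmutableBytesWritable.class, Result.class, job);
    job.setNumReduceTasks(0); // map-only scan
    JobClient.runJob(job);
  }
}

initTableMapJob wires in TableInputFormat, the Mutation/Result serializations and, by default, the dependency jars, which is why the driver stays this small.
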
- * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job configuration to adjust. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ - public static void initTableMapJob(String table, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job, boolean addDependencyJars, + public static void initTableMapJob(String table, String columns, Class mapper, + Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars, Class inputFormat) { job.setInputFormat(inputFormat); @@ -104,7 +92,7 @@ public class TableMapReduceUtil { job.setMapOutputKeyClass(outputKeyClass); job.setMapperClass(mapper); job.setStrings("io.serializations", job.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); FileInputFormat.addInputPaths(job, table); job.set(TableInputFormat.COLUMN_LIST, columns); if (addDependencyJars) { @@ -117,28 +105,26 @@ public class TableMapReduceUtil { try { initCredentials(job); } catch (IOException ioe) { - // just spit out the stack trace? really? + // just spit out the stack trace? really? ioe.printStackTrace(); } } /** * Sets up the job for reading from one or more multiple table snapshots, with one or more scans - * per snapshot. - * It bypasses hbase servers and read directly from snapshot files. - * + * per snapshot. It bypasses hbase servers and read directly from snapshot files. * @param snapshotScans map of snapshot name to scans on that snapshot. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ public static void initMultiTableSnapshotMapperJob(Map> snapshotScans, - Class mapper, Class outputKeyClass, Class outputValueClass, - JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { MultiTableSnapshotInputFormat.setInput(job, snapshotScans, tmpRestoreDir); job.setInputFormat(MultiTableSnapshotInputFormat.class); @@ -157,30 +143,27 @@ public class TableMapReduceUtil { } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. + * @param snapshotName The name of the snapshot (of a table) to read from. 
+ * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapJob(String snapshotName, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job, - boolean addDependencyJars, Path tmpRestoreDir) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir); initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, TableSnapshotInputFormat.class); @@ -188,97 +171,81 @@ public class TableMapReduceUtil { } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param jobConf The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. - * @param splitAlgo algorithm to split + * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. + * @param snapshotName The name of the snapshot (of a table) to read from. + * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param jobConf The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. 
+ * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. + * @param splitAlgo algorithm to split * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapJob(String snapshotName, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf jobConf, - boolean addDependencyJars, Path tmpRestoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, - int numSplitsPerRegion) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + JobConf jobConf, boolean addDependencyJars, Path tmpRestoreDir, + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { TableSnapshotInputFormat.setInput(jobConf, snapshotName, tmpRestoreDir, splitAlgo, - numSplitsPerRegion); + numSplitsPerRegion); initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, jobConf, - addDependencyJars, TableSnapshotInputFormat.class); + addDependencyJars, TableSnapshotInputFormat.class); org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.resetCacheConfig(jobConf); } - /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job configuration to adjust. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job configuration to adjust. * @throws IOException When determining the region count fails. */ - public static void initTableReduceJob(String table, - Class reducer, JobConf job) - throws IOException { + public static void initTableReduceJob(String table, Class reducer, + JobConf job) throws IOException { initTableReduceJob(table, reducer, job, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job configuration to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job configuration to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. * @throws IOException When determining the region count fails. */ - public static void initTableReduceJob(String table, - Class reducer, JobConf job, Class partitioner) - throws IOException { + public static void initTableReduceJob(String table, Class reducer, + JobConf job, Class partitioner) throws IOException { initTableReduceJob(table, reducer, job, partitioner, true); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. 
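
The snapshot variants above (initMultiTableSnapshotMapperJob and the two initTableSnapshotMapJob overloads) change only where the data is read from; the driver shape stays the same. A hedged sketch using the stock IdentityTableMap pass-through mapper, with a placeholder snapshot name, column and restore directory (which, per the javadoc, must be writable by the submitting user and must not sit under the HBase root directory):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.IdentityTableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class SnapshotScanDriver {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), SnapshotScanDriver.class);
    TableMapReduceUtil.initTableSnapshotMapJob(
      "my_snapshot",                      // snapshot to read, bypassing the region servers
      "cf:qual1",                         // columns to scan
      IdentityTableMap.class, ImmutableBytesWritable.class, Result.class, job,
      true,                               // addDependencyJars
      new Path("/tmp/snapshot-restore")); // restore dir: writable, outside the HBase rootdir
    job.setNumReduceTasks(0);
    JobClient.runJob(job);
  }
}
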
- * @param job The current job configuration to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job configuration to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When determining the region count fails. */ - public static void initTableReduceJob(String table, - Class reducer, JobConf job, Class partitioner, - boolean addDependencyJars) throws IOException { + public static void initTableReduceJob(String table, Class reducer, + JobConf job, Class partitioner, boolean addDependencyJars) throws IOException { job.setOutputFormat(TableOutputFormat.class); job.setReducerClass(reducer); job.set(TableOutputFormat.OUTPUT_TABLE, table); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(Put.class); job.setStrings("io.serializations", job.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); if (partitioner == HRegionPartitioner.class) { job.setPartitionerClass(HRegionPartitioner.class); int regions = getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); @@ -319,11 +286,10 @@ public class TableMapReduceUtil { } /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Ensures that the given number of reduce tasks for the given job configuration does not exceed + * the number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ // Used by tests. @@ -335,11 +301,10 @@ public class TableMapReduceUtil { } /** - * Ensures that the given number of map tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Ensures that the given number of map tasks for the given job configuration does not exceed the + * number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ // Used by tests. @@ -351,11 +316,10 @@ public class TableMapReduceUtil { } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Sets the number of reduce tasks for the given job configuration to the number of regions the + * given table has. + * @param table The table to get the region count for. 
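
To ground the initTableReduceJob(...) overloads above: the reducer implements the old-API TableReduce and emits Put mutations that TableOutputFormat writes back to HBase. The class name and the table, family and qualifier below are illustrative; the map side, which for this particular reducer must emit ImmutableBytesWritable keys and values, is assumed to be configured elsewhere.

import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapred.TableReduce;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class PutEmittingReduce extends MapReduceBase
  implements TableReduce<ImmutableBytesWritable, ImmutableBytesWritable> {

  @Override
  public void reduce(ImmutableBytesWritable key, Iterator<ImmutableBytesWritable> values,
    OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter) throws IOException {
    // One Put per key; the first grouped value becomes the cell value.
    Put put = new Put(key.copyBytes());
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), values.next().copyBytes());
    output.collect(key, put);
  }

  // Reduce-side wiring only; initTableReduceJob sets TableOutputFormat, the output
  // key/value classes and the Mutation/Result serializations on the JobConf.
  static void configureReduceSide(JobConf job) throws IOException {
    TableMapReduceUtil.initTableReduceJob("output_table", PutEmittingReduce.class, job);
  }
}
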
+ * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumReduceTasks(String table, JobConf job) throws IOException { @@ -363,11 +327,10 @@ public class TableMapReduceUtil { } /** - * Sets the number of map tasks for the given job configuration to the - * number of regions the given table has. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Sets the number of map tasks for the given job configuration to the number of regions the given + * table has. + * @param table The table to get the region count for. + * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumMapTasks(String table, JobConf job) throws IOException { @@ -375,13 +338,11 @@ public class TableMapReduceUtil { } /** - * Sets the number of rows to return and cache with each scanner iteration. - * Higher caching values will enable faster mapreduce jobs at the expense of - * requiring more heap to contain the cached rows. - * - * @param job The current job configuration to adjust. - * @param batchSize The number of rows to return in batch with each scanner - * iteration. + * Sets the number of rows to return and cache with each scanner iteration. Higher caching values + * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached + * rows. + * @param job The current job configuration to adjust. + * @param batchSize The number of rows to return in batch with each scanner iteration. */ public static void setScannerCaching(JobConf job, int batchSize) { job.setInt("hbase.client.scanner.caching", batchSize); @@ -392,19 +353,14 @@ public class TableMapReduceUtil { */ public static void addDependencyJars(JobConf job) throws IOException { org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job); - org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses( - job, - job.getMapOutputKeyClass(), - job.getMapOutputValueClass(), - job.getOutputKeyClass(), - job.getOutputValueClass(), - job.getPartitionerClass(), + org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(job, + job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getOutputKeyClass(), + job.getOutputValueClass(), job.getPartitionerClass(), job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class), job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class), job.getCombinerClass()); } - private static int getRegionCount(Configuration conf, TableName tableName) throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); RegionLocator locator = conn.getRegionLocator(tableName)) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index fcf6f552b7a..270aeb186a4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
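
The tuning helpers documented above (scanner caching, map/reduce task counts, dependency jars) are one-liners once a JobConf exists. A small sketch with illustrative values and placeholder table names:

import java.io.IOException;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class ScanJobTuning {
  static void tune(JobConf job) throws IOException {
    TableMapReduceUtil.setScannerCaching(job, 500);              // rows returned per scanner iteration
    TableMapReduceUtil.setNumMapTasks("my_table", job);          // one map task per region
    TableMapReduceUtil.limitNumReduceTasks("output_table", job); // never more reducers than regions
    TableMapReduceUtil.addDependencyJars(job);                   // ship HBase jars via tmpjars
  }
}
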
See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,9 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -35,6 +32,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Progressable; +import org.apache.yetus.audience.InterfaceAudience; /** * Convert Map/Reduce output and write it to an HBase table @@ -46,20 +44,18 @@ public class TableOutputFormat extends FileOutputFormat { private BufferedMutator m_mutator; private Connection conn; - /** * Instantiate a TableRecordWriter with the HBase HClient for writing. - * * @deprecated since 2.0.0 and will be removed in 3.0.0. Please use - * {@code #TableRecordWriter(JobConf)} instead. This version does not clean up connections and - * will leak connections (removed in 2.0). + * {@code #TableRecordWriter(JobConf)} instead. This version does not clean up + * connections and will leak connections (removed in 2.0). * @see HBASE-16774 */ @Deprecated @@ -103,31 +99,25 @@ public class TableOutputFormat extends FileOutputFormat { +public class TableRecordReader implements RecordReader { private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); /** - * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow - * @throws IOException + * Restart from survivable exceptions by creating a new scanner. nn */ public void restart(byte[] firstRow) throws IOException { this.recordReaderImpl.restart(firstRow); } /** - * Build the scanner. Not done in constructor to allow for extension. - * - * @throws IOException + * Build the scanner. Not done in constructor to allow for extension. n */ public void init() throws IOException { this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow()); @@ -66,22 +57,21 @@ implements RecordReader { /** * @param inputColumns the columns to be placed in {@link Result}. */ - public void setInputColumns(final byte [][] inputColumns) { + public void setInputColumns(final byte[][] inputColumns) { this.recordReaderImpl.setInputColumns(inputColumns); } /** * @param startRow the first row in the split */ - public void setStartRow(final byte [] startRow) { + public void setStartRow(final byte[] startRow) { this.recordReaderImpl.setStartRow(startRow); } /** - * * @param endRow the last row in the split */ - public void setEndRow(final byte [] endRow) { + public void setEndRow(final byte[] endRow) { this.recordReaderImpl.setEndRow(endRow); } @@ -97,8 +87,7 @@ implements RecordReader { } /** - * @return ImmutableBytesWritable - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createKey() */ public ImmutableBytesWritable createKey() { @@ -106,8 +95,7 @@ implements RecordReader { } /** - * @return RowResult - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createValue() */ public Result createValue() { @@ -127,13 +115,11 @@ implements RecordReader { } /** - * @param key HStoreKey as input key. + * @param key HStoreKey as input key. 
* @param value MapWritable as input value - * @return true if there was more data - * @throws IOException + * @return true if there was more data n */ - public boolean next(ImmutableBytesWritable key, Result value) - throws IOException { + public boolean next(ImmutableBytesWritable key, Result value) throws IOException { return this.recordReaderImpl.next(key, value); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java index 046da3aa1b8..952d60fc883 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +18,7 @@ package org.apache.hadoop.hbase.mapred; import static org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl.LOG_PER_ROW_COUNT; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -43,13 +43,13 @@ import org.slf4j.LoggerFactory; public class TableRecordReaderImpl { private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class); - private byte [] startRow; - private byte [] endRow; - private byte [] lastSuccessfulRow; + private byte[] startRow; + private byte[] endRow; + private byte[] lastSuccessfulRow; private Filter trrRowFilter; private ResultScanner scanner; private Table htable; - private byte [][] trrInputColumns; + private byte[][] trrInputColumns; private long timestamp; private int rowcount; private boolean logScannerActivity = false; @@ -69,17 +69,15 @@ public class TableRecordReaderImpl { this.scanner = this.htable.getScanner(scan); currentScan = scan; } else { - LOG.debug("TIFB.restart, firstRow: " + - Bytes.toStringBinary(firstRow) + ", endRow: " + - Bytes.toStringBinary(endRow)); + LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", endRow: " + + Bytes.toStringBinary(endRow)); Scan scan = new Scan(firstRow, endRow); TableInputFormat.addColumns(scan, trrInputColumns); this.scanner = this.htable.getScanner(scan); currentScan = scan; } } else { - LOG.debug("TIFB.restart, firstRow: " + - Bytes.toStringBinary(firstRow) + ", no endRow"); + LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", no endRow"); Scan scan = new Scan(firstRow); TableInputFormat.addColumns(scan, trrInputColumns); @@ -104,13 +102,14 @@ public class TableRecordReaderImpl { byte[] getStartRow() { return this.startRow; } + /** * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan. */ public void setHTable(Table htable) { Configuration conf = htable.getConfiguration(); logScannerActivity = conf.getBoolean( - "hbase.client.log.scanner.activity" /*ScannerCallable.LOG_SCANNER_ACTIVITY*/, false); + "hbase.client.log.scanner.activity" /* ScannerCallable.LOG_SCANNER_ACTIVITY */, false); logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100); this.htable = htable; } @@ -118,22 +117,21 @@ public class TableRecordReaderImpl { /** * @param inputColumns the columns to be placed in {@link Result}. 
*/ - public void setInputColumns(final byte [][] inputColumns) { + public void setInputColumns(final byte[][] inputColumns) { this.trrInputColumns = inputColumns; } /** * @param startRow the first row in the split */ - public void setStartRow(final byte [] startRow) { + public void setStartRow(final byte[] startRow) { this.startRow = startRow; } /** - * * @param endRow the last row in the split */ - public void setEndRow(final byte [] endRow) { + public void setEndRow(final byte[] endRow) { this.endRow = endRow; } @@ -156,8 +154,7 @@ public class TableRecordReaderImpl { } /** - * @return ImmutableBytesWritable - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createKey() */ public ImmutableBytesWritable createKey() { @@ -165,8 +162,7 @@ public class TableRecordReaderImpl { } /** - * @return RowResult - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createValue() */ public Result createValue() { @@ -185,7 +181,7 @@ public class TableRecordReaderImpl { } /** - * @param key HStoreKey as input key. + * @param key HStoreKey as input key. * @param value MapWritable as input value * @return true if there was more data */ @@ -195,11 +191,10 @@ public class TableRecordReaderImpl { try { result = this.scanner.next(); if (logScannerActivity) { - rowcount ++; + rowcount++; if (rowcount >= logPerRowCount) { long now = EnvironmentEdgeManager.currentTime(); - LOG.info("Mapper took " + (now-timestamp) - + "ms to process " + rowcount + " rows"); + LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows"); timestamp = now; rowcount = 0; } @@ -213,16 +208,16 @@ public class TableRecordReaderImpl { // the scanner, if the second call fails, it will be rethrown LOG.debug("recovered from " + StringUtils.stringifyException(e)); if (lastSuccessfulRow == null) { - LOG.warn("We are restarting the first next() invocation," + - " if your mapper has restarted a few other times like this" + - " then you should consider killing this job and investigate" + - " why it's taking so long."); + LOG.warn("We are restarting the first next() invocation," + + " if your mapper has restarted a few other times like this" + + " then you should consider killing this job and investigate" + + " why it's taking so long."); } if (lastSuccessfulRow == null) { restart(startRow); } else { restart(lastSuccessfulRow); - this.scanner.next(); // skip presumed already mapped row + this.scanner.next(); // skip presumed already mapped row } result = this.scanner.next(); } @@ -237,11 +232,10 @@ public class TableRecordReaderImpl { } catch (IOException ioe) { if (logScannerActivity) { long now = EnvironmentEdgeManager.currentTime(); - LOG.info("Mapper took " + (now-timestamp) - + "ms to process " + rowcount + " rows"); + LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows"); LOG.info(ioe.toString(), ioe); - String lastRow = lastSuccessfulRow == null ? - "null" : Bytes.toStringBinary(lastSuccessfulRow); + String lastRow = + lastSuccessfulRow == null ? 
"null" : Bytes.toStringBinary(lastSuccessfulRow); LOG.info("lastSuccessfulRow=" + lastRow); } throw ioe; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java index a64e4cdc82f..b26d3d70adf 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,20 @@ */ package org.apache.hadoop.hbase.mapred; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** * Write a table, sorting by the input key - * * @param key class * @param value class */ @InterfaceAudience.Public @SuppressWarnings("unchecked") public interface TableReduce -extends Reducer { + extends Reducer { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java index 1bacb89f565..f711ded0a28 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapred; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -32,16 +34,11 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; /** * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. Further * documentation available on {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}. 
- * * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat */ @InterfaceAudience.Public @@ -60,9 +57,9 @@ public class TableSnapshotInputFormat implements InputFormat locations, Scan scan, Path restoreDir) { + List locations, Scan scan, Path restoreDir) { this.delegate = - new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); + new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); } @Override @@ -86,13 +83,12 @@ public class TableSnapshotInputFormat implements InputFormat { + static class TableSnapshotRecordReader implements RecordReader { private TableSnapshotInputFormatImpl.RecordReader delegate; public TableSnapshotRecordReader(TableSnapshotRegionSplit split, JobConf job) - throws IOException { + throws IOException { delegate = new TableSnapshotInputFormatImpl.RecordReader(); delegate.initialize(split.delegate, job); } @@ -146,38 +142,41 @@ public class TableSnapshotInputFormat implements InputFormat - getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException { + public RecordReader getRecordReader(InputSplit split, JobConf job, + Reporter reporter) throws IOException { return new TableSnapshotRecordReader((TableSnapshotRegionSplit) split, job); } /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param job the job to configure + * @param job the job to configure * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should + * have write permissions to this directory, and this should not be a + * subdirectory of rootdir. After the job is finished, restoreDir can be + * deleted. * @throws IOException if an error occurs */ public static void setInput(JobConf job, String snapshotName, Path restoreDir) - throws IOException { + throws IOException { TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir); } /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param job the job to configure - * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. - * @param splitAlgo split algorithm to generate splits from region + * @param job the job to configure + * @param snapshotName the name of the snapshot to read from + * @param restoreDir a temporary directory to restore the snapshot into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restoreDir + * can be deleted. 
+ * @param splitAlgo split algorithm to generate splits from region * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException if an error occurs */ public static void setInput(JobConf job, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { - TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo, numSplitsPerRegion); + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { + TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo, + numSplitsPerRegion); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java index d6e663730a7..8cc03b22254 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,12 +21,11 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.Arrays; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.InputSplit; +import org.apache.yetus.audience.InterfaceAudience; /** * A table split corresponds to a key range [low, high) @@ -35,35 +33,27 @@ import org.apache.hadoop.mapred.InputSplit; @InterfaceAudience.Public public class TableSplit implements InputSplit, Comparable { private TableName m_tableName; - private byte [] m_startRow; - private byte [] m_endRow; + private byte[] m_startRow; + private byte[] m_endRow; private String m_regionLocation; /** default constructor */ public TableSplit() { - this((TableName)null, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, ""); + this((TableName) null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } /** - * Constructor - * @param tableName - * @param startRow - * @param endRow - * @param location + * Constructor nnnn */ - public TableSplit(TableName tableName, byte [] startRow, byte [] endRow, - final String location) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) { this.m_tableName = tableName; this.m_startRow = startRow; this.m_endRow = endRow; this.m_regionLocation = location; } - public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow, - final String location) { - this(TableName.valueOf(tableName), startRow, endRow, - location); + public TableSplit(byte[] tableName, byte[] startRow, byte[] endRow, final String location) { + this(TableName.valueOf(tableName), startRow, endRow, location); } /** @return table name */ @@ -72,17 +62,17 @@ public class TableSplit implements InputSplit, Comparable { } /** @return table name */ - public byte [] getTableName() { - return this.m_tableName.getName(); - } + public byte[] getTableName() { + return this.m_tableName.getName(); + } /** @return starting row key */ - public byte [] getStartRow() { + public byte[] getStartRow() { return this.m_startRow; } /** @return end row key */ - public byte [] getEndRow() { + public byte[] 
getEndRow() { return this.m_endRow; } @@ -92,7 +82,7 @@ public class TableSplit implements InputSplit, Comparable { } public String[] getLocations() { - return new String[] {this.m_regionLocation}; + return new String[] { this.m_regionLocation }; } public long getLength() { @@ -116,14 +106,14 @@ public class TableSplit implements InputSplit, Comparable { @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("HBase table split("); - sb.append("table name: ").append(m_tableName); - sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow)); - sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow)); - sb.append(", region location: ").append(m_regionLocation); - sb.append(")"); - return sb.toString(); + StringBuilder sb = new StringBuilder(); + sb.append("HBase table split("); + sb.append("table name: ").append(m_tableName); + sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow)); + sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow)); + sb.append(", region location: ").append(m_regionLocation); + sb.append(")"); + return sb.toString(); } @Override @@ -136,11 +126,9 @@ public class TableSplit implements InputSplit, Comparable { if (o == null || !(o instanceof TableSplit)) { return false; } - TableSplit other = (TableSplit)o; - return m_tableName.equals(other.m_tableName) && - Bytes.equals(m_startRow, other.m_startRow) && - Bytes.equals(m_endRow, other.m_endRow) && - m_regionLocation.equals(other.m_regionLocation); + TableSplit other = (TableSplit) o; + return m_tableName.equals(other.m_tableName) && Bytes.equals(m_startRow, other.m_startRow) + && Bytes.equals(m_endRow, other.m_endRow) && m_regionLocation.equals(other.m_regionLocation); } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java index ac7393a27f2..86b253f3a78 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,18 +18,14 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; @@ -47,12 +42,16 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * A job with a a map and reduce phase to count cells in a table. - * The counter lists the following stats for a given table: + * A job with a a map and reduce phase to count cells in a table. The counter lists the following + * stats for a given table: + * *

        * <pre>
        * 1. Total number of rows in the table
        * 2. Total number of CFs across all rows
      @@ -65,17 +64,14 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
        * 9. Total size of serialized cells across all rows.
        * </pre>
      * - * The cellcounter can take optional parameters to use a user - * supplied row/family/qualifier string to use in the report and - * second a regex based or prefix based row filter to restrict the - * count operation to a limited subset of rows from the table or a - * start time and/or end time to limit the count to a time range. + * The cellcounter can take optional parameters to use a user supplied row/family/qualifier string + * to use in the report and second a regex based or prefix based row filter to restrict the count + * operation to a limited subset of rows from the table or a start time and/or end time to limit the + * count to a time range. */ @InterfaceAudience.Public public class CellCounter extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(CellCounter.class.getName()); - + private static final Logger LOG = LoggerFactory.getLogger(CellCounter.class.getName()); /** * Name of this 'program'. @@ -87,8 +83,7 @@ public class CellCounter extends Configured implements Tool { /** * Mapper that runs the count. */ - static class CellCounterMapper - extends TableMapper { + static class CellCounterMapper extends TableMapper { /** * Counter enumeration to count the actual rows. */ @@ -117,12 +112,11 @@ public class CellCounter extends Configured implements Tool { @Override protected void setup(Context context) throws IOException, InterruptedException { conf = context.getConfiguration(); - separator = conf.get("ReportSeparator",":"); + separator = conf.get("ReportSeparator", ":"); } /** * Maps the data. - * * @param row The current table row key. * @param values The columns. * @param context The current context. @@ -130,13 +124,10 @@ public class CellCounter extends Configured implements Tool { */ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="Findbugs is blind to the Precondition null check") - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { - Preconditions.checkState(values != null, - "values passed to the map is null"); + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "Findbugs is blind to the Precondition null check") + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { + Preconditions.checkState(values != null, "values passed to the map is null"); try { byte[] currentRow = values.getRow(); @@ -167,14 +158,13 @@ public class CellCounter extends Configured implements Tool { context.getCounter("CF", currentFamilyName + "_Size").increment(size); context.write(new Text(currentFamilyName + "_Size"), new LongWritable(size)); } - if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)){ + if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)) { currentQualifier = CellUtil.cloneQualifier(value); - currentQualifierName = currentFamilyName + separator + - Bytes.toStringBinary(currentQualifier); + currentQualifierName = + currentFamilyName + separator + Bytes.toStringBinary(currentQualifier); currentRowQualifierName = currentRowKey + separator + currentQualifierName; - context.write(new Text("Total Qualifiers across all Rows"), - new LongWritable(1)); + context.write(new Text("Total Qualifiers across all Rows"), new LongWritable(1)); context.write(new Text(currentQualifierName), new LongWritable(1)); context.getCounter("Q", currentQualifierName + "_Size").increment(size); 
context.write(new Text(currentQualifierName + "_Size"), new LongWritable(size)); @@ -195,7 +185,7 @@ public class CellCounter extends Configured implements Tool { private LongWritable result = new LongWritable(); public void reduce(Key key, Iterable values, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { long sum = 0; for (LongWritable val : values) { sum += val.get(); @@ -208,23 +198,21 @@ public class CellCounter extends Configured implements Tool { /** * Sets up the actual job. - * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Path outputDir = new Path(args[1]); - String reportSeparatorString = (args.length > 2) ? args[2]: ":"; + String reportSeparatorString = (args.length > 2) ? args[2] : ":"; conf.set("ReportSeparator", reportSeparatorString); Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName)); job.setJarByClass(CellCounter.class); Scan scan = getConfiguredScanForJob(conf, args); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - CellCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, CellCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(LongWritable.class); job.setOutputFormatClass(TextOutputFormat.class); @@ -237,7 +225,7 @@ public class CellCounter extends Configured implements Tool { } private static Scan getConfiguredScanForJob(Configuration conf, String[] args) - throws IOException { + throws IOException { // create scan with any properties set from TableInputFormat Scan s = TableInputFormat.createScanFromConfiguration(conf); // Set Scan Versions @@ -248,7 +236,7 @@ public class CellCounter extends Configured implements Tool { s.setCacheBlocks(false); // Set RowFilter or Prefix Filter if applicable. Filter rowFilter = getRowFilter(args); - if (rowFilter!= null) { + if (rowFilter != null) { LOG.info("Setting Row Filter for counter."); s.setFilter(rowFilter); } @@ -261,10 +249,9 @@ public class CellCounter extends Configured implements Tool { return s; } - private static Filter getRowFilter(String[] args) { Filter rowFilter = null; - String filterCriteria = (args.length > 3) ? args[3]: null; + String filterCriteria = (args.length > 3) ? args[3] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -291,11 +278,10 @@ public class CellCounter extends Configured implements Tool { } } - if (startTime == 0 && endTime == 0) - return null; + if (startTime == 0 && endTime == 0) return null; endTime = endTime == 0 ? 
HConstants.LATEST_TIMESTAMP : endTime; - return new long [] {startTime, endTime}; + return new long[] { startTime, endTime }; } @Override @@ -311,15 +297,14 @@ public class CellCounter extends Configured implements Tool { private void printUsage(int parameterCount) { System.err.println("ERROR: Wrong number of parameters: " + parameterCount); System.err.println("Usage: hbase cellcounter [reportSeparator] " - + "[^[regex pattern] or [Prefix]] [--starttime= --endtime=]"); + + "[^[regex pattern] or [Prefix]] [--starttime= --endtime=]"); System.err.println(" Note: -D properties will be applied to the conf used."); System.err.println(" Additionally, all of the SCAN properties from TableInputFormat can be " - + "specified to get fine grained control on what is counted."); + + "specified to get fine grained control on what is counted."); System.err.println(" -D" + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D" + TableInputFormat.SCAN_COLUMNS + "=\" ...\""); - System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY - + "=,, ..."); + System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D" + TableInputFormat.SCAN_TIMESTAMP + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_END + "="); @@ -327,10 +312,10 @@ public class CellCounter extends Configured implements Tool { System.err.println(" -D" + TableInputFormat.SCAN_CACHEDROWS + "="); System.err.println(" -D" + TableInputFormat.SCAN_BATCHSIZE + "="); System.err.println(" parameter can be used to override the default report " - + "separator string : used to separate the rowId/column family name and qualifier name."); + + "separator string : used to separate the rowId/column family name and qualifier name."); System.err.println(" [^[regex pattern] or [Prefix] parameter can be used to limit the cell " - + "counter count operation to a limited subset of rows from the table based on regex or " - + "prefix pattern."); + + "counter count operation to a limited subset of rows from the table based on regex or " + + "prefix pattern."); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java index 6c69651d0a4..bc2f8040db1 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Facade to create Cells for HFileOutputFormat. The created Cells are of Put type. 
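
Stepping back to the CellCounter tool whose javadoc and usage text are reformatted above: it is a standard Hadoop Tool, so besides the "hbase cellcounter" command line it can be driven programmatically. A sketch with a placeholder table, output directory, report separator and row-prefix regex; scan-level restrictions would be applied through the TableInputFormat.SCAN_* properties on the configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.CellCounter;
import org.apache.hadoop.util.ToolRunner;

public class CellCounterDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent to: hbase cellcounter my_table /tmp/cellcounter-out : ^user
    int exit = ToolRunner.run(conf, new CellCounter(),
      new String[] { "my_table", "/tmp/cellcounter-out", ":", "^user" });
    System.exit(exit);
  }
}
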
@@ -34,97 +33,91 @@ import org.apache.hadoop.util.ReflectionUtils; public class CellCreator { public static final String VISIBILITY_EXP_RESOLVER_CLASS = - "hbase.mapreduce.visibility.expression.resolver.class"; + "hbase.mapreduce.visibility.expression.resolver.class"; private VisibilityExpressionResolver visExpResolver; public CellCreator(Configuration conf) { - Class clazz = conf.getClass( - VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, + Class clazz = + conf.getClass(VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, VisibilityExpressionResolver.class); this.visExpResolver = ReflectionUtils.newInstance(clazz, conf); this.visExpResolver.init(); } /** - * @param row row key - * @param roffset row offset - * @param rlength row length - * @param family family name - * @param foffset family offset - * @param flength family length + * @param row row key + * @param roffset row offset + * @param rlength row length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length + * @param qoffset qualifier offset + * @param qlength qualifier length * @param timestamp version timestamp - * @param value column value - * @param voffset value offset - * @param vlength value length - * @return created Cell - * @throws IOException + * @param value column value + * @param voffset value offset + * @param vlength value length + * @return created Cell n */ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, - byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, - int vlength) throws IOException { + byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, + int vlength) throws IOException { return create(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, - timestamp, value, voffset, vlength, (List)null); + timestamp, value, voffset, vlength, (List) null); } /** - * @param row row key - * @param roffset row offset - * @param rlength row length - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * @param timestamp version timestamp - * @param value column value - * @param voffset value offset - * @param vlength value length + * @param row row key + * @param roffset row offset + * @param rlength row length + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * @param timestamp version timestamp + * @param value column value + * @param voffset value offset + * @param vlength value length * @param visExpression visibility expression to be associated with cell - * @return created Cell - * @throws IOException - * @deprecated since 0.98.9 + * @return created Cell n * @deprecated since 0.98.9 * @see HBASE-10560 */ @Deprecated public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, - byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, - int vlength, String visExpression) throws IOException { + byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, + int vlength, String visExpression) 
throws IOException { List visTags = null; if (visExpression != null) { visTags = this.visExpResolver.createVisibilityExpTags(visExpression); } return new KeyValue(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, - qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, visTags); + qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, visTags); } /** - * @param row row key - * @param roffset row offset - * @param rlength row length - * @param family family name - * @param foffset family offset - * @param flength family length + * @param row row key + * @param roffset row offset + * @param rlength row length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length + * @param qoffset qualifier offset + * @param qlength qualifier length * @param timestamp version timestamp - * @param value column value - * @param voffset value offset - * @param vlength value length - * @param tags - * @return created Cell - * @throws IOException + * @param value column value + * @param voffset value offset + * @param vlength value length n * @return created Cell n */ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, - byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, - int vlength, List tags) throws IOException { + byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, + int vlength, List tags) throws IOException { return new KeyValue(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, - qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, tags); + qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, tags); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java index 6dac6f7dd59..9b8b5ed68e9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,16 +22,15 @@ import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; /** * Use to specify the type of serialization for the mappers and reducers @@ -63,7 +62,7 @@ public class CellSerialization implements Serialization { @Override public KeyValue deserialize(Cell ignore) throws IOException { - // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO + // I can't overwrite the passed in KV, not from a proto kv, not just yet. 
TODO return KeyValueUtil.create(this.dis); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java index de961cf3545..9380b0e7133 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -30,18 +28,16 @@ import org.apache.hadoop.mapreduce.Reducer; import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Cells. - * Reads in all Cells from passed Iterator, sorts them, then emits - * Cells in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Cells. Reads in all Cells from passed Iterator, sorts them, then emits Cells in + * sorted order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 */ @InterfaceAudience.Public public class CellSortReducer - extends Reducer { + extends Reducer { protected void reduce(ImmutableBytesWritable row, Iterable kvs, - Reducer.Context context) - throws java.io.IOException, InterruptedException { + Reducer.Context context) + throws java.io.IOException, InterruptedException { TreeSet map = new TreeSet<>(CellComparator.getInstance()); for (Cell kv : kvs) { try { @@ -52,7 +48,7 @@ public class CellSortReducer } context.setStatus("Read " + map.getClass()); int index = 0; - for (Cell kv: map) { + for (Cell kv : map) { context.write(row, new MapReduceExtendedCell(kv)); if (++index % 100 == 0) context.setStatus("Wrote " + index); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index bde3519d37c..5453b8a333a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,9 +44,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Tool used to copy a table to another one which can be on a different setup. - * It is also configurable with a start and time as well as a specification - * of the region server implementation if different from the local cluster. + * Tool used to copy a table to another one which can be on a different setup. It is also + * configurable with a start and time as well as a specification of the region server implementation + * if different from the local cluster. */ @InterfaceAudience.Public public class CopyTable extends Configured implements Tool { @@ -101,8 +100,7 @@ public class CopyTable extends Configured implements Tool { /** * Sets up the actual job. - * - * @param args The command line parameters. + * @param args The command line parameters. 
* @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -145,20 +143,20 @@ public class CopyTable extends Configured implements Tool { scan.withStopRow(Bytes.toBytesBinary(stopRow)); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - Map cfRenameMap = new HashMap<>(); - for(String fam : fams) { + Map cfRenameMap = new HashMap<>(); + for (String fam : fams) { String sourceCf; - if(fam.contains(":")) { - // fam looks like "sourceCfName:destCfName" - String[] srcAndDest = fam.split(":", 2); - sourceCf = srcAndDest[0]; - String destCf = srcAndDest[1]; - cfRenameMap.put(sourceCf, destCf); + if (fam.contains(":")) { + // fam looks like "sourceCfName:destCfName" + String[] srcAndDest = fam.split(":", 2); + sourceCf = srcAndDest[0]; + String destCf = srcAndDest[1]; + cfRenameMap.put(sourceCf, destCf); } else { - // fam is just "sourceCf" - sourceCf = fam; + // fam is just "sourceCf" + sourceCf = fam; } scan.addFamily(Bytes.toBytes(sourceCf)); } @@ -176,7 +174,7 @@ public class CopyTable extends Configured implements Tool { LOG.info("HFiles will be stored at " + this.bulkloadDir); HFileOutputFormat2.setOutputPath(job, bulkloadDir); try (Connection conn = ConnectionFactory.createConnection(getConf()); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { HFileOutputFormat2.configureIncrementalLoadMap(job, admin.getDescriptor((TableName.valueOf(dstTableName)))); } @@ -190,14 +188,14 @@ public class CopyTable extends Configured implements Tool { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + - "[--new.name=NEW] [--peer.adr=ADR] "); + System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + + "[--new.name=NEW] [--peer.adr=ADR] "); System.err.println(); System.err.println("Options:"); System.err.println(" rs.class hbase.regionserver.class of the peer cluster"); @@ -212,36 +210,36 @@ public class CopyTable extends Configured implements Tool { System.err.println(" new.name new table's name"); System.err.println(" peer.adr Address of the peer cluster given in the format"); System.err.println(" hbase.zookeeper.quorum:hbase.zookeeper.client" - + ".port:zookeeper.znode.parent"); + + ".port:zookeeper.znode.parent"); System.err.println(" families comma-separated list of families to copy"); System.err.println(" To copy from cf1 to cf2, give sourceCfName:destCfName. 
"); System.err.println(" To keep the same name, just give \"cfName\""); System.err.println(" all.cells also copy delete markers and deleted cells"); - System.err.println(" bulkload Write input into HFiles and bulk load to the destination " - + "table"); + System.err + .println(" bulkload Write input into HFiles and bulk load to the destination " + "table"); System.err.println(" snapshot Copy the data from snapshot to destination table."); System.err.println(); System.err.println("Args:"); System.err.println(" tablename Name of the table to copy"); System.err.println(); System.err.println("Examples:"); - System.err.println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + - "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); + System.err + .println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + + "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); System.err.println(" To copy data from 'sourceTableSnapshot' to 'destTable': "); System.err.println(" $ hbase org.apache.hadoop.hbase.mapreduce.CopyTable " - + "--snapshot --new.name=destTable sourceTableSnapshot"); + + "--snapshot --new.name=destTable sourceTableSnapshot"); System.err.println(" To copy data from 'sourceTableSnapshot' and bulk load to 'destTable': "); System.err.println(" $ hbase org.apache.hadoop.hbase.mapreduce.CopyTable " - + "--new.name=destTable --snapshot --bulkload sourceTableSnapshot"); + + "--new.name=destTable --snapshot --bulkload sourceTableSnapshot"); System.err.println("For performance consider the following general option:\n" - + " It is recommended that you set the following to >=100. A higher value uses more memory but\n" - + " decreases the round trip time to the server and may increase performance.\n" - + " -Dhbase.client.scanner.caching=100\n" - + " The following should always be set to false, to prevent writing data twice, which may produce \n" - + " inaccurate results.\n" - + " -Dmapreduce.map.speculative=false"); + + " It is recommended that you set the following to >=100. A higher value uses more memory but\n" + + " decreases the round trip time to the server and may increase performance.\n" + + " -Dhbase.client.scanner.caching=100\n" + + " The following should always be set to false, to prevent writing data twice, which may produce \n" + + " inaccurate results.\n" + " -Dmapreduce.map.speculative=false"); } private boolean doCommandLine(final String[] args) { @@ -332,7 +330,7 @@ public class CopyTable extends Configured implements Tool { continue; } - if(cmd.startsWith("--snapshot")){ + if (cmd.startsWith("--snapshot")) { readingSnapshot = true; continue; } @@ -369,7 +367,7 @@ public class CopyTable extends Configured implements Tool { if (readingSnapshot && dstTableName == null) { printUsage("The --new.name= for destination table should be " - + "provided when copying data from snapshot ."); + + "provided when copying data from snapshot ."); return false; } @@ -392,8 +390,7 @@ public class CopyTable extends Configured implements Tool { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. 
* @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { @@ -418,7 +415,7 @@ public class CopyTable extends Configured implements Tool { LOG.info("command: ./bin/hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles {} {}", this.bulkloadDir.toString(), this.dstTableName); code = new LoadIncrementalHFiles(this.getConf()) - .run(new String[] { this.bulkloadDir.toString(), this.dstTableName }); + .run(new String[] { this.bulkloadDir.toString(), this.dstTableName }); if (code == 0) { // bulkloadDir is deleted only LoadIncrementalHFiles was successful so that one can rerun // LoadIncrementalHFiles. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 620c02eefa7..2b059520587 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,13 +25,9 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; @@ -43,6 +39,9 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityConstants; import org.apache.hadoop.hbase.security.visibility.VisibilityLabelOrdinalProvider; import org.apache.hadoop.hbase.security.visibility.VisibilityUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This implementation creates tags by expanding expression using label ordinal. 
Labels will be @@ -51,7 +50,7 @@ import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private public class DefaultVisibilityExpressionResolver implements VisibilityExpressionResolver { private static final Logger LOG = - LoggerFactory.getLogger(DefaultVisibilityExpressionResolver.class); + LoggerFactory.getLogger(DefaultVisibilityExpressionResolver.class); private Configuration conf; private final Map labels = new HashMap<>(); @@ -140,7 +139,7 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression public String getLabel(int ordinal) { // Unused throw new UnsupportedOperationException( - "getLabel should not be used in VisibilityExpressionResolver"); + "getLabel should not be used in VisibilityExpressionResolver"); } }; return VisibilityUtils.createVisibilityExpTags(visExpression, true, false, null, provider); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index ed31c8422e7..86a6a670556 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,39 +27,36 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Driver for hbase mapreduce jobs. Select which to run by passing - * name of job to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { - private Driver() {} + private Driver() { + } public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); - pgd.addClass(RowCounter.NAME, RowCounter.class, - "Count rows in HBase table."); - pgd.addClass(CellCounter.NAME, CellCounter.class, - "Count cells in HBase table."); + pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table."); + pgd.addClass(CellCounter.NAME, CellCounter.class, "Count cells in HBase table."); pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS."); pgd.addClass(Import.NAME, Import.class, "Import data written by Export."); pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format."); - pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, - "Complete a bulk data load."); + pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, "Complete a bulk data load."); pgd.addClass(CopyTable.NAME, CopyTable.class, - "Export a table from local cluster to peer cluster."); - pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" + - " data from tables in two different clusters. It" + - " doesn't work for incrementColumnValues'd cells since" + - " timestamp is changed after appending to WAL."); + "Export a table from local cluster to peer cluster."); + pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, + "Compare" + " data from tables in two different clusters. 
It" + + " doesn't work for incrementColumnValues'd cells since" + + " timestamp is changed after appending to WAL."); pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files."); - pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" + - " the specific snapshot to a given FileSystem."); - pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + - "table and cf and confirm that the files they point to are correct."); + pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, + "Export" + " the specific snapshot to a given FileSystem."); + pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + + "table and cf and confirm that the files they point to are correct."); - ProgramDriver.class.getMethod("driver", new Class [] {String[].class}). - invoke(pgd, new Object[]{args}); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java index eb0f649e643..17e305cbb3a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java @@ -1,33 +1,31 @@ -/** -* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.mapreduce.Job; @@ -38,8 +36,7 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.yetus.audience.InterfaceAudience; /** - * Export an HBase table. - * Writes content to sequence files up in HDFS. Use {@link Import} to read it + * Export an HBase table. Writes content to sequence files up in HDFS. Use {@link Import} to read it * back in again. */ @InterfaceAudience.Public @@ -49,14 +46,12 @@ public class Export extends Configured implements Tool { /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { Triple arguments = ExportUtils.getArgumentsFromCommandLine(conf, args); String tableName = arguments.getFirst().getNameAsString(); Path outputDir = arguments.getThird(); @@ -66,12 +61,13 @@ public class Export extends Configured implements Tool { // Set optional scan parameters Scan s = arguments.getSecond(); IdentityTableMapper.initJob(tableName, s, IdentityTableMapper.class, job); - // No reducers. Just write straight to output files. + // No reducers. Just write straight to output files. job.setNumReduceTasks(0); job.setOutputFormatClass(SequenceFileOutputFormat.class); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(Result.class); - FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't have a default fs. + FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't + // have a default fs. 
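// A minimal usage sketch, assuming the standard "hbase" launcher script; the table name, output
// directory and times below are placeholders. The optional trailing argument is the filter handled
// by ExportUtils.getExportFilter (shown later in this patch): a value starting with '^' becomes a
// row-key regex (RowFilter with a RegexStringComparator, '^' stripped), and any other value is
// assumed here to act as a row-key prefix.
//   hbase org.apache.hadoop.hbase.mapreduce.Export MyTable /export/MyTable 1 0 1265878794289 ^row.*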
return job; } @@ -80,7 +76,7 @@ public class Export extends Configured implements Tool { if (!ExportUtils.isValidArguements(args)) { ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.getLength(args)); System.err.println(" -D " + JOB_NAME_CONF_KEY - + "=jobName - use the specified mapreduce job name for the export"); + + "=jobName - use the specified mapreduce job name for the export"); System.err.println("For MR performance consider the following properties:"); System.err.println(" -D mapreduce.map.speculative=false"); System.err.println(" -D mapreduce.reduce.speculative=false"); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java index 75feab5e4bb..f780c18ac06 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompareOperator; @@ -42,8 +40,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} - * and org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). + * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} and + * org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). */ @InterfaceAudience.Private public final class ExportUtils { @@ -52,37 +50,39 @@ public final class ExportUtils { public static final String EXPORT_BATCHING = "hbase.export.scanner.batch"; public static final String EXPORT_CACHING = "hbase.export.scanner.caching"; public static final String EXPORT_VISIBILITY_LABELS = "hbase.export.visibility.labels"; + /** * Common usage for other export tools. - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ public static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); + System.err.println("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); System.err.println(" Note: -D properties will be applied to the conf used. 
"); System.err.println(" For example: "); System.err.println(" -D " + FileOutputFormat.COMPRESS + "=true"); - System.err.println(" -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); + System.err.println( + " -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); System.err.println(" -D " + FileOutputFormat.COMPRESS_TYPE + "=BLOCK"); System.err.println(" Additionally, the following SCAN properties can be specified"); System.err.println(" to control/limit what is exported.."); - System.err.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); + System.err + .println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D " + RAW_SCAN + "=true"); System.err.println(" -D " + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D " + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D " + HConstants.HBASE_CLIENT_SCANNER_CACHING + "=100"); System.err.println(" -D " + EXPORT_VISIBILITY_LABELS + "="); System.err.println("For tables with very wide rows consider setting the batch size as below:\n" - + " -D " + EXPORT_BATCHING + "=10\n" - + " -D " + EXPORT_CACHING + "=100"); + + " -D " + EXPORT_BATCHING + "=10\n" + " -D " + EXPORT_CACHING + "=100"); } private static Filter getExportFilter(String[] args) { Filter exportFilter; - String filterCriteria = (args.length > 5) ? args[5]: null; + String filterCriteria = (args.length > 5) ? args[5] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -97,23 +97,24 @@ public final class ExportUtils { return args != null && args.length >= 2; } - public static Triple getArgumentsFromCommandLine( - Configuration conf, String[] args) throws IOException { + public static Triple getArgumentsFromCommandLine(Configuration conf, + String[] args) throws IOException { if (!isValidArguements(args)) { return null; } - return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), new Path(args[1])); + return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), + new Path(args[1])); } static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOException { Scan s = new Scan(); // Optional arguments. // Set Scan Versions - int versions = args.length > 2? Integer.parseInt(args[2]): 1; + int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1; s.setMaxVersions(versions); // Set Scan Range - long startTime = args.length > 3? Long.parseLong(args[3]): 0L; - long endTime = args.length > 4? Long.parseLong(args[4]): Long.MAX_VALUE; + long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L; + long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE; s.setTimeRange(startTime, endTime); // Set cache blocks s.setCacheBlocks(false); @@ -134,8 +135,8 @@ public final class ExportUtils { } // Set RowFilter or Prefix Filter if applicable. 
Filter exportFilter = getExportFilter(args); - if (exportFilter!= null) { - LOG.info("Setting Scan Filter for Export."); + if (exportFilter != null) { + LOG.info("Setting Scan Filter for Export."); s.setFilter(exportFilter); } List labels = null; @@ -163,9 +164,8 @@ public final class ExportUtils { LOG.error("Caching could not be set", e); } } - LOG.info("versions=" + versions + ", starttime=" + startTime - + ", endtime=" + endTime + ", keepDeletedCells=" + raw - + ", visibility labels=" + labels); + LOG.info("versions=" + versions + ", starttime=" + startTime + ", endtime=" + endTime + + ", keepDeletedCells=" + raw + ", visibility labels=" + labels); return s; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java index 1909b2d57b3..36fb493033c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -31,74 +28,68 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record. */ @InterfaceAudience.Public -public class GroupingTableMapper -extends TableMapper implements Configurable { +public class GroupingTableMapper extends TableMapper + implements Configurable { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase. + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase. */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; /** The grouping columns. */ - protected byte [][] columns; + protected byte[][] columns; /** The current configuration. */ private Configuration conf = null; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table to be processed. - * @param scan The scan with the columns etc. - * @param groupColumns A space separated list of columns used to form the - * key used in collect. - * @param mapper The mapper class. - * @param job The current job. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table to be processed. + * @param scan The scan with the columns etc. + * @param groupColumns A space separated list of columns used to form the key used in collect. + * @param mapper The mapper class. + * @param job The current job. * @throws IOException When setting up the job fails. 
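* <p>
* A minimal usage sketch, with a hypothetical table and column list (the scan and job are assumed
* to exist already):
* {@code GroupingTableMapper.initJob("mytable", scan, "info:c1 info:c2",
* GroupingTableMapper.class, job);}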
*/ @SuppressWarnings("unchecked") public static void initJob(String table, Scan scan, String groupColumns, Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); job.getConfiguration().set(GROUP_COLUMNS, groupColumns); } /** - * Extract the grouping columns from value to construct a new key. Pass the - * new key and value to reduce. If any of the grouping columns are not found - * in the value, the record is skipped. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. - * @throws IOException When writing the record fails. + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. + * @param key The current key. + * @param value The current value. + * @param context The current context. + * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); context.write(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. *
<p>
      * Override this method if you want to deal with nulls differently. - * - * @param r The current values. + * @param r The current values. * @return Array of byte values. */ protected byte[][] extractKeyValues(Result r) { @@ -106,9 +97,9 @@ extends TableMapper implements Configurable { ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -116,7 +107,7 @@ extends TableMapper implements Configurable { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -127,17 +118,16 @@ extends TableMapper implements Configurable { * Create a key by concatenating multiple column values. *
<p>
      * Override this function in order to produce different types of keys. - * - * @param vals The current key/values. + * @param vals The current key/values. * @return A key generated by concatenating multiple column values. */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); @@ -147,7 +137,6 @@ extends TableMapper implements Configurable { /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -158,17 +147,15 @@ extends TableMapper implements Configurable { /** * Sets the configuration. This is used to set up the grouping details. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] cols = conf.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java index 03254feec04..4ff4a5b95b9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,9 +41,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Simple MR input format for HFiles. - * This code was borrowed from Apache Crunch project. - * Updated to the recent version of HBase. + * Simple MR input format for HFiles. This code was borrowed from Apache Crunch project. Updated to + * the recent version of HBase. */ @InterfaceAudience.Private public class HFileInputFormat extends FileInputFormat { @@ -51,9 +50,9 @@ public class HFileInputFormat extends FileInputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileInputFormat.class); /** - * File filter that removes all "hidden" files. This might be something worth removing from - * a more general purpose utility; it accounts for the presence of metadata files created - * in the way we're doing exports. + * File filter that removes all "hidden" files. This might be something worth removing from a more + * general purpose utility; it accounts for the presence of metadata files created in the way + * we're doing exports. 
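* <p>
* For illustration (the filter body itself is not shown in this hunk, so this is an assumption):
* entries whose names start with '_' or '.', such as a MapReduce _SUCCESS marker file, are the
* kind of "hidden" paths this filter is meant to exclude.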
*/ static final PathFilter HIDDEN_FILE_FILTER = new PathFilter() { @Override @@ -81,7 +80,7 @@ public class HFileInputFormat extends FileInputFormat { @Override public void initialize(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { FileSplit fileSplit = (FileSplit) split; conf = context.getConfiguration(); Path path = fileSplit.getPath(); @@ -95,7 +94,6 @@ public class HFileInputFormat extends FileInputFormat { } - @Override public boolean nextKeyValue() throws IOException, InterruptedException { boolean hasNext; @@ -161,8 +159,8 @@ public class HFileInputFormat extends FileInputFormat { } @Override - public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { return new HFileRecordReader(); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index a3c3f11c5aa..22ee8727466 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -93,18 +93,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Writes HFiles. Passed Cells must arrive in order. - * Writes current time as the sequence id for the file. Sets the major compacted - * attribute on created {@link HFile}s. Calling write(null,null) will forcibly roll - * all HFiles being written. + * Writes HFiles. Passed Cells must arrive in order. Writes current time as the sequence id for the + * file. Sets the major compacted attribute on created {@link HFile}s. Calling write(null,null) will + * forcibly roll all HFiles being written. *
<p>
      - * Using this class as part of a MapReduce job is best done - * using {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. + * Using this class as part of a MapReduce job is best done using + * {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. */ @InterfaceAudience.Public -public class HFileOutputFormat2 - extends FileOutputFormat { +public class HFileOutputFormat2 extends FileOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileOutputFormat2.class); + static class TableInfo { private TableDescriptor tableDesctiptor; private RegionLocator regionLocator; @@ -118,7 +117,7 @@ public class HFileOutputFormat2 * The modification for the returned HTD doesn't affect the inner TD. * @return A clone of inner table descriptor * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getTableDescriptor()} - * instead. + * instead. * @see #getTableDescriptor() * @see HBASE-18241 */ @@ -147,38 +146,33 @@ public class HFileOutputFormat2 // reducer run using conf. // These should not be changed by the client. static final String COMPRESSION_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.compression"; - static final String BLOOM_TYPE_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomtype"; - static final String BLOOM_PARAM_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomparam"; - static final String BLOCK_SIZE_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.blocksize"; + "hbase.hfileoutputformat.families.compression"; + static final String BLOOM_TYPE_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomtype"; + static final String BLOOM_PARAM_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomparam"; + static final String BLOCK_SIZE_FAMILIES_CONF_KEY = "hbase.mapreduce.hfileoutputformat.blocksize"; static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; + "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; // This constant is public since the client can modify this when setting // up their conf object and thus refer to this symbol. // It is present for backwards compatibility reasons. Use it only to // override the auto-detection of datablock encoding and compression. public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.datablock.encoding"; + "hbase.mapreduce.hfileoutputformat.datablock.encoding"; public static final String COMPRESSION_OVERRIDE_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.compression"; + "hbase.mapreduce.hfileoutputformat.compression"; /** * Keep locality while generating HFiles for bulkload. 
See HBASE-12596 */ public static final String LOCALITY_SENSITIVE_CONF_KEY = - "hbase.bulkload.locality.sensitive.enabled"; + "hbase.bulkload.locality.sensitive.enabled"; private static final boolean DEFAULT_LOCALITY_SENSITIVE = true; - static final String OUTPUT_TABLE_NAME_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.table.name"; + static final String OUTPUT_TABLE_NAME_CONF_KEY = "hbase.mapreduce.hfileoutputformat.table.name"; static final String MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY = - "hbase.mapreduce.use.multi.table.hfileoutputformat"; + "hbase.mapreduce.use.multi.table.hfileoutputformat"; - public static final String REMOTE_CLUSTER_CONF_PREFIX = - "hbase.hfileoutputformat.remote.cluster."; + public static final String REMOTE_CLUSTER_CONF_PREFIX = "hbase.hfileoutputformat.remote.cluster."; public static final String REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY = REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; public static final String REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY = @@ -190,8 +184,8 @@ public class HFileOutputFormat2 public static final String STORAGE_POLICY_PROPERTY_CF_PREFIX = STORAGE_POLICY_PROPERTY + "."; @Override - public RecordWriter getRecordWriter( - final TaskAttemptContext context) throws IOException, InterruptedException { + public RecordWriter + getRecordWriter(final TaskAttemptContext context) throws IOException, InterruptedException { return createRecordWriter(context, this.getOutputCommitter(context)); } @@ -203,7 +197,7 @@ public class HFileOutputFormat2 final TaskAttemptContext context, final OutputCommitter committer) throws IOException { // Get the path of the temporary output file - final Path outputDir = ((FileOutputCommitter)committer).getWorkPath(); + final Path outputDir = ((FileOutputCommitter) committer).getWorkPath(); final Configuration conf = context.getConfiguration(); final boolean writeMultipleTables = conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); @@ -213,19 +207,19 @@ public class HFileOutputFormat2 } final FileSystem fs = outputDir.getFileSystem(conf); // These configs. are from hbase-*.xml - final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE); - // Invented config. Add to hbase-*.xml if other than default compression. - final String defaultCompressionStr = conf.get("hfile.compression", - Compression.Algorithm.NONE.getName()); + final long maxsize = + conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE); + // Invented config. Add to hbase-*.xml if other than default compression. + final String defaultCompressionStr = + conf.get("hfile.compression", Compression.Algorithm.NONE.getName()); final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr); String compressionStr = conf.get(COMPRESSION_OVERRIDE_CONF_KEY); - final Algorithm overriddenCompression = compressionStr != null ? - Compression.getCompressionAlgorithmByName(compressionStr): null; - final boolean compactionExclude = conf.getBoolean( - "hbase.mapreduce.hfileoutputformat.compaction.exclude", false); - final Set allTableNames = Arrays.stream(writeTableNames.split( - Bytes.toString(tableSeparator))).collect(Collectors.toSet()); + final Algorithm overriddenCompression = + compressionStr != null ? 
Compression.getCompressionAlgorithmByName(compressionStr) : null; + final boolean compactionExclude = + conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false); + final Set allTableNames = Arrays + .stream(writeTableNames.split(Bytes.toString(tableSeparator))).collect(Collectors.toSet()); // create a map from column family to the compression algorithm final Map compressionMap = createFamilyCompressionMap(conf); @@ -234,10 +228,10 @@ public class HFileOutputFormat2 final Map blockSizeMap = createFamilyBlockSizeMap(conf); String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY); - final Map datablockEncodingMap - = createFamilyDataBlockEncodingMap(conf); - final DataBlockEncoding overriddenEncoding = dataBlockEncodingStr != null ? - DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; + final Map datablockEncodingMap = + createFamilyDataBlockEncodingMap(conf); + final DataBlockEncoding overriddenEncoding = + dataBlockEncodingStr != null ? DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; return new RecordWriter() { // Map of families to writers and how much has been output on the writer. @@ -262,8 +256,8 @@ public class HFileOutputFormat2 tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get()); tableNameBytes = TableName.valueOf(tableNameBytes).toBytes(); if (!allTableNames.contains(Bytes.toString(tableNameBytes))) { - throw new IllegalArgumentException("TableName " + Bytes.toString(tableNameBytes) + - " not expected"); + throw new IllegalArgumentException( + "TableName " + Bytes.toString(tableNameBytes) + " not expected"); } } else { tableNameBytes = Bytes.toBytes(writeTableNames); @@ -276,9 +270,8 @@ public class HFileOutputFormat2 if (wl == null) { Path writerPath = null; if (writeMultipleTables) { - writerPath = new Path(outputDir,new Path(tableRelPath, Bytes.toString(family))); - } - else { + writerPath = new Path(outputDir, new Path(tableRelPath, Bytes.toString(family))); + } else { writerPath = new Path(outputDir, Bytes.toString(family)); } fs.mkdirs(writerPath); @@ -286,8 +279,10 @@ public class HFileOutputFormat2 } // This can only happen once a row is finished though - if (wl != null && wl.written + length >= maxsize - && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) { + if ( + wl != null && wl.written + length >= maxsize + && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0 + ) { rollWriters(wl); } @@ -298,16 +293,17 @@ public class HFileOutputFormat2 String tableName = Bytes.toString(tableNameBytes); if (tableName != null) { - try (Connection connection = ConnectionFactory.createConnection( - createRemoteClusterConf(conf)); - RegionLocator locator = - connection.getRegionLocator(TableName.valueOf(tableName))) { + try ( + Connection connection = + ConnectionFactory.createConnection(createRemoteClusterConf(conf)); + RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) { loc = locator.getRegionLocation(rowKey); } catch (Throwable e) { - LOG.warn("Something wrong locating rowkey {} in {}", - Bytes.toString(rowKey), tableName, e); + LOG.warn("Something wrong locating rowkey {} in {}", Bytes.toString(rowKey), + tableName, e); loc = null; - } } + } + } if (null == loc) { LOG.trace("Failed get of location, use default writer {}", Bytes.toString(rowKey)); @@ -315,14 +311,14 @@ public class HFileOutputFormat2 } else { LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey)); InetSocketAddress initialIsa = - new InetSocketAddress(loc.getHostname(), loc.getPort()); + new 
InetSocketAddress(loc.getHostname(), loc.getPort()); if (initialIsa.isUnresolved()) { LOG.trace("Failed resolve address {}, use default writer", loc.getHostnamePort()); wl = getNewWriter(tableNameBytes, family, conf, null); } else { LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString()); - wl = getNewWriter(tableNameBytes, family, conf, new InetSocketAddress[] { initialIsa - }); + wl = getNewWriter(tableNameBytes, family, conf, + new InetSocketAddress[] { initialIsa }); } } } else { @@ -361,8 +357,8 @@ public class HFileOutputFormat2 private void closeWriter(WriterLength wl) throws IOException { if (wl.writer != null) { - LOG.info("Writer=" + wl.writer.getPath() + - ((wl.written == 0)? "": ", wrote=" + wl.written)); + LOG.info( + "Writer=" + wl.writer.getPath() + ((wl.written == 0) ? "" : ", wrote=" + wl.written)); close(wl.writer); wl.writer = null; } @@ -384,9 +380,11 @@ public class HFileOutputFormat2 for (Entry entry : conf) { String key = entry.getKey(); - if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) { + if ( + REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key) + ) { // Handled them above continue; } @@ -406,15 +404,15 @@ public class HFileOutputFormat2 * Create a new StoreFile.Writer. * @return A WriterLength, containing a new StoreFile.Writer. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED", - justification="Not important") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", + justification = "Not important") private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf, - InetSocketAddress[] favoredNodes) throws IOException { + InetSocketAddress[] favoredNodes) throws IOException { byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family); Path familydir = new Path(outputDir, Bytes.toString(family)); if (writeMultipleTables) { - familydir = new Path(outputDir, - new Path(getTableRelativePath(tableName), Bytes.toString(family))); + familydir = + new Path(outputDir, new Path(getTableRelativePath(tableName), Bytes.toString(family))); } WriterLength wl = new WriterLength(); Algorithm compression = overriddenCompression; @@ -442,13 +440,13 @@ public class HFileOutputFormat2 HFileContext hFileContext = contextBuilder.build(); if (null == favoredNodes) { - wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).build(); + wl.writer = + new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs).withOutputDir(familydir) + .withBloomType(bloomType).withFileContext(hFileContext).build(); } else { wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs)) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).withFavoredNodes(favoredNodes).build(); + .withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext) + .withFavoredNodes(favoredNodes).build(); } this.writers.put(tableAndFamily, wl); @@ -457,10 +455,8 @@ public class HFileOutputFormat2 private void close(final StoreFileWriter w) throws IOException { if (w != null) { - w.appendFileInfo(BULKLOAD_TIME_KEY, - 
Bytes.toBytes(EnvironmentEdgeManager.currentTime())); - w.appendFileInfo(BULKLOAD_TASK_KEY, - Bytes.toBytes(context.getTaskAttemptID().toString())); + w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime())); + w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString())); w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true)); w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude)); w.appendTrackedTimestampsToMetadata(); @@ -470,7 +466,7 @@ public class HFileOutputFormat2 @Override public void close(TaskAttemptContext c) throws IOException, InterruptedException { - for (WriterLength wl: this.writers.values()) { + for (WriterLength wl : this.writers.values()) { close(wl.writer); } } @@ -481,14 +477,13 @@ public class HFileOutputFormat2 * Configure block storage policy for CF after the directory is created. */ static void configureStoragePolicy(final Configuration conf, final FileSystem fs, - byte[] tableAndFamily, Path cfPath) { + byte[] tableAndFamily, Path cfPath) { if (null == conf || null == fs || null == tableAndFamily || null == cfPath) { return; } - String policy = - conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), - conf.get(STORAGE_POLICY_PROPERTY)); + String policy = conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), + conf.get(STORAGE_POLICY_PROPERTY)); CommonFSUtils.setStoragePolicy(fs, cfPath, policy); } @@ -501,22 +496,20 @@ public class HFileOutputFormat2 } /** - * Return the start keys of all of the regions in this table, - * as a list of ImmutableBytesWritable. + * Return the start keys of all of the regions in this table, as a list of ImmutableBytesWritable. */ private static List getRegionStartKeys(List regionLocators, - boolean writeMultipleTables) - throws IOException { + boolean writeMultipleTables) throws IOException { ArrayList ret = new ArrayList<>(); - for(RegionLocator regionLocator : regionLocators) { + for (RegionLocator regionLocator : regionLocators) { TableName tableName = regionLocator.getName(); LOG.info("Looking up current regions for table " + tableName); byte[][] byteKeys = regionLocator.getStartKeys(); for (byte[] byteKey : byteKeys) { - byte[] fullKey = byteKey; //HFileOutputFormat2 use case + byte[] fullKey = byteKey; // HFileOutputFormat2 use case if (writeMultipleTables) { - //MultiTableHFileOutputFormat use case + // MultiTableHFileOutputFormat use case fullKey = combineTableNameSuffix(tableName.getName(), byteKey); } if (LOG.isDebugEnabled()) { @@ -529,12 +522,12 @@ public class HFileOutputFormat2 } /** - * Write out a {@link SequenceFile} that can be read by - * {@link TotalOrderPartitioner} that contains the split points in startKeys. + * Write out a {@link SequenceFile} that can be read by {@link TotalOrderPartitioner} that + * contains the split points in startKeys. */ @SuppressWarnings("deprecation") private static void writePartitions(Configuration conf, Path partitionsPath, - List startKeys, boolean writeMultipleTables) throws IOException { + List startKeys, boolean writeMultipleTables) throws IOException { LOG.info("Writing partition information to " + partitionsPath); if (startKeys.isEmpty()) { throw new IllegalArgumentException("No regions passed"); @@ -552,16 +545,15 @@ public class HFileOutputFormat2 } if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) { throw new IllegalArgumentException( - "First region of table should have empty start key. 
Instead has: " + "First region of table should have empty start key. Instead has: " + Bytes.toStringBinary(first.get())); } sorted.remove(sorted.first()); // Write the actual file FileSystem fs = partitionsPath.getFileSystem(conf); - SequenceFile.Writer writer = SequenceFile.createWriter( - fs, conf, partitionsPath, ImmutableBytesWritable.class, - NullWritable.class); + SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, + ImmutableBytesWritable.class, NullWritable.class); try { for (ImmutableBytesWritable startKey : sorted) { @@ -573,49 +565,47 @@ public class HFileOutputFormat2 } /** - * Configure a MapReduce Job to perform an incremental load into the given - * table. This + * Configure a MapReduce Job to perform an incremental load into the given table. This *

- * <li>Inspects the table to configure a total order partitioner</li>
- * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
- * <li>Sets the number of reduce tasks to match the current number of regions</li>
- * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
- * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
- * PutSortReducer)</li>
- * <li>Sets the HBase cluster key to load region locations for locality-sensitive</li>
+ * <li>Inspects the table to configure a total order partitioner</li>
+ * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
+ * <li>Sets the number of reduce tasks to match the current number of regions</li>
+ * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
+ * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
+ * PutSortReducer)</li>
+ * <li>Sets the HBase cluster key to load region locations for locality-sensitive</li>
 * </ul>
      * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. */ public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator) - throws IOException { + throws IOException { configureIncrementalLoad(job, table.getDescriptor(), regionLocator); configureRemoteCluster(job, table.getConfiguration()); } /** - * Configure a MapReduce Job to perform an incremental load into the given - * table. This + * Configure a MapReduce Job to perform an incremental load into the given table. This *
- * <li>Inspects the table to configure a total order partitioner</li>
- * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
- * <li>Sets the number of reduce tasks to match the current number of regions</li>
- * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
- * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
- * PutSortReducer)</li>
+ * <li>Inspects the table to configure a total order partitioner</li>
+ * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
+ * <li>Sets the number of reduce tasks to match the current number of regions</li>
+ * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
+ * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
+ * PutSortReducer)</li>
 * </ul>
      * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. */ public static void configureIncrementalLoad(Job job, TableDescriptor tableDescriptor, - RegionLocator regionLocator) throws IOException { + RegionLocator regionLocator) throws IOException { ArrayList singleTableInfo = new ArrayList<>(); singleTableInfo.add(new TableInfo(tableDescriptor, regionLocator)); configureIncrementalLoad(job, singleTableInfo, HFileOutputFormat2.class); } static void configureIncrementalLoad(Job job, List multiTableInfo, - Class> cls) throws IOException { + Class> cls) throws IOException { Configuration conf = job.getConfiguration(); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(MapReduceExtendedCell.class); @@ -632,8 +622,10 @@ public class HFileOutputFormat2 // Based on the configured map output class, set the correct reducer to properly // sort the incoming values. // TODO it would be nice to pick one or the other of these formats. - if (KeyValue.class.equals(job.getMapOutputValueClass()) - || MapReduceExtendedCell.class.equals(job.getMapOutputValueClass())) { + if ( + KeyValue.class.equals(job.getMapOutputValueClass()) + || MapReduceExtendedCell.class.equals(job.getMapOutputValueClass()) + ) { job.setReducerClass(CellSortReducer.class); } else if (Put.class.equals(job.getMapOutputValueClass())) { job.setReducerClass(PutSortReducer.class); @@ -644,8 +636,8 @@ public class HFileOutputFormat2 } conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { LOG.info("bulkload locality sensitive enabled"); @@ -656,43 +648,43 @@ public class HFileOutputFormat2 List regionLocators = new ArrayList<>(multiTableInfo.size()); List tableDescriptors = new ArrayList<>(multiTableInfo.size()); - for(TableInfo tableInfo : multiTableInfo) { + for (TableInfo tableInfo : multiTableInfo) { regionLocators.add(tableInfo.getRegionLocator()); allTableNames.add(tableInfo.getRegionLocator().getName().getNameAsString()); tableDescriptors.add(tableInfo.getTableDescriptor()); } // Record tablenames for creating writer by favored nodes, and decoding compression, // block size and other attributes of columnfamily per table - conf.set(OUTPUT_TABLE_NAME_CONF_KEY, StringUtils.join(allTableNames, Bytes - .toString(tableSeparator))); + conf.set(OUTPUT_TABLE_NAME_CONF_KEY, + StringUtils.join(allTableNames, Bytes.toString(tableSeparator))); List startKeys = getRegionStartKeys(regionLocators, writeMultipleTables); // Use table's region boundaries for TOP split points. 
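As a minimal sketch (not part of this patch) of how the configureIncrementalLoad API shown in the hunks above is typically wired into a driver: the driver class, table name and output path below are hypothetical, and the mapper, which must emit ImmutableBytesWritable keys with Put values per the javadoc, is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadPrepareDriver { // hypothetical driver class, not in this patch
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "prepare-hfiles");
    job.setJarByClass(BulkLoadPrepareDriver.class);
    // A mapper emitting ImmutableBytesWritable -> Put would be set here (omitted).
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    TableName table = TableName.valueOf("usertable"); // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Table htable = conn.getTable(table);
      RegionLocator locator = conn.getRegionLocator(table)) {
      // Configures the total order partitioner, reducer count, output classes and sort reducer.
      HFileOutputFormat2.configureIncrementalLoad(job, htable, locator);
      FileOutputFormat.setOutputPath(job, new Path("/tmp/hfiles-out")); // hypothetical path
      System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
  }
}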
- LOG.info("Configuring " + startKeys.size() + " reduce partitions " + - "to match current region count for all tables"); + LOG.info("Configuring " + startKeys.size() + " reduce partitions " + + "to match current region count for all tables"); job.setNumReduceTasks(startKeys.size()); configurePartitioner(job, startKeys, writeMultipleTables); // Set compression algorithms based on column families - conf.set(COMPRESSION_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(compressionDetails, - tableDescriptors)); - conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(blockSizeDetails, - tableDescriptors)); - conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomTypeDetails, - tableDescriptors)); - conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomParamDetails, - tableDescriptors)); + conf.set(COMPRESSION_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(compressionDetails, tableDescriptors)); + conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(blockSizeDetails, tableDescriptors)); + conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomTypeDetails, tableDescriptors)); + conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomParamDetails, tableDescriptors)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); LOG.info("Incremental output configured for tables: " + StringUtils.join(allTableNames, ",")); } - public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor) throws - IOException { + public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor) + throws IOException { Configuration conf = job.getConfiguration(); job.setOutputKeyClass(ImmutableBytesWritable.class); @@ -705,15 +697,15 @@ public class HFileOutputFormat2 conf.set(OUTPUT_TABLE_NAME_CONF_KEY, tableDescriptor.getTableName().getNameAsString()); // Set compression algorithms based on column families conf.set(COMPRESSION_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); @@ -722,21 +714,16 @@ public class HFileOutputFormat2 /** * Configure HBase cluster key for remote cluster to load region location for locality-sensitive - * if it's enabled. 
- * It's not necessary to call this method explicitly when the cluster key for HBase cluster to be - * used to load region location is configured in the job configuration. - * Call this method when another HBase cluster key is configured in the job configuration. - * For example, you should call when you load data from HBase cluster A using - * {@link TableInputFormat} and generate hfiles for HBase cluster B. - * Otherwise, HFileOutputFormat2 fetch location from cluster A and locality-sensitive won't - * working correctly. + * if it's enabled. It's not necessary to call this method explicitly when the cluster key for + * HBase cluster to be used to load region location is configured in the job configuration. Call + * this method when another HBase cluster key is configured in the job configuration. For example, + * you should call when you load data from HBase cluster A using {@link TableInputFormat} and + * generate hfiles for HBase cluster B. Otherwise, HFileOutputFormat2 fetch location from cluster + * A and locality-sensitive won't working correctly. * {@link #configureIncrementalLoad(Job, Table, RegionLocator)} calls this method using - * {@link Table#getConfiguration} as clusterConf. - * See HBASE-25608. - * - * @param job which has configuration to be updated + * {@link Table#getConfiguration} as clusterConf. See HBASE-25608. + * @param job which has configuration to be updated * @param clusterConf which contains cluster key of the HBase cluster to be locality-sensitive - * * @see #configureIncrementalLoad(Job, Table, RegionLocator) * @see #LOCALITY_SENSITIVE_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY @@ -751,31 +738,28 @@ public class HFileOutputFormat2 } final String quorum = clusterConf.get(HConstants.ZOOKEEPER_QUORUM); - final int clientPort = clusterConf.getInt( - HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); - final String parent = clusterConf.get( - HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + final int clientPort = clusterConf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, + HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); + final String parent = + clusterConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); conf.set(REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY, quorum); conf.setInt(REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY, clientPort); conf.set(REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY, parent); - LOG.info("ZK configs for remote cluster of bulkload is configured: " + - quorum + ":" + clientPort + "/" + parent); + LOG.info("ZK configs for remote cluster of bulkload is configured: " + quorum + ":" + clientPort + + "/" + parent); } /** - * Runs inside the task to deserialize column family to compression algorithm - * map from the configuration. - * + * Runs inside the task to deserialize column family to compression algorithm map from the + * configuration. 
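A hedged sketch of the cross-cluster case described in the configureRemoteCluster javadoc above (reading from cluster A while generating HFiles for cluster B). The quorum, table name and helper class are placeholders, not values from this patch; configureIncrementalLoad(Job, Table, RegionLocator) already calls configureRemoteCluster with the table's configuration, so the explicit call is only needed when the job configuration points at the source cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class CrossClusterBulkLoadSketch { // hypothetical helper, not in this patch
  public static void configure(Job job) throws Exception {
    // Cluster B (the bulk load target); quorum values are placeholders.
    Configuration clusterBConf = HBaseConfiguration.create();
    clusterBConf.set(HConstants.ZOOKEEPER_QUORUM, "zk-b-1,zk-b-2,zk-b-3");
    clusterBConf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
    clusterBConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase");
    TableName target = TableName.valueOf("usertable"); // hypothetical table
    try (Connection connB = ConnectionFactory.createConnection(clusterBConf);
      Table tableB = connB.getTable(target);
      RegionLocator locatorB = connB.getRegionLocator(target)) {
      // Partition against cluster B's regions...
      HFileOutputFormat2.configureIncrementalLoad(job, tableB.getDescriptor(), locatorB);
      // ...and point the locality-sensitive region lookup at cluster B as well,
      // since the job configuration here is assumed to describe source cluster A.
      HFileOutputFormat2.configureRemoteCluster(job, clusterBConf);
    }
  }
}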
* @param conf to read the serialized values from * @return a map from column family to the configured compression algorithm */ @InterfaceAudience.Private - static Map createFamilyCompressionMap(Configuration - conf) { - Map stringMap = createFamilyConfValueMap(conf, - COMPRESSION_FAMILIES_CONF_KEY); + static Map createFamilyCompressionMap(Configuration conf) { + Map stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY); Map compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); @@ -785,16 +769,14 @@ public class HFileOutputFormat2 } /** - * Runs inside the task to deserialize column family to bloom filter type - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter type map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter type */ @InterfaceAudience.Private static Map createFamilyBloomTypeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOOM_TYPE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY); Map bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { BloomType bloomType = BloomType.valueOf(e.getValue()); @@ -804,9 +786,8 @@ public class HFileOutputFormat2 } /** - * Runs inside the task to deserialize column family to bloom filter param - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter param map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter param */ @@ -815,18 +796,14 @@ public class HFileOutputFormat2 return createFamilyConfValueMap(conf, BLOOM_PARAM_FAMILIES_CONF_KEY); } - /** - * Runs inside the task to deserialize column family to block size - * map from the configuration. - * + * Runs inside the task to deserialize column family to block size map from the configuration. * @param conf to read the serialized values from * @return a map from column family to the configured block size */ @InterfaceAudience.Private static Map createFamilyBlockSizeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOCK_SIZE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); Map blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Integer blockSize = Integer.parseInt(e.getValue()); @@ -836,18 +813,16 @@ public class HFileOutputFormat2 } /** - * Runs inside the task to deserialize column family to data block encoding - * type map from the configuration. - * + * Runs inside the task to deserialize column family to data block encoding type map from the + * configuration. 
* @param conf to read the serialized values from - * @return a map from column family to HFileDataBlockEncoder for the - * configured data block type for the family + * @return a map from column family to HFileDataBlockEncoder for the configured data block type + * for the family */ @InterfaceAudience.Private - static Map createFamilyDataBlockEncodingMap( - Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - DATABLOCK_ENCODING_FAMILIES_CONF_KEY); + static Map createFamilyDataBlockEncodingMap(Configuration conf) { + Map stringMap = + createFamilyConfValueMap(conf, DATABLOCK_ENCODING_FAMILIES_CONF_KEY); Map encoderMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); @@ -855,16 +830,13 @@ public class HFileOutputFormat2 return encoderMap; } - /** * Run inside the task to deserialize column family to given conf value map. - * - * @param conf to read the serialized values from + * @param conf to read the serialized values from * @param confName conf key to read from the configuration * @return a map of column family to the given configuration value */ - private static Map createFamilyConfValueMap( - Configuration conf, String confName) { + private static Map createFamilyConfValueMap(Configuration conf, String confName) { Map confValMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); String confVal = conf.get(confName, ""); for (String familyConf : confVal.split("&")) { @@ -874,7 +846,7 @@ public class HFileOutputFormat2 } try { confValMap.put(Bytes.toBytes(URLDecoder.decode(familySplit[0], "UTF-8")), - URLDecoder.decode(familySplit[1], "UTF-8")); + URLDecoder.decode(familySplit[1], "UTF-8")); } catch (UnsupportedEncodingException e) { // will not happen with UTF-8 encoding throw new AssertionError(e); @@ -887,15 +859,13 @@ public class HFileOutputFormat2 * Configure job with a TotalOrderPartitioner, partitioning against * splitPoints. Cleans up the partitions file after job exists. 
*/ - static void configurePartitioner(Job job, List splitPoints, boolean - writeMultipleTables) - throws IOException { + static void configurePartitioner(Job job, List splitPoints, + boolean writeMultipleTables) throws IOException { Configuration conf = job.getConfiguration(); // create the partitions file FileSystem fs = FileSystem.get(conf); String hbaseTmpFsDir = - conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, - HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY); + conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY); Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID()); fs.makeQualified(partitionsPath); writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables); @@ -906,12 +876,11 @@ public class HFileOutputFormat2 TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") @InterfaceAudience.Private static String serializeColumnFamilyAttribute(Function fn, - List allTables) - throws UnsupportedEncodingException { + List allTables) throws UnsupportedEncodingException { StringBuilder attributeValue = new StringBuilder(); int i = 0; for (TableDescriptor tableDescriptor : allTables) { @@ -924,8 +893,8 @@ public class HFileOutputFormat2 if (i++ > 0) { attributeValue.append('&'); } - attributeValue.append(URLEncoder.encode( - Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), + attributeValue.append(URLEncoder + .encode(Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), familyDescriptor.getName())), "UTF-8")); attributeValue.append('='); attributeValue.append(URLEncoder.encode(fn.apply(familyDescriptor), "UTF-8")); @@ -936,24 +905,24 @@ public class HFileOutputFormat2 } /** - * Serialize column family to compression algorithm map to configuration. - * Invoked while configuring the MR job for incremental load. - */ - @InterfaceAudience.Private - static Function compressionDetails = familyDescriptor -> - familyDescriptor.getCompressionType().getName(); - - /** - * Serialize column family to block size map to configuration. Invoked while + * Serialize column family to compression algorithm map to configuration. Invoked while * configuring the MR job for incremental load. */ @InterfaceAudience.Private - static Function blockSizeDetails = familyDescriptor -> String - .valueOf(familyDescriptor.getBlocksize()); + static Function compressionDetails = + familyDescriptor -> familyDescriptor.getCompressionType().getName(); /** - * Serialize column family to bloom type map to configuration. Invoked while - * configuring the MR job for incremental load. + * Serialize column family to block size map to configuration. Invoked while configuring the MR + * job for incremental load. + */ + @InterfaceAudience.Private + static Function blockSizeDetails = + familyDescriptor -> String.valueOf(familyDescriptor.getBlocksize()); + + /** + * Serialize column family to bloom type map to configuration. Invoked while configuring the MR + * job for incremental load. */ @InterfaceAudience.Private static Function bloomTypeDetails = familyDescriptor -> { @@ -965,8 +934,8 @@ public class HFileOutputFormat2 }; /** - * Serialize column family to bloom param map to configuration. Invoked while - * configuring the MR job for incremental load. 
+ * Serialize column family to bloom param map to configuration. Invoked while configuring the MR + * job for incremental load. */ @InterfaceAudience.Private static Function bloomParamDetails = familyDescriptor -> { @@ -979,8 +948,8 @@ public class HFileOutputFormat2 }; /** - * Serialize column family to data block encoding map to configuration. - * Invoked while configuring the MR job for incremental load. + * Serialize column family to data block encoding map to configuration. Invoked while configuring + * the MR job for incremental load. */ @InterfaceAudience.Private static Function dataBlockEncodingDetails = familyDescriptor -> { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java index 12f2e86fa80..e8bb7975057 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,24 +29,25 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapred.TableOutputFormat; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * - *

- * This class is not suitable as partitioner creating hfiles
- * for incremental bulk loads as region spread will likely change between time of
- * hfile creation and load time. See {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles}
- * and Bulk Load.
- *
- * @param <KEY> The type of the key.
- * @param <VALUE> The type of the value.
+ * This is used to partition the output keys into groups of keys. Keys are grouped according to the
+ * regions that currently exist so that each reducer fills a single region so load is distributed.
+ * <p>
+ * This class is not suitable as partitioner creating hfiles for incremental bulk loads as region
+ * spread will likely change between time of hfile creation and load time. See
+ * {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles} and
+ * Bulk Load.
+ * <p>
      + * @param The type of the key. + * @param The type of the value. */ @InterfaceAudience.Public -public class HRegionPartitioner -extends Partitioner -implements Configurable { +public class HRegionPartitioner extends Partitioner + implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); private Configuration conf = null; @@ -60,24 +57,23 @@ implements Configurable { private byte[][] startKeys; /** - * Gets the partition number for a given key (hence record) given the total - * number of partitions i.e. number of reduce-tasks for the job. - * - *

- * Typically a hash function on a all or a subset of the key.
- *
- * @param key The key to be partitioned.
- * @param value The entry value.
- * @param numPartitions The total number of partitions.
+ * Gets the partition number for a given key (hence record) given the total number of partitions
+ * i.e. number of reduce-tasks for the job.
+ * <p>
+ * Typically a hash function on a all or a subset of the key.
+ * <p>
      + * @param key The key to be partitioned. + * @param value The entry value. + * @param numPartitions The total number of partitions. * @return The partition number for the key. - * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( - * java.lang.Object, java.lang.Object, int) + * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( java.lang.Object, java.lang.Object, + * int) */ @Override - public int getPartition(ImmutableBytesWritable key, - VALUE value, int numPartitions) { + public int getPartition(ImmutableBytesWritable key, VALUE value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -87,12 +83,11 @@ implements Configurable { } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } @@ -103,7 +98,6 @@ implements Configurable { /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -113,12 +107,9 @@ implements Configurable { } /** - * Sets the configuration. This is used to determine the start keys for the - * given table. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to determine the start keys for the given table. + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java index 5ec7c48fb03..e09e7be98eb 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -220,9 +220,9 @@ public class HashTable extends Configured implements Tool { } /** - * Choose partitions between row ranges to hash to a single output file - * Selects region boundaries that fall within the scan range, and groups them - * into the desired number of partitions. + * Choose partitions between row ranges to hash to a single output file Selects region + * boundaries that fall within the scan range, and groups them into the desired number of + * partitions. 
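The getPartition hunk above maps each row to the index of its region's start key and, when there are fewer reducers than regions, falls back to a deterministic hash modulo. A small self-contained sketch of that selection logic; the helper name and parameters are illustrative and not part of HBase or this patch.

import org.apache.hadoop.hbase.util.Bytes;

final class RegionIndexSketch { // illustrative only, not in this patch
  // Mirrors the index selection in HRegionPartitioner#getPartition: regionStartKey is the
  // start key of the region containing the row, startKeys the table's region start keys.
  static int partitionFor(byte[] regionStartKey, byte[][] startKeys, int numPartitions) {
    if (startKeys.length == 1) {
      return 0; // only one region
    }
    for (int i = 0; i < startKeys.length; i++) {
      if (Bytes.compareTo(regionStartKey, startKeys[i]) == 0) {
        if (i >= numPartitions) {
          // Fewer reducers than regions: spread the extra regions deterministically.
          return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions;
        }
        return i;
      }
    }
    return 0; // fallback when no start key matches
  }
}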
*/ void selectPartitions(Pair regionStartEndKeys) { List startKeys = new ArrayList<>(); @@ -232,13 +232,15 @@ public class HashTable extends Configured implements Tool { // if scan begins after this region, or starts before this region, then drop this region // in other words: - // IF (scan begins before the end of this region - // AND scan ends before the start of this region) - // THEN include this region - if ((isTableStartRow(startRow) || isTableEndRow(regionEndKey) + // IF (scan begins before the end of this region + // AND scan ends before the start of this region) + // THEN include this region + if ( + (isTableStartRow(startRow) || isTableEndRow(regionEndKey) || Bytes.compareTo(startRow, regionEndKey) < 0) - && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) - || Bytes.compareTo(stopRow, regionStartKey) > 0)) { + && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) + || Bytes.compareTo(stopRow, regionStartKey) > 0) + ) { startKeys.add(regionStartKey); } } @@ -267,8 +269,8 @@ public class HashTable extends Configured implements Tool { void writePartitionFile(Configuration conf, Path path) throws IOException { FileSystem fs = path.getFileSystem(conf); @SuppressWarnings("deprecation") - SequenceFile.Writer writer = SequenceFile.createWriter( - fs, conf, path, ImmutableBytesWritable.class, NullWritable.class); + SequenceFile.Writer writer = + SequenceFile.createWriter(fs, conf, path, ImmutableBytesWritable.class, NullWritable.class); for (int i = 0; i < partitions.size(); i++) { writer.append(partitions.get(i), NullWritable.get()); @@ -277,7 +279,7 @@ public class HashTable extends Configured implements Tool { } private void readPartitionFile(FileSystem fs, Configuration conf, Path path) - throws IOException { + throws IOException { @SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf); ImmutableBytesWritable key = new ImmutableBytesWritable(); @@ -328,11 +330,10 @@ public class HashTable extends Configured implements Tool { } /** - * Open a TableHash.Reader starting at the first hash at or after the given key. - * @throws IOException + * Open a TableHash.Reader starting at the first hash at or after the given key. n */ public Reader newReader(Configuration conf, ImmutableBytesWritable startKey) - throws IOException { + throws IOException { return new Reader(conf, startKey); } @@ -351,15 +352,15 @@ public class HashTable extends Configured implements Tool { int partitionIndex = Collections.binarySearch(partitions, startKey); if (partitionIndex >= 0) { // if the key is equal to a partition, then go the file after that partition - hashFileIndex = partitionIndex+1; + hashFileIndex = partitionIndex + 1; } else { // if the key is between partitions, then go to the file between those partitions - hashFileIndex = -1-partitionIndex; + hashFileIndex = -1 - partitionIndex; } openHashFile(); // MapFile's don't make it easy to seek() so that the subsequent next() returns - // the desired key/value pair. So we cache it for the first call of next(). + // the desired key/value pair. So we cache it for the first call of next(). hash = new ImmutableBytesWritable(); key = (ImmutableBytesWritable) mapFileReader.getClosest(startKey, hash); if (key == null) { @@ -371,8 +372,8 @@ public class HashTable extends Configured implements Tool { } /** - * Read the next key/hash pair. - * Returns true if such a pair exists and false when at the end of the data. + * Read the next key/hash pair. 
Returns true if such a pair exists and false when at the end + * of the data. */ public boolean next() throws IOException { if (cachedNext) { @@ -443,19 +444,19 @@ public class HashTable extends Configured implements Tool { generatePartitions(partitionsPath); Job job = Job.getInstance(getConf(), - getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); + getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); Configuration jobConf = job.getConfiguration(); jobConf.setLong(HASH_BATCH_SIZE_CONF_KEY, tableHash.batchSize); jobConf.setBoolean(IGNORE_TIMESTAMPS, tableHash.ignoreTimestamps); job.setJarByClass(HashTable.class); TableMapReduceUtil.initTableMapperJob(tableHash.tableName, tableHash.initScan(), - HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); // use a TotalOrderPartitioner and reducers to group region output into hash files job.setPartitionerClass(TotalOrderPartitioner.class); TotalOrderPartitioner.setPartitionFile(jobConf, partitionsPath); - job.setReducerClass(Reducer.class); // identity reducer + job.setReducerClass(Reducer.class); // identity reducer job.setNumReduceTasks(tableHash.numHashFiles); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(ImmutableBytesWritable.class); @@ -467,8 +468,8 @@ public class HashTable extends Configured implements Tool { private void generatePartitions(Path partitionsPath) throws IOException { Connection connection = ConnectionFactory.createConnection(getConf()); - Pair regionKeys - = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); + Pair regionKeys = + connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); connection.close(); tableHash.selectPartitions(regionKeys); @@ -565,18 +566,17 @@ public class HashTable extends Configured implements Tool { @Override protected void setup(Context context) throws IOException, InterruptedException { - targetBatchSize = context.getConfiguration() - .getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); + targetBatchSize = + context.getConfiguration().getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); hasher = new ResultHasher(); - hasher.ignoreTimestamps = context.getConfiguration(). 
- getBoolean(IGNORE_TIMESTAMPS, false); + hasher.ignoreTimestamps = context.getConfiguration().getBoolean(IGNORE_TIMESTAMPS, false); TableSplit split = (TableSplit) context.getInputSplit(); hasher.startBatch(new ImmutableBytesWritable(split.getStartRow())); } @Override protected void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { if (currentRow == null || !currentRow.equals(key)) { currentRow = new ImmutableBytesWritable(key); // not immutable @@ -612,6 +612,7 @@ public class HashTable extends Configured implements Tool { } private static final int NUM_ARGS = 2; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -646,10 +647,10 @@ public class HashTable extends Configured implements Tool { System.err.println(); System.err.println("Examples:"); System.err.println(" To hash 'TestTable' in 32kB batches for a 1 hour window into 50 files:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" - + " --starttime=1265875194289 --endtime=1265878794289 --families=cf2,cf3" - + " TestTable /hashes/testTable"); + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" + + " --starttime=1265875194289 --endtime=1265878794289 --families=cf2,cf3" + + " TestTable /hashes/testTable"); } private boolean doCommandLine(final String[] args) { @@ -659,8 +660,8 @@ public class HashTable extends Configured implements Tool { } try { - tableHash.tableName = args[args.length-2]; - destPath = new Path(args[args.length-1]); + tableHash.tableName = args[args.length - 2]; + destPath = new Path(args[args.length - 1]); for (int i = 0; i < args.length - NUM_ARGS; i++) { String cmd = args[i]; @@ -731,18 +732,20 @@ public class HashTable extends Configured implements Tool { final String ignoreTimestampsKey = "--ignoreTimestamps="; if (cmd.startsWith(ignoreTimestampsKey)) { - tableHash.ignoreTimestamps = Boolean. - parseBoolean(cmd.substring(ignoreTimestampsKey.length())); + tableHash.ignoreTimestamps = + Boolean.parseBoolean(cmd.substring(ignoreTimestampsKey.length())); continue; } printUsage("Invalid argument '" + cmd + "'"); return false; } - if ((tableHash.startTime != 0 || tableHash.endTime != 0) - && (tableHash.startTime >= tableHash.endTime)) { - printUsage("Invalid time range filter: starttime=" - + tableHash.startTime + " >= endtime=" + tableHash.endTime); + if ( + (tableHash.startTime != 0 || tableHash.endTime != 0) + && (tableHash.startTime >= tableHash.endTime) + ) { + printUsage("Invalid time range filter: starttime=" + tableHash.startTime + " >= endtime=" + + tableHash.endTime); return false; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java index 831607c730c..0b27a882240 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,48 +18,43 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to the reduce phase. */ @InterfaceAudience.Public -public class IdentityTableMapper -extends TableMapper { +public class IdentityTableMapper extends TableMapper { /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table The table name. - * @param scan The scan with the columns to scan. - * @param mapper The mapper class. - * @param job The job configuration. + * @param scan The scan with the columns to scan. + * @param mapper The mapper class. + * @param job The job configuration. * @throws IOException When setting up the job fails. */ @SuppressWarnings("rawtypes") - public static void initJob(String table, Scan scan, - Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + public static void initJob(String table, Scan scan, Class mapper, Job job) + throws IOException { + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); } /** * Pass the key, value to reduce. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. - * @throws IOException When writing the record fails. + * @param key The current key. + * @param value The current value. + * @param context The current context. + * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { context.write(key, value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java index 876953c862b..e3e63b14eb9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,60 +18,50 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.io.Writable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.io.Writable; /** * Convenience class that simply writes all values (which must be - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} instances) - * passed to it out to the configured HBase table. 
This works in combination
- * with {@link TableOutputFormat} which actually does the writing to HBase.
- *
- * Keys are passed along but ignored in TableOutputFormat. However, they can
- * be used to control how your values will be divided up amongst the specified
- * number of reducers.
- *
- * You can also use the {@link TableMapReduceUtil} class to set up the two
- * classes in one step:
+ * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete
+ * Delete} instances) passed to it out to the configured HBase table. This works in combination with
+ * {@link TableOutputFormat} which actually does the writing to HBase.
+ * <p>
+ * Keys are passed along but ignored in TableOutputFormat. However, they can be used to control how
+ * your values will be divided up amongst the specified number of reducers.
+ * <p>
+ * You can also use the {@link TableMapReduceUtil} class to set up the two classes in one step:
 * <pre>
 * TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job);
- *
      - * This will also set the proper {@link TableOutputFormat} which is given the - * table parameter. The - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} define the - * row and columns implicitly. + * This will also set the proper {@link TableOutputFormat} which is given the + * table parameter. The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} define the row and columns implicitly. */ @InterfaceAudience.Public -public class IdentityTableReducer -extends TableReducer { +public class IdentityTableReducer extends TableReducer { @SuppressWarnings("unused") private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReducer.class); /** - * Writes each given record, consisting of the row key and the given values, - * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}. - * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put} - * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. - * - * @param key The current row key. + * Writes each given record, consisting of the row key and the given values, to the configured + * {@link org.apache.hadoop.mapreduce.OutputFormat}. It is emitting the row key and each + * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete + * Delete} as separate pairs. + * @param key The current row key. * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given - * row. - * @param context The context of the reduce. - * @throws IOException When writing the record fails. + * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given row. + * @param context The context of the reduce. + * @throws IOException When writing the record fails. * @throws InterruptedException When the job gets interrupted. */ @Override public void reduce(Writable key, Iterable values, Context context) - throws IOException, InterruptedException { - for(Mutation putOrDelete : values) { + throws IOException, InterruptedException { + for (Mutation putOrDelete : values) { context.write(key, putOrDelete); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index edcd3c5e9ea..9f507dc3eaa 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +31,6 @@ import java.util.Locale; import java.util.Map; import java.util.TreeMap; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -41,15 +39,12 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.util.MapReduceExtendedCell; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -63,7 +58,9 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparator; @@ -77,11 +74,11 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Import data written by {@link Export}. */ @@ -95,16 +92,16 @@ public class Import extends Configured implements Tool { public final static String FILTER_ARGS_CONF_KEY = "import.filter.args"; public final static String TABLE_NAME = "import.table.name"; public final static String WAL_DURABILITY = "import.wal.durability"; - public final static String HAS_LARGE_RESULT= "import.bulk.hasLargeResult"; + public final static String HAS_LARGE_RESULT = "import.bulk.hasLargeResult"; private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; public static class CellWritableComparablePartitioner - extends Partitioner { + extends Partitioner { private static CellWritableComparable[] START_KEYS = null; + @Override - public int getPartition(CellWritableComparable key, Cell value, - int numPartitions) { + public int getPartition(CellWritableComparable key, Cell value, int numPartitions) { for (int i = 0; i < START_KEYS.length; ++i) { if (key.compareTo(START_KEYS[i]) <= 0) { return i; @@ -116,12 +113,11 @@ public class Import extends Configured implements Tool { } /** - * @deprecated Use {@link CellWritableComparablePartitioner}. Will be removed - * from 3.0 onwards + * @deprecated Use {@link CellWritableComparablePartitioner}. 
Will be removed from 3.0 onwards */ @Deprecated public static class KeyValueWritableComparablePartitioner - extends Partitioner { + extends Partitioner { private static KeyValueWritableComparable[] START_KEYS = null; @Override @@ -136,7 +132,7 @@ public class Import extends Configured implements Tool { } public static class KeyValueWritableComparable - implements WritableComparable { + implements WritableComparable { private KeyValue kv = null; @@ -188,15 +184,13 @@ public class Import extends Configured implements Tool { } - public static class CellWritableComparable - implements WritableComparable { + public static class CellWritableComparable implements WritableComparable { private Cell kv = null; static { // register this comparator - WritableComparator.define(CellWritableComparable.class, - new CellWritableComparator()); + WritableComparator.define(CellWritableComparable.class, new CellWritableComparator()); } public CellWritableComparable() { @@ -249,35 +243,31 @@ public class Import extends Configured implements Tool { */ @Deprecated public static class KeyValueReducer - extends Reducer { - protected void reduce(KeyValueWritableComparable row, Iterable kvs, - Reducer.Context context) - throws java.io.IOException, InterruptedException { + extends Reducer { + protected void reduce( + KeyValueWritableComparable row, Iterable kvs, Reducer.Context context) + throws java.io.IOException, InterruptedException { int index = 0; for (KeyValue kv : kvs) { context.write(new ImmutableBytesWritable(kv.getRowArray()), kv); if (++index % 100 == 0) context.setStatus("Wrote " + index + " KeyValues, " - + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); + + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); } } } public static class CellReducer - extends - Reducer { - protected void reduce( - CellWritableComparable row, - Iterable kvs, - Reducer.Context context) - throws java.io.IOException, InterruptedException { + extends Reducer { + protected void reduce(CellWritableComparable row, Iterable kvs, + Reducer.Context context) + throws java.io.IOException, InterruptedException { int index = 0; for (Cell kv : kvs) { context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), new MapReduceExtendedCell(kv)); - if (++index % 100 == 0) - context.setStatus("Wrote " + index + " KeyValues, " - + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); + if (++index % 100 == 0) context.setStatus("Wrote " + index + " KeyValues, " + + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); } } } @@ -287,14 +277,14 @@ public class Import extends Configured implements Tool { */ @Deprecated public static class KeyValueSortImporter - extends TableMapper { + extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(KeyValueSortImporter.class); /** - * @param row The current table row key. - * @param value The columns. + * @param row The current table row key. + * @param value The columns. * @param context The current context. * @throws IOException When something is broken with the data. */ @@ -305,8 +295,10 @@ public class Import extends Configured implements Tool { LOG.trace( "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null || !filter.filterRowKey( - PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) { + if ( + filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) + ) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -329,16 +321,16 @@ public class Import extends Configured implements Tool { Configuration conf = context.getConfiguration(); TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME)); try (Connection conn = ConnectionFactory.createConnection(conf); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { byte[][] startKeys = regionLocator.getStartKeys(); if (startKeys.length != reduceNum) { throw new IOException("Region split after job initialization"); } KeyValueWritableComparable[] startKeyWraps = - new KeyValueWritableComparable[startKeys.length - 1]; + new KeyValueWritableComparable[startKeys.length - 1]; for (int i = 1; i < startKeys.length; ++i) { startKeyWraps[i - 1] = - new KeyValueWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); + new KeyValueWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); } KeyValueWritableComparablePartitioner.START_KEYS = startKeyWraps; } @@ -358,8 +350,8 @@ public class Import extends Configured implements Tool { private static final Logger LOG = LoggerFactory.getLogger(KeyValueImporter.class); /** - * @param row The current table row key. - * @param value The columns. + * @param row The current table row key. + * @param value The columns. * @param context The current context. * @throws IOException When something is broken with the data. */ @@ -370,8 +362,10 @@ public class Import extends Configured implements Tool { LOG.trace( "Considering the row." + Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null || !filter.filterRowKey( - PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) { + if ( + filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) + ) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -392,30 +386,28 @@ public class Import extends Configured implements Tool { } } - public static class CellSortImporter - extends TableMapper { + public static class CellSortImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null - || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), - (short) row.getLength()))) { + if ( + filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) + ) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -437,16 +429,15 @@ public class Import extends Configured implements Tool { Configuration conf = context.getConfiguration(); TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME)); try (Connection conn = ConnectionFactory.createConnection(conf); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { byte[][] startKeys = regionLocator.getStartKeys(); if (startKeys.length != reduceNum) { throw new IOException("Region split after job initialization"); } - CellWritableComparable[] startKeyWraps = - new CellWritableComparable[startKeys.length - 1]; + CellWritableComparable[] startKeyWraps = new CellWritableComparable[startKeys.length - 1]; for (int i = 1; i < startKeys.length; ++i) { startKeyWraps[i - 1] = - new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); + new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); } CellWritableComparablePartitioner.START_KEYS = startKeyWraps; } @@ -456,31 +447,30 @@ public class Import extends Configured implements Tool { /** * A mapper that just writes out KeyValues. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS", - justification="Writables are going away and this has been this way forever") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_COMPARETO_USE_OBJECT_EQUALS", + justification = "Writables are going away and this has been this way forever") public static class CellImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." + Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null - || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), - (short) row.getLength()))) { + if ( + filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) + ) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -510,15 +500,13 @@ public class Import extends Configured implements Tool { private Durability durability; /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. 
+ * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { writeResult(row, value, context); } catch (InterruptedException e) { @@ -527,22 +515,23 @@ public class Import extends Configured implements Tool { } private void writeResult(ImmutableBytesWritable key, Result result, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Put put = null; Delete delete = null; if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(key.get(), key.getOffset(), key.getLength())); + LOG.trace( + "Considering the row." + Bytes.toString(key.get(), key.getOffset(), key.getLength())); } - if (filter == null - || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(key.get(), key.getOffset(), - (short) key.getLength()))) { + if ( + filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(key.get(), key.getOffset(), (short) key.getLength())) + ) { processKV(key, result, context, put, delete); } } protected void processKV(ImmutableBytesWritable key, Result result, Context context, Put put, - Delete delete) throws IOException, InterruptedException { + Delete delete) throws IOException, InterruptedException { for (Cell kv : result.rawCells()) { kv = filterKv(filter, kv); // skip if we filter it out @@ -604,7 +593,7 @@ public class Import extends Configured implements Tool { cfRenameMap = createCfRenameMap(conf); filter = instantiateFilter(conf); String durabilityStr = conf.get(WAL_DURABILITY); - if(durabilityStr != null){ + if (durabilityStr != null) { durability = Durability.valueOf(durabilityStr.toUpperCase(Locale.ROOT)); LOG.info("setting WAL durability to " + durability); } else { @@ -697,8 +686,10 @@ public class Import extends Configured implements Tool { LOG.trace("Filter returned:" + code + " for the cell:" + c); } // if its not an accept type, then skip this kv - if (!(code.equals(Filter.ReturnCode.INCLUDE) || code - .equals(Filter.ReturnCode.INCLUDE_AND_NEXT_COL))) { + if ( + !(code.equals(Filter.ReturnCode.INCLUDE) + || code.equals(Filter.ReturnCode.INCLUDE_AND_NEXT_COL)) + ) { return null; } } @@ -707,26 +698,26 @@ public class Import extends Configured implements Tool { // helper: create a new KeyValue based on CF rename map private static Cell convertKv(Cell kv, Map cfRenameMap) { - if(cfRenameMap != null) { + if (cfRenameMap != null) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer - kv.getRowOffset(), // row offset - kv.getRowLength(), // row length - newCfName, // CF buffer - 0, // CF offset - newCfName.length, // CF length - kv.getQualifierArray(), // qualifier buffer - kv.getQualifierOffset(), // qualifier offset - kv.getQualifierLength(), // qualifier length - kv.getTimestamp(), // timestamp - KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type - kv.getValueArray(), // value buffer - kv.getValueOffset(), // value offset - kv.getValueLength(), // value length - tags.size() == 0 ? 
null: tags); + kv.getRowOffset(), // row offset + kv.getRowLength(), // row length + newCfName, // CF buffer + 0, // CF offset + newCfName.length, // CF length + kv.getQualifierArray(), // qualifier buffer + kv.getQualifierOffset(), // qualifier offset + kv.getQualifierLength(), // qualifier length + kv.getTimestamp(), // timestamp + KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type + kv.getValueArray(), // value buffer + kv.getValueOffset(), // value offset + kv.getValueLength(), // value length + tags.size() == 0 ? null : tags); } } return kv; @@ -736,16 +727,16 @@ public class Import extends Configured implements Tool { private static Map createCfRenameMap(Configuration conf) { Map cfRenameMap = null; String allMappingsPropVal = conf.get(CF_RENAME_PROP); - if(allMappingsPropVal != null) { + if (allMappingsPropVal != null) { // The conf value format should be sourceCf1:destCf1,sourceCf2:destCf2,... String[] allMappings = allMappingsPropVal.split(","); - for (String mapping: allMappings) { - if(cfRenameMap == null) { - cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (String mapping : allMappings) { + if (cfRenameMap == null) { + cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); } - String [] srcAndDest = mapping.split(":"); - if(srcAndDest.length != 2) { - continue; + String[] srcAndDest = mapping.split(":"); + if (srcAndDest.length != 2) { + continue; } cfRenameMap.put(srcAndDest[0].getBytes(), srcAndDest[1].getBytes()); } @@ -754,32 +745,36 @@ public class Import extends Configured implements Tool { } /** - *
<p>Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells - * the mapper how to rename column families. - * - * <p>Alternately, instead of calling this function, you could set the configuration key + * <p> + * Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells the mapper + * how to rename column families. + * <p> + * Alternately, instead of calling this function, you could set the configuration key * {@link #CF_RENAME_PROP} yourself. The value should look like - * <pre>srcCf1:destCf1,srcCf2:destCf2,....</pre>. This would have the same effect on - * the mapper behavior. - * - * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be - * set + * <pre> + * srcCf1:destCf1,srcCf2:destCf2,.... + * </pre>
      + * + * . This would have the same effect on the mapper behavior. + * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be set * @param renameMap a mapping from source CF names to destination CF names */ - static public void configureCfRenaming(Configuration conf, - Map renameMap) { + static public void configureCfRenaming(Configuration conf, Map renameMap) { StringBuilder sb = new StringBuilder(); - for(Map.Entry entry: renameMap.entrySet()) { + for (Map.Entry entry : renameMap.entrySet()) { String sourceCf = entry.getKey(); String destCf = entry.getValue(); - if(sourceCf.contains(":") || sourceCf.contains(",") || - destCf.contains(":") || destCf.contains(",")) { - throw new IllegalArgumentException("Illegal character in CF names: " - + sourceCf + ", " + destCf); + if ( + sourceCf.contains(":") || sourceCf.contains(",") || destCf.contains(":") + || destCf.contains(",") + ) { + throw new IllegalArgumentException( + "Illegal character in CF names: " + sourceCf + ", " + destCf); } - if(sb.length() != 0) { + if (sb.length() != 0) { sb.append(","); } sb.append(sourceCf + ":" + destCf); @@ -789,12 +784,12 @@ public class Import extends Configured implements Tool { /** * Add a Filter to be instantiated on import - * @param conf Configuration to update (will be passed to the job) - * @param clazz {@link Filter} subclass to instantiate on the server. + * @param conf Configuration to update (will be passed to the job) + * @param clazz {@link Filter} subclass to instantiate on the server. * @param filterArgs List of arguments to pass to the filter on instantiation */ public static void addFilterAndArguments(Configuration conf, Class clazz, - List filterArgs) throws IOException { + List filterArgs) throws IOException { conf.set(Import.FILTER_CLASS_CONF_KEY, clazz.getName()); conf.setStrings(Import.FILTER_ARGS_CONF_KEY, filterArgs.toArray(new String[filterArgs.size()])); } @@ -806,8 +801,7 @@ public class Import extends Configured implements Tool { * @return The newly created job. * @throws IOException When setting up the job fails. 
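For context, the configureCfRenaming Javadoc reformatted in the hunk above documents the srcCf:destCf pairs stored under CF_RENAME_PROP. A minimal usage sketch, not part of the patch, showing how a rename map might be wired into a job configuration; the family names cf_old and cf_new are made up:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.Import;

    public class CfRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Source-to-destination column family names; "cf_old" and "cf_new" are placeholders.
        Map<String, String> renameMap = new HashMap<>();
        renameMap.put("cf_old", "cf_new");
        // Serializes the map into the CF_RENAME_PROP property that Import's mappers read back.
        Import.configureCfRenaming(conf, renameMap);
        // conf would then be passed on to Import.createSubmittableJob(conf, args).
      }
    }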
*/ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { TableName tableName = TableName.valueOf(args[0]); conf.set(TABLE_NAME, tableName.getNameAsString()); Path inputDir = new Path(args[1]); @@ -830,8 +824,8 @@ public class Import extends Configured implements Tool { if (hfileOutPath != null && conf.getBoolean(HAS_LARGE_RESULT, false)) { LOG.info("Use Large Result!!"); try (Connection conn = ConnectionFactory.createConnection(conf); - Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + Table table = conn.getTable(tableName); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator); job.setMapperClass(CellSortImporter.class); job.setReducerClass(CellReducer.class); @@ -840,23 +834,22 @@ public class Import extends Configured implements Tool { job.setMapOutputKeyClass(CellWritableComparable.class); job.setMapOutputValueClass(MapReduceExtendedCell.class); job.getConfiguration().setClass("mapreduce.job.output.key.comparator.class", - CellWritableComparable.CellWritableComparator.class, - RawComparator.class); + CellWritableComparable.CellWritableComparator.class, RawComparator.class); Path partitionsPath = - new Path(TotalOrderPartitioner.getPartitionFile(job.getConfiguration())); + new Path(TotalOrderPartitioner.getPartitionFile(job.getConfiguration())); FileSystem fs = FileSystem.get(job.getConfiguration()); fs.deleteOnExit(partitionsPath); job.setPartitionerClass(CellWritableComparablePartitioner.class); job.setNumReduceTasks(regionLocator.getStartKeys().length); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); + org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); } } else if (hfileOutPath != null) { LOG.info("writing to hfiles for bulk load."); job.setMapperClass(CellImporter.class); try (Connection conn = ConnectionFactory.createConnection(conf); - Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)){ + Table table = conn.getTable(tableName); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { job.setReducerClass(CellSortReducer.class); Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); @@ -864,11 +857,11 @@ public class Import extends Configured implements Tool { job.setMapOutputValueClass(MapReduceExtendedCell.class); HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); + org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); } } else { LOG.info("writing directly to table from Mapper."); - // No reducers. Just write straight to table. Call initTableReducerJob + // No reducers. Just write straight to table. Call initTableReducerJob // because it sets up the TableOutputFormat. job.setMapperClass(Importer.class); TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, job); @@ -878,7 +871,7 @@ public class Import extends Configured implements Tool { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. 
*/ private static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { @@ -889,29 +882,28 @@ public class Import extends Configured implements Tool { System.err.println("HFiles of data to prepare for a bulk data load, pass the option:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println("If there is a large result that includes too much Cell " - + "whitch can occur OOME caused by the memery sort in reducer, pass the option:"); + + "whitch can occur OOME caused by the memery sort in reducer, pass the option:"); System.err.println(" -D" + HAS_LARGE_RESULT + "=true"); System.err - .println(" To apply a generic org.apache.hadoop.hbase.filter.Filter to the input, use"); + .println(" To apply a generic org.apache.hadoop.hbase.filter.Filter to the input, use"); System.err.println(" -D" + FILTER_CLASS_CONF_KEY + "="); System.err.println(" -D" + FILTER_ARGS_CONF_KEY + "="); + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false\n" + + " -D" + WAL_DURABILITY + "="); } /** @@ -920,15 +912,17 @@ public class Import extends Configured implements Tool { * present in the Write Ahead Log to replay in scenarios of a crash. This method flushes all the * regions of the table in the scenarios of import data to hbase with {@link Durability#SKIP_WAL} */ - public static void flushRegionsIfNecessary(Configuration conf) throws IOException, - InterruptedException { + public static void flushRegionsIfNecessary(Configuration conf) + throws IOException, InterruptedException { String tableName = conf.get(TABLE_NAME); Admin hAdmin = null; Connection connection = null; String durability = conf.get(WAL_DURABILITY); // Need to flush if the data is written to hbase and skip wal is enabled. - if (conf.get(BULK_OUTPUT_CONF_KEY) == null && durability != null - && Durability.SKIP_WAL.name().equalsIgnoreCase(durability)) { + if ( + conf.get(BULK_OUTPUT_CONF_KEY) == null && durability != null + && Durability.SKIP_WAL.name().equalsIgnoreCase(durability) + ) { LOG.info("Flushing all data that skipped the WAL."); try { connection = ConnectionFactory.createConnection(conf); @@ -957,7 +951,7 @@ public class Import extends Configured implements Tool { } Job job = createSubmittableJob(getConf(), args); boolean isJobSuccessful = job.waitForCompletion(true); - if(isJobSuccessful){ + if (isJobSuccessful) { // Flush all the regions of the table flushRegionsIfNecessary(getConf()); } @@ -966,8 +960,8 @@ public class Import extends Configured implements Tool { if (outputRecords < inputRecords) { System.err.println("Warning, not all records were imported (maybe filtered out)."); if (outputRecords == 0) { - System.err.println("If the data was exported from HBase 0.94 "+ - "consider using -Dhbase.import.version=0.94."); + System.err.println("If the data was exported from HBase 0.94 " + + "consider using -Dhbase.import.version=0.94."); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index ba0be03b8c0..3b94399cd88 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +25,6 @@ import java.util.ArrayList; import java.util.Base64; import java.util.HashSet; import java.util.Set; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -38,15 +36,14 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -60,19 +57,17 @@ import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Tool to import data from a TSV file. - * - * This tool is rather simplistic - it doesn't do any quoting or - * escaping, but is useful for many data loads. - * + * Tool to import data from a TSV file. This tool is rather simplistic - it doesn't do any quoting + * or escaping, but is useful for many data loads. * @see ImportTsv#usage(String) */ @InterfaceAudience.Public @@ -96,8 +91,8 @@ public class ImportTsv extends Configured implements Tool { public final static String COLUMNS_CONF_KEY = "importtsv.columns"; public final static String SEPARATOR_CONF_KEY = "importtsv.separator"; public final static String ATTRIBUTE_SEPERATOR_CONF_KEY = "attributes.seperator"; - //This config is used to propagate credentials from parent MR jobs which launch - //ImportTSV jobs. SEE IntegrationTestImportTsv. + // This config is used to propagate credentials from parent MR jobs which launch + // ImportTSV jobs. SEE IntegrationTestImportTsv. public final static String CREDENTIALS_LOCATION = "credentials_location"; final static String DEFAULT_SEPARATOR = "\t"; final static String DEFAULT_ATTRIBUTES_SEPERATOR = "=>"; @@ -106,8 +101,8 @@ public class ImportTsv extends Configured implements Tool { public final static String CREATE_TABLE_CONF_KEY = "create.table"; public final static String NO_STRICT_COL_FAMILY = "no.strict"; /** - * If table didn't exist and was created in dry-run mode, this flag is - * flipped to delete it when MR ends. + * If table didn't exist and was created in dry-run mode, this flag is flipped to delete it when + * MR ends. 
*/ private static boolean DRY_RUN_TABLE_CREATED; @@ -152,9 +147,8 @@ public class ImportTsv extends Configured implements Tool { private int cellTTLColumnIndex = DEFAULT_CELL_TTL_COLUMN_INDEX; /** - * @param columnsSpecification the list of columns to parser out, comma separated. - * The row key should be the special token TsvParser.ROWKEY_COLUMN_SPEC - * @param separatorStr + * @param columnsSpecification the list of columns to parser out, comma separated. The row key + * should be the special token TsvParser.ROWKEY_COLUMN_SPEC n */ public TsvParser(String columnsSpecification, String separatorStr) { // Configure separator @@ -164,8 +158,8 @@ public class ImportTsv extends Configured implements Tool { separatorByte = separator[0]; // Configure columns - ArrayList columnStrings = Lists.newArrayList( - Splitter.on(',').trimResults().split(columnsSpecification)); + ArrayList columnStrings = + Lists.newArrayList(Splitter.on(',').trimResults().split(columnsSpecification)); maxColumnCount = columnStrings.size(); families = new byte[maxColumnCount][]; @@ -243,12 +237,12 @@ public class ImportTsv extends Configured implements Tool { public byte[] getFamily(int idx) { return families[idx]; } + public byte[] getQualifier(int idx) { return qualifiers[idx]; } - public ParsedLine parse(byte[] lineBytes, int length) - throws BadTsvLineException { + public ParsedLine parse(byte[] lineBytes, int length) throws BadTsvLineException { // Enumerate separator offsets ArrayList tabOffsets = new ArrayList<>(maxColumnCount); for (int i = 0; i < length; i++) { @@ -266,8 +260,7 @@ public class ImportTsv extends Configured implements Tool { throw new BadTsvLineException("Excessive columns"); } else if (tabOffsets.size() <= getRowKeyColumnIndex()) { throw new BadTsvLineException("No row key"); - } else if (hasTimestamp() - && tabOffsets.size() <= getTimestampKeyColumnIndex()) { + } else if (hasTimestamp() && tabOffsets.size() <= getTimestampKeyColumnIndex()) { throw new BadTsvLineException("No timestamp"); } else if (hasAttributes() && tabOffsets.size() <= getAttributesKeyColumnIndex()) { throw new BadTsvLineException("No attributes specified"); @@ -291,6 +284,7 @@ public class ImportTsv extends Configured implements Tool { public int getRowKeyOffset() { return getColumnOffset(rowKeyColumnIndex); } + public int getRowKeyLength() { return getColumnLength(rowKeyColumnIndex); } @@ -301,9 +295,8 @@ public class ImportTsv extends Configured implements Tool { return ts; } - String timeStampStr = Bytes.toString(lineBytes, - getColumnOffset(timestampKeyColumnIndex), - getColumnLength(timestampKeyColumnIndex)); + String timeStampStr = Bytes.toString(lineBytes, getColumnOffset(timestampKeyColumnIndex), + getColumnLength(timestampKeyColumnIndex)); try { return Long.parseLong(timeStampStr); } catch (NumberFormatException nfe) { @@ -317,7 +310,7 @@ public class ImportTsv extends Configured implements Tool { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(attrKeyColumnIndex), - getColumnLength(attrKeyColumnIndex)); + getColumnLength(attrKeyColumnIndex)); } } @@ -367,7 +360,7 @@ public class ImportTsv extends Configured implements Tool { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(cellVisibilityColumnIndex), - getColumnLength(cellVisibilityColumnIndex)); + getColumnLength(cellVisibilityColumnIndex)); } } @@ -392,22 +385,23 @@ public class ImportTsv extends Configured implements Tool { return 0; } else { return Bytes.toLong(lineBytes, getColumnOffset(cellTTLColumnIndex), - 
getColumnLength(cellTTLColumnIndex)); + getColumnLength(cellTTLColumnIndex)); } } public int getColumnOffset(int idx) { - if (idx > 0) - return tabOffsets.get(idx - 1) + 1; - else - return 0; + if (idx > 0) return tabOffsets.get(idx - 1) + 1; + else return 0; } + public int getColumnLength(int idx) { return tabOffsets.get(idx) - getColumnOffset(idx); } + public int getColumnCount() { return tabOffsets.size(); } + public byte[] getLineBytes() { return lineBytes; } @@ -417,18 +411,16 @@ public class ImportTsv extends Configured implements Tool { public BadTsvLineException(String err) { super(err); } + private static final long serialVersionUID = 1L; } /** - * Return starting position and length of row key from the specified line bytes. - * @param lineBytes - * @param length - * @return Pair of row key offset and length. - * @throws BadTsvLineException + * Return starting position and length of row key from the specified line bytes. nn * @return + * Pair of row key offset and length. n */ public Pair parseRowKey(byte[] lineBytes, int length) - throws BadTsvLineException { + throws BadTsvLineException { int rkColumnIndex = 0; int startPos = 0, endPos = 0; for (int i = 0; i <= length; i++) { @@ -444,9 +436,8 @@ public class ImportTsv extends Configured implements Tool { } } if (i == length) { - throw new BadTsvLineException( - "Row key does not exist as number of columns in the line" - + " are less than row key position."); + throw new BadTsvLineException("Row key does not exist as number of columns in the line" + + " are less than row key position."); } } return new Pair<>(startPos, endPos - startPos + 1); @@ -455,14 +446,13 @@ public class ImportTsv extends Configured implements Tool { /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ protected static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException, ClassNotFoundException { + throws IOException, ClassNotFoundException { Job job = null; boolean isDryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false); try (Connection connection = ConnectionFactory.createConnection(conf)) { @@ -472,16 +462,17 @@ public class ImportTsv extends Configured implements Tool { String actualSeparator = conf.get(SEPARATOR_CONF_KEY); if (actualSeparator != null) { conf.set(SEPARATOR_CONF_KEY, - Bytes.toString(Base64.getEncoder().encode(actualSeparator.getBytes()))); + Bytes.toString(Base64.getEncoder().encode(actualSeparator.getBytes()))); } // See if a non-default Mapper was set String mapperClassName = conf.get(MAPPER_CONF_KEY); - Class mapperClass = mapperClassName != null? Class.forName(mapperClassName): DEFAULT_MAPPER; + Class mapperClass = + mapperClassName != null ? 
Class.forName(mapperClassName) : DEFAULT_MAPPER; TableName tableName = TableName.valueOf(args[0]); Path inputDir = new Path(args[1]); - String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); + String jobName = conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName.getNameAsString()); job = Job.getInstance(conf, jobName); job.setJarByClass(mapperClass); FileInputFormat.setInputPaths(job, inputDir); @@ -490,7 +481,7 @@ public class ImportTsv extends Configured implements Tool { job.setMapOutputKeyClass(ImmutableBytesWritable.class); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); String[] columns = conf.getStrings(COLUMNS_CONF_KEY); - if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { + if (StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { String fileLoc = conf.get(CREDENTIALS_LOCATION); Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf); job.getCredentials().addAll(cred); @@ -510,37 +501,34 @@ public class ImportTsv extends Configured implements Tool { } } } else { - String errorMsg = - format("Table '%s' does not exist and '%s' is set to no.", tableName, - CREATE_TABLE_CONF_KEY); + String errorMsg = format("Table '%s' does not exist and '%s' is set to no.", + tableName, CREATE_TABLE_CONF_KEY); LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } } try (Table table = connection.getTable(tableName); - RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + RegionLocator regionLocator = connection.getRegionLocator(tableName)) { boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false); // if no.strict is false then check column family - if(!noStrict) { + if (!noStrict) { ArrayList unmatchedFamilies = new ArrayList<>(); Set cfSet = getColumnFamilies(columns); TableDescriptor tDesc = table.getDescriptor(); for (String cf : cfSet) { - if(!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { + if (!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { unmatchedFamilies.add(cf); } } - if(unmatchedFamilies.size() > 0) { + if (unmatchedFamilies.size() > 0) { ArrayList familyNames = new ArrayList<>(); for (ColumnFamilyDescriptor family : table.getDescriptor().getColumnFamilies()) { familyNames.add(family.getNameAsString()); } - String msg = - "Column Families " + unmatchedFamilies + " specified in " + COLUMNS_CONF_KEY - + " does not match with any of the table " + tableName - + " column families " + familyNames + ".\n" - + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY - + "=true.\n"; + String msg = "Column Families " + unmatchedFamilies + " specified in " + + COLUMNS_CONF_KEY + " does not match with any of the table " + tableName + + " column families " + familyNames + ".\n" + + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY + "=true.\n"; usage(msg); System.exit(-1); } @@ -557,7 +545,7 @@ public class ImportTsv extends Configured implements Tool { Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), - regionLocator); + regionLocator); } } } else { @@ -568,9 +556,8 @@ public class ImportTsv extends Configured implements Tool { } if (mapperClass.equals(TsvImporterTextMapper.class)) { usage(TsvImporterTextMapper.class.toString() - + " should not be used for non bulkloading case. use " - + TsvImporterMapper.class.toString() - + " or custom mapper whose value type is Put."); + + " should not be used for non bulkloading case. 
use " + + TsvImporterMapper.class.toString() + " or custom mapper whose value type is Put."); System.exit(-1); } if (!isDryRun) { @@ -583,35 +570,37 @@ public class ImportTsv extends Configured implements Tool { if (isDryRun) { job.setOutputFormatClass(NullOutputFormat.class); job.getConfiguration().setStrings("io.serializations", - job.getConfiguration().get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + job.getConfiguration().get("io.serializations"), MutationSerialization.class.getName(), + ResultSerialization.class.getName(), CellSerialization.class.getName()); } TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Function.class /* Guava used by TsvParser */); + org.apache.hbase.thirdparty.com.google.common.base.Function.class /* + * Guava used by + * TsvParser + */); } } return job; } private static void createTable(Admin admin, TableName tableName, String[] columns) - throws IOException { + throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); Set cfSet = getColumnFamilies(columns); for (String cf : cfSet) { HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(cf)); htd.addFamily(hcd); } - LOG.warn(format("Creating table '%s' with '%s' columns and default descriptors.", - tableName, cfSet)); + LOG.warn( + format("Creating table '%s' with '%s' columns and default descriptors.", tableName, cfSet)); admin.createTable(htd); } private static void deleteTable(Configuration conf, String[] args) { TableName tableName = TableName.valueOf(args[0]); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { try { admin.disableTable(tableName); } catch (TableNotEnabledException e) { @@ -619,8 +608,7 @@ public class ImportTsv extends Configured implements Tool { } admin.deleteTable(tableName); } catch (IOException e) { - LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, - e.toString())); + LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, e.toString())); return; } LOG.info(format("Dry run: Deleted table '%s'.", tableName)); @@ -629,12 +617,13 @@ public class ImportTsv extends Configured implements Tool { private static Set getColumnFamilies(String[] columns) { Set cfSet = new HashSet<>(); for (String aColumn : columns) { - if (TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn) + if ( + TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn) || TsvParser.TIMESTAMPKEY_COLUMN_SPEC.equals(aColumn) || TsvParser.CELL_VISIBILITY_COLUMN_SPEC.equals(aColumn) || TsvParser.CELL_TTL_COLUMN_SPEC.equals(aColumn) - || TsvParser.ATTRIBUTES_COLUMN_SPEC.equals(aColumn)) - continue; + || TsvParser.ATTRIBUTES_COLUMN_SPEC.equals(aColumn) + ) continue; // we are only concerned with the first one (in case this is a cf:cq) cfSet.add(aColumn.split(":", 2)[0]); } @@ -642,64 +631,56 @@ public class ImportTsv extends Configured implements Tool { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. 
*/ private static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - String usage = - "Usage: " + NAME + " -D"+ COLUMNS_CONF_KEY + "=a,b,c \n" + - "\n" + - "Imports the given input directory of TSV data into the specified table.\n" + - "\n" + - "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + "\n" + - "option. This option takes the form of comma-separated column names, where each\n" + - "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + - "column name " + TsvParser.ROWKEY_COLUMN_SPEC + " is used to designate that this column should be used\n" + - "as the row key for each imported record. You must specify exactly one column\n" + - "to be the row key, and you must specify a column name for every column that exists in the\n" + - "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + - " designates that this column should be\n" + - "used as timestamp for each record. Unlike " + TsvParser.ROWKEY_COLUMN_SPEC + ", " + - TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + "\n" + - "You must specify at most one column as timestamp key for each imported record.\n" + - "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + - "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" + - "\n" + - "Other special columns that can be specified are " + TsvParser.CELL_TTL_COLUMN_SPEC + - " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + - TsvParser.CELL_TTL_COLUMN_SPEC + " designates that this column will be used " + - "as a Cell's Time To Live (TTL) attribute.\n" + - TsvParser.CELL_VISIBILITY_COLUMN_SPEC + " designates that this column contains the " + - "visibility label expression.\n" + - "\n" + - TsvParser.ATTRIBUTES_COLUMN_SPEC+" can be used to specify Operation Attributes per record.\n"+ - " Should be specified as key=>value where "+TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX+ " is used \n"+ - " as the seperator. Note that more than one OperationAttributes can be specified.\n"+ - "By default importtsv will load data directly into HBase. To instead generate\n" + - "HFiles of data to prepare for a bulk data load, pass the option:\n" + - " -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + - " Note: if you do not use this option, then the target table must already exist in HBase\n" + - "\n" + - "Other options that may be specified with -D include:\n" + - " -D" + DRY_RUN_CONF_KEY + "=true - Dry run mode. Data is not actually populated into" + - " table. 
If table does not exist, it is created but deleted in the end.\n" + - " -D" + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + - " -D" + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + - " -D" + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + - " '-D" + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + - " -D" + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + - " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + - DEFAULT_MAPPER.getName() + "\n" + - " -D" + JOB_NAME_CONF_KEY + "=jobName - use the specified mapreduce job name for the import\n" + - " -D" + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + - " Note: if you set this to 'no', then the target table must already exist in HBase\n" + - " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + - "Default is false\n\n" + - "For performance consider the following options:\n" + - " -Dmapreduce.map.speculative=false\n" + - " -Dmapreduce.reduce.speculative=false"; + String usage = "Usage: " + NAME + " -D" + COLUMNS_CONF_KEY + "=a,b,c \n" + + "\n" + "Imports the given input directory of TSV data into the specified table.\n" + "\n" + + "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + "\n" + + "option. This option takes the form of comma-separated column names, where each\n" + + "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + + "column name " + TsvParser.ROWKEY_COLUMN_SPEC + + " is used to designate that this column should be used\n" + + "as the row key for each imported record. You must specify exactly one column\n" + + "to be the row key, and you must specify a column name for every column that exists in the\n" + + "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + + " designates that this column should be\n" + "used as timestamp for each record. Unlike " + + TsvParser.ROWKEY_COLUMN_SPEC + ", " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + + "\n" + "You must specify at most one column as timestamp key for each imported record.\n" + + "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + + "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" + + "\n" + "Other special columns that can be specified are " + TsvParser.CELL_TTL_COLUMN_SPEC + + " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + TsvParser.CELL_TTL_COLUMN_SPEC + + " designates that this column will be used " + "as a Cell's Time To Live (TTL) attribute.\n" + + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + " designates that this column contains the " + + "visibility label expression.\n" + "\n" + TsvParser.ATTRIBUTES_COLUMN_SPEC + + " can be used to specify Operation Attributes per record.\n" + + " Should be specified as key=>value where " + TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX + + " is used \n" + + " as the seperator. Note that more than one OperationAttributes can be specified.\n" + + "By default importtsv will load data directly into HBase. 
To instead generate\n" + + "HFiles of data to prepare for a bulk data load, pass the option:\n" + " -D" + + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + + " Note: if you do not use this option, then the target table must already exist in HBase\n" + + "\n" + "Other options that may be specified with -D include:\n" + " -D" + DRY_RUN_CONF_KEY + + "=true - Dry run mode. Data is not actually populated into" + + " table. If table does not exist, it is created but deleted in the end.\n" + " -D" + + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + " -D" + + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + " -D" + + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + " '-D" + + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + " -D" + + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + + " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + + DEFAULT_MAPPER.getName() + "\n" + " -D" + JOB_NAME_CONF_KEY + + "=jobName - use the specified mapreduce job name for the import\n" + " -D" + + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + + " Note: if you set this to 'no', then the target table must already exist in HBase\n" + + " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + + "Default is false\n\n" + "For performance consider the following options:\n" + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"; System.err.println(usage); } @@ -719,8 +700,7 @@ public class ImportTsv extends Configured implements Tool { // Make sure columns are specified String[] columns = getConf().getStrings(COLUMNS_CONF_KEY); if (columns == null) { - usage("No columns specified. Please specify with -D" + - COLUMNS_CONF_KEY+"=..."); + usage("No columns specified. 
Please specify with -D" + COLUMNS_CONF_KEY + "=..."); return -1; } @@ -737,30 +717,27 @@ public class ImportTsv extends Configured implements Tool { // Make sure we have at most one column as the timestamp key int tskeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) - tskeysFound++; + if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) tskeysFound++; } if (tskeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); return -1; } int attrKeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) - attrKeysFound++; + if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) attrKeysFound++; } if (attrKeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.ATTRIBUTES_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.ATTRIBUTES_COLUMN_SPEC); return -1; } // Make sure one or more columns are specified excluding rowkey and // timestamp key if (columns.length - (rowkeysFound + tskeysFound + attrKeysFound) < 1) { - usage("One or more columns in addition to the row key and timestamp(optional) are required"); + usage( + "One or more columns in addition to the row key and timestamp(optional) are required"); return -1; } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java index 0127b51ab3f..76c64e79780 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.mapreduce; @@ -38,18 +37,16 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Finds the Jar for a class. If the class is in a directory in the - * classpath, it creates a Jar on the fly with the contents of the directory - * and returns the path to that Jar. If a Jar is created, it is created in - * the system temporary directory. - * - * This file was forked from hadoop/common/branches/branch-2@1377176. + * Finds the Jar for a class. If the class is in a directory in the classpath, it creates a Jar on + * the fly with the contents of the directory and returns the path to that Jar. If a Jar is created, + * it is created in the system temporary directory. This file was forked from + * hadoop/common/branches/branch-2@1377176. 
*/ @InterfaceAudience.Private public final class JarFinder { - private static void copyToZipStream(File file, ZipEntry entry, - ZipOutputStream zos) throws IOException { + private static void copyToZipStream(File file, ZipEntry entry, ZipOutputStream zos) + throws IOException { InputStream is = new FileInputStream(file); try { zos.putNextEntry(entry); @@ -68,8 +65,7 @@ public final class JarFinder { } } - public static void jarDir(File dir, String relativePath, ZipOutputStream zos) - throws IOException { + public static void jarDir(File dir, String relativePath, ZipOutputStream zos) throws IOException { Preconditions.checkNotNull(relativePath, "relativePath"); Preconditions.checkNotNull(zos, "zos"); @@ -89,8 +85,8 @@ public final class JarFinder { zos.close(); } - private static void zipDir(File dir, String relativePath, ZipOutputStream zos, - boolean start) throws IOException { + private static void zipDir(File dir, String relativePath, ZipOutputStream zos, boolean start) + throws IOException { String[] dirList = dir.list(); if (dirList == null) { return; @@ -107,8 +103,7 @@ public final class JarFinder { String filePath = f.getPath(); File file = new File(filePath); zipDir(file, relativePath + f.getName() + "/", zos, false); - } - else { + } else { String path = relativePath + f.getName(); if (!path.equals(JarFile.MANIFEST_NAME)) { ZipEntry anEntry = new ZipEntry(path); @@ -125,22 +120,18 @@ public final class JarFinder { File jarDir = jarFile.getParentFile(); if (!jarDir.exists()) { if (!jarDir.mkdirs()) { - throw new IOException(MessageFormat.format("could not create dir [{0}]", - jarDir)); + throw new IOException(MessageFormat.format("could not create dir [{0}]", jarDir)); } } try (FileOutputStream fos = new FileOutputStream(jarFile); - JarOutputStream jos = new JarOutputStream(fos)) { + JarOutputStream jos = new JarOutputStream(fos)) { jarDir(dir, "", jos); } } /** - * Returns the full path to the Jar containing the class. It always return a - * JAR. - * + * Returns the full path to the Jar containing the class. It always return a JAR. * @param klass class. - * * @return path to the Jar containing the class. 
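JarFinder.getJar, whose Javadoc is reflowed above, either returns the jar that already contains a class or packages the class's classpath directory into a temporary jar. A small sketch of calling it; illustrative only, since the class is annotated @InterfaceAudience.Private and is not a public API:

    import org.apache.hadoop.hbase.mapreduce.JarFinder;

    public class JarFinderSketch {
      public static void main(String[] args) {
        // Returns the jar containing the class, creating a temporary jar if the class
        // was loaded from a plain directory on the classpath.
        String jar = JarFinder.getJar(JarFinderSketch.class);
        System.out.println("Resolved jar: " + jar);
      }
    }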
*/ public static String getJar(Class klass) { @@ -149,8 +140,7 @@ public final class JarFinder { if (loader != null) { String class_file = klass.getName().replaceAll("\\.", "/") + ".class"; try { - for (Enumeration itr = loader.getResources(class_file); - itr.hasMoreElements(); ) { + for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) { URL url = (URL) itr.nextElement(); String path = url.getPath(); if (path.startsWith("file:")) { @@ -160,8 +150,7 @@ public final class JarFinder { if ("jar".equals(url.getProtocol())) { path = URLDecoder.decode(path, "UTF-8"); return path.replaceAll("!.*$", ""); - } - else if ("file".equals(url.getProtocol())) { + } else if ("file".equals(url.getProtocol())) { String klassName = klass.getName(); klassName = klassName.replace(".", "/") + ".class"; path = path.substring(0, path.length() - klassName.length()); @@ -178,13 +167,13 @@ public final class JarFinder { return tempJar.getAbsolutePath(); } } - } - catch (IOException e) { + } catch (IOException e) { throw new RuntimeException(e); } } return null; } - private JarFinder() {} + private JarFinder() { + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java index 3207712f72b..ad01f834112 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,18 +22,16 @@ import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; + /** - * Use to specify the type of serialization for the mappers - * and reducers - * @deprecated Use {@link CellSerialization}. Will be - * removed from 3.0 onwards + * Use to specify the type of serialization for the mappers and reducers + * @deprecated Use {@link CellSerialization}. Will be removed from 3.0 onwards */ @Deprecated @InterfaceAudience.Public @@ -63,7 +61,7 @@ public class KeyValueSerialization implements Serialization { @Override public KeyValue deserialize(KeyValue ignore) throws IOException { - // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO + // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO return KeyValueUtil.create(this.dis); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java index 2fb1e7aad1c..efe62d60d45 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,31 +18,27 @@ package org.apache.hadoop.hbase.mapreduce; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted KeyValues. - * Reads in all KeyValues from passed Iterator, sorts them, then emits - * KeyValues in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted KeyValues. Reads in all KeyValues from passed Iterator, sorts them, then emits + * KeyValues in sorted order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 - * @deprecated Use {@link CellSortReducer}. Will be removed from - * 3.0 onwards + * @deprecated Use {@link CellSortReducer}. Will be removed from 3.0 onwards */ @Deprecated @InterfaceAudience.Public public class KeyValueSortReducer - extends Reducer { + extends Reducer { protected void reduce(ImmutableBytesWritable row, Iterable kvs, - Reducer.Context context) - throws java.io.IOException, InterruptedException { + Reducer.Context context) + throws java.io.IOException, InterruptedException { TreeSet map = new TreeSet<>(CellComparatorImpl.COMPARATOR); - for (KeyValue kv: kvs) { + for (KeyValue kv : kvs) { try { map.add(kv.clone()); } catch (CloneNotSupportedException e) { @@ -52,7 +47,7 @@ public class KeyValueSortReducer } context.setStatus("Read " + map.getClass()); int index = 0; - for (KeyValue kv: map) { + for (KeyValue kv : map) { context.write(row, kv); if (++index % 100 == 0) context.setStatus("Wrote " + index); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java index 6410bf8726c..fb42e332833 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,42 +6,33 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
      + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.Job; - import java.io.IOException; import java.nio.charset.Charset; import java.util.List; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Create 3 level tree directory, first level is using table name as parent - * directory and then use family name as child directory, and all related HFiles - * for one family are under child directory - * -tableName1 - * -columnFamilyName1 - * -columnFamilyName2 - * -HFiles - * -tableName2 - * -columnFamilyName1 - * -HFiles - * -columnFamilyName2 + * Create 3 level tree directory, first level is using table name as parent directory and then use + * family name as child directory, and all related HFiles for one family are under child directory + * -tableName1 -columnFamilyName1 -columnFamilyName2 -HFiles -tableName2 -columnFamilyName1 -HFiles + * -columnFamilyName2 */ @InterfaceAudience.Public public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { @@ -50,13 +41,11 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { /** * Creates a composite key to use as a mapper output key when using * MultiTableHFileOutputFormat.configureIncrementaLoad to set up bulk ingest job - * * @param tableName Name of the Table - Eg: TableName.getNameAsString() * @param suffix Usually represents a rowkey when creating a mapper key or column family - * @return byte[] representation of composite key + * @return byte[] representation of composite key */ - public static byte[] createCompositeKey(byte[] tableName, - byte[] suffix) { + public static byte[] createCompositeKey(byte[] tableName, byte[] suffix) { return combineTableNameSuffix(tableName, suffix); } @@ -64,8 +53,7 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { * Alternate api which accepts an ImmutableBytesWritable for the suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(byte[] tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(byte[] tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName, suffix.get()); } @@ -74,26 +62,22 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { * suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(String tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(String tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName.getBytes(Charset.forName("UTF-8")), 
suffix.get()); } /** * Analogous to - * {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, - * this function will configure the requisite number of reducers to write HFiles for multple - * tables simultaneously - * + * {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, this + * function will configure the requisite number of reducers to write HFiles for multple tables + * simultaneously * @param job See {@link org.apache.hadoop.mapreduce.Job} - * @param multiTableDescriptors Table descriptor and region locator pairs - * @throws IOException + * @param multiTableDescriptors Table descriptor and region locator pairs n */ - public static void configureIncrementalLoad(Job job, List - multiTableDescriptors) - throws IOException { + public static void configureIncrementalLoad(Job job, List multiTableDescriptors) + throws IOException { MultiTableHFileOutputFormat.configureIncrementalLoad(job, multiTableDescriptors, - MultiTableHFileOutputFormat.class); + MultiTableHFileOutputFormat.class); } final private static int validateCompositeKey(byte[] keyBytes) { @@ -102,8 +86,8 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { // Either the separator was not found or a tablename wasn't present or a key wasn't present if (separatorIdx == -1) { - throw new IllegalArgumentException("Invalid format for composite key [" + Bytes - .toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); + throw new IllegalArgumentException("Invalid format for composite key [" + + Bytes.toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); } return separatorIdx; } @@ -115,6 +99,6 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { protected static byte[] getSuffix(byte[] keyBytes) { int separatorIdx = validateCompositeKey(keyBytes); - return Bytes.copy(keyBytes, separatorIdx+1, keyBytes.length - separatorIdx - 1); + return Bytes.copy(keyBytes, separatorIdx + 1, keyBytes.length - separatorIdx - 1); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java index d0cffb32531..f657b62e4b5 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,16 +20,13 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Scan; +import org.apache.yetus.audience.InterfaceAudience; /** - * Convert HBase tabular data from multiple scanners into a format that - * is consumable by Map/Reduce. - * + * Convert HBase tabular data from multiple scanners into a format that is consumable by Map/Reduce. *

      * Usage example *

      @@ -49,13 +46,12 @@ import org.apache.hadoop.hbase.client.Scan; * scan1.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, table2); * scans.add(scan2); * - * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, - * IntWritable.class, job); + * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, IntWritable.class, + * job); * */ @InterfaceAudience.Public -public class MultiTableInputFormat extends MultiTableInputFormatBase implements - Configurable { +public class MultiTableInputFormat extends MultiTableInputFormatBase implements Configurable { /** Job parameter that specifies the scan list. */ public static final String SCANS = "hbase.mapreduce.scans"; @@ -65,7 +61,6 @@ public class MultiTableInputFormat extends MultiTableInputFormatBase implements /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -75,20 +70,17 @@ public class MultiTableInputFormat extends MultiTableInputFormatBase implements } /** - * Sets the configuration. This is used to set the details for the tables to - * be scanned. - * + * Sets the configuration. This is used to set the details for the tables to be scanned. * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] rawScans = conf.getStrings(SCANS); if (rawScans.length <= 0) { - throw new IllegalArgumentException("There must be at least 1 scan configuration set to : " - + SCANS); + throw new IllegalArgumentException( + "There must be at least 1 scan configuration set to : " + SCANS); } List scans = new ArrayList<>(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java index ec789f2bdcd..91aa896349b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.text.MessageFormat; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.Map; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -42,18 +41,17 @@ import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; /** - * A base for {@link MultiTableInputFormat}s. 
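As a companion to the usage example being reflowed here, a minimal job-setup sketch; the table names and the MyTableMapper, Text and IntWritable types are illustrative only.

    // Assumes an existing Configuration conf and a hypothetical MyTableMapper
    // that extends TableMapper<Text, IntWritable>.
    List<Scan> scans = new ArrayList<>();

    Scan scan1 = new Scan();
    scan1.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes("table1"));
    scans.add(scan1);

    Scan scan2 = new Scan();
    scan2.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes("table2"));
    scans.add(scan2);

    Job job = Job.getInstance(conf, "multi-table-scan");
    TableMapReduceUtil.initTableMapperJob(scans, MyTableMapper.class, Text.class,
        IntWritable.class, job);

The list-of-scans overload of initTableMapperJob is what populates the SCANS property ("hbase.mapreduce.scans") that setConf reads back in the hunk above.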
Receives a list of - * {@link Scan} instances that define the input tables and - * filters etc. Subclasses may use other TableRecordReader implementations. + * A base for {@link MultiTableInputFormat}s. Receives a list of {@link Scan} instances that define + * the input tables and filters etc. Subclasses may use other TableRecordReader implementations. */ @InterfaceAudience.Public -public abstract class MultiTableInputFormatBase extends - InputFormat { +public abstract class MultiTableInputFormatBase + extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(MultiTableInputFormatBase.class); @@ -64,27 +62,24 @@ public abstract class MultiTableInputFormatBase extends private TableRecordReader tableRecordReader = null; /** - * Builds a TableRecordReader. If no TableRecordReader was provided, uses the - * default. - * - * @param split The split to work with. + * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default. + * @param split The split to work with. * @param context The current context. * @return The newly created record reader. - * @throws IOException When creating the reader fails. + * @throws IOException When creating the reader fails. * @throws InterruptedException when record reader initialization fails * @see InputFormat#createRecordReader(InputSplit, TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { TableSplit tSplit = (TableSplit) split; LOG.info(MessageFormat.format("Input split length: {0} bytes.", tSplit.getLength())); if (tSplit.getTable() == null) { throw new IOException("Cannot create a record reader because of a" - + " previous error. Please look at the previous logs lines from" - + " the task's full log for more details."); + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."); } final Connection connection = ConnectionFactory.createConnection(context.getConfiguration()); Table table = connection.getTable(tSplit.getTable()); @@ -125,7 +120,7 @@ public abstract class MultiTableInputFormatBase extends @Override public void initialize(InputSplit inputsplit, TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { trr.initialize(inputsplit, context); } @@ -144,9 +139,8 @@ public abstract class MultiTableInputFormatBase extends } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. - * + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. 
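The includeRegionInSplit hook reformatted in the later hunks is the extension point this class documents for skipping regions; a small sketch of a subclass using it, with a purely hypothetical cutoff key:

    import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RangeLimitedMultiTableInputFormat extends MultiTableInputFormat {
      // Placeholder boundary; regions whose start key sorts at or beyond it are skipped.
      private static final byte[] CUTOFF = Bytes.toBytes("row-50000");

      @Override
      protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
        // Keep only regions that begin below the cutoff.
        return Bytes.compareTo(startKey, CUTOFF) < 0;
      }
    }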
@@ -161,8 +155,7 @@ public abstract class MultiTableInputFormatBase extends Map> tableMaps = new HashMap<>(); for (Scan scan : scans) { byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME); - if (tableNameBytes == null) - throw new IOException("A scan object did not have a table name"); + if (tableNameBytes == null) throw new IOException("A scan object did not have a table name"); TableName tableName = TableName.valueOf(tableNameBytes); @@ -183,14 +176,14 @@ public abstract class MultiTableInputFormatBase extends TableName tableName = entry.getKey(); List scanList = entry.getValue(); try (Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { - RegionSizeCalculator sizeCalculator = new RegionSizeCalculator( - regionLocator, conn.getAdmin()); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + RegionSizeCalculator sizeCalculator = + new RegionSizeCalculator(regionLocator, conn.getAdmin()); Pair keys = regionLocator.getStartEndKeys(); for (Scan scan : scanList) { if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { - throw new IOException("Expecting at least one region for table : " - + tableName.getNameAsString()); + throw new IOException( + "Expecting at least one region for table : " + tableName.getNameAsString()); } int count = 0; @@ -202,29 +195,28 @@ public abstract class MultiTableInputFormatBase extends continue; } - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || Bytes.compareTo(stopRow, - keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; + if ( + (startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0) + ) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; - HRegionLocation hregionLocation = regionLocator.getRegionLocation( - keys.getFirst()[i], false); + HRegionLocation hregionLocation = + regionLocator.getRegionLocation(keys.getFirst()[i], false); String regionHostname = hregionLocation.getHostname(); HRegionInfo regionInfo = hregionLocation.getRegionInfo(); String encodedRegionName = regionInfo.getEncodedName(); - long regionSize = sizeCalculator.getRegionSize( - regionInfo.getRegionName()); + long regionSize = sizeCalculator.getRegionSize(regionInfo.getRegionName()); - TableSplit split = new TableSplit(table.getName(), - scan, splitStart, splitStop, regionHostname, - encodedRegionName, regionSize); + TableSplit split = new TableSplit(table.getName(), scan, splitStart, splitStop, + regionHostname, encodedRegionName, regionSize); splits.add(split); @@ -242,29 +234,25 @@ public abstract class MultiTableInputFormatBase extends } /** - * Test if the given region is to be included in the InputSplit while - * splitting the regions of a table. 
+ * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *

      - * This optimization is effective when there is a specific reasoning to - * exclude an entire region from the M-R job, (and hence, not contributing to - * the InputSplit), given the start and end keys of the same.
      - * Useful when we need to remember the last-processed top record and revisit - * the [last, current) interval for M-R processing, continuously. In addition - * to reducing InputSplits, reduces the load on the region server as well, due - * to the ordering of the keys.
      + * This optimization is effective when there is a specific reasoning to exclude an entire region + * from the M-R job, (and hence, not contributing to the InputSplit), given the start and end keys + * of the same.
      + * Useful when we need to remember the last-processed top record and revisit the [last, current) + * interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the + * load on the region server as well, due to the ordering of the keys.
      *
      - * Note: It is possible that endKey.length() == 0 , for the last - * (recent) region.
      - * Override this method, if you want to bulk exclude regions altogether from - * M-R. By default, no region is excluded( i.e. all regions are included). - * + * Note: It is possible that endKey.length() == 0 , for the last (recent) region. + *
      + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region - * @param endKey End key of the region - * @return true, if this region needs to be included as part of the input - * (default). + * @param endKey End key of the region + * @return true, if this region needs to be included as part of the input (default). */ - protected boolean includeRegionInSplit(final byte[] startKey, - final byte[] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -277,7 +265,6 @@ public abstract class MultiTableInputFormatBase extends /** * Allows subclasses to set the list of {@link Scan} objects. - * * @param scans The list of {@link Scan} used to define the input */ protected void setScans(List scans) { @@ -286,9 +273,7 @@ public abstract class MultiTableInputFormatBase extends /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java index 2a4fae94409..5a5d1149755 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.HashMap; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; @@ -32,9 +27,9 @@ import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.JobContext; @@ -42,21 +37,22 @@ import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** *

      - * Hadoop output format that writes to one or more HBase tables. The key is - * taken to be the table name while the output value must be either a - * {@link Put} or a {@link Delete} instance. All tables must already exist, and - * all Puts and Deletes must reference only valid column families. + * Hadoop output format that writes to one or more HBase tables. The key is taken to be the table + * name while the output value must be either a {@link Put} or a {@link Delete} instance. + * All tables must already exist, and all Puts and Deletes must reference only valid column + * families. *

      - * *

      - * Write-ahead logging (WAL) for Puts can be disabled by setting - * {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}. - * Note that disabling write-ahead logging is only appropriate for jobs where - * loss of data due to region server failure can be tolerated (for example, - * because it is easy to rerun a bulk import). + * Write-ahead logging (WAL) for Puts can be disabled by setting {@link #WAL_PROPERTY} to + * {@link #WAL_OFF}. Default value is {@link #WAL_ON}. Note that disabling write-ahead logging is + * only appropriate for jobs where loss of data due to region server failure can be tolerated (for + * example, because it is easy to rerun a bulk import). *

      */ @InterfaceAudience.Public @@ -67,11 +63,12 @@ public class MultiTableOutputFormat extends OutputFormat { + protected static class MultiTableRecordWriter + extends RecordWriter { private static final Logger LOG = LoggerFactory.getLogger(MultiTableRecordWriter.class); Connection connection; Map mutatorMap = new HashMap<>(); @@ -79,36 +76,29 @@ public class MultiTableOutputFormat extends OutputFormatfalse) to improve performance when bulk loading data. + * n * HBaseConfiguration to used n * whether to use write ahead logging. This can be turned off + * ( false) to improve performance when bulk loading data. */ - public MultiTableRecordWriter(Configuration conf, - boolean useWriteAheadLogging) throws IOException { - LOG.debug("Created new MultiTableRecordReader with WAL " - + (useWriteAheadLogging ? "on" : "off")); + public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging) + throws IOException { + LOG.debug( + "Created new MultiTableRecordReader with WAL " + (useWriteAheadLogging ? "on" : "off")); this.conf = conf; this.useWriteAheadLogging = useWriteAheadLogging; } /** - * @param tableName - * the name of the table, as a string - * @return the named mutator - * @throws IOException - * if there is a problem opening a table + * n * the name of the table, as a string + * @return the named mutator n * if there is a problem opening a table */ BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException { - if(this.connection == null){ + if (this.connection == null) { this.connection = ConnectionFactory.createConnection(conf); } if (!mutatorMap.containsKey(tableName)) { - LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing"); + LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get()) + "\" for writing"); - BufferedMutator mutator = - connection.getBufferedMutator(TableName.valueOf(tableName.get())); + BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(tableName.get())); mutatorMap.put(tableName, mutator); } return mutatorMap.get(tableName); @@ -125,14 +115,8 @@ public class MultiTableOutputFormat extends OutputFormat getRecordWriter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); return new MultiTableRecordWriter(HBaseConfiguration.create(conf), - conf.getBoolean(WAL_PROPERTY, WAL_ON)); + conf.getBoolean(WAL_PROPERTY, WAL_ON)); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java index 07c0820c1ae..8b15140b46a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,43 +15,35 @@ * See the License for the specific language governing permissions and * limitations under the License. 
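For the MultiTableOutputFormat hunks above, a sketch of a reducer that routes mutations by table name; the "summary" table and the column names are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class MultiTableWriter
        extends Reducer<Text, IntWritable, ImmutableBytesWritable, Mutation> {
      private static final ImmutableBytesWritable SUMMARY_TABLE =
          new ImmutableBytesWritable(Bytes.toBytes("summary"));

      @Override
      protected void reduce(Text key, Iterable<IntWritable> values, Context context)
          throws IOException, InterruptedException {
        long total = 0;
        for (IntWritable v : values) {
          total += v.get();
        }
        Put put = new Put(Bytes.toBytes(key.toString()));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("total"), Bytes.toBytes(total));
        // The output key names the destination table; the value is the mutation itself.
        context.write(SUMMARY_TABLE, put);
      }
    }

The driver would pair this with job.setOutputFormatClass(MultiTableOutputFormat.class), and the WAL_PROPERTY / WAL_OFF constants shown above can be set on the configuration when the bulk-import caveat in the javadoc applies.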
*/ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.JobContext; - import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * MultiTableSnapshotInputFormat generalizes - * {@link TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link TableSnapshotInputFormat} - * and thus has the same performance advantages; - * see {@link TableSnapshotInputFormat} for - * more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan - * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob - * (Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, Path)} + * MultiTableSnapshotInputFormat generalizes {@link TableSnapshotInputFormat} allowing a MapReduce + * job to run over one or more table snapshots, with one or more scans configured for each. + * Internally, the input format delegates to {@link TableSnapshotInputFormat} and thus has the same + * performance advantages; see {@link TableSnapshotInputFormat} for more details. Usage is similar + * to TableSnapshotInputFormat, with the following exception: initMultiTableSnapshotMapperJob takes + * in a map from snapshot name to a collection of scans. For each snapshot in the map, each + * corresponding scan will be applied; the overall dataset for the job is defined by the + * concatenation of the regions and tables included in each snapshot/scan pair. + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob (Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, Path)} * can be used to configure the job. - *
      {@code
      + *
      + * 
      + * {@code
        * Job job = new Job(conf);
        * Map> snapshotScans = ImmutableMap.of(
        *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
      @@ -63,14 +55,11 @@ import java.util.Map;
        *      MyMapOutputValueWritable.class, job, true, restoreDir);
        * }
        * 
      - * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link TableSnapshotInputFormat} for more notes on - * permissioning; the same caveats apply here. * + * Internally, this input format restores each snapshot into a subdirectory of the given tmp + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link TableSnapshotInputFormat} for more notes on permissioning; the same caveats apply here. * @see TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ @@ -85,9 +74,9 @@ public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat { @Override public List getSplits(JobContext jobContext) - throws IOException, InterruptedException { + throws IOException, InterruptedException { List splits = - delegate.getSplits(jobContext.getConfiguration()); + delegate.getSplits(jobContext.getConfiguration()); List rtn = Lists.newArrayListWithCapacity(splits.size()); for (TableSnapshotInputFormatImpl.InputSplit split : splits) { @@ -98,7 +87,7 @@ public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat { } public static void setInput(Configuration configuration, - Map> snapshotScans, Path tmpRestoreDir) throws IOException { + Map> snapshotScans, Path tmpRestoreDir) throws IOException { new MultiTableSnapshotInputFormatImpl().setInput(configuration, snapshotScans, tmpRestoreDir); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java index 76254c3f6ba..93dac05101c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; @@ -42,8 +41,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * Shared implementation of mapreduce code over multiple table snapshots. - * Utilized by both mapreduce + * Shared implementation of mapreduce code over multiple table snapshots. Utilized by both mapreduce * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat} and mapred * {@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations. 
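To complement the reflowed javadoc example, a sketch that goes through the static setInput shown above; the snapshot names and restore path are placeholders.

    // Assumes an existing Job job; runs inside a method that throws IOException.
    Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
    snapshotScans.put("snapshot1", Collections.singletonList(
        new Scan().withStartRow(Bytes.toBytes("a")).withStopRow(Bytes.toBytes("b"))));
    snapshotScans.put("snapshot2", Collections.singletonList(new Scan()));

    Path restoreDir = new Path("/tmp/snapshot_restore_dir"); // scratch directory, placeholder
    MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, restoreDir);
    job.setInputFormatClass(MultiTableSnapshotInputFormat.class);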
*/ @@ -51,12 +49,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; @InterfaceStability.Evolving public class MultiTableSnapshotInputFormatImpl { private static final Logger LOG = - LoggerFactory.getLogger(MultiTableSnapshotInputFormatImpl.class); + LoggerFactory.getLogger(MultiTableSnapshotInputFormatImpl.class); public static final String RESTORE_DIRS_KEY = - "hbase.MultiTableSnapshotInputFormat.restore.snapshotDirMapping"; + "hbase.MultiTableSnapshotInputFormat.restore.snapshotDirMapping"; public static final String SNAPSHOT_TO_SCANS_KEY = - "hbase.MultiTableSnapshotInputFormat.snapshotsToScans"; + "hbase.MultiTableSnapshotInputFormat.snapshotsToScans"; /** * Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of @@ -65,13 +63,13 @@ public class MultiTableSnapshotInputFormatImpl { * Sets: {@link #RESTORE_DIRS_KEY}, {@link #SNAPSHOT_TO_SCANS_KEY} */ public void setInput(Configuration conf, Map> snapshotScans, - Path restoreDir) throws IOException { + Path restoreDir) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); setSnapshotToScans(conf, snapshotScans); Map restoreDirs = - generateSnapshotToRestoreDirMapping(snapshotScans.keySet(), restoreDir); + generateSnapshotToRestoreDirMapping(snapshotScans.keySet(), restoreDir); setSnapshotDirs(conf, restoreDirs); restoreSnapshots(conf, restoreDirs, fs); } @@ -79,13 +77,11 @@ public class MultiTableSnapshotInputFormatImpl { /** * Return the list of splits extracted from the scans/snapshots pushed to conf by * {@link #setInput(Configuration, Map, Path)} - * * @param conf Configuration to determine splits from - * @return Return the list of splits extracted from the scans/snapshots pushed to conf - * @throws IOException + * @return Return the list of splits extracted from the scans/snapshots pushed to conf n */ public List getSplits(Configuration conf) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); @@ -99,13 +95,13 @@ public class MultiTableSnapshotInputFormatImpl { Path restoreDir = snapshotsToRestoreDirs.get(snapshotName); SnapshotManifest manifest = - TableSnapshotInputFormatImpl.getSnapshotManifest(conf, snapshotName, rootDir, fs); + TableSnapshotInputFormatImpl.getSnapshotManifest(conf, snapshotName, rootDir, fs); List regionInfos = - TableSnapshotInputFormatImpl.getRegionInfosFromManifest(manifest); + TableSnapshotInputFormatImpl.getRegionInfosFromManifest(manifest); for (Scan scan : entry.getValue()) { List splits = - TableSnapshotInputFormatImpl.getSplits(scan, manifest, regionInfos, restoreDir, conf); + TableSnapshotInputFormatImpl.getSplits(scan, manifest, regionInfos, restoreDir, conf); rtn.addAll(splits); } } @@ -115,17 +111,15 @@ public class MultiTableSnapshotInputFormatImpl { /** * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by * {@link #setSnapshotToScans(Configuration, Map)} - * * @param conf Configuration to extract name -> list<scan> mappings from. 
- * @return the snapshot name -> list<scan> mapping pushed to configuration - * @throws IOException + * @return the snapshot name -> list<scan> mapping pushed to configuration n */ public Map> getSnapshotsToScans(Configuration conf) throws IOException { Map> rtn = Maps.newHashMap(); - for (Map.Entry entry : ConfigurationUtil - .getKeyValues(conf, SNAPSHOT_TO_SCANS_KEY)) { + for (Map.Entry entry : ConfigurationUtil.getKeyValues(conf, + SNAPSHOT_TO_SCANS_KEY)) { String snapshotName = entry.getKey(); String scan = entry.getValue(); @@ -142,14 +136,10 @@ public class MultiTableSnapshotInputFormatImpl { } /** - * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) - * - * @param conf - * @param snapshotScans - * @throws IOException + * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) nnn */ public void setSnapshotToScans(Configuration conf, Map> snapshotScans) - throws IOException { + throws IOException { // flatten out snapshotScans for serialization to the job conf List> snapshotToSerializedScans = Lists.newArrayList(); @@ -160,7 +150,7 @@ public class MultiTableSnapshotInputFormatImpl { // serialize all scans and map them to the appropriate snapshot for (Scan scan : scans) { snapshotToSerializedScans.add(new AbstractMap.SimpleImmutableEntry<>(snapshotName, - TableMapReduceUtil.convertScanToString(scan))); + TableMapReduceUtil.convertScanToString(scan))); } } @@ -170,10 +160,8 @@ public class MultiTableSnapshotInputFormatImpl { /** * Retrieve the directories into which snapshots have been restored from * ({@link #RESTORE_DIRS_KEY}) - * * @param conf Configuration to extract restore directories from - * @return the directories into which snapshots have been restored from - * @throws IOException + * @return the directories into which snapshots have been restored from n */ public Map getSnapshotDirs(Configuration conf) throws IOException { List> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY); @@ -197,20 +185,19 @@ public class MultiTableSnapshotInputFormatImpl { } /** - * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and - * return a map from the snapshot to the restore directory. - * + * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and return a + * map from the snapshot to the restore directory. * @param snapshots collection of snapshot names to restore * @param baseRestoreDir base directory under which all snapshots in snapshots will be restored * @return a mapping from snapshot name to the directory in which that snapshot has been restored */ private Map generateSnapshotToRestoreDirMapping(Collection snapshots, - Path baseRestoreDir) { + Path baseRestoreDir) { Map rtn = Maps.newHashMap(); for (String snapshotName : snapshots) { Path restoreSnapshotDir = - new Path(baseRestoreDir, snapshotName + "__" + UUID.randomUUID().toString()); + new Path(baseRestoreDir, snapshotName + "__" + UUID.randomUUID().toString()); rtn.put(snapshotName, restoreSnapshotDir); } @@ -219,13 +206,12 @@ public class MultiTableSnapshotInputFormatImpl { /** * Restore each (snapshot name, restore directory) pair in snapshotToDir - * * @param conf configuration to restore with * @param snapshotToDir mapping from snapshot names to restore directories * @param fs filesystem to do snapshot restoration on */ public void restoreSnapshots(Configuration conf, Map snapshotToDir, FileSystem fs) - throws IOException { + throws IOException { // TODO: restore from record readers to parallelize. 
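A minimal round trip through the two configuration keys handled here, useful mainly to see what setSnapshotToScans and getSnapshotsToScans do with the mapping; no cluster is involved, and the snapshot name is a placeholder.

    // Runs inside a method that throws IOException.
    Configuration conf = HBaseConfiguration.create();
    Map<String, Collection<Scan>> in = new HashMap<>();
    in.put("snapshot1",
        Arrays.asList(new Scan(), new Scan().withStartRow(Bytes.toBytes("r"))));

    MultiTableSnapshotInputFormatImpl impl = new MultiTableSnapshotInputFormatImpl();
    impl.setSnapshotToScans(conf, in);      // serializes each Scan under SNAPSHOT_TO_SCANS_KEY
    Map<String, Collection<Scan>> out = impl.getSnapshotsToScans(conf); // reads the mapping back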
Path rootDir = CommonFSUtils.getRootDir(conf); @@ -233,13 +219,13 @@ public class MultiTableSnapshotInputFormatImpl { String snapshotName = entry.getKey(); Path restoreDir = entry.getValue(); LOG.info("Restoring snapshot " + snapshotName + " into " + restoreDir - + " for MultiTableSnapshotInputFormat"); + + " for MultiTableSnapshotInputFormat"); restoreSnapshot(conf, snapshotName, rootDir, restoreDir, fs); } } void restoreSnapshot(Configuration conf, String snapshotName, Path rootDir, Path restoreDir, - FileSystem fs) throws IOException { + FileSystem fs) throws IOException { RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java index 4ad1935f37a..04f4dbf960c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,25 +42,23 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Multithreaded implementation for @link org.apache.hbase.mapreduce.TableMapper *

      - * It can be used instead when the Map operation is not CPU - * bound in order to improve throughput. + * It can be used instead when the Map operation is not CPU bound in order to improve throughput. *

      * Mapper implementations using this MapRunnable must be thread-safe. *

      - * The Map-Reduce job has to be configured with the mapper to use via - * {@link #setMapperClass} and the number of thread the thread-pool can use with the - * {@link #getNumberOfThreads} method. The default value is 10 threads. + * The Map-Reduce job has to be configured with the mapper to use via {@link #setMapperClass} and + * the number of thread the thread-pool can use with the {@link #getNumberOfThreads} method. The + * default value is 10 threads. *

      */ @InterfaceAudience.Private public class MultithreadedTableMapper extends TableMapper { private static final Logger LOG = LoggerFactory.getLogger(MultithreadedTableMapper.class); - private Class> mapClass; + private Class> mapClass; private Context outer; private ExecutorService executor; public static final String NUMBER_OF_THREADS = "hbase.mapreduce.multithreadedmapper.threads"; @@ -72,51 +70,46 @@ public class MultithreadedTableMapper extends TableMapper { * @return the number of threads */ public static int getNumberOfThreads(JobContext job) { - return job.getConfiguration(). - getInt(NUMBER_OF_THREADS, 10); + return job.getConfiguration().getInt(NUMBER_OF_THREADS, 10); } /** * Set the number of threads in the pool for running maps. - * @param job the job to modify + * @param job the job to modify * @param threads the new number of threads */ public static void setNumberOfThreads(Job job, int threads) { - job.getConfiguration().setInt(NUMBER_OF_THREADS, - threads); + job.getConfiguration().setInt(NUMBER_OF_THREADS, threads); } /** * Get the application's mapper class. * @param the map's output key type * @param the map's output value type - * @param job the job + * @param job the job * @return the mapper class to run */ @SuppressWarnings("unchecked") - public static - Class> getMapperClass(JobContext job) { - return (Class>) - job.getConfiguration().getClass( MAPPER_CLASS, - Mapper.class); + public static Class> + getMapperClass(JobContext job) { + return (Class>) job.getConfiguration() + .getClass(MAPPER_CLASS, Mapper.class); } /** * Set the application's mapper class. * @param the map output key type * @param the map output value type - * @param job the job to modify - * @param cls the class to use as the mapper + * @param job the job to modify + * @param cls the class to use as the mapper */ - public static - void setMapperClass(Job job, - Class> cls) { + public static void setMapperClass(Job job, + Class> cls) { if (MultithreadedTableMapper.class.isAssignableFrom(cls)) { - throw new IllegalArgumentException("Can't have recursive " + - "MultithreadedTableMapper instances."); + throw new IllegalArgumentException( + "Can't have recursive " + "MultithreadedTableMapper instances."); } - job.getConfiguration().setClass(MAPPER_CLASS, - cls, Mapper.class); + job.getConfiguration().setClass(MAPPER_CLASS, cls, Mapper.class); } /** @@ -128,11 +121,10 @@ public class MultithreadedTableMapper extends TableMapper { int numberOfThreads = getNumberOfThreads(context); mapClass = getMapperClass(context); if (LOG.isDebugEnabled()) { - LOG.debug("Configuring multithread runner to use " + numberOfThreads + - " threads"); + LOG.debug("Configuring multithread runner to use " + numberOfThreads + " threads"); } executor = Executors.newFixedThreadPool(numberOfThreads); - for(int i=0; i < numberOfThreads; ++i) { + for (int i = 0; i < numberOfThreads; ++i) { MapRunner thread = new MapRunner(context); executor.execute(thread); } @@ -143,8 +135,7 @@ public class MultithreadedTableMapper extends TableMapper { } } - private class SubMapRecordReader - extends RecordReader { + private class SubMapRecordReader extends RecordReader { private ImmutableBytesWritable key; private Result value; private Configuration conf; @@ -159,9 +150,8 @@ public class MultithreadedTableMapper extends TableMapper { } @Override - public void initialize(InputSplit split, - TaskAttemptContext context - ) throws IOException, InterruptedException { + public void initialize(InputSplit split, TaskAttemptContext context) + throws 
IOException, InterruptedException { conf = context.getConfiguration(); } @@ -171,8 +161,7 @@ public class MultithreadedTableMapper extends TableMapper { if (!outer.nextKeyValue()) { return false; } - key = ReflectionUtils.copy(outer.getConfiguration(), - outer.getCurrentKey(), key); + key = ReflectionUtils.copy(outer.getConfiguration(), outer.getCurrentKey(), key); value = ReflectionUtils.copy(conf, outer.getCurrentValue(), value); return true; } @@ -188,16 +177,14 @@ public class MultithreadedTableMapper extends TableMapper { } } - private class SubMapRecordWriter extends RecordWriter { + private class SubMapRecordWriter extends RecordWriter { @Override - public void close(TaskAttemptContext context) throws IOException, - InterruptedException { + public void close(TaskAttemptContext context) throws IOException, InterruptedException { } @Override - public void write(K2 key, V2 value) throws IOException, - InterruptedException { + public void write(K2 key, V2 value) throws IOException, InterruptedException { synchronized (outer) { outer.write(key, value); } @@ -235,56 +222,34 @@ public class MultithreadedTableMapper extends TableMapper { justification = "Don't understand why FB is complaining about this one." + " We do throw exception") private class MapRunner implements Runnable { - private Mapper mapper; + private Mapper mapper; private Context subcontext; @SuppressWarnings({ "rawtypes", "unchecked" }) MapRunner(Context context) throws IOException, InterruptedException { - mapper = ReflectionUtils.newInstance(mapClass, - context.getConfiguration()); + mapper = ReflectionUtils.newInstance(mapClass, context.getConfiguration()); try { - Constructor c = context.getClass().getConstructor( - Mapper.class, - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = context.getClass().getConstructor(Mapper.class, Configuration.class, + TaskAttemptID.class, RecordReader.class, RecordWriter.class, OutputCommitter.class, + StatusReporter.class, InputSplit.class); c.setAccessible(true); - subcontext = (Context) c.newInstance( - mapper, - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); + subcontext = (Context) c.newInstance(mapper, outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); } catch (Exception e) { try { - Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl").getConstructor( - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl") + .getConstructor(Configuration.class, TaskAttemptID.class, RecordReader.class, + RecordWriter.class, OutputCommitter.class, StatusReporter.class, InputSplit.class); c.setAccessible(true); - MapContext mc = (MapContext) c.newInstance( - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); - Class wrappedMapperClass = Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); + MapContext mc = (MapContext) 
c.newInstance(outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); + Class wrappedMapperClass = + Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); Method getMapContext = wrappedMapperClass.getMethod("getMapContext", MapContext.class); - subcontext = (Context) getMapContext.invoke( - wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); + subcontext = (Context) getMapContext + .invoke(wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); } catch (Exception ee) { // FindBugs: REC_CATCH_EXCEPTION // rethrow as IOE throw new IOException(e); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java index 7859afa496c..63ed8d1fdc1 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,17 +20,17 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; @InterfaceAudience.Public public class MutationSerialization implements Serialization { @@ -69,6 +69,7 @@ public class MutationSerialization implements Serialization { } } + private static class MutationSerializer implements Serializer { private OutputStream out; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index 317b328df78..5ab4e5a292e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
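The MultithreadedTableMapper hunks above are formatting-only; for context, a configuration sketch. The table name and MyCpuLightMapper are hypothetical, and the class is audience-Private, so this mirrors internal usage rather than a public recipe.

    // Assumes a Configuration conf and a hypothetical MyCpuLightMapper
    // that extends TableMapper<Text, IntWritable>.
    Job job = Job.getInstance(conf, "multithreaded-scan");
    TableMapReduceUtil.initTableMapperJob("mytable", new Scan(),
        MultithreadedTableMapper.class, Text.class, IntWritable.class, job);
    // Run several mapper instances, one per thread, inside each map task.
    MultithreadedTableMapper.setMapperClass(job, MyCpuLightMapper.class);
    MultithreadedTableMapper.setNumberOfThreads(job, 16);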
See the NOTICE file * distributed with this work for additional information @@ -20,21 +19,19 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.List; -import java.util.Map.Entry; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.Map.Entry; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Combine Puts. Merges Put instances grouped by K into a single - * instance. + * Combine Puts. Merges Put instances grouped by K into a single instance. * @see TableMapReduceUtil */ @InterfaceAudience.Public @@ -43,14 +40,14 @@ public class PutCombiner extends Reducer { @Override protected void reduce(K row, Iterable vals, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { // Using HeapSize to create an upper bound on the memory size of // the puts and flush some portion of the content while looping. This // flush could result in multiple Puts for a single rowkey. That is // acceptable because Combiner is run as an optimization and it's not // critical that all Puts are grouped perfectly. - long threshold = context.getConfiguration().getLong( - "putcombiner.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putcombiner.row.threshold", 1L * (1 << 30)); int cnt = 0; long curSize = 0; Put put = null; @@ -61,8 +58,7 @@ public class PutCombiner extends Reducer { put = p; familyMap = put.getFamilyCellMap(); } else { - for (Entry> entry : p.getFamilyCellMap() - .entrySet()) { + for (Entry> entry : p.getFamilyCellMap().entrySet()) { List cells = familyMap.get(entry.getKey()); List kvs = (cells != null) ? (List) cells : null; for (Cell cell : entry.getValue()) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index f4ad1f25fe4..90905090f89 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
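The PutCombiner hunks above only reflow existing code; as context, a sketch of how a job whose mapper emits Puts might enable it. The threshold value is arbitrary.

    // Assumes a Job job whose mapper output is (ImmutableBytesWritable, Put).
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    // Merge Puts that share a map output key before they are shuffled.
    job.setCombinerClass(PutCombiner.class);
    // Optional: flush partially combined Puts once they reach roughly 256 MB of heap.
    job.getConfiguration().setLong("putcombiner.row.threshold", 256L * 1024 * 1024);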
See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +31,6 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -41,40 +38,35 @@ import org.apache.hadoop.hbase.security.visibility.CellVisibility; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Puts. - * Reads in all Puts from passed Iterator, sorts them, then emits - * Puts in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Puts. Reads in all Puts from passed Iterator, sorts them, then emits Puts in sorted + * order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 * @see CellSortReducer */ @InterfaceAudience.Public -public class PutSortReducer extends - Reducer { +public class PutSortReducer + extends Reducer { // the cell creator private CellCreator kvCreator; @Override protected void - setup(Reducer.Context context) - throws IOException, InterruptedException { + setup(Reducer.Context context) + throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); this.kvCreator = new CellCreator(conf); } @Override - protected void reduce( - ImmutableBytesWritable row, - java.lang.Iterable puts, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable row, java.lang.Iterable puts, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "putsortreducer.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30)); Iterator iter = puts.iterator(); while (iter.hasNext()) { TreeSet map = new TreeSet<>(CellComparator.getInstance()); @@ -100,15 +92,15 @@ public class PutSortReducer extends if (cellVisibility != null) { // add the visibility labels if any tags.addAll(kvCreator.getVisibilityExpressionResolver() - .createVisibilityExpTags(cellVisibility.getExpression())); + .createVisibilityExpTags(cellVisibility.getExpression())); } } catch (DeserializationException e) { // We just throw exception here. Should we allow other mutations to proceed by // just ignoring the bad one? throw new IOException("Invalid visibility expression found in mutation " + p, e); } - for (List cells: p.getFamilyCellMap().values()) { - for (Cell cell: cells) { + for (List cells : p.getFamilyCellMap().values()) { + for (Cell cell : cells) { // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. 
KeyValue kv = null; @@ -128,13 +120,12 @@ public class PutSortReducer extends } } } - context.setStatus("Read " + map.size() + " entries of " + map.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + map.size() + " entries of " + map.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : map) { context.write(row, kv); - if (++index % 100 == 0) - context.setStatus("Wrote " + index); + if (++index % 100 == 0) context.setStatus("Wrote " + index); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java index 40cd34f3844..4d027196a8f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,9 +38,9 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * Computes size of each region for given table and given column families. - * The value is used by MapReduce for better scheduling. - * */ + * Computes size of each region for given table and given column families. The value is used by + * MapReduce for better scheduling. + */ @InterfaceAudience.Private public class RegionSizeCalculator { @@ -48,7 +48,7 @@ public class RegionSizeCalculator { /** * Maps each region to its size in bytes. - * */ + */ private final Map sizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable"; @@ -56,13 +56,12 @@ public class RegionSizeCalculator { /** * Computes size of each region for table and given column families. 
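Similarly for PutSortReducer, a sketch of the reducer wiring in an HFile-writing job; the connection, job and table name are assumed to exist, and configureIncrementalLoad would typically pick this reducer on its own when the map output value class is Put.

    // Assumes an open Connection conn and a Job job; runs inside a method that throws IOException.
    try (Table table = conn.getTable(TableName.valueOf("t1"));
        RegionLocator locator = conn.getRegionLocator(table.getName())) {
      job.setMapOutputKeyClass(ImmutableBytesWritable.class);
      job.setMapOutputValueClass(Put.class);
      // Sort each row's Puts into KeyValues before the HFiles are written.
      job.setReducerClass(PutSortReducer.class);
      HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), locator);
    }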
- * */ + */ public RegionSizeCalculator(RegionLocator regionLocator, Admin admin) throws IOException { init(regionLocator, admin); } - private void init(RegionLocator regionLocator, Admin admin) - throws IOException { + private void init(RegionLocator regionLocator, Admin admin) throws IOException { if (!enabled(admin.getConfiguration())) { LOG.info("Region size calculation disabled."); return; @@ -79,12 +78,12 @@ public class RegionSizeCalculator { Set tableServers = getRegionServersOfTable(regionLocator); for (ServerName tableServerName : tableServers) { - for (RegionMetrics regionLoad : admin.getRegionMetrics( - tableServerName,regionLocator.getName())) { + for (RegionMetrics regionLoad : admin.getRegionMetrics(tableServerName, + regionLocator.getName())) { byte[] regionId = regionLoad.getRegionName(); - long regionSizeBytes - = ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; + long regionSizeBytes = + ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; sizeMap.put(regionId, regionSizeBytes); @@ -96,8 +95,7 @@ public class RegionSizeCalculator { LOG.debug("Region sizes calculated"); } - private Set getRegionServersOfTable(RegionLocator regionLocator) - throws IOException { + private Set getRegionServersOfTable(RegionLocator regionLocator) throws IOException { Set tableServers = Sets.newHashSet(); for (HRegionLocation regionLocation : regionLocator.getAllRegionLocations()) { @@ -112,7 +110,7 @@ public class RegionSizeCalculator { /** * Returns size of given region in bytes. Returns 0 if region was not found. - * */ + */ public long getRegionSize(byte[] regionId) { Long size = sizeMap.get(regionId); if (size == null) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java index 9fdaa7b78f7..782621e120a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
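The RegionSizeCalculator changes above are formatting-only; a usage sketch follows. The class is audience-Private, so this simply mirrors how the input formats use it internally; the table name is a placeholder.

    // Assumes an open Connection conn; runs inside a method that throws IOException.
    TableName tableName = TableName.valueOf("t1");
    try (RegionLocator locator = conn.getRegionLocator(tableName);
        Admin admin = conn.getAdmin()) {
      RegionSizeCalculator sizes = new RegionSizeCalculator(locator, admin);
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        byte[] regionName = location.getRegion().getRegionName();
        // getRegionSize returns 0 for regions the calculator has no data for.
        long regionBytes = sizes.getRegionSize(regionName);
        System.out.println(location.getRegion().getRegionNameAsString() + " ~ " + regionBytes + " bytes");
      }
    }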
See the NOTICE file * distributed with this work for additional information @@ -24,21 +24,21 @@ import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @InterfaceAudience.Public public class ResultSerialization extends Configured implements Serialization { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java index 2427e909ff2..81eacb44099 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java @@ -50,11 +50,11 @@ public class RoundRobinTableInputFormat extends TableInputFormat { private Boolean hbaseRegionsizecalculatorEnableOriginalValue = null; /** * Boolean config for whether superclass should produce InputSplits with 'lengths'. If true, TIF - * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will - * be used the the InputSplit length. If false, we skip this query and the super-classes - * returned InputSplits will have lenghths of zero. This override will set the flag to false. - * All returned lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort - * returned by this override will prevail. Thats what we want. + * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will be + * used the the InputSplit length. If false, we skip this query and the super-classes returned + * InputSplits will have lenghths of zero. This override will set the flag to false. All returned + * lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort returned by this + * override will prevail. Thats what we want. */ static String HBASE_REGIONSIZECALCULATOR_ENABLE = "hbase.regionsizecalculator.enable"; @@ -116,26 +116,26 @@ public class RoundRobinTableInputFormat extends TableInputFormat { } /** - * Adds a configuration to the Context disabling remote rpc'ing to figure Region size - * when calculating InputSplits. See up in super-class TIF where we rpc to every server to find - * the size of all involved Regions. Here we disable this super-class action. This means - * InputSplits will have a length of zero. If all InputSplits have zero-length InputSplits, the - * ordering done in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will - * ask every node for the current size of each of the participating Table Regions. 
It does this - * because it wants to schedule the biggest Regions first (This fixation comes of hadoop itself - * -- see JobSubmitter where it sorts inputs by size). This extra diligence takes time and is of - * no utility in this RRTIF where spread is of more import than size-first. Also, if a rolling - * restart is happening when we go to launch the job, the job launch may fail because the request - * for Region size fails -- even after retries -- because rolled RegionServer may take a while to - * come online: e.g. it takes java 90 seconds to allocate a 160G. RegionServer is offline during - * this time. The job launch will fail with 'Connection rejected'. So, we set - * 'hbase.regionsizecalculator.enable' to false here in RRTIF. + * Adds a configuration to the Context disabling remote rpc'ing to figure Region size when + * calculating InputSplits. See up in super-class TIF where we rpc to every server to find the + * size of all involved Regions. Here we disable this super-class action. This means InputSplits + * will have a length of zero. If all InputSplits have zero-length InputSplits, the ordering done + * in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will ask every node + * for the current size of each of the participating Table Regions. It does this because it wants + * to schedule the biggest Regions first (This fixation comes of hadoop itself -- see JobSubmitter + * where it sorts inputs by size). This extra diligence takes time and is of no utility in this + * RRTIF where spread is of more import than size-first. Also, if a rolling restart is happening + * when we go to launch the job, the job launch may fail because the request for Region size fails + * -- even after retries -- because rolled RegionServer may take a while to come online: e.g. it + * takes java 90 seconds to allocate a 160G. RegionServer is offline during this time. The job + * launch will fail with 'Connection rejected'. So, we set 'hbase.regionsizecalculator.enable' to + * false here in RRTIF. * @see #unconfigure() */ void configure() { if (getConf().get(HBASE_REGIONSIZECALCULATOR_ENABLE) != null) { - this.hbaseRegionsizecalculatorEnableOriginalValue = getConf(). - getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); + this.hbaseRegionsizecalculatorEnableOriginalValue = + getConf().getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); } getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, false); } @@ -165,7 +165,7 @@ public class RoundRobinTableInputFormat extends TableInputFormat { configuration.set(TableInputFormat.INPUT_TABLE, args[0]); tif.setConf(configuration); List splits = tif.getSplits(new JobContextImpl(configuration, new JobID())); - for (InputSplit split: splits) { + for (InputSplit split : splits) { System.out.println(split); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java index 50d726b12cd..6258399472d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
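RoundRobinTableInputFormat's configure() above only flips 'hbase.regionsizecalculator.enable' off while splits are computed. A sketch of wiring the format into a job, assuming a placeholder table "t1" and the IdentityTableMapper shipped with hbase-mapreduce; initTableMapperJob installs the default TableInputFormat and the explicit setInputFormatClass call swaps in the round-robin variant.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.RoundRobinTableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class RoundRobinScanJob {
  public static Job createJob() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "round-robin-scan");
    TableMapReduceUtil.initTableMapperJob("t1", new Scan(), IdentityTableMapper.class,
      ImmutableBytesWritable.class, Result.class, job);
    // Replace the default TableInputFormat; splits are then ordered round-robin across
    // RegionServers and no per-RegionServer size RPCs are made while computing them.
    job.setInputFormatClass(RoundRobinTableInputFormat.class);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    return job;
  }
}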
See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,26 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import java.util.List; import java.util.ArrayList; - +import java.util.List; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.FilterBase; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.AbstractHBaseTool; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.hbase.thirdparty.org.apache.commons.cli.BasicParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @@ -32,24 +45,10 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.FilterBase; -import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.Counter; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; /** - * A job with a just a map phase to count rows. Map outputs table rows IF the - * input row has columns that have content. + * A job with a just a map phase to count rows. Map outputs table rows IF the input row has columns + * that have content. */ @InterfaceAudience.Public public class RowCounter extends AbstractHBaseTool { @@ -77,25 +76,23 @@ public class RowCounter extends AbstractHBaseTool { /** * Mapper that runs the count. */ - static class RowCounterMapper - extends TableMapper { + static class RowCounterMapper extends TableMapper { /** Counter enumeration to count the actual rows. */ - public static enum Counters {ROWS} + public static enum Counters { + ROWS + } /** * Maps the data. - * - * @param row The current table row key. + * @param row The current table row key. * @param values The columns. - * @param context The current context. + * @param context The current context. * @throws IOException When something is broken with the data. 
* @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context) */ @Override - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { // Count every row containing data, whether it's in qualifiers or values context.getCounter(Counters.ROWS).increment(1); } @@ -103,8 +100,7 @@ public class RowCounter extends AbstractHBaseTool { /** * Sets up the actual job. - * - * @param conf The current configuration. + * @param conf The current configuration. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -125,30 +121,28 @@ public class RowCounter extends AbstractHBaseTool { } } - if(this.expectedCount >= 0) { + if (this.expectedCount >= 0) { conf.setLong(EXPECTED_COUNT_KEY, this.expectedCount); } scan.setTimeRange(startTime, endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. * @deprecated as of release 2.3.0. Will be removed on 4.0.0. Please use main method instead. */ @Deprecated - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; List rowRangeList = null; long startTime = 0; @@ -166,7 +160,7 @@ public class RowCounter extends AbstractHBaseTool { if (args[i].startsWith(rangeSwitch)) { try { rowRangeList = parseRowRangeParameter( - args[i].substring(args[1].indexOf(rangeSwitch)+rangeSwitch.length())); + args[i].substring(args[1].indexOf(rangeSwitch) + rangeSwitch.length())); } catch (IllegalArgumentException e) { return null; } @@ -206,60 +200,55 @@ public class RowCounter extends AbstractHBaseTool { if (StringUtils.isBlank(qualifier)) { scan.addFamily(Bytes.toBytes(family)); - } - else { + } else { scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier)); } } } scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** - * Prints usage without error message. - * Note that we don't document --expected-count, because it's intended for test. + * Prints usage without error message. Note that we don't document --expected-count, because it's + * intended for test. 
*/ private static void printUsage(String errorMessage) { System.err.println("ERROR: " + errorMessage); - System.err.println("Usage: hbase rowcounter [options] " - + "[--starttime= --endtime=] " - + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); + System.err.println( + "Usage: hbase rowcounter [options] " + "[--starttime= --endtime=] " + + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); System.err.println("For performance consider the following options:\n" - + "-Dhbase.client.scanner.caching=100\n" - + "-Dmapreduce.map.speculative=false"); + + "-Dhbase.client.scanner.caching=100\n" + "-Dmapreduce.map.speculative=false"); } private static List parseRowRangeParameter(String arg) { final List rangesSplit = Splitter.on(";").splitToList(arg); final List rangeList = new ArrayList<>(); for (String range : rangesSplit) { - if(range!=null && !range.isEmpty()) { + if (range != null && !range.isEmpty()) { List startEnd = Splitter.on(",").splitToList(range); if (startEnd.size() != 2 || startEnd.get(1).contains(",")) { throw new IllegalArgumentException("Wrong range specification: " + range); } String startKey = startEnd.get(0); String endKey = startEnd.get(1); - rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), - true, Bytes.toBytesBinary(endKey), false)); + rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), true, + Bytes.toBytesBinary(endKey), false)); } } return rangeList; } /** - * Sets filter {@link FilterBase} to the {@link Scan} instance. - * If provided rowRangeList contains more than one element, - * method sets filter which is instance of {@link MultiRowRangeFilter}. - * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. - * If rowRangeList contains exactly one element, startRow and stopRow are set to the scan. - * @param scan - * @param rowRangeList + * Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains + * more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}. + * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList + * contains exactly one element, startRow and stopRow are set to the scan. nn */ private static void setScanFilter(Scan scan, List rowRangeList) { final int size = rowRangeList == null ? 
0 : rowRangeList.size(); @@ -268,8 +257,8 @@ public class RowCounter extends AbstractHBaseTool { } if (size == 1) { MultiRowRangeFilter.RowRange range = rowRangeList.get(0); - scan.setStartRow(range.getStartRow()); //inclusive - scan.setStopRow(range.getStopRow()); //exclusive + scan.setStartRow(range.getStartRow()); // inclusive + scan.setStopRow(range.getStopRow()); // exclusive } else if (size > 1) { scan.setFilter(new MultiRowRangeFilter(rowRangeList)); } @@ -281,13 +270,13 @@ public class RowCounter extends AbstractHBaseTool { footerBuilder.append("For performance, consider the following configuration properties:\n"); footerBuilder.append("-Dhbase.client.scanner.caching=100\n"); footerBuilder.append("-Dmapreduce.map.speculative=false\n"); - printUsage("hbase rowcounter [options] [ ...]", - "Options:", footerBuilder.toString()); + printUsage("hbase rowcounter [options] [ ...]", "Options:", + footerBuilder.toString()); } @Override protected void printUsage(final String usageStr, final String usageHeader, - final String usageFooter) { + final String usageFooter) { HelpFormatter helpFormatter = new HelpFormatter(); helpFormatter.setWidth(120); helpFormatter.setOptionComparator(new AbstractHBaseTool.OptionsOrderComparator()); @@ -297,15 +286,15 @@ public class RowCounter extends AbstractHBaseTool { @Override protected void addOptions() { - Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); - Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("end time filter limit, to only count rows up to this timestamp."). - longOpt(OPT_END_TIME).build(); - Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); - Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); + Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); + Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("end time filter limit, to only count rows up to this timestamp.").longOpt(OPT_END_TIME) + .build(); + Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); + Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); addOption(startTimeOption); addOption(endTimeOption); addOption(rangeOption); @@ -313,28 +302,31 @@ public class RowCounter extends AbstractHBaseTool { } @Override - protected void processOptions(CommandLine cmd) throws IllegalArgumentException{ + protected void processOptions(CommandLine cmd) throws IllegalArgumentException { this.tableName = cmd.getArgList().get(0); - if(cmd.getOptionValue(OPT_RANGE)!=null) { + if (cmd.getOptionValue(OPT_RANGE) != null) { this.rowRangeList = parseRowRangeParameter(cmd.getOptionValue(OPT_RANGE)); } - this.endTime = cmd.getOptionValue(OPT_END_TIME) == null ? HConstants.LATEST_TIMESTAMP : - Long.parseLong(cmd.getOptionValue(OPT_END_TIME)); - this.expectedCount = cmd.getOptionValue(OPT_EXPECTED_COUNT) == null ? 
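RowCounter is a Tool, so besides the 'hbase rowcounter' wrapper described in printUsage it can be launched programmatically. A sketch under the usual assumptions: the table "t1", column "cf:q", and the two row ranges are placeholders, written in the --range syntax documented above.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.RowCounter;
import org.apache.hadoop.util.ToolRunner;

public class RowCounterLauncher {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to: hbase rowcounter --range=a,m;x,z t1 cf:q
    int exitCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(),
      new String[] { "--range=a,m;x,z", "t1", "cf:q" });
    System.exit(exitCode);
  }
}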
Long.MIN_VALUE : - Long.parseLong(cmd.getOptionValue(OPT_EXPECTED_COUNT)); - this.startTime = cmd.getOptionValue(OPT_START_TIME) == null ? 0 : - Long.parseLong(cmd.getOptionValue(OPT_START_TIME)); + this.endTime = cmd.getOptionValue(OPT_END_TIME) == null + ? HConstants.LATEST_TIMESTAMP + : Long.parseLong(cmd.getOptionValue(OPT_END_TIME)); + this.expectedCount = cmd.getOptionValue(OPT_EXPECTED_COUNT) == null + ? Long.MIN_VALUE + : Long.parseLong(cmd.getOptionValue(OPT_EXPECTED_COUNT)); + this.startTime = cmd.getOptionValue(OPT_START_TIME) == null + ? 0 + : Long.parseLong(cmd.getOptionValue(OPT_START_TIME)); - for(int i=1; ihbase.simpletotalorder.start - * and hbase.simpletotalorder.end. The end key needs to be - * exclusive; i.e. one larger than the biggest key in your key space. - * You may be surprised at how this class partitions the space; it may not - * align with preconceptions; e.g. a start key of zero and an end key of 100 - * divided in ten will not make regions whose range is 0-10, 10-20, and so on. - * Make your own partitioner if you need the region spacing to come out a + * A partitioner that takes start and end keys and uses bigdecimal to figure which reduce a key + * belongs to. Pass the start and end keys in the Configuration using + * hbase.simpletotalorder.start and hbase.simpletotalorder.end. The end + * key needs to be exclusive; i.e. one larger than the biggest key in your key space. You may be + * surprised at how this class partitions the space; it may not align with preconceptions; e.g. a + * start key of zero and an end key of 100 divided in ten will not make regions whose range is 0-10, + * 10-20, and so on. Make your own partitioner if you need the region spacing to come out a * particular way. * @param * @see #START @@ -46,7 +42,7 @@ import org.apache.hadoop.mapreduce.Partitioner; */ @InterfaceAudience.Public public class SimpleTotalOrderPartitioner extends Partitioner -implements Configurable { + implements Configurable { private final static Logger LOG = LoggerFactory.getLogger(SimpleTotalOrderPartitioner.class); /** @@ -67,9 +63,9 @@ implements Configurable { static final String END_BASE64 = "hbase.simpletotalorder.end.base64"; private Configuration c; - private byte [] startkey; - private byte [] endkey; - private byte [][] splits; + private byte[] startkey; + private byte[] endkey; + private byte[][] splits; private int lastReduces = -1; public static void setStartKey(Configuration conf, byte[] startKey) { @@ -90,8 +86,7 @@ implements Configurable { return getKeyFromConf(conf, END_BASE64, END); } - private static byte[] getKeyFromConf(Configuration conf, - String base64Key, String deprecatedKey) { + private static byte[] getKeyFromConf(Configuration conf, String base64Key, String deprecatedKey) { String encoded = conf.get(base64Key); if (encoded != null) { return Base64.getDecoder().decode(encoded); @@ -100,14 +95,13 @@ implements Configurable { if (oldStyleVal == null) { return null; } - LOG.warn("Using deprecated configuration " + deprecatedKey + - " - please use static accessor methods instead."); + LOG.warn("Using deprecated configuration " + deprecatedKey + + " - please use static accessor methods instead."); return Bytes.toBytesBinary(oldStyleVal); } @Override - public int getPartition(final ImmutableBytesWritable key, final VALUE value, - final int reduces) { + public int getPartition(final ImmutableBytesWritable key, final VALUE value, final int reduces) { if (reduces == 1) return 0; if (this.lastReduces != reduces) { this.splits = 
Bytes.split(this.startkey, this.endkey, reduces - 1); @@ -116,16 +110,14 @@ implements Configurable { } this.lastReduces = reduces; } - int pos = Bytes.binarySearch(this.splits, key.get(), key.getOffset(), - key.getLength()); + int pos = Bytes.binarySearch(this.splits, key.get(), key.getOffset(), key.getLength()); // Below code is from hfile index search. if (pos < 0) { pos++; pos *= -1; if (pos == 0) { // falls before the beginning of the file. - throw new RuntimeException("Key outside start/stop range: " + - key.toString()); + throw new RuntimeException("Key outside start/stop range: " + key.toString()); } pos--; } @@ -145,8 +137,8 @@ implements Configurable { if (startkey == null || endkey == null) { throw new RuntimeException(this.getClass() + " not configured"); } - LOG.info("startkey=" + Bytes.toStringBinary(startkey) + - ", endkey=" + Bytes.toStringBinary(endkey)); + LOG.info( + "startkey=" + Bytes.toStringBinary(startkey) + ", endkey=" + Bytes.toStringBinary(endkey)); // Reset last reduces count on change of Start / End key this.lastReduces = -1; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index 749d3572632..19e73dda904 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -90,9 +90,9 @@ public class SyncTable extends Configured implements Tool { } private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { - Configuration peerConf = HBaseConfiguration.createClusterConf(job - .getConfiguration(), zookeeper); - if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ + Configuration peerConf = + HBaseConfiguration.createClusterConf(job.getConfiguration(), zookeeper); + if ("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))) { TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -103,12 +103,12 @@ public class SyncTable extends Configured implements Tool { throw new IOException("Source hash dir not found: " + sourceHashDir); } - Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", - "syncTable_" + sourceTableName + "-" + targetTableName)); + Job job = Job.getInstance(getConf(), + getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { - TokenCache.obtainTokensForNamenodes(job.getCredentials(), new - Path[] { sourceHashDir }, getConf()); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, + getConf()); } HashTable.TableHash tableHash = HashTable.TableHash.read(getConf(), sourceHashDir); @@ -116,13 +116,13 @@ public class SyncTable extends Configured implements Tool { LOG.info("Read " + tableHash.partitions.size() + " partition keys"); if (!tableHash.tableName.equals(sourceTableName)) { LOG.warn("Table name mismatch - manifest indicates hash was taken from: " - + tableHash.tableName + " but job is reading from: " + sourceTableName); + + tableHash.tableName + " but job is reading from: " + sourceTableName); } if 
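A short sketch of configuring the SimpleTotalOrderPartitioner above on a job, assuming a key space of the form row-00000..row-99999 (both key literals are placeholders); the static setters store the keys Base64-encoded, and the end key must be exclusive, i.e. sort after the largest key actually written.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;

public class PartitionerSetup {
  public static void configure(Job job) {
    Configuration conf = job.getConfiguration();
    SimpleTotalOrderPartitioner.setStartKey(conf, Bytes.toBytes("row-00000"));
    // Exclusive end key: one past the biggest key in the key space.
    SimpleTotalOrderPartitioner.setEndKey(conf, Bytes.toBytes("row-99999~"));
    job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
  }
}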
(tableHash.numHashFiles != tableHash.partitions.size() + 1) { throw new RuntimeException("Hash data appears corrupt. The number of of hash files created" - + " should be 1 more than the number of partition keys. However, the manifest file " - + " says numHashFiles=" + tableHash.numHashFiles + " but the number of partition keys" - + " found in the partitions file is " + tableHash.partitions.size()); + + " should be 1 more than the number of partition keys. However, the manifest file " + + " says numHashFiles=" + tableHash.numHashFiles + " but the number of partition keys" + + " found in the partitions file is " + tableHash.partitions.size()); } Path dataDir = new Path(sourceHashDir, HashTable.HASH_DATA_DIR); @@ -135,9 +135,9 @@ public class SyncTable extends Configured implements Tool { if (dataSubdirCount != tableHash.numHashFiles) { throw new RuntimeException("Hash data appears corrupt. The number of of hash files created" - + " should be 1 more than the number of partition keys. However, the number of data dirs" - + " found is " + dataSubdirCount + " but the number of partition keys" - + " found in the partitions file is " + tableHash.partitions.size()); + + " should be 1 more than the number of partition keys. However, the number of data dirs" + + " found is " + dataSubdirCount + " but the number of partition keys" + + " found in the partitions file is " + tableHash.partitions.size()); } job.setJarByClass(HashTable.class); @@ -157,18 +157,18 @@ public class SyncTable extends Configured implements Tool { jobConf.setBoolean(DO_PUTS_CONF_KEY, doPuts); jobConf.setBoolean(IGNORE_TIMESTAMPS, ignoreTimestamps); - TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(), - SyncMapper.class, null, null, job); + TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(), SyncMapper.class, + null, null, job); job.setNumReduceTasks(0); if (dryRun) { job.setOutputFormatClass(NullOutputFormat.class); } else { - // No reducers. Just write straight to table. Call initTableReducerJob + // No reducers. Just write straight to table. Call initTableReducerJob // because it sets up the TableOutputFormat. 
- TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, - targetZkCluster, null, null); + TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster, + null, null); // would be nice to add an option for bulk load instead } @@ -176,7 +176,7 @@ public class SyncTable extends Configured implements Tool { // Obtain an authentication token, for the specified cluster, on behalf of the current user if (sourceZkCluster != null) { Configuration peerConf = - HBaseConfiguration.createClusterConf(job.getConfiguration(), sourceZkCluster); + HBaseConfiguration.createClusterConf(job.getConfiguration(), sourceZkCluster); TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } return job; @@ -202,9 +202,22 @@ public class SyncTable extends Configured implements Tool { Throwable mapperException; - public static enum Counter {BATCHES, HASHES_MATCHED, HASHES_NOT_MATCHED, SOURCEMISSINGROWS, - SOURCEMISSINGCELLS, TARGETMISSINGROWS, TARGETMISSINGCELLS, ROWSWITHDIFFS, DIFFERENTCELLVALUES, - MATCHINGROWS, MATCHINGCELLS, EMPTY_BATCHES, RANGESMATCHED, RANGESNOTMATCHED}; + public static enum Counter { + BATCHES, + HASHES_MATCHED, + HASHES_NOT_MATCHED, + SOURCEMISSINGROWS, + SOURCEMISSINGCELLS, + TARGETMISSINGROWS, + TARGETMISSINGCELLS, + ROWSWITHDIFFS, + DIFFERENTCELLVALUES, + MATCHINGROWS, + MATCHINGCELLS, + EMPTY_BATCHES, + RANGESMATCHED, + RANGESNOTMATCHED + }; @Override protected void setup(Context context) throws IOException { @@ -212,8 +225,8 @@ public class SyncTable extends Configured implements Tool { Configuration conf = context.getConfiguration(); sourceHashDir = new Path(conf.get(SOURCE_HASH_DIR_CONF_KEY)); sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY, null); - targetConnection = openConnection(conf, TARGET_ZK_CLUSTER_CONF_KEY, - TableOutputFormat.OUTPUT_CONF_PREFIX); + targetConnection = + openConnection(conf, TARGET_ZK_CLUSTER_CONF_KEY, TableOutputFormat.OUTPUT_CONF_PREFIX); sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY); targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY); dryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false); @@ -233,28 +246,27 @@ public class SyncTable extends Configured implements Tool { // create a hasher, but don't start it right away // instead, find the first hash batch at or after the start row - // and skip any rows that come before. they will be caught by the previous task + // and skip any rows that come before. they will be caught by the previous task targetHasher = new HashTable.ResultHasher(); targetHasher.ignoreTimestamps = ignoreTimestamp; } private static Connection openConnection(Configuration conf, String zkClusterConfKey, - String configPrefix) - throws IOException { - String zkCluster = conf.get(zkClusterConfKey); - Configuration clusterConf = HBaseConfiguration.createClusterConf(conf, - zkCluster, configPrefix); - return ConnectionFactory.createConnection(clusterConf); + String configPrefix) throws IOException { + String zkCluster = conf.get(zkClusterConfKey); + Configuration clusterConf = + HBaseConfiguration.createClusterConf(conf, zkCluster, configPrefix); + return ConnectionFactory.createConnection(clusterConf); } private static Table openTable(Connection connection, Configuration conf, - String tableNameConfKey) throws IOException { + String tableNameConfKey) throws IOException { return connection.getTable(TableName.valueOf(conf.get(tableNameConfKey))); } /** - * Attempt to read the next source key/hash pair. 
- * If there are no more, set nextSourceKey to null + * Attempt to read the next source key/hash pair. If there are no more, set nextSourceKey to + * null */ private void findNextKeyHashPair() throws IOException { boolean hasNext = sourceHashReader.next(); @@ -268,7 +280,7 @@ public class SyncTable extends Configured implements Tool { @Override protected void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { try { // first, finish any hash batches that end before the scanned row while (nextSourceKey != null && key.compareTo(nextSourceKey) >= 0) { @@ -288,8 +300,8 @@ public class SyncTable extends Configured implements Tool { } /** - * If there is an open hash batch, complete it and sync if there are diffs. - * Start a new batch, and seek to read the + * If there is an open hash batch, complete it and sync if there are diffs. Start a new batch, + * and seek to read the */ private void moveToNextBatch(Context context) throws IOException, InterruptedException { if (targetHasher.isBatchStarted()) { @@ -302,12 +314,11 @@ public class SyncTable extends Configured implements Tool { } /** - * Finish the currently open hash batch. - * Compare the target hash to the given source hash. - * If they do not match, then sync the covered key range. + * Finish the currently open hash batch. Compare the target hash to the given source hash. If + * they do not match, then sync the covered key range. */ private void finishBatchAndCompareHashes(Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { targetHasher.finishBatch(); context.getCounter(Counter.BATCHES).increment(1); if (targetHasher.getBatchSize() == 0) { @@ -320,33 +331,33 @@ public class SyncTable extends Configured implements Tool { context.getCounter(Counter.HASHES_NOT_MATCHED).increment(1); ImmutableBytesWritable stopRow = nextSourceKey == null - ? new ImmutableBytesWritable(sourceTableHash.stopRow) - : nextSourceKey; + ? new ImmutableBytesWritable(sourceTableHash.stopRow) + : nextSourceKey; if (LOG.isDebugEnabled()) { - LOG.debug("Hash mismatch. Key range: " + toHex(targetHasher.getBatchStartKey()) - + " to " + toHex(stopRow) - + " sourceHash: " + toHex(currentSourceHash) - + " targetHash: " + toHex(targetHash)); + LOG.debug("Hash mismatch. Key range: " + toHex(targetHasher.getBatchStartKey()) + " to " + + toHex(stopRow) + " sourceHash: " + toHex(currentSourceHash) + " targetHash: " + + toHex(targetHash)); } syncRange(context, targetHasher.getBatchStartKey(), stopRow); } } + private static String toHex(ImmutableBytesWritable bytes) { return Bytes.toHex(bytes.get(), bytes.getOffset(), bytes.getLength()); } - private static final CellScanner EMPTY_CELL_SCANNER - = new CellScanner(Collections.emptyIterator()); + private static final CellScanner EMPTY_CELL_SCANNER = + new CellScanner(Collections. emptyIterator()); /** - * Rescan the given range directly from the source and target tables. - * Count and log differences, and if this is not a dry run, output Puts and Deletes - * to make the target table match the source table for this range + * Rescan the given range directly from the source and target tables. 
Count and log differences, + * and if this is not a dry run, output Puts and Deletes to make the target table match the + * source table for this range */ private void syncRange(Context context, ImmutableBytesWritable startRow, - ImmutableBytesWritable stopRow) throws IOException, InterruptedException { + ImmutableBytesWritable stopRow) throws IOException, InterruptedException { Scan scan = sourceTableHash.initScan(); scan.setStartRow(startRow.copyBytes()); scan.setStopRow(stopRow.copyBytes()); @@ -360,7 +371,7 @@ public class SyncTable extends Configured implements Tool { boolean rangeMatched = true; byte[] nextSourceRow = sourceCells.nextRow(); byte[] nextTargetRow = targetCells.nextRow(); - while(nextSourceRow != null || nextTargetRow != null) { + while (nextSourceRow != null || nextTargetRow != null) { boolean rowMatched; int rowComparison = compareRowKeys(nextSourceRow, nextTargetRow); if (rowComparison < 0) { @@ -370,7 +381,7 @@ public class SyncTable extends Configured implements Tool { context.getCounter(Counter.TARGETMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextSourceRow, sourceCells, EMPTY_CELL_SCANNER); - nextSourceRow = sourceCells.nextRow(); // advance only source to next row + nextSourceRow = sourceCells.nextRow(); // advance only source to next row } else if (rowComparison > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Source missing row: " + Bytes.toString(nextTargetRow)); @@ -378,7 +389,7 @@ public class SyncTable extends Configured implements Tool { context.getCounter(Counter.SOURCEMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextTargetRow, EMPTY_CELL_SCANNER, targetCells); - nextTargetRow = targetCells.nextRow(); // advance only target to next row + nextTargetRow = targetCells.nextRow(); // advance only target to next row } else { // current row is the same on both sides, compare cell by cell rowMatched = syncRowCells(context, nextSourceRow, sourceCells, targetCells); @@ -412,8 +423,7 @@ public class SyncTable extends Configured implements Tool { } /** - * Advance to the next row and return its row key. - * Returns null iff there are no more rows. + * Advance to the next row and return its row key. Returns null iff there are no more rows. 
*/ public byte[] nextRow() { if (nextRowResult == null) { @@ -421,9 +431,10 @@ public class SyncTable extends Configured implements Tool { while (results.hasNext()) { nextRowResult = results.next(); Cell nextCell = nextRowResult.rawCells()[0]; - if (currentRow == null - || !Bytes.equals(currentRow, 0, currentRow.length, nextCell.getRowArray(), - nextCell.getRowOffset(), nextCell.getRowLength())) { + if ( + currentRow == null || !Bytes.equals(currentRow, 0, currentRow.length, + nextCell.getRowArray(), nextCell.getRowOffset(), nextCell.getRowLength()) + ) { // found next row break; } else { @@ -463,8 +474,10 @@ public class SyncTable extends Configured implements Tool { if (results.hasNext()) { Result result = results.next(); Cell cell = result.rawCells()[0]; - if (Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength())) { + if ( + Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength()) + ) { // result is part of current row currentRowResult = result; nextCellInRow = 0; @@ -483,31 +496,29 @@ public class SyncTable extends Configured implements Tool { } } - private Cell checkAndResetTimestamp(Cell sourceCell){ + private Cell checkAndResetTimestamp(Cell sourceCell) { if (ignoreTimestamp) { - sourceCell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(sourceCell.getType()) - .setRow(sourceCell.getRowArray(), - sourceCell.getRowOffset(), sourceCell.getRowLength()) - .setFamily(sourceCell.getFamilyArray(), - sourceCell.getFamilyOffset(), sourceCell.getFamilyLength()) - .setQualifier(sourceCell.getQualifierArray(), - sourceCell.getQualifierOffset(), sourceCell.getQualifierLength()) - .setTimestamp(EnvironmentEdgeManager.currentTime()) - .setValue(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength()).build(); + sourceCell = + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(sourceCell.getType()) + .setRow(sourceCell.getRowArray(), sourceCell.getRowOffset(), sourceCell.getRowLength()) + .setFamily(sourceCell.getFamilyArray(), sourceCell.getFamilyOffset(), + sourceCell.getFamilyLength()) + .setQualifier(sourceCell.getQualifierArray(), sourceCell.getQualifierOffset(), + sourceCell.getQualifierLength()) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setValue(sourceCell.getValueArray(), + sourceCell.getValueOffset(), sourceCell.getValueLength()) + .build(); } return sourceCell; } /** - * Compare the cells for the given row from the source and target tables. - * Count and log any differences. - * If not a dry run, output a Put and/or Delete needed to sync the target table - * to match the source table. + * Compare the cells for the given row from the source and target tables. Count and log any + * differences. If not a dry run, output a Put and/or Delete needed to sync the target table to + * match the source table. 
*/ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceCells, - CellScanner targetCells) throws IOException, InterruptedException { + CellScanner targetCells) throws IOException, InterruptedException { Put put = null; Delete delete = null; long matchingCells = 0; @@ -545,8 +556,8 @@ public class SyncTable extends Configured implements Tool { delete = new Delete(rowKey); } // add a tombstone to exactly match the target cell that is missing on the source - delete.addColumn(CellUtil.cloneFamily(targetCell), - CellUtil.cloneQualifier(targetCell), targetCell.getTimestamp()); + delete.addColumn(CellUtil.cloneFamily(targetCell), CellUtil.cloneQualifier(targetCell), + targetCell.getTimestamp()); } targetCell = targetCells.nextCellInRow(); @@ -557,12 +568,12 @@ public class SyncTable extends Configured implements Tool { } else { if (LOG.isDebugEnabled()) { LOG.debug("Different values: "); - LOG.debug(" source cell: " + sourceCell - + " value: " + Bytes.toString(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength())); - LOG.debug(" target cell: " + targetCell - + " value: " + Bytes.toString(targetCell.getValueArray(), - targetCell.getValueOffset(), targetCell.getValueLength())); + LOG.debug(" source cell: " + sourceCell + " value: " + + Bytes.toString(sourceCell.getValueArray(), sourceCell.getValueOffset(), + sourceCell.getValueLength())); + LOG.debug(" target cell: " + targetCell + " value: " + + Bytes.toString(targetCell.getValueArray(), targetCell.getValueOffset(), + targetCell.getValueLength())); } context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1); matchingRow = false; @@ -614,12 +625,11 @@ public class SyncTable extends Configured implements Tool { } /** - * Compare row keys of the given Result objects. - * Nulls are after non-nulls + * Compare row keys of the given Result objects. Nulls are after non-nulls */ private static int compareRowKeys(byte[] r1, byte[] r2) { if (r1 == null) { - return 1; // source missing row + return 1; // source missing row } else if (r2 == null) { return -1; // target missing row } else { @@ -630,11 +640,10 @@ public class SyncTable extends Configured implements Tool { } /** - * Compare families, qualifiers, and timestamps of the given Cells. - * They are assumed to be of the same row. - * Nulls are after non-nulls. + * Compare families, qualifiers, and timestamps of the given Cells. They are assumed to be of + * the same row. Nulls are after non-nulls. 
*/ - private int compareCellKeysWithinRow(Cell c1, Cell c2) { + private int compareCellKeysWithinRow(Cell c1, Cell c2) { if (c1 == null) { return 1; // source missing cell } @@ -661,8 +670,7 @@ public class SyncTable extends Configured implements Tool { } @Override - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { if (mapperException == null) { try { finishRemainingHashRanges(context); @@ -692,23 +700,26 @@ public class SyncTable extends Configured implements Tool { } } - private void finishRemainingHashRanges(Context context) throws IOException, - InterruptedException { + private void finishRemainingHashRanges(Context context) + throws IOException, InterruptedException { TableSplit split = (TableSplit) context.getInputSplit(); byte[] splitEndRow = split.getEndRow(); boolean reachedEndOfTable = HashTable.isTableEndRow(splitEndRow); // if there are more hash batches that begin before the end of this split move to them - while (nextSourceKey != null - && (nextSourceKey.compareTo(splitEndRow) < 0 || reachedEndOfTable)) { + while ( + nextSourceKey != null && (nextSourceKey.compareTo(splitEndRow) < 0 || reachedEndOfTable) + ) { moveToNextBatch(context); } if (targetHasher.isBatchStarted()) { // need to complete the final open hash batch - if ((nextSourceKey != null && nextSourceKey.compareTo(splitEndRow) > 0) - || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow))) { + if ( + (nextSourceKey != null && nextSourceKey.compareTo(splitEndRow) > 0) + || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow)) + ) { // the open hash range continues past the end of this region // add a scan to complete the current hash range Scan scan = sourceTableHash.initScan(); @@ -738,6 +749,7 @@ public class SyncTable extends Configured implements Tool { } private static final int NUM_ARGS = 3; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -771,10 +783,9 @@ public class SyncTable extends Configured implements Tool { System.err.println("Examples:"); System.err.println(" For a dry run SyncTable of tableA from a remote source cluster"); System.err.println(" to a local target cluster:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" - + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase" - + " hdfs://nn:9000/hashes/tableA tableA tableA"); + System.err.println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" + + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase" + + " hdfs://nn:9000/hashes/tableA tableA tableA"); } private boolean doCommandLine(final String[] args) { @@ -834,7 +845,6 @@ public class SyncTable extends Configured implements Tool { return false; } - } catch (Exception e) { e.printStackTrace(); printUsage("Can't start because " + e.getMessage()); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index 3eb7d699bd0..1b23924ecc5 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache 
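The SyncTable usage text above gives the shell form of a dry run; the same invocation can be driven from Java through ToolRunner. The ZooKeeper quorum, HDFS hash directory, and table names below are the placeholders from that usage text, and the Configuration-taking constructor is assumed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.SyncTable;
import org.apache.hadoop.util.ToolRunner;

public class SyncTableDryRun {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Dry-run sync of tableA from a remote source cluster into the local cluster.
    int ret = ToolRunner.run(conf, new SyncTable(conf), new String[] {
      "--dryrun=true",
      "--sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase",
      "hdfs://nn:9000/hashes/tableA", "tableA", "tableA" });
    System.exit(ret);
  }
}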
Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,31 +21,29 @@ import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Locale; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. */ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase -implements Configurable { +public class TableInputFormat extends TableInputFormatBase implements Configurable { @SuppressWarnings("hiding") private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); @@ -54,12 +51,13 @@ implements Configurable { /** Job parameter that specifies the input table. */ public static final String INPUT_TABLE = "hbase.mapreduce.inputtable"; /** - * If specified, use start keys of this table to split. - * This is useful when you are preparing data for bulkload. + * If specified, use start keys of this table to split. This is useful when you are preparing data + * for bulkload. */ private static final String SPLIT_TABLE = "hbase.mapreduce.splittable"; - /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. - * See {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. + /** + * Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. See + * {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. */ public static final String SCAN = "hbase.mapreduce.scan"; /** Scan start row */ @@ -92,7 +90,6 @@ implements Configurable { /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -102,16 +99,13 @@ implements Configurable { } /** - * Sets the configuration. This is used to set the details for the table to - * be scanned. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to set the details for the table to be scanned. + * @param configuration The configuration to set. 
+ * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Intentional") public void setConf(Configuration configuration) { this.conf = configuration; @@ -127,7 +121,7 @@ implements Configurable { try { scan = createScanFromConfiguration(conf); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error(StringUtils.stringifyException(e)); } } @@ -135,13 +129,13 @@ implements Configurable { } /** - * Sets up a {@link Scan} instance, applying settings from the configuration property - * constants defined in {@code TableInputFormat}. This allows specifying things such as: + * Sets up a {@link Scan} instance, applying settings from the configuration property constants + * defined in {@code TableInputFormat}. This allows specifying things such as: *

        - *
- *   <ul>
- *     <li>start and stop rows</li>
- *     <li>column qualifiers or families</li>
- *     <li>timestamps or timerange</li>
- *     <li>scanner caching and batch size</li>
- *   </ul>
+ * <ul>
+ * <li>start and stop rows</li>
+ * <li>column qualifiers or families</li>
+ * <li>timestamps or timerange</li>
+ * <li>scanner caching and batch size</li>
+ * </ul>
      */ public static Scan createScanFromConfiguration(Configuration conf) throws IOException { @@ -168,9 +162,8 @@ implements Configurable { } if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) { - scan.setTimeRange( - Long.parseLong(conf.get(SCAN_TIMERANGE_START)), - Long.parseLong(conf.get(SCAN_TIMERANGE_END))); + scan.setTimeRange(Long.parseLong(conf.get(SCAN_TIMERANGE_START)), + Long.parseLong(conf.get(SCAN_TIMERANGE_END))); } if (conf.get(SCAN_MAXVERSIONS) != null) { @@ -204,16 +197,14 @@ implements Configurable { } /** - * Parses a combined family and qualifier and adds either both or just the - * family in case there is no qualifier. This assumes the older colon - * divided notation, e.g. "family:qualifier". - * - * @param scan The Scan to update. + * Parses a combined family and qualifier and adds either both or just the family in case there is + * no qualifier. This assumes the older colon divided notation, e.g. "family:qualifier". + * @param scan The Scan to update. * @param familyAndQualifier family and qualifier * @throws IllegalArgumentException When familyAndQualifier is invalid. */ private static void addColumn(Scan scan, byte[] familyAndQualifier) { - byte [][] fq = CellUtil.parseColumn(familyAndQualifier); + byte[][] fq = CellUtil.parseColumn(familyAndQualifier); if (fq.length == 1) { scan.addFamily(fq[0]); } else if (fq.length == 2) { @@ -228,31 +219,31 @@ implements Configurable { *
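createScanFromConfiguration above rebuilds a Scan purely from configuration keys when no serialized SCAN property is set. A sketch of calling it directly, using only constants declared on TableInputFormat; the table "t1" and family "cf" are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

public class ScanFromConf {
  public static Scan build() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set(TableInputFormat.INPUT_TABLE, "t1");        // placeholder table
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf"); // placeholder family
    conf.set(TableInputFormat.SCAN_TIMERANGE_START, "0");
    conf.set(TableInputFormat.SCAN_TIMERANGE_END, String.valueOf(System.currentTimeMillis()));
    // Same path setConf() takes when no serialized SCAN is present in the configuration.
    return TableInputFormat.createScanFromConfiguration(conf);
  }
}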

      * Overrides previous calls to {@link Scan#addColumn(byte[], byte[])}for any families in the * input. - * - * @param scan The Scan to update. + * @param scan The Scan to update. * @param columns array of columns, formatted as family:qualifier * @see Scan#addColumn(byte[], byte[]) */ - public static void addColumns(Scan scan, byte [][] columns) { + public static void addColumns(Scan scan, byte[][] columns) { for (byte[] column : columns) { addColumn(scan, column); } } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. Splits are shuffled if - * required. - * @param context The current job context. + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. Splits are shuffled if required. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { List splits = super.getSplits(context); - if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) { + if ( + (conf.get(SHUFFLE_MAPS) != null) + && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT)) + ) { Collections.shuffle(splits); } return splits; @@ -260,9 +251,8 @@ implements Configurable { /** * Convenience method to parse a string representation of an array of column specifiers. - * - * @param scan The Scan to update. - * @param columns The columns to parse. + * @param scan The Scan to update. + * @param columns The columns to parse. */ private static void addColumns(Scan scan, String columns) { String[] cols = columns.split(" "); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 92d1f113639..da796e12738 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,19 +52,18 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A base for {@link TableInputFormat}s. Receives a {@link Connection}, a {@link TableName}, - * an {@link Scan} instance that defines the input columns etc. Subclasses may use - * other TableRecordReader implementations. - * - * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to - * function properly. Each of the entry points to this class used by the MapReduce framework, - * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, - * will call {@link #initialize(JobContext)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. - * + * A base for {@link TableInputFormat}s. 
Receives a {@link Connection}, a {@link TableName}, an + * {@link Scan} instance that defines the input columns etc. Subclasses may use other + * TableRecordReader implementations. Subclasses MUST ensure initializeTable(Connection, TableName) + * is called for an instance to function properly. Each of the entry points to this class used by + * the MapReduce framework, {@link #createRecordReader(InputSplit, TaskAttemptContext)} and + * {@link #getSplits(JobContext)}, will call {@link #initialize(JobContext)} as a convenient + * centralized location to handle retrieving the necessary configuration information. If your + * subclass overrides either of these methods, either call the parent version or call initialize + * yourself. *

 * An example of a subclass:
+ *
 * <pre>
 *   class ExampleTIF extends TableInputFormatBase {
 *
@@ -92,42 +90,43 @@ import org.slf4j.LoggerFactory;
 *   }
 * </pre>
      * - * - * The number of InputSplits(mappers) match the number of regions in a table by default. - * Set "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers per region, set - * this property will disable autobalance below.\ - * Set "hbase.mapreduce.tif.input.autobalance" to enable autobalance, hbase will assign mappers - * based on average region size; For regions, whose size larger than average region size may assigned - * more mappers, and for smaller one, they may group together to use one mapper. If actual average - * region size is too big, like 50G, it is not good to only assign 1 mapper for those large regions. - * Use "hbase.mapreduce.tif.ave.regionsize" to set max average region size when enable "autobalanece", - * default mas average region size is 8G. + * The number of InputSplits(mappers) match the number of regions in a table by default. Set + * "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers per region, set this + * property will disable autobalance below.\ Set "hbase.mapreduce.tif.input.autobalance" to enable + * autobalance, hbase will assign mappers based on average region size; For regions, whose size + * larger than average region size may assigned more mappers, and for smaller one, they may group + * together to use one mapper. If actual average region size is too big, like 50G, it is not good to + * only assign 1 mapper for those large regions. Use "hbase.mapreduce.tif.ave.regionsize" to set max + * average region size when enable "autobalanece", default mas average region size is 8G. */ @InterfaceAudience.Public -public abstract class TableInputFormatBase - extends InputFormat { +public abstract class TableInputFormatBase extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class); - private static final String NOT_INITIALIZED = "The input format instance has not been properly " + - "initialized. Ensure you call initializeTable either in your constructor or initialize " + - "method"; - private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + - " previous error. Please look at the previous logs lines from" + - " the task's full log for more details."; + private static final String NOT_INITIALIZED = "The input format instance has not been properly " + + "initialized. Ensure you call initializeTable either in your constructor or initialize " + + "method"; + private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."; /** Specify if we enable auto-balance to set number of mappers in M/R jobs. */ public static final String MAPREDUCE_INPUT_AUTOBALANCE = "hbase.mapreduce.tif.input.autobalance"; - /** In auto-balance, we split input by ave region size, if calculated region size is too big, we can set it. */ + /** + * In auto-balance, we split input by ave region size, if calculated region size is too big, we + * can set it. + */ public static final String MAX_AVERAGE_REGION_SIZE = "hbase.mapreduce.tif.ave.regionsize"; /** Set the number of Mappers for each region, all regions have same number of Mappers */ - public static final String NUM_MAPPERS_PER_REGION = "hbase.mapreduce.tableinput.mappers.per.region"; + public static final String NUM_MAPPERS_PER_REGION = + "hbase.mapreduce.tableinput.mappers.per.region"; - - /** Holds the details for the internal scanner. 
- * - * @see Scan */ + /** + * Holds the details for the internal scanner. + * @see Scan + */ private Scan scan = null; /** The {@link Admin}. */ private Admin admin; @@ -142,27 +141,22 @@ public abstract class TableInputFormatBase /** Used to generate splits based on region size. */ private RegionSizeCalculator regionSizeCalculator; - /** The reverse DNS lookup cache mapping: IPAddress => HostName */ - private HashMap reverseDNSCacheMap = - new HashMap<>(); + private HashMap reverseDNSCacheMap = new HashMap<>(); /** - * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses - * the default. - * - * @param split The split to work with. - * @param context The current context. + * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses the + * default. + * @param split The split to work with. + * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { // Just in case a subclass is relying on JobConfigurable magic. if (table == null) { initialize(context); @@ -179,7 +173,7 @@ public abstract class TableInputFormatBase TableSplit tSplit = (TableSplit) split; LOG.info("Input split length: " + StringUtils.humanReadableInt(tSplit.getLength()) + " bytes."); final TableRecordReader trr = - this.tableRecordReader != null ? this.tableRecordReader : new TableRecordReader(); + this.tableRecordReader != null ? this.tableRecordReader : new TableRecordReader(); Scan sc = new Scan(this.scan); sc.setStartRow(tSplit.getStartRow()); sc.setStopRow(tSplit.getEndRow()); @@ -209,8 +203,8 @@ public abstract class TableInputFormatBase } @Override - public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { trr.initialize(inputsplit, context); } @@ -221,17 +215,16 @@ public abstract class TableInputFormatBase }; } - protected Pair getStartEndKeys() throws IOException { + protected Pair getStartEndKeys() throws IOException { return getRegionLocator().getStartEndKeys(); } /** * Calculates the splits that will serve as input for the map tasks. - * @param context The current job context. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { @@ -267,10 +260,10 @@ public abstract class TableInputFormatBase return res; } - //The default value of "hbase.mapreduce.input.autobalance" is false. + // The default value of "hbase.mapreduce.input.autobalance" is false. 
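For context, a minimal driver-side sketch of how the split-tuning properties discussed above might be set before submitting a job; the job name and the chosen values are assumptions, and normally only one of the two knobs (fixed mappers per region versus autobalance) would be used at a time:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.mapreduce.Job;

public class SplitTuningExample {
  public static Job createJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Option 1: a fixed number of mappers per region (setting this skips autobalance).
    // conf.setInt("hbase.mapreduce.tableinput.mappers.per.region", 2);
    // Option 2: let the input format balance splits by average region size ...
    conf.setBoolean("hbase.mapreduce.tif.input.autobalance", true);
    // ... capping the average region size it uses at 8 GB (also the default).
    conf.setLong("hbase.mapreduce.tif.ave.regionsize", 8L * 1024 * 1024 * 1024);
    return Job.getInstance(conf, "scan-mytable");
  }
}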
if (context.getConfiguration().getBoolean(MAPREDUCE_INPUT_AUTOBALANCE, false)) { - long maxAveRegionSize = context.getConfiguration() - .getLong(MAX_AVERAGE_REGION_SIZE, 8L*1073741824); //8GB + long maxAveRegionSize = + context.getConfiguration().getLong(MAX_AVERAGE_REGION_SIZE, 8L * 1073741824); // 8GB return calculateAutoBalancedSplits(splits, maxAveRegionSize); } @@ -285,7 +278,6 @@ public abstract class TableInputFormatBase /** * Create one InputSplit per region - * * @return The list of InputSplit for all the regions * @throws IOException throws IOException */ @@ -299,10 +291,9 @@ public abstract class TableInputFormatBase TableName tableName = getTable().getName(); Pair keys = getStartEndKeys(); - if (keys == null || keys.getFirst() == null || - keys.getFirst().length == 0) { + if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { HRegionLocation regLoc = - getRegionLocator().getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false); + getRegionLocator().getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false); if (null == regLoc) { throw new IOException("Expecting at least one region."); } @@ -311,9 +302,9 @@ public abstract class TableInputFormatBase // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc - .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); + TableSplit split = + new TableSplit(tableName, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + regLoc.getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); splits.add(split); return splits; } @@ -326,17 +317,18 @@ public abstract class TableInputFormatBase byte[] startRow = scan.getStartRow(); byte[] stopRow = scan.getStopRow(); // determine if the given start an stop key fall into the region - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || - Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; + if ( + (startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0) + ) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; HRegionLocation location = getRegionLocator().getRegionLocation(keys.getFirst()[i], false); // The below InetSocketAddress creation does a name resolution. @@ -354,8 +346,8 @@ public abstract class TableInputFormatBase // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. 
See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - splitStart, splitStop, regionLocation, encodedRegionName, regionSize); + TableSplit split = new TableSplit(tableName, null, splitStart, splitStop, regionLocation, + encodedRegionName, regionSize); splits.add(split); if (LOG.isDebugEnabled()) { LOG.debug("getSplits: split -> " + i + " -> " + split); @@ -368,19 +360,19 @@ public abstract class TableInputFormatBase /** * Create n splits for one InputSplit, For now only support uniform distribution * @param split A TableSplit corresponding to a range of rowkeys - * @param n Number of ranges after splitting. Pass 1 means no split for the range - * Pass 2 if you want to split the range in two; + * @param n Number of ranges after splitting. Pass 1 means no split for the range Pass 2 if + * you want to split the range in two; * @return A list of TableSplit, the size of the list is n * @throws IllegalArgumentIOException throws IllegalArgumentIOException */ protected List createNInputSplitsUniform(InputSplit split, int n) - throws IllegalArgumentIOException { + throws IllegalArgumentIOException { if (split == null || !(split instanceof TableSplit)) { throw new IllegalArgumentIOException( - "InputSplit for CreateNSplitsPerRegion can not be null + " - + "and should be instance of TableSplit"); + "InputSplit for CreateNSplitsPerRegion can not be null + " + + "and should be instance of TableSplit"); } - //if n < 1, then still continue using n = 1 + // if n < 1, then still continue using n = 1 n = n < 1 ? 1 : n; List res = new ArrayList<>(n); if (n == 1) { @@ -398,51 +390,48 @@ public abstract class TableInputFormatBase byte[] endRow = ts.getEndRow(); // For special case: startRow or endRow is empty - if (startRow.length == 0 && endRow.length == 0){ + if (startRow.length == 0 && endRow.length == 0) { startRow = new byte[1]; endRow = new byte[1]; startRow[0] = 0; endRow[0] = -1; } - if (startRow.length == 0 && endRow.length != 0){ + if (startRow.length == 0 && endRow.length != 0) { startRow = new byte[1]; startRow[0] = 0; } - if (startRow.length != 0 && endRow.length == 0){ - endRow =new byte[startRow.length]; - for (int k = 0; k < startRow.length; k++){ + if (startRow.length != 0 && endRow.length == 0) { + endRow = new byte[startRow.length]; + for (int k = 0; k < startRow.length; k++) { endRow[k] = -1; } } // Split Region into n chunks evenly - byte[][] splitKeys = Bytes.split(startRow, endRow, true, n-1); + byte[][] splitKeys = Bytes.split(startRow, endRow, true, n - 1); for (int i = 0; i < splitKeys.length - 1; i++) { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - //notice that the regionSize parameter may be not very accurate - TableSplit tsplit = - new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], regionLocation, - encodedRegionName, regionSize / n); + // notice that the regionSize parameter may be not very accurate + TableSplit tsplit = new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], + regionLocation, encodedRegionName, regionSize / n); res.add(tsplit); } return res; } + /** - * Calculates the number of MapReduce input splits for the map tasks. The number of - * MapReduce input splits depends on the average region size. - * Make it 'public' for testing - * - * @param splits The list of input splits before balance. 
+ * Calculates the number of MapReduce input splits for the map tasks. The number of MapReduce + * input splits depends on the average region size. Make it 'public' for testing + * @param splits The list of input splits before balance. * @param maxAverageRegionSize max Average region size for one mapper * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - *org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ - public List calculateAutoBalancedSplits(List splits, long maxAverageRegionSize) - throws IOException { + public List calculateAutoBalancedSplits(List splits, + long maxAverageRegionSize) throws IOException { if (splits.size() == 0) { return splits; } @@ -455,15 +444,16 @@ public abstract class TableInputFormatBase long averageRegionSize = totalRegionSize / splits.size(); // totalRegionSize might be overflow, and the averageRegionSize must be positive. if (averageRegionSize <= 0) { - LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + - "set it to Long.MAX_VALUE " + splits.size()); + LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + + "set it to Long.MAX_VALUE " + splits.size()); averageRegionSize = Long.MAX_VALUE / splits.size(); } - //if averageRegionSize is too big, change it to default as 1 GB, + // if averageRegionSize is too big, change it to default as 1 GB, if (averageRegionSize > maxAverageRegionSize) { averageRegionSize = maxAverageRegionSize; } - // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' region + // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' + // region // set default as 16M = (default hdfs block size) / 4; if (averageRegionSize < 16 * 1048576) { return splits; @@ -477,7 +467,8 @@ public abstract class TableInputFormatBase if (regionSize >= averageRegionSize) { // make this region as multiple MapReduce input split. - int n = (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); + int n = + (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); List temp = createNInputSplitsUniform(ts, n); resultList.addAll(temp); } else { @@ -490,8 +481,10 @@ public abstract class TableInputFormatBase while (j < splits.size()) { TableSplit nextRegion = (TableSplit) splits.get(j); long nextRegionSize = nextRegion.getLength(); - if (totalSize + nextRegionSize <= averageRegionSize - && Bytes.equals(splitEndKey, nextRegion.getStartRow())) { + if ( + totalSize + nextRegionSize <= averageRegionSize + && Bytes.equals(splitEndKey, nextRegion.getStartRow()) + ) { totalSize = totalSize + nextRegionSize; splitEndKey = nextRegion.getEndRow(); j++; @@ -504,7 +497,7 @@ public abstract class TableInputFormatBase // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 TableSplit t = new TableSplit(tableName, null, splitStartKey, splitEndKey, regionLocation, - encodedRegionName, totalSize); + encodedRegionName, totalSize); resultList.add(t); } } @@ -533,26 +526,25 @@ public abstract class TableInputFormatBase } /** - * Test if the given region is to be included in the InputSplit while splitting - * the regions of a table. 
+ * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *

      - * This optimization is effective when there is a specific reasoning to exclude an entire region from the M-R job, - * (and hence, not contributing to the InputSplit), given the start and end keys of the same.
      - * Useful when we need to remember the last-processed top record and revisit the [last, current) interval for M-R processing, - * continuously. In addition to reducing InputSplits, reduces the load on the region server as well, due to the ordering of the keys. - *
+ * This optimization is effective when there is a specific reason to exclude an entire region + * from the M-R job (and hence not contribute an InputSplit), given its start and end keys.
      + * Useful when we need to remember the last-processed top record and revisit the [last, current) + * interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the + * load on the region server as well, due to the ordering of the keys.
      *
      * Note: It is possible that endKey.length() == 0 , for the last (recent) region. *
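As an illustration of the override point documented here, a sketch of a subclass that drops regions lying entirely before a previously processed checkpoint; the class name and checkpoint key are hypothetical, and the empty-end-key case from the note above is handled explicitly:

import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckpointedTableInputFormat extends TableInputFormat {
  // Hypothetical checkpoint: regions ending at or before this key were already processed.
  private static final byte[] CHECKPOINT = Bytes.toBytes("row-000100");

  @Override
  protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
    // An empty end key marks the last region of the table; always keep it.
    if (endKey.length == 0) {
      return true;
    }
    // Keep only regions that end strictly after the checkpoint.
    return Bytes.compareTo(endKey, CHECKPOINT) > 0;
  }
}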
      - * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no region is excluded( i.e. all regions are included). - * - * + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region - * @param endKey End key of the region + * @param endKey End key of the region * @return true, if this region needs to be included as part of the input (default). - * */ - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -588,15 +580,13 @@ public abstract class TableInputFormatBase /** * Allows subclasses to initialize the table information. - * - * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. - * @param tableName The {@link TableName} of the table to process. - * @throws IOException + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. + * @param tableName The {@link TableName} of the table to process. n */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { - LOG.warn("initializeTable called multiple times. Overwriting connection and table " + - "reference; TableInputFormatBase will not close these old references when done."); + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); @@ -607,13 +597,12 @@ public abstract class TableInputFormatBase @InterfaceAudience.Private protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, Admin admin) - throws IOException { + throws IOException { return new RegionSizeCalculator(locator, admin); } /** * Gets the scan defining the actual details like columns etc. - * * @return The internal scan instance. */ public Scan getScan() { @@ -623,8 +612,7 @@ public abstract class TableInputFormatBase /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -632,37 +620,29 @@ public abstract class TableInputFormatBase /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; } /** - * Handle subclass specific set up. - * Each of the entry points used by the MapReduce framework, + * Handle subclass specific set up. Each of the entry points used by the MapReduce framework, * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, * will call {@link #initialize(JobContext)} as a convenient centralized location to handle * retrieving the necessary configuration information and calling - * {@link #initializeTable(Connection, TableName)}. - * - * Subclasses should implement their initialize call such that it is safe to call multiple times. 
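A sketch of the initialize contract described above, assuming a hypothetical subclass that reads its table name from the standard TableInputFormat.INPUT_TABLE property; the null check keeps repeated calls from opening, and then leaking, a second Connection:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.mapreduce.JobContext;

public class MyTableInputFormat extends TableInputFormatBase {
  @Override
  protected void initialize(JobContext context) throws IOException {
    if (getTable() != null) {
      return; // already initialized; do not open (and leak) a second Connection
    }
    Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
    TableName tableName =
      TableName.valueOf(context.getConfiguration().get(TableInputFormat.INPUT_TABLE));
    initializeTable(connection, tableName);
    // A Scan must also be provided via setScan(...), e.g. rebuilt from the serialized
    // TableInputFormat.SCAN property, before splits are calculated.
  }
}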
- * The current TableInputFormatBase implementation relies on a non-null table reference to decide - * if an initialize call is needed, but this behavior may change in the future. In particular, - * it is critical that initializeTable not be called multiple times since this will leak - * Connection instances. - * + * {@link #initializeTable(Connection, TableName)}. Subclasses should implement their initialize + * call such that it is safe to call multiple times. The current TableInputFormatBase + * implementation relies on a non-null table reference to decide if an initialize call is needed, + * but this behavior may change in the future. In particular, it is critical that initializeTable + * not be called multiple times since this will leak Connection instances. */ protected void initialize(JobContext context) throws IOException { } /** * Close the Table and related objects that were initialized via - * {@link #initializeTable(Connection, TableName)}. - * - * @throws IOException + * {@link #initializeTable(Connection, TableName)}. n */ protected void closeTable() throws IOException { close(admin, table, regionLocator, connection); @@ -675,7 +655,9 @@ public abstract class TableInputFormatBase private void close(Closeable... closables) throws IOException { for (Closeable c : closables) { - if(c != null) { c.close(); } + if (c != null) { + c.close(); + } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index eaf1b407ccc..637b7998490 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import com.codahale.metrics.MetricRegistry; import java.io.File; import java.io.IOException; import java.net.URL; @@ -33,24 +33,18 @@ import java.util.Map; import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.TokenUtil; @@ -61,8 +55,12 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import com.codahale.metrics.MetricRegistry; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** * Utility for {@link TableMapper} and {@link TableReducer} @@ -74,128 +72,98 @@ public class TableMapReduceUtil { public static final String TABLE_INPUT_CLASS_KEY = "hbase.table.input.class"; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, - job, true); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, true); } - /** - * Use this before submitting a TableMap job. 
It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(TableName table, - Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, - Job job) throws IOException { - initTableMapperJob(table.getNameAsString(), - scan, - mapper, - outputKeyClass, - outputValueClass, - job, - true); + public static void initTableMapperJob(TableName table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(table.getNameAsString(), scan, mapper, outputKeyClass, outputValueClass, job, + true); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table Binary representation of the table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, - job, true); + public static void initTableMapperJob(byte[] table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + true); } - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. 
Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @throws IOException When setting up the details fails. - */ - public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true, inputFormatClass); - } - - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param initCredentials whether to initialize hbase auth credentials for the job - * @param inputFormatClass the input format + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, boolean initCredentials, - Class inputFormatClass) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, true, inputFormatClass); + } + + /** + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param initCredentials whether to initialize hbase auth credentials for the job + * @param inputFormatClass the input format + * @throws IOException When setting up the details fails. 
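A minimal driver sketch for the simple single-table overload above; the table name "mytable", the column family "cf", and the counting mapper are all assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class RowCountDriver {
  // Hypothetical mapper that only bumps a counter per row.
  static class CountMapper extends TableMapper<ImmutableBytesWritable, LongWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) {
      context.getCounter("example", "rows").increment(1);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "row-count");
    job.setJarByClass(RowCountDriver.class);
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    scan.setCaching(500);        // larger scanner caching for MR throughput
    scan.setCacheBlocks(false);  // do not pollute the block cache from a full-table job
    TableMapReduceUtil.initTableMapperJob("mytable", scan, CountMapper.class, null, null, job);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

Passing null for the output key and value classes is acceptable here because the mapper never emits records and the methods above only set those classes when non-null.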
+ */ + public static void initTableMapperJob(String table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, boolean initCredentials, + Class inputFormatClass) throws IOException { job.setInputFormatClass(inputFormatClass); if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass); if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass); @@ -208,8 +176,8 @@ public class TableMapReduceUtil { conf.set(TableInputFormat.INPUT_TABLE, table); conf.set(TableInputFormat.SCAN, convertScanToString(scan)); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (addDependencyJars) { addDependencyJars(job); } @@ -219,120 +187,103 @@ public class TableMapReduceUtil { } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table Binary representation of the table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param inputFormatClass The class of the input format + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param inputFormatClass The class of the input format * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, inputFormatClass); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, inputFormatClass); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table Binary representation of the table name to read from. 
+ * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** * @return {@link TableInputFormat} .class unless Configuration has something else at - * {@link #TABLE_INPUT_CLASS_KEY}. + * {@link #TABLE_INPUT_CLASS_KEY}. */ private static Class getConfiguredInputFormat(Job job) { - return (Class)job.getConfiguration(). - getClass(TABLE_INPUT_CLASS_KEY, TableInputFormat.class); + return (Class) job.getConfiguration().getClass(TABLE_INPUT_CLASS_KEY, + TableInputFormat.class); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. 
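Where a custom input format is wanted, the overloads above that omit an explicit inputFormatClass fall back to whatever class is registered under TABLE_INPUT_CLASS_KEY ("hbase.table.input.class"); a sketch, reusing the hypothetical MyTableInputFormat and CountMapper from the earlier sketches:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;

public class CustomInputFormatSetup {
  public static void configure(Job job, Scan scan) throws Exception {
    // Register the custom format so the overloads without an explicit inputFormatClass use it.
    job.getConfiguration().setClass(TableMapReduceUtil.TABLE_INPUT_CLASS_KEY,
      MyTableInputFormat.class, InputFormat.class);
    // CountMapper and MyTableInputFormat are the hypothetical classes sketched earlier.
    TableMapReduceUtil.initTableMapperJob("mytable", scan, RowCountDriver.CountMapper.class, null,
      null, job, true);
  }
}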
*/ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** - * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on - * direct memory will likely cause the map tasks to OOM when opening the region. This - * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user - * wants to override this behavior in their job. + * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on direct + * memory will likely cause the map tasks to OOM when opening the region. This is done here + * instead of in TableSnapshotRegionRecordReader in case an advanced user wants to override this + * behavior in their job. */ public static void resetCacheConfig(Configuration conf) { - conf.setFloat( - HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f); conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); } /** - * Sets up the job for reading from one or more table snapshots, with one or more scans - * per snapshot. - * It bypasses hbase servers and read directly from snapshot files. - * + * Sets up the job for reading from one or more table snapshots, with one or more scans per + * snapshot. It bypasses hbase servers and read directly from snapshot files. * @param snapshotScans map of snapshot name to scans on that snapshot. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ public static void initMultiTableSnapshotMapperJob(Map> snapshotScans, - Class mapper, Class outputKeyClass, Class outputValueClass, - Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, tmpRestoreDir); job.setInputFormatClass(MultiTableSnapshotInputFormat.class); @@ -357,27 +308,25 @@ public class TableMapReduceUtil { /** * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly * from snapshot files. - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. 
- * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase - * configuration. + * @param snapshotName The name of the snapshot (of a table) to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via - * the distributed cache (tmpjars). - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of - * rootdir. After the job is finished, restore directory can be deleted. + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir); initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); @@ -385,106 +334,87 @@ public class TableMapReduceUtil { } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. - * @param splitAlgo algorithm to split + * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. + * @param snapshotName The name of the snapshot (of a table) to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. 
Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. + * @param splitAlgo algorithm to split * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, - int numSplitsPerRegion) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir, RegionSplitter.SplitAlgorithm splitAlgo, + int numSplitsPerRegion) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir, splitAlgo, - numSplitsPerRegion); - initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); + numSplitsPerRegion); + initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, false, TableSnapshotInputFormat.class); resetCacheConfig(job.getConfiguration()); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * - * @param scans The list of {@link Scan} objects to read from. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. + * @param scans The list of {@link Scan} objects to read from. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job) throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * - * @param scans The list of {@link Scan} objects to read from. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. 
- * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. + * @param scans The list of {@link Scan} objects to read from. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars) + throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, + true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * - * @param scans The list of {@link Scan} objects to read from. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). - * @param initCredentials whether to initialize hbase auth credentials for the job + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. + * @param scans The list of {@link Scan} objects to read from. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param initCredentials whether to initialize hbase auth credentials for the job * @throws IOException When setting up the details fails. 
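A sketch of the multi-scan form above, assuming two tables and tagging each Scan with its target table via Scan.SCAN_ATTRIBUTES_TABLE_NAME, which is the attribute the multi-table input format uses to route each scan; CountMapper is the hypothetical mapper from the earlier driver sketch:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class MultiTableScanSetup {
  public static void configure(Job job) throws Exception {
    List<Scan> scans = new ArrayList<>();
    for (String table : new String[] { "table_a", "table_b" }) {
      Scan scan = new Scan();
      // Each scan carries the name of the table it targets.
      scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, TableName.valueOf(table).getName());
      scans.add(scan);
    }
    TableMapReduceUtil.initTableMapperJob(scans, RowCountDriver.CountMapper.class, null, null, job);
  }
}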
*/ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, - boolean initCredentials) throws IOException { + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars, + boolean initCredentials) throws IOException { job.setInputFormatClass(MultiTableInputFormat.class); if (outputValueClass != null) { job.setMapOutputValueClass(outputValueClass); @@ -518,7 +448,7 @@ public class TableMapReduceUtil { // propagate delegation related props from launcher job to MR job if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) { job.getConfiguration().set("mapreduce.job.credentials.binary", - System.getenv("HADOOP_TOKEN_FILE_LOCATION")); + System.getenv("HADOOP_TOKEN_FILE_LOCATION")); } } @@ -529,7 +459,7 @@ public class TableMapReduceUtil { User user = userProvider.getCurrent(); if (quorumAddress != null) { Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); + quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); Connection peerConn = ConnectionFactory.createConnection(peerConf); try { TokenUtil.addTokenForJob(peerConn, user, job); @@ -552,39 +482,33 @@ public class TableMapReduceUtil { } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * - * The quorumAddress is the key to the ZK ensemble, which contains: - * hbase.zookeeper.quorum, hbase.zookeeper.client.port and + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. The quorumAddress is the key to the ZK + * ensemble, which contains: hbase.zookeeper.quorum, hbase.zookeeper.client.port and * zookeeper.znode.parent - * - * @param job The job that requires the permission. + * @param job The job that requires the permission. * @param quorumAddress string that contains the 3 required configuratins * @throws IOException When the authentication token cannot be obtained. * @deprecated Since 1.2.0 and will be removed in 3.0.0. Use - * {@link #initCredentialsForCluster(Job, Configuration)} instead. + * {@link #initCredentialsForCluster(Job, Configuration)} instead. * @see #initCredentialsForCluster(Job, Configuration) * @see HBASE-14886 */ @Deprecated - public static void initCredentialsForCluster(Job job, String quorumAddress) - throws IOException { - Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress); + public static void initCredentialsForCluster(Job job, String quorumAddress) throws IOException { + Configuration peerConf = + HBaseConfiguration.createClusterConf(job.getConfiguration(), quorumAddress); initCredentialsForCluster(job, peerConf); } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * - * @param job The job that requires the permission. + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. + * @param job The job that requires the permission. * @param conf The configuration to use in connecting to the peer cluster * @throws IOException When the authentication token cannot be obtained. 
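A sketch of obtaining a delegation token for a remote secure cluster with the non-deprecated overload above; the ZooKeeper hosts in the cluster key are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class PeerCredentialsSetup {
  public static void addPeerToken(Job job) throws Exception {
    // Cluster key format: <quorum>:<client port>:<znode parent>; hosts here are placeholders.
    String peerClusterKey = "zk1,zk2,zk3:2181:/hbase";
    Configuration peerConf =
      HBaseConfiguration.createClusterConf(job.getConfiguration(), peerClusterKey);
    TableMapReduceUtil.initCredentialsForCluster(job, peerConf);
  }
}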
*/ - public static void initCredentialsForCluster(Job job, Configuration conf) - throws IOException { + public static void initCredentialsForCluster(Job job, Configuration conf) throws IOException { UserProvider userProvider = UserProvider.instantiate(conf); if (userProvider.isHBaseSecurityEnabled()) { try { @@ -603,8 +527,7 @@ public class TableMapReduceUtil { /** * Writes the given scan into a Base64 encoded string. - * - * @param scan The scan to write out. + * @param scan The scan to write out. * @return The scan saved in a Base64 encoded string. * @throws IOException When writing the scan fails. */ @@ -615,110 +538,94 @@ public class TableMapReduceUtil { /** * Converts the given Base64 string back into a Scan instance. - * - * @param base64 The scan details. + * @param base64 The scan details. * @return The newly created Scan instance. * @throws IOException When reading the scan instance fails. */ public static Scan convertStringToScan(String base64) throws IOException { - byte [] decoded = Base64.getDecoder().decode(base64); + byte[] decoded = Base64.getDecoder().decode(base64); return ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(decoded)); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job) - throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job) throws IOException { initTableReducerJob(table, reducer, job, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner) throws IOException { initTableReducerJob(table, reducer, job, partitioner, null, null, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. 
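The Base64 round-trip above is how a Scan ends up in the job configuration under TableInputFormat.SCAN; a small sketch of doing the conversion by hand (the column family is an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSerializationExample {
  public static void storeScan(Configuration conf) throws Exception {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    // Serialize to a Base64 string and store it where TableInputFormat expects it.
    conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(scan));
    // The reverse direction rebuilds an equivalent Scan instance.
    Scan restored = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
    assert restored != null;
  }
}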
- * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to the + * zookeeper ensemble of an alternate remote cluster when you would have the + * reduce write a cluster that is other than the default; e.g. copying tables + * between clusters, the source would be designated by + * hbase-site.xml and this param would have the ensemble address + * of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> - * such as server,server2,server3:2181:/hbase. - * @param serverClass redefined hbase.regionserver.class - * @param serverImpl redefined hbase.regionserver.impl + * such as server,server2,server3:2181:/hbase. + * @param serverClass redefined hbase.regionserver.class + * @param serverImpl redefined hbase.regionserver.impl * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl) throws IOException { - initTableReducerJob(table, reducer, job, partitioner, quorumAddress, - serverClass, serverImpl, true); + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl) + throws IOException { + initTableReducerJob(table, reducer, job, partitioner, quorumAddress, serverClass, serverImpl, + true); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. - * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. 
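Stepping back to the convertScanToString/convertStringToScan helpers above, the Base64 round trip can be checked in isolation; a small sketch (the column family name is made up):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanRoundTrip {
      public static void main(String[] args) throws Exception {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("cf"));
        scan.setCaching(500);

        // Base64-encode the Scan the way the mapper-job setup stores it in the
        // job configuration, then decode it back into an equivalent Scan.
        String encoded = TableMapReduceUtil.convertScanToString(scan);
        Scan decoded = TableMapReduceUtil.convertStringToScan(encoded);
        System.out.println(decoded.getCaching()); // prints 500
      }
    }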
+ * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to + * the zookeeper ensemble of an alternate remote cluster when you would + * have the reduce write a cluster that is other than the default; e.g. + * copying tables between clusters, the source would be designated by + * hbase-site.xml and this param would have the ensemble + * address of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> - * such as server,server2,server3:2181:/hbase. - * @param serverClass redefined hbase.regionserver.class - * @param serverImpl redefined hbase.regionserver.impl - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * such as server,server2,server3:2181:/hbase. + * @param serverClass redefined hbase.regionserver.class + * @param serverImpl redefined hbase.regionserver.impl + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl, boolean addDependencyJars) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl, + boolean addDependencyJars) throws IOException { Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); @@ -726,12 +633,12 @@ public class TableMapReduceUtil { if (reducer != null) job.setReducerClass(reducer); conf.set(TableOutputFormat.OUTPUT_TABLE, table); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); // If passed a quorum/ensemble address, pass it on to TableOutputFormat. if (quorumAddress != null) { // Calling this will validate the format ZKConfig.validateClusterKey(quorumAddress); - conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress); + conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress); } if (serverClass != null && serverImpl != null) { conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass); @@ -757,11 +664,10 @@ public class TableMapReduceUtil { } /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Ensures that the given number of reduce tasks for the given job configuration does not exceed + * the number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ public static void limitNumReduceTasks(String table, Job job) throws IOException { @@ -772,11 +678,10 @@ public class TableMapReduceUtil { } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. 
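The cluster-key format spelled out above can be passed straight to the seven-argument overload; a sketch in which the quorum hosts and the target table are placeholders:

    import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class RemoteClusterReduce {
      public static void configure(Job job) throws Exception {
        // Write the reduce output to a table on another cluster. The key follows
        // <hbase.zookeeper.quorum>:<hbase.zookeeper.client.port>:<zookeeper.znode.parent>.
        String peerClusterKey = "zk1,zk2,zk3:2181:/hbase";
        TableMapReduceUtil.initTableReducerJob("backup_table", IdentityTableReducer.class, job,
          null, peerClusterKey, null, null);
      }
    }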
- * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Sets the number of reduce tasks for the given job configuration to the number of regions the + * given table has. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumReduceTasks(String table, Job job) throws IOException { @@ -784,13 +689,11 @@ public class TableMapReduceUtil { } /** - * Sets the number of rows to return and cache with each scanner iteration. - * Higher caching values will enable faster mapreduce jobs at the expense of - * requiring more heap to contain the cached rows. - * - * @param job The current job to adjust. - * @param batchSize The number of rows to return in batch with each scanner - * iteration. + * Sets the number of rows to return and cache with each scanner iteration. Higher caching values + * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached + * rows. + * @param job The current job to adjust. + * @param batchSize The number of rows to return in batch with each scanner iteration. */ public static void setScannerCaching(Job job, int batchSize) { job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize); @@ -799,10 +702,9 @@ public class TableMapReduceUtil { /** * Add HBase and its dependencies (only) to the job configuration. *
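Taken together, the two helpers above let a driver cap the reduce task count at the region count and raise scanner caching; a minimal sketch (table name and batch size are arbitrary):

    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ScanJobTuning {
      public static void tune(Job job) throws Exception {
        // Never schedule more reduce tasks than the table has regions.
        TableMapReduceUtil.limitNumReduceTasks("my_table", job);
        // Fetch 500 rows per scanner round trip; faster, but uses more heap.
        TableMapReduceUtil.setScannerCaching(job, 500);
      }
    }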

- * This is intended as a low-level API, facilitating code reuse between this - * class and its mapred counterpart. It also of use to external tools that - * need to build a MapReduce job that interacts with HBase but want - * fine-grained control over the jars shipped to the cluster. + * This is intended as a low-level API, facilitating code reuse between this class and its mapred + * counterpart. It is also of use to external tools that need to build a MapReduce job that interacts + * with HBase but want fine-grained control over the jars shipped to the cluster. *

      * @param conf The Configuration object to extend with dependencies. * @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil @@ -811,35 +713,35 @@ public class TableMapReduceUtil { public static void addHBaseDependencyJars(Configuration conf) throws IOException { addDependencyJarsForClasses(conf, // explicitly pull a class from each module - org.apache.hadoop.hbase.HConstants.class, // hbase-common + org.apache.hadoop.hbase.HConstants.class, // hbase-common org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class, // hbase-protocol org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded - org.apache.hadoop.hbase.client.Put.class, // hbase-client - org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server - org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat - org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat - org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce - org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics - org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api - org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication - org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http - org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure - org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper + org.apache.hadoop.hbase.client.Put.class, // hbase-client + org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server + org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat + org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat + org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce + org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics + org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api + org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication + org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http + org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure + org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper org.apache.hbase.thirdparty.com.google.common.collect.Lists.class, // hb-shaded-miscellaneous org.apache.hbase.thirdparty.com.google.gson.GsonBuilder.class, // hbase-shaded-gson org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.class, // hb-sh-protobuf - org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty - org.apache.zookeeper.ZooKeeper.class, // zookeeper - com.google.protobuf.Message.class, // protobuf - com.codahale.metrics.MetricRegistry.class, // metrics-core - org.apache.commons.lang3.ArrayUtils.class, // commons-lang - io.opentelemetry.api.trace.Span.class, // opentelemetry-api + org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty + org.apache.zookeeper.ZooKeeper.class, // zookeeper + com.google.protobuf.Message.class, // protobuf + com.codahale.metrics.MetricRegistry.class, // metrics-core + org.apache.commons.lang3.ArrayUtils.class, // commons-lang + io.opentelemetry.api.trace.Span.class, // opentelemetry-api io.opentelemetry.semconv.trace.attributes.SemanticAttributes.class); // opentelemetry-semconv } /** - * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. - * Also exposed to shell scripts via `bin/hbase mapredcp`. 
+ * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. Also + * exposed to shell scripts via `bin/hbase mapredcp`. */ public static String buildDependencyClasspath(Configuration conf) { if (conf == null) { @@ -861,63 +763,52 @@ public class TableMapReduceUtil { } /** - * Add the HBase dependency jars as well as jars for any of the configured - * job classes to the job configuration, so that JobClient will ship them - * to the cluster and add them to the DistributedCache. + * Add the HBase dependency jars as well as jars for any of the configured job classes to the job + * configuration, so that JobClient will ship them to the cluster and add them to the + * DistributedCache. */ public static void addDependencyJars(Job job) throws IOException { addHBaseDependencyJars(job.getConfiguration()); try { addDependencyJarsForClasses(job.getConfiguration(), - // when making changes here, consider also mapred.TableMapReduceUtil - // pull job classes - job.getMapOutputKeyClass(), - job.getMapOutputValueClass(), - job.getInputFormatClass(), - job.getOutputKeyClass(), - job.getOutputValueClass(), - job.getOutputFormatClass(), - job.getPartitionerClass(), - job.getCombinerClass()); + // when making changes here, consider also mapred.TableMapReduceUtil + // pull job classes + job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getInputFormatClass(), + job.getOutputKeyClass(), job.getOutputValueClass(), job.getOutputFormatClass(), + job.getPartitionerClass(), job.getCombinerClass()); } catch (ClassNotFoundException e) { throw new IOException(e); } } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. * @deprecated since 1.3.0 and will be removed in 3.0.0. Use {@link #addDependencyJars(Job)} - * instead. + * instead. * @see #addDependencyJars(Job) * @see HBASE-8386 */ @Deprecated - public static void addDependencyJars(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJars(Configuration conf, Class... classes) throws IOException { LOG.warn("The addDependencyJars(Configuration, Class...) method has been deprecated since it" - + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + - "instead. See HBASE-8386 for more details."); + + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + + "instead. See HBASE-8386 for more details."); addDependencyJarsForClasses(conf, classes); } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. - * - * N.B. that this method at most adds one jar per class given. If there is more than one - * jar available containing a class with the same name as a given class, we don't define - * which of those jars might be chosen. - * - * @param conf The Hadoop Configuration to modify + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. N.B. that this method at most + * adds one jar per class given. 
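In practice the non-deprecated path above is a single call made after the job classes are set, and the same "tmpjars" value can then be rendered as a classpath; a sketch under that assumption:

    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ShipDependencyJars {
      public static void ship(Job job) throws Exception {
        // Puts the HBase module jars plus the jars of the configured job classes
        // on "tmpjars" so the cluster-side tasks can load them.
        TableMapReduceUtil.addDependencyJars(job);
        // Same list rendered as a classpath, as `bin/hbase mapredcp` does.
        System.out.println(TableMapReduceUtil.buildDependencyClasspath(job.getConfiguration()));
      }
    }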
If there is more than one jar available containing a class with + * the same name as a given class, we don't define which of those jars might be chosen. + * @param conf The Hadoop Configuration to modify * @param classes will add just those dependencies needed to find the given classes * @throws IOException if an underlying library call fails. */ @InterfaceAudience.Private - public static void addDependencyJarsForClasses(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJarsForClasses(Configuration conf, Class... classes) + throws IOException { FileSystem localFs = FileSystem.getLocal(conf); Set jars = new HashSet<>(); @@ -934,13 +825,11 @@ public class TableMapReduceUtil { Path path = findOrCreateJar(clazz, localFs, packagedClasses); if (path == null) { - LOG.warn("Could not find jar for class " + clazz + - " in order to ship it to the cluster."); + LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster."); continue; } if (!localFs.exists(path)) { - LOG.warn("Could not validate jar file " + path + " for class " - + clazz); + LOG.warn("Could not validate jar file " + path + " for class " + clazz); continue; } jars.add(path.toString()); @@ -951,21 +840,18 @@ public class TableMapReduceUtil { } /** - * Finds the Jar for a class or creates it if it doesn't exist. If the class is in - * a directory in the classpath, it creates a Jar on the fly with the - * contents of the directory and returns the path to that Jar. If a Jar is - * created, it is created in the system temporary directory. Otherwise, - * returns an existing jar that contains a class of the same name. Maintains - * a mapping from jar contents to the tmp jar created. - * @param my_class the class to find. - * @param fs the FileSystem with which to qualify the returned path. + * Finds the Jar for a class or creates it if it doesn't exist. If the class is in a directory in + * the classpath, it creates a Jar on the fly with the contents of the directory and returns the + * path to that Jar. If a Jar is created, it is created in the system temporary directory. + * Otherwise, returns an existing jar that contains a class of the same name. Maintains a mapping + * from jar contents to the tmp jar created. + * @param my_class the class to find. + * @param fs the FileSystem with which to qualify the returned path. * @param packagedClasses a map of class name to path. - * @return a jar file that contains the class. - * @throws IOException + * @return a jar file that contains the class. n */ private static Path findOrCreateJar(Class my_class, FileSystem fs, - Map packagedClasses) - throws IOException { + Map packagedClasses) throws IOException { // attempt to locate an existing jar for the class. String jar = findContainingJar(my_class, packagedClasses); if (null == jar || jar.isEmpty()) { @@ -982,12 +868,13 @@ public class TableMapReduceUtil { } /** - * Add entries to packagedClasses corresponding to class files - * contained in jar. - * @param jar The jar who's content to list. + * Add entries to packagedClasses corresponding to class files contained in + * jar. + * @param jar The jar who's content to list. 
* @param packagedClasses map[class -> jar] */ - private static void updateMap(String jar, Map packagedClasses) throws IOException { + private static void updateMap(String jar, Map packagedClasses) + throws IOException { if (null == jar || jar.isEmpty()) { return; } @@ -1006,16 +893,14 @@ public class TableMapReduceUtil { } /** - * Find a jar that contains a class of the same name, if any. It will return - * a jar file, even if that is not the first thing on the class path that - * has a class with the same name. Looks first on the classpath and then in - * the packagedClasses map. + * Find a jar that contains a class of the same name, if any. It will return a jar file, even if + * that is not the first thing on the class path that has a class with the same name. Looks first + * on the classpath and then in the packagedClasses map. * @param my_class the class to find. - * @return a jar file that contains the class, or null. - * @throws IOException + * @return a jar file that contains the class, or null. n */ private static String findContainingJar(Class my_class, Map packagedClasses) - throws IOException { + throws IOException { ClassLoader loader = my_class.getClassLoader(); String class_file = my_class.getName().replaceAll("\\.", "/") + ".class"; @@ -1048,9 +933,8 @@ public class TableMapReduceUtil { } /** - * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job - * configuration contexts (HBASE-8140) and also for testing on MRv2. - * check if we have HADOOP-9426. + * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job configuration + * contexts (HBASE-8140) and also for testing on MRv2. check if we have HADOOP-9426. * @param my_class the class to find. * @return a jar file that contains the class, or null. */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java index 3a63bc60ab2..d561969c9a3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,19 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the base Mapper class to add the required input key - * and value classes. - * - * @param The type of the key. - * @param The type of the value. + * Extends the base Mapper class to add the required input key and value classes. + * @param The type of the key. + * @param The type of the value. 
* @see org.apache.hadoop.mapreduce.Mapper */ @InterfaceAudience.Public public abstract class TableMapper -extends Mapper { + extends Mapper { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java index e02ba5f5435..a5965953491 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,10 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** * Small committer class that does not do anything. @@ -60,8 +58,6 @@ public class TableOutputCommitter extends OutputCommitter { return true; } - public void recoverTask(TaskAttemptContext taskContext) - throws IOException - { + public void recoverTask(TaskAttemptContext taskContext) throws IOException { } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 8da8d83d923..e8316c5016f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,13 +42,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored - * while the output value must be either a {@link Put} or a - * {@link Delete} instance. + * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored while the output + * value must be either a {@link Put} or a {@link Delete} instance. */ @InterfaceAudience.Public -public class TableOutputFormat extends OutputFormat -implements Configurable { +public class TableOutputFormat extends OutputFormat implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(TableOutputFormat.class); @@ -57,20 +54,19 @@ implements Configurable { public static final String OUTPUT_TABLE = "hbase.mapred.outputtable"; /** - * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. - * For keys matching this prefix, the prefix is stripped, and the value is set in the - * configuration with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" - * would be set in the configuration as "key1 = value1". Use this to set properties - * which should only be applied to the {@code TableOutputFormat} configuration and not the - * input configuration. + * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. 
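A minimal TableMapper along the lines of the class just shown, counting rows by emitting each row key with a constant, might look like this sketch (the class is hypothetical, not part of the patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.io.LongWritable;

    public class RowKeyCountMapper extends TableMapper<ImmutableBytesWritable, LongWritable> {
      private static final LongWritable ONE = new LongWritable(1);

      @Override
      protected void map(ImmutableBytesWritable rowKey, Result columns, Context context)
          throws IOException, InterruptedException {
        // The fixed input types are the row key and the Result holding its cells.
        context.write(rowKey, ONE);
      }
    }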
For + * keys matching this prefix, the prefix is stripped, and the value is set in the configuration + * with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" would be set in the + * configuration as "key1 = value1". Use this to set properties which should only be applied to + * the {@code TableOutputFormat} configuration and not the input configuration. */ public static final String OUTPUT_CONF_PREFIX = "hbase.mapred.output."; /** - * Optional job parameter to specify a peer cluster. - * Used specifying remote cluster when copying between hbase clusters (the - * source is picked up from hbase-site.xml). - * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, Class, String, String, String) + * Optional job parameter to specify a peer cluster. Used specifying remote cluster when copying + * between hbase clusters (the source is picked up from hbase-site.xml). + * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, + * Class, String, String, String) */ public static final String QUORUM_ADDRESS = OUTPUT_CONF_PREFIX + "quorum"; @@ -78,11 +74,9 @@ implements Configurable { public static final String QUORUM_PORT = OUTPUT_CONF_PREFIX + "quorum.port"; /** Optional specification of the rs class name of the peer cluster */ - public static final String - REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; + public static final String REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; /** Optional specification of the rs impl name of the peer cluster */ - public static final String - REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; + public static final String REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; /** The configuration. */ private Configuration conf = null; @@ -90,26 +84,24 @@ implements Configurable { /** * Writes the reducer output to an HBase table. */ - protected class TableRecordWriter - extends RecordWriter { + protected class TableRecordWriter extends RecordWriter { private Connection connection; private BufferedMutator mutator; /** - * @throws IOException - * + * n * */ public TableRecordWriter() throws IOException { String tableName = conf.get(OUTPUT_TABLE); this.connection = ConnectionFactory.createConnection(conf); this.mutator = connection.getBufferedMutator(TableName.valueOf(tableName)); - LOG.info("Created table instance for " + tableName); + LOG.info("Created table instance for " + tableName); } + /** * Closes the writer, in this case flush table commits. - * - * @param context The context. + * @param context The context. * @throws IOException When closing the writer fails. * @see RecordWriter#close(TaskAttemptContext) */ @@ -128,15 +120,13 @@ implements Configurable { /** * Writes a key/value pair into the table. - * - * @param key The key. - * @param value The value. + * @param key The key. + * @param value The value. * @throws IOException When writing fails. * @see RecordWriter#write(Object, Object) */ @Override - public void write(KEY key, Mutation value) - throws IOException { + public void write(KEY key, Mutation value) throws IOException { if (!(value instanceof Put) && !(value instanceof Delete)) { throw new IOException("Pass a Delete or a Put"); } @@ -145,29 +135,25 @@ implements Configurable { } /** - * Creates a new record writer. - * - * Be aware that the baseline javadoc gives the impression that there is a single - * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new + * Creates a new record writer. 
Be aware that the baseline javadoc gives the impression that there + * is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new * RecordWriter per call of this method. You must close the returned RecordWriter when done. * Failure to do so will drop writes. - * - * @param context The current task context. + * @param context The current task context. * @return The newly created writer instance. - * @throws IOException When creating the writer fails. + * @throws IOException When creating the writer fails. * @throws InterruptedException When the job is cancelled. */ @Override public RecordWriter getRecordWriter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return new TableRecordWriter(); } /** * Checks if the output table exists and is enabled. - * - * @param context The current context. - * @throws IOException When the check fails. + * @param context The current context. + * @throws IOException When the check fails. * @throws InterruptedException When the job is aborted. * @see OutputFormat#checkOutputSpecs(JobContext) */ @@ -182,29 +168,28 @@ implements Configurable { Admin admin = connection.getAdmin()) { TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE)); if (!admin.tableExists(tableName)) { - throw new TableNotFoundException("Can't write, table does not exist:" + - tableName.getNameAsString()); + throw new TableNotFoundException( + "Can't write, table does not exist:" + tableName.getNameAsString()); } if (!admin.isTableEnabled(tableName)) { - throw new TableNotEnabledException("Can't write, table is not enabled: " + - tableName.getNameAsString()); + throw new TableNotEnabledException( + "Can't write, table is not enabled: " + tableName.getNameAsString()); } } } /** * Returns the output committer. - * - * @param context The current context. + * @param context The current context. * @return The committer. - * @throws IOException When creating the committer fails. + * @throws IOException When creating the committer fails. * @throws InterruptedException When the job is aborted. * @see OutputFormat#getOutputCommitter(TaskAttemptContext) */ @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return new TableOutputCommitter(); } @@ -216,7 +201,7 @@ implements Configurable { @Override public void setConf(Configuration otherConf) { String tableName = otherConf.get(OUTPUT_TABLE); - if(tableName == null || tableName.length() <= 0) { + if (tableName == null || tableName.length() <= 0) { throw new IllegalArgumentException("Must specify table name"); } @@ -234,7 +219,7 @@ implements Configurable { if (zkClientPort != 0) { this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort); } - } catch(IOException e) { + } catch (IOException e) { LOG.error(e.toString(), e); throw new RuntimeException(e); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java index 512c22f9cc9..a0df98796b4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -28,21 +25,19 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. */ @InterfaceAudience.Public -public class TableRecordReader -extends RecordReader { +public class TableRecordReader extends RecordReader { private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. */ public void restart(byte[] firstRow) throws IOException { @@ -58,8 +53,7 @@ extends RecordReader { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.recordReaderImpl.setScan(scan); @@ -67,7 +61,6 @@ extends RecordReader { /** * Closes the split. - * * @see org.apache.hadoop.mapreduce.RecordReader#close() */ @Override @@ -77,23 +70,18 @@ extends RecordReader { /** * Returns the current key. - * - * @return The current key. - * @throws IOException - * @throws InterruptedException When the job is aborted. + * @return The current key. n * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey() */ @Override - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return this.recordReaderImpl.getCurrentKey(); } /** * Returns the current value. - * * @return The current value. - * @throws IOException When the value is faulty. + * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentValue() */ @@ -104,27 +92,23 @@ extends RecordReader { /** * Initializes the reader. - * - * @param inputsplit The split to work with. - * @param context The current task context. - * @throws IOException When setting up the reader fails. + * @param inputsplit The split to work with. + * @param context The current task context. + * @throws IOException When setting up the reader fails. * @throws InterruptedException When the job is aborted. 
* @see org.apache.hadoop.mapreduce.RecordReader#initialize( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { this.recordReaderImpl.initialize(inputsplit, context); } /** * Positions the record reader to the next record. - * * @return true if there was another record. - * @throws IOException When reading the record failed. + * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. * @see org.apache.hadoop.mapreduce.RecordReader#nextKeyValue() */ @@ -135,7 +119,6 @@ extends RecordReader { /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. * @see org.apache.hadoop.mapreduce.RecordReader#getProgress() */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 9c58a4c1cc4..0cf0d9f7047 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -39,13 +39,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. */ @InterfaceAudience.Public public class TableRecordReaderImpl { - public static final String LOG_PER_ROW_COUNT - = "hbase.mapreduce.log.scanner.rowcount"; + public static final String LOG_PER_ROW_COUNT = "hbase.mapreduce.log.scanner.rowcount"; private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class); @@ -70,8 +68,7 @@ public class TableRecordReaderImpl { /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. */ public void restart(byte[] firstRow) throws IOException { @@ -97,8 +94,8 @@ public class TableRecordReaderImpl { } /** - * In new mapreduce APIs, TaskAttemptContext has two getCounter methods - * Check if getCounter(String, String) method is available. + * In new mapreduce APIs, TaskAttemptContext has two getCounter methods Check if + * getCounter(String, String) method is available. * @return The getCounter method or null if not available. * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 */ @@ -107,8 +104,7 @@ public class TableRecordReaderImpl { throws IOException { Method m = null; try { - m = context.getClass().getMethod("getCounter", - new Class [] {String.class, String.class}); + m = context.getClass().getMethod("getCounter", new Class[] { String.class, String.class }); } catch (SecurityException e) { throw new IOException("Failed test for getCounter", e); } catch (NoSuchMethodException e) { @@ -119,21 +115,19 @@ public class TableRecordReaderImpl { /** * Sets the HBase table. - * - * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan. 
+ * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan. */ public void setHTable(Table htable) { Configuration conf = htable.getConfiguration(); logScannerActivity = conf.getBoolean( - "hbase.client.log.scanner.activity" /*ScannerCallable.LOG_SCANNER_ACTIVITY*/, false); + "hbase.client.log.scanner.activity" /* ScannerCallable.LOG_SCANNER_ACTIVITY */, false); logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100); this.htable = htable; } /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -142,9 +136,8 @@ public class TableRecordReaderImpl { /** * Build the scanner. Not done in constructor to allow for extension. */ - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { if (context != null) { this.context = context; } @@ -153,8 +146,6 @@ public class TableRecordReaderImpl { /** * Closes the split. - * - * */ public void close() { if (this.scanner != null) { @@ -169,32 +160,27 @@ public class TableRecordReaderImpl { /** * Returns the current key. - * * @return The current key. * @throws InterruptedException When the job is aborted. */ - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return key; } /** * Returns the current value. - * * @return The current value. - * @throws IOException When the value is faulty. + * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. */ public Result getCurrentValue() throws IOException, InterruptedException { return value; } - /** * Positions the record reader to the next record. - * * @return true if there was another record. - * @throws IOException When reading the record failed. + * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. 
*/ public boolean nextKeyValue() throws IOException, InterruptedException { @@ -211,7 +197,7 @@ public class TableRecordReaderImpl { numStale++; } if (logScannerActivity) { - rowcount ++; + rowcount++; if (rowcount >= logPerRowCount) { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); @@ -229,16 +215,16 @@ public class TableRecordReaderImpl { // the scanner, if the second call fails, it will be rethrown LOG.info("recovered from " + StringUtils.stringifyException(e)); if (lastSuccessfulRow == null) { - LOG.warn("We are restarting the first next() invocation," + - " if your mapper has restarted a few other times like this" + - " then you should consider killing this job and investigate" + - " why it's taking so long."); + LOG.warn("We are restarting the first next() invocation," + + " if your mapper has restarted a few other times like this" + + " then you should consider killing this job and investigate" + + " why it's taking so long."); } if (lastSuccessfulRow == null) { restart(scan.getStartRow()); } else { restart(lastSuccessfulRow); - scanner.next(); // skip presumed already mapped row + scanner.next(); // skip presumed already mapped row } value = scanner.next(); if (value != null && value.isStale()) { @@ -268,8 +254,8 @@ public class TableRecordReaderImpl { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); LOG.info(ioe.toString(), ioe); - String lastRow = lastSuccessfulRow == null ? - "null" : Bytes.toStringBinary(lastSuccessfulRow); + String lastRow = + lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow); LOG.info("lastSuccessfulRow=" + lastRow); } throw ioe; @@ -277,10 +263,9 @@ public class TableRecordReaderImpl { } /** - * If hbase runs on new version of mapreduce, RecordReader has access to - * counters thus can update counters based on scanMetrics. - * If hbase runs on old version of mapreduce, it won't be able to get - * access to counters and TableRecorderReader can't update counter values. + * If hbase runs on new version of mapreduce, RecordReader has access to counters thus can update + * counters based on scanMetrics. If hbase runs on old version of mapreduce, it won't be able to + * get access to counters and TableRecorderReader can't update counter values. */ private void updateCounters() { ScanMetrics scanMetrics = scanner.getScanMetrics(); @@ -292,45 +277,44 @@ public class TableRecordReaderImpl { } /** - * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 - * Use {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. + * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 Use + * {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. 
*/ @Deprecated protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts, - Method getCounter, TaskAttemptContext context, long numStale) { + Method getCounter, TaskAttemptContext context, long numStale) { updateCounters(scanMetrics, numScannerRestarts, context, numStale); } protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts, - TaskAttemptContext context, long numStale) { + TaskAttemptContext context, long numStale) { // we can get access to counters only if hbase uses new mapreduce APIs if (context == null) { return; } - for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); - if (counter != null) { - counter.increment(entry.getValue()); - } + for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); + if (counter != null) { + counter.increment(entry.getValue()); } - if (numScannerRestarts != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); - if (counter != null) { - counter.increment(numScannerRestarts); - } + } + if (numScannerRestarts != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); + if (counter != null) { + counter.increment(numScannerRestarts); } - if (numStale != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); - if (counter != null) { - counter.increment(numStale); - } + } + if (numStale != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); + if (counter != null) { + counter.increment(numStale); } + } } /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. */ public float getProgress() { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java index 07e44cbc28b..7e128553754 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,28 +17,26 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the basic Reducer class to add the required key and - * value input/output classes. While the input key and value as well as the - * output key can be anything handed in from the previous map phase the output - * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} - * or a {@link org.apache.hadoop.hbase.client.Delete Delete} instance when - * using the {@link TableOutputFormat} class. + * Extends the basic Reducer class to add the required key and value input/output + * classes. 
While the input key and value as well as the output key can be anything handed in from + * the previous map phase the output value must be either a + * {@link org.apache.hadoop.hbase.client.Put Put} or a {@link org.apache.hadoop.hbase.client.Delete + * Delete} instance when using the {@link TableOutputFormat} class. *
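A reducer honoring the Put-or-Delete contract described above might look like the following sketch (column family and qualifier are made up):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableReducer;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.LongWritable;

    public class SumToTableReducer
        extends TableReducer<ImmutableBytesWritable, LongWritable, ImmutableBytesWritable> {
      @Override
      protected void reduce(ImmutableBytesWritable rowKey, Iterable<LongWritable> values,
          Context context) throws IOException, InterruptedException {
        long sum = 0;
        for (LongWritable value : values) {
          sum += value.get();
        }
        // The emitted value must be a Put or a Delete so TableOutputFormat can apply it.
        Put put = new Put(rowKey.copyBytes());
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("count"), Bytes.toBytes(sum));
        context.write(rowKey, put);
      }
    }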

      - * This class is extended by {@link IdentityTableReducer} but can also be - * subclassed to implement similar features or any custom code needed. It has - * the advantage to enforce the output value to a specific basic type. - * - * @param The type of the input key. - * @param The type of the input value. + * This class is extended by {@link IdentityTableReducer} but can also be subclassed to implement + * similar features or any custom code needed. It has the advantage to enforce the output value to a + * specific basic type. + * @param The type of the input key. + * @param The type of the input value. * @param The type of the output key. * @see org.apache.hadoop.mapreduce.Reducer */ @InterfaceAudience.Public public abstract class TableReducer -extends Reducer { + extends Reducer { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java index 6fd0b6e3f11..e7a124b98f1 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.DataInput; @@ -23,7 +22,6 @@ import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -43,40 +41,41 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.yetus.audience.InterfaceAudience; /** - * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job - * bypasses HBase servers, and directly accesses the underlying files (hfile, recovered edits, - * wals, etc) directly to provide maximum performance. The snapshot is not required to be - * restored to the live cluster or cloned. This also allows to run the mapreduce job from an - * online or offline hbase cluster. The snapshot files can be exported by using the - * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, - * and this InputFormat can be used to run the mapreduce job directly over the snapshot files. - * The snapshot should not be deleted while there are jobs reading from snapshot files. + * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job bypasses + * HBase servers, and directly accesses the underlying files (hfile, recovered edits, wals, etc) + * directly to provide maximum performance. The snapshot is not required to be restored to the live + * cluster or cloned. This also allows to run the mapreduce job from an online or offline hbase + * cluster. The snapshot files can be exported by using the + * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, and this + * InputFormat can be used to run the mapreduce job directly over the snapshot files. The snapshot + * should not be deleted while there are jobs reading from snapshot files. *

      * Usage is similar to TableInputFormat, and * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path)} * can be used to configure the job. - *

      {@code
      - * Job job = new Job(conf);
      - * Scan scan = new Scan();
      - * TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
      - *      scan, MyTableMapper.class, MyMapKeyOutput.class,
      - *      MyMapOutputValueWritable.class, job, true);
      + *
      + * 
+ * {@code
      + *   Job job = new Job(conf);
      + *   Scan scan = new Scan();
      + *   TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, MyTableMapper.class,
      + *     MyMapKeyOutput.class, MyMapOutputValueWritable.class, job, true);
        * }
        * 
      *

      - * Internally, this input format restores the snapshot into the given tmp directory. By default, - * and similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you - * can run N mapper tasks per every region, in which case the region key range will be split to - * N sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading - * from each RecordReader. An internal RegionScanner is used to execute the + * Internally, this input format restores the snapshot into the given tmp directory. By default, and + * similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you can + * run N mapper tasks per every region, in which case the region key range will be split to N + * sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading from + * each RecordReader. An internal RegionScanner is used to execute the * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user. *
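The N-mappers-per-region mode mentioned above is reachable through an initTableSnapshotMapperJob overload that takes a split algorithm and a per-region split count (present in recent TableMapReduceUtil versions); a sketch under that assumption, with the snapshot name and restore directory as placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.util.RegionSplitter;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class SnapshotScanDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-scan");
        job.setJarByClass(SnapshotScanDriver.class);
        // The restore directory must be writable by the submitting user and must
        // not live under hbase.rootdir.
        Path restoreDir = new Path("/tmp/snapshot-restore");
        TableMapReduceUtil.initTableSnapshotMapperJob("my_snapshot", new Scan(),
          IdentityTableMapper.class, ImmutableBytesWritable.class, Result.class, job, true,
          restoreDir, new RegionSplitter.UniformSplit(), 2 /* mappers per region */);
        job.setOutputFormatClass(NullOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }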

      * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from - * snapshot files and data files. - * To read from snapshot files directly from the file system, the user who is running the MR job - * must have sufficient permissions to access snapshot and reference files. - * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase - * user or the user must have group or other privileges in the filesystem (See HBASE-8369). + * snapshot files and data files. To read from snapshot files directly from the file system, the + * user who is running the MR job must have sufficient permissions to access snapshot and reference + * files. This means that to run mapreduce over snapshot files, the MR job has to be run as the + * HBase user or the user must have group or other privileges in the filesystem (See HBASE-8369). * Note that, given other users access to read from snapshot/data files will completely circumvent * the access control enforced by HBase. * @see org.apache.hadoop.hbase.client.TableSnapshotScanner @@ -97,9 +96,9 @@ public class TableSnapshotInputFormat extends InputFormat locations, Scan scan, Path restoreDir) { + List locations, Scan scan, Path restoreDir) { this.delegate = - new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); + new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); } @Override @@ -123,8 +122,7 @@ public class TableSnapshotInputFormat extends InputFormat { + static class TableSnapshotRegionRecordReader + extends RecordReader { private TableSnapshotInputFormatImpl.RecordReader delegate = new TableSnapshotInputFormatImpl.RecordReader(); private TaskAttemptContext context; @Override - public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { this.context = context; - delegate.initialize( - ((TableSnapshotRegionSplit) split).delegate, - context.getConfiguration()); + delegate.initialize(((TableSnapshotRegionSplit) split).delegate, context.getConfiguration()); } @Override @@ -190,16 +186,16 @@ public class TableSnapshotInputFormat extends InputFormat createRecordReader( - InputSplit split, TaskAttemptContext context) throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { return new TableSnapshotRegionRecordReader(); } @Override public List getSplits(JobContext job) throws IOException, InterruptedException { List results = new ArrayList<>(); - for (TableSnapshotInputFormatImpl.InputSplit split : - TableSnapshotInputFormatImpl.getSplits(job.getConfiguration())) { + for (TableSnapshotInputFormatImpl.InputSplit split : TableSnapshotInputFormatImpl + .getSplits(job.getConfiguration())) { results.add(new TableSnapshotRegionSplit(split)); } return results; @@ -207,38 +203,39 @@ public class TableSnapshotInputFormat extends InputFormat locations, - Scan scan, Path restoreDir) { + Scan scan, Path restoreDir) { this.htd = htd; this.regionInfo = regionInfo; if (locations == null || locations.isEmpty()) { @@ -183,7 +181,7 @@ public class TableSnapshotInputFormatImpl { } public long getLength() { - //TODO: We can obtain the file sizes of the snapshot here. + // TODO: We can obtain the file sizes of the snapshot here. 
return 0; } @@ -204,8 +202,7 @@ public class TableSnapshotInputFormatImpl { @Override public void write(DataOutput out) throws IOException { TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder() - .setTable(ProtobufUtil.toTableSchema(htd)) - .setRegion(HRegionInfo.convert(regionInfo)); + .setTable(ProtobufUtil.toTableSchema(htd)).setRegion(HRegionInfo.convert(regionInfo)); for (String location : locations) { builder.addLocations(location); @@ -265,7 +262,6 @@ public class TableSnapshotInputFormatImpl { HRegionInfo hri = this.split.getRegionInfo(); FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf); - // region is immutable, this should be fine, // otherwise we have to set the thread read point scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -273,13 +269,13 @@ public class TableSnapshotInputFormatImpl { scan.setCacheBlocks(false); scanner = - new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null); + new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null); } public boolean nextKeyValue() throws IOException { result = scanner.next(); if (result == null) { - //we are done + // we are done return false; } @@ -345,14 +341,13 @@ public class TableSnapshotInputFormatImpl { } try { return Class.forName(splitAlgoClassName).asSubclass(RegionSplitter.SplitAlgorithm.class) - .getDeclaredConstructor().newInstance(); - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - NoSuchMethodException | InvocationTargetException e) { + .getDeclaredConstructor().newInstance(); + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | NoSuchMethodException | InvocationTargetException e) { throw new IOException("SplitAlgo class " + splitAlgoClassName + " is not found", e); } } - public static List getRegionInfosFromManifest(SnapshotManifest manifest) { List regionManifests = manifest.getRegionManifests(); if (regionManifests == null) { @@ -372,7 +367,7 @@ public class TableSnapshotInputFormatImpl { } public static SnapshotManifest getSnapshotManifest(Configuration conf, String snapshotName, - Path rootDir, FileSystem fs) throws IOException { + Path rootDir, FileSystem fs) throws IOException { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); return SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); @@ -394,31 +389,31 @@ public class TableSnapshotInputFormatImpl { } if (scan.getReadType() == ReadType.DEFAULT) { - LOG.info("Provided Scan has DEFAULT ReadType," - + " updating STREAM for Snapshot-based InputFormat"); + LOG.info( + "Provided Scan has DEFAULT ReadType," + " updating STREAM for Snapshot-based InputFormat"); // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the default case. 
scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE, - SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); + SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); } return scan; } public static List getSplits(Scan scan, SnapshotManifest manifest, - List regionManifests, Path restoreDir, Configuration conf) throws IOException { + List regionManifests, Path restoreDir, Configuration conf) throws IOException { return getSplits(scan, manifest, regionManifests, restoreDir, conf, null, 1); } public static List getSplits(Scan scan, SnapshotManifest manifest, - List regionManifests, Path restoreDir, - Configuration conf, RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { + List regionManifests, Path restoreDir, Configuration conf, + RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { // load table descriptor TableDescriptor htd = manifest.getTableDescriptor(); Path tableDir = CommonFSUtils.getTableDir(restoreDir, htd.getTableName()); boolean localityEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean scanMetricsEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED, SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT); @@ -452,8 +447,7 @@ public class TableSnapshotInputFormatImpl { if (localityEnabled) { if (regionLocator != null) { /* Get Location from the local cache */ - HRegionLocation - location = regionLocator.getRegionLocation(hri.getStartKey(), false); + HRegionLocation location = regionLocator.getRegionLocation(hri.getStartKey(), false); hosts = new ArrayList<>(1); hosts.add(location.getHostname()); @@ -465,8 +459,9 @@ public class TableSnapshotInputFormatImpl { if (numSplits > 1) { byte[][] sp = sa.split(hri.getStartKey(), hri.getEndKey(), numSplits, true); for (int i = 0; i < sp.length - 1; i++) { - if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i], - sp[i + 1])) { + if ( + PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i], sp[i + 1]) + ) { Scan boundedScan = new Scan(scan); if (scan.getStartRow().length == 0) { @@ -487,8 +482,10 @@ public class TableSnapshotInputFormatImpl { } } } else { - if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), - hri.getStartKey(), hri.getEndKey())) { + if ( + PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), + hri.getEndKey()) + ) { splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir)); } @@ -503,8 +500,7 @@ public class TableSnapshotInputFormatImpl { * only when localityEnabled is true. */ private static List calculateLocationsForInputSplit(Configuration conf, - TableDescriptor htd, HRegionInfo hri, Path tableDir) - throws IOException { + TableDescriptor htd, HRegionInfo hri, Path tableDir) throws IOException { return getBestLocations(conf, HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir)); } @@ -514,15 +510,14 @@ public class TableSnapshotInputFormatImpl { * do not want to blindly pass all the locations, since we are creating one split per region, and * the region's blocks are all distributed throughout the cluster unless favorite node assignment * is used. On the expected stable case, only one location will contain most of the blocks as - * local. - * On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. 
Here - * we are doing a simple heuristic, where we will pass all hosts which have at least 80% + * local. On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. + * Here we are doing a simple heuristic, where we will pass all hosts which have at least 80% * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top - * host with the best locality. - * Return at most numTopsAtMost locations if there are more than that. + * host with the best locality. Return at most numTopsAtMost locations if there are more than + * that. */ private static List getBestLocations(Configuration conf, - HDFSBlocksDistribution blockDistribution, int numTopsAtMost) { + HDFSBlocksDistribution blockDistribution, int numTopsAtMost) { HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights(); if (hostAndWeights.length == 0) { // no matter what numTopsAtMost is @@ -543,8 +538,8 @@ public class TableSnapshotInputFormatImpl { // When top >= 2, // do the heuristic: filter all hosts which have at least cutoffMultiplier % of block locality - double cutoffMultiplier - = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); + double cutoffMultiplier = + conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); double filterWeight = topHost.getWeight() * cutoffMultiplier; @@ -562,7 +557,7 @@ public class TableSnapshotInputFormatImpl { } public static List getBestLocations(Configuration conf, - HDFSBlocksDistribution blockDistribution) { + HDFSBlocksDistribution blockDistribution) { // 3 nodes will contain highly local blocks. So default to 3. return getBestLocations(conf, blockDistribution, 3); } @@ -577,36 +572,37 @@ public class TableSnapshotInputFormatImpl { /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param conf the job to configuration + * @param conf the job to configuration * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should have - * write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should + * have write permissions to this directory, and this should not be a + * subdirectory of rootdir. After the job is finished, restoreDir can be + * deleted. * @throws IOException if an error occurs */ public static void setInput(Configuration conf, String snapshotName, Path restoreDir) - throws IOException { + throws IOException { setInput(conf, snapshotName, restoreDir, null, 1); } /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param conf the job to configure - * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should have - * write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param conf the job to configure + * @param snapshotName the name of the snapshot to read from + * @param restoreDir a temporary directory to restore the snapshot into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restoreDir + * can be deleted. 
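  // A standalone sketch of the locality cutoff described above: keep every host whose weight is
  // at least cutoff (0.8 by default) times the weight of the most local host. This is plain Java
  // for illustration, not HBase API; it assumes the weights arrive sorted in descending order,
  // as getTopHostsWithWeights() provides them.
  static java.util.List<String> pickLocalHosts(String[] hosts, long[] weights, double cutoff) {
    java.util.List<String> picked = new java.util.ArrayList<>();
    if (hosts.length == 0) {
      return picked; // no locality information available
    }
    double filterWeight = weights[0] * cutoff; // best host's weight scaled by the cutoff
    for (int i = 0; i < hosts.length; i++) {
      if (weights[i] >= filterWeight) {
        picked.add(hosts[i]); // nearly as local as the best host
      } else {
        break; // weights are sorted, so no later host can pass the cutoff
      }
    }
    return picked;
  }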
* @param numSplitsPerRegion how many input splits to generate per one region - * @param splitAlgo SplitAlgorithm to be used when generating InputSplits + * @param splitAlgo SplitAlgorithm to be used when generating InputSplits * @throws IOException if an error occurs */ public static void setInput(Configuration conf, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) - throws IOException { + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { conf.set(SNAPSHOT_NAME_KEY, snapshotName); if (numSplitsPerRegion < 1) { - throw new IllegalArgumentException("numSplits must be >= 1, " + - "illegal numSplits : " + numSplitsPerRegion); + throw new IllegalArgumentException( + "numSplits must be >= 1, " + "illegal numSplits : " + numSplitsPerRegion); } if (splitAlgo == null && numSplitsPerRegion > 1) { throw new IllegalArgumentException("Split algo can't be null when numSplits > 1"); @@ -625,8 +621,8 @@ public class TableSnapshotInputFormatImpl { } /** - * clean restore directory after snapshot scan job - * @param job the snapshot scan job + * clean restore directory after snapshot scan job + * @param job the snapshot scan job * @param snapshotName the name of the snapshot to read from * @throws IOException if an error occurs */ @@ -641,6 +637,6 @@ public class TableSnapshotInputFormatImpl { if (!fs.delete(restoreDir, true)) { LOG.warn("Failed clean restore dir {} for snapshot {}", restoreDir, snapshotName); } - LOG.debug("Clean restore directory {} for {}", restoreDir, snapshotName); + LOG.debug("Clean restore directory {} for {}", restoreDir, snapshotName); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index 93300ebb0f3..f1a71faf9ba 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +33,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A table split corresponds to a key range (low, high) and an optional scanner. - * All references to row below refer to the key of the row. + * A table split corresponds to a key range (low, high) and an optional scanner. All references to + * row below refer to the key of the row. */ @InterfaceAudience.Public -public class TableSplit extends InputSplit - implements Writable, Comparable { +public class TableSplit extends InputSplit implements Writable, Comparable { /** @deprecated LOG variable would be made private. fix in hbase 3.0 */ @Deprecated public static final Logger LOG = LoggerFactory.getLogger(TableSplit.class); @@ -79,76 +77,68 @@ public class TableSplit extends InputSplit private static final Version VERSION = Version.WITH_ENCODED_REGION_NAME; private TableName tableName; - private byte [] startRow; - private byte [] endRow; + private byte[] startRow; + private byte[] endRow; private String regionLocation; private String encodedRegionName = ""; /** - * The scan object may be null but the serialized form of scan is never null - * or empty since we serialize the scan object with default values then. 
- * Having no scanner in TableSplit doesn't necessarily mean there is no scanner - * for mapreduce job, it just means that we do not need to set it for each split. - * For example, it is not required to have a scan object for - * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the - * job conf and scanner is supposed to be same for all the splits of table. + * The scan object may be null but the serialized form of scan is never null or empty since we + * serialize the scan object with default values then. Having no scanner in TableSplit doesn't + * necessarily mean there is no scanner for mapreduce job, it just means that we do not need to + * set it for each split. For example, it is not required to have a scan object for + * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the job + * conf and scanner is supposed to be same for all the splits of table. */ private String scan = ""; // stores the serialized form of the Scan private long length; // Contains estimation of region size in bytes /** Default constructor. */ public TableSplit() { - this((TableName)null, null, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, ""); + this((TableName) null, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } /** - * Creates a new instance while assigning all variables. - * Length of region is set to 0 - * Encoded name of the region is set to blank - * - * @param tableName The name of the current table. - * @param scan The scan associated with this split. + * Creates a new instance while assigning all variables. Length of region is set to 0 Encoded name + * of the region is set to blank + * @param tableName The name of the current table. + * @param scan The scan associated with this split. * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param endRow The end row of the split. * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location) { this(tableName, scan, startRow, endRow, location, 0L); } /** - * Creates a new instance while assigning all variables. - * Encoded name of region is set to blank - * - * @param tableName The name of the current table. - * @param scan The scan associated with this split. + * Creates a new instance while assigning all variables. Encoded name of region is set to blank + * @param tableName The name of the current table. + * @param scan The scan associated with this split. * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param endRow The end row of the split. * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location, long length) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location, long length) { this(tableName, scan, startRow, endRow, location, "", length); } /** * Creates a new instance while assigning all variables. - * - * @param tableName The name of the current table. - * @param scan The scan associated with this split. - * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param tableName The name of the current table. 
+ * @param scan The scan associated with this split. + * @param startRow The start row of the split. + * @param endRow The end row of the split. * @param encodedRegionName The region ID. - * @param location The location of the region. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location, final String encodedRegionName, long length) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location, final String encodedRegionName, long length) { this.tableName = tableName; try { - this.scan = - (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); + this.scan = (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); } catch (IOException e) { LOG.warn("Failed to convert Scan to String", e); } @@ -160,36 +150,31 @@ public class TableSplit extends InputSplit } /** - * Creates a new instance without a scanner. - * Length of region is set to 0 - * + * Creates a new instance without a scanner. Length of region is set to 0 * @param tableName The name of the current table. - * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) { this(tableName, null, startRow, endRow, location); } /** * Creates a new instance without a scanner. - * * @param tableName The name of the current table. - * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. - * @param length Size of region in bytes + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. + * @param length Size of region in bytes */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location, long length) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location, + long length) { this(tableName, null, startRow, endRow, location, length); } /** * Returns a Scan object from the stored string representation. - * * @return Returns a Scan object based on the stored scanner. * @throws IOException throws IOException if deserialization fails */ @@ -199,9 +184,9 @@ public class TableSplit extends InputSplit /** * Returns a scan string - * @return scan as string. Should be noted that this is not same as getScan().toString() - * because Scan object will have the default values when empty scan string is - * deserialized. Thus, getScan().toString() can never be empty + * @return scan as string. Should be noted that this is not same as getScan().toString() because + * Scan object will have the default values when empty scan string is deserialized. Thus, + * getScan().toString() can never be empty */ @InterfaceAudience.Private public String getScanAsString() { @@ -213,17 +198,16 @@ public class TableSplit extends InputSplit * @see #getTable() * @return The table name. */ - public byte [] getTableName() { + public byte[] getTableName() { return tableName.getName(); } /** * Returns the table name. - * * @return The table name. 
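  // A small construction sketch for this class. The table name, row boundaries, location and
  // size are made-up values; the constructor is the six-argument one formatted just above.
  static TableSplit exampleSplit() {
    return new TableSplit(org.apache.hadoop.hbase.TableName.valueOf("web_table"),
      new org.apache.hadoop.hbase.client.Scan(),
      org.apache.hadoop.hbase.util.Bytes.toBytes("row-aaa"), // start row of the split
      org.apache.hadoop.hbase.util.Bytes.toBytes("row-mmm"), // end row of the split
      "rs1.example.com", // region location hint used for split locality
      64L * 1024 * 1024); // estimated region size in bytes
  }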
*/ public TableName getTable() { - // It is ugly that usually to get a TableName, the method is called getTableName. We can't do + // It is ugly that usually to get a TableName, the method is called getTableName. We can't do // that in here though because there was an existing getTableName in place already since // deprecated. return tableName; @@ -231,25 +215,22 @@ public class TableSplit extends InputSplit /** * Returns the start row. - * * @return The start row. */ - public byte [] getStartRow() { + public byte[] getStartRow() { return startRow; } /** * Returns the end row. - * * @return The end row. */ - public byte [] getEndRow() { + public byte[] getEndRow() { return endRow; } /** * Returns the region location. - * * @return The region's location. */ public String getRegionLocation() { @@ -258,18 +239,16 @@ public class TableSplit extends InputSplit /** * Returns the region's location as an array. - * * @return The array containing the region location. * @see org.apache.hadoop.mapreduce.InputSplit#getLocations() */ @Override public String[] getLocations() { - return new String[] {regionLocation}; + return new String[] { regionLocation }; } /** * Returns the region's encoded name. - * * @return The region's encoded name. */ public String getEncodedRegionName() { @@ -278,7 +257,6 @@ public class TableSplit extends InputSplit /** * Returns the length of the split. - * * @return The length of the split. * @see org.apache.hadoop.mapreduce.InputSplit#getLength() */ @@ -289,8 +267,7 @@ public class TableSplit extends InputSplit /** * Reads the values of each field. - * - * @param in The input to read from. + * @param in The input to read from. * @throws IOException When reading the input fails. */ @Override @@ -327,8 +304,7 @@ public class TableSplit extends InputSplit /** * Writes the field values to the output. - * - * @param out The output to write to. + * @param out The output to write to. * @throws IOException When writing the values to the output fails. */ @Override @@ -345,7 +321,6 @@ public class TableSplit extends InputSplit /** * Returns the details about this instance as a string. - * * @return The values of this instance as a string. * @see java.lang.Object#toString() */ @@ -360,8 +335,7 @@ public class TableSplit extends InputSplit try { // get the real scan here in toString, not the Base64 string printScan = TableMapReduceUtil.convertStringToScan(scan).toString(); - } - catch (IOException e) { + } catch (IOException e) { printScan = ""; } sb.append(", scan=").append(printScan); @@ -376,8 +350,7 @@ public class TableSplit extends InputSplit /** * Compares this split against the given one. - * - * @param split The split to compare to. + * @param split The split to compare to. * @return The result of the comparison. * @see java.lang.Comparable#compareTo(java.lang.Object) */ @@ -385,10 +358,10 @@ public class TableSplit extends InputSplit public int compareTo(TableSplit split) { // If The table name of the two splits is the same then compare start row // otherwise compare based on table names - int tableNameComparison = - getTable().compareTo(split.getTable()); - return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo( - getStartRow(), split.getStartRow()); + int tableNameComparison = getTable().compareTo(split.getTable()); + return tableNameComparison != 0 + ? 
tableNameComparison + : Bytes.compareTo(getStartRow(), split.getStartRow()); } @Override @@ -396,10 +369,10 @@ public class TableSplit extends InputSplit if (o == null || !(o instanceof TableSplit)) { return false; } - return tableName.equals(((TableSplit)o).tableName) && - Bytes.equals(startRow, ((TableSplit)o).startRow) && - Bytes.equals(endRow, ((TableSplit)o).endRow) && - regionLocation.equals(((TableSplit)o).regionLocation); + return tableName.equals(((TableSplit) o).tableName) + && Bytes.equals(startRow, ((TableSplit) o).startRow) + && Bytes.equals(endRow, ((TableSplit) o).endRow) + && regionLocation.equals(((TableSplit) o).regionLocation); } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java index 667ca97e3f1..79dfe752be0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.security.visibility.InvalidLabelException; import org.apache.hadoop.hbase.util.Bytes; @@ -41,6 +39,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Emits Sorted KeyValues. Parse the passed text and creates KeyValues. Sorts them before emit. @@ -49,8 +48,8 @@ import org.apache.hadoop.util.StringUtils; * @see PutSortReducer */ @InterfaceAudience.Public -public class TextSortReducer extends - Reducer { +public class TextSortReducer + extends Reducer { /** Timestamp for all inserted rows */ private long ts; @@ -90,12 +89,10 @@ public class TextSortReducer extends } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * - * @param context + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. n */ @Override protected void setup(Context context) { @@ -110,9 +107,7 @@ public class TextSortReducer extends } /** - * Handles common parameter initialization that a subclass might want to leverage. - * @param context - * @param conf + * Handles common parameter initialization that a subclass might want to leverage. 
nn */ protected void doSetup(Context context, Configuration conf) { // If a custom separator has been used, @@ -132,16 +127,11 @@ public class TextSortReducer extends } @Override - protected void reduce( - ImmutableBytesWritable rowKey, - java.lang.Iterable lines, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable rowKey, java.lang.Iterable lines, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "reducer.row.threshold", 1L * (1<<30)); + long threshold = context.getConfiguration().getLong("reducer.row.threshold", 1L * (1 << 30)); Iterator iter = lines.iterator(); while (iter.hasNext()) { Set kvs = new TreeSet<>(CellComparator.getInstance()); @@ -160,8 +150,8 @@ public class TextSortReducer extends // create tags for the parsed line List tags = new ArrayList<>(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -169,23 +159,25 @@ public class TextSortReducer extends tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl))); } for (int i = 0; i < parsed.getColumnCount(); i++) { - if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() - || i == parser.getCellTTLColumnIndex()) { + if ( + i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex() + ) { continue; } // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. Cell cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), - parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, - parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); + parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, + parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, + parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); kvs.add(kv); curSize += kv.heapSize(); } } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException - | InvalidLabelException badLine) { + | InvalidLabelException badLine) { if (skipBadLines) { System.err.println("Bad line." 
+ badLine.getMessage()); incrementBadLineCount(1); @@ -194,13 +186,12 @@ public class TextSortReducer extends throw new IOException(badLine); } } - context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : kvs) { context.write(rowKey, kv); - if (++index > 0 && index % 100 == 0) - context.setStatus("Wrote " + index + " key values."); + if (++index > 0 && index % 100 == 0) context.setStatus("Wrote " + index + " key values."); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java index 8dc7156d099..04c7e87d3b4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,14 +21,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Base64; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -39,14 +37,13 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** * Write table content out to files in hdfs. */ @InterfaceAudience.Public -public class TsvImporterMapper -extends Mapper -{ +public class TsvImporterMapper extends Mapper { /** Timestamp for all inserted rows */ protected long ts; @@ -56,7 +53,7 @@ extends Mapper /** Should skip bad lines */ private boolean skipBadLines; - /** Should skip empty columns*/ + /** Should skip empty columns */ private boolean skipEmptyColumns; private Counter badLineCount; private boolean logBadLines; @@ -93,20 +90,17 @@ extends Mapper } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * - * @param context + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. 
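  // A sketch of the job configuration this mapper expects. The column spec and output path are
  // placeholders; the keys are the public ImportTsv constants referenced in the surrounding
  // hunks.
  static void configureImportTsv(org.apache.hadoop.conf.Configuration conf) {
    // First column becomes the row key, the rest map to family:qualifier columns.
    conf.set(org.apache.hadoop.hbase.mapreduce.ImportTsv.COLUMNS_CONF_KEY,
      "HBASE_ROW_KEY,f:c1,f:c2");
    // Skip malformed lines instead of failing the task (the mapper counts them as Bad Lines).
    conf.setBoolean(org.apache.hadoop.hbase.mapreduce.ImportTsv.SKIP_LINES_CONF_KEY, true);
    // When set, KeyValues for HFiles are produced instead of live Puts.
    conf.set(org.apache.hadoop.hbase.mapreduce.ImportTsv.BULK_OUTPUT_CONF_KEY,
      "/tmp/importtsv-out");
  }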
n */ @Override protected void setup(Context context) { doSetup(context); conf = context.getConfiguration(); - parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), - separator); + parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator); if (parser.getRowKeyColumnIndex() == -1) { throw new RuntimeException("No row key column specified"); } @@ -115,8 +109,7 @@ extends Mapper } /** - * Handles common parameter initialization that a subclass might want to leverage. - * @param context + * Handles common parameter initialization that a subclass might want to leverage. n */ protected void doSetup(Context context) { Configuration conf = context.getConfiguration(); @@ -133,10 +126,8 @@ extends Mapper // configuration. ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0); - skipEmptyColumns = context.getConfiguration().getBoolean( - ImportTsv.SKIP_EMPTY_COLUMNS, false); - skipBadLines = context.getConfiguration().getBoolean( - ImportTsv.SKIP_LINES_CONF_KEY, true); + skipEmptyColumns = context.getConfiguration().getBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, false); + skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true); badLineCount = context.getCounter("ImportTsv", "Bad Lines"); logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false); hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY); @@ -146,18 +137,13 @@ extends Mapper * Convert a line of TSV text into an HBase table row. */ @Override - public void map(LongWritable offset, Text value, - Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] lineBytes = value.getBytes(); try { - ImportTsv.TsvParser.ParsedLine parsed = parser.parse( - lineBytes, value.getLength()); + ImportTsv.TsvParser.ParsedLine parsed = parser.parse(lineBytes, value.getLength()); ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(lineBytes, - parsed.getRowKeyOffset(), - parsed.getRowKeyLength()); + new ImmutableBytesWritable(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength()); // Retrieve timestamp if exists ts = parsed.getTimestamp(ts); cellVisibilityExpr = parsed.getCellVisibility(); @@ -167,8 +153,8 @@ extends Mapper if (hfileOutPath != null) { tags.clear(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -178,17 +164,19 @@ extends Mapper } Put put = new Put(rowKey.copyBytes()); for (int i = 0; i < parsed.getColumnCount(); i++) { - if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() - || i == parser.getCellTTLColumnIndex() || (skipEmptyColumns - && parsed.getColumnLength(i) == 0)) { + if ( + i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex() + || (skipEmptyColumns && parsed.getColumnLength(i) == 0) + ) { continue; } populatePut(lineBytes, parsed, put, i); } context.write(rowKey, put); } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException - | InvalidLabelException 
badLine) { + | InvalidLabelException badLine) { if (logBadLines) { System.err.println(value); } @@ -204,13 +192,13 @@ extends Mapper } protected void populatePut(byte[] lineBytes, ImportTsv.TsvParser.ParsedLine parsed, Put put, - int i) throws BadTsvLineException, IOException { + int i) throws BadTsvLineException, IOException { Cell cell = null; if (hfileOutPath == null) { cell = new KeyValue(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i)); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i)); if (cellVisibilityExpr != null) { // We won't be validating the expression here. The Visibility CP will do // the validation @@ -223,9 +211,9 @@ extends Mapper // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), - parsed.getColumnLength(i), tags); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i), tags); } put.add(cell); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java index f3f81ec1a71..3ee760af74d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,23 +19,21 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.Base64; - -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Mapper; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; /** * Write table content out to map output files. */ @InterfaceAudience.Public public class TsvImporterTextMapper -extends Mapper -{ + extends Mapper { /** Column seperator */ private String separator; @@ -60,12 +58,10 @@ extends Mapper } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. 
- * - * @param context + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. n */ @Override protected void setup(Context context) { @@ -80,8 +76,7 @@ extends Mapper } /** - * Handles common parameter initialization that a subclass might want to leverage. - * @param context + * Handles common parameter initialization that a subclass might want to leverage. n */ protected void doSetup(Context context) { Configuration conf = context.getConfiguration(); @@ -106,11 +101,12 @@ extends Mapper @Override public void map(LongWritable offset, Text value, Context context) throws IOException { try { - Pair rowKeyOffests = parser.parseRowKey(value.getBytes(), value.getLength()); - ImmutableBytesWritable rowKey = new ImmutableBytesWritable( - value.getBytes(), rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); + Pair rowKeyOffests = + parser.parseRowKey(value.getBytes(), value.getLength()); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(value.getBytes(), + rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); context.write(rowKey, value); - } catch (ImportTsv.TsvParser.BadTsvLineException|IllegalArgumentException badLine) { + } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException badLine) { if (logBadLines) { System.err.println(value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java index f0f4c82a5ad..b42c0d9116d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,9 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.Tag; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface to convert visibility expressions into Tags for storing along with Cells in HFiles. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index ffc202ab0dc..dc7de148460 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -61,8 +61,7 @@ public class WALInputFormat extends InputFormat { public static final String END_TIME_KEY = "wal.end.time"; /** - * {@link InputSplit} for {@link WAL} files. Each split represent - * exactly one log file. + * {@link InputSplit} for {@link WAL} files. Each split represent exactly one log file. 
*/ static class WALSplit extends InputSplit implements Writable { private String logFileName; @@ -71,12 +70,12 @@ public class WALInputFormat extends InputFormat { private long endTime; /** for serialization */ - public WALSplit() {} + public WALSplit() { + } /** - * Represent an WALSplit, i.e. a single WAL file. - * Start- and EndTime are managed by the split, so that WAL files can be - * filtered before WALEdits are passed to the mapper(s). + * Represent an WALSplit, i.e. a single WAL file. Start- and EndTime are managed by the split, + * so that WAL files can be filtered before WALEdits are passed to the mapper(s). */ public WALSplit(String logFileName, long fileSize, long startTime, long endTime) { this.logFileName = logFileName; @@ -131,8 +130,8 @@ public class WALInputFormat extends InputFormat { } /** - * {@link RecordReader} for an {@link WAL} file. - * Implementation shared with deprecated HLogInputFormat. + * {@link RecordReader} for an {@link WAL} file. Implementation shared with deprecated + * HLogInputFormat. */ static abstract class WALRecordReader extends RecordReader { private Reader reader = null; @@ -146,8 +145,8 @@ public class WALInputFormat extends InputFormat { @Override public void initialize(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { - WALSplit hsplit = (WALSplit)split; + throws IOException, InterruptedException { + WALSplit hsplit = (WALSplit) split; logFile = new Path(hsplit.getLogFileName()); conf = context.getConfiguration(); LOG.info("Opening {} for {}", logFile, split); @@ -196,7 +195,7 @@ public class WALInputFormat extends InputFormat { i++; } catch (EOFException x) { LOG.warn("Corrupted entry detected. Ignoring the rest of the file." - + " (This is normal when a RegionServer crashed.)"); + + " (This is normal when a RegionServer crashed.)"); return false; } } while (temp != null && temp.getKey().getWriteTime() < startTime); @@ -212,8 +211,8 @@ public class WALInputFormat extends InputFormat { } boolean res = temp.getKey().getWriteTime() <= endTime; if (!res) { - LOG.info("Reached ts: " + temp.getKey().getWriteTime() - + " ignoring the rest of the file."); + LOG.info( + "Reached ts: " + temp.getKey().getWriteTime() + " ignoring the rest of the file."); } return res; } catch (IOException e) { @@ -250,8 +249,8 @@ public class WALInputFormat extends InputFormat { } /** - * handler for non-deprecated WALKey version. fold into WALRecordReader once we no longer - * need to support HLogInputFormat. + * handler for non-deprecated WALKey version. fold into WALRecordReader once we no longer need to + * support HLogInputFormat. 
*/ static class WALKeyRecordReader extends WALRecordReader { @Override @@ -261,8 +260,7 @@ public class WALInputFormat extends InputFormat { } @Override - public List getSplits(JobContext context) throws IOException, - InterruptedException { + public List getSplits(JobContext context) throws IOException, InterruptedException { return getSplits(context, START_TIME_KEY, END_TIME_KEY); } @@ -270,7 +268,7 @@ public class WALInputFormat extends InputFormat { * implementation shared with deprecated HLogInputFormat */ List getSplits(final JobContext context, final String startKey, final String endKey) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); boolean ignoreMissing = conf.getBoolean(WALPlayer.IGNORE_MISSING_FILES, false); Path[] inputPaths = getInputPaths(conf); @@ -278,14 +276,14 @@ public class WALInputFormat extends InputFormat { long endTime = conf.getLong(endKey, Long.MAX_VALUE); List allFiles = new ArrayList(); - for(Path inputPath: inputPaths){ + for (Path inputPath : inputPaths) { FileSystem fs = inputPath.getFileSystem(conf); try { List files = getFiles(fs, inputPath, startTime, endTime); allFiles.addAll(files); } catch (FileNotFoundException e) { if (ignoreMissing) { - LOG.warn("File "+ inputPath +" is missing. Skipping it."); + LOG.warn("File " + inputPath + " is missing. Skipping it."); continue; } throw e; @@ -300,20 +298,20 @@ public class WALInputFormat extends InputFormat { private Path[] getInputPaths(Configuration conf) { String inpDirs = conf.get(FileInputFormat.INPUT_DIR); - return StringUtils.stringToPath( - inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); + return StringUtils + .stringToPath(inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); } /** - * @param startTime If file looks like it has a timestamp in its name, we'll check if newer - * or equal to this value else we will filter out the file. If name does not - * seem to have a timestamp, we will just return it w/o filtering. - * @param endTime If file looks like it has a timestamp in its name, we'll check if older or equal - * to this value else we will filter out the file. If name does not seem to - * have a timestamp, we will just return it w/o filtering. + * @param startTime If file looks like it has a timestamp in its name, we'll check if newer or + * equal to this value else we will filter out the file. If name does not seem to + * have a timestamp, we will just return it w/o filtering. + * @param endTime If file looks like it has a timestamp in its name, we'll check if older or + * equal to this value else we will filter out the file. If name does not seem to + * have a timestamp, we will just return it w/o filtering. */ private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) - throws IOException { + throws IOException { List result = new ArrayList<>(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); RemoteIterator iter = fs.listLocatedStatus(dir); @@ -336,7 +334,7 @@ public class WALInputFormat extends InputFormat { } static void addFile(List result, LocatedFileStatus lfs, long startTime, - long endTime) { + long endTime) { long timestamp = WAL.getTimestamp(lfs.getPath().getName()); if (timestamp > 0) { // Looks like a valid timestamp. 
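  // A sketch of pointing a job at WAL directories with a time window. The directory and the
  // epoch-millisecond bounds are placeholders; END_TIME_KEY is the public constant shown above
  // and START_TIME_KEY is assumed to be its "wal.start.time" counterpart used by WALPlayer.
  static void configureWalScan(org.apache.hadoop.mapreduce.Job job) {
    job.setInputFormatClass(WALInputFormat.class);
    org.apache.hadoop.conf.Configuration conf = job.getConfiguration();
    // One or more WAL directories; multiple entries are split on wal.input.separator (",").
    conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, "/hbase/oldWALs");
    conf.setLong(WALInputFormat.START_TIME_KEY, 1651363200000L); // entries written before this are skipped
    conf.setLong(WALInputFormat.END_TIME_KEY, 1651449600000L); // reading stops past this write time
  }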
@@ -344,8 +342,8 @@ public class WALInputFormat extends InputFormat { LOG.info("Found {}", lfs.getPath()); result.add(lfs); } else { - LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), - startTime, Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); + LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), startTime, + Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); } } else { // If no timestamp, add it regardless. @@ -356,7 +354,7 @@ public class WALInputFormat extends InputFormat { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) throws IOException, InterruptedException { + TaskAttemptContext context) throws IOException, InterruptedException { return new WALKeyRecordReader(); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index a6fef42b1c3..9f5658cd896 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -55,17 +55,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - - /** - * A tool to replay WAL files as a M/R job. - * The WAL can be replayed for a set of tables or all tables, - * and a time range can be provided (in milliseconds). - * The WAL is filtered to the passed set of tables and the output - * can optionally be mapped to another set of tables. - * - * WAL replay can also generate HFiles for later bulk importing, - * in that case the WAL is replayed for a single table only. + * A tool to replay WAL files as a M/R job. The WAL can be replayed for a set of tables or all + * tables, and a time range can be provided (in milliseconds). The WAL is filtered to the passed set + * of tables and the output can optionally be mapped to another set of tables. WAL replay can also + * generate HFiles for later bulk importing, in that case the WAL is replayed for a single table + * only. */ @InterfaceAudience.Public public class WALPlayer extends Configured implements Tool { @@ -77,10 +72,9 @@ public class WALPlayer extends Configured implements Tool { public final static String INPUT_FILES_SEPARATOR_KEY = "wal.input.separator"; public final static String IGNORE_MISSING_FILES = "wal.input.ignore.missing.files"; - private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; - public WALPlayer(){ + public WALPlayer() { } protected WALPlayer(final Configuration c) { @@ -88,19 +82,16 @@ public class WALPlayer extends Configured implements Tool { } /** - * A mapper that just writes out KeyValues. - * This one can be used together with {@link KeyValueSortReducer} - * @deprecated Use {@link WALCellMapper}. Will be removed from 3.0 onwards + * A mapper that just writes out KeyValues. This one can be used together with + * {@link KeyValueSortReducer} + * @deprecated Use {@link WALCellMapper}. 
Will be removed from 3.0 onwards */ @Deprecated - static class WALKeyValueMapper - extends Mapper { + static class WALKeyValueMapper extends Mapper { private byte[] table; @Override - public void map(WALKey key, WALEdit value, - Context context) - throws IOException { + public void map(WALKey key, WALEdit value, Context context) throws IOException { try { // skip all other tables if (Bytes.equals(table, key.getTableName().getName())) { @@ -130,18 +121,15 @@ public class WALPlayer extends Configured implements Tool { } } + /** - * A mapper that just writes out Cells. - * This one can be used together with {@link CellSortReducer} + * A mapper that just writes out Cells. This one can be used together with {@link CellSortReducer} */ - static class WALCellMapper - extends Mapper { + static class WALCellMapper extends Mapper { private byte[] table; @Override - public void map(WALKey key, WALEdit value, - Context context) - throws IOException { + public void map(WALKey key, WALEdit value, Context context) throws IOException { try { // skip all other tables if (Bytes.equals(table, key.getTableName().getName())) { @@ -173,8 +161,8 @@ public class WALPlayer extends Configured implements Tool { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. */ protected static enum Counter { /** Number of aggregated writes */ @@ -187,22 +175,19 @@ public class WALPlayer extends Configured implements Tool { } /** - * A mapper that writes out {@link Mutation} to be directly applied to - * a running HBase instance. + * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance. */ protected static class WALMapper - extends Mapper { + extends Mapper { private Map tables = new TreeMap<>(); @Override - public void map(WALKey key, WALEdit value, Context context) - throws IOException { + public void map(WALKey key, WALEdit value, Context context) throws IOException { context.getCounter(Counter.WALEDITS).increment(1); try { if (tables.isEmpty() || tables.containsKey(key.getTableName())) { - TableName targetTable = tables.isEmpty() ? - key.getTableName() : - tables.get(key.getTableName()); + TableName targetTable = + tables.isEmpty() ? key.getTableName() : tables.get(key.getTableName()); ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName()); Put put = null; Delete del = null; @@ -219,8 +204,10 @@ public class WALPlayer extends Configured implements Tool { // multiple rows (HBASE-5229). // Aggregate as much as possible into a single Put/Delete // operation before writing to the context. - if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte() - || !CellUtil.matchingRows(lastCell, cell)) { + if ( + lastCell == null || lastCell.getTypeByte() != cell.getTypeByte() + || !CellUtil.matchingRows(lastCell, cell) + ) { // row or type changed, write out aggregate KVs. 
if (put != null) { context.write(tableOut, put); @@ -266,8 +253,8 @@ public class WALPlayer extends Configured implements Tool { @Override protected void - cleanup(Mapper.Context context) - throws IOException, InterruptedException { + cleanup(Mapper.Context context) + throws IOException, InterruptedException { super.cleanup(context); } @@ -288,8 +275,7 @@ public class WALPlayer extends Configured implements Tool { int i = 0; if (tablesToUse != null) { for (String table : tablesToUse) { - tables.put(TableName.valueOf(table), - TableName.valueOf(tableMap[i++])); + tables.put(TableName.valueOf(table), TableName.valueOf(tableMap[i++])); } } } @@ -309,8 +295,8 @@ public class WALPlayer extends Configured implements Tool { // then see if just a number of ms's was specified ms = Long.parseLong(val); } catch (NumberFormatException nfe) { - throw new IOException(option - + " must be specified either in the form 2001-02-20T16:35:06.99 " + throw new IOException( + option + " must be specified either in the form 2001-02-20T16:35:06.99 " + "or as number of milliseconds"); } } @@ -319,8 +305,7 @@ public class WALPlayer extends Configured implements Tool { /** * Sets up the actual job. - * - * @param args The command line parameters. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -329,7 +314,7 @@ public class WALPlayer extends Configured implements Tool { setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; - String[] tables = args.length == 1? new String [] {}: args[1].split(","); + String[] tables = args.length == 1 ? new String[] {} : args[1].split(","); String[] tableMap; if (args.length > 2) { tableMap = args[2].split(","); @@ -343,8 +328,8 @@ public class WALPlayer extends Configured implements Tool { conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); conf.set(FileInputFormat.INPUT_DIR, inputDirs); - Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + - EnvironmentEdgeManager.currentTime())); + Job job = Job.getInstance(conf, + conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); @@ -365,12 +350,12 @@ public class WALPlayer extends Configured implements Tool { FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputValueClass(MapReduceExtendedCell.class); try (Connection conn = ConnectionFactory.createConnection(conf); - Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + Table table = conn.getTable(tableName); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator); } TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); + org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); } else { // output to live cluster job.setMapperClass(WALMapper.class); @@ -382,17 +367,17 @@ public class WALPlayer extends Configured implements Tool { } String codecCls = WALCellCodec.getWALCellCodecClass(conf).getName(); try { - TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Class.forName(codecCls)); + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), + Class.forName(codecCls)); } catch 
(Exception e) { throw new IOException("Cannot determine wal codec class " + codecCls, e); } return job; } - /** * Print usage - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { @@ -402,12 +387,12 @@ public class WALPlayer extends Configured implements Tool { System.err.println(" directory of WALs to replay."); System.err.println(" comma separated list of tables. If no tables specified,"); System.err.println(" all are imported (even hbase:meta if present)."); - System.err.println(" WAL entries can be mapped to a new set of tables by " + - "passing"); - System.err.println(" , a comma separated list of target " + - "tables."); - System.err.println(" If specified, each table in must have a " + - "mapping."); + System.err.println( + " WAL entries can be mapped to a new set of tables by " + "passing"); + System.err + .println(" , a comma separated list of target " + "tables."); + System.err + .println(" If specified, each table in must have a " + "mapping."); System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" Only one table can be specified, and no mapping allowed!"); @@ -415,8 +400,8 @@ public class WALPlayer extends Configured implements Tool { System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); - System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + - "format."); + System.err + .println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + "format."); System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); @@ -424,14 +409,12 @@ public class WALPlayer extends Configured implements Tool { System.err.println(" -Dwal.input.separator=' '"); System.err.println(" Change WAL filename separator (WAL dir names use default ','.)"); System.err.println("For performance also consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false"); + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); } /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index de7954052d1..30db5a572d4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
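  // A sketch of launching WALPlayer programmatically. The WAL directory and table names are
  // placeholders; the argument order follows the tool's usage text: WAL input directory, the
  // tables to replay, and an optional list of target table mappings.
  public static void runWalPlayer() throws Exception {
    org.apache.hadoop.conf.Configuration conf =
      org.apache.hadoop.hbase.HBaseConfiguration.create();
    int exitCode = org.apache.hadoop.util.ToolRunner.run(conf, new WALPlayer(),
      new String[] { "/hbase/oldWALs", "web_table", "web_table_restored" });
    System.exit(exitCode);
  }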
See the NOTICE file * distributed with this work for additional information @@ -70,20 +69,18 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This map-only job compares the data from a local table with a remote one. - * Every cell is compared and must have exactly the same keys (even timestamp) - * as well as same value. It is possible to restrict the job by time range and - * families. The peer id that's provided must match the one given when the - * replication stream was setup. + * This map-only job compares the data from a local table with a remote one. Every cell is compared + * and must have exactly the same keys (even timestamp) as well as same value. It is possible to + * restrict the job by time range and families. The peer id that's provided must match the one given + * when the replication stream was setup. *

      - * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason - * for a why a row is different is shown in the map's log. + * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason for a why a row is + * different is shown in the map's log. */ @InterfaceAudience.Private public class VerifyReplication extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(VerifyReplication.class); + private static final Logger LOG = LoggerFactory.getLogger(VerifyReplication.class); public final static String NAME = "verifyrep"; private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; @@ -100,32 +97,34 @@ public class VerifyReplication extends Configured implements Tool { int sleepMsBeforeReCompare = 0; boolean verbose = false; boolean includeDeletedCells = false; - //Source table snapshot name + // Source table snapshot name String sourceSnapshotName = null; - //Temp location in source cluster to restore source snapshot + // Temp location in source cluster to restore source snapshot String sourceSnapshotTmpDir = null; - //Peer table snapshot name + // Peer table snapshot name String peerSnapshotName = null; - //Temp location in peer cluster to restore peer snapshot + // Temp location in peer cluster to restore peer snapshot String peerSnapshotTmpDir = null; - //Peer cluster Hadoop FS address + // Peer cluster Hadoop FS address String peerFSAddress = null; - //Peer cluster HBase root dir location + // Peer cluster HBase root dir location String peerHBaseRootAddress = null; - //Peer Table Name + // Peer Table Name String peerTableName = null; - private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; /** * Map-only comparator for 2 tables */ - public static class Verifier - extends TableMapper { + public static class Verifier extends TableMapper { public enum Counters { - GOODROWS, BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, CONTENT_DIFFERENT_ROWS + GOODROWS, + BADROWS, + ONLY_IN_SOURCE_TABLE_ROWS, + ONLY_IN_PEER_TABLE_ROWS, + CONTENT_DIFFERENT_ROWS } private Connection sourceConnection; @@ -140,22 +139,20 @@ public class VerifyReplication extends Configured implements Tool { private int batch = -1; /** - * Map method that compares every scanned row with the equivalent from - * a distant cluster. - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * Map method that compares every scanned row with the equivalent from a distant cluster. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. 
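
The Counters enum reformatted above (GOODROWS, BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, CONTENT_DIFFERENT_ROWS) is how the job reports its outcome. Below is a minimal sketch, not part of the patch, of reading those counters from a finished job; the helper class name is made up and the Job handle is assumed to be the completed verification job.

```java
// Minimal sketch: summarize the Verifier counters of a finished verification job.
// Assumption: the Job passed in is the completed MapReduce job created by this tool.
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication.Verifier;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;

public final class VerifyRepCounterReport {
  static void report(Job job) throws java.io.IOException {
    Counters counters = job.getCounters();
    long good = counters.findCounter(Verifier.Counters.GOODROWS).getValue();
    long bad = counters.findCounter(Verifier.Counters.BADROWS).getValue();
    long different = counters.findCounter(Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue();
    System.out.println("GOODROWS=" + good + " BADROWS=" + bad
      + " CONTENT_DIFFERENT_ROWS=" + different);
  }
}
```
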
*/ @Override - public void map(ImmutableBytesWritable row, final Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, final Result value, Context context) + throws IOException { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); - sleepMsBeforeReCompare = conf.getInt(NAME +".sleepMsBeforeReCompare", 0); + sleepMsBeforeReCompare = conf.getInt(NAME + ".sleepMsBeforeReCompare", 0); delimiter = conf.get(NAME + ".delimiter", ""); - verbose = conf.getBoolean(NAME +".verbose", false); + verbose = conf.getBoolean(NAME + ".verbose", false); batch = conf.getInt(NAME + ".batch", -1); final Scan scan = new Scan(); if (batch > 0) { @@ -166,9 +163,9 @@ public class VerifyReplication extends Configured implements Tool { long startTime = conf.getLong(NAME + ".startTime", 0); long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE); String families = conf.get(NAME + ".families", null); - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -177,7 +174,7 @@ public class VerifyReplication extends Configured implements Tool { String rowPrefixes = conf.get(NAME + ".rowPrefixes", null); setRowPrefixFilter(scan, rowPrefixes); scan.setTimeRange(startTime, endTime); - int versions = conf.getInt(NAME+".versions", -1); + int versions = conf.getInt(NAME + ".versions", -1); LOG.info("Setting number of version inside map as: " + versions); if (versions >= 0) { scan.setMaxVersions(versions); @@ -189,8 +186,8 @@ public class VerifyReplication extends Configured implements Tool { final InputSplit tableSplit = context.getInputSplit(); String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); - Configuration peerConf = HBaseConfiguration.createClusterConf(conf, - zkClusterKey, PEER_CONFIG_PREFIX); + Configuration peerConf = + HBaseConfiguration.createClusterConf(conf, zkClusterKey, PEER_CONFIG_PREFIX); String peerName = peerConf.get(NAME + ".peerTableName", tableName.getNameAsString()); TableName peerTableName = TableName.valueOf(peerName); @@ -201,7 +198,7 @@ public class VerifyReplication extends Configured implements Tool { byte[] endRow = null; if (tableSplit instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit) { endRow = ((TableSnapshotInputFormat.TableSnapshotRegionSplit) tableSplit).getRegionInfo() - .getEndKey(); + .getEndKey(); } else { endRow = ((TableSplit) tableSplit).getEndRow(); } @@ -215,9 +212,9 @@ public class VerifyReplication extends Configured implements Tool { String peerHBaseRootAddress = conf.get(NAME + ".peerHBaseRootAddress", null); FileSystem.setDefaultUri(peerConf, peerFSAddress); CommonFSUtils.setRootDir(peerConf, new Path(peerHBaseRootAddress)); - LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + - peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + - " peerFSAddress:" + peerFSAddress); + LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + + peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + + " peerFSAddress:" + peerFSAddress); replicatedScanner = new TableSnapshotScanner(peerConf, CommonFSUtils.getRootDir(peerConf), new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName, scan, true); @@ -239,8 +236,8 @@ public class VerifyReplication extends Configured implements Tool { Result.compareResults(value, currentCompareRowInPeerTable, false); 
context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key: " + delimiter - + Bytes.toStringBinary(value.getRow()) + delimiter); + LOG.info( + "Good row key: " + delimiter + Bytes.toStringBinary(value.getRow()) + delimiter); } } catch (Exception e) { logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); @@ -270,21 +267,20 @@ public class VerifyReplication extends Configured implements Tool { if (!sourceResult.isEmpty()) { context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key (with recompare): " + delimiter + - Bytes.toStringBinary(row.getRow()) - + delimiter); + LOG.info("Good row key (with recompare): " + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } return; } catch (Exception e) { - LOG.error("recompare fail after sleep, rowkey=" + delimiter + - Bytes.toStringBinary(row.getRow()) + delimiter); + LOG.error("recompare fail after sleep, rowkey=" + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } context.getCounter(counter).increment(1); context.getCounter(Counters.BADROWS).increment(1); - LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + - delimiter); + LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + + delimiter); } @Override @@ -311,7 +307,7 @@ public class VerifyReplication extends Configured implements Tool { LOG.error("fail to close source table in cleanup", e); } } - if(sourceConnection != null){ + if (sourceConnection != null) { try { sourceConnection.close(); } catch (Exception e) { @@ -319,14 +315,14 @@ public class VerifyReplication extends Configured implements Tool { } } - if(replicatedTable != null){ - try{ + if (replicatedTable != null) { + try { replicatedTable.close(); } catch (Exception e) { LOG.error("fail to close replicated table in cleanup", e); } } - if(replicatedConnection != null){ + if (replicatedConnection != null) { try { replicatedConnection.close(); } catch (Exception e) { @@ -336,8 +332,8 @@ public class VerifyReplication extends Configured implements Tool { } } - private static Pair getPeerQuorumConfig( - final Configuration conf, String peerId) throws IOException { + private static Pair + getPeerQuorumConfig(final Configuration conf, String peerId) throws IOException { ZKWatcher localZKW = null; try { localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() { @@ -357,7 +353,7 @@ public class VerifyReplication extends Configured implements Tool { ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf)); } catch (ReplicationException e) { throw new IOException("An error occurred while trying to connect to the remote peer cluster", - e); + e); } finally { if (localZKW != null) { localZKW.close(); @@ -378,30 +374,28 @@ public class VerifyReplication extends Configured implements Tool { /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws java.io.IOException When setting up the job fails. 
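
The createSubmittableJob method whose javadoc ends above (and whose reflowed signature follows) is public, so the verification job can also be built programmatically. A hedged sketch: the wrapper class is hypothetical, a public no-arg constructor is assumed, and the peer id "5" and table "TestTable" are placeholders mirroring the usage examples later in this file's diff.

```java
// Hedged sketch: build and run the verification job via createSubmittableJob
// instead of the command-line entry point. Placeholder peer id and table name.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.mapreduce.Job;

public class VerifyRepProgrammatic {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    VerifyReplication tool = new VerifyReplication();
    tool.setConf(conf);
    Job job = tool.createSubmittableJob(conf, new String[] { "5", "TestTable" });
    if (job != null) { // createSubmittableJob returns null when the arguments are rejected
      System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
  }
}
```
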
*/ - public Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public Job createSubmittableJob(Configuration conf, String[] args) throws IOException { if (!doCommandLine(args)) { return null; } - conf.set(NAME+".tableName", tableName); - conf.setLong(NAME+".startTime", startTime); - conf.setLong(NAME+".endTime", endTime); - conf.setInt(NAME +".sleepMsBeforeReCompare", sleepMsBeforeReCompare); + conf.set(NAME + ".tableName", tableName); + conf.setLong(NAME + ".startTime", startTime); + conf.setLong(NAME + ".endTime", endTime); + conf.setInt(NAME + ".sleepMsBeforeReCompare", sleepMsBeforeReCompare); conf.set(NAME + ".delimiter", delimiter); conf.setInt(NAME + ".batch", batch); - conf.setBoolean(NAME +".verbose", verbose); - conf.setBoolean(NAME +".includeDeletedCells", includeDeletedCells); + conf.setBoolean(NAME + ".verbose", verbose); + conf.setBoolean(NAME + ".includeDeletedCells", includeDeletedCells); if (families != null) { - conf.set(NAME+".families", families); + conf.set(NAME + ".families", families); } - if (rowPrefixes != null){ - conf.set(NAME+".rowPrefixes", rowPrefixes); + if (rowPrefixes != null) { + conf.set(NAME + ".rowPrefixes", rowPrefixes); } String peerQuorumAddress; @@ -410,8 +404,8 @@ public class VerifyReplication extends Configured implements Tool { peerConfigPair = getPeerQuorumConfig(conf, peerId); ReplicationPeerConfig peerConfig = peerConfigPair.getFirst(); peerQuorumAddress = peerConfig.getClusterKey(); - LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + - peerConfig.getConfiguration()); + LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + + peerConfig.getConfiguration()); conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress); HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX, peerConfig.getConfiguration().entrySet()); @@ -430,7 +424,7 @@ public class VerifyReplication extends Configured implements Tool { conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); - //Set Snapshot specific parameters + // Set Snapshot specific parameters if (peerSnapshotName != null) { conf.set(NAME + ".peerSnapshotName", peerSnapshotName); @@ -461,9 +455,9 @@ public class VerifyReplication extends Configured implements Tool { scan.setMaxVersions(versions); LOG.info("Number of versions set to " + versions); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -486,8 +480,8 @@ public class VerifyReplication extends Configured implements Tool { assert peerConfigPair != null; peerClusterConf = peerConfigPair.getSecond(); } else { - peerClusterConf = HBaseConfiguration.createClusterConf(conf, - peerQuorumAddress, PEER_CONFIG_PREFIX); + peerClusterConf = + HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); } // Obtain the auth token from peer cluster TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf); @@ -508,7 +502,7 @@ public class VerifyReplication extends Configured implements Tool { } scan.setFilter(filterList); byte[] startPrefixRow = Bytes.toBytes(rowPrefixArray[0]); - byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length -1]); + byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length - 1]); setStartAndStopRows(scan, startPrefixRow, lastPrefixRow); } } @@ -516,7 +510,7 @@ public class VerifyReplication extends Configured 
implements Tool { private static void setStartAndStopRows(Scan scan, byte[] startPrefixRow, byte[] lastPrefixRow) { scan.setStartRow(startPrefixRow); byte[] stopRow = Bytes.add(Bytes.head(lastPrefixRow, lastPrefixRow.length - 1), - new byte[]{(byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1)}); + new byte[] { (byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1) }); scan.setStopRow(stopRow); } @@ -570,7 +564,7 @@ public class VerifyReplication extends Configured implements Tool { } final String rowPrefixesKey = "--row-prefixes="; - if (cmd.startsWith(rowPrefixesKey)){ + if (cmd.startsWith(rowPrefixesKey)) { rowPrefixes = cmd.substring(rowPrefixesKey.length()); continue; } @@ -639,7 +633,7 @@ public class VerifyReplication extends Configured implements Tool { return false; } - if (i == args.length-2) { + if (i == args.length - 2) { if (isPeerQuorumAddress(cmd)) { peerQuorumAddress = cmd; } else { @@ -647,25 +641,31 @@ public class VerifyReplication extends Configured implements Tool { } } - if (i == args.length-1) { + if (i == args.length - 1) { tableName = cmd; } } - if ((sourceSnapshotName != null && sourceSnapshotTmpDir == null) - || (sourceSnapshotName == null && sourceSnapshotTmpDir != null)) { + if ( + (sourceSnapshotName != null && sourceSnapshotTmpDir == null) + || (sourceSnapshotName == null && sourceSnapshotTmpDir != null) + ) { printUsage("Source snapshot name and snapshot temp location should be provided" - + " to use snapshots in source cluster"); + + " to use snapshots in source cluster"); return false; } - if (peerSnapshotName != null || peerSnapshotTmpDir != null || peerFSAddress != null - || peerHBaseRootAddress != null) { - if (peerSnapshotName == null || peerSnapshotTmpDir == null || peerFSAddress == null - || peerHBaseRootAddress == null) { + if ( + peerSnapshotName != null || peerSnapshotTmpDir != null || peerFSAddress != null + || peerHBaseRootAddress != null + ) { + if ( + peerSnapshotName == null || peerSnapshotTmpDir == null || peerFSAddress == null + || peerHBaseRootAddress == null + ) { printUsage( "Peer snapshot name, peer snapshot temp location, Peer HBase root address and " - + "peer FSAddress should be provided to use snapshots in peer cluster"); + + "peer FSAddress should be provided to use snapshots in peer cluster"); return false; } } @@ -697,17 +697,17 @@ public class VerifyReplication extends Configured implements Tool { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. 
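
A worked illustration of the stop-row computation in setStartAndStopRows, reformatted above: the prefix value "abc" is an example only, everything else mirrors the patched code.

```java
// Worked example of the stop-row computation shown above: keep all but the last
// byte of the final row prefix and increment that last byte, so a scan over
// prefix "abc" stops just before "abd".
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixStopRowExample {
  public static void main(String[] args) {
    byte[] lastPrefixRow = Bytes.toBytes("abc");
    byte[] stopRow = Bytes.add(Bytes.head(lastPrefixRow, lastPrefixRow.length - 1),
      new byte[] { (byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1) });
    System.out.println(Bytes.toString(stopRow)); // prints "abd"
  }
}
```
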
*/ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: verifyrep [--starttime=X]" - + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " - + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] " - + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] " - + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] "); + + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " + + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] " + + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] " + + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] "); System.err.println(); System.err.println("Options:"); System.err.println(" starttime beginning of the time range"); @@ -720,8 +720,8 @@ public class VerifyReplication extends Configured implements Tool { System.err.println(" families comma-separated list of families to copy"); System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(" delimiter the delimiter used in display around rowkey"); - System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + - "default value is 0 which disables the recompare."); + System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + + "default value is 0 which disables the recompare."); System.err.println(" verbose logs row keys of good rows"); System.err.println(" peerTableName Peer Table Name"); System.err.println(" sourceSnapshotName Source Snapshot Name"); @@ -739,57 +739,53 @@ public class VerifyReplication extends Configured implements Tool { System.err.println(" tablename Name of the table to verify"); System.err.println(); System.err.println("Examples:"); - System.err.println( - " To verify the data replicated from TestTable for a 1 hour window with peer #5 "); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + - " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); + System.err + .println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 "); + System.err + .println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + + " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); System.err.println(); System.err.println( " To verify the data in TestTable between the cluster runs VerifyReplication and cluster-b"); System.err.println(" Assume quorum address for cluster-b is" + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); - System.err.println( - " To verify the data in TestTable between the secured cluster runs VerifyReplication" + System.err + .println(" To verify the data in TestTable between the secured cluster runs VerifyReplication" + " and insecure cluster-b"); - System.err.println( - " $ hbase 
org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); - System.err.println(" To verify the data in TestTable between" + - " the secured cluster runs VerifyReplication and secured cluster-b"); - System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + - ", for master and regionserver kerberos principal from another cluster"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err.println(" To verify the data in TestTable between" + + " the secured cluster runs VerifyReplication and secured cluster-b"); + System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + + ", for master and regionserver kerberos principal from another cluster"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); System.err.println( " To verify the data in TestTable between the insecure cluster runs VerifyReplication" + " and secured cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); } @Override @@ -804,8 +800,7 @@ public class VerifyReplication extends Configured implements Tool { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. 
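
The first usage example printed above (peer #5, TestTable, a one-hour window) can be expressed from code through ToolRunner; this sketch assumes a public no-arg constructor, and the driver class name is made up. The arguments are copied verbatim from the usage text.

```java
// Hedged sketch of the first usage example above, run through ToolRunner instead
// of the hbase shell wrapper. Assumption: public no-arg constructor.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.util.ToolRunner;

public class VerifyRepCli {
  public static void main(String[] args) throws Exception {
    int exit = ToolRunner.run(HBaseConfiguration.create(), new VerifyReplication(),
      new String[] { "--starttime=1265875194289", "--endtime=1265878794289", "5", "TestTable" });
    System.exit(exit);
  }
}
```
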
*/ public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java index 3b9a13879e8..9b8d426df95 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,7 +62,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Scans a given table + CF for all mob reference cells to get the list of backing mob files. For * each referenced file we attempt to verify that said file is on the FileSystem in a place that the @@ -168,8 +166,8 @@ public class MobRefReporter extends Configured implements Tool { public static class MobRefMapper extends TableMapper { @Override - public void map(ImmutableBytesWritable r, Result columns, Context context) throws IOException, - InterruptedException { + public void map(ImmutableBytesWritable r, Result columns, Context context) + throws IOException, InterruptedException { if (columns == null) { return; } @@ -189,24 +187,27 @@ public class MobRefReporter extends Configured implements Tool { files.add(fileName); } final int cellsize = MobUtils.getMobValueLength(c); - context.getCounter("SIZES OF CELLS", "Number of cells with size in the " + - log10GroupedString(cellsize) + "s of bytes").increment(1L); + context + .getCounter("SIZES OF CELLS", + "Number of cells with size in the " + log10GroupedString(cellsize) + "s of bytes") + .increment(1L); size += cellsize; count++; } else { LOG.debug("cell is not a mob ref, even though we asked for only refs. 
cell={}", c); } } - context.getCounter("CELLS PER ROW", "Number of rows with " + log10GroupedString(count) + - "s of cells per row").increment(1L); - context.getCounter("SIZES OF ROWS", "Number of rows with total size in the " + - log10GroupedString(size) + "s of bytes").increment(1L); - context.getCounter("MOB","NUM_CELLS").increment(count); + context.getCounter("CELLS PER ROW", + "Number of rows with " + log10GroupedString(count) + "s of cells per row").increment(1L); + context + .getCounter("SIZES OF ROWS", + "Number of rows with total size in the " + log10GroupedString(size) + "s of bytes") + .increment(1L); + context.getCounter("MOB", "NUM_CELLS").increment(count); } } - public static class MobRefReducer extends - Reducer { + public static class MobRefReducer extends Reducer { TableName table; String mobRegion; @@ -220,7 +221,7 @@ public class MobRefReporter extends Configured implements Tool { final Text OK_HLINK_CLONE = new Text("HLINK TO ARCHIVE FOR OTHER TABLE"); /* Results that mean something is incorrect */ final Text INCONSISTENT_ARCHIVE_BAD_LINK = - new Text("ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE"); + new Text("ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE"); final Text INCONSISTENT_ARCHIVE_STALE = new Text("ARCHIVE BUT NO HLINKS"); final Text INCONSISTENT_ARCHIVE_IOE = new Text("ARCHIVE BUT FAILURE WHILE CHECKING HLINKS"); /* Results that mean data is probably already gone */ @@ -245,21 +246,21 @@ public class MobRefReporter extends Configured implements Tool { mob = MobUtils.getMobFamilyPath(conf, table, family); LOG.info("Using active mob area '{}'", mob); archive = HFileArchiveUtil.getStoreArchivePath(conf, table, - MobUtils.getMobRegionInfo(table).getEncodedName(), family); + MobUtils.getMobRegionInfo(table).getEncodedName(), family); LOG.info("Using archive mob area '{}'", archive); seperator = conf.get(TextOutputFormat.SEPERATOR, "\t"); } @Override public void reduce(Text key, Iterable rows, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final Configuration conf = context.getConfiguration(); final String file = key.toString(); // active mob area if (mob.getFileSystem(conf).exists(new Path(mob, file))) { LOG.debug("Found file '{}' in mob area", file); context.write(OK_MOB_DIR, key); - // archive area - is there an hlink back reference (from a snapshot from same table) + // archive area - is there an hlink back reference (from a snapshot from same table) } else if (archive.getFileSystem(conf).exists(new Path(archive, file))) { Path backRefDir = HFileLink.getBackReferencesDir(archive, file); @@ -268,37 +269,38 @@ public class MobRefReporter extends Configured implements Tool { if (backRefs != null) { boolean found = false; for (FileStatus backRef : backRefs) { - Pair refParts = HFileLink.parseBackReferenceName( - backRef.getPath().getName()); + Pair refParts = + HFileLink.parseBackReferenceName(backRef.getPath().getName()); if (table.equals(refParts.getFirst()) && mobRegion.equals(refParts.getSecond())) { - Path hlinkPath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(conf), - backRef.getPath()); + Path hlinkPath = + HFileLink.getHFileFromBackReference(MobUtils.getMobHome(conf), backRef.getPath()); if (hlinkPath.getFileSystem(conf).exists(hlinkPath)) { found = true; } else { - LOG.warn("Found file '{}' in archive area with a back reference to the mob area " + LOG.warn( + "Found file '{}' in archive area with a back reference to the mob area " + "for our table, but the mob area does not have a 
corresponding hfilelink.", - file); + file); } } } if (found) { LOG.debug("Found file '{}' in archive area. has proper hlink back references to " - + "suggest it is from a restored snapshot for this table.", file); + + "suggest it is from a restored snapshot for this table.", file); context.write(OK_HLINK_RESTORE, key); } else { LOG.warn("Found file '{}' in archive area, but the hlink back references do not " - + "properly point to the mob area for our table.", file); + + "properly point to the mob area for our table.", file); context.write(INCONSISTENT_ARCHIVE_BAD_LINK, encodeRows(context, key, rows)); } } else { LOG.warn("Found file '{}' in archive area, but there are no hlinks pointing to it. Not " - + "yet used snapshot or an error.", file); + + "yet used snapshot or an error.", file); context.write(INCONSISTENT_ARCHIVE_STALE, encodeRows(context, key, rows)); } } catch (IOException e) { LOG.warn("Found file '{}' in archive area, but got an error while checking " - + "on back references.", file, e); + + "on back references.", file, e); context.write(INCONSISTENT_ARCHIVE_IOE, encodeRows(context, key, rows)); } @@ -306,19 +308,18 @@ public class MobRefReporter extends Configured implements Tool { // check for an hlink in the active mob area (from a snapshot of a different table) try { /** - * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because - * we know the mob region never splits, so we can only have HFileLink references - * and looking for just them is cheaper then listing everything. - * - * This glob should match the naming convention for HFileLinks to our referenced hfile. - * As simplified explanation those file names look like "table=region-hfile". For details - * see the {@link HFileLink#createHFileLinkName HFileLink implementation}. + * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because we + * know the mob region never splits, so we can only have HFileLink references and looking + * for just them is cheaper then listing everything. This glob should match the naming + * convention for HFileLinks to our referenced hfile. As simplified explanation those file + * names look like "table=region-hfile". For details see the + * {@link HFileLink#createHFileLinkName HFileLink implementation}. */ FileStatus[] hlinks = mob.getFileSystem(conf).globStatus(new Path(mob + "/*=*-" + file)); if (hlinks != null && hlinks.length != 0) { if (hlinks.length != 1) { - LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + - "one: {}", file, Arrays.deepToString(hlinks)); + LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + + "one: {}", file, Arrays.deepToString(hlinks)); } HFileLink found = null; for (FileStatus hlink : hlinks) { @@ -335,24 +336,24 @@ public class MobRefReporter extends Configured implements Tool { context.write(OK_HLINK_CLONE, key); } else { LOG.warn("Found file '{}' as ref(s) in the mob area but they do not point to an hfile" - + " that exists.", file); + + " that exists.", file); context.write(DATALOSS_HLINK_DANGLING, encodeRows(context, key, rows)); } } else { LOG.error("Could not find referenced file '{}'. See the docs on this tool.", file); LOG.debug("Note that we don't have the server-side tag from the mob cells that says " - + "what table the reference is originally from. So if the HFileLink in this table " - + "is missing but the referenced file is still in the table from that tag, then " - + "lookups of these impacted rows will work. 
Do a scan of the reference details " - + "of the cell for the hfile name and then check the entire hbase install if this " - + "table was made from a snapshot of another table. see the ref guide section on " - + "mob for details."); + + "what table the reference is originally from. So if the HFileLink in this table " + + "is missing but the referenced file is still in the table from that tag, then " + + "lookups of these impacted rows will work. Do a scan of the reference details " + + "of the cell for the hfile name and then check the entire hbase install if this " + + "table was made from a snapshot of another table. see the ref guide section on " + + "mob for details."); context.write(DATALOSS_MISSING, encodeRows(context, key, rows)); } } catch (IOException e) { LOG.error( - "Exception while checking mob area of our table for HFileLinks that point to {}", - file, e); + "Exception while checking mob area of our table for HFileLinks that point to {}", file, + e); context.write(DATALOSS_MISSING_IOE, encodeRows(context, key, rows)); } } @@ -363,7 +364,7 @@ public class MobRefReporter extends Configured implements Tool { * of base64 encoded row keys */ private Text encodeRows(Context context, Text key, Iterable rows) - throws IOException { + throws IOException { StringBuilder sb = new StringBuilder(key.toString()); sb.append(seperator); boolean moreThanOne = false; @@ -378,25 +379,27 @@ public class MobRefReporter extends Configured implements Tool { } context.getCounter("PROBLEM", "Problem MOB files").increment(1L); context.getCounter("PROBLEM", "Affected rows").increment(count); - context.getCounter("ROWS WITH PROBLEMS PER FILE", "Number of HFiles with " + - log10GroupedString(count) + "s of affected rows").increment(1L); + context + .getCounter("ROWS WITH PROBLEMS PER FILE", + "Number of HFiles with " + log10GroupedString(count) + "s of affected rows") + .increment(1L); key.set(sb.toString()); return key; } } /** - * Returns the string representation of the given number after grouping it - * into log10 buckets. e.g. 0-9 -> 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. + * Returns the string representation of the given number after grouping it into log10 buckets. + * e.g. 0-9 -> 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. */ static String log10GroupedString(long number) { - return String.format("%,d", (long)(Math.pow(10d, Math.floor(Math.log10(number))))); + return String.format("%,d", (long) (Math.pow(10d, Math.floor(Math.log10(number))))); } /** * Main method for the tool. - * @return 0 if success, 1 for bad args. 2 if job aborted with an exception, - * 3 if mr job was unsuccessful + * @return 0 if success, 1 for bad args. 2 if job aborted with an exception, 3 if mr job was + * unsuccessful */ public int run(String[] args) throws IOException, InterruptedException { // TODO make family and table optional @@ -417,8 +420,8 @@ public class MobRefReporter extends Configured implements Tool { if (hbaseRootFileStat.length > 0) { String owner = hbaseRootFileStat[0].getOwner(); if (!owner.equals(currentUserName)) { - String errorMsg = "The current user[" + currentUserName - + "] does not have hbase root credentials." + String errorMsg = + "The current user[" + currentUserName + "] does not have hbase root credentials." + " If this job fails due to an inability to read HBase's internal directories, " + "you will need to rerun as a user with sufficient permissions. 
The HBase superuser " + "is a safe choice."; @@ -426,7 +429,7 @@ public class MobRefReporter extends Configured implements Tool { } } else { LOG.error("The passed configs point to an HBase dir does not exist: {}", - conf.get(HConstants.HBASE_DIR)); + conf.get(HConstants.HBASE_DIR)); throw new IOException("The target HBase does not exist"); } @@ -434,7 +437,7 @@ public class MobRefReporter extends Configured implements Tool { int maxVersions; TableName tn = TableName.valueOf(tableName); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { TableDescriptor htd = admin.getDescriptor(tn); ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(familyName)); if (hcd == null || !hcd.isMobEnabled()) { @@ -444,7 +447,6 @@ public class MobRefReporter extends Configured implements Tool { maxVersions = hcd.getMaxVersions(); } - String id = getClass().getSimpleName() + UUID.randomUUID().toString().replace("-", ""); Job job = null; Scan scan = new Scan(); @@ -461,8 +463,8 @@ public class MobRefReporter extends Configured implements Tool { job = Job.getInstance(conf); job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob(tn, scan, - MobRefMapper.class, Text.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(tn, scan, MobRefMapper.class, Text.class, + ImmutableBytesWritable.class, job); job.setReducerClass(MobRefReducer.class); job.setOutputFormatClass(TextOutputFormat.class); @@ -497,7 +499,7 @@ public class MobRefReporter extends Configured implements Tool { private void printUsage() { System.err.println("Usage:\n" + "--------------------------\n" + MobRefReporter.class.getName() - + " output-dir tableName familyName"); + + " output-dir tableName familyName"); System.err.println(" output-dir Where to write output report."); System.err.println(" tableName The table name"); System.err.println(" familyName The column family name"); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index fd09e34fde1..9f2db27466c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,8 +82,8 @@ public class CompactionTool extends Configured implements Tool { private final static String CONF_DELETE_COMPACTED = "hbase.compactiontool.delete"; /** - * Class responsible to execute the Compaction on the specified path. - * The path can be a table, region or family directory. + * Class responsible to execute the Compaction on the specified path. The path can be a table, + * region or family directory. */ private static class CompactionWorker { private final boolean deleteCompacted; @@ -98,20 +98,18 @@ public class CompactionTool extends Configured implements Tool { /** * Execute the compaction on the specified path. - * - * @param path Directory path on which to run compaction. + * @param path Directory path on which to run compaction. * @param compactOnce Execute just a single step of compaction. - * @param major Request major compaction. + * @param major Request major compaction. 
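
For the MobRefReporter tool whose usage text ("output-dir tableName familyName") appears a little above, here is a hedged launch sketch; the output path, table, and family are placeholders, the driver class is hypothetical, and a public no-arg constructor runnable via ToolRunner is assumed.

```java
// Hedged sketch of launching MobRefReporter per its usage text above.
// Placeholder output directory, table name, and column family.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mob.mapreduce.MobRefReporter;
import org.apache.hadoop.util.ToolRunner;

public class MobRefReportDriver {
  public static void main(String[] args) throws Exception {
    int exit = ToolRunner.run(HBaseConfiguration.create(), new MobRefReporter(),
      new String[] { "/tmp/mob-ref-report", "TestTable", "f" });
    System.exit(exit);
  }
}
```
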
*/ public void compact(final Path path, final boolean compactOnce, final boolean major) - throws IOException { + throws IOException { if (isFamilyDir(fs, path)) { Path regionDir = path.getParent(); Path tableDir = regionDir.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - compactStoreFiles(tableDir, htd, hri, - path.getName(), compactOnce, major); + compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major); } else if (isRegionDir(fs, path)) { Path tableDir = path.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); @@ -125,60 +123,57 @@ public class CompactionTool extends Configured implements Tool { } private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major) - throws IOException { + throws IOException { TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); - for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) { compactRegion(tableDir, htd, regionDir, compactOnce, major); } } - private void compactRegion(final Path tableDir, final TableDescriptor htd, - final Path regionDir, final boolean compactOnce, final boolean major) - throws IOException { + private void compactRegion(final Path tableDir, final TableDescriptor htd, final Path regionDir, + final boolean compactOnce, final boolean major) throws IOException { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { + for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major); } } /** - * Execute the actual compaction job. - * If the compact once flag is not specified, execute the compaction until - * no more compactions are needed. Uses the Configuration settings provided. + * Execute the actual compaction job. If the compact once flag is not specified, execute the + * compaction until no more compactions are needed. Uses the Configuration settings provided. 
*/ private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, - final RegionInfo hri, final String familyName, final boolean compactOnce, - final boolean major) throws IOException { + final RegionInfo hri, final String familyName, final boolean compactOnce, final boolean major) + throws IOException { HStore store = getStore(conf, fs, tableDir, htd, hri, familyName); - LOG.info("Compact table=" + htd.getTableName() + - " region=" + hri.getRegionNameAsString() + - " family=" + familyName); + LOG.info("Compact table=" + htd.getTableName() + " region=" + hri.getRegionNameAsString() + + " family=" + familyName); if (major) { store.triggerMajorCompaction(); } do { Optional compaction = - store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); + store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); if (!compaction.isPresent()) { break; } List storeFiles = - store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); + store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); if (storeFiles != null && !storeFiles.isEmpty()) { if (deleteCompacted) { - for (HStoreFile storeFile: storeFiles) { + for (HStoreFile storeFile : storeFiles) { fs.delete(storeFile.getPath(), false); } } } } while (store.needsCompaction() && !compactOnce); - //We need to close the store properly, to make sure it will archive compacted files + // We need to close the store properly, to make sure it will archive compacted files store.close(); } private static HStore getStore(final Configuration conf, final FileSystem fs, - final Path tableDir, final TableDescriptor htd, final RegionInfo hri, - final String familyName) throws IOException { + final Path tableDir, final TableDescriptor htd, final RegionInfo hri, final String familyName) + throws IOException { HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri); HRegion region = new HRegion(regionFs, null, conf, htd, null); return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf, false); @@ -199,7 +194,7 @@ public class CompactionTool extends Configured implements Tool { } private static class CompactionMapper - extends Mapper { + extends Mapper { private CompactionWorker compactor = null; private boolean compactOnce = false; private boolean major = false; @@ -220,7 +215,7 @@ public class CompactionTool extends Configured implements Tool { @Override public void map(LongWritable key, Text value, Context context) - throws InterruptedException, IOException { + throws InterruptedException, IOException { Path path = new Path(value.toString()); this.compactor.compact(path, compactOnce, major); } @@ -236,8 +231,8 @@ public class CompactionTool extends Configured implements Tool { } /** - * Returns a split for each store files directory using the block location - * of each file as locality reference. + * Returns a split for each store files directory using the block location of each file as + * locality reference. 
*/ @Override public List getSplits(JobContext job) throws IOException { @@ -245,7 +240,7 @@ public class CompactionTool extends Configured implements Tool { List files = listStatus(job); Text key = new Text(); - for (FileStatus file: files) { + for (FileStatus file : files) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job.getConfiguration()); LineReader reader = new LineReader(fs.open(path)); @@ -269,14 +264,14 @@ public class CompactionTool extends Configured implements Tool { * return the top hosts of the store files, used by the Split */ private static String[] getStoreDirHosts(final FileSystem fs, final Path path) - throws IOException { + throws IOException { FileStatus[] files = CommonFSUtils.listStatus(fs, path); if (files == null) { return new String[] {}; } HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); - for (FileStatus hfileStatus: files) { + for (FileStatus hfileStatus : files) { HDFSBlocksDistribution storeFileBlocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen()); hdfsBlocksDistribution.add(storeFileBlocksDistribution); @@ -287,22 +282,21 @@ public class CompactionTool extends Configured implements Tool { } /** - * Create the input file for the given directories to compact. - * The file is a TextFile with each line corrisponding to a - * store files directory to compact. + * Create the input file for the given directories to compact. The file is a TextFile with each + * line corrisponding to a store files directory to compact. */ public static List createInputFile(final FileSystem fs, final FileSystem stagingFs, - final Path path, final Set toCompactDirs) throws IOException { + final Path path, final Set toCompactDirs) throws IOException { // Extract the list of store dirs List storeDirs = new LinkedList<>(); - for (Path compactDir: toCompactDirs) { + for (Path compactDir : toCompactDirs) { if (isFamilyDir(fs, compactDir)) { storeDirs.add(compactDir); } else if (isRegionDir(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, compactDir)); } else if (isTableDir(fs, compactDir)) { // Lookup regions - for (Path regionDir: FSUtils.getRegionDirs(fs, compactDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, regionDir)); } } else { @@ -316,7 +310,7 @@ public class CompactionTool extends Configured implements Tool { LOG.info("Create input file=" + path + " with " + storeDirs.size() + " dirs to compact."); try { final byte[] newLine = Bytes.toBytes("\n"); - for (Path storeDir: storeDirs) { + for (Path storeDir : storeDirs) { stream.write(Bytes.toBytes(storeDir.toString())); stream.write(newLine); } @@ -331,7 +325,7 @@ public class CompactionTool extends Configured implements Tool { * Execute compaction, using a Map-Reduce job. 
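
The doMapReduce path that follows is taken when CompactionTool is invoked for a MapReduce run; a hedged sketch of that invocation, mirroring the "-mapred hdfs://hbase/data/default/TestTable" usage example printed further down in this file's diff. The driver class is hypothetical and a public no-arg constructor is assumed.

```java
// Hedged sketch of driving CompactionTool down the MapReduce path described above,
// mirroring the "-mapred" usage example printed later in this file's diff.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.CompactionTool;
import org.apache.hadoop.util.ToolRunner;

public class CompactionToolDriver {
  public static void main(String[] args) throws Exception {
    int exit = ToolRunner.run(HBaseConfiguration.create(), new CompactionTool(),
      new String[] { "-mapred", "hdfs://hbase/data/default/TestTable" });
    System.exit(exit);
  }
}
```
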
*/ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, - final boolean compactOnce, final boolean major) throws Exception { + final boolean compactOnce, final boolean major) throws Exception { Configuration conf = getConf(); conf.setBoolean(CONF_COMPACT_ONCE, compactOnce); conf.setBoolean(CONF_COMPACT_MAJOR, major); @@ -352,16 +346,16 @@ public class CompactionTool extends Configured implements Tool { FileSystem stagingFs = stagingDir.getFileSystem(conf); try { // Create input file with the store dirs - Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime()); - List storeDirs = CompactionInputFormat.createInputFile(fs, stagingFs, - inputPath, toCompactDirs); + Path inputPath = new Path(stagingDir, "compact-" + EnvironmentEdgeManager.currentTime()); + List storeDirs = + CompactionInputFormat.createInputFile(fs, stagingFs, inputPath, toCompactDirs); CompactionInputFormat.addInputPath(job, inputPath); // Initialize credential for secure cluster TableMapReduceUtil.initCredentials(job); // Despite the method name this will get delegation token for the filesystem - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - storeDirs.toArray(new Path[0]), conf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), storeDirs.toArray(new Path[0]), + conf); // Start the MR Job and wait return job.waitForCompletion(true) ? 0 : 1; @@ -374,9 +368,9 @@ public class CompactionTool extends Configured implements Tool { * Execute compaction, from this client, one path at the time. */ private int doClient(final FileSystem fs, final Set toCompactDirs, - final boolean compactOnce, final boolean major) throws IOException { + final boolean compactOnce, final boolean major) throws IOException { CompactionWorker worker = new CompactionWorker(fs, getConf()); - for (Path path: toCompactDirs) { + for (Path path : toCompactDirs) { worker.compact(path, compactOnce, major); } return 0; @@ -449,16 +443,17 @@ public class CompactionTool extends Configured implements Tool { System.err.println(); System.err.println("Note: -D properties will be applied to the conf used. 
"); System.err.println("For example: "); - System.err.println(" To stop delete of compacted file, pass -D"+CONF_DELETE_COMPACTED+"=false"); + System.err + .println(" To stop delete of compacted file, pass -D" + CONF_DELETE_COMPACTED + "=false"); System.err.println(); System.err.println("Examples:"); System.err.println(" To compact the full 'TestTable' using MapReduce:"); - System.err.println(" $ hbase " + this.getClass().getName() + - " -mapred hdfs://hbase/data/default/TestTable"); + System.err.println( + " $ hbase " + this.getClass().getName() + " -mapred hdfs://hbase/data/default/TestTable"); System.err.println(); System.err.println(" To compact column family 'x' of the table 'TestTable' region 'abc':"); - System.err.println(" $ hbase " + this.getClass().getName() + - " hdfs://hbase/data/default/TestTable/abc/x"); + System.err.println( + " $ hbase " + this.getClass().getName() + " hdfs://hbase/data/default/TestTable/abc/x"); } public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index f586cb215f8..d37202b0a50 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.BufferedInputStream; @@ -85,11 +84,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Export the specified snapshot to a given FileSystem. - * - * The .snapshot/name folder is copied to the destination cluster - * and then all the hfiles/wals are copied using a Map-Reduce Job in the .archive/ location. - * When everything is done, the second cluster can restore the snapshot. + * Export the specified snapshot to a given FileSystem. The .snapshot/name folder is copied to the + * destination cluster and then all the hfiles/wals are copied using a Map-Reduce Job in the + * .archive/ location. When everything is done, the second cluster can restore the snapshot. */ @InterfaceAudience.Public public class ExportSnapshot extends AbstractHBaseTool implements Tool { @@ -117,9 +114,9 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { private static final String CONF_MR_JOB_NAME = "mapreduce.job.name"; protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp"; private static final String CONF_COPY_MANIFEST_THREADS = - "snapshot.export.copy.references.threads"; + "snapshot.export.copy.references.threads"; private static final int DEFAULT_COPY_MANIFEST_THREADS = - Runtime.getRuntime().availableProcessors(); + Runtime.getRuntime().availableProcessors(); static class Testing { static final String CONF_TEST_FAILURE = "test.snapshot.export.failure"; @@ -131,40 +128,45 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { // Command line options and defaults. 
static final class Options { static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); - static final Option TARGET_NAME = new Option(null, "target", true, - "Target name for the snapshot."); - static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " - + "destination hdfs://"); - static final Option COPY_FROM = new Option(null, "copy-from", true, - "Input folder hdfs:// (default hbase.rootdir)"); + static final Option TARGET_NAME = + new Option(null, "target", true, "Target name for the snapshot."); + static final Option COPY_TO = + new Option(null, "copy-to", true, "Remote " + "destination hdfs://"); + static final Option COPY_FROM = + new Option(null, "copy-from", true, "Input folder hdfs:// (default hbase.rootdir)"); static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, - "Do not verify checksum, use name+length only."); + "Do not verify checksum, use name+length only."); static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, - "Do not verify the integrity of the exported snapshot."); - static final Option NO_SOURCE_VERIFY = new Option(null, "no-source-verify", false, - "Do not verify the source of the snapshot."); - static final Option OVERWRITE = new Option(null, "overwrite", false, - "Rewrite the snapshot manifest if already exists."); - static final Option CHUSER = new Option(null, "chuser", true, - "Change the owner of the files to the specified one."); - static final Option CHGROUP = new Option(null, "chgroup", true, - "Change the group of the files to the specified one."); - static final Option CHMOD = new Option(null, "chmod", true, - "Change the permission of the files to the specified one."); + "Do not verify the integrity of the exported snapshot."); + static final Option NO_SOURCE_VERIFY = + new Option(null, "no-source-verify", false, "Do not verify the source of the snapshot."); + static final Option OVERWRITE = + new Option(null, "overwrite", false, "Rewrite the snapshot manifest if already exists."); + static final Option CHUSER = + new Option(null, "chuser", true, "Change the owner of the files to the specified one."); + static final Option CHGROUP = + new Option(null, "chgroup", true, "Change the group of the files to the specified one."); + static final Option CHMOD = + new Option(null, "chmod", true, "Change the permission of the files to the specified one."); static final Option MAPPERS = new Option(null, "mappers", true, - "Number of mappers to use during the copy (mapreduce.job.maps)."); - static final Option BANDWIDTH = new Option(null, "bandwidth", true, - "Limit bandwidth to this value in MB/second."); + "Number of mappers to use during the copy (mapreduce.job.maps)."); + static final Option BANDWIDTH = + new Option(null, "bandwidth", true, "Limit bandwidth to this value in MB/second."); } // Export Map-Reduce Counters, to keep track of the progress public enum Counter { - MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, - BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED + MISSING_FILES, + FILES_COPIED, + FILES_SKIPPED, + COPY_FAILED, + BYTES_EXPECTED, + BYTES_SKIPPED, + BYTES_COPIED } - private static class ExportMapper extends Mapper { + private static class ExportMapper + extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(ExportMapper.class); final static int REPORT_SIZE = 1 * 1024 * 1024; final static int BUFFER_SIZE = 64 * 1024; @@ -196,7 +198,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { 
filesGroup = conf.get(CONF_FILES_GROUP); filesUser = conf.get(CONF_FILES_USER); - filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); + filesMode = (short) conf.getInt(CONF_FILES_MODE, 0); outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT)); inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); @@ -214,7 +216,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); outputFs = FileSystem.get(outputRoot.toUri(), destConf); } catch (IOException e) { - throw new IOException("Could not get the output FileSystem with root="+ outputRoot, e); + throw new IOException("Could not get the output FileSystem with root=" + outputRoot, e); } // Use the default block size of the outputFs if bigger @@ -241,7 +243,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { @Override public void map(BytesWritable key, NullWritable value, Context context) - throws InterruptedException, IOException { + throws InterruptedException, IOException { SnapshotFileInfo inputInfo = SnapshotFileInfo.parseFrom(key.copyBytes()); Path outputPath = getOutputPath(inputInfo); @@ -257,11 +259,11 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { case HFILE: Path inputPath = new Path(inputInfo.getHfile()); String family = inputPath.getParent().getName(); - TableName table =HFileLink.getReferencedTableName(inputPath.getName()); + TableName table = HFileLink.getReferencedTableName(inputPath.getName()); String region = HFileLink.getReferencedRegionName(inputPath.getName()); String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); path = new Path(CommonFSUtils.getTableDir(new Path("./"), table), - new Path(region, new Path(family, hfile))); + new Path(region, new Path(family, hfile))); break; case WAL: LOG.warn("snapshot does not keeps WALs: " + inputInfo); @@ -274,22 +276,22 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { @SuppressWarnings("checkstyle:linelength") /** - * Used by TestExportSnapshot to test for retries when failures happen. - * Failure is injected in {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. + * Used by TestExportSnapshot to test for retries when failures happen. Failure is injected in + * {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. */ private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo) - throws IOException { + throws IOException { if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) return; if (testing.injectedFailureCount >= testing.failuresCountToInject) return; testing.injectedFailureCount++; context.getCounter(Counter.COPY_FAILED).increment(1); LOG.debug("Injecting failure. 
Count: " + testing.injectedFailureCount); throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s", - testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); + testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); } private void copyFile(final Context context, final SnapshotFileInfo inputInfo, - final Path outputPath) throws IOException { + final Path outputPath) throws IOException { // Get the file information FileStatus inputStat = getSourceFileStatus(context, inputInfo); @@ -357,10 +359,8 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { /** * Try to Preserve the files attribute selected by the user copying them from the source file * This is only required when you are exporting as a different user than "hbase" or on a system - * that doesn't have the "hbase" user. - * - * This is not considered a blocking failure since the user can force a chmod with the user - * that knows is available on the system. + * that doesn't have the "hbase" user. This is not considered a blocking failure since the user + * can force a chmod with the user that knows is available on the system. */ private boolean preserveAttributes(final Path path, final FileStatus refStat) { FileStatus stat; @@ -378,7 +378,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { outputFs.setPermission(path, refStat.getPermission()); } } catch (IOException e) { - LOG.warn("Unable to set the permission for file="+ stat.getPath() +": "+ e.getMessage()); + LOG.warn("Unable to set the permission for file=" + stat.getPath() + ": " + e.getMessage()); return false; } @@ -391,9 +391,10 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { outputFs.setOwner(path, user, group); } } catch (IOException e) { - LOG.warn("Unable to set the owner/group for file="+ stat.getPath() +": "+ e.getMessage()); - LOG.warn("The user/group may not exist on the destination cluster: user=" + - user + " group=" + group); + LOG.warn( + "Unable to set the owner/group for file=" + stat.getPath() + ": " + e.getMessage()); + LOG.warn("The user/group may not exist on the destination cluster: user=" + user + + " group=" + group); return false; } } @@ -405,13 +406,11 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { return str != null && str.length() > 0; } - private void copyData(final Context context, - final Path inputPath, final InputStream in, - final Path outputPath, final FSDataOutputStream out, - final long inputFileSize) - throws IOException { - final String statusMessage = "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + - " (%.1f%%)"; + private void copyData(final Context context, final Path inputPath, final InputStream in, + final Path outputPath, final FSDataOutputStream out, final long inputFileSize) + throws IOException { + final String statusMessage = + "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + " (%.1f%%)"; try { byte[] buffer = new byte[bufferSize]; @@ -427,33 +426,33 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { if (reportBytes >= REPORT_SIZE) { context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context.setStatus( + String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten 
/ (float) inputFileSize) * 100.0f) + " from " + inputPath + + " to " + outputPath); reportBytes = 0; } } long etime = EnvironmentEdgeManager.currentTime(); context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context + .setStatus(String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten / (float) inputFileSize) * 100.0f) + " from " + inputPath + " to " + + outputPath); // Verify that the written size match if (totalBytesWritten != inputFileSize) { - String msg = "number of bytes copied not matching copied=" + totalBytesWritten + - " expected=" + inputFileSize + " for file=" + inputPath; + String msg = "number of bytes copied not matching copied=" + totalBytesWritten + + " expected=" + inputFileSize + " for file=" + inputPath; throw new IOException(msg); } LOG.info("copy completed for input=" + inputPath + " output=" + outputPath); - LOG.info("size=" + totalBytesWritten + - " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" + - " time=" + StringUtils.formatTimeDiff(etime, stime) + - String.format(" %.3fM/sec", (totalBytesWritten / ((etime - stime)/1000.0))/1048576.0)); + LOG + .info("size=" + totalBytesWritten + " (" + StringUtils.humanReadableInt(totalBytesWritten) + + ")" + " time=" + StringUtils.formatTimeDiff(etime, stime) + String + .format(" %.3fM/sec", (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0)); context.getCounter(Counter.FILES_COPIED).increment(1); } catch (IOException e) { LOG.error("Error copying " + inputPath + " to " + outputPath, e); @@ -463,12 +462,11 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } /** - * Try to open the "source" file. - * Throws an IOException if the communication with the inputFs fail or - * if the file is not found. + * Try to open the "source" file. Throws an IOException if the communication with the inputFs + * fail or if the file is not found. 
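copyData above pushes the stream through a 64 KB buffer and only touches Counter.BYTES_COPIED and the task status once REPORT_SIZE (1 MB) has accumulated, which keeps counter traffic cheap on large HFiles. A stripped-down sketch of that reporting loop with the MapReduce context replaced by a plain callback; all names here are illustrative:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.function.LongConsumer;

public final class ThrottledCopySketch {
  private static final int REPORT_SIZE = 1 * 1024 * 1024; // report every 1 MB, as above
  private static final int BUFFER_SIZE = 64 * 1024;

  /** Copies in to out, invoking onProgress with the bytes accumulated since the last report. */
  public static long copy(InputStream in, OutputStream out, LongConsumer onProgress)
      throws IOException {
    byte[] buffer = new byte[BUFFER_SIZE];
    long totalBytesWritten = 0;
    long reportBytes = 0;
    int bytesRead;
    while ((bytesRead = in.read(buffer)) > 0) {
      out.write(buffer, 0, bytesRead);
      totalBytesWritten += bytesRead;
      reportBytes += bytesRead;
      if (reportBytes >= REPORT_SIZE) {
        onProgress.accept(reportBytes); // e.g. increment Counter.BYTES_COPIED and set status
        reportBytes = 0;
      }
    }
    onProgress.accept(reportBytes); // flush the remainder
    return totalBytesWritten;
  }
}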
*/ private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo) - throws IOException { + throws IOException { try { Configuration conf = context.getConfiguration(); FileLink link = null; @@ -494,7 +492,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo fileInfo) - throws IOException { + throws IOException { try { Configuration conf = context.getConfiguration(); FileLink link = null; @@ -520,12 +518,12 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } } - private FileLink getFileLink(Path path, Configuration conf) throws IOException{ + private FileLink getFileLink(Path path, Configuration conf) throws IOException { String regionName = HFileLink.getReferencedRegionName(path.getName()); TableName tableName = HFileLink.getReferencedTableName(path.getName()); - if(MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { + if (MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { return HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), path); + HFileArchiveUtil.getArchivePath(conf), path); } return HFileLink.buildFromHFileLinkPattern(inputRoot, inputArchive, path); } @@ -540,8 +538,8 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } /** - * Check if the two files are equal by looking at the file length, - * and at the checksum (if user has specified the verifyChecksum flag). + * Check if the two files are equal by looking at the file length, and at the checksum (if user + * has specified the verifyChecksum flag). */ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat) { // Not matching length @@ -562,7 +560,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } // ========================================================================== - // Input Format + // Input Format // ========================================================================== /** @@ -570,7 +568,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { * @return list of files referenced by the snapshot (pair of path and size) */ private static List> getSnapshotFiles(final Configuration conf, - final FileSystem fs, final Path snapshotDir) throws IOException { + final FileSystem fs, final Path snapshotDir) throws IOException { SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); final List> files = new ArrayList<>(); @@ -582,7 +580,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { // for storeFile.hasReference() case, copied as part of the manifest if (!storeFile.hasReference()) { String region = regionInfo.getEncodedName(); @@ -590,9 +588,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { Path path = HFileLink.createPath(table, region, family, hfile); SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() - .setType(SnapshotFileInfo.Type.HFILE) - .setHfile(path.toString()) - .build(); + .setType(SnapshotFileInfo.Type.HFILE).setHfile(path.toString()).build(); long size; if 
(storeFile.hasFileSize()) { @@ -603,7 +599,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { files.add(new Pair<>(fileInfo, size)); } } - }); + }); return files; } @@ -612,12 +608,11 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { * Given a list of file paths and sizes, create around ngroups in as balanced a way as possible. * The groups created will have similar amounts of bytes. *

<p>
      - * The algorithm used is pretty straightforward; the file list is sorted by size, - * and then each group fetch the bigger file available, iterating through groups - * alternating the direction. + * The algorithm used is pretty straightforward; the file list is sorted by size, and then each + * group fetch the bigger file available, iterating through groups alternating the direction. */ - static List>> getBalancedSplits( - final List> files, final int ngroups) { + static List>> + getBalancedSplits(final List> files, final int ngroups) { // Sort files by size, from small to big Collections.sort(files, new Comparator>() { public int compare(Pair a, Pair b) { @@ -673,8 +668,8 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { private static class ExportSnapshotInputFormat extends InputFormat { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext tac) throws IOException, InterruptedException { - return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit)split).getSplitKeys()); + TaskAttemptContext tac) throws IOException, InterruptedException { + return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit) split).getSplitKeys()); } @Override @@ -694,7 +689,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { List>> groups = getBalancedSplits(snapshotFiles, mappers); List splits = new ArrayList(groups.size()); - for (List> files: groups) { + for (List> files : groups) { splits.add(new ExportSnapshotInputSplit(files)); } return splits; @@ -710,9 +705,9 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { public ExportSnapshotInputSplit(final List> snapshotFiles) { this.files = new ArrayList(snapshotFiles.size()); - for (Pair fileInfo: snapshotFiles) { - this.files.add(new Pair<>( - new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); + for (Pair fileInfo : snapshotFiles) { + this.files.add( + new Pair<>(new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); this.length += fileInfo.getSecond(); } } @@ -748,7 +743,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { @Override public void write(DataOutput out) throws IOException { out.writeInt(files.size()); - for (final Pair fileInfo: files) { + for (final Pair fileInfo : files) { fileInfo.getFirst().write(out); out.writeLong(fileInfo.getSecond()); } @@ -756,7 +751,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } private static class ExportSnapshotRecordReader - extends RecordReader { + extends RecordReader { private final List> files; private long totalSize = 0; private long procSize = 0; @@ -764,48 +759,55 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { ExportSnapshotRecordReader(final List> files) { this.files = files; - for (Pair fileInfo: files) { + for (Pair fileInfo : files) { totalSize += fileInfo.getSecond(); } } @Override - public void close() { } + public void close() { + } @Override - public BytesWritable getCurrentKey() { return files.get(index).getFirst(); } + public BytesWritable getCurrentKey() { + return files.get(index).getFirst(); + } @Override - public NullWritable getCurrentValue() { return NullWritable.get(); } + public NullWritable getCurrentValue() { + return NullWritable.get(); + } @Override - public float getProgress() { return (float)procSize / totalSize; } + public float getProgress() { + return (float) procSize / totalSize; + } @Override - public void 
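The reflowed javadoc above describes getBalancedSplits as a snake draft: sort the files by size, then hand the biggest remaining file to the groups while sweeping back and forth across them. A self-contained sketch of that strategy on bare sizes; the class and method names are mine, and the real method carries Pair<SnapshotFileInfo, Long> entries and may break ties differently:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public final class BalancedSplitsSketch {
  /** Snake-drafts the sizes (largest first) across ngroups so byte totals stay close. */
  public static List<List<Long>> balance(List<Long> sizes, int ngroups) {
    List<Long> sorted = new ArrayList<>(sizes);
    Collections.sort(sorted);                      // small to big, as in the code above
    List<List<Long>> groups = new ArrayList<>(ngroups);
    for (int g = 0; g < ngroups; g++) {
      groups.add(new ArrayList<>());
    }
    int g = 0;
    int dir = 1;                                   // +1 forward, -1 backward through the groups
    for (int i = sorted.size() - 1; i >= 0; i--) { // hand out the biggest file still available
      groups.get(g).add(sorted.get(i));
      if (g + dir < 0 || g + dir >= ngroups) {
        dir = -dir;                                // bounce at either end: 0,1,...,n-1,n-1,...,1,0
      } else {
        g += dir;
      }
    }
    return groups;
  }

  public static void main(String[] args) {
    System.out.println(balance(Arrays.asList(100L, 80L, 60L, 40L, 20L, 10L), 3));
  }
}

For six files of sizes 100 down to 10 and three groups this yields byte totals of 110, 100 and 100.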
initialize(InputSplit split, TaskAttemptContext tac) { } + public void initialize(InputSplit split, TaskAttemptContext tac) { + } @Override public boolean nextKeyValue() { if (index >= 0) { procSize += files.get(index).getSecond(); } - return(++index < files.size()); + return (++index < files.size()); } } } // ========================================================================== - // Tool + // Tool // ========================================================================== /** * Run Map-Reduce Job to perform the files copy. */ - private void runCopyJob(final Path inputRoot, final Path outputRoot, - final String snapshotName, final Path snapshotDir, final boolean verifyChecksum, - final String filesUser, final String filesGroup, final int filesMode, - final int mappers, final int bandwidthMB) - throws IOException, InterruptedException, ClassNotFoundException { + private void runCopyJob(final Path inputRoot, final Path outputRoot, final String snapshotName, + final Path snapshotDir, final boolean verifyChecksum, final String filesUser, + final String filesGroup, final int filesMode, final int mappers, final int bandwidthMB) + throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup); if (filesUser != null) conf.set(CONF_FILES_USER, filesUser); @@ -834,11 +836,9 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { // Acquire the delegation Tokens Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { inputRoot }, srcConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { inputRoot }, srcConf); Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { outputRoot }, destConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { outputRoot }, destConf); // Run the MR Job if (!job.waitForCompletion(true)) { @@ -846,8 +846,8 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } } - private void verifySnapshot(final Configuration baseConf, - final FileSystem fs, final Path rootDir, final Path snapshotDir) throws IOException { + private void verifySnapshot(final Configuration baseConf, final FileSystem fs, final Path rootDir, + final Path snapshotDir) throws IOException { // Update the conf with the current root dir, since may be a different cluster Configuration conf = new Configuration(baseConf); CommonFSUtils.setRootDir(conf, rootDir); @@ -857,9 +857,9 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } private void setConfigParallel(FileSystem outputFs, List traversedPath, - BiConsumer task, Configuration conf) throws IOException { + BiConsumer task, Configuration conf) throws IOException { ExecutorService pool = Executors - .newFixedThreadPool(conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); + .newFixedThreadPool(conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); List> futures = new ArrayList<>(); for (Path dstPath : traversedPath) { Future future = (Future) pool.submit(() -> task.accept(outputFs, dstPath)); @@ -877,19 +877,19 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } private void setOwnerParallel(FileSystem outputFs, String filesUser, String filesGroup, - Configuration 
conf, List traversedPath) throws IOException { + Configuration conf, List traversedPath) throws IOException { setConfigParallel(outputFs, traversedPath, (fs, path) -> { try { fs.setOwner(path, filesUser, filesGroup); } catch (IOException e) { throw new RuntimeException( - "set owner for file " + path + " to " + filesUser + ":" + filesGroup + " failed", e); + "set owner for file " + path + " to " + filesUser + ":" + filesGroup + " failed", e); } }, conf); } private void setPermissionParallel(final FileSystem outputFs, final short filesMode, - final List traversedPath, final Configuration conf) throws IOException { + final List traversedPath, final Configuration conf) throws IOException { if (filesMode <= 0) { return; } @@ -899,7 +899,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { fs.setPermission(path, perm); } catch (IOException e) { throw new RuntimeException( - "set permission for file " + path + " to " + filesMode + " failed", e); + "set permission for file " + path + " to " + filesMode + " failed", e); } }, conf); } @@ -956,8 +956,8 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } if (outputRoot == null) { - System.err.println("Destination file-system (--" + Options.COPY_TO.getLongOpt() - + ") not provided."); + System.err + .println("Destination file-system (--" + Options.COPY_TO.getLongOpt() + ") not provided."); LOG.error("Use -h or --help for usage instructions."); return 0; } @@ -977,16 +977,17 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf); - boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) || - conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; + boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) + || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot); - Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, - destConf); - Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); + Path snapshotTmpDir = + SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, destConf); + Path outputSnapshotDir = + SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); Path initialOutputSnapshotDir = skipTmp ? 
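setOwnerParallel and setPermissionParallel both go through setConfigParallel, which fans the per-path chown/chmod calls out over a pool sized by CONF_COPY_MANIFEST_THREADS and then joins every Future, so a failure on any path fails the export. A generic sketch of that fan-out/join shape with the Hadoop FileSystem call abstracted behind a Consumer; the names are illustrative:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.Consumer;

public final class ParallelApplySketch {
  /** Applies task to every path on a bounded pool, surfacing the first failure as IOException. */
  public static void applyParallel(List<String> paths, Consumer<String> task, int threads)
      throws IOException {
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    List<Future<?>> futures = new ArrayList<>();
    for (String path : paths) {
      futures.add(pool.submit(() -> task.accept(path))); // e.g. fs.setOwner / fs.setPermission
    }
    try {
      for (Future<?> future : futures) {
        future.get();
      }
    } catch (InterruptedException | ExecutionException e) {
      throw new IOException(e);
    } finally {
      pool.shutdownNow();
    }
  }
}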
outputSnapshotDir : snapshotTmpDir; LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot); - LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", - outputFs, outputRoot.toString(), skipTmp, initialOutputSnapshotDir); + LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", outputFs, + outputRoot.toString(), skipTmp, initialOutputSnapshotDir); // Verify snapshot source before copying files if (verifySource) { @@ -1016,8 +1017,8 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { return 1; } } else { - System.err.println("The snapshot '" + targetName + - "' already exists in the destination: " + outputSnapshotDir); + System.err.println("The snapshot '" + targetName + "' already exists in the destination: " + + outputSnapshotDir); return 1; } } @@ -1027,19 +1028,23 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { if (outputFs.exists(snapshotTmpDir)) { if (overwrite) { if (!outputFs.delete(snapshotTmpDir, true)) { - System.err.println("Unable to remove existing snapshot tmp directory: "+snapshotTmpDir); + System.err + .println("Unable to remove existing snapshot tmp directory: " + snapshotTmpDir); return 1; } } else { - System.err.println("A snapshot with the same name '"+ targetName +"' may be in-progress"); - System.err.println("Please check "+snapshotTmpDir+". If the snapshot has completed, "); - System.err.println("consider removing "+snapshotTmpDir+" by using the -overwrite option"); + System.err + .println("A snapshot with the same name '" + targetName + "' may be in-progress"); + System.err + .println("Please check " + snapshotTmpDir + ". If the snapshot has completed, "); + System.err + .println("consider removing " + snapshotTmpDir + " by using the -overwrite option"); return 1; } } } - // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ + // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ // The snapshot references must be copied before the hfiles otherwise the cleaner // will remove them because they are unreferenced. List travesedPaths = new ArrayList<>(); @@ -1047,43 +1052,43 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { try { LOG.info("Copy Snapshot Manifest from " + snapshotDir + " to " + initialOutputSnapshotDir); travesedPaths = - FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf, - conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); + FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf, + conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); copySucceeded = true; } catch (IOException e) { - throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + - snapshotDir + " to=" + initialOutputSnapshotDir, e); + throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + snapshotDir + + " to=" + initialOutputSnapshotDir, e); } finally { if (copySucceeded) { if (filesUser != null || filesGroup != null) { - LOG.warn((filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " - + filesUser) - + (filesGroup == null ? "" : ", Change the group of " + needSetOwnerDir + " to " - + filesGroup)); + LOG.warn( + (filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " + filesUser) + + (filesGroup == null + ? 
"" + : ", Change the group of " + needSetOwnerDir + " to " + filesGroup)); setOwnerParallel(outputFs, filesUser, filesGroup, conf, travesedPaths); } if (filesMode > 0) { LOG.warn("Change the permission of " + needSetOwnerDir + " to " + filesMode); - setPermissionParallel(outputFs, (short)filesMode, travesedPaths, conf); + setPermissionParallel(outputFs, (short) filesMode, travesedPaths, conf); } } } // Write a new .snapshotinfo if the target name is different from the source name if (!targetName.equals(snapshotName)) { - SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir) - .toBuilder() - .setName(targetName) - .build(); + SnapshotDescription snapshotDesc = SnapshotDescriptionUtils + .readSnapshotInfo(inputFs, snapshotDir).toBuilder().setName(targetName).build(); SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDesc, initialOutputSnapshotDir, outputFs); if (filesUser != null || filesGroup != null) { - outputFs.setOwner(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, filesGroup); + outputFs.setOwner( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, + filesGroup); } if (filesMode > 0) { - outputFs.setPermission(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), new FsPermission((short)filesMode)); + outputFs.setPermission( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), + new FsPermission((short) filesMode)); } } @@ -1091,15 +1096,15 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { // The snapshot references must be copied before the files otherwise the files gets removed // by the HFileArchiver, since they have no references. 
try { - runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, - filesUser, filesGroup, filesMode, mappers, bandwidthMB); + runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, filesUser, + filesGroup, filesMode, mappers, bandwidthMB); LOG.info("Finalize the Snapshot Export"); if (!skipTmp) { // Step 3 - Rename fs2:/.snapshot/.tmp/ fs2:/.snapshot/ if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) { - throw new ExportSnapshotException("Unable to rename snapshot directory from=" + - snapshotTmpDir + " to=" + outputSnapshotDir); + throw new ExportSnapshotException("Unable to rename snapshot directory from=" + + snapshotTmpDir + " to=" + outputSnapshotDir); } } @@ -1127,18 +1132,16 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { @Override protected void printUsage() { super.printUsage(); - System.out.println("\n" - + "Examples:\n" - + " hbase snapshot export \\\n" - + " --snapshot MySnapshot --copy-to hdfs://srv2:8082/hbase \\\n" - + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" - + "\n" - + " hbase snapshot export \\\n" - + " --snapshot MySnapshot --copy-from hdfs://srv2:8082/hbase \\\n" - + " --copy-to hdfs://srv1:50070/hbase"); + System.out.println("\n" + "Examples:\n" + " hbase snapshot export \\\n" + + " --snapshot MySnapshot --copy-to hdfs://srv2:8082/hbase \\\n" + + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" + "\n" + + " hbase snapshot export \\\n" + + " --snapshot MySnapshot --copy-from hdfs://srv2:8082/hbase \\\n" + + " --copy-to hdfs://srv1:50070/hbase"); } - @Override protected void addOptions() { + @Override + protected void addOptions() { addRequiredOption(Options.SNAPSHOT); addOption(Options.COPY_TO); addOption(Options.COPY_FROM); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java index 9432f309adb..e7489fc5b8f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,8 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.yetus.audience.InterfaceAudience; /** - * Generate a classpath string containing any jars required by mapreduce jobs. Specify - * additional values by providing a comma-separated list of paths via -Dtmpjars. + * Generate a classpath string containing any jars required by mapreduce jobs. Specify additional + * values by providing a comma-separated list of paths via -Dtmpjars. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MapreduceDependencyClasspathTool implements Tool { @@ -49,8 +49,10 @@ public class MapreduceDependencyClasspathTool implements Tool { public int run(String[] args) throws Exception { if (args.length > 0) { System.err.println("Usage: hbase mapredcp [-Dtmpjars=...]"); - System.err.println(" Construct a CLASSPATH containing dependency jars required to run a mapreduce"); - System.err.println(" job. By default, includes any jars detected by TableMapReduceUtils. 
Provide"); + System.err + .println(" Construct a CLASSPATH containing dependency jars required to run a mapreduce"); + System.err + .println(" job. By default, includes any jars detected by TableMapReduceUtils. Provide"); System.err.println(" additional entries by specifying a comma-separated list in tmpjars."); return 0; } @@ -63,7 +65,7 @@ public class MapreduceDependencyClasspathTool implements Tool { public static void main(String[] argv) throws Exception { // Silence the usual noise. This is probably fragile... Log4jUtils.setLogLevel("org.apache.hadoop.hbase", "WARN"); - System.exit(ToolRunner.run( - HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); + System.exit( + ToolRunner.run(HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 417349dcf85..f848bea7264 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -114,21 +113,18 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Script used evaluating HBase performance and scalability. Runs a HBase - * client that steps through one of a set of hardcoded tests or 'experiments' - * (e.g. a random reads test, a random writes test, etc.). Pass on the - * command-line which test to run and how many clients are participating in - * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage. - * - *

<p>
      This class sets up and runs the evaluation programs described in - * Section 7, Performance Evaluation, of the Bigtable - * paper, pages 8-10. - * - *

<p>
      By default, runs as a mapreduce job where each mapper runs a single test - * client. Can also run as a non-mapreduce, multithreaded application by - * specifying {@code --nomapred}. Each client does about 1GB of data, unless - * specified otherwise. + * Script used evaluating HBase performance and scalability. Runs a HBase client that steps through + * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test, a random writes test, + * etc.). Pass on the command-line which test to run and how many clients are participating in this + * experiment. Run {@code PerformanceEvaluation --help} to obtain usage. + *

<p>
      + * This class sets up and runs the evaluation programs described in Section 7, Performance + * Evaluation, of the Bigtable paper, + * pages 8-10. + *

<p>
      + * By default, runs as a mapreduce job where each mapper runs a single test client. Can also run as + * a non-mapreduce, multithreaded application by specifying {@code --nomapred}. Each client does + * about 1GB of data, unless specified otherwise. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PerformanceEvaluation extends Configured implements Tool { @@ -167,11 +163,9 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run async sequential read test"); addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite", "Run async sequential write test"); - addCommandDescriptor(AsyncScanTest.class, "asyncScan", - "Run async scan test (read every row)"); + addCommandDescriptor(AsyncScanTest.class, "asyncScan", "Run async scan test (read every row)"); addCommandDescriptor(RandomReadTest.class, RANDOM_READ, "Run random read test"); - addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", - "Run getRegionLocation test"); + addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", "Run getRegionLocation test"); addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN, "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", @@ -182,18 +176,15 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run random seek scan with both start and stop row (max 1000 rows)"); addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", "Run random seek scan with both start and stop row (max 10000 rows)"); - addCommandDescriptor(RandomWriteTest.class, "randomWrite", - "Run random write test"); - addCommandDescriptor(SequentialReadTest.class, "sequentialRead", - "Run sequential read test"); - addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", - "Run sequential write test"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); addCommandDescriptor(MetaWriteTest.class, "metaWrite", "Populate meta table;used with 1 thread; to be cleaned up by cleanMeta"); addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", - "Run scan test using a filter to find a specific row based on it's value " + - "(make sure to use --rows=20)"); + "Run scan test using a filter to find a specific row based on it's value " + + "(make sure to use --rows=20)"); addCommandDescriptor(IncrementTest.class, "increment", "Increment on each row; clients overlap on keyspace so some concurrent operations"); addCommandDescriptor(AppendTest.class, "append", @@ -209,8 +200,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. 
*/ protected static enum Counter { /** elapsed time */ @@ -245,7 +236,8 @@ public class PerformanceEvaluation extends Configured implements Tool { return Long.toString(duration); } - @Override public int compareTo(RunResult o) { + @Override + public int compareTo(RunResult o) { return Long.compare(this.duration, o.duration); } } @@ -258,8 +250,8 @@ public class PerformanceEvaluation extends Configured implements Tool { super(conf); } - protected static void addCommandDescriptor(Class cmdClass, - String name, String description) { + protected static void addCommandDescriptor(Class cmdClass, String name, + String description) { CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); COMMANDS.put(name, cmdDescriptor); } @@ -270,8 +262,7 @@ public class PerformanceEvaluation extends Configured implements Tool { interface Status { /** * Sets status - * @param msg status message - * @throws IOException + * @param msg status message n */ void setStatus(final String msg) throws IOException; } @@ -280,7 +271,7 @@ public class PerformanceEvaluation extends Configured implements Tool { * MapReduce job that runs a performance evaluation client in each map task. */ public static class EvaluationMapTask - extends Mapper { + extends Mapper { /** configuration parameter name that contains the command */ public final static String CMD_KEY = "EvaluationMapTask.command"; @@ -296,7 +287,7 @@ public class PerformanceEvaluation extends Configured implements Tool { // this is required so that extensions of PE are instantiated within the // map reduce task... Class peClass = - forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); + forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); try { peClass.getConstructor(Configuration.class).newInstance(context.getConfiguration()); } catch (Exception e) { @@ -314,12 +305,12 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override protected void map(LongWritable key, Text value, final Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Status status = new Status() { @Override public void setStatus(String msg) { - context.setStatus(msg); + context.setStatus(msg); } }; @@ -334,7 +325,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } // Evaluation task - RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); + RunResult result = + PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); // Collect how much time the thing took. Report as map output and // to the ELAPSED_TIME counter. context.getCounter(Counter.ELAPSED_TIME).increment(result.duration); @@ -345,9 +337,9 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - * If table does not already exist, create. Also create a table when - * {@code opts.presplitRegions} is specified or when the existing table's - * region replica count doesn't match {@code opts.replicas}. + * If table does not already exist, create. Also create a table when {@code opts.presplitRegions} + * is specified or when the existing table's region replica count doesn't match {@code + * opts.replicas}. 
*/ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { TableName tableName = TableName.valueOf(opts.tableName); @@ -365,23 +357,19 @@ public class PerformanceEvaluation extends Configured implements Tool { // recreate the table when user has requested presplit or when existing // {RegionSplitPolicy,replica count} does not match requested, or when the // number of column families does not match requested. - if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions) - || (!isReadCmd && desc != null && - !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) - || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) - || (desc != null && desc.getColumnFamilyCount() != opts.families)) { + if ( + (exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions) + || (!isReadCmd && desc != null + && !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) + || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) + || (desc != null && desc.getColumnFamilyCount() != opts.families) + ) { needsDelete = true; // wait, why did it delete my table?!? - LOG.debug(MoreObjects.toStringHelper("needsDelete") - .add("needsDelete", needsDelete) - .add("isReadCmd", isReadCmd) - .add("exists", exists) - .add("desc", desc) - .add("presplit", opts.presplitRegions) - .add("splitPolicy", opts.splitPolicy) - .add("replicas", opts.replicas) - .add("families", opts.families) - .toString()); + LOG.debug(MoreObjects.toStringHelper("needsDelete").add("needsDelete", needsDelete) + .add("isReadCmd", isReadCmd).add("exists", exists).add("desc", desc) + .add("presplit", opts.presplitRegions).add("splitPolicy", opts.splitPolicy) + .add("replicas", opts.replicas).add("families", opts.families).toString()); } // remove an existing table @@ -439,8 +427,7 @@ public class PerformanceEvaluation extends Configured implements Tool { * generates splits based on total number of rows and specified split regions */ protected static byte[][] getSplits(TestOptions opts) { - if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) - return null; + if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) return null; int numSplitPoints = opts.presplitRegions - 1; byte[][] splits = new byte[numSplitPoints][]; @@ -467,7 +454,7 @@ public class PerformanceEvaluation extends Configured implements Tool { * Run all clients in this vm each to its own thread. 
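getSplits above produces presplitRegions - 1 split points; the visible part allocates the array, and the elided body presumably steps through the keyspace in totalRows / presplitRegions jumps. A sketch under that assumption; the keyWidth parameter and the zero-padded formatting stand in for PerformanceEvaluation's own row formatter and are not its exact output:

import java.nio.charset.StandardCharsets;

public final class PresplitSketch {
  /** Evenly spaced, zero-padded split keys: presplitRegions - 1 points over totalRows rows. */
  public static byte[][] splitKeys(int totalRows, int presplitRegions, int keyWidth) {
    int numSplitPoints = presplitRegions - 1;
    byte[][] splits = new byte[numSplitPoints][];
    int jump = totalRows / presplitRegions;
    for (int i = 0; i < numSplitPoints; i++) {
      int rowkey = jump * (1 + i);
      splits[i] = String.format("%0" + keyWidth + "d", rowkey).getBytes(StandardCharsets.UTF_8);
    }
    return splits;
  }

  public static void main(String[] args) {
    for (byte[] key : splitKeys(1000, 4, 10)) {
      System.out.println(new String(key, StandardCharsets.UTF_8)); // 0000000250, 0000000500, 0000000750
    }
  }
}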
*/ static RunResult[] doLocalClients(final TestOptions opts, final Configuration conf) - throws IOException, InterruptedException, ExecutionException { + throws IOException, InterruptedException, ExecutionException { final Class cmd = determineCommandClass(opts.cmdName); assert cmd != null; @SuppressWarnings("unchecked") @@ -482,8 +469,8 @@ public class PerformanceEvaluation extends Configured implements Tool { cons[i] = ConnectionFactory.createConnection(conf); asyncCons[i] = ConnectionFactory.createAsyncConnection(conf).get(); } - LOG.info("Created " + opts.connCount + " connections for " + - opts.numClientThreads + " threads"); + LOG + .info("Created " + opts.connCount + " connections for " + opts.numClientThreads + " threads"); for (int i = 0; i < threads.length; i++) { final int index = i; threads[i] = pool.submit(new Callable() { @@ -499,11 +486,11 @@ public class PerformanceEvaluation extends Configured implements Tool { LOG.info(msg); } }); - LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + - "ms over " + threadOpts.perClientRunRows + " rows"); + LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + + "ms over " + threadOpts.perClientRunRows + " rows"); if (opts.latencyThreshold > 0) { - LOG.info("Number of replies over latency threshold " + opts.latencyThreshold + - "(ms) is " + run.numbOfReplyOverThreshold); + LOG.info("Number of replies over latency threshold " + opts.latencyThreshold + + "(ms) is " + run.numbOfReplyOverThreshold); } return run; } @@ -519,11 +506,10 @@ public class PerformanceEvaluation extends Configured implements Tool { } } final String test = cmd.getSimpleName(); - LOG.info("[" + test + "] Summary of timings (ms): " - + Arrays.toString(results)); + LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(results)); Arrays.sort(results); long total = 0; - float avgLatency = 0 ; + float avgLatency = 0; float avgTPS = 0; long replicaWins = 0; for (RunResult result : results) { @@ -534,10 +520,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } avgTPS *= 1000; // ms to second avgLatency = avgLatency / results.length; - LOG.info("[" + test + " duration ]" - + "\tMin: " + results[0] + "ms" - + "\tMax: " + results[results.length - 1] + "ms" - + "\tAvg: " + (total / results.length) + "ms"); + LOG.info("[" + test + " duration ]" + "\tMin: " + results[0] + "ms" + "\tMax: " + + results[results.length - 1] + "ms" + "\tAvg: " + (total / results.length) + "ms"); LOG.info("[ Avg latency (us)]\t" + Math.round(avgLatency)); LOG.info("[ Avg TPS/QPS]\t" + Math.round(avgTPS) + "\t row per second"); if (opts.replicas > 1) { @@ -553,14 +537,12 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - * Run a mapreduce job. Run as many maps as asked-for clients. - * Before we start up the job, write out an input file with instruction - * per client regards which row they are to start on. - * @param cmd Command to run. - * @throws IOException + * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write + * out an input file with instruction per client regards which row they are to start on. + * @param cmd Command to run. 
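doLocalClients submits one Callable per client thread, waits on every Future, then sorts the durations and logs min, max and average. A compact sketch of that pattern with the HBase connections and the actual test body factored out into the supplied Callable; the names are illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public final class LocalClientsSketch {
  /** Runs one timed task per client thread and summarizes durations like doLocalClients does. */
  public static void run(int numClients, Callable<Long> clientTask)
      throws InterruptedException, ExecutionException {
    ExecutorService pool = Executors.newFixedThreadPool(numClients);
    List<Future<Long>> futures = new ArrayList<>();
    for (int i = 0; i < numClients; i++) {
      futures.add(pool.submit(clientTask)); // each task returns its elapsed milliseconds
    }
    long[] results = new long[numClients];
    for (int i = 0; i < numClients; i++) {
      results[i] = futures.get(i).get();
    }
    pool.shutdown();
    Arrays.sort(results);
    long total = Arrays.stream(results).sum();
    System.out.println("Min: " + results[0] + "ms, Max: " + results[numClients - 1]
      + "ms, Avg: " + (total / numClients) + "ms");
  }
}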
n */ static Job doMapReduce(TestOptions opts, final Configuration conf) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { final Class cmd = determineCommandClass(opts.cmdName); assert cmd != null; Path inputDir = writeInputFile(conf, opts); @@ -587,11 +569,11 @@ public class PerformanceEvaluation extends Configured implements Tool { TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs")); TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - Histogram.class, // yammer metrics - Gson.class, // gson + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Histogram.class, // yammer + // metrics + Gson.class, // gson FilterAllFilter.class // hbase-server tests jar - ); + ); TableMapReduceUtil.initCredentials(job); @@ -600,7 +582,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Each client has one mapper to do the work, and client do the resulting count in a map task. + * Each client has one mapper to do the work, and client do the resulting count in a map task. */ static String JOB_INPUT_FILENAME = "input.txt"; @@ -608,15 +590,14 @@ public class PerformanceEvaluation extends Configured implements Tool { /* * Write input file of offsets-per-client for the mapreduce job. * @param c Configuration - * @return Directory that contains file written whose name is JOB_INPUT_FILENAME - * @throws IOException + * @return Directory that contains file written whose name is JOB_INPUT_FILENAME n */ static Path writeInputFile(final Configuration c, final TestOptions opts) throws IOException { return writeInputFile(c, opts, new Path(".")); } static Path writeInputFile(final Configuration c, final TestOptions opts, final Path basedir) - throws IOException { + throws IOException { SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss"); Path jobdir = new Path(new Path(basedir, PERF_EVAL_DIR), formatter.format(new Date())); Path inputDir = new Path(jobdir, "inputs"); @@ -641,7 +622,7 @@ public class PerformanceEvaluation extends Configured implements Tool { int hash = h.hash(new ByteArrayHashKey(b, 0, b.length), -1); m.put(hash, s); } - for (Map.Entry e: m.entrySet()) { + for (Map.Entry e : m.entrySet()) { out.println(e.getValue()); } } finally { @@ -678,11 +659,11 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. - * This makes tracking all these arguments a little easier. - * NOTE: ADDING AN OPTION, you need to add a data member, a getter/setter (to make JSON - * serialization of this TestOptions class behave), and you need to add to the clone constructor - * below copying your new option from the 'that' to the 'this'. Look for 'clone' below. + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. This makes + * tracking all these arguments a little easier. NOTE: ADDING AN OPTION, you need to add a data + * member, a getter/setter (to make JSON serialization of this TestOptions class behave), and you + * need to add to the clone constructor below copying your new option from the 'that' to the + * 'this'. Look for 'clone' below. 
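The note above is easiest to see with a concrete, hypothetical option. A sketch of what adding a boolean flag to a TestOptions-like class involves, following the three steps it lists; the class and field names are mine:

// Hypothetical new option "myNewFlag" added to a TestOptions-like class, per the note above.
class TestOptionsSketch {
  boolean myNewFlag = false;                  // 1) the data member

  public boolean isMyNewFlag() {              // 2) getter/setter so JSON (de)serialization sees it
    return myNewFlag;
  }

  public void setMyNewFlag(boolean myNewFlag) {
    this.myNewFlag = myNewFlag;
  }

  TestOptionsSketch() {
  }

  TestOptionsSketch(TestOptionsSketch that) { // 3) the clone constructor must copy it across
    this.myNewFlag = that.myNewFlag;
  }
}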
*/ static class TestOptions { String cmdName = null; @@ -705,7 +686,7 @@ public class PerformanceEvaluation extends Configured implements Tool { boolean writeToWAL = true; boolean autoFlush = false; boolean oneCon = false; - int connCount = -1; //wil decide the actual num later + int connCount = -1; // wil decide the actual num later boolean useTags = false; int noOfTags = 1; boolean reportLatency = false; @@ -723,7 +704,7 @@ public class PerformanceEvaluation extends Configured implements Tool { boolean valueRandom = false; boolean valueZipf = false; int valueSize = DEFAULT_VALUE_LENGTH; - int period = (this.perClientRunRows / 10) == 0? perClientRunRows: perClientRunRows / 10; + int period = (this.perClientRunRows / 10) == 0 ? perClientRunRows : perClientRunRows / 10; int cycles = 1; int columns = 1; int families = 1; @@ -731,14 +712,14 @@ public class PerformanceEvaluation extends Configured implements Tool { int latencyThreshold = 0; // in millsecond boolean addColumns = true; MemoryCompactionPolicy inMemoryCompaction = - MemoryCompactionPolicy.valueOf( - CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); + MemoryCompactionPolicy.valueOf(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); boolean asyncPrefetch = false; boolean cacheBlocks = true; Scan.ReadType scanReadType = Scan.ReadType.DEFAULT; long bufferSize = 2l * 1024l * 1024l; - public TestOptions() {} + public TestOptions() { + } /** * Clone constructor. @@ -1131,8 +1112,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - * A test. - * Subclass to particularize what happens per row. + * A test. Subclass to particularize what happens per row. */ static abstract class TestBase { // Below is make it so when Tests are all running in the one @@ -1142,6 +1122,7 @@ public class PerformanceEvaluation extends Configured implements Tool { private static long nextRandomSeed() { return randomSeed.nextLong(); } + private final int everyN; protected final Random rand = new Random(nextRandomSeed()); @@ -1165,8 +1146,8 @@ public class PerformanceEvaluation extends Configured implements Tool { private long numOfReplyFromReplica = 0; /** - * Note that all subclasses of this class must provide a public constructor - * that has the exact same list of arguments. + * Note that all subclasses of this class must provide a public constructor that has the exact + * same list of arguments. 
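The constructor contract stated above matters because the command classes are presumably instantiated reflectively from a Class object, so a subclass that changes the argument list only fails at runtime. A small sketch of that pattern with stand-in types; the real constructor also takes a Configuration, which is omitted here:

import java.lang.reflect.Constructor;

public final class ReflectiveFactorySketch {
  interface Status { void setStatus(String msg); }  // stand-ins for the real PE types
  static class Options { }
  static abstract class TestBaseSketch {
    TestBaseSketch(Options opts, Status status) { }
  }
  static class ExampleTest extends TestBaseSketch {
    ExampleTest(Options opts, Status status) { super(opts, status); } // same argument list
  }

  /** The lookup only succeeds because every subclass keeps the exact same constructor signature. */
  static TestBaseSketch newTest(Class<? extends TestBaseSketch> clazz, Options opts, Status status)
      throws Exception {
    Constructor<? extends TestBaseSketch> ctor =
      clazz.getDeclaredConstructor(Options.class, Status.class);
    ctor.setAccessible(true);
    return ctor.newInstance(opts, status);
  }

  public static void main(String[] args) throws Exception {
    TestBaseSketch t = newTest(ExampleTest.class, new Options(), msg -> System.out.println(msg));
    System.out.println("constructed " + t.getClass().getSimpleName());
  }
}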
*/ TestBase(final Configuration conf, final TestOptions options, final Status status) { this.conf = conf; @@ -1190,13 +1171,14 @@ public class PerformanceEvaluation extends Configured implements Tool { } } - void updateValueSize(final Result [] rs) throws IOException { + void updateValueSize(final Result[] rs) throws IOException { updateValueSize(rs, 0); } - void updateValueSize(final Result [] rs, final long latency) throws IOException { + void updateValueSize(final Result[] rs, final long latency) throws IOException { if (rs == null || (latency == 0)) return; - for (Result r: rs) updateValueSize(r, latency); + for (Result r : rs) + updateValueSize(r, latency); } void updateValueSize(final Result r) throws IOException { @@ -1209,7 +1191,7 @@ public class PerformanceEvaluation extends Configured implements Tool { // update replicaHistogram if (r.isStale()) { replicaLatencyHistogram.update(latency / 1000); - numOfReplyFromReplica ++; + numOfReplyFromReplica++; } if (!isRandomValueSize()) return; @@ -1226,7 +1208,7 @@ public class PerformanceEvaluation extends Configured implements Tool { void updateScanMetrics(final ScanMetrics metrics) { if (metrics == null) return; - Map metricsMap = metrics.getMetricsMap(); + Map metricsMap = metrics.getMetricsMap(); Long rpcCalls = metricsMap.get(ScanMetrics.RPC_CALLS_METRIC_NAME); if (rpcCalls != null) { this.rpcCallsHistogram.update(rpcCalls.longValue()); @@ -1254,8 +1236,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } String generateStatus(final int sr, final int i, final int lr) { - return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + - (!isRandomValueSize()? "": ", value size " + getShortValueSizeReport()); + return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + + (!isRandomValueSize() ? "" : ", value size " + getShortValueSizeReport()); } boolean isRandomValueSize() { @@ -1278,16 +1260,19 @@ public class PerformanceEvaluation extends Configured implements Tool { latencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // If it is a replica test, set up histogram for replica. if (opts.replicas > 1) { - replicaLatencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + replicaLatencyHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); } valueSizeHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // scan metrics rpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); remoteRpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - millisBetweenNextHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + millisBetweenNextHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); regionsScannedHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); bytesInResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - bytesInRemoteResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + bytesInRemoteResultsHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); onStartup(); } @@ -1301,56 +1286,54 @@ public class PerformanceEvaluation extends Configured implements Tool { // output. We can't use 'this' here because each thread has its own instance of Test class. 
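testSetup above builds every histogram over new UniformReservoir(1024 * 500), which samples uniformly rather than favoring recent values, and testTimed later feeds per-row latencies into it. Assuming YammerHistogramUtils.newHistogram simply wraps the Dropwizard Histogram constructor, the measurement core looks like this; the values fed in are made up:

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.UniformReservoir;

public final class LatencyHistogramSketch {
  public static void main(String[] args) {
    // Same reservoir sizing as the tests above: a uniform sample of up to 1024 * 500 measurements.
    Histogram latencyHistogram = new Histogram(new UniformReservoir(1024 * 500));

    for (long latencyMicros : new long[] { 120, 250, 90, 4000, 310 }) {
      latencyHistogram.update(latencyMicros); // what testTimed does once per measured row
    }

    Snapshot snap = latencyHistogram.getSnapshot();
    System.out.println("count=" + latencyHistogram.getCount() + " mean=" + snap.getMean()
      + "us 99th=" + snap.get99thPercentile() + "us max=" + snap.getMax() + "us");
  }
}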
synchronized (Test.class) { status.setStatus("Test : " + testName + ", Thread : " + Thread.currentThread().getName()); - status.setStatus("Latency (us) : " + YammerHistogramUtils.getHistogramReport( - latencyHistogram)); + status + .setStatus("Latency (us) : " + YammerHistogramUtils.getHistogramReport(latencyHistogram)); if (opts.replicas > 1) { - status.setStatus("Latency (us) from Replica Regions: " + - YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); + status.setStatus("Latency (us) from Replica Regions: " + + YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); } status.setStatus("Num measures (latency) : " + latencyHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(latencyHistogram)); if (valueSizeHistogram.getCount() > 0) { - status.setStatus("ValueSize (bytes) : " - + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); + status.setStatus( + "ValueSize (bytes) : " + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); status.setStatus("Num measures (ValueSize): " + valueSizeHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(valueSizeHistogram)); } else { status.setStatus("No valueSize statistics available"); } if (rpcCallsHistogram.getCount() > 0) { - status.setStatus("rpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); + status.setStatus( + "rpcCalls (count): " + YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); } if (remoteRpcCallsHistogram.getCount() > 0) { - status.setStatus("remoteRpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); + status.setStatus("remoteRpcCalls (count): " + + YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); } if (millisBetweenNextHistogram.getCount() > 0) { - status.setStatus("millisBetweenNext (latency): " + - YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); + status.setStatus("millisBetweenNext (latency): " + + YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); } if (regionsScannedHistogram.getCount() > 0) { - status.setStatus("regionsScanned (count): " + - YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); + status.setStatus("regionsScanned (count): " + + YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); } if (bytesInResultsHistogram.getCount() > 0) { - status.setStatus("bytesInResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); + status.setStatus("bytesInResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); } if (bytesInRemoteResultsHistogram.getCount() > 0) { - status.setStatus("bytesInRemoteResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); + status.setStatus("bytesInRemoteResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); } } } abstract void onTakedown() throws IOException; - /* * Run test - * @return Elapsed time. - * @throws IOException + * @return Elapsed time. 
n */ long test() throws IOException, InterruptedException { testSetup(); @@ -1386,12 +1369,12 @@ public class PerformanceEvaluation extends Configured implements Tool { long startTime = System.nanoTime(); boolean requestSent = false; Span span = TraceUtil.getGlobalTracer().spanBuilder("test row").startSpan(); - try (Scope scope = span.makeCurrent()){ + try (Scope scope = span.makeCurrent()) { requestSent = testRow(i, startTime); } finally { span.end(); } - if ( (i - startRow) > opts.measureAfter) { + if ((i - startRow) > opts.measureAfter) { // If multiget or multiput is enabled, say set to 10, testRow() returns immediately // first 9 times and sends the actual get request in the 10th iteration. // We should only set latency when actual request is sent because otherwise @@ -1400,7 +1383,7 @@ public class PerformanceEvaluation extends Configured implements Tool { long latency = (System.nanoTime() - startTime) / 1000; latencyHistogram.update(latency); if ((opts.latencyThreshold > 0) && (latency / 1000 >= opts.latencyThreshold)) { - numOfReplyOverLatencyThreshold ++; + numOfReplyOverLatencyThreshold++; } } if (status != null && i > 0 && (i % getReportingPeriod()) == 0) { @@ -1425,15 +1408,14 @@ public class PerformanceEvaluation extends Configured implements Tool { return YammerHistogramUtils.getShortHistogramReport(this.valueSizeHistogram); } - /** * Test for individual row. * @param i Row index. - * @return true if the row was sent to server and need to record metrics. - * False if not, multiGet and multiPut e.g., the rows are sent - * to server only if enough gets/puts are gathered. + * @return true if the row was sent to server and need to record metrics. False if not, multiGet + * and multiPut e.g., the rows are sent to server only if enough gets/puts are gathered. */ - abstract boolean testRow(final int i, final long startTime) throws IOException, InterruptedException; + abstract boolean testRow(final int i, final long startTime) + throws IOException, InterruptedException; } static abstract class Test extends TestBase { @@ -1473,7 +1455,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest + * Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest */ static abstract class MetaTest extends TableTest { protected int keyLength; @@ -1489,7 +1471,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - Generates Lexicographically ascending strings + * Generates Lexicographically ascending strings */ protected byte[] getSplitKey(final int i) { return Bytes.toBytes(String.format("%0" + keyLength + "d", i)); @@ -1537,7 +1519,7 @@ public class PerformanceEvaluation extends Configured implements Tool { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1554,7 +1536,7 @@ public class PerformanceEvaluation extends Configured implements Tool { this.gets.add(get); if (this.gets.size() == opts.multiGet) { Result[] rs = - this.table.get(this.gets).stream().map(f -> propagate(f::get)).toArray(Result[]::new); + this.table.get(this.gets).stream().map(f -> propagate(f::get)).toArray(Result[]::new); updateValueSize(rs); this.gets.clear(); } else { @@ -1622,9 +1604,8 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override void onStartup() throws IOException { - this.asyncTable = - connection.getTable(TableName.valueOf(opts.tableName), - Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); + this.asyncTable = connection.getTable(TableName.valueOf(opts.tableName), + Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); } @Override @@ -1639,15 +1620,14 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { - Scan scan = - new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1677,7 +1657,7 @@ public class PerformanceEvaluation extends Configured implements Tool { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1719,7 +1699,7 @@ public class PerformanceEvaluation extends Configured implements Tool { for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -1728,8 +1708,8 @@ public class PerformanceEvaluation extends Configured implements Tool { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -1789,16 +1769,16 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override boolean testRow(final int i, final long startTime) throws IOException { - Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)) - .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks) - .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) - .setScanMetricsEnabled(true); + Scan scan = + new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); FilterList list = new FilterList(); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1839,14 +1819,14 @@ public class PerformanceEvaluation extends Configured implements Tool { boolean testRow(final int i, final long startTime) throws IOException { Pair startAndStopRow = getStartAndStopRow(); Scan scan = new Scan().withStartRow(startAndStopRow.getFirst()) - .withStopRow(startAndStopRow.getSecond()).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + .withStopRow(startAndStopRow.getSecond()).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1866,8 +1846,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } if (i % 100 == 0) { LOG.info(String.format("Scan for key range %s - %s returned %s rows", - Bytes.toString(startAndStopRow.getFirst()), - Bytes.toString(startAndStopRow.getSecond()), count)); + Bytes.toString(startAndStopRow.getFirst()), Bytes.toString(startAndStopRow.getSecond()), + count)); } } finally { updateScanMetrics(s.getScanMetrics()); @@ -1876,7 +1856,7 @@ public class PerformanceEvaluation extends Configured implements Tool { return true; } - protected abstract Pair getStartAndStopRow(); + protected abstract Pair getStartAndStopRow(); protected Pair generateStartAndStopRows(int maxRange) { int start = this.rand.nextInt(Integer.MAX_VALUE) % opts.totalRows; @@ -1887,7 +1867,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override protected int getReportingPeriod() { int period = opts.perClientRunRows / 100; - return period == 0? opts.perClientRunRows: period; + return period == 0 ? opts.perClientRunRows : period; } } @@ -1958,7 +1938,7 @@ public class PerformanceEvaluation extends Configured implements Tool { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1973,7 +1953,7 @@ public class PerformanceEvaluation extends Configured implements Tool { if (opts.multiGet > 0) { this.gets.add(get); if (this.gets.size() == opts.multiGet) { - Result [] rs = this.table.get(this.gets); + Result[] rs = this.table.get(this.gets); if (opts.replicas > 1) { long latency = System.nanoTime() - startTime; updateValueSize(rs, latency); @@ -2013,7 +1993,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - Send random reads against fake regions inserted by MetaWriteTest + * Send random reads against fake regions inserted by MetaWriteTest */ static class MetaRandomReadTest extends MetaTest { private Random rd = new Random(); @@ -2035,8 +2015,8 @@ public class PerformanceEvaluation extends Configured implements Tool { if (opts.randomSleep > 0) { Thread.sleep(rd.nextInt(opts.randomSleep)); } - HRegionLocation hRegionLocation = regionLocator.getRegionLocation( - getSplitKey(rd.nextInt(opts.perClientRunRows)), true); + HRegionLocation hRegionLocation = + regionLocator.getRegionLocation(getSplitKey(rd.nextInt(opts.perClientRunRows)), true); LOG.debug("get location for region: " + hRegionLocation); return true; } @@ -2063,7 +2043,6 @@ public class PerformanceEvaluation extends Configured implements Tool { return getRandomRow(this.rand, opts.totalRows); } - } static class ScanTest extends TableTest { @@ -2081,18 +2060,17 @@ public class PerformanceEvaluation extends Configured implements Tool { super.testTakedown(); } - @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + 
.setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -2113,19 +2091,20 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * Base class for operations that are CAS-like; that read a value and then set it based off what * they read. In this category is increment, append, checkAndPut, etc. - * - *

      These operations also want some concurrency going on. Usually when these tests run, they + *

      + * These operations also want some concurrency going on. Usually when these tests run, they * operate in their own part of the key range. In CASTest, we will have them all overlap on the * same key space. We do this with our getStartRow and getLastRow overrides. */ static abstract class CASTableTest extends TableTest { - private final byte [] qualifier; + private final byte[] qualifier; + CASTableTest(Connection con, TestOptions options, Status status) { super(con, options, status); qualifier = Bytes.toBytes(this.getClass().getSimpleName()); } - byte [] getQualifier() { + byte[] getQualifier() { return this.qualifier; } @@ -2167,7 +2146,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override boolean testRow(final int i, final long startTime) throws IOException { - byte [] bytes = format(i); + byte[] bytes = format(i); Append append = new Append(bytes); // unlike checkAndXXX tests, which make most sense to do on a single value, // if multiple families are specified for an append test we assume it is @@ -2188,7 +2167,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2196,8 +2175,8 @@ public class PerformanceEvaluation extends Configured implements Tool { this.table.put(put); RowMutations mutations = new RowMutations(bytes); mutations.add(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenMutate(mutations); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenMutate(mutations); return true; } } @@ -2209,14 +2188,14 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); put.addColumn(FAMILY_ZERO, getQualifier(), bytes); this.table.put(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenPut(put); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenPut(put); return true; } } @@ -2228,7 +2207,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2236,14 +2215,14 @@ public class PerformanceEvaluation extends Configured implements Tool { this.table.put(put); Delete delete = new Delete(put.getRow()); delete.addColumn(FAMILY_ZERO, getQualifier()); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenDelete(delete); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenDelete(delete); return true; } } /* - Delete all fake regions inserted to meta table by MetaWriteTest. + * Delete all fake regions inserted to meta table by MetaWriteTest. 
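For reference, the read-then-set pattern these CAS tests exercise is the fluent checkAndMutate call used above. A minimal sketch under the same client API (row, family and qualifier are illustrative; the Table is assumed to come from an open Connection):

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;

  public class CheckAndPutSketch {
    /** Update the cell only if it still holds the expected value; returns whether it applied. */
    static boolean casUpdate(Table table, byte[] row, byte[] family, byte[] qualifier,
      byte[] expected, byte[] newValue) throws IOException {
      Put put = new Put(row).addColumn(family, qualifier, newValue);
      return table.checkAndMutate(row, family).qualifier(qualifier).ifEquals(expected)
        .thenPut(put);
    }
  }

The CAS tests deliberately make all clients overlap on the same key space (via the getStartRow and getLastRow overrides) so the compare-and-set path sees real contention.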
*/ static class CleanMetaTest extends MetaTest { CleanMetaTest(Connection con, TestOptions options, Status status) { @@ -2257,8 +2236,8 @@ public class PerformanceEvaluation extends Configured implements Tool { .getRegionLocation(getSplitKey(i), false).getRegion(); LOG.debug("deleting region from meta: " + regionInfo); - Delete delete = MetaTableAccessor - .makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); + Delete delete = + MetaTableAccessor.makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); try (Table t = MetaTableAccessor.getMetaHTable(connection)) { t.delete(delete); } @@ -2282,7 +2261,7 @@ public class PerformanceEvaluation extends Configured implements Tool { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -2300,7 +2279,6 @@ public class PerformanceEvaluation extends Configured implements Tool { static class SequentialWriteTest extends BufferedMutatorTest { private ArrayList puts; - SequentialWriteTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.multiPut > 0) { @@ -2320,7 +2298,7 @@ public class PerformanceEvaluation extends Configured implements Tool { for (int family = 0; family < opts.families; family++) { byte familyName[] = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -2329,8 +2307,8 @@ public class PerformanceEvaluation extends Configured implements Tool { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -2360,7 +2338,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - Insert fake regions into meta table with contiguous split keys. + * Insert fake regions into meta table with contiguous split keys. 
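The meta tests fabricate contiguous regions purely from zero-padded, fixed-width keys, which sort lexicographically in numeric order. A minimal sketch of that derivation, using the same RegionInfoBuilder calls as MetaWriteTest (the table name and key length here are illustrative):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.client.RegionInfoBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class FakeRegionSketch {
    // Zero-padding to a fixed width (e.g. "%020d") keeps keys in byte order: key(i) < key(i + 1).
    static byte[] splitKey(int keyLength, int i) {
      return Bytes.toBytes(String.format("%0" + keyLength + "d", i));
    }

    static RegionInfo fakeRegion(String table, int keyLength, int i) {
      return RegionInfoBuilder.newBuilder(TableName.valueOf(table))
        .setStartKey(splitKey(keyLength, i))
        .setEndKey(splitKey(keyLength, i + 1))
        .build();
    }

    public static void main(String[] args) {
      System.out.println(fakeRegion("TestTable", 20, 42));
    }
  }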
*/ static class MetaWriteTest extends MetaTest { @@ -2372,27 +2350,26 @@ public class PerformanceEvaluation extends Configured implements Tool { boolean testRow(final int i, final long startTime) throws IOException { List regionInfos = new ArrayList(); RegionInfo regionInfo = (RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE_NAME)) - .setStartKey(getSplitKey(i)) - .setEndKey(getSplitKey(i + 1)) - .build()); + .setStartKey(getSplitKey(i)).setEndKey(getSplitKey(i + 1)).build()); regionInfos.add(regionInfo); MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1); // write the serverName columns - MetaTableAccessor.updateRegionLocation(connection, - regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i, + MetaTableAccessor.updateRegionLocation(connection, regionInfo, + ServerName.valueOf("localhost", 60010, rand.nextLong()), i, EnvironmentEdgeManager.currentTime()); return true; } } + static class FilteredScanTest extends TableTest { protected static final Logger LOG = LoggerFactory.getLogger(FilteredScanTest.class.getName()); FilteredScanTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) { - LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + - ". This could take a very long time."); + LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + + ". This could take a very long time."); } } @@ -2417,18 +2394,18 @@ public class PerformanceEvaluation extends Configured implements Tool { protected Scan constructScan(byte[] valuePrefix) throws IOException { FilterList list = new FilterList(); - Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, - CompareOperator.EQUAL, new BinaryComparator(valuePrefix)); + Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, CompareOperator.EQUAL, + new BinaryComparator(valuePrefix)); list.addFilter(filter); if (opts.filterAll) { list.addFilter(new FilterAllFilter()); } Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks) - .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) - .setScanMetricsEnabled(true); + .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) + .setScanMetricsEnabled(true); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(FAMILY_ZERO, qualifier); } } else { @@ -2441,64 +2418,61 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * Compute a throughput rate in MB/s. - * @param rows Number of records consumed. + * @param rows Number of records consumed. * @param timeMs Time taken in milliseconds. 
* @return String value with label, ie '123.76 MB/s' */ - private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, int columns) { - BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH + - ((valueSize + (FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families); + private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, + int columns) { + BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH + + ((valueSize + (FAMILY_NAME_BASE.length() + 1) + COLUMN_ZERO.length) * columns) * families); BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT) - .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT) - .divide(BYTES_PER_MB, CXT); + .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT).divide(BYTES_PER_MB, CXT); return FMT.format(mbps) + " MB/s"; } /* - * Format passed integer. - * @param number - * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed - * number (Does absolute in case number is negative). + * Format passed integer. n * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version + * of passed number (Does absolute in case number is negative). */ - public static byte [] format(final int number) { - byte [] b = new byte[ROW_LENGTH]; + public static byte[] format(final int number) { + byte[] b = new byte[ROW_LENGTH]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } /* - * This method takes some time and is done inline uploading data. For - * example, doing the mapfile test, generation of the key and value - * consumes about 30% of CPU time. + * This method takes some time and is done inline uploading data. For example, doing the mapfile + * test, generation of the key and value consumes about 30% of CPU time. * @return Generated random value to insert into a table cell. 
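The MB/s figure is straightforward arithmetic: an estimated row size times the number of rows, divided by elapsed milliseconds, then rescaled to seconds and mebibytes. A standalone sketch of the same BigDecimal computation (the constants and the 1 KiB row size are illustrative, not the tool's defaults):

  import java.math.BigDecimal;
  import java.math.MathContext;

  public class ThroughputSketch {
    static final MathContext CXT = MathContext.DECIMAL64;
    static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
    static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);

    /** bytesPerRow * rows / timeMs, scaled from per-millisecond to per-second and bytes to MB. */
    static String mbps(int rows, long timeMs, long bytesPerRow) {
      BigDecimal mb = BigDecimal.valueOf(rows).multiply(BigDecimal.valueOf(bytesPerRow), CXT)
        .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
        .divide(BYTES_PER_MB, CXT);
      return mb + " MB/s";
    }

    public static void main(String[] args) {
      // 1,000,000 rows of ~1 KiB written in 60 s comes out to roughly 16 MB/s.
      System.out.println(mbps(1_000_000, 60_000, 1024));
    }
  }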
*/ public static byte[] generateData(final Random r, int length) { - byte [] b = new byte [length]; + byte[] b = new byte[length]; int i; - for(i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); - for(; i < length; i++) { + for (; i < length; i++) { b[i] = a; } return b; } - static byte [] getRandomRow(final Random random, final int totalRows) { + static byte[] getRandomRow(final Random random, final int totalRows) { return format(generateRandomRow(random, totalRows)); } @@ -2507,10 +2481,10 @@ public class PerformanceEvaluation extends Configured implements Tool { } static RunResult runOneClient(final Class cmd, Configuration conf, - Connection con, AsyncConnection asyncCon, TestOptions opts, final Status status) - throws IOException, InterruptedException { - status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " - + opts.perClientRunRows + " rows"); + Connection con, AsyncConnection asyncCon, TestOptions opts, final Status status) + throws IOException, InterruptedException { + status.setStatus( + "Start " + cmd + " at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows"); long totalElapsedTime; final TestBase t; @@ -2518,39 +2492,40 @@ public class PerformanceEvaluation extends Configured implements Tool { if (AsyncTest.class.isAssignableFrom(cmd)) { Class newCmd = (Class) cmd; Constructor constructor = - newCmd.getDeclaredConstructor(AsyncConnection.class, TestOptions.class, Status.class); + newCmd.getDeclaredConstructor(AsyncConnection.class, TestOptions.class, Status.class); t = constructor.newInstance(asyncCon, opts, status); } else { Class newCmd = (Class) cmd; Constructor constructor = - newCmd.getDeclaredConstructor(Connection.class, TestOptions.class, Status.class); + newCmd.getDeclaredConstructor(Connection.class, TestOptions.class, Status.class); t = constructor.newInstance(con, opts, status); } } catch (NoSuchMethodException e) { throw new IllegalArgumentException("Invalid command class: " + cmd.getName() - + ". It does not provide a constructor as described by " - + "the javadoc comment. Available constructors are: " - + Arrays.toString(cmd.getConstructors())); + + ". It does not provide a constructor as described by " + + "the javadoc comment. 
Available constructors are: " + + Arrays.toString(cmd.getConstructors())); } catch (Exception e) { throw new IllegalStateException("Failed to construct command class", e); } totalElapsedTime = t.test(); - status.setStatus("Finished " + cmd + " in " + totalElapsedTime + - "ms at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows" + - " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime, - getAverageValueLength(opts), opts.families, opts.columns) + ")"); + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + opts.startRow + + " for " + opts.perClientRunRows + " rows" + " (" + + calculateMbps((int) (opts.perClientRunRows * opts.sampleRate), totalElapsedTime, + getAverageValueLength(opts), opts.families, opts.columns) + + ")"); return new RunResult(totalElapsedTime, t.numOfReplyOverLatencyThreshold, t.numOfReplyFromReplica, t.getLatencyHistogram()); } private static int getAverageValueLength(final TestOptions opts) { - return opts.valueRandom? opts.valueSize/2: opts.valueSize; + return opts.valueRandom ? opts.valueSize / 2 : opts.valueSize; } - private void runTest(final Class cmd, TestOptions opts) throws IOException, - InterruptedException, ClassNotFoundException, ExecutionException { + private void runTest(final Class cmd, TestOptions opts) + throws IOException, InterruptedException, ClassNotFoundException, ExecutionException { // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do // the TestOptions introspection for us and dump the output in a readable format. LOG.info(cmd.getSimpleName() + " test run options=" + GSON.toJson(opts)); @@ -2592,86 +2567,91 @@ public class PerformanceEvaluation extends Configured implements Tool { System.err.println(" [-D]* "); System.err.println(); System.err.println("General Options:"); - System.err.println(" nomapred Run multiple clients using threads " + - "(rather than use mapreduce)"); - System.err.println(" oneCon all the threads share the same connection. Default: False"); + System.err.println( + " nomapred Run multiple clients using threads " + "(rather than use mapreduce)"); + System.err + .println(" oneCon all the threads share the same connection. Default: False"); System.err.println(" connCount connections all threads share. " - + "For example, if set to 2, then all thread share 2 connection. " - + "Default: depend on oneCon parameter. if oneCon set to true, then connCount=1, " - + "if not, connCount=thread number"); + + "For example, if set to 2, then all thread share 2 connection. " + + "Default: depend on oneCon parameter. if oneCon set to true, then connCount=1, " + + "if not, connCount=thread number"); - System.err.println(" sampleRate Execute test on a sample of total " + - "rows. Only supported by randomRead. Default: 1.0"); - System.err.println(" period Report every 'period' rows: " + - "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows()/10); + System.err.println(" sampleRate Execute test on a sample of total " + + "rows. Only supported by randomRead. Default: 1.0"); + System.err.println(" period Report every 'period' rows: " + + "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows() / 10); System.err.println(" cycles How many times to cycle the test. Defaults: 1."); - System.err.println(" traceRate Enable HTrace spans. Initiate tracing every N rows. " + - "Default: 0"); + System.err.println( + " traceRate Enable HTrace spans. Initiate tracing every N rows. 
" + "Default: 0"); System.err.println(" latency Set to report operation latencies. Default: False"); - System.err.println(" latencyThreshold Set to report number of operations with latency " + - "over lantencyThreshold, unit in millisecond, default 0"); - System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + - " rows have been treated. Default: 0"); - System.err.println(" valueSize Pass value size to use: Default: " - + DEFAULT_OPTS.getValueSize()); - System.err.println(" valueRandom Set if we should vary value size between 0 and " + - "'valueSize'; set on read for stats on size: Default: Not set."); + System.err.println(" latencyThreshold Set to report number of operations with latency " + + "over lantencyThreshold, unit in millisecond, default 0"); + System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + + " rows have been treated. Default: 0"); + System.err + .println(" valueSize Pass value size to use: Default: " + DEFAULT_OPTS.getValueSize()); + System.err.println(" valueRandom Set if we should vary value size between 0 and " + + "'valueSize'; set on read for stats on size: Default: Not set."); System.err.println(" blockEncoding Block encoding to use. Value should be one of " - + Arrays.toString(DataBlockEncoding.values()) + ". Default: NONE"); + + Arrays.toString(DataBlockEncoding.values()) + ". Default: NONE"); System.err.println(); System.err.println("Table Creation / Write Tests:"); System.err.println(" table Alternate table name. Default: 'TestTable'"); - System.err.println(" rows Rows each client runs. Default: " - + DEFAULT_OPTS.getPerClientRunRows() + System.err.println( + " rows Rows each client runs. Default: " + DEFAULT_OPTS.getPerClientRunRows() + ". In case of randomReads and randomSeekScans this could" + " be specified along with --size to specify the number of rows to be scanned within" + " the total range specified by the size."); System.err.println( " size Total size in GiB. Mutually exclusive with --rows for writes and scans" - + ". But for randomReads and randomSeekScans when you use size with --rows you could" - + " use size to specify the end range and --rows" - + " specifies the number of rows within that range. " + "Default: 1.0."); + + ". But for randomReads and randomSeekScans when you use size with --rows you could" + + " use size to specify the end range and --rows" + + " specifies the number of rows within that range. " + "Default: 1.0."); System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); - System.err.println(" flushCommits Used to determine if the test should flush the table. " + - "Default: false"); - System.err.println(" valueZipf Set if we should vary value size between 0 and " + - "'valueSize' in zipf form: Default: Not set."); + System.err.println( + " flushCommits Used to determine if the test should flush the table. " + "Default: false"); + System.err.println(" valueZipf Set if we should vary value size between 0 and " + + "'valueSize' in zipf form: Default: Not set."); System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); System.err.println(" autoFlush Set autoFlush on htable. Default: False"); - System.err.println(" multiPut Batch puts together into groups of N. Only supported " + - "by write. If multiPut is bigger than 0, autoFlush need to set to true. Default: 0"); + System.err.println(" multiPut Batch puts together into groups of N. Only supported " + + "by write. If multiPut is bigger than 0, autoFlush need to set to true. 
Default: 0"); System.err.println(" presplit Create presplit table. If a table with same name exists," - + " it'll be deleted and recreated (instead of verifying count of its existing regions). " - + "Recommended for accurate perf analysis (see guide). Default: disabled"); - System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + - "Default: false"); - System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); + + " it'll be deleted and recreated (instead of verifying count of its existing regions). " + + "Recommended for accurate perf analysis (see guide). Default: disabled"); + System.err.println( + " usetags Writes tags along with KVs. Use with HFile V3. " + "Default: false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table."); System.err.println(" columns Columns to write per row. Default: 1"); - System.err.println(" families Specify number of column families for the table. Default: 1"); + System.err + .println(" families Specify number of column families for the table. Default: 1"); System.err.println(); System.err.println("Read Tests:"); System.err.println(" filterAll Helps to filter out all the rows on the server side" - + " there by not returning any thing back to the client. Helps to check the server side" - + " performance. Uses FilterAllFilter internally. "); - System.err.println(" multiGet Batch gets together into groups of N. Only supported " + - "by randomRead. Default: disabled"); - System.err.println(" inmemory Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served " + - "from memory. Default: false"); - System.err.println(" bloomFilter Bloom filter type, one of " - + Arrays.toString(BloomType.values())); + + " there by not returning any thing back to the client. Helps to check the server side" + + " performance. Uses FilterAllFilter internally. "); + System.err.println(" multiGet Batch gets together into groups of N. Only supported " + + "by randomRead. Default: disabled"); + System.err.println(" inmemory Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served " + + "from memory. Default: false"); + System.err + .println(" bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values())); System.err.println(" blockSize Blocksize to use when writing out hfiles. "); - System.err.println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " + System.err + .println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " + "Uses the CompactingMemstore"); System.err.println(" addColumns Adds columns to scans/gets explicitly. Default: true"); System.err.println(" replicas Enable region replica testing. Defaults: 1."); - System.err.println(" randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); + System.err.println( + " randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); System.err.println(" caching Scan caching to use. Default: 30"); System.err.println(" asyncPrefetch Enable asyncPrefetch for scan"); System.err.println(" cacheBlocks Set the cacheBlocks option for scan. 
Default: true"); - System.err.println(" scanReadType Set the readType option for scan, stream/pread/default. Default: default"); + System.err.println( + " scanReadType Set the readType option for scan, stream/pread/default. Default: default"); System.err.println(" bufferSize Set the value of client side buffering. Default: 2MB"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. "); @@ -2686,7 +2666,7 @@ public class PerformanceEvaluation extends Configured implements Tool { System.err.println(); System.err.println("Args:"); System.err.println(" nclients Integer. Required. Total number of clients " - + "(and HRegionServers) running. 1 <= value <= 500"); + + "(and HRegionServers) running. 1 <= value <= 500"); System.err.println("Examples:"); System.err.println(" To run a single client doing the default 1M sequentialWrites:"); System.err.println(" $ hbase " + shortName + " sequentialWrite 1"); @@ -2695,10 +2675,10 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Parse options passed in via an arguments array. Assumes that array has been split - * on white-space and placed into a {@code Queue}. Any unknown arguments will remain - * in the queue at the conclusion of this method call. It's up to the caller to deal - * with these unrecognized arguments. + * Parse options passed in via an arguments array. Assumes that array has been split on + * white-space and placed into a {@code Queue}. Any unknown arguments will remain in the queue at + * the conclusion of this method call. It's up to the caller to deal with these unrecognized + * arguments. */ static TestOptions parseOpts(Queue args) { TestOptions opts = new TestOptions(); @@ -2887,7 +2867,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } final String blockSize = "--blockSize="; - if(cmd.startsWith(blockSize) ) { + if (cmd.startsWith(blockSize)) { opts.blockSize = Integer.parseInt(cmd.substring(blockSize.length())); continue; } @@ -2925,7 +2905,7 @@ public class PerformanceEvaluation extends Configured implements Tool { final String inMemoryCompaction = "--inmemoryCompaction="; if (cmd.startsWith(inMemoryCompaction)) { opts.inMemoryCompaction = - MemoryCompactionPolicy.valueOf(cmd.substring(inMemoryCompaction.length())); + MemoryCompactionPolicy.valueOf(cmd.substring(inMemoryCompaction.length())); continue; } @@ -2962,7 +2942,7 @@ public class PerformanceEvaluation extends Configured implements Tool { final String scanReadType = "--scanReadType="; if (cmd.startsWith(scanReadType)) { opts.scanReadType = - Scan.ReadType.valueOf(cmd.substring(scanReadType.length()).toUpperCase()); + Scan.ReadType.valueOf(cmd.substring(scanReadType.length()).toUpperCase()); continue; } @@ -2996,17 +2976,17 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts - */ - private static void validateParsedOpts(TestOptions opts) { + * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts + */ + private static void validateParsedOpts(TestOptions opts) { if (!opts.autoFlush && opts.multiPut > 0) { throw new IllegalArgumentException("autoFlush must be true when multiPut is more than 0"); } if (opts.oneCon && opts.connCount > 1) { - throw new IllegalArgumentException("oneCon is set to true, " - + "connCount should not bigger than 1"); + throw new IllegalArgumentException( + "oneCon is set to true, 
" + "connCount should not bigger than 1"); } if (opts.valueZipf && opts.valueRandom) { @@ -3016,10 +2996,11 @@ public class PerformanceEvaluation extends Configured implements Tool { static TestOptions calculateRowsAndSize(final TestOptions opts) { int rowsPerGB = getRowsPerGB(opts); - if ((opts.getCmdName() != null + if ( + (opts.getCmdName() != null && (opts.getCmdName().equals(RANDOM_READ) || opts.getCmdName().equals(RANDOM_SEEK_SCAN))) - && opts.size != DEFAULT_OPTS.size - && opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) { + && opts.size != DEFAULT_OPTS.size && opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows + ) { opts.totalRows = (int) opts.size * rowsPerGB; } else if (opts.size != DEFAULT_OPTS.size) { // total size in GB specified @@ -3033,8 +3014,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } static int getRowsPerGB(final TestOptions opts) { - return ONE_GB / ((opts.valueRandom? opts.valueSize/2: opts.valueSize) * opts.getFamilies() * - opts.getColumns()); + return ONE_GB / ((opts.valueRandom ? opts.valueSize / 2 : opts.valueSize) * opts.getFamilies() + * opts.getColumns()); } @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java index ca2813012a9..af39a7521cd 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -49,8 +48,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** - * A simple performance evaluation tool for single client and MR scans - * and snapshot scans. + * A simple performance evaluation tool for single client and MR scans and snapshot scans. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ScanPerformanceEvaluation extends AbstractHBaseTool { @@ -78,7 +76,8 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { @Override protected void addOptions() { - this.addRequiredOptWithArg("t", "type", "the type of the test. One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); + this.addRequiredOptWithArg("t", "type", + "the type of the test. 
One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); this.addOptWithArg("f", "file", "the filename to read from"); this.addOptWithArg("tn", "table", "the tablename to read from"); this.addOptWithArg("sn", "snapshot", "the snapshot name to read from"); @@ -119,15 +118,15 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { } streamTimer.stop(); - double throughput = (double)totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); System.out.println("HDFS streaming: "); - System.out.println("total time to open: " + - fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out + .println("total time to open: " + fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to read: " + streamTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throghput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throghput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); } private Scan getScan() { @@ -176,30 +175,30 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { ScanMetrics metrics = scan.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan: "); - System.out.println("total time to open table: " + - tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open table: " + tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + 
StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScan() throws IOException { Stopwatch snapshotRestoreTimer = Stopwatch.createUnstarted(); Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); @@ -233,27 +232,28 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { ScanMetrics metrics = scanner.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan snapshot: "); - System.out.println("total time to restore snapshot: " + - snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to restore snapshot: " + + snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } @@ -264,9 +264,8 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { public static class MyMapper extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, - InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { context.getCounter(ScanCounter.NUM_ROWS).increment(1); context.getCounter(ScanCounter.NUM_CELLS).increment(value.rawCells().length); } @@ -285,14 +284,8 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob( - this.tablename, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job - ); + 
TableMapReduceUtil.initTableMapperJob(this.tablename, scan, MyMapper.class, NullWritable.class, + NullWritable.class, job); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -308,25 +301,28 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException { + public void testSnapshotScanMapReduce() + throws IOException, InterruptedException, ClassNotFoundException { Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); Stopwatch scanTimer = Stopwatch.createUnstarted(); @@ -339,16 +335,8 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { job.setJarByClass(getClass()); - TableMapReduceUtil.initTableSnapshotMapperJob( - this.snapshotName, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job, - true, - new Path(restoreDir) - ); + TableMapReduceUtil.initTableSnapshotMapperJob(this.snapshotName, scan, MyMapper.class, + NullWritable.class, NullWritable.class, job, true, new Path(restoreDir)); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -364,29 +352,31 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double 
throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } @Override protected int doWork() throws Exception { if (type.equals("streaming")) { testHdfsStreaming(new Path(file)); - } else if (type.equals("scan")){ + } else if (type.equals("scan")) { testScan(); } else if (type.equals("snapshotscan")) { testSnapshotScan(); @@ -398,7 +388,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { return 0; } - public static void main (String[] args) throws Exception { + public static void main(String[] args) throws Exception { int ret = ToolRunner.run(HBaseConfiguration.create(), new ScanPerformanceEvaluation(), args); System.exit(ret); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java index 475960dde35..f3718118030 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,6 @@ import java.util.NoSuchElementException; import java.util.Queue; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -54,24 +53,23 @@ import org.junit.experimental.categories.Category; import org.apache.hbase.thirdparty.com.google.gson.Gson; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPerformanceEvaluation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPerformanceEvaluation.class); + HBaseClassTestRule.forClass(TestPerformanceEvaluation.class); private static final HBaseTestingUtility HTU = new HBaseTestingUtility(); @Test public void testDefaultInMemoryCompaction() { - PerformanceEvaluation.TestOptions defaultOpts = - new PerformanceEvaluation.TestOptions(); + PerformanceEvaluation.TestOptions defaultOpts = new PerformanceEvaluation.TestOptions(); assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, - defaultOpts.getInMemoryCompaction().toString()); + defaultOpts.getInMemoryCompaction().toString()); HTableDescriptor htd = PerformanceEvaluation.getTableDescriptor(defaultOpts); - for (HColumnDescriptor hcd: htd.getFamilies()) { + for (HColumnDescriptor hcd : htd.getFamilies()) { assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, - hcd.getInMemoryCompaction().toString()); + hcd.getInMemoryCompaction().toString()); } } @@ -170,7 +168,7 @@ public class TestPerformanceEvaluation { @Test public void testZipfian() throws NoSuchMethodException, SecurityException, InstantiationException, - IllegalAccessException, IllegalArgumentException, InvocationTargetException { + IllegalAccessException, IllegalArgumentException, InvocationTargetException { TestOptions opts = new PerformanceEvaluation.TestOptions(); opts.setValueZipf(true); final int valueSize = 1024; @@ -179,7 +177,7 @@ public class TestPerformanceEvaluation { Constructor ctor = Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class); ctor.setAccessible(true); - Histogram histogram = (Histogram)ctor.newInstance(new UniformReservoir(1024 * 500)); + Histogram histogram = (Histogram) ctor.newInstance(new UniformReservoir(1024 * 500)); for (int i = 0; i < 100; i++) { histogram.update(rrt.getValueLength(null)); } @@ -258,7 +256,7 @@ public class TestPerformanceEvaluation { System.out.println(e.getMessage()); } - //Re-create options + // Re-create options opts = new LinkedList<>(); opts.offer("--autoFlush=true"); opts.offer("--multiPut=10"); @@ -342,7 +340,7 @@ public class TestPerformanceEvaluation { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalStateException e) { + } catch (IllegalStateException e) { System.out.println(e.getMessage()); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java index 327b7afec2f..6c49a43bf46 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,18 +29,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestDriver { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDriver.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDriver.class); @Test public void testDriverMainMethod() throws Throwable { ProgramDriver programDriverMock = mock(ProgramDriver.class); Driver.setProgramDriver(programDriverMock); - Driver.main(new String[]{}); + Driver.main(new String[] {}); verify(programDriverMock).driver(Mockito.any()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java index 12db348ba8b..b35b4dbd826 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,17 +49,16 @@ import org.junit.experimental.categories.Category; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMap { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGroupingTableMap.class); + HBaseClassTestRule.forClass(TestGroupingTableMap.class); @Test @SuppressWarnings({ "deprecation", "unchecked" }) - public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() - throws Exception { + public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() throws Exception { GroupingTableMap gTableMap = null; try { Result result = mock(Result.class); @@ -71,19 +70,18 @@ public class TestGroupingTableMap { gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("2222")), - new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("3333"))); + List keyValues = ImmutableList. 
of( + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("2222")), + new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = - mock(OutputCollector.class); + mock(OutputCollector.class); gTableMap.map(null, result, outputCollectorMock, reporter); verify(result).listCells(); verifyZeroInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -101,21 +99,19 @@ public class TestGroupingTableMap { gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), - new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("2222")), - new KeyValue(row, "familyC".getBytes(), "qualifierC".getBytes(), Bytes.toBytes("3333"))); + List keyValues = ImmutableList. of( + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), + new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("2222")), + new KeyValue(row, "familyC".getBytes(), "qualifierC".getBytes(), Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = - mock(OutputCollector.class); + mock(OutputCollector.class); gTableMap.map(null, result, outputCollectorMock, reporter); verify(result).listCells(); - verify(outputCollectorMock, times(1)) - .collect(any(), any()); + verify(outputCollectorMock, times(1)).collect(any(), any()); verifyNoMoreInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -136,22 +132,21 @@ public class TestGroupingTableMap { final byte[] firstPartKeyValue = Bytes.toBytes("34879512738945"); final byte[] secondPartKeyValue = Bytes.toBytes("35245142671437"); byte[] row = {}; - List cells = ImmutableList.of( - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), firstPartKeyValue), - new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), secondPartKeyValue)); + List cells = ImmutableList. of( + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), firstPartKeyValue), + new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), secondPartKeyValue)); when(result.listCells()).thenReturn(cells); final AtomicBoolean outputCollected = new AtomicBoolean(); OutputCollector outputCollector = - new OutputCollector() { - @Override - public void collect(ImmutableBytesWritable arg, Result result) throws IOException { - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. 
- Bytes.concat(firstPartKeyValue, bSeparator, - secondPartKeyValue), arg.copyBytes()); - outputCollected.set(true); - } - }; + new OutputCollector() { + @Override + public void collect(ImmutableBytesWritable arg, Result result) throws IOException { + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartKeyValue, bSeparator, secondPartKeyValue), arg.copyBytes()); + outputCollected.set(true); + } + }; gTableMap.map(null, result, outputCollector, reporter); verify(result).listCells(); @@ -161,12 +156,10 @@ public class TestGroupingTableMap { final byte[] secondPartValue = Bytes.toBytes("4678456942345"); byte[][] data = { firstPartValue, secondPartValue }; ImmutableBytesWritable byteWritable = gTableMap.createGroupKey(data); - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. - Bytes.concat(firstPartValue, - bSeparator, secondPartValue), byteWritable.get()); + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartValue, bSeparator, secondPartValue), byteWritable.get()); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -178,8 +171,7 @@ public class TestGroupingTableMap { gTableMap = new GroupingTableMap(); assertNull(gTableMap.createGroupKey(null)); } finally { - if(gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java index 25576c1ef42..96e25b51f65 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestIdentityTableMap { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIdentityTableMap.class); + HBaseClassTestRule.forClass(TestIdentityTableMap.class); @Test @SuppressWarnings({ "deprecation", "unchecked" }) @@ -52,17 +52,14 @@ public class TestIdentityTableMap { identityTableMap = new IdentityTableMap(); ImmutableBytesWritable bytesWritableMock = mock(ImmutableBytesWritable.class); OutputCollector outputCollectorMock = - mock(OutputCollector.class); + mock(OutputCollector.class); for (int i = 0; i < recordNumber; i++) - identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, - reporterMock); + identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, reporterMock); - verify(outputCollectorMock, times(recordNumber)).collect( - Mockito.any(), Mockito.any()); + verify(outputCollectorMock, times(recordNumber)).collect(Mockito.any(), Mockito.any()); } finally { - if (identityTableMap != null) - identityTableMap.close(); + if (identityTableMap != null) identityTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java index 1dd3e69f977..c042bd35a56 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,18 +46,18 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestMultiTableSnapshotInputFormat - extends org.apache.hadoop.hbase.mapreduce.TestMultiTableSnapshotInputFormat { + extends org.apache.hadoop.hbase.mapreduce.TestMultiTableSnapshotInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); private static final Logger LOG = - LoggerFactory.getLogger(TestMultiTableSnapshotInputFormat.class); + LoggerFactory.getLogger(TestMultiTableSnapshotInputFormat.class); @Override protected void runJob(String jobName, Configuration c, List scans) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { JobConf job = new JobConf(TEST_UTIL.getConfiguration()); job.setJobName(jobName); @@ -65,7 +65,7 @@ public class TestMultiTableSnapshotInputFormat job.setReducerClass(Reducer.class); TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); TableMapReduceUtil.addDependencyJars(job); @@ -81,21 +81,19 @@ public class TestMultiTableSnapshotInputFormat } public static class Mapper extends TestMultiTableSnapshotInputFormat.ScanMapper - implements TableMap { + implements TableMap { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector outputCollector, - Reporter reporter) throws IOException { + OutputCollector outputCollector, + Reporter reporter) throws IOException { makeAssertions(key, value); outputCollector.collect(key, key); } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. * @throws IOException if an I/O error occurs */ @Override @@ -108,24 +106,22 @@ public class TestMultiTableSnapshotInputFormat } } - public static class Reducer extends TestMultiTableSnapshotInputFormat.ScanReducer implements - org.apache.hadoop.mapred.Reducer { + public static class Reducer extends TestMultiTableSnapshotInputFormat.ScanReducer + implements org.apache.hadoop.mapred.Reducer { private JobConf jobConf; @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector outputCollector, Reporter reporter) - throws IOException { + OutputCollector outputCollector, Reporter reporter) + throws IOException { makeAssertions(key, Lists.newArrayList(values)); } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. 
* @throws IOException if an I/O error occurs */ @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java index 13913e5fc24..dc55ff977dc 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,12 +45,12 @@ import org.mockito.Mockito; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestRowCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowCounter.class); + HBaseClassTestRule.forClass(TestRowCounter.class); @Test @SuppressWarnings("deprecation") @@ -68,8 +68,7 @@ public class TestRowCounter { @Test @SuppressWarnings("deprecation") - public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() - throws Exception { + public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() throws Exception { final String[] args = new String[] { "one", "two" }; String line = "ERROR: Wrong number of parameters: " + args.length; String result = new OutputReader(System.err) { @@ -90,10 +89,9 @@ public class TestRowCounter { Reporter reporter = mock(Reporter.class); for (int i = 0; i < iterationNumber; i++) mapper.map(mock(ImmutableBytesWritable.class), mock(Result.class), - mock(OutputCollector.class), reporter); + mock(OutputCollector.class), reporter); - Mockito.verify(reporter, times(iterationNumber)).incrCounter( - any(), anyLong()); + Mockito.verify(reporter, times(iterationNumber)).incrCounter(any(), anyLong()); } @Test @@ -101,8 +99,7 @@ public class TestRowCounter { public void shouldCreateAndRunSubmittableJob() throws Exception { RowCounter rCounter = new RowCounter(); rCounter.setConf(HBaseConfiguration.create()); - String[] args = new String[] { "\temp", "tableA", "column1", "column2", - "column3" }; + String[] args = new String[] { "\temp", "tableA", "column1", "column2", "column3" }; JobConf jobConfig = rCounter.createSubmittableJob(args); assertNotNull(jobConfig); @@ -110,13 +107,14 @@ public class TestRowCounter { assertEquals("rowcounter", jobConfig.getJobName()); assertEquals(jobConfig.getMapOutputValueClass(), Result.class); assertEquals(jobConfig.getMapperClass(), RowCounterMapper.class); - assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), Joiner.on(' ') - .join("column1", "column2", "column3")); + assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), + Joiner.on(' ').join("column1", "column2", "column3")); assertEquals(jobConfig.getMapOutputKeyClass(), ImmutableBytesWritable.class); } enum Outs { - OUT, ERR + OUT, + ERR } private static abstract class OutputReader { @@ -147,17 +145,16 @@ public class TestRowCounter { return new String(outBytes.toByteArray()); } finally { switch (outs) { - case OUT: { - System.setOut(oldPrintStream); - break; - } - case ERR: { - System.setErr(oldPrintStream); - break; - } - default: - throw new IllegalStateException( - "OutputReader: unsupported PrintStream"); + case OUT: { + System.setOut(oldPrintStream); + break; + } + case ERR: { + System.setErr(oldPrintStream); + break; 
+ } + default: + throw new IllegalStateException("OutputReader: unsupported PrintStream"); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java index 7b097d264ce..61c678544a6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,27 +33,27 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestSplitTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitTable.class); + HBaseClassTestRule.forClass(TestSplitTable.class); @Rule public TestName name = new TestName(); @Test - @SuppressWarnings({"deprecation", "SelfComparison"}) + @SuppressWarnings({ "deprecation", "SelfComparison" }) public void testSplitTableCompareTo() { - TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("aaa"), Bytes.toBytes("ddd"), "locationA"); + TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("aaa"), + Bytes.toBytes("ddd"), "locationA"); - TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("iii"), Bytes.toBytes("kkk"), "locationA"); + TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("iii"), + Bytes.toBytes("kkk"), "locationA"); - TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("lll"), Bytes.toBytes("zzz"), "locationA"); + TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("lll"), + Bytes.toBytes("zzz"), "locationA"); assertEquals(0, aTableSplit.compareTo(aTableSplit)); assertEquals(0, bTableSplit.compareTo(bTableSplit)); @@ -105,18 +105,15 @@ public class TestSplitTable { @Test @SuppressWarnings("deprecation") public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), "row-start".getBytes(), "row-end".getBytes(), - "location"); - String str = - "HBase table split(table name: " + name.getMethodName() + ", start row: row-start, " - + "end row: row-end, region location: location)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + "row-start".getBytes(), "row-end".getBytes(), "location"); + String str = "HBase table split(table name: " + name.getMethodName() + + ", start row: row-start, " + "end row: row-end, region location: location)"; Assert.assertEquals(str, split.toString()); split = new TableSplit((TableName) null, null, null, null); - str = - "HBase table split(table name: null, start row: null, " - + "end row: null, region location: null)"; + str = "HBase table split(table name: null, start row: null, " + + "end row: null, region location: null)"; Assert.assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java index 5ad1adef6a2..f9891067e6b 100644 --- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,12 +75,12 @@ import org.slf4j.LoggerFactory; /** * This tests the TableInputFormat and its recovery semantics */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormat.class); + HBaseClassTestRule.forClass(TestTableInputFormat.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableInputFormat.class); @@ -109,21 +109,16 @@ public class TestTableInputFormat { /** * Setup a table with two rows and values. - * * @param tableName the name of the table to create - * @return A Table instance for the created table. - * @throws IOException + * @return A Table instance for the created table. n */ public static Table createTable(byte[] tableName) throws IOException { return createTable(tableName, new byte[][] { FAMILY }); } /** - * Setup a table with two rows and values per column family. - * - * @param tableName - * @return A Table instance for the created table. - * @throws IOException + * Setup a table with two rows and values per column family. n * @return A Table instance for the + * created table. n */ public static Table createTable(byte[] tableName, byte[][] families) throws IOException { Table table = UTIL.createTable(TableName.valueOf(tableName), families); @@ -142,15 +137,14 @@ public class TestTableInputFormat { /** * Verify that the result and key have expected values. - * - * @param r single row result - * @param key the row key - * @param expectedKey the expected key + * @param r single row result + * @param key the row key + * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -159,15 +153,11 @@ public class TestTableInputFormat { } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapred API. - * - * @param table - * @throws IOException + * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. 
nn */ static void runTestMapred(Table table) throws IOException { org.apache.hadoop.hbase.mapred.TableRecordReader trr = - new org.apache.hadoop.hbase.mapred.TableRecordReader(); + new org.apache.hadoop.hbase.mapred.TableRecordReader(); trr.setStartRow("aaa".getBytes()); trr.setEndRow("zzz".getBytes()); trr.setHTable(table); @@ -191,12 +181,9 @@ public class TestTableInputFormat { } /** - * Create a table that IOE's on first scanner next call - * - * @throws IOException + * Create a table that IOE's on first scanner next call n */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -225,13 +212,9 @@ public class TestTableInputFormat { } /** - * Create a table that throws a DoNoRetryIOException on first scanner next - * call - * - * @throws IOException + * Create a table that throws a DoNoRetryIOException on first scanner next call n */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -246,9 +229,8 @@ public class TestTableInputFormat { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) - .when(scanner).next(); + doThrow(new NotServingRegionException("Injected simulated TimeoutException")) + .when(scanner).next(); return scanner; } @@ -263,9 +245,7 @@ public class TestTableInputFormat { } /** - * Run test assuming no errors using mapred api. - * - * @throws IOException + * Run test assuming no errors using mapred api. n */ @Test public void testTableRecordReader() throws IOException { @@ -274,9 +254,7 @@ public class TestTableInputFormat { } /** - * Run test assuming Scanner IOException failure using mapred api, - * - * @throws IOException + * Run test assuming Scanner IOException failure using mapred api, n */ @Test public void testTableRecordReaderScannerFail() throws IOException { @@ -285,9 +263,7 @@ public class TestTableInputFormat { } /** - * Run test assuming Scanner IOException failure using mapred api, - * - * @throws IOException + * Run test assuming Scanner IOException failure using mapred api, n */ @Test(expected = IOException.class) public void testTableRecordReaderScannerFailTwice() throws IOException { @@ -297,7 +273,6 @@ public class TestTableInputFormat { /** * Run test assuming NotServingRegionException using mapred api. - * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test @@ -308,7 +283,6 @@ public class TestTableInputFormat { /** * Run test assuming NotServingRegionException using mapred api. 
- * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) @@ -330,8 +304,8 @@ public class TestTableInputFormat { @Test public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "as it was given in 0.98."); + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "as it was given in 0.98."); final Table table = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); @@ -339,8 +313,8 @@ public class TestTableInputFormat { @Test public void testJobConfigurableExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "using JobConfigurable."); + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "using JobConfigurable."); final Table table = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -357,17 +331,17 @@ public class TestTableInputFormat { final RunningJob run = JobClient.runJob(job); assertTrue("job failed!", run.isSuccessful()); assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter()); assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter()); assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter()); assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter()); assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter()); assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter()); } public static class ExampleVerifier implements TableMap { @@ -378,18 +352,20 @@ public class TestTableInputFormat { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { for (Cell cell : value.listCells()) { - reporter.getCounter(TestTableInputFormat.class.getName() + ":row", + reporter + .getCounter(TestTableInputFormat.class.getName() + ":row", Bytes.toString(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength())) - .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":family", + .increment(1l); + reporter + .getCounter(TestTableInputFormat.class.getName() + ":family", Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) - .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":value", + .increment(1l); + reporter + .getCounter(TestTableInputFormat.class.getName() + ":value", Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) - .increment(1l); + .increment(1l); } } @@ -408,8 +384,7 @@ public class TestTableInputFormat { Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable")); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = @@ -440,7 +415,6 @@ public class TestTableInputFormat { } } - public static class ExampleTIF extends TableInputFormatBase { @Override @@ -453,8 +427,7 @@ public class TestTableInputFormat { TableName tableName = TableName.valueOf(table); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = @@ -466,4 +439,3 @@ public class TestTableInputFormat { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java index e3684761306..2820d911127 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,36 +43,35 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. 
*/ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) @SuppressWarnings("deprecation") public class TestTableMapReduce extends TestTableMapReduceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduce.class); + HBaseClassTestRule.forClass(TestTableMapReduce.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestTableMapReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class.getName()); - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce */ - static class ProcessContentsMapper extends MapReduceBase implements - TableMap { + static class ProcessContentsMapper extends MapReduceBase + implements TableMap { /** * Pass the key, and reversed value to reduce */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { output.collect(key, TestTableMapReduceBase.map(key, value)); } } @@ -86,8 +85,8 @@ public class TestTableMapReduce extends TestTableMapReduceBase { jobConf.setJobName("process column contents"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(table.getName().getNameAsString(), - Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, - ImmutableBytesWritable.class, Put.class, jobConf); + Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, ImmutableBytesWritable.class, + Put.class, jobConf); TableMapReduceUtil.initTableReduceJob(table.getName().getNameAsString(), IdentityTableReduce.class, jobConf); @@ -105,4 +104,3 @@ public class TestTableMapReduce extends TestTableMapReduceBase { } } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index fe160062669..01a24fda6f3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,15 +57,14 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableMapReduceUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); + HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestTableMapReduceUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduceUtil.class); private static Table presidentsTable; private static final String TABLE_NAME = "People"; @@ -73,20 +72,19 @@ public class TestTableMapReduceUtil { private static final byte[] COLUMN_FAMILY = Bytes.toBytes("info"); private static final byte[] COLUMN_QUALIFIER = Bytes.toBytes("name"); - private static ImmutableSet presidentsRowKeys = ImmutableSet.of( - "president1", "president2", "president3"); - private static Iterator presidentNames = ImmutableSet.of( - "John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); + private static ImmutableSet presidentsRowKeys = + ImmutableSet.of("president1", "president2", "president3"); + private static Iterator presidentNames = + ImmutableSet.of("John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); - private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", - "actor2"); - private static Iterator actorNames = ImmutableSet.of( - "Jack Nicholson", "Martin Freeman").iterator(); + private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", "actor2"); + private static Iterator actorNames = + ImmutableSet.of("Jack Nicholson", "Martin Freeman").iterator(); private static String PRESIDENT_PATTERN = "president"; private static String ACTOR_PATTERN = "actor"; - private static ImmutableMap> relation = ImmutableMap - .of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); + private static ImmutableMap> relation = + ImmutableMap.of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -133,12 +131,11 @@ public class TestTableMapReduceUtil { } /** - * Check what the given number of reduce tasks for the given job configuration - * does not exceed the number of regions for the given table. + * Check what the given number of reduce tasks for the given job configuration does not exceed the + * number of regions for the given table. 
*/ @Test - public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Assert.assertNotNull(presidentsTable); Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); @@ -155,8 +152,7 @@ public class TestTableMapReduceUtil { } @Test - public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, jobConf); @@ -178,49 +174,42 @@ public class TestTableMapReduceUtil { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @Test @SuppressWarnings("deprecation") - public void shoudBeValidMapReduceWithPartitionerEvaluation() - throws IOException { + public void shoudBeValidMapReduceWithPartitionerEvaluation() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(2); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf, + HRegionPartitioner.class); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @SuppressWarnings("deprecation") - static class ClassificatorRowReduce extends MapReduceBase implements - TableReduce { + static class ClassificatorRowReduce extends MapReduceBase + implements TableReduce { @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { String strKey = Bytes.toString(key.get()); List result = new ArrayList<>(); while (values.hasNext()) @@ -244,18 +233,17 @@ public class TestTableMapReduceUtil { } @SuppressWarnings("deprecation") - static class ClassificatorMapper extends MapReduceBase implements - TableMap { + static class ClassificatorMapper extends MapReduceBase + implements TableMap { @Override public void map(ImmutableBytesWritable row, Result result, - OutputCollector outCollector, - Reporter reporter) 
throws IOException { + OutputCollector outCollector, Reporter reporter) + throws IOException { String rowKey = Bytes.toString(result.getRow()); - final ImmutableBytesWritable pKey = new ImmutableBytesWritable( - Bytes.toBytes(PRESIDENT_PATTERN)); - final ImmutableBytesWritable aKey = new ImmutableBytesWritable( - Bytes.toBytes(ACTOR_PATTERN)); + final ImmutableBytesWritable pKey = + new ImmutableBytesWritable(Bytes.toBytes(PRESIDENT_PATTERN)); + final ImmutableBytesWritable aKey = new ImmutableBytesWritable(Bytes.toBytes(ACTOR_PATTERN)); ImmutableBytesWritable outKey = null; if (rowKey.startsWith(PRESIDENT_PATTERN)) { @@ -266,11 +254,9 @@ public class TestTableMapReduceUtil { throw new AssertionError("unexpected rowKey"); } - String name = Bytes.toString(result.getValue(COLUMN_FAMILY, - COLUMN_QUALIFIER)); - outCollector.collect(outKey, - new Put(Bytes.toBytes("rowKey2")) - .addColumn(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name))); + String name = Bytes.toString(result.getValue(COLUMN_FAMILY, COLUMN_QUALIFIER)); + outCollector.collect(outKey, new Put(Bytes.toBytes("rowKey2")).addColumn(COLUMN_FAMILY, + COLUMN_QUALIFIER, Bytes.toBytes(name))); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java index 746ac532ac9..1f5f7174c28 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,20 +37,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Spark creates many instances of TableOutputFormat within a single process. We need to make - * sure we can have many instances and not leak connections. - * - * This test creates a few TableOutputFormats and shouldn't fail due to ZK connection exhaustion. + * Spark creates many instances of TableOutputFormat within a single process. We need to make sure + * we can have many instances and not leak connections. This test creates a few TableOutputFormats + * and shouldn't fail due to ZK connection exhaustion. */ @Category(MediumTests.class) public class TestTableOutputFormatConnectionExhaust { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableOutputFormatConnectionExhaust.class); + HBaseClassTestRule.forClass(TestTableOutputFormatConnectionExhaust.class); private static final Logger LOG = - LoggerFactory.getLogger(TestTableOutputFormatConnectionExhaust.class); + LoggerFactory.getLogger(TestTableOutputFormatConnectionExhaust.class); private final static HBaseTestingUtility UTIL = new HBaseTestingUtility(); static final String TABLE = "TestTableOutputFormatConnectionExhaust"; @@ -77,16 +76,16 @@ public class TestTableOutputFormatConnectionExhaust { } /** - * Open and close a TableOutputFormat. The closing the RecordWriter should release HBase + * Open and close a TableOutputFormat. The closing the RecordWriter should release HBase * Connection (ZK) resources, and will throw exception if they are exhausted. 
*/ - static void openCloseTableOutputFormat(int iter) throws IOException { + static void openCloseTableOutputFormat(int iter) throws IOException { LOG.info("Instantiating TableOutputFormat connection " + iter); JobConf conf = new JobConf(); conf.addResource(UTIL.getConfiguration()); conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE); - TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, conf); + TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, conf); TableOutputFormat tof = new TableOutputFormat(); RecordWriter rw = tof.getRecordWriter(null, conf, TABLE, null); rw.close(null); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java index 9e05a85c480..756d41fc31e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,12 +53,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); private static final byte[] aaa = Bytes.toBytes("aaa"); private static final byte[] after_zzz = Bytes.toBytes("zz{"); // 'z' + 1 => '{' @@ -79,25 +79,24 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa } static class TestTableSnapshotMapper extends MapReduceBase - implements TableMap { + implements TableMap { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector collector, Reporter reporter) - throws IOException { + OutputCollector collector, Reporter reporter) + throws IOException { verifyRowFromMap(key, value); collector.collect(key, NullWritable.get()); } } public static class TestTableSnapshotReducer extends MapReduceBase - implements Reducer { + implements Reducer { HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(aaa, after_zzz); @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector collector, Reporter reporter) - throws IOException { + OutputCollector collector, Reporter reporter) throws IOException { rowTracker.addRow(key.get()); } @@ -117,19 +116,17 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa JobConf job = new JobConf(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, 
NullWritable.class, job, false, + tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. - Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -142,10 +139,9 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa @Test @Override public void testWithMockedMapReduceMultiRegion() throws Exception { - testWithMockedMapReduce( - UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); - // It does not matter whether true or false is given to setLocalityEnabledTo, - // because it is not read in testWithMockedMapReduce(). + testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); + // It does not matter whether true or false is given to setLocalityEnabledTo, + // because it is not read in testWithMockedMapReduce(). } @Test @@ -163,21 +159,19 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa @Override public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, - String snapshotName, Path tmpTableDir) throws Exception { + String snapshotName, Path tmpTableDir) throws Exception { JobConf job = new JobConf(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class, + ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir); } @Override protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) - throws Exception { + int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) + throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); JobConf job = new JobConf(util.getConfiguration()); // setLocalityEnabledTo is ignored no matter what is specified, so as to test the case that @@ -186,14 +180,13 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, 
tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } // mapred doesn't support start and end keys? o.O @@ -206,7 +199,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa } private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expectedNumSplits, - byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { + byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); InputSplit[] splits = tsif.getSplits(job, 0); @@ -226,7 +219,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa if (localityEnabled) { // When localityEnabled is true, meant to verify split.getLocations() // by the following statement: - // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); + // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); // However, getLocations() of some splits could return an empty array (length is 0), // so drop the verification on length. // TODO: investigate how to verify split.getLocations() when localityEnabled is true @@ -257,18 +250,18 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa @Override protected void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableName, - String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, - int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, + int expectedNumSplits, boolean shutdownCluster) throws Exception { doTestWithMapReduce(util, tableName, snapshotName, getStartRow(), getEndRow(), tableDir, numRegions, numSplitsPerRegion, expectedNumSplits, shutdownCluster); } // this is also called by the IntegrationTestTableSnapshotInputFormat public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, - int numSplitsPerRegion,int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { - //create the table and snapshot + // create the table and snapshot createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions); if (shutdownCluster) { @@ -283,15 +276,14 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(jobConf, TestTableSnapshotInputFormat.class); - if(numSplitsPerRegion > 1) { + if (numSplitsPerRegion > 1) { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir, new 
RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir); } jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index 8df7a6c7470..285b3339373 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; @@ -79,8 +78,7 @@ public abstract class MultiTableInputFormatTestBase { // create and fill table for (String tableName : TABLES) { try (Table table = - TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), - INPUT_FAMILY, 4)) { + TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), INPUT_FAMILY, 4)) { TEST_UTIL.loadTable(table, INPUT_FAMILY, false); } } @@ -100,19 +98,18 @@ public abstract class MultiTableInputFormatTestBase { /** * Pass the key and value to reducer. */ - public static class ScanMapper extends - TableMapper { + public static class ScanMapper + extends TableMapper { /** * Pass the key and value to reduce. - * - * @param key The key, here "aaa", "aab" etc. - * @param value The value is the same as the key. + * @param key The key, here "aaa", "aab" etc. + * @param value The value is the same as the key. * @param context The task context. * @throws IOException When reading the rows fails. */ @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { makeAssertions(key, value); context.write(key, key); } @@ -121,15 +118,13 @@ public abstract class MultiTableInputFormatTestBase { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> cf = - value.getMap(); + Map>> cf = value.getMap(); if (!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + throw new IOException( + "Wrong input columns. 
Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null)); - LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> " + val); + LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); } } @@ -137,26 +132,23 @@ public abstract class MultiTableInputFormatTestBase { * Checks the last and first keys seen against the scanner boundaries. */ public static class ScanReducer - extends - Reducer { + extends Reducer { private String first = null; private String last = null; @Override - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException, InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { makeAssertions(key, values); } protected void makeAssertions(ImmutableBytesWritable key, - Iterable values) { + Iterable values) { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.debug("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.debug( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; @@ -165,8 +157,7 @@ public abstract class MultiTableInputFormatTestBase { } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); cleanup(c); } @@ -174,10 +165,8 @@ public abstract class MultiTableInputFormatTestBase { protected void cleanup(Configuration c) { String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); - LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + - startRow + "\""); - LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + - "\""); + LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + startRow + "\""); + LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + "\""); if (startRow != null && startRow.length() > 0) { assertEquals(startRow, first); } @@ -188,41 +177,35 @@ public abstract class MultiTableInputFormatTestBase { } @Test - public void testScanEmptyToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, null, null); } @Test - public void testScanEmptyToAPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToAPP() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "app", "apo"); } @Test - public void testScanOBBToOPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanOBBToOPP() throws IOException, InterruptedException, ClassNotFoundException { testScan("obb", "opp", "opo"); } @Test - public void testScanYZYToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanYZYToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan("yzy", null, "zzz"); } /** - * Tests a MR scan using specific start and stop rows. 
- * - * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * Tests a MR scan using specific start and stop rows. nnn */ private void testScan(String start, String stop, String last) - throws IOException, InterruptedException, ClassNotFoundException { - String jobName = - "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + throws IOException, InterruptedException, ClassNotFoundException { + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -253,7 +236,7 @@ public abstract class MultiTableInputFormatTestBase { } protected void runJob(String jobName, Configuration c, List scans) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(c, jobName); initJob(scans, job); @@ -268,5 +251,4 @@ public abstract class MultiTableInputFormatTestBase { protected abstract void initJob(List scans, Job job) throws IOException; - } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java index e022bfdbd49..910b17a57df 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.io.DataInput; import java.io.DataOutput; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; @@ -33,17 +31,16 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; /** - * Input format that creates a configurable number of map tasks - * each provided with a single row of NullWritables. This can be - * useful when trying to write mappers which don't have any real - * input (eg when the mapper is simply producing random data as output) + * Input format that creates a configurable number of map tasks each provided with a single row of + * NullWritables. 
This can be useful when trying to write mappers which don't have any real input + * (eg when the mapper is simply producing random data as output) */ public class NMapInputFormat extends InputFormat { private static final String NMAPS_KEY = "nmapinputformat.num.maps"; @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext tac) { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext tac) { return new SingleRecordReader<>(NullWritable.get(), NullWritable.get()); } @@ -85,8 +82,7 @@ public class NMapInputFormat extends InputFormat { } } - private static class SingleRecordReader - extends RecordReader { + private static class SingleRecordReader extends RecordReader { private final K key; private final V value; @@ -107,7 +103,7 @@ public class NMapInputFormat extends InputFormat { } @Override - public V getCurrentValue(){ + public V getCurrentValue() { return value; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 83f8e0c120f..31657870fd5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public abstract class TableSnapshotInputFormatTestBase { private static final Logger LOG = LoggerFactory.getLogger(TableSnapshotInputFormatTestBase.class); protected final HBaseTestingUtility UTIL = new HBaseTestingUtility(); protected static final int NUM_REGION_SERVERS = 2; - protected static final byte[][] FAMILIES = {Bytes.toBytes("f1"), Bytes.toBytes("f2")}; + protected static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; protected FileSystem fs; protected Path rootDir; @@ -61,9 +61,9 @@ public abstract class TableSnapshotInputFormatTestBase { @Before public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numRegionServers(NUM_REGION_SERVERS).numDataNodes(NUM_REGION_SERVERS) - .createRootDir(true).build(); + StartMiniClusterOption option = + StartMiniClusterOption.builder().numRegionServers(NUM_REGION_SERVERS) + .numDataNodes(NUM_REGION_SERVERS).createRootDir(true).build(); UTIL.startMiniCluster(option); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ -128,7 +128,7 @@ public abstract class TableSnapshotInputFormatTestBase { Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName,tmpTableDir); + testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir); Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); for (Path regionDir : FSUtils.getRegionDirs(fs, @@ -158,10 +158,10 @@ public abstract class TableSnapshotInputFormatTestBase { } public abstract void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, - String snapshotName, Path tmpTableDir) throws Exception; + String snapshotName, Path tmpTableDir) throws Exception; protected void 
testWithMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, - int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { Path tableDir = util.getDataTestDirOnTestFS(snapshotName); TableName tableName = TableName.valueOf("testWithMapReduce"); testWithMapReduceImpl(util, tableName, snapshotName, tableDir, numRegions, numSplitsPerRegion, @@ -175,26 +175,24 @@ public abstract class TableSnapshotInputFormatTestBase { while (scanner.advance()) { Cell cell = scanner.current(); - //assert that all Cells in the Result have the same key - Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + // assert that all Cells in the Result have the same key + Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength())); } for (byte[] family : FAMILIES) { byte[] actual = result.getValue(family, family); - Assert.assertArrayEquals( - "Row in snapshot does not match, expected:" + Bytes.toString(row) + " ,actual:" + Bytes - .toString(actual), row, actual); + Assert.assertArrayEquals("Row in snapshot does not match, expected:" + Bytes.toString(row) + + " ,actual:" + Bytes.toString(actual), row, actual); } } protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, int numRegions) - throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, int numRegions) throws Exception { try { LOG.debug("Ensuring table doesn't exist."); util.deleteTable(tableName); - } catch(Exception ex) { + } catch (Exception ex) { // ignore } @@ -214,8 +212,8 @@ public abstract class TableSnapshotInputFormatTestBase { FileSystem fs = rootDir.getFileSystem(util.getConfiguration()); LOG.info("snapshot"); - SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, - Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), null, + snapshotName, rootDir, fs, true); LOG.info("load different values"); byte[] value = Bytes.toBytes("after_snapshot_value"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java index 0a7a9309899..b4e1b91359a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -111,25 +110,24 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Simple test for {@link HFileOutputFormat2}. - * Sets up and runs a mapreduce job that writes hfile output. - * Creates a few inner classes to implement splits and an inputformat that - * emits keys and values like those of {@link PerformanceEvaluation}. + * Simple test for {@link HFileOutputFormat2}. Sets up and runs a mapreduce job that writes hfile + * output. 
Creates a few inner classes to implement splits and an inputformat that emits keys and + * values like those of {@link PerformanceEvaluation}. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) -public class TestCellBasedHFileOutputFormat2 { +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +public class TestCellBasedHFileOutputFormat2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellBasedHFileOutputFormat2.class); + HBaseClassTestRule.forClass(TestCellBasedHFileOutputFormat2.class); private final static int ROWSPERSPLIT = 1024; public static final byte[] FAMILY_NAME = TestHRegionFileSystem.FAMILY_NAME; - private static final byte[][] FAMILIES = { - Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B"))}; - private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", - "TestTable3").map(TableName::valueOf).toArray(TableName[]::new); + private static final byte[][] FAMILIES = + { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; + private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", "TestTable3") + .map(TableName::valueOf).toArray(TableName[]::new); private HBaseTestingUtility util = new HBaseTestingUtility(); @@ -139,45 +137,39 @@ public class TestCellBasedHFileOutputFormat2 { * Simple mapper that makes KeyValue output. */ static class RandomKVGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; - private static final int KEYLEN_DEFAULT=10; - private static final String KEYLEN_CONF="randomkv.key.length"; + private static final int KEYLEN_DEFAULT = 10; + private static final String KEYLEN_CONF = "randomkv.key.length"; private int valLength; - private static final int VALLEN_DEFAULT=10; - private static final String VALLEN_CONF="randomkv.val.length"; - private static final byte [] QUALIFIER = Bytes.toBytes("data"); + private static final int VALLEN_DEFAULT = 10; + private static final String VALLEN_CONF = "randomkv.val.length"; + private static final byte[] QUALIFIER = Bytes.toBytes("data"); private boolean multiTableMapper = false; private TableName[] tables = null; - @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException ,InterruptedException - { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -210,8 +202,7 @@ public class TestCellBasedHFileOutputFormat2 { * Simple mapper that makes Put output. 
*/ static class RandomPutGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; private static final int KEYLEN_DEFAULT = 10; @@ -225,28 +216,25 @@ public class TestCellBasedHFileOutputFormat2 { private TableName[] tables = null; @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException, InterruptedException { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -294,28 +282,27 @@ public class TestCellBasedHFileOutputFormat2 { } /** - * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if - * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}. + * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if passed a keyvalue whose + * timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test - public void test_LATEST_TIMESTAMP_isReplaced() - throws Exception { + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test + public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); + Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be - // changed by call to write. Check all in kv is same but ts. + // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be + // changed by call to write. Check all in kv is same but ts. KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); @@ -326,7 +313,7 @@ public class TestCellBasedHFileOutputFormat2 { assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); - // Test 2. Now test passing a kv that has explicit ts. It should not be + // Test 2. Now test passing a kv that has explicit ts. It should not be // changed by call to record write. 
kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); @@ -338,26 +325,25 @@ public class TestCellBasedHFileOutputFormat2 { } } - private TaskAttemptContext createTestTaskAttemptContext(final Job job) - throws Exception { + private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception { HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class); - TaskAttemptContext context = hadoop.createTestTaskAttemptContext( - job, "attempt_201402131733_0001_m_000000_0"); + TaskAttemptContext context = + hadoop.createTestTaskAttemptContext(job, "attempt_201402131733_0001_m_000000_0"); return context; } /* - * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE - * metadata used by time-restricted scans. + * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE metadata used by + * time-restricted scans. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_TIMERANGE_present"); - LOG.info("Timerange dir writing to dir: "+ dir); + Path dir = util.getDataTestDir("test_TIMERANGE_present"); + LOG.info("Timerange dir writing to dir: " + dir); try { // build a record writer using HFileOutputFormat2 Job job = new Job(conf); @@ -367,13 +353,13 @@ public class TestCellBasedHFileOutputFormat2 { writer = hof.getRecordWriter(context); // Pass two key values with explicit times stamps - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); // value 1 with timestamp 2000 KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); - assertEquals(original,kv); + assertEquals(original, kv); // value 2 with timestamp 1000 kv = new KeyValue(b, b, b, 1000, b); @@ -394,15 +380,14 @@ public class TestCellBasedHFileOutputFormat2 { // open as HFile Reader and pull out TIMERANGE FileInfo. HFile.Reader rd = - HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); - Map finfo = rd.getHFileInfo(); + HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); + Map finfo = rd.getHFileInfo(); byte[] range = finfo.get("TIMERANGE".getBytes("UTF-8")); assertNotNull(range); // unmarshall and check values. TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(range); - LOG.info(timeRangeTracker.getMin() + - "...." + timeRangeTracker.getMax()); + LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); assertEquals(1000, timeRangeTracker.getMin()); assertEquals(2000, timeRangeTracker.getMax()); rd.close(); @@ -415,7 +400,8 @@ public class TestCellBasedHFileOutputFormat2 { /** * Run small MR job. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testWritingPEData() throws Exception { Configuration conf = util.getConfiguration(); Path testDir = util.getDataTestDirOnTestFS("testWritingPEData"); @@ -433,8 +419,8 @@ public class TestCellBasedHFileOutputFormat2 { byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; - Arrays.fill(startKey, (byte)0); - Arrays.fill(endKey, (byte)0xff); + Arrays.fill(startKey, (byte) 0); + Arrays.fill(endKey, (byte) 0xff); job.setPartitionerClass(SimpleTotalOrderPartitioner.class); // Set start and end rows for partitioner. @@ -444,29 +430,26 @@ public class TestCellBasedHFileOutputFormat2 { job.setOutputFormatClass(HFileOutputFormat2.class); job.setNumReduceTasks(4); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); FileOutputFormat.setOutputPath(job, testDir); assertTrue(job.waitForCompletion(false)); - FileStatus [] files = fs.listStatus(testDir); + FileStatus[] files = fs.listStatus(testDir); assertTrue(files.length > 0); } /** - * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into - * hfile. + * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into hfile. */ @Test - public void test_WritingTagData() - throws Exception { + public void test_WritingTagData() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version"; conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("WritingTagData"); + Path dir = util.getDataTestDir("WritingTagData"); try { conf.set(HFileOutputFormat2.OUTPUT_TABLE_NAME_CONF_KEY, TABLE_NAMES[0].getNameAsString()); // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs @@ -476,9 +459,9 @@ public class TestCellBasedHFileOutputFormat2 { context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - List< Tag > tags = new ArrayList<>(); + List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670))); KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags); writer.write(new ImmutableBytesWritable(), kv); @@ -486,15 +469,15 @@ public class TestCellBasedHFileOutputFormat2 { writer = null; FileSystem fs = dir.getFileSystem(conf); RemoteIterator iterator = fs.listFiles(dir, true); - while(iterator.hasNext()) { + while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); scanner.seekTo(); Cell cell = scanner.getCell(); - List tagsFromCell = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(), - cell.getTagsLength()); + List tagsFromCell = + TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(), 
cell.getTagsLength()); assertTrue(tagsFromCell.size() > 0); for (Tag tag : tagsFromCell) { assertTrue(tag.getType() == TagType.TTL_TAG_TYPE); @@ -506,11 +489,12 @@ public class TestCellBasedHFileOutputFormat2 { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); - conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration") - .toString()); + conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + util.getDataTestDir("testJobConfiguration").toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); Table table = Mockito.mock(Table.class); @@ -521,7 +505,7 @@ public class TestCellBasedHFileOutputFormat2 { assertEquals(job.getNumReduceTasks(), 4); } - private byte [][] generateRandomStartKeys(int numKeys) { + private byte[][] generateRandomStartKeys(int numKeys) { Random random = new Random(); byte[][] ret = new byte[numKeys][]; // first region start key is always empty @@ -538,39 +522,42 @@ public class TestCellBasedHFileOutputFormat2 { byte[][] ret = new byte[numKeys][]; for (int i = 0; i < numKeys; i++) { ret[i] = - PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); + PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); } return ret; } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoad() throws Exception { LOG.info("\nStarting test testMRIncrementalLoad\n"); doIncrementalLoadTest(false, false, false, "testMRIncrementalLoad"); } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithSplit() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n"); doIncrementalLoadTest(true, false, false, "testMRIncrementalLoadWithSplit"); } /** - * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true - * This test could only check the correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY - * is set to true. Because MiniHBaseCluster always run with single hostname (and different ports), - * it's not possible to check the region locality by comparing region locations and DN hostnames. - * When MiniHBaseCluster supports explicit hostnames parameter (just like MiniDFSCluster does), - * we could test region locality features more easily. + * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true This test could only check the + * correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY is set to true. Because + * MiniHBaseCluster always run with single hostname (and different ports), it's not possible to + * check the region locality by comparing region locations and DN hostnames. When MiniHBaseCluster + * supports explicit hostnames parameter (just like MiniDFSCluster does), we could test region + * locality features more easily. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testMRIncrementalLoadWithLocality() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithLocality\n"); doIncrementalLoadTest(false, true, false, "testMRIncrementalLoadWithLocality1"); doIncrementalLoadTest(true, true, false, "testMRIncrementalLoadWithLocality2"); } - //@Ignore("Wahtevs") + // @Ignore("Wahtevs") @Test public void testMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithPutSortReducer\n"); @@ -578,21 +565,20 @@ public class TestCellBasedHFileOutputFormat2 { } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, String tableStr) throws Exception { - doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, - Arrays.asList(tableStr)); + boolean putSortReducer, String tableStr) throws Exception { + doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, + Arrays.asList(tableStr)); } @Test public void testMultiMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMultiMRIncrementalLoadWithPutSortReducer\n"); doIncrementalLoadTest(false, false, true, - Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList - ())); + Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList())); } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, List tableStr) throws Exception { + boolean putSortReducer, List tableStr) throws Exception { util = new HBaseTestingUtility(); Configuration conf = util.getConfiguration(); conf.setBoolean(MultiTableHFileOutputFormat.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality); @@ -643,8 +629,7 @@ public class TestCellBasedHFileOutputFormat2 { if (allTables.containsKey(tf.getPath().getName())) { ++numTableDirs; tablePath = tf.getPath(); - } - else { + } else { continue; } } @@ -678,9 +663,10 @@ public class TestCellBasedHFileOutputFormat2 { byte[][] newSplitKeys = generateRandomSplitKeys(14); Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys); - while (util.getConnection().getRegionLocator(chosenTable.getName()) - .getAllRegionLocations().size() != 15 || - !admin.isTableAvailable(table.getName())) { + while ( + util.getConnection().getRegionLocator(chosenTable.getName()).getAllRegionLocations() + .size() != 15 || !admin.isTableAvailable(table.getName()) + ) { Thread.sleep(200); LOG.info("Waiting for new region assignment to happen"); } @@ -696,19 +682,19 @@ public class TestCellBasedHFileOutputFormat2 { } Table currentTable = allTables.get(tableNameStr); TableName currentTableName = currentTable.getName(); - new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, singleTableInfo - .getRegionLocator()); + new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, + singleTableInfo.getRegionLocator()); // Ensure data shows up int expectedRows = 0; if (putSortReducer) { // no rows should be extracted assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); } else { expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); Scan scan = new Scan(); ResultScanner results = 
currentTable.getScanner(scan); for (Result res : results) { @@ -741,14 +727,14 @@ public class TestCellBasedHFileOutputFormat2 { } admin.enableTable(currentTableName); util.waitTableAvailable(currentTableName); - assertEquals("Data should remain after reopening of regions", - tableDigestBefore, util.checksumRows(currentTable)); + assertEquals("Data should remain after reopening of regions", tableDigestBefore, + util.checksumRows(currentTable)); } } finally { for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) { - tableInfoSingle.getRegionLocator().close(); + tableInfoSingle.getRegionLocator().close(); } - for (Entry singleTable : allTables.entrySet() ) { + for (Entry singleTable : allTables.entrySet()) { singleTable.getValue().close(); util.deleteTable(singleTable.getValue().getName()); } @@ -757,14 +743,14 @@ public class TestCellBasedHFileOutputFormat2 { } } - private void runIncrementalPELoad(Configuration conf, List tableInfo, Path outDir, - boolean putSortReducer) throws IOException, - InterruptedException, ClassNotFoundException { + private void runIncrementalPELoad(Configuration conf, + List tableInfo, Path outDir, boolean putSortReducer) + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(conf, "testLocalMRIncrementalLoad"); job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad")); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); setupRandomGeneratorMapper(job, putSortReducer); if (tableInfo.size() > 1) { MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfo); @@ -773,75 +759,66 @@ public class TestCellBasedHFileOutputFormat2 { sum += tableInfoSingle.getRegionLocator().getAllRegionLocations().size(); } assertEquals(sum, job.getNumReduceTasks()); - } - else { + } else { RegionLocator regionLocator = tableInfo.get(0).getRegionLocator(); HFileOutputFormat2.configureIncrementalLoad(job, tableInfo.get(0).getHTableDescriptor(), - regionLocator); + regionLocator); assertEquals(regionLocator.getAllRegionLocations().size(), job.getNumReduceTasks()); } FileOutputFormat.setOutputPath(job, outDir); - assertFalse(util.getTestFileSystem().exists(outDir)) ; + assertFalse(util.getTestFileSystem().exists(outDir)); assertTrue(job.waitForCompletion(true)); } /** * Test for {@link HFileOutputFormat2#configureCompression(Configuration, HTableDescriptor)} and - * {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. - * Tests that the compression map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the + * compression map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testSerializeDeserializeFamilyCompressionMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToCompression = - getMockColumnFamiliesForCompression(numCfs); + getMockColumnFamiliesForCompression(numCfs); Table table = Mockito.mock(Table.class); setupMockColumnFamiliesForCompression(table, familyToCompression); conf.set(HFileOutputFormat2.COMPRESSION_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.compressionDetails, - Arrays.asList(table.getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.compressionDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific compression setting from the configuration - Map retrievedFamilyToCompressionMap = HFileOutputFormat2 - .createFamilyCompressionMap(conf); + Map retrievedFamilyToCompressionMap = + HFileOutputFormat2.createFamilyCompressionMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToCompression.entrySet()) { - assertEquals("Compression configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForCompression(Table table, - Map familyToCompression) throws IOException { + Map familyToCompression) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToCompression.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setCompressionType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setCompressionType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForCompression (int numCfs) { + private Map getMockColumnFamiliesForCompression(int numCfs) { Map familyToCompression = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { @@ -859,71 +836,59 @@ public class TestCellBasedHFileOutputFormat2 { return familyToCompression; } - /** * Test for {@link HFileOutputFormat2#configureBloomType(HTableDescriptor, Configuration)} and - * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. - * Tests that the compression map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the compression + * map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { for (int numCfs = 0; numCfs <= 2; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBloomType = - getMockColumnFamiliesForBloomType(numCfs); + Map familyToBloomType = getMockColumnFamiliesForBloomType(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBloomType(table, - familyToBloomType); + setupMockColumnFamiliesForBloomType(table, familyToBloomType); conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, - Arrays.asList(table.getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBloomTypeMap = - HFileOutputFormat2 - .createFamilyBloomTypeMap(conf); + HFileOutputFormat2.createFamilyBloomTypeMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBloomType.entrySet()) { - assertEquals("BloomType configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("BloomType configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForBloomType(Table table, - Map familyToDataBlockEncoding) throws IOException { + Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBloomFilterType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBloomFilterType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBloomType (int numCfs) { + private Map getMockColumnFamiliesForBloomType(int numCfs) { Map familyToBloomType = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBloomType.put("Family1!@#!@#&", BloomType.ROW); } if (numCfs-- > 0) { - familyToBloomType.put("Family2=asdads&!AASD", - BloomType.ROWCOL); + familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL); } if (numCfs-- > 0) { familyToBloomType.put("Family3", BloomType.NONE); @@ -933,74 +898,60 @@ public class TestCellBasedHFileOutputFormat2 { /** * Test for {@link HFileOutputFormat2#configureBlockSize(HTableDescriptor, Configuration)} and - * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. 
- * Tests that the compression map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the compression + * map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBlockSize = - getMockColumnFamiliesForBlockSize(numCfs); + Map familyToBlockSize = getMockColumnFamiliesForBlockSize(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBlockSize(table, - familyToBlockSize); + setupMockColumnFamiliesForBlockSize(table, familyToBlockSize); conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table - .getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.blockSizeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBlockSizeMap = - HFileOutputFormat2 - .createFamilyBlockSizeMap(conf); + HFileOutputFormat2.createFamilyBlockSizeMap(conf); // test that we have a value for all column families that matches with the // used mock values - for (Entry entry : familyToBlockSize.entrySet() - ) { - assertEquals("BlockSize configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); + for (Entry entry : familyToBlockSize.entrySet()) { + assertEquals("BlockSize configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForBlockSize(Table table, - Map familyToDataBlockEncoding) throws IOException { + Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBlocksize(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBlocksize(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForBlockSize (int numCfs) { + private Map getMockColumnFamiliesForBlockSize(int numCfs) { Map familyToBlockSize = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBlockSize.put("Family1!@#!@#&", 1234); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { familyToBlockSize.put("Family3", 0); @@ -1010,74 +961,64 @@ public class TestCellBasedHFileOutputFormat2 { /** * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)} - * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. - * Tests that the compression map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that the + * compression map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToDataBlockEncoding = - getMockColumnFamiliesForDataBlockEncoding(numCfs); + getMockColumnFamiliesForDataBlockEncoding(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForDataBlockEncoding(table, - familyToDataBlockEncoding); + setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding); HTableDescriptor tableDescriptor = table.getTableDescriptor(); conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.dataBlockEncodingDetails, Arrays - .asList(tableDescriptor))); + HFileOutputFormat2.serializeColumnFamilyAttribute( + HFileOutputFormat2.dataBlockEncodingDetails, Arrays.asList(tableDescriptor))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToDataBlockEncodingMap = - HFileOutputFormat2 - .createFamilyDataBlockEncodingMap(conf); + HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToDataBlockEncoding.entrySet()) { - assertEquals("DataBlockEncoding configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals( + "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), + retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForDataBlockEncoding(Table table, - Map familyToDataBlockEncoding) throws IOException { + Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new 
HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setDataBlockEncoding(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setDataBlockEncoding(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForDataBlockEncoding (int numCfs) { + private Map getMockColumnFamiliesForDataBlockEncoding(int numCfs) { Map familyToDataBlockEncoding = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.FAST_DIFF); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.PREFIX); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.PREFIX); } if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE); @@ -1086,12 +1027,8 @@ public class TestCellBasedHFileOutputFormat2 { } private void setupMockStartKeys(RegionLocator table) throws IOException { - byte[][] mockKeys = new byte[][] { - HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaa"), - Bytes.toBytes("ggg"), - Bytes.toBytes("zzz") - }; + byte[][] mockKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaa"), + Bytes.toBytes("ggg"), Bytes.toBytes("zzz") }; Mockito.doReturn(mockKeys).when(table).getStartKeys(); } @@ -1101,10 +1038,11 @@ public class TestCellBasedHFileOutputFormat2 { } /** - * Test that {@link HFileOutputFormat2} RecordWriter uses compression and - * bloom filter settings from the column family descriptor + * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings + * from the column family descriptor */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testColumnFamilySettings() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; @@ -1116,7 +1054,7 @@ public class TestCellBasedHFileOutputFormat2 { RegionLocator regionLocator = Mockito.mock(RegionLocator.class); HTableDescriptor htd = new HTableDescriptor(TABLE_NAMES[0]); Mockito.doReturn(htd).when(table).getTableDescriptor(); - for (HColumnDescriptor hcd: HBaseTestingUtility.generateColumnDescriptors()) { + for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) { htd.addFamily(hcd); } @@ -1164,11 +1102,12 @@ public class TestCellBasedHFileOutputFormat2 { byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE"); - assertEquals("Incorrect bloom filter used for column family " + familyStr + - "(reader: " + reader + ")", + assertEquals( + "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter))); - assertEquals("Incorrect compression used for column family " + familyStr + - "(reader: " + reader + ")", hcd.getCompressionType(), reader.getFileContext().getCompression()); + assertEquals( + "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", + hcd.getCompressionType(), reader.getFileContext().getCompression()); } } finally { dir.getFileSystem(conf).delete(dir, true); @@ -1176,19 +1115,19 @@ public class TestCellBasedHFileOutputFormat2 { } /** - * Write random values to the writer assuming a table created using - * {@link #FAMILIES} as column family descriptors + * Write random values to the writer assuming a table created using {@link #FAMILIES} as column + * family descriptors */ private void writeRandomKeyValues(RecordWriter writer, - TaskAttemptContext context, Set families, int numRows) - throws IOException, InterruptedException { + TaskAttemptContext context, Set families, int numRows) + throws IOException, InterruptedException { byte keyBytes[] = new byte[Bytes.SIZEOF_INT]; int valLength = 10; byte valBytes[] = new byte[valLength]; int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - final byte [] qualifier = Bytes.toBytes("data"); + final byte[] qualifier = Bytes.toBytes("data"); Random random = new Random(); for (int i = 0; i < numRows; i++) { @@ -1204,48 +1143,48 @@ public class TestCellBasedHFileOutputFormat2 { } /** - * This test is to test the scenario happened in HBASE-6901. - * All files are bulk loaded and excluded from minor compaction. - * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException - * will be thrown. + * This test is to test the scenario happened in HBASE-6901. All files are bulk loaded and + * excluded from minor compaction. Without the fix of HBASE-6901, an + * ArrayIndexOutOfBoundsException will be thrown. 
*/ - @Ignore ("Flakey: See HBASE-9051") @Test + @Ignore("Flakey: See HBASE-9051") + @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); util.startMiniCluster(); - try (Connection conn = ConnectionFactory.createConnection(); - Admin admin = conn.getAdmin(); - Table table = util.createTable(TABLE_NAMES[0], FAMILIES); - RegionLocator locator = conn.getRegionLocator(TABLE_NAMES[0])) { + try (Connection conn = ConnectionFactory.createConnection(); Admin admin = conn.getAdmin(); + Table table = util.createTable(TABLE_NAMES[0], FAMILIES); + RegionLocator locator = conn.getRegionLocator(TABLE_NAMES[0])) { final FileSystem fs = util.getDFSCluster().getFileSystem(); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // Generate two bulk load files - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), + conn.getRegionLocator(TABLE_NAMES[0]))), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator); } // Ensure data shows up int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1290,7 +1229,8 @@ public class TestCellBasedHFileOutputFormat2 { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1298,15 +1238,15 @@ public class TestCellBasedHFileOutputFormat2 { util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()){ + Admin admin = conn.getAdmin()) { Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAMES[0], FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); @@ -1325,20 +1265,20 @@ public class TestCellBasedHFileOutputFormat2 { }, 5000); // Generate a bulk load file with more rows - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), regionLocator)), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), regionLocator)), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator); // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows + 1, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows + 1, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1396,16 +1336,16 @@ public class TestCellBasedHFileOutputFormat2 { Table table = util.createTable(tname, FAMILIES, splitKeys); } else if ("incremental".equals(args[0])) { TableName tname = TableName.valueOf(args[1]); - try(Connection c = ConnectionFactory.createConnection(conf); - Admin admin = c.getAdmin(); - RegionLocator regionLocator = c.getRegionLocator(tname)) { + try (Connection c = ConnectionFactory.createConnection(conf); Admin admin = c.getAdmin(); + RegionLocator regionLocator = c.getRegionLocator(tname)) { Path outDir = new Path("incremental-out"); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin - .getTableDescriptor(tname), regionLocator)), outDir, false); + runIncrementalPELoad(conf, + Arrays.asList( + new HFileOutputFormat2.TableInfo(admin.getTableDescriptor(tname), regionLocator)), + outDir, false); } } else { - throw new RuntimeException( - "usage: TestHFileOutputFormat2 newtable | incremental"); + throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental"); } } @@ -1415,9 +1355,10 @@ public class TestCellBasedHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD"); - 
conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + - Bytes.toString(HFileOutputFormat2.combineTableNameSuffix( - TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD"); + conf.set( + HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes + .toString(HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0])), + "ONE_SSD"); Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0])); Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1])); util.startMiniDFSCluster(3); @@ -1436,9 +1377,9 @@ public class TestCellBasedHFileOutputFormat2 { // alter table cf schema to change storage policies HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); spA = getStoragePolicyName(fs, cf1Dir); spB = getStoragePolicyName(fs, cf2Dir); LOG.debug("Storage policy of cf 0: [" + spA + "]."); @@ -1493,4 +1434,3 @@ public class TestCellBasedHFileOutputFormat2 { return null; } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java index 95445461364..ac704788b42 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -94,12 +94,12 @@ import org.slf4j.LoggerFactory; /** * Tests the table import and table export MR job functionality */ -@Category({VerySlowMapReduceTests.class, MediumTests.class}) +@Category({ VerySlowMapReduceTests.class, MediumTests.class }) public class TestCellBasedImportExport2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellBasedImportExport2.class); + HBaseClassTestRule.forClass(TestCellBasedImportExport2.class); private static final Logger LOG = LoggerFactory.getLogger(TestCellBasedImportExport2.class); protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -154,12 +154,8 @@ public class TestCellBasedImportExport2 { } /** - * Runs an export job with the specified command line args - * @param args - * @return true if job completed successfully - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException + * Runs an export job with the specified command line args n * @return true if job completed + * successfully nnn */ protected boolean runExport(String[] args) throws Throwable { // need to make a copy of the configuration because to make sure different temp dirs are used. 
@@ -172,12 +168,8 @@ public class TestCellBasedImportExport2 { } /** - * Runs an import job with the specified command line args - * @param args - * @return true if job completed successfully - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException + * Runs an import job with the specified command line args n * @return true if job completed + * successfully nnn */ boolean runImport(String[] args) throws Throwable { // need to make a copy of the configuration because to make sure different temp dirs are used. @@ -186,8 +178,7 @@ public class TestCellBasedImportExport2 { } /** - * Test simple replication case with column mapping - * @throws Exception + * Test simple replication case with column mapping n */ @Test public void testSimpleCase() throws Throwable { @@ -209,54 +200,47 @@ public class TestCellBasedImportExport2 { t.put(p); } - String[] args = new String[] { - // Only export row1 & row2. - "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", - "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export - }; - assertTrue(runExport(args)); + String[] args = new String[] { + // Only export row1 & row2. + "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", + "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", name.getMethodName(), FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); - final String IMPORT_TABLE = name.getMethodName() + "import"; - try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { - args = new String[] { - "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING, - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; - assertTrue(runImport(args)); + final String IMPORT_TABLE = name.getMethodName() + "import"; + try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { + args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + IMPORT_TABLE, FQ_OUTPUT_DIR }; + assertTrue(runImport(args)); - Get g = new Get(ROW1); - g.setMaxVersions(); - Result r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW2); - g.setMaxVersions(); - r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW3); - r = t.get(g); - assertEquals(0, r.size()); - } + Get g = new Get(ROW1); + g.setMaxVersions(); + Result r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW2); + g.setMaxVersions(); + r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW3); + r = t.get(g); + assertEquals(0, r.size()); + } } /** - * Test export hbase:meta table - * - * @throws Throwable + * Test export hbase:meta table n */ @Test public void testMetaExport() throws Throwable { - String[] args = new String[] { TableName.META_TABLE_NAME.getNameAsString(), - FQ_OUTPUT_DIR, "1", "0", "0" }; + String[] args = + new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } /** - * Test import data from 0.94 exported file - * @throws Throwable + * Test import data from 0.94 exported file n */ @Test public void testImport94Table() throws Throwable { @@ -274,34 +258,27 @@ public class TestCellBasedImportExport2 { fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name)); String IMPORT_TABLE = name; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) { - String[] args = new String[] { - 
"-Dhbase.import.version=0.94" , - IMPORT_TABLE, FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-Dhbase.import.version=0.94", IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - /* exportedTableIn94Format contains 5 rows - ROW COLUMN+CELL - r1 column=f1:c1, timestamp=1383766761171, value=val1 - r2 column=f1:c1, timestamp=1383766771642, value=val2 - r3 column=f1:c1, timestamp=1383766777615, value=val3 - r4 column=f1:c1, timestamp=1383766785146, value=val4 - r5 column=f1:c1, timestamp=1383766791506, value=val5 - */ - assertEquals(5, UTIL.countRows(t)); + /* + * exportedTableIn94Format contains 5 rows ROW COLUMN+CELL r1 column=f1:c1, + * timestamp=1383766761171, value=val1 r2 column=f1:c1, timestamp=1383766771642, value=val2 r3 + * column=f1:c1, timestamp=1383766777615, value=val3 r4 column=f1:c1, timestamp=1383766785146, + * value=val4 r5 column=f1:c1, timestamp=1383766791506, value=val5 + */ + assertEquals(5, UTIL.countRows(t)); } } /** * Test export scanner batching */ - @Test - public void testExportScannerBatching() throws Throwable { + @Test + public void testExportScannerBatching() throws Throwable { TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(1) - .build()) - .build(); + .newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(1).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -313,11 +290,11 @@ public class TestCellBasedImportExport2 { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - String[] args = new String[] { - "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg. - name.getMethodName(), - FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added + // scanner + // batching + // arg. 
+ name.getMethodName(), FQ_OUTPUT_DIR }; assertTrue(runExport(args)); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); @@ -327,13 +304,11 @@ public class TestCellBasedImportExport2 { @Test public void testWithDeletes() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -345,35 +320,26 @@ public class TestCellBasedImportExport2 { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - Delete d = new Delete(ROW1, now+3); + Delete d = new Delete(ROW1, now + 3); t.delete(d); d = new Delete(ROW1); - d.addColumns(FAMILYA, QUAL, now+2); + d.addColumns(FAMILYA, QUAL, now + 2); t.delete(d); } - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", name.getMethodName(), + FQ_OUTPUT_DIR, "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + .newBuilder(TableName.valueOf(IMPORT_TABLE)).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { - args = new String[] { - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -383,71 +349,60 @@ public class TestCellBasedImportExport2 { Result r = scanner.next(); Cell[] res = r.rawCells(); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); - assertEquals(now+4, res[1].getTimestamp()); - assertEquals(now+3, res[2].getTimestamp()); + assertEquals(now + 4, res[1].getTimestamp()); + assertEquals(now + 3, res[2].getTimestamp()); assertTrue(CellUtil.isDelete(res[3])); - assertEquals(now+2, res[4].getTimestamp()); - assertEquals(now+1, res[5].getTimestamp()); + assertEquals(now + 2, res[4].getTimestamp()); + assertEquals(now + 1, res[5].getTimestamp()); assertEquals(now, res[6].getTimestamp()); } } - @Test public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + 
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); exportT.delete(d); - //Add second version of QUAL + // Add second version of QUAL p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now + 5, "s".getBytes()); exportT.put(p); - //Add second Delete family marker - d = new Delete(ROW1, now+7); + // Add second Delete family marker + d = new Delete(ROW1, now + 7); exportT.delete(d); - - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key to + // export }; assertTrue(runExport(args)); final String importTable = name.getMethodName() + "import"; desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + .newBuilder(TableName.valueOf(importTable)).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importT = UTIL.getConnection().getTable(TableName.valueOf(importTable)); - args = new String[] { - importTable, - FQ_OUTPUT_DIR - }; + args = new String[] { importTable, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -458,11 +413,11 @@ public class TestCellBasedImportExport2 { Result importedTResult = importedTScanner.next(); ResultScanner exportedTScanner = exportT.getScanner(s); - Result exportedTResult = exportedTScanner.next(); + Result exportedTResult = exportedTScanner.next(); try { Result.compareResults(exportedTResult, importedTResult); } catch (Throwable e) { - fail("Original and imported tables data comparision failed with error:"+e.getMessage()); + fail("Original and imported tables data comparision failed with error:" + e.getMessage()); } finally { exportT.close(); importT.close(); @@ -470,18 +425,16 @@ public class TestCellBasedImportExport2 { } /** - * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, + * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, * attempt with invalid values. 
*/ @Test public void testWithFilter() throws Throwable { // Create simple table to export TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + .newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()) + .build(); UTIL.getAdmin().createTable(desc); Table exportTable = UTIL.getConnection().getTable(desc.getTableName()); @@ -504,19 +457,15 @@ public class TestCellBasedImportExport2 { // Import to a new table final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importTable = UTIL.getConnection().getTable(desc.getTableName()); args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(), - "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, - FQ_OUTPUT_DIR, - "1000" }; + "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, FQ_OUTPUT_DIR, + "1000" }; assertTrue(runImport(args)); // get the count of the source table for that time range @@ -530,8 +479,8 @@ public class TestCellBasedImportExport2 { // need to re-run the export job args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + Filter.class.getName(), - "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1) + "", name.getMethodName(), - FQ_OUTPUT_DIR, "1000" }; + "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1) + "", name.getMethodName(), + FQ_OUTPUT_DIR, "1000" }; assertFalse(runImport(args)); // cleanup @@ -540,10 +489,7 @@ public class TestCellBasedImportExport2 { } /** - * Count the number of keyvalues in the specified table for the given timerange - * @param table - * @return - * @throws IOException + * Count the number of keyvalues in the specified table for the given timerange nnn */ private int getCount(Table table, Filter filter) throws IOException { Scan scan = new Scan(); @@ -564,7 +510,7 @@ public class TestCellBasedImportExport2 { public void testImportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -594,29 +540,19 @@ public class TestCellBasedImportExport2 { String prefix = "row"; String label_0 = "label_0"; String label_1 = "label_1"; - String[] args = { - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + String[] args = { "table", "outputDir", String.valueOf(version), String.valueOf(startTime), + String.valueOf(endTime), prefix }; Scan scan = ExportUtils.getScanFromCommandLine(UTIL.getConfiguration(), args); assertEquals(version, scan.getMaxVersions()); assertEquals(startTime, scan.getTimeRange().getMin()); 
assertEquals(endTime, scan.getTimeRange().getMax()); assertEquals(true, (scan.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); - String[] argsWithLabels = { - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + assertEquals(0, + Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + String[] argsWithLabels = + { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, "table", + "outputDir", String.valueOf(version), String.valueOf(startTime), String.valueOf(endTime), + prefix }; Configuration conf = new Configuration(UTIL.getConfiguration()); // parse the "-D" options String[] otherArgs = new GenericOptionsParser(conf, argsWithLabels).getRemainingArgs(); @@ -625,7 +561,8 @@ public class TestCellBasedImportExport2 { assertEquals(startTime, scanWithLabels.getTimeRange().getMin()); assertEquals(endTime, scanWithLabels.getTimeRange().getMax()); assertEquals(true, (scanWithLabels.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), + Bytes.toBytesBinary(prefix))); assertEquals(2, scanWithLabels.getAuthorizations().getLabels().size()); assertEquals(label_0, scanWithLabels.getAuthorizations().getLabels().get(0)); assertEquals(label_1, scanWithLabels.getAuthorizations().getLabels().get(1)); @@ -638,7 +575,7 @@ public class TestCellBasedImportExport2 { public void testExportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -651,11 +588,10 @@ public class TestCellBasedImportExport2 { assertEquals(-1, newSecurityManager.getExitCode()); String errMsg = data.toString(); assertTrue(errMsg.contains("Wrong number of arguments:")); - assertTrue(errMsg.contains( - "Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]")); assertTrue( - errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); + errMsg.contains("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]")); + assertTrue(errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); assertTrue(errMsg.contains("-D hbase.mapreduce.include.deleted.rows=true")); assertTrue(errMsg.contains("-D hbase.client.scanner.caching=100")); assertTrue(errMsg.contains("-D hbase.export.scanner.batch=10")); @@ -692,18 +628,18 @@ public class TestCellBasedImportExport2 { importer.setup(ctx); Result value = mock(Result.class); KeyValue[] keys = { - new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - Bytes.toBytes("value")), - new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - Bytes.toBytes("value1")) }; + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), + Bytes.toBytes("value")), + new KeyValue(Bytes.toBytes("row"), 
Bytes.toBytes("family"), Bytes.toBytes("qualifier"), + Bytes.toBytes("value1")) }; when(value.rawCells()).thenReturn(keys); importer.map(new ImmutableBytesWritable(Bytes.toBytes("Key")), value, ctx); } /** - * Test addFilterAndArguments method of Import This method set couple - * parameters into Configuration + * Test addFilterAndArguments method of Import This method set couple parameters into + * Configuration */ @Test public void testAddFilterAndArguments() throws IOException { @@ -715,7 +651,7 @@ public class TestCellBasedImportExport2 { Import.addFilterAndArguments(configuration, FilterBase.class, args); assertEquals("org.apache.hadoop.hbase.filter.FilterBase", - configuration.get(Import.FILTER_CLASS_CONF_KEY)); + configuration.get(Import.FILTER_CLASS_CONF_KEY)); assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY)); } @@ -739,7 +675,7 @@ public class TestCellBasedImportExport2 { exportTable.put(put); // Run the export - String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"}; + String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000" }; assertTrue(runExport(args)); // Create the table for import @@ -748,41 +684,40 @@ public class TestCellBasedImportExport2 { // Register the wal listener for the import table RegionInfo region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() - .getRegions(importTable.getName()).get(0).getRegionInfo(); + .getRegions(importTable.getName()).get(0).getRegionInfo(); TableWALActionListener walListener = new TableWALActionListener(region); WAL wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); wal.registerWALActionsListener(walListener); // Run the import with SKIP_WAL - args = - new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), - importTableName, FQ_OUTPUT_DIR }; + args = new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), + importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is not visisted + // Assert that the wal is not visisted assertTrue(!walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); // Run the import with the default durability option importTableName = name.getMethodName() + "import2"; importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3); region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() - .getRegions(importTable.getName()).get(0).getRegionInfo(); + .getRegions(importTable.getName()).get(0).getRegionInfo(); wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); walListener = new TableWALActionListener(region); wal.registerWALActionsListener(walListener); args = new String[] { importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is visisted + // Assert that the wal is visisted assertTrue(walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); } } /** - * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to - * identify that an entry is written to the Write Ahead Log for the given table. 
+ * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to identify + * that an entry is written to the Write Ahead Log for the given table. */ private static class TableWALActionListener implements WALActionsListener { @@ -795,8 +730,10 @@ public class TestCellBasedImportExport2 { @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { - if (logKey.getTableName().getNameAsString().equalsIgnoreCase( - this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { + if ( + logKey.getTableName().getNameAsString() + .equalsIgnoreCase(this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit()) + ) { isVisited = true; } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java index 6e7375295a6..4bbacf04210 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,12 +71,12 @@ import org.mockito.stubbing.Answer; /** * Basic test for the WALPlayer M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCellBasedWALPlayer2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellBasedWALPlayer2.class); + HBaseClassTestRule.forClass(TestCellBasedWALPlayer2.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static MiniHBaseCluster cluster; @@ -91,7 +91,7 @@ public class TestCellBasedWALPlayer2 { @BeforeClass public static void beforeClass() throws Exception { - conf= TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.createRootDir(); walRootDir = TEST_UTIL.createWALRootDir(); fs = CommonFSUtils.getRootDirFileSystem(conf); @@ -107,8 +107,7 @@ public class TestCellBasedWALPlayer2 { } /** - * Simple end-to-end test - * @throws Exception + * Simple end-to-end test n */ @Test public void testWALPlayer() throws Exception { @@ -134,19 +133,17 @@ public class TestCellBasedWALPlayer2 { // replay the WAL, map table 1 to table 2 WAL log = cluster.getRegionServer(0).getWAL(null); log.rollWriter(); - String walInputDir = new Path(cluster.getMaster().getMasterFileSystem() - .getWALRootDir(), HConstants.HREGION_LOGDIR_NAME).toString(); + String walInputDir = new Path(cluster.getMaster().getMasterFileSystem().getWALRootDir(), + HConstants.HREGION_LOGDIR_NAME).toString(); - Configuration configuration= TEST_UTIL.getConfiguration(); + Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName="_test_.name"; + String optionName = "_test_.name"; configuration.set(optionName, "1000"); player.setupTime(configuration, optionName); - assertEquals(1000,configuration.getLong(optionName,0)); + assertEquals(1000, configuration.getLong(optionName, 0)); assertEquals(0, ToolRunner.run(configuration, player, - new String[] {walInputDir, tableName1.getNameAsString(), - tableName2.getNameAsString() })); - + new String[] { walInputDir, tableName1.getNameAsString(), 
tableName2.getNameAsString() })); // verify the WAL was player into table 2 Get g = new Get(ROW); @@ -210,7 +207,7 @@ public class TestCellBasedWALPlayer2 { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -223,8 +220,8 @@ public class TestCellBasedWALPlayer2 { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + - " [ ]")); + assertTrue(data.toString() + .contains("Usage: WALPlayer [options] " + " [ ]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index 309ecc81df5..cc2b3459a86 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,11 +50,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCellCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellCounter.class); + HBaseClassTestRule.forClass(TestCellCounter.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1"); @@ -66,8 +66,8 @@ public class TestCellCounter { private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static Path FQ_OUTPUT_DIR; - private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator - + "output"; + private static final String OUTPUT_DIR = + "target" + File.separator + "test-data" + File.separator + "output"; private static long now = EnvironmentEdgeManager.currentTime(); @Rule @@ -87,7 +87,6 @@ public class TestCellCounter { /** * Test CellCounter all data should print to output - * */ @Test public void testCellCounter() throws Exception { @@ -250,9 +249,8 @@ public class TestCellCounter { p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22")); p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); - String[] args = - { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "--starttime=" + now + 1, - "--endtime=" + now + 2 }; + String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", + "--starttime=" + now + 1, "--endtime=" + now + 2 }; runCount(args); FileInputStream inputStream = @@ -269,8 +267,8 @@ public class TestCellCounter { private boolean runCount(String[] args) throws Exception { // need to make a copy of the configuration because to make sure // different temp dirs are used. 
- int status = ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), - args); + int status = + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), args); return status == 0; } @@ -282,7 +280,7 @@ public class TestCellCounter { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -314,9 +312,8 @@ public class TestCellCounter { final TableName sourceTable = TableName.valueOf(name.getMethodName()); String outputPath = OUTPUT_DIR + sourceTable; LocalFileSystem localFileSystem = new LocalFileSystem(); - Path outputDir = - new Path(outputPath).makeQualified(localFileSystem.getUri(), - localFileSystem.getWorkingDirectory()); + Path outputDir = new Path(outputPath).makeQualified(localFileSystem.getUri(), + localFileSystem.getWorkingDirectory()); byte[][] families = { FAMILY_A, FAMILY_B }; Table t = UTIL.createTable(sourceTable, families); try { @@ -333,7 +330,7 @@ public class TestCellCounter { String[] args = { sourceTable.getNameAsString(), outputDir.toString(), ";" }; runCount(args); FileInputStream inputStream = - new FileInputStream(outputPath + File.separator + "part-r-00000"); + new FileInputStream(outputPath + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -348,7 +345,7 @@ public class TestCellCounter { FileUtil.fullyDelete(new File(outputPath)); args = new String[] { "-D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=a, b", - sourceTable.getNameAsString(), outputDir.toString(), ";"}; + sourceTable.getNameAsString(), outputDir.toString(), ";" }; runCount(args); inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000"); String data2 = IOUtils.toString(inputStream); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 0271983428a..40e9d19c5d4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,12 +59,12 @@ import org.junit.rules.TestName; /** * Basic test for the CopyTable M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCopyTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCopyTable.class); + HBaseClassTestRule.forClass(TestCopyTable.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final byte[] ROW1 = Bytes.toBytes("row1"); @@ -95,20 +95,19 @@ public class TestCopyTable { final byte[] COLUMN1 = Bytes.toBytes("c1"); try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY);) { + Table t2 = TEST_UTIL.createTable(tableName2, FAMILY);) { // put rows into the first table loadData(t1, FAMILY, COLUMN1); CopyTable copy = new CopyTable(); int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); - } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", tableName1.getNameAsString() }); + } else { + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -130,15 +129,13 @@ public class TestCopyTable { cfd.setMobEnabled(true); cfd.setMobThreshold(5); - TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(tableName1) - .setColumnFamily(cfd.build()) - .build(); - TableDescriptor desc2 = TableDescriptorBuilder.newBuilder(tableName2) - .setColumnFamily(cfd.build()) - .build(); + TableDescriptor desc1 = + TableDescriptorBuilder.newBuilder(tableName1).setColumnFamily(cfd.build()).build(); + TableDescriptor desc2 = + TableDescriptorBuilder.newBuilder(tableName2).setColumnFamily(cfd.build()).build(); try (Table t1 = TEST_UTIL.createTable(desc1, null); - Table t2 = TEST_UTIL.createTable(desc2, null);) { + Table t2 = TEST_UTIL.createTable(desc2, null);) { // put rows into the first table for (int i = 0; i < 10; i++) { @@ -151,13 +148,12 @@ public class TestCopyTable { int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); - } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", tableName1.getNameAsString() }); + } else { + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -168,17 +164,15 @@ public class TestCopyTable { assertEquals(1, r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1)); 
assertEquals("compare row values between two tables", - t1.getDescriptor().getValue("row" + i), - t2.getDescriptor().getValue("row" + i)); + t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i)); } - assertEquals("compare count of mob rows after table copy", MobTestUtil.countMobRows(TEST_UTIL, t1), - MobTestUtil.countMobRows(TEST_UTIL, t2)); + assertEquals("compare count of mob rows after table copy", + MobTestUtil.countMobRows(TEST_UTIL, t1), MobTestUtil.countMobRows(TEST_UTIL, t2)); assertEquals("compare count of mob row values between two tables", - t1.getDescriptor().getValues().size(), - t2.getDescriptor().getValues().size()); + t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size()); assertTrue("The mob row count is 0 but should be > 0", - MobTestUtil.countMobRows(TEST_UTIL, t2) > 0); + MobTestUtil.countMobRows(TEST_UTIL, t2) > 0); } finally { TEST_UTIL.deleteTable(tableName1); TEST_UTIL.deleteTable(tableName2); @@ -186,8 +180,7 @@ public class TestCopyTable { } /** - * Simple end-to-end test - * @throws Exception + * Simple end-to-end test n */ @Test public void testCopyTable() throws Exception { @@ -243,11 +236,10 @@ public class TestCopyTable { t1.put(p); CopyTable copy = new CopyTable(); - assertEquals( - 0, - ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2, "--startrow=\\x01row1", - "--stoprow=\\x01row2", tableName1.getNameAsString() })); + assertEquals(0, + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2, "--startrow=\\x01row1", "--stoprow=\\x01row2", + tableName1.getNameAsString() })); // verify the data was copied into table 2 // row1 exist, row0, row2 do not exist @@ -295,8 +287,8 @@ public class TestCopyTable { long currentTime = EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--new.name=" + targetTable, "--families=a:b", "--all.cells", - "--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000), - "--versions=1", sourceTable.getNameAsString() }; + "--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000), + "--versions=1", sourceTable.getNameAsString() }; assertNull(t2.get(new Get(ROW1)).getRow()); assertTrue(runCopy(args)); @@ -324,7 +316,7 @@ public class TestCopyTable { PrintStream writer = new PrintStream(data); System.setErr(writer); SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); try { CopyTable.main(emptyArgs); @@ -341,8 +333,8 @@ public class TestCopyTable { } private boolean runCopy(String[] args) throws Exception { - int status = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), - args); + int status = + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), args); return status == 0; } @@ -372,9 +364,9 @@ public class TestCopyTable { private Table createTable(TableName tableName, byte[] family, boolean isMob) throws IOException { if (isMob) { ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setMobEnabled(true).setMobThreshold(1).build(); + .setMobEnabled(true).setMobThreshold(1).build(); TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfd).build(); + 
TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfd).build(); return TEST_UTIL.createTable(desc, null); } else { return TEST_UTIL.createTable(tableName, family); @@ -382,7 +374,7 @@ public class TestCopyTable { } private void testCopyTableBySnapshot(String tablePrefix, boolean bulkLoad, boolean isMob) - throws Exception { + throws Exception { TableName table1 = TableName.valueOf(tablePrefix + 1); TableName table2 = TableName.valueOf(tablePrefix + 2); Table t1 = createTable(table1, FAMILY_A, isMob); @@ -393,7 +385,7 @@ public class TestCopyTable { boolean success; if (bulkLoad) { success = - runCopy(new String[] { "--snapshot", "--new.name=" + table2, "--bulkload", snapshot }); + runCopy(new String[] { "--snapshot", "--new.name=" + table2, "--bulkload", snapshot }); } else { success = runCopy(new String[] { "--snapshot", "--new.name=" + table2, snapshot }); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java index 46a449a4359..34d197be02f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMapper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGroupingTableMapper.class); + HBaseClassTestRule.forClass(TestGroupingTableMapper.class); /** * Test GroupingTableMapper class @@ -56,14 +56,14 @@ public class TestGroupingTableMapper { Result result = mock(Result.class); @SuppressWarnings("unchecked") Mapper.Context context = - mock(Mapper.Context.class); + mock(Mapper.Context.class); context.write(any(), any()); List keyValue = new ArrayList<>(); byte[] row = {}; - keyValue.add(new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes - .toBytes("value1"))); - keyValue.add(new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes - .toBytes("value2"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes.toBytes("value1"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes.toBytes("value2"))); when(result.listCells()).thenReturn(keyValue); mapper.map(null, result, context); // template data diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java index 2467dcace23..04dddfb8fa3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -25,18 +31,11 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestHBaseMRTestingUtility { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseMRTestingUtility.class); + HBaseClassTestRule.forClass(TestHBaseMRTestingUtility.class); @Test public void testMRYarnConfigsPopulation() throws IOException { @@ -55,17 +54,21 @@ public class TestHBaseMRTestingUtility { hbt.getConfiguration().set(entry.getKey(), entry.getValue()); } - for (Map.Entry entry : dummyProps.entrySet()) { - assertTrue("The Configuration for key " + entry.getKey() +" and value: " + entry.getValue() + - " is not populated correctly", hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry entry : dummyProps.entrySet()) { + assertTrue( + "The Configuration for key " + entry.getKey() + " and value: " + entry.getValue() + + " is not populated correctly", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.startMiniMapReduceCluster(); // Confirm that MiniMapReduceCluster overwrites the mr properties and updates the Configuration - for (Map.Entry entry : dummyProps.entrySet()) { - assertFalse("The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini"+ - "cluster is started", hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry entry : dummyProps.entrySet()) { + assertFalse( + "The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini" + + "cluster is started", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.shutdownMiniMapReduceCluster(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 9b82cbdfc87..31472b0474e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -124,26 +124,25 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Simple test for {@link HFileOutputFormat2}. - * Sets up and runs a mapreduce job that writes hfile output. - * Creates a few inner classes to implement splits and an inputformat that - * emits keys and values like those of {@link PerformanceEvaluation}. + * Simple test for {@link HFileOutputFormat2}. Sets up and runs a mapreduce job that writes hfile + * output. 
Creates a few inner classes to implement splits and an inputformat that emits keys and + * values like those of {@link PerformanceEvaluation}. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) -//TODO : Remove this in 3.0 -public class TestHFileOutputFormat2 { +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +// TODO : Remove this in 3.0 +public class TestHFileOutputFormat2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileOutputFormat2.class); + HBaseClassTestRule.forClass(TestHFileOutputFormat2.class); private final static int ROWSPERSPLIT = 1024; public static final byte[] FAMILY_NAME = TestHRegionFileSystem.FAMILY_NAME; - private static final byte[][] FAMILIES = { - Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B"))}; - private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", - "TestTable3").map(TableName::valueOf).toArray(TableName[]::new); + private static final byte[][] FAMILIES = + { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; + private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", "TestTable3") + .map(TableName::valueOf).toArray(TableName[]::new); private HBaseTestingUtility util = new HBaseTestingUtility(); @@ -153,45 +152,39 @@ public class TestHFileOutputFormat2 { * Simple mapper that makes KeyValue output. */ static class RandomKVGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; - private static final int KEYLEN_DEFAULT=10; - private static final String KEYLEN_CONF="randomkv.key.length"; + private static final int KEYLEN_DEFAULT = 10; + private static final String KEYLEN_CONF = "randomkv.key.length"; private int valLength; - private static final int VALLEN_DEFAULT=10; - private static final String VALLEN_CONF="randomkv.val.length"; - private static final byte [] QUALIFIER = Bytes.toBytes("data"); + private static final int VALLEN_DEFAULT = 10; + private static final String VALLEN_CONF = "randomkv.val.length"; + private static final byte[] QUALIFIER = Bytes.toBytes("data"); private boolean multiTableMapper = false; private TableName[] tables = null; - @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException ,InterruptedException - { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -223,8 +216,7 @@ public class TestHFileOutputFormat2 { * Simple mapper that makes Put output. 
*/ static class RandomPutGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; private static final int KEYLEN_DEFAULT = 10; @@ -238,28 +230,25 @@ public class TestHFileOutputFormat2 { private TableName[] tables = null; @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException, InterruptedException { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -306,28 +295,27 @@ public class TestHFileOutputFormat2 { } /** - * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if - * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}. + * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if passed a keyvalue whose + * timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test - public void test_LATEST_TIMESTAMP_isReplaced() - throws Exception { + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test + public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); + Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be - // changed by call to write. Check all in kv is same but ts. + // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be + // changed by call to write. Check all in kv is same but ts. KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); @@ -338,7 +326,7 @@ public class TestHFileOutputFormat2 { assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); - // Test 2. Now test passing a kv that has explicit ts. It should not be + // Test 2. Now test passing a kv that has explicit ts. It should not be // changed by call to record write. 
kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); @@ -350,26 +338,25 @@ public class TestHFileOutputFormat2 { } } - private TaskAttemptContext createTestTaskAttemptContext(final Job job) - throws Exception { + private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception { HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class); - TaskAttemptContext context = hadoop.createTestTaskAttemptContext( - job, "attempt_201402131733_0001_m_000000_0"); + TaskAttemptContext context = + hadoop.createTestTaskAttemptContext(job, "attempt_201402131733_0001_m_000000_0"); return context; } /* - * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE - * metadata used by time-restricted scans. + * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE metadata used by + * time-restricted scans. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_TIMERANGE_present"); - LOG.info("Timerange dir writing to dir: "+ dir); + Path dir = util.getDataTestDir("test_TIMERANGE_present"); + LOG.info("Timerange dir writing to dir: " + dir); try { // build a record writer using HFileOutputFormat2 Job job = new Job(conf); @@ -379,13 +366,13 @@ public class TestHFileOutputFormat2 { writer = hof.getRecordWriter(context); // Pass two key values with explicit times stamps - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); // value 1 with timestamp 2000 KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); - assertEquals(original,kv); + assertEquals(original, kv); // value 2 with timestamp 1000 kv = new KeyValue(b, b, b, 1000, b); @@ -406,15 +393,14 @@ public class TestHFileOutputFormat2 { // open as HFile Reader and pull out TIMERANGE FileInfo. HFile.Reader rd = - HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); - Map finfo = rd.getHFileInfo(); + HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); + Map finfo = rd.getHFileInfo(); byte[] range = finfo.get(Bytes.toBytes("TIMERANGE")); assertNotNull(range); // unmarshall and check values. - TimeRangeTracker timeRangeTracker =TimeRangeTracker.parseFrom(range); - LOG.info(timeRangeTracker.getMin() + - "...." + timeRangeTracker.getMax()); + TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(range); + LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); assertEquals(1000, timeRangeTracker.getMin()); assertEquals(2000, timeRangeTracker.getMax()); rd.close(); @@ -427,7 +413,8 @@ public class TestHFileOutputFormat2 { /** * Run small MR job. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testWritingPEData() throws Exception { Configuration conf = util.getConfiguration(); Path testDir = util.getDataTestDirOnTestFS("testWritingPEData"); @@ -446,8 +433,8 @@ public class TestHFileOutputFormat2 { byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; - Arrays.fill(startKey, (byte)0); - Arrays.fill(endKey, (byte)0xff); + Arrays.fill(startKey, (byte) 0); + Arrays.fill(endKey, (byte) 0xff); job.setPartitionerClass(SimpleTotalOrderPartitioner.class); // Set start and end rows for partitioner. @@ -457,49 +444,46 @@ public class TestHFileOutputFormat2 { job.setOutputFormatClass(HFileOutputFormat2.class); job.setNumReduceTasks(4); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - KeyValueSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + KeyValueSerialization.class.getName()); FileOutputFormat.setOutputPath(job, testDir); assertTrue(job.waitForCompletion(false)); - FileStatus [] files = fs.listStatus(testDir); + FileStatus[] files = fs.listStatus(testDir); assertTrue(files.length > 0); - //check output file num and size. + // check output file num and size. for (byte[] family : FAMILIES) { - long kvCount= 0; + long kvCount = 0; RemoteIterator iterator = - fs.listFiles(testDir.suffix("/" + new String(family)), true); + fs.listFiles(testDir.suffix("/" + new String(family)), true); while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); kvCount += reader.getEntries(); scanner.seekTo(); long perKVSize = scanner.getCell().getSerializedSize(); assertTrue("Data size of each file should not be too large.", - perKVSize * reader.getEntries() <= hregionMaxFilesize); + perKVSize * reader.getEntries() <= hregionMaxFilesize); } assertEquals("Should write expected data in output file.", ROWSPERSPLIT, kvCount); } } /** - * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into - * hfile. + * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into hfile. 
*/ @Test - public void test_WritingTagData() - throws Exception { + public void test_WritingTagData() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version"; conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("WritingTagData"); + Path dir = util.getDataTestDir("WritingTagData"); try { conf.set(HFileOutputFormat2.OUTPUT_TABLE_NAME_CONF_KEY, TABLE_NAMES[0].getNameAsString()); // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs @@ -509,9 +493,9 @@ public class TestHFileOutputFormat2 { context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - List< Tag > tags = new ArrayList<>(); + List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670))); KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags); writer.write(new ImmutableBytesWritable(), kv); @@ -519,10 +503,10 @@ public class TestHFileOutputFormat2 { writer = null; FileSystem fs = dir.getFileSystem(conf); RemoteIterator iterator = fs.listFiles(dir, true); - while(iterator.hasNext()) { + while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); scanner.seekTo(); Cell cell = scanner.getCell(); @@ -538,11 +522,12 @@ public class TestHFileOutputFormat2 { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); - conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration") - .toString()); + conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + util.getDataTestDir("testJobConfiguration").toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); Table table = Mockito.mock(Table.class); @@ -553,7 +538,7 @@ public class TestHFileOutputFormat2 { assertEquals(job.getNumReduceTasks(), 4); } - private byte [][] generateRandomStartKeys(int numKeys) { + private byte[][] generateRandomStartKeys(int numKeys) { Random random = ThreadLocalRandom.current(); byte[][] ret = new byte[numKeys][]; // first region start key is always empty @@ -570,39 +555,42 @@ public class TestHFileOutputFormat2 { byte[][] ret = new byte[numKeys][]; for (int i = 0; i < numKeys; i++) { ret[i] = - PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); + PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); } return ret; } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testMRIncrementalLoad() throws Exception { LOG.info("\nStarting test testMRIncrementalLoad\n"); doIncrementalLoadTest(false, false, false, "testMRIncrementalLoad"); } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithSplit() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n"); doIncrementalLoadTest(true, false, false, "testMRIncrementalLoadWithSplit"); } /** - * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true - * This test could only check the correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY - * is set to true. Because MiniHBaseCluster always run with single hostname (and different ports), - * it's not possible to check the region locality by comparing region locations and DN hostnames. - * When MiniHBaseCluster supports explicit hostnames parameter (just like MiniDFSCluster does), - * we could test region locality features more easily. + * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true This test could only check the + * correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY is set to true. Because + * MiniHBaseCluster always run with single hostname (and different ports), it's not possible to + * check the region locality by comparing region locations and DN hostnames. When MiniHBaseCluster + * supports explicit hostnames parameter (just like MiniDFSCluster does), we could test region + * locality features more easily. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithLocality() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithLocality\n"); doIncrementalLoadTest(false, true, false, "testMRIncrementalLoadWithLocality1"); doIncrementalLoadTest(true, true, false, "testMRIncrementalLoadWithLocality2"); } - //@Ignore("Wahtevs") + // @Ignore("Wahtevs") @Test public void testMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithPutSortReducer\n"); @@ -610,21 +598,20 @@ public class TestHFileOutputFormat2 { } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, String tableStr) throws Exception { - doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, - Arrays.asList(tableStr)); + boolean putSortReducer, String tableStr) throws Exception { + doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, + Arrays.asList(tableStr)); } @Test public void testMultiMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMultiMRIncrementalLoadWithPutSortReducer\n"); doIncrementalLoadTest(false, false, true, - Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList - ())); + Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList())); } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, List tableStr) throws Exception { + boolean putSortReducer, List tableStr) throws Exception { util = new HBaseTestingUtility(); Configuration conf = util.getConfiguration(); conf.setBoolean(MultiTableHFileOutputFormat.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality); @@ -641,8 +628,8 @@ public class 
TestHFileOutputFormat2 { for (int i = 0; i < hostCount; ++i) { hostnames[i] = "datanode_" + i; } - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); + StartMiniClusterOption option = + StartMiniClusterOption.builder().numRegionServers(hostCount).dataNodeHosts(hostnames).build(); util.startMiniCluster(option); Map allTables = new HashMap<>(tableStr.size()); @@ -677,8 +664,7 @@ public class TestHFileOutputFormat2 { if (allTables.containsKey(tf.getPath().getName())) { ++numTableDirs; tablePath = tf.getPath(); - } - else { + } else { continue; } } @@ -712,9 +698,10 @@ public class TestHFileOutputFormat2 { byte[][] newSplitKeys = generateRandomSplitKeys(14); Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys); - while (util.getConnection().getRegionLocator(chosenTable.getName()) - .getAllRegionLocations().size() != 15 || - !admin.isTableAvailable(table.getName())) { + while ( + util.getConnection().getRegionLocator(chosenTable.getName()).getAllRegionLocations() + .size() != 15 || !admin.isTableAvailable(table.getName()) + ) { Thread.sleep(200); LOG.info("Waiting for new region assignment to happen"); } @@ -730,19 +717,19 @@ public class TestHFileOutputFormat2 { } Table currentTable = allTables.get(tableNameStr); TableName currentTableName = currentTable.getName(); - new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, singleTableInfo - .getRegionLocator()); + new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, + singleTableInfo.getRegionLocator()); // Ensure data shows up int expectedRows = 0; if (putSortReducer) { // no rows should be extracted assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); } else { expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); Scan scan = new Scan(); ResultScanner results = currentTable.getScanner(scan); for (Result res : results) { @@ -775,14 +762,14 @@ public class TestHFileOutputFormat2 { } admin.enableTable(currentTableName); util.waitTableAvailable(currentTableName); - assertEquals("Data should remain after reopening of regions", - tableDigestBefore, util.checksumRows(currentTable)); + assertEquals("Data should remain after reopening of regions", tableDigestBefore, + util.checksumRows(currentTable)); } } finally { for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) { - tableInfoSingle.getRegionLocator().close(); + tableInfoSingle.getRegionLocator().close(); } - for (Entry singleTable : allTables.entrySet() ) { + for (Entry singleTable : allTables.entrySet()) { singleTable.getValue().close(); util.deleteTable(singleTable.getValue().getName()); } @@ -791,14 +778,14 @@ public class TestHFileOutputFormat2 { } } - private void runIncrementalPELoad(Configuration conf, List tableInfo, Path outDir, - boolean putSortReducer) throws IOException, - InterruptedException, ClassNotFoundException { + private void runIncrementalPELoad(Configuration conf, + List tableInfo, Path outDir, boolean putSortReducer) + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(conf, "testLocalMRIncrementalLoad"); job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad")); 
job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - KeyValueSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + KeyValueSerialization.class.getName()); setupRandomGeneratorMapper(job, putSortReducer); if (tableInfo.size() > 1) { MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfo); @@ -807,74 +794,65 @@ public class TestHFileOutputFormat2 { sum += tableInfoSingle.getRegionLocator().getAllRegionLocations().size(); } assertEquals(sum, job.getNumReduceTasks()); - } - else { + } else { RegionLocator regionLocator = tableInfo.get(0).getRegionLocator(); HFileOutputFormat2.configureIncrementalLoad(job, tableInfo.get(0).getHTableDescriptor(), - regionLocator); + regionLocator); assertEquals(regionLocator.getAllRegionLocations().size(), job.getNumReduceTasks()); } FileOutputFormat.setOutputPath(job, outDir); - assertFalse(util.getTestFileSystem().exists(outDir)) ; + assertFalse(util.getTestFileSystem().exists(outDir)); assertTrue(job.waitForCompletion(true)); } /** - * Test for {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. - * Tests that the family compression map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * Test for {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the + * family compression map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyCompressionMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToCompression = - getMockColumnFamiliesForCompression(numCfs); + getMockColumnFamiliesForCompression(numCfs); Table table = Mockito.mock(Table.class); setupMockColumnFamiliesForCompression(table, familyToCompression); conf.set(HFileOutputFormat2.COMPRESSION_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.compressionDetails, - Arrays.asList(table.getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.compressionDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific compression setting from the configuration - Map retrievedFamilyToCompressionMap = HFileOutputFormat2 - .createFamilyCompressionMap(conf); + Map retrievedFamilyToCompressionMap = + HFileOutputFormat2.createFamilyCompressionMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToCompression.entrySet()) { - assertEquals("Compression configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForCompression(Table table, - Map familyToCompression) throws IOException { + Map familyToCompression) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : 
familyToCompression.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setCompressionType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setCompressionType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForCompression (int numCfs) { + private Map getMockColumnFamiliesForCompression(int numCfs) { Map familyToCompression = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { @@ -892,70 +870,58 @@ public class TestHFileOutputFormat2 { return familyToCompression; } - /** - * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. - * Tests that the family bloom type map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the + * family bloom type map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { for (int numCfs = 0; numCfs <= 2; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBloomType = - getMockColumnFamiliesForBloomType(numCfs); + Map familyToBloomType = getMockColumnFamiliesForBloomType(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBloomType(table, - familyToBloomType); + setupMockColumnFamiliesForBloomType(table, familyToBloomType); conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, - Arrays.asList(table.getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBloomTypeMap = - HFileOutputFormat2 - .createFamilyBloomTypeMap(conf); + HFileOutputFormat2.createFamilyBloomTypeMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBloomType.entrySet()) { - assertEquals("BloomType configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("BloomType configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForBloomType(Table table, - Map familyToDataBlockEncoding) throws IOException { + Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : 
familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBloomFilterType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBloomFilterType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBloomType (int numCfs) { + private Map getMockColumnFamiliesForBloomType(int numCfs) { Map familyToBloomType = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBloomType.put("Family1!@#!@#&", BloomType.ROW); } if (numCfs-- > 0) { - familyToBloomType.put("Family2=asdads&!AASD", - BloomType.ROWCOL); + familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL); } if (numCfs-- > 0) { familyToBloomType.put("Family3", BloomType.NONE); @@ -964,74 +930,60 @@ public class TestHFileOutputFormat2 { } /** - * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. - * Tests that the family block size map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the + * family block size map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBlockSize = - getMockColumnFamiliesForBlockSize(numCfs); + Map familyToBlockSize = getMockColumnFamiliesForBlockSize(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBlockSize(table, - familyToBlockSize); + setupMockColumnFamiliesForBlockSize(table, familyToBlockSize); conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table - .getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.blockSizeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBlockSizeMap = - HFileOutputFormat2 - .createFamilyBlockSizeMap(conf); + HFileOutputFormat2.createFamilyBlockSizeMap(conf); // test that we have a value for all column families that matches with the // used mock values - for (Entry entry : familyToBlockSize.entrySet() - ) { - assertEquals("BlockSize configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); + for (Entry entry : familyToBlockSize.entrySet()) { + assertEquals("BlockSize configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForBlockSize(Table table, - Map familyToDataBlockEncoding) throws IOException { + Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBlocksize(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBlocksize(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBlockSize (int numCfs) { + private Map getMockColumnFamiliesForBlockSize(int numCfs) { Map familyToBlockSize = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBlockSize.put("Family1!@#!@#&", 1234); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { familyToBlockSize.put("Family3", 0); @@ -1040,74 +992,65 @@ public class TestHFileOutputFormat2 { } /** - * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. 
- * Tests that the family data block encoding map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that + * the family data block encoding map is correctly serialized into and deserialized from + * configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToDataBlockEncoding = - getMockColumnFamiliesForDataBlockEncoding(numCfs); + getMockColumnFamiliesForDataBlockEncoding(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForDataBlockEncoding(table, - familyToDataBlockEncoding); + setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding); HTableDescriptor tableDescriptor = table.getTableDescriptor(); conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.dataBlockEncodingDetails, Arrays - .asList(tableDescriptor))); + HFileOutputFormat2.serializeColumnFamilyAttribute( + HFileOutputFormat2.dataBlockEncodingDetails, Arrays.asList(tableDescriptor))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToDataBlockEncodingMap = - HFileOutputFormat2 - .createFamilyDataBlockEncodingMap(conf); + HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToDataBlockEncoding.entrySet()) { - assertEquals("DataBlockEncoding configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals( + "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), + retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); } } } private void setupMockColumnFamiliesForDataBlockEncoding(Table table, - Map familyToDataBlockEncoding) throws IOException { + Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setDataBlockEncoding(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setDataBlockEncoding(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForDataBlockEncoding (int numCfs) { + private Map getMockColumnFamiliesForDataBlockEncoding(int numCfs) { Map familyToDataBlockEncoding = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.FAST_DIFF); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.PREFIX); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.PREFIX); } if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE); @@ -1116,12 +1059,8 @@ public class TestHFileOutputFormat2 { } private void setupMockStartKeys(RegionLocator table) throws IOException { - byte[][] mockKeys = new byte[][] { - HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaa"), - Bytes.toBytes("ggg"), - Bytes.toBytes("zzz") - }; + byte[][] mockKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaa"), + Bytes.toBytes("ggg"), Bytes.toBytes("zzz") }; Mockito.doReturn(mockKeys).when(table).getStartKeys(); } @@ -1131,10 +1070,11 @@ public class TestHFileOutputFormat2 { } /** - * Test that {@link HFileOutputFormat2} RecordWriter uses compression and - * bloom filter settings from the column family descriptor + * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings + * from the column family descriptor */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testColumnFamilySettings() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; @@ -1146,7 +1086,7 @@ public class TestHFileOutputFormat2 { RegionLocator regionLocator = Mockito.mock(RegionLocator.class); HTableDescriptor htd = new HTableDescriptor(TABLE_NAMES[0]); Mockito.doReturn(htd).when(table).getTableDescriptor(); - for (HColumnDescriptor hcd: HBaseTestingUtility.generateColumnDescriptors()) { + for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) { htd.addFamily(hcd); } @@ -1194,8 +1134,8 @@ public class TestHFileOutputFormat2 { byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE"); - assertEquals("Incorrect bloom filter used for column family " + familyStr + - "(reader: " + reader + ")", + assertEquals( + "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter))); assertEquals( "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", @@ -1207,19 +1147,19 @@ public class TestHFileOutputFormat2 { } /** - * Write random values to the writer assuming a table created using - * {@link #FAMILIES} as column family descriptors + * Write random values to the writer assuming a table created using {@link #FAMILIES} as column + * family descriptors */ private void writeRandomKeyValues(RecordWriter writer, - TaskAttemptContext context, Set families, int numRows) - throws IOException, InterruptedException { + TaskAttemptContext context, Set families, int numRows) + throws IOException, InterruptedException { byte keyBytes[] = new byte[Bytes.SIZEOF_INT]; int valLength = 10; byte valBytes[] = new byte[valLength]; int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - final byte [] qualifier = Bytes.toBytes("data"); + final byte[] qualifier = Bytes.toBytes("data"); for (int i = 0; i < numRows; i++) { Bytes.putInt(keyBytes, 0, i); Bytes.random(valBytes); @@ -1232,48 +1172,48 @@ public class TestHFileOutputFormat2 { } /** - * This test is to test the scenario happened in HBASE-6901. - * All files are bulk loaded and excluded from minor compaction. - * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException - * will be thrown. + * This test is to test the scenario happened in HBASE-6901. All files are bulk loaded and + * excluded from minor compaction. Without the fix of HBASE-6901, an + * ArrayIndexOutOfBoundsException will be thrown. 
*/ - @Ignore ("Flakey: See HBASE-9051") @Test + @Ignore("Flakey: See HBASE-9051") + @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); util.startMiniCluster(); - try (Connection conn = ConnectionFactory.createConnection(); - Admin admin = conn.getAdmin(); - Table table = util.createTable(TABLE_NAMES[0], FAMILIES); - RegionLocator locator = conn.getRegionLocator(TABLE_NAMES[0])) { + try (Connection conn = ConnectionFactory.createConnection(); Admin admin = conn.getAdmin(); + Table table = util.createTable(TABLE_NAMES[0], FAMILIES); + RegionLocator locator = conn.getRegionLocator(TABLE_NAMES[0])) { final FileSystem fs = util.getDFSCluster().getFileSystem(); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // Generate two bulk load files - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), + conn.getRegionLocator(TABLE_NAMES[0]))), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator); } // Ensure data shows up int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1318,7 +1258,8 @@ public class TestHFileOutputFormat2 { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1326,15 +1267,15 @@ public class TestHFileOutputFormat2 { util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()){ + Admin admin = conn.getAdmin()) { Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAMES[0], FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); @@ -1353,20 +1294,20 @@ public class TestHFileOutputFormat2 { }, 5000); // Generate a bulk load file with more rows - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), regionLocator)), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), regionLocator)), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator); // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows + 1, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows + 1, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1424,16 +1365,16 @@ public class TestHFileOutputFormat2 { Table table = util.createTable(tname, FAMILIES, splitKeys); } else if ("incremental".equals(args[0])) { TableName tname = TableName.valueOf(args[1]); - try(Connection c = ConnectionFactory.createConnection(conf); - Admin admin = c.getAdmin(); - RegionLocator regionLocator = c.getRegionLocator(tname)) { + try (Connection c = ConnectionFactory.createConnection(conf); Admin admin = c.getAdmin(); + RegionLocator regionLocator = c.getRegionLocator(tname)) { Path outDir = new Path("incremental-out"); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin - .getTableDescriptor(tname), regionLocator)), outDir, false); + runIncrementalPELoad(conf, + Arrays.asList( + new HFileOutputFormat2.TableInfo(admin.getTableDescriptor(tname), regionLocator)), + outDir, false); } } else { - throw new RuntimeException( - "usage: TestHFileOutputFormat2 newtable | incremental"); + throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental"); } } @@ -1443,9 +1384,10 @@ public class TestHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD"); - conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + - 
Bytes.toString(HFileOutputFormat2.combineTableNameSuffix( - TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD"); + conf.set( + HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes + .toString(HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0])), + "ONE_SSD"); Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0])); Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1])); util.startMiniDFSCluster(3); @@ -1464,9 +1406,9 @@ public class TestHFileOutputFormat2 { // alter table cf schema to change storage policies HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); spA = getStoragePolicyName(fs, cf1Dir); spB = getStoragePolicyName(fs, cf2Dir); LOG.debug("Storage policy of cf 0: [" + spA + "]."); @@ -1551,9 +1493,9 @@ public class TestHFileOutputFormat2 { while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); assertEquals(reader.getTrailer().getCompressionCodec().getName(), - hfileoutputformatCompression); + hfileoutputformatCompression); } } finally { if (writer != null && context != null) { @@ -1575,8 +1517,8 @@ public class TestHFileOutputFormat2 { for (int i = 0; i < hostCount; ++i) { hostnames[i] = "datanode_" + i; } - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); + StartMiniClusterOption option = + StartMiniClusterOption.builder().numRegionServers(hostCount).dataNodeHosts(hostnames).build(); util.startMiniCluster(option); // Start cluster B @@ -1631,8 +1573,7 @@ public class TestHFileOutputFormat2 { assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT), config.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); - assertEquals(bSpecificConfigValue, - config.get(bSpecificConfigKey)); + assertEquals(bSpecificConfigValue, config.get(bSpecificConfigKey)); } } finally { utilB.deleteTable(tableName); @@ -1730,8 +1671,7 @@ public class TestHFileOutputFormat2 { } @Override - public Hbck getHbck() - throws IOException { + public Hbck getHbck() throws IOException { return delegate.getHbck(); } @@ -1752,4 +1692,3 @@ public class TestHFileOutputFormat2 { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index 77245f3c360..3408ae43034 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestHRegionPartitioner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHRegionPartitioner.class); + HBaseClassTestRule.forClass(TestHRegionPartitioner.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -65,8 +65,8 @@ public class TestHRegionPartitioner { byte[][] families = { Bytes.toBytes("familyA"), Bytes.toBytes("familyB") }; - UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, - Bytes.toBytes("aa"), Bytes.toBytes("cc"), 3); + UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, Bytes.toBytes("aa"), + Bytes.toBytes("cc"), 3); HRegionPartitioner partitioner = new HRegionPartitioner<>(); Configuration configuration = UTIL.getConfiguration(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index ffcc900acc7..da48b0028b7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; public class TestHashTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHashTable.class); + HBaseClassTestRule.forClass(TestHashTable.class); private static final Logger LOG = LoggerFactory.getLogger(TestHashTable.class); @@ -85,9 +85,9 @@ public class TestHashTable { int numRegions = 10; int numHashFiles = 3; - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } long timestamp = 1430764183454L; @@ -107,13 +107,9 @@ public class TestHashTable { Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName.getNameAsString()); long batchSize = 300; - int code = hashTable.run(new String[] { - "--batchsize=" + batchSize, - "--numhashfiles=" + numHashFiles, - "--scanbatch=2", - tableName.getNameAsString(), - testDir.toString() - }); + int code = + hashTable.run(new String[] { "--batchsize=" + batchSize, "--numhashfiles=" + numHashFiles, + "--scanbatch=2", tableName.getNameAsString(), testDir.toString() }); assertEquals("test job failed", 0, code); FileSystem fs = TEST_UTIL.getTestFileSystem(); @@ -127,29 +123,29 @@ public class TestHashTable { LOG.debug("partition: " + Bytes.toInt(bytes.get())); } - ImmutableMap expectedHashes - = ImmutableMap.builder() - .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) - .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) - .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) - .put(15, new 
ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) - .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) - .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) - .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) - .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) - .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) - .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) - .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) - .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) - .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) - .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) - .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) - .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) - .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) - .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) - .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) - .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) - .build(); + ImmutableMap expectedHashes = + ImmutableMap. builder() + .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) + .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) + .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) + .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) + .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) + .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) + .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) + .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) + .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) + .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) + .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) + .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) + .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) + .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) + .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) + .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) + .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) + .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) + .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) + .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) + .build(); Map actualHashes = new HashMap<>(); Path dataDir = new Path(testDir, HashTable.HASH_DATA_DIR); @@ -162,11 +158,11 @@ public class TestHashTable { while 
(reader.next(key, hash)) { String keyString = Bytes.toHex(key.get(), key.getOffset(), key.getLength()); LOG.debug("Key: " + (keyString.isEmpty() ? "-1" : Integer.parseInt(keyString, 16)) - + " Hash: " + Bytes.toHex(hash.get(), hash.getOffset(), hash.getLength())); + + " Hash: " + Bytes.toHex(hash.get(), hash.getOffset(), hash.getLength())); int intKey = -1; if (key.getLength() > 0) { - intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); + intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); } if (actualHashes.containsKey(intKey)) { Assert.fail("duplicate key in data files: " + intKey); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index fd6d112f10c..77aadf561e8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -109,13 +109,13 @@ import org.slf4j.LoggerFactory; /** * Tests the table import and table export MR job functionality */ -@Category({VerySlowMapReduceTests.class, MediumTests.class}) -//TODO : Remove this in 3.0 +@Category({ VerySlowMapReduceTests.class, MediumTests.class }) +// TODO : Remove this in 3.0 public class TestImportExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportExport.class); + HBaseClassTestRule.forClass(TestImportExport.class); private static final Logger LOG = LoggerFactory.getLogger(TestImportExport.class); protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -134,7 +134,7 @@ public class TestImportExport { private static final long now = EnvironmentEdgeManager.currentTime(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); - public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); public static final String TEST_ATTR = "source_op"; public static final String TEST_TAG = "test_tag"; @@ -173,12 +173,8 @@ public class TestImportExport { } /** - * Runs an export job with the specified command line args - * @param args - * @return true if job completed successfully - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException + * Runs an export job with the specified command line args n * @return true if job completed + * successfully nnn */ protected boolean runExport(String[] args) throws Throwable { // need to make a copy of the configuration because to make sure different temp dirs are used. @@ -191,12 +187,8 @@ public class TestImportExport { } /** - * Runs an import job with the specified command line args - * @param args - * @return true if job completed successfully - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException + * Runs an import job with the specified command line args n * @return true if job completed + * successfully nnn */ boolean runImport(String[] args) throws Throwable { // need to make a copy of the configuration because to make sure different temp dirs are used. 
@@ -205,8 +197,7 @@ public class TestImportExport { } /** - * Test simple replication case with column mapping - * @throws Exception + * Test simple replication case with column mapping n */ @Test public void testSimpleCase() throws Throwable { @@ -228,54 +219,47 @@ public class TestImportExport { t.put(p); } - String[] args = new String[] { - // Only export row1 & row2. - "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", - "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export - }; - assertTrue(runExport(args)); + String[] args = new String[] { + // Only export row1 & row2. + "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", + "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", name.getMethodName(), FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); - final String IMPORT_TABLE = name.getMethodName() + "import"; - try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { - args = new String[] { - "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING, - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; - assertTrue(runImport(args)); + final String IMPORT_TABLE = name.getMethodName() + "import"; + try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { + args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + IMPORT_TABLE, FQ_OUTPUT_DIR }; + assertTrue(runImport(args)); - Get g = new Get(ROW1); - g.setMaxVersions(); - Result r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW2); - g.setMaxVersions(); - r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW3); - r = t.get(g); - assertEquals(0, r.size()); - } + Get g = new Get(ROW1); + g.setMaxVersions(); + Result r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW2); + g.setMaxVersions(); + r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW3); + r = t.get(g); + assertEquals(0, r.size()); + } } /** - * Test export hbase:meta table - * - * @throws Throwable + * Test export hbase:meta table n */ @Test public void testMetaExport() throws Throwable { - String[] args = new String[] { TableName.META_TABLE_NAME.getNameAsString(), - FQ_OUTPUT_DIR, "1", "0", "0" }; + String[] args = + new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } /** - * Test import data from 0.94 exported file - * @throws Throwable + * Test import data from 0.94 exported file n */ @Test public void testImport94Table() throws Throwable { @@ -293,10 +277,7 @@ public class TestImportExport { fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name)); String IMPORT_TABLE = name; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) { - String[] args = new String[] { - "-Dhbase.import.version=0.94" , - IMPORT_TABLE, FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-Dhbase.import.version=0.94", IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); // @formatter:off // exportedTableIn94Format contains 5 rows @@ -314,14 +295,12 @@ public class TestImportExport { /** * Test export scanner batching */ - @Test - public void testExportScannerBatching() throws Throwable { + @Test + public void testExportScannerBatching() throws Throwable { TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - 
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(1) - .build()) - .build(); + .newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(1).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -344,13 +323,11 @@ public class TestImportExport { @Test public void testWithDeletes() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -362,35 +339,26 @@ public class TestImportExport { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - Delete d = new Delete(ROW1, now+3); + Delete d = new Delete(ROW1, now + 3); t.delete(d); d = new Delete(ROW1); - d.addColumns(FAMILYA, QUAL, now+2); + d.addColumns(FAMILYA, QUAL, now + 2); t.delete(d); } - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", name.getMethodName(), + FQ_OUTPUT_DIR, "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + .newBuilder(TableName.valueOf(IMPORT_TABLE)).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { - args = new String[] { - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -400,71 +368,60 @@ public class TestImportExport { Result r = scanner.next(); Cell[] res = r.rawCells(); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); - assertEquals(now+4, res[1].getTimestamp()); - assertEquals(now+3, res[2].getTimestamp()); + assertEquals(now + 4, res[1].getTimestamp()); + assertEquals(now + 3, res[2].getTimestamp()); assertTrue(CellUtil.isDelete(res[3])); - assertEquals(now+2, res[4].getTimestamp()); - assertEquals(now+1, res[5].getTimestamp()); + assertEquals(now + 2, res[4].getTimestamp()); + assertEquals(now + 1, res[5].getTimestamp()); assertEquals(now, res[6].getTimestamp()); } } - @Test public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - 
.setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); exportT.delete(d); - //Add second version of QUAL + // Add second version of QUAL p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now + 5, "s".getBytes()); exportT.put(p); - //Add second Delete family marker - d = new Delete(ROW1, now+7); + // Add second Delete family marker + d = new Delete(ROW1, now + 7); exportT.delete(d); - - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key to + // export }; assertTrue(runExport(args)); final String importTable = name.getMethodName() + "import"; desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + .newBuilder(TableName.valueOf(importTable)).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importT = UTIL.getConnection().getTable(TableName.valueOf(importTable)); - args = new String[] { - importTable, - FQ_OUTPUT_DIR - }; + args = new String[] { importTable, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -475,11 +432,11 @@ public class TestImportExport { Result importedTResult = importedTScanner.next(); ResultScanner exportedTScanner = exportT.getScanner(s); - Result exportedTResult = exportedTScanner.next(); + Result exportedTResult = exportedTScanner.next(); try { Result.compareResults(exportedTResult, importedTResult); } catch (Throwable e) { - fail("Original and imported tables data comparision failed with error:"+e.getMessage()); + fail("Original and imported tables data comparision failed with error:" + e.getMessage()); } finally { exportT.close(); importT.close(); @@ -487,18 +444,16 @@ public class TestImportExport { } /** - * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, + * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, * attempt with invalid values. 
*/ @Test public void testWithFilter() throws Throwable { // Create simple table to export TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + .newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()) + .build(); UTIL.getAdmin().createTable(desc); Table exportTable = UTIL.getConnection().getTable(desc.getTableName()); @@ -521,19 +476,15 @@ public class TestImportExport { // Import to a new table final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importTable = UTIL.getConnection().getTable(desc.getTableName()); args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(), - "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, - FQ_OUTPUT_DIR, - "1000" }; + "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, FQ_OUTPUT_DIR, + "1000" }; assertTrue(runImport(args)); // get the count of the source table for that time range @@ -547,8 +498,8 @@ public class TestImportExport { // need to re-run the export job args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + Filter.class.getName(), - "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1) + "", name.getMethodName(), - FQ_OUTPUT_DIR, "1000" }; + "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1) + "", name.getMethodName(), + FQ_OUTPUT_DIR, "1000" }; assertFalse(runImport(args)); // cleanup @@ -559,8 +510,7 @@ public class TestImportExport { /** * Count the number of keyvalues in the specified table with the given filter * @param table the table to scan - * @return the number of keyvalues found - * @throws IOException + * @return the number of keyvalues found n */ private int getCount(Table table, Filter filter) throws IOException { Scan scan = new Scan(); @@ -581,7 +531,7 @@ public class TestImportExport { public void testImportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -611,29 +561,19 @@ public class TestImportExport { String prefix = "row"; String label_0 = "label_0"; String label_1 = "label_1"; - String[] args = { - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + String[] args = { "table", "outputDir", String.valueOf(version), String.valueOf(startTime), + String.valueOf(endTime), prefix }; Scan scan = ExportUtils.getScanFromCommandLine(UTIL.getConfiguration(), args); assertEquals(version, scan.getMaxVersions()); assertEquals(startTime, scan.getTimeRange().getMin()); assertEquals(endTime, scan.getTimeRange().getMax()); 
assertEquals(true, (scan.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); - String[] argsWithLabels = { - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + assertEquals(0, + Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + String[] argsWithLabels = + { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, "table", + "outputDir", String.valueOf(version), String.valueOf(startTime), String.valueOf(endTime), + prefix }; Configuration conf = new Configuration(UTIL.getConfiguration()); // parse the "-D" options String[] otherArgs = new GenericOptionsParser(conf, argsWithLabels).getRemainingArgs(); @@ -642,7 +582,8 @@ public class TestImportExport { assertEquals(startTime, scanWithLabels.getTimeRange().getMin()); assertEquals(endTime, scanWithLabels.getTimeRange().getMax()); assertEquals(true, (scanWithLabels.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), + Bytes.toBytesBinary(prefix))); assertEquals(2, scanWithLabels.getAuthorizations().getLabels().size()); assertEquals(label_0, scanWithLabels.getAuthorizations().getLabels().get(0)); assertEquals(label_1, scanWithLabels.getAuthorizations().getLabels().get(1)); @@ -655,7 +596,7 @@ public class TestImportExport { public void testExportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -668,11 +609,10 @@ public class TestImportExport { assertEquals(-1, newSecurityManager.getExitCode()); String errMsg = data.toString(); assertTrue(errMsg.contains("Wrong number of arguments:")); - assertTrue(errMsg.contains( - "Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]")); assertTrue( - errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); + errMsg.contains("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]")); + assertTrue(errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); assertTrue(errMsg.contains("-D hbase.mapreduce.include.deleted.rows=true")); assertTrue(errMsg.contains("-D hbase.client.scanner.caching=100")); assertTrue(errMsg.contains("-D hbase.export.scanner.batch=10")); @@ -709,18 +649,18 @@ public class TestImportExport { importer.setup(ctx); Result value = mock(Result.class); KeyValue[] keys = { - new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - Bytes.toBytes("value")), - new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - Bytes.toBytes("value1")) }; + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), + Bytes.toBytes("value")), + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), + Bytes.toBytes("value1")) }; 
when(value.rawCells()).thenReturn(keys); importer.map(new ImmutableBytesWritable(Bytes.toBytes("Key")), value, ctx); } /** - * Test addFilterAndArguments method of Import This method set couple - * parameters into Configuration + * Test addFilterAndArguments method of Import This method set couple parameters into + * Configuration */ @Test public void testAddFilterAndArguments() throws IOException { @@ -732,7 +672,7 @@ public class TestImportExport { Import.addFilterAndArguments(configuration, FilterBase.class, args); assertEquals("org.apache.hadoop.hbase.filter.FilterBase", - configuration.get(Import.FILTER_CLASS_CONF_KEY)); + configuration.get(Import.FILTER_CLASS_CONF_KEY)); assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY)); } @@ -756,7 +696,7 @@ public class TestImportExport { exportTable.put(put); // Run the export - String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"}; + String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000" }; assertTrue(runExport(args)); // Create the table for import @@ -765,41 +705,40 @@ public class TestImportExport { // Register the wal listener for the import table RegionInfo region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() - .getRegions(importTable.getName()).get(0).getRegionInfo(); + .getRegions(importTable.getName()).get(0).getRegionInfo(); TableWALActionListener walListener = new TableWALActionListener(region); WAL wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); wal.registerWALActionsListener(walListener); // Run the import with SKIP_WAL - args = - new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), - importTableName, FQ_OUTPUT_DIR }; + args = new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), + importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is not visisted + // Assert that the wal is not visisted assertTrue(!walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); // Run the import with the default durability option importTableName = name.getMethodName() + "import2"; importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3); region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() - .getRegions(importTable.getName()).get(0).getRegionInfo(); + .getRegions(importTable.getName()).get(0).getRegionInfo(); wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); walListener = new TableWALActionListener(region); wal.registerWALActionsListener(walListener); args = new String[] { importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is visisted + // Assert that the wal is visisted assertTrue(walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); } } /** - * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to - * identify that an entry is written to the Write Ahead Log for the given table. + * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to identify + * that an entry is written to the Write Ahead Log for the given table. 
*/ private static class TableWALActionListener implements WALActionsListener { @@ -812,8 +751,10 @@ public class TestImportExport { @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { - if (logKey.getTableName().getNameAsString().equalsIgnoreCase( - this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { + if ( + logKey.getTableName().getNameAsString() + .equalsIgnoreCase(this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit()) + ) { isVisited = true; } } @@ -824,45 +765,39 @@ public class TestImportExport { } /** - * Add cell tags to delete mutations, run export and import tool and - * verify that tags are present in import table also. + * Add cell tags to delete mutations, run export and import tool and verify that tags are present + * in import table also. * @throws Throwable throws Throwable. */ @Test public void testTagsAddition() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(exportTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); exportT.delete(d); // Run export tool with KeyValueCodecWithTags as Codec. This will ensure that export tool // will use KeyValueCodecWithTags. - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", // This will make sure that codec will encode and decode tags in rpc call. "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key to + // export }; assertTrue(runExport(args)); // Assert tag exists in exportTable @@ -870,23 +805,17 @@ public class TestImportExport { // Create an import table with MetadataController. 
final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); - TableDescriptor importTableDesc = TableDescriptorBuilder - .newBuilder(importTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor importTableDesc = TableDescriptorBuilder.newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(importTableDesc); // Run import tool. args = new String[] { // This will make sure that codec will encode and decode tags in rpc call. "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - importTable.getNameAsString(), - FQ_OUTPUT_DIR - }; + importTable.getNameAsString(), FQ_OUTPUT_DIR }; assertTrue(runImport(args)); // Make sure that tags exists in imported table. checkWhetherTagExists(importTable, true); @@ -909,7 +838,7 @@ public class TestImportExport { } } boolean deleteFound = false; - for (Cell cell: values) { + for (Cell cell : values) { if (PrivateCellUtil.isDelete(cell.getType().getCode())) { deleteFound = true; List tags = PrivateCellUtil.getTags(cell); @@ -929,7 +858,7 @@ public class TestImportExport { } /* - This co-proc will add a cell tag to delete mutation. + * This co-proc will add a cell tag to delete mutation. */ public static class MetadataController implements RegionCoprocessor, RegionObserver { @Override @@ -939,8 +868,7 @@ public class TestImportExport { @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { return; } @@ -955,7 +883,7 @@ public class TestImportExport { } Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); List tags = PrivateCellUtil.getTags(cell); tags.add(sourceOpTag); @@ -973,34 +901,30 @@ public class TestImportExport { } /** - * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string - * This means it will use no Codec. Make sure that we don't return Tags in response. + * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string This means + * it will use no Codec. Make sure that we don't return Tags in response. 
* @throws Exception Exception */ @Test public void testTagsWithEmptyCodec() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptor tableDesc = TableDescriptorBuilder - .newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(tableDesc); Configuration conf = new Configuration(UTIL.getConfiguration()); conf.set(RPC_CODEC_CONF_KEY, ""); conf.set(DEFAULT_CODEC_CLASS, ""); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { - //Add first version of QUAL + Table table = connection.getTable(tableName)) { + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); table.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); table.delete(d); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 83a457ff071..3ce402accd6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,21 +65,20 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithOperationAttributes implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithOperationAttributes.class); + HBaseClassTestRule.forClass(TestImportTSVWithOperationAttributes.class); private static final Logger LOG = - LoggerFactory.getLogger(TestImportTSVWithOperationAttributes.class); + LoggerFactory.getLogger(TestImportTSVWithOperationAttributes.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtility util = new HBaseTestingUtility(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -126,10 +125,10 @@ public class TestImportTSVWithOperationAttributes implements Configurable { // Prepare the arguments required for the test. 
String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001btest=>myvalue\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, true); @@ -142,10 +141,10 @@ public class TestImportTSVWithOperationAttributes implements Configurable { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001btest1=>myvalue\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, false); @@ -153,18 +152,14 @@ public class TestImportTSVWithOperationAttributes implements Configurable { } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. - * @param dataAvailable - * @return The Tool instance used to run the test. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n * + * Any arguments to pass BEFORE inputFile path is appended. n * @return The Tool instance used to + * run the test. */ private Tool doMROnTableTest(HBaseTestingUtility util, String family, String data, String[] args, - int valueMultiplier, boolean dataAvailable) throws Exception { + int valueMultiplier, boolean dataAvailable) throws Exception { String table = args[args.length - 1]; Configuration conf = new Configuration(util.getConfiguration()); @@ -198,12 +193,10 @@ public class TestImportTSVWithOperationAttributes implements Configurable { } /** - * Confirm ImportTsv via data in online table. - * - * @param dataAvailable + * Confirm ImportTsv via data in online table. 
n */ private static void validateTable(Configuration conf, TableName tableName, String family, - int valueMultiplier, boolean dataAvailable) throws IOException { + int valueMultiplier, boolean dataAvailable) throws IOException { LOG.debug("Validating table."); Connection connection = ConnectionFactory.createConnection(conf); @@ -224,9 +217,10 @@ public class TestImportTSVWithOperationAttributes implements Configurable { List kvs = res.listCells(); assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); - assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. verified = true; } @@ -254,7 +248,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable { } public static class OperationAttributesTestController - implements RegionCoprocessor, RegionObserver { + implements RegionCoprocessor, RegionObserver { @Override public Optional getRegionObserver() { @@ -263,10 +257,11 @@ public class TestImportTSVWithOperationAttributes implements Configurable { @Override public void prePut(ObserverContext e, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { Region region = e.getEnvironment().getRegion(); - if (!region.getRegionInfo().isMetaRegion() - && !region.getRegionInfo().getTable().isSystemTable()) { + if ( + !region.getRegionInfo().isMetaRegion() && !region.getRegionInfo().getTable().isSystemTable() + ) { if (put.getAttribute(TEST_ATR_KEY) != null) { LOG.debug("allow any put to happen " + region.getRegionInfo().getRegionNameAsString()); } else { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index b8b6bb81155..4d6ffd8cca0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,20 +55,19 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithTTLs implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithTTLs.class); + HBaseClassTestRule.forClass(TestImportTSVWithTTLs.class); protected static final Logger LOG = LoggerFactory.getLogger(TestImportTSVWithTTLs.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtility util = new HBaseTestingUtility(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. 
*/ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -114,10 +113,9 @@ public class TestImportTSVWithTTLs implements Configurable { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_TTL", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_TTL", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001b1000000\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -125,14 +123,14 @@ public class TestImportTSVWithTTLs implements Configurable { } protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data, - String[] args, int valueMultiplier) throws Exception { + String[] args, int valueMultiplier) throws Exception { TableName table = TableName.valueOf(args[args.length - 1]); Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = + fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); op.write(Bytes.toBytes(data)); op.close(); @@ -171,10 +169,11 @@ public class TestImportTSVWithTTLs implements Configurable { @Override public void prePut(ObserverContext e, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { Region region = e.getEnvironment().getRegion(); - if (!region.getRegionInfo().isMetaRegion() - && !region.getRegionInfo().getTable().isSystemTable()) { + if ( + !region.getRegionInfo().isMetaRegion() && !region.getRegionInfo().getTable().isSystemTable() + ) { // The put carries the TTL attribute if (put.getTTL() != Long.MAX_VALUE) { return; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index cad5b49bd6c..f4281e6b708 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -75,21 +75,20 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithVisibilityLabels implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithVisibilityLabels.class); + HBaseClassTestRule.forClass(TestImportTSVWithVisibilityLabels.class); private static final Logger LOG = - LoggerFactory.getLogger(TestImportTSVWithVisibilityLabels.class); + LoggerFactory.getLogger(TestImportTSVWithVisibilityLabels.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtility util = new HBaseTestingUtility(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -124,10 +123,10 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { public static void provisionCluster() throws Exception { conf = util.getConfiguration(); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); - conf.set("hbase.superuser", "admin,"+User.getCurrent().getName()); + conf.set("hbase.superuser", "admin," + User.getCurrent().getName()); VisibilityTestUtil.enableVisiblityLabels(conf); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); util.startMiniCluster(); // Wait for the labels table to become available util.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000); @@ -136,20 +135,20 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { private static void createLabels() throws IOException, InterruptedException { PrivilegedExceptionAction action = - new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - LOG.info("Added labels "); - } catch (Throwable t) { - LOG.error("Error in adding labels" , t); - throw new IOException(t); + new PrivilegedExceptionAction() { + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + LOG.info("Added labels "); + } catch (Throwable t) { + LOG.error("Error in adding labels", t); + throw new IOException(t); + } + return null; } - return null; - } - }; + }; SUPERUSER.runAs(action); } @@ -164,10 +163,9 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { // Prepare the arguments required for the test. 
String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -180,9 +178,9 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -231,11 +229,9 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = new String[] { - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -246,18 +242,14 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { public void testBulkOutputWithTsvImporterTextMapper() throws Exception { final TableName table = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); String FAMILY = "FAM"; - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - table.getNameAsString() - }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + table.getNameAsString() }; String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n"; doMROnTableTest(util, FAMILY, data, args, 4); util.deleteTable(table); @@ -269,11 +261,10 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -285,14 +276,13 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = - new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = - "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; + "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, 2); util.deleteTable(tableName); @@ -303,49 +293,42 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = - "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; + "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, 2); util.deleteTable(tableName); } protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data, - String[] args, int valueMultiplier) throws Exception { + String[] args, int valueMultiplier) throws Exception { return doMROnTableTest(util, family, data, args, valueMultiplier, -1); } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. - * + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n * + * Any arguments to pass BEFORE inputFile path is appended. * @param expectedKVCount Expected KV count. pass -1 to skip the kvcount check - * * @return The Tool instance used to run the test. 
*/ protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data, - String[] args, int valueMultiplier,int expectedKVCount) throws Exception { + String[] args, int valueMultiplier, int expectedKVCount) throws Exception { TableName table = TableName.valueOf(args[args.length - 1]); Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = + fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -380,10 +363,8 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { } } LOG.debug("validating the table " + createdHFiles); - if (createdHFiles) - validateHFiles(fs, outputPath, family,expectedKVCount); - else - validateTable(conf, table, family, valueMultiplier); + if (createdHFiles) validateHFiles(fs, outputPath, family, expectedKVCount); + else validateTable(conf, table, family, valueMultiplier); if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) { LOG.debug("Deleting test subdirectory"); @@ -396,7 +377,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { * Confirm ImportTsv via HFiles on fs. */ private static void validateHFiles(FileSystem fs, String outputPath, String family, - int expectedKVCount) throws IOException { + int expectedKVCount) throws IOException { // validate number and content of output columns LOG.debug("Validating HFiles."); @@ -410,20 +391,21 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { String cf = elements[elements.length - 1]; foundFamilies.add(cf); assertTrue(String.format( - "HFile ouput contains a column family (%s) not present in input families (%s)", cf, - configFamilies), configFamilies.contains(cf)); + "HFile ouput contains a column family (%s) not present in input families (%s)", cf, + configFamilies), configFamilies.contains(cf)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), - hfile.getLen() > 0); + hfile.getLen() > 0); if (expectedKVCount > -1) { actualKVCount += getKVCountFromHfile(fs, hfile.getPath()); } } } if (expectedKVCount > -1) { - assertTrue(String.format( - "KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount, - expectedKVCount), actualKVCount == expectedKVCount); + assertTrue( + String.format("KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", + actualKVCount, expectedKVCount), + actualKVCount == expectedKVCount); } } @@ -431,7 +413,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { * Confirm ImportTsv via data in online table. */ private static void validateTable(Configuration conf, TableName tableName, String family, - int valueMultiplier) throws IOException { + int valueMultiplier) throws IOException { LOG.debug("Validating table."); Table table = util.getConnection().getTable(tableName); @@ -443,7 +425,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { Scan scan = new Scan(); // Scan entire family. 
scan.addFamily(Bytes.toBytes(family)); - scan.setAuthorizations(new Authorizations("secret","private")); + scan.setAuthorizations(new Authorizations("secret", "private")); ResultScanner resScanner = table.getScanner(scan); Result[] next = resScanner.next(5); assertEquals(1, next.length); @@ -454,8 +436,8 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. } verified = true; @@ -477,9 +459,8 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { /** * Method returns the total KVs in given hfile * @param fs File System - * @param p HFile path - * @return KV count in the given hfile - * @throws IOException + * @param p HFile path + * @return KV count in the given hfile n */ private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException { Configuration conf = util.getConfiguration(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index 3eee930d0f7..51196d95370 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,12 +71,12 @@ import org.junit.rules.ExpectedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestImportTsv implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTsv.class); + HBaseClassTestRule.forClass(TestImportTsv.class); private static final Logger LOG = LoggerFactory.getLogger(TestImportTsv.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); @@ -143,11 +143,10 @@ public class TestImportTsv implements Configurable { } @Test - public void testMROnTableWithCustomMapper() - throws Exception { + public void testMROnTableWithCustomMapper() throws Exception { util.createTable(tn, FAMILY); args.put(ImportTsv.MAPPER_CONF_KEY, - "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); + "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); doMROnTableTest(null, 3); util.deleteTable(tn); @@ -189,39 +188,33 @@ public class TestImportTsv implements Configurable { @Test public void testJobConfigurationsWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - tn.getNameAsString(), - INPUT_FILE - }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); - assertTrue(job.getReducerClass().equals(TextSortReducer.class)); - assertTrue(job.getMapOutputValueClass().equals(Text.class)); - return 0; - } - }, args)); + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), tn.getNameAsString(), + INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); + assertTrue(job.getReducerClass().equals(TextSortReducer.class)); + assertTrue(job.getMapOutputValueClass().equals(Text.class)); + return 0; + } + }, args)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @Test public void testBulkOutputWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.MAPPER_CONF_KEY, "org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); String data = "KEY\u001bVALUE4\u001bVALUE8\n"; @@ -239,53 +232,49 @@ public class TestImportTsv implements Configurable { conf.set(ImportTsv.CREATE_TABLE_CONF_KEY, "no"); exception.expect(TableNotFoundException.class); assertEquals("running test job configuration failed.", 0, - ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { - @Override public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testMRWithoutAnExistingTable() throws Exception { - String[] args = - new String[] { tn.getNameAsString(), "/inputFile" }; + String[] args = new String[] { tn.getNameAsString(), "/inputFile" }; exception.expect(TableNotFoundException.class); - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new 
Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testJobConfigurationsWithDryMode() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. - String[] argsArray = new String[] { - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + String[] argsArray = + new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", - tn.getNameAsString(), - INPUT_FILE }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); - return 0; - } - }, argsArray)); + "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", tn.getNameAsString(), INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); + return 0; + } + }, argsArray)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @@ -301,8 +290,7 @@ public class TestImportTsv implements Configurable { } /** - * If table is not present in non-bulk mode, dry run should fail just like - * normal mode. + * If table is not present in non-bulk mode, dry run should fail just like normal mode. */ @Test public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception { @@ -311,7 +299,8 @@ public class TestImportTsv implements Configurable { doMROnTableTest(null, 1); } - @Test public void testDryModeWithBulkOutputAndTableExists() throws Exception { + @Test + public void testDryModeWithBulkOutputAndTableExists() throws Exception { util.createTable(tn, FAMILY); // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); @@ -324,12 +313,11 @@ public class TestImportTsv implements Configurable { } /** - * If table is not present in bulk mode and create.table is not set to yes, - * import should fail with TableNotFoundException. + * If table is not present in bulk mode and create.table is not set to yes, import should fail + * with TableNotFoundException. */ @Test - public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws - Exception { + public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws Exception { // Prepare the arguments required for the test. 
Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); @@ -382,31 +370,30 @@ public class TestImportTsv implements Configurable { } private Tool doMROnTableTest(String data, int valueMultiplier) throws Exception { - return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier,-1); + return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier, -1); } - protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, - String family, String data, Map args) throws Exception { - return doMROnTableTest(util, table, family, data, args, 1,-1); + protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, String family, + String data, Map args) throws Exception { + return doMROnTableTest(util, table, family, data, args, 1, -1); } /** - * Run an ImportTsv job and perform basic validation on the results. - * Returns the ImportTsv Tool instance so that other tests can - * inspect it for further validation as necessary. This method is static to - * insure non-reliance on instance's util/conf facilities. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. * @param args Any arguments to pass BEFORE inputFile path is appended. * @return The Tool instance used to run the test. */ - protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, - String family, String data, Map args, int valueMultiplier,int expectedKVCount) - throws Exception { + protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, String family, + String data, Map args, int valueMultiplier, int expectedKVCount) + throws Exception { Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified( - new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = + fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -440,15 +427,14 @@ public class TestImportTsv implements Configurable { // Perform basic validation. If the input args did not include // ImportTsv.BULK_OUTPUT_CONF_KEY then validate data in the table. // Otherwise, validate presence of hfiles. 
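doMROnTableTest above first materializes the TSV input on the test filesystem before launching the tool, using the escape character \u001b as the field separator. The sketch below shows just that step in isolation against a generic Hadoop FileSystem; the directory and file names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.Bytes;

public class TsvInputWriterSketch {
  // Writes one TSV line ("\u001b"-separated, as in the test data) and returns the qualified path.
  static Path writeInput(Configuration conf, Path dir) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    Path inputPath = fs.makeQualified(new Path(dir, "input.dat"));
    try (FSDataOutputStream out = fs.create(inputPath, true)) {
      out.write(Bytes.toBytes("KEY\u001bVALUE1\u001bVALUE2\n"));
    }
    return inputPath;
  }
}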
- boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) && - "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); + boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) + && "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); if (args.containsKey(ImportTsv.BULK_OUTPUT_CONF_KEY)) { if (isDryRun) { assertFalse(String.format("Dry run mode, %s should not have been created.", - ImportTsv.BULK_OUTPUT_CONF_KEY), - fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); + ImportTsv.BULK_OUTPUT_CONF_KEY), fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); } else { - validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family,expectedKVCount); + validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family, expectedKVCount); } } else { validateTable(conf, table, family, valueMultiplier, isDryRun); @@ -464,8 +450,8 @@ public class TestImportTsv implements Configurable { /** * Confirm ImportTsv via data in online table. */ - private static void validateTable(Configuration conf, TableName tableName, - String family, int valueMultiplier, boolean isDryRun) throws IOException { + private static void validateTable(Configuration conf, TableName tableName, String family, + int valueMultiplier, boolean isDryRun) throws IOException { LOG.debug("Validating table."); Connection connection = ConnectionFactory.createConnection(conf); @@ -487,7 +473,8 @@ public class TestImportTsv implements Configurable { assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. } if (isDryRun) { @@ -516,7 +503,7 @@ public class TestImportTsv implements Configurable { * Confirm ImportTsv via HFiles on fs. 
*/ private static void validateHFiles(FileSystem fs, String outputPath, String family, - int expectedKVCount) throws IOException { + int expectedKVCount) throws IOException { // validate number and content of output columns LOG.debug("Validating HFiles."); Set configFamilies = new HashSet<>(); @@ -527,14 +514,11 @@ public class TestImportTsv implements Configurable { String[] elements = cfStatus.getPath().toString().split(Path.SEPARATOR); String cf = elements[elements.length - 1]; foundFamilies.add(cf); - assertTrue( - String.format( - "HFile output contains a column family (%s) not present in input families (%s)", - cf, configFamilies), - configFamilies.contains(cf)); + assertTrue(String.format( + "HFile output contains a column family (%s) not present in input families (%s)", cf, + configFamilies), configFamilies.contains(cf)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { - assertTrue( - String.format("HFile %s appears to contain no data.", hfile.getPath()), + assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), hfile.getLen() > 0); // count the number of KVs from all the hfiles if (expectedKVCount > -1) { @@ -543,20 +527,20 @@ public class TestImportTsv implements Configurable { } } assertTrue(String.format("HFile output does not contain the input family '%s'.", family), - foundFamilies.contains(family)); + foundFamilies.contains(family)); if (expectedKVCount > -1) { - assertTrue(String.format( - "KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount, - expectedKVCount), actualKVCount == expectedKVCount); + assertTrue( + String.format("KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", + actualKVCount, expectedKVCount), + actualKVCount == expectedKVCount); } } /** * Method returns the total KVs in given hfile * @param fs File System - * @param p HFile path - * @return KV count in the given hfile - * @throws IOException + * @param p HFile path + * @return KV count in the given hfile n */ private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException { Configuration conf = util.getConfiguration(); @@ -571,4 +555,3 @@ public class TestImportTsv implements Configurable { return count; } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java index a0d1cf7b6cf..adb0589c980 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,11 +44,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; /** * Tests for {@link TsvParser}. 
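validateHFiles above walks the bulk-output directory, treating each child directory as a column family and each file under it as an HFile produced by the job. A simplified sketch of that directory walk is below; it uses Path.getName() rather than splitting the path string on Path.SEPARATOR, which yields the same last component, and the method name is illustrative.

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BulkOutputLayoutSketch {
  // Lists the column-family directories that a bulk-load output directory contains.
  static Set<String> listFamilies(FileSystem fs, Path bulkOutputPath) throws Exception {
    Set<String> families = new HashSet<>();
    for (FileStatus cfStatus : fs.listStatus(bulkOutputPath)) {
      // Each child directory of the bulk output path is named after a column family.
      families.add(cfStatus.getPath().getName());
    }
    return families;
  }
}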
*/ -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestImportTsvParser { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTsvParser.class); + HBaseClassTestRule.forClass(TestImportTsvParser.class); private void assertBytesEquals(byte[] a, byte[] b) { assertEquals(Bytes.toStringBinary(a), Bytes.toStringBinary(b)); @@ -58,11 +58,11 @@ public class TestImportTsvParser { ArrayList parsedCols = new ArrayList<>(); for (int i = 0; i < parsed.getColumnCount(); i++) { parsedCols.add(Bytes.toString(parsed.getLineBytes(), parsed.getColumnOffset(i), - parsed.getColumnLength(i))); + parsed.getColumnLength(i))); } if (!Iterables.elementsEqual(parsedCols, expected)) { fail("Expected: " + Joiner.on(",").join(expected) + "\n" + "Got:" - + Joiner.on(",").join(parsedCols)); + + Joiner.on(",").join(parsedCols)); } } @@ -105,8 +105,8 @@ public class TestImportTsvParser { assertTrue(parser.hasTimestamp()); assertEquals(2, parser.getTimestampKeyColumnIndex()); - parser = new TsvParser("HBASE_ROW_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ATTRIBUTES_KEY", - "\t"); + parser = + new TsvParser("HBASE_ROW_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ATTRIBUTES_KEY", "\t"); assertNull(parser.getFamily(0)); assertNull(parser.getQualifier(0)); assertBytesEquals(Bytes.toBytes("col1"), parser.getFamily(1)); @@ -118,8 +118,8 @@ public class TestImportTsvParser { assertEquals(2, parser.getTimestampKeyColumnIndex()); assertEquals(4, parser.getAttributesKeyColumnIndex()); - parser = new TsvParser("HBASE_ATTRIBUTES_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ROW_KEY", - "\t"); + parser = + new TsvParser("HBASE_ATTRIBUTES_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ROW_KEY", "\t"); assertNull(parser.getFamily(0)); assertNull(parser.getQualifier(0)); assertBytesEquals(Bytes.toBytes("col1"), parser.getFamily(1)); @@ -293,7 +293,7 @@ public class TestImportTsvParser { assertEquals(6, parse.getAttributeKeyOffset()); String[] attr = parse.getIndividualAttributes(); int i = 0; - for (String str : attr) { + for (String str : attr) { assertEquals(("key" + i + "=>" + "value" + i), str); i++; } @@ -302,7 +302,7 @@ public class TestImportTsvParser { @Test public void testTsvParserWithCellVisibilityCol() throws BadTsvLineException { TsvParser parser = new TsvParser( - "HBASE_ROW_KEY,col_a,HBASE_TS_KEY,HBASE_ATTRIBUTES_KEY,HBASE_CELL_VISIBILITY", "\t"); + "HBASE_ROW_KEY,col_a,HBASE_TS_KEY,HBASE_ATTRIBUTES_KEY,HBASE_CELL_VISIBILITY", "\t"); assertEquals(0, parser.getRowKeyColumnIndex()); assertEquals(4, parser.getCellVisibilityColumnIndex()); byte[] line = Bytes.toBytes("rowkey\tval_a\t1234\tkey=>value\tPRIVATE&SECRET"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java index d1f48bb299e..87461c2735f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
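The parser tests above exercise ImportTsv's TsvParser with column specifications that mix real family:qualifier columns and the special HBASE_ROW_KEY / HBASE_TS_KEY markers. The sketch below mirrors that usage; it assumes it is compiled into the org.apache.hadoop.hbase.mapreduce package (as the test is) so the nested TsvParser class is visible, and the printed indexes follow the assertions in the diff.

package org.apache.hadoop.hbase.mapreduce;

import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser;
import org.apache.hadoop.hbase.util.Bytes;

public class TsvParserSketch {
  public static void main(String[] args) throws Exception {
    // Column spec: row key, one real column, then the special timestamp column.
    TsvParser parser = new TsvParser("HBASE_ROW_KEY,col1:scol1,HBASE_TS_KEY", "\t");
    System.out.println("row key index:   " + parser.getRowKeyColumnIndex());       // 0
    System.out.println("timestamp index: " + parser.getTimestampKeyColumnIndex()); // 2
    System.out.println("family of col 1: " + Bytes.toString(parser.getFamily(1))); // col1
  }
}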
See the NOTICE file * distributed with this work for additional information @@ -46,12 +46,12 @@ public class TestJarFinder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJarFinder.class); + HBaseClassTestRule.forClass(TestJarFinder.class); @Test public void testJar() throws Exception { - //picking a class that is for sure in a JAR in the classpath + // picking a class that is for sure in a JAR in the classpath String jar = JarFinder.getJar(LoggerFactory.class); Assert.assertTrue(new File(jar).exists()); } @@ -59,8 +59,7 @@ public class TestJarFinder { private static void delete(File file) throws IOException { if (file.getAbsolutePath().length() < 5) { throw new IllegalArgumentException( - MessageFormat.format("Path [{0}] is too short, not deleting", - file.getAbsolutePath())); + MessageFormat.format("Path [{0}] is too short, not deleting", file.getAbsolutePath())); } if (file.exists()) { if (file.isDirectory()) { @@ -73,16 +72,15 @@ public class TestJarFinder { } if (!file.delete()) { throw new RuntimeException( - MessageFormat.format("Could not delete path [{0}]", - file.getAbsolutePath())); + MessageFormat.format("Could not delete path [{0}]", file.getAbsolutePath())); } } } @Test public void testExpandedClasspath() throws Exception { - //picking a class that is for sure in a directory in the classpath - //in this case the JAR is created on the fly + // picking a class that is for sure in a directory in the classpath + // in this case the JAR is created on the fly String jar = JarFinder.getJar(TestJarFinder.class); Assert.assertTrue(new File(jar).exists()); } @@ -90,7 +88,7 @@ public class TestJarFinder { @Test public void testExistingManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testExistingManifest"); + TestJarFinder.class.getName() + "-testExistingManifest"); delete(dir); dir.mkdirs(); @@ -109,8 +107,7 @@ public class TestJarFinder { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } @@ -118,7 +115,7 @@ public class TestJarFinder { @Test public void testNoManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testNoManifest"); + TestJarFinder.class.getName() + "-testNoManifest"); delete(dir); dir.mkdirs(); File propsFile = new File(dir, "props.properties"); @@ -128,8 +125,7 @@ public class TestJarFinder { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index b4a53ea65e8..e4a6cda8cab 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,25 +30,24 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; /** - * Tests various scan start and stop row scenarios. This is set in a scan and - * tested in a MapReduce job to see if that is handed over and done properly - * too. + * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce + * job to see if that is handed over and done properly too. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableInputFormat.class); + HBaseClassTestRule.forClass(TestMultiTableInputFormat.class); @BeforeClass public static void setupLogging() { TEST_UTIL.enableDebug(MultiTableInputFormat.class); - } + } - @Override + @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, job); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 906abca05c9..3fce8cba2e9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,21 +66,20 @@ import org.mockito.stubbing.Answer; /** * Tests of MultiTableInputFormatBase. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestMultiTableInputFormatBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableInputFormatBase.class); + HBaseClassTestRule.forClass(TestMultiTableInputFormatBase.class); - @Rule public final TestName name = new TestName(); + @Rule + public final TestName name = new TestName(); /** - * Test getSplits only puts up one Connection. - * In past it has put up many Connections. Each Connection setup comes with a fresh new cache - * so we have to do fresh hit on hbase:meta. Should only do one Connection when doing getSplits - * even if a MultiTableInputFormat. - * @throws IOException + * Test getSplits only puts up one Connection. In past it has put up many Connections. Each + * Connection setup comes with a fresh new cache so we have to do fresh hit on hbase:meta. Should + * only do one Connection when doing getSplits even if a MultiTableInputFormat. 
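initJob above hands a list of Scans to TableMapReduceUtil so that one MapReduce job can read several tables. Outside the test base class, each Scan must carry its table name as a scan attribute; the sketch below shows that wiring with the stock IdentityTableMapper, and the table names are placeholders.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class MultiTableScanJobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "multi-table-scan");

    // One Scan per source table; the table name rides along as a scan attribute.
    List<Scan> scans = new ArrayList<>();
    for (String table : new String[] { "tableA", "tableB" }) { // hypothetical table names
      Scan scan = new Scan();
      scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, TableName.valueOf(table).getName());
      scans.add(scan);
    }

    // Same overload the reformatted initJob() above uses, with a stock identity mapper.
    TableMapReduceUtil.initTableMapperJob(scans, IdentityTableMapper.class,
      ImmutableBytesWritable.class, Result.class, job);
  }
}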
n */ @Test public void testMRSplitsConnectionCount() throws IOException { @@ -88,8 +87,7 @@ public class TestMultiTableInputFormatBase { MultiTableInputFormatBase mtif = new MultiTableInputFormatBase() { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) - throws IOException, InterruptedException { + TaskAttemptContext context) throws IOException, InterruptedException { return super.createRecordReader(split, context); } }; @@ -124,7 +122,7 @@ public class TestMultiTableInputFormatBase { private final Configuration configuration; static final AtomicInteger creations = new AtomicInteger(0); - MRSplitsConnection (Configuration conf, ExecutorService pool, User user) throws IOException { + MRSplitsConnection(Configuration conf, ExecutorService pool, User user) throws IOException { this.configuration = conf; creations.incrementAndGet(); } @@ -157,33 +155,27 @@ public class TestMultiTableInputFormatBase { @Override public RegionLocator getRegionLocator(final TableName tableName) throws IOException { // Make up array of start keys. We start off w/ empty byte array. - final byte [][] startKeys = new byte [][] {HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz")}; + final byte[][] startKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaaa"), + Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), + Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), + Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), + Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), + Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("zzz") }; // Make an array of end keys. We end with the empty byte array. - final byte [][] endKeys = new byte[][] { - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz"), - HConstants.EMPTY_BYTE_ARRAY}; + final byte[][] endKeys = + new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), + Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"), + Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), + Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), + Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), + Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), HConstants.EMPTY_BYTE_ARRAY }; // Now make a map of start keys to HRegionLocations. Let the server namber derive from // the start key. 
- final Map map = - new TreeMap(Bytes.BYTES_COMPARATOR); - for (byte [] startKey: startKeys) { - HRegionLocation hrl = new HRegionLocation( - RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), + final Map map = + new TreeMap(Bytes.BYTES_COMPARATOR); + for (byte[] startKey : startKeys) { + HRegionLocation hrl = + new HRegionLocation(RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), ServerName.valueOf(Bytes.toString(startKey), 0, 0)); map.put(startKey, hrl); } @@ -191,19 +183,20 @@ public class TestMultiTableInputFormatBase { final List locations = new ArrayList(map.values()); // Now make a RegionLocator mock backed by the abpve map and list of locations. RegionLocator mockedRegionLocator = Mockito.mock(RegionLocator.class); - Mockito.when(mockedRegionLocator.getRegionLocation(Mockito.any(byte [].class), - Mockito.anyBoolean())). - thenAnswer(new Answer() { - @Override - public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; - return map.get(key); - } - }); + Mockito + .when( + mockedRegionLocator.getRegionLocation(Mockito.any(byte[].class), Mockito.anyBoolean())) + .thenAnswer(new Answer() { + @Override + public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; + return map.get(key); + } + }); Mockito.when(mockedRegionLocator.getAllRegionLocations()).thenReturn(locations); - Mockito.when(mockedRegionLocator.getStartEndKeys()). - thenReturn(new Pair(startKeys, endKeys)); + Mockito.when(mockedRegionLocator.getStartEndKeys()) + .thenReturn(new Pair(startKeys, endKeys)); Mockito.when(mockedRegionLocator.getName()).thenReturn(tableName); return mockedRegionLocator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java index 89e7b49eb69..023b8404bcb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
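TestMultiTableInputFormatBase builds its fixture entirely from Mockito mocks, as the rewrapped stubbing above shows. A much smaller sketch of the same idea, a RegionLocator mock that reports a fixed set of region locations, is below; the server names and the single split key are made up.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;
import org.mockito.Mockito;

public class MockRegionLocatorSketch {
  // Builds a RegionLocator mock with two regions: [, "mmm") and ["mmm", ).
  static RegionLocator mockLocator(TableName tableName) throws Exception {
    HRegionLocation first = new HRegionLocation(
      RegionInfoBuilder.newBuilder(tableName).setStartKey(HConstants.EMPTY_BYTE_ARRAY)
        .setEndKey(Bytes.toBytes("mmm")).build(),
      ServerName.valueOf("rs1", 16020, 1L)); // hypothetical server
    HRegionLocation second = new HRegionLocation(
      RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("mmm")).build(),
      ServerName.valueOf("rs2", 16020, 1L)); // hypothetical server
    List<HRegionLocation> locations = Arrays.asList(first, second);

    RegionLocator locator = Mockito.mock(RegionLocator.class);
    Mockito.when(locator.getAllRegionLocations()).thenReturn(locations);
    Mockito.when(locator.getName()).thenReturn(tableName);
    return locator;
  }
}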
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTest @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); protected Path restoreDir; @@ -72,9 +72,9 @@ public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTest @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil - .initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), + ScanMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, + restoreDir); } protected Map> getSnapshotScanMapping(final List scans) { @@ -83,7 +83,7 @@ public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTest @Override public String apply(Scan input) { return snapshotNameForTable( - Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); + Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); } }).asMap(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java index 49f6ed41b31..adf6744406d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ public class TestMultiTableSnapshotInputFormatImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormatImpl.class); + HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormatImpl.class); private MultiTableSnapshotInputFormatImpl subject; private Map> snapshotScans; @@ -71,17 +71,15 @@ public class TestMultiTableSnapshotInputFormatImpl { // feels weird to introduce a RestoreSnapshotHelperFactory and inject that, which would // probably be the more "pure" // way of doing things. This is the lesser of two evils, perhaps? - doNothing().when(this.subject). 
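initJob above uses the snapshot flavour of the same helper: each snapshot is restored under a temporary directory and scanned from there instead of reading the live tables. A hedged sketch of that call follows; the snapshot name, restore directory and identity mapper are placeholders chosen for brevity.

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanJobSketch {
  static Job buildJob(Configuration conf) throws Exception {
    Job job = Job.getInstance(conf, "snapshot-scans");
    // One or more scans per snapshot; the snapshots are restored under restoreDir before reading.
    Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
    snapshotScans.put("snapshot1", Arrays.asList(new Scan())); // hypothetical snapshot name
    Path restoreDir = new Path("/tmp/restore-dir");            // hypothetical restore location
    TableMapReduceUtil.initMultiTableSnapshotMapperJob(snapshotScans, IdentityTableMapper.class,
      ImmutableBytesWritable.class, Result.class, job, true, restoreDir);
    return job;
  }
}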
- restoreSnapshot(any(), any(), any(), - any(), any()); + doNothing().when(this.subject).restoreSnapshot(any(), any(), any(), any(), any()); this.conf = new Configuration(); this.rootDir = new Path("file:///test-root-dir"); CommonFSUtils.setRootDir(conf, rootDir); - this.snapshotScans = ImmutableMap.>of("snapshot1", - ImmutableList.of(new Scan(Bytes.toBytes("1"), Bytes.toBytes("2"))), "snapshot2", - ImmutableList.of(new Scan(Bytes.toBytes("3"), Bytes.toBytes("4")), - new Scan(Bytes.toBytes("5"), Bytes.toBytes("6")))); + this.snapshotScans = ImmutableMap.> of("snapshot1", + ImmutableList.of(new Scan(Bytes.toBytes("1"), Bytes.toBytes("2"))), "snapshot2", + ImmutableList.of(new Scan(Bytes.toBytes("3"), Bytes.toBytes("4")), + new Scan(Bytes.toBytes("5"), Bytes.toBytes("6")))); this.restoreDir = new Path(CommonFSUtils.getRootDir(conf), "restore-dir"); @@ -91,8 +89,8 @@ public class TestMultiTableSnapshotInputFormatImpl { subject.setInput(this.conf, snapshotScans, restoreDir); } - public Map> toScanWithEquals( - Map> snapshotScans) throws IOException { + public Map> + toScanWithEquals(Map> snapshotScans) throws IOException { Map> rtn = Maps.newHashMap(); for (Map.Entry> entry : snapshotScans.entrySet()) { @@ -114,7 +112,6 @@ public class TestMultiTableSnapshotInputFormatImpl { /** * Creates a new instance of this class while copying all values. - * * @param scan The scan instance to copy from. * @throws java.io.IOException When copying the values fails. */ @@ -129,8 +126,8 @@ public class TestMultiTableSnapshotInputFormatImpl { return false; } ScanWithEquals otherScan = (ScanWithEquals) obj; - return Objects.equals(this.startRow, otherScan.startRow) && Objects - .equals(this.stopRow, otherScan.stopRow); + return Objects.equals(this.startRow, otherScan.startRow) + && Objects.equals(this.stopRow, otherScan.stopRow); } @Override @@ -140,9 +137,8 @@ public class TestMultiTableSnapshotInputFormatImpl { @Override public String toString() { - return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects. - toStringHelper(this).add("startRow", startRow) - .add("stopRow", stopRow).toString(); + return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects.toStringHelper(this) + .add("startRow", startRow).add("stopRow", stopRow).toString(); } } @@ -177,7 +173,7 @@ public class TestMultiTableSnapshotInputFormatImpl { for (Path snapshotDir : restoreDirs.values()) { assertEquals("Expected " + snapshotDir + " to be a child of " + restoreDir, restoreDir, - snapshotDir.getParent()); + snapshotDir.getParent()); } } @@ -189,7 +185,7 @@ public class TestMultiTableSnapshotInputFormatImpl { for (Map.Entry entry : snapshotDirs.entrySet()) { verify(this.subject).restoreSnapshot(eq(this.conf), eq(entry.getKey()), eq(this.rootDir), - eq(entry.getValue()), any()); + eq(entry.getValue()), any()); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index dacc15c74b2..647f243aed0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,33 +53,31 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestMultithreadedTableMapper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultithreadedTableMapper.class); + HBaseClassTestRule.forClass(TestMultithreadedTableMapper.class); private static final Logger LOG = LoggerFactory.getLogger(TestMultithreadedTableMapper.class); - private static final HBaseTestingUtility UTIL = - new HBaseTestingUtility(); + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); static final TableName MULTI_REGION_TABLE_NAME = TableName.valueOf("mrtest"); static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - static final int NUMBER_OF_THREADS = 10; + static final int NUMBER_OF_THREADS = 10; @BeforeClass public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); UTIL.startMiniCluster(); - Table table = - UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, - OUTPUT_FAMILY }); + Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, + new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME); } @@ -92,29 +90,21 @@ public class TestMultithreadedTableMapper { /** * Pass the given key and processed record reduce */ - public static class ProcessContentsMapper - extends TableMapper { + public static class ProcessContentsMapper extends TableMapper { /** - * Pass the key, and reversed value to reduce - * - * @param key - * @param value - * @param context - * @throws IOException + * Pass the key, and reversed value to reduce nnnn */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) - throws IOException, InterruptedException { + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. 
Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, INPUT_FAMILY)); @@ -128,19 +118,16 @@ public class TestMultithreadedTableMapper { } /** - * Test multithreadedTableMappper map/reduce against a multi-region table - * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * Test multithreadedTableMappper map/reduce against a multi-region table nnn */ @Test public void testMultithreadedTableMapper() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { runTestOnTable(UTIL.getConnection().getTable(MULTI_REGION_TABLE_NAME)); } private void runTestOnTable(Table table) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { Job job = null; try { LOG.info("Before map/reduce startup"); @@ -148,15 +135,12 @@ public class TestMultithreadedTableMapper { job.setNumReduceTasks(1); Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); - TableMapReduceUtil.initTableMapperJob( - table.getName(), scan, - MultithreadedTableMapper.class, ImmutableBytesWritable.class, - Put.class, job); + TableMapReduceUtil.initTableMapperJob(table.getName(), scan, MultithreadedTableMapper.class, + ImmutableBytesWritable.class, Put.class, job); MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class); MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS); - TableMapReduceUtil.initTableReducerJob( - table.getName().getNameAsString(), - IdentityTableReducer.class, job); + TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(), + IdentityTableReducer.class, job); FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + table.getName()); assertTrue(job.waitForCompletion(true)); @@ -166,8 +150,7 @@ public class TestMultithreadedTableMapper { } finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -184,8 +167,8 @@ public class TestMultithreadedTableMapper { verified = true; break; } catch (NullPointerException e) { - // If here, a cell was empty. Presume its because updates came in - // after the scanner had been opened. Wait a while and retry. + // If here, a cell was empty. Presume its because updates came in + // after the scanner had been opened. Wait a while and retry. LOG.debug("Verification attempt failed: " + e.getMessage()); } try { @@ -199,15 +182,11 @@ public class TestMultithreadedTableMapper { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. - * - * @param table Table to scan. - * @throws IOException - * @throws NullPointerException if we failed to find a cell value + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. + * @param table Table to scan. 
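The job wiring above is the interesting part of this test: MultithreadedTableMapper is registered as the job's mapper, and the real per-row logic is handed to it via setMapperClass, so rows from each region split are processed by a small thread pool. A self-contained sketch of that wiring is below; CopyMapper, the family/qualifier names and the thread count are illustrative, not the test's own mapper.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
import org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;

public class MultithreadedMapperJobSketch {

  // Trivial per-row mapper run inside MultithreadedTableMapper's thread pool.
  public static class CopyMapper extends TableMapper<ImmutableBytesWritable, Put> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
      throws IOException, InterruptedException {
      Put put = new Put(key.get());
      // Copy the first cell's value into a hypothetical text:copy column.
      put.addColumn(Bytes.toBytes("text"), Bytes.toBytes("copy"), value.value());
      context.write(key, put);
    }
  }

  static Job buildJob(TableName table) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "multithreaded-mapper");
    Scan scan = new Scan();
    // The outer mapper is MultithreadedTableMapper; the real work is delegated to CopyMapper.
    TableMapReduceUtil.initTableMapperJob(table, scan, MultithreadedTableMapper.class,
      ImmutableBytesWritable.class, Put.class, job);
    MultithreadedTableMapper.setMapperClass(job, CopyMapper.class);
    MultithreadedTableMapper.setNumberOfThreads(job, 10);
    TableMapReduceUtil.initTableReducerJob(table.getNameAsString(), IdentityTableReducer.class,
      job);
    return job;
  }
}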
n * @throws NullPointerException if we failed to find a cell value */ - private void verifyAttempt(final Table table) - throws IOException, NullPointerException { + private void verifyAttempt(final Table table) throws IOException, NullPointerException { Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); scan.addFamily(OUTPUT_FAMILY); @@ -215,37 +194,34 @@ public class TestMultithreadedTableMapper { try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (LOG.isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); - }else if (count == 1) { + } else if (count == 1) { secondValue = CellUtil.cloneValue(kv); - }else if (count == 2) { + } else if (count == 2) { break; } count++; } String first = ""; if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } first = Bytes.toString(firstValue); String second = ""; if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -254,9 +230,9 @@ public class TestMultithreadedTableMapper { second = Bytes.toString(secondReversed); if (first.compareTo(second) != 0) { if (LOG.isDebugEnabled()) { - LOG.debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + LOG.debug( + "second key is not the reverse of first. row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } @@ -267,4 +243,3 @@ public class TestMultithreadedTableMapper { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java index 85d0f1c8ddd..dab81245e9d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,28 +41,25 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestRegionSizeCalculator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionSizeCalculator.class); + HBaseClassTestRule.forClass(TestRegionSizeCalculator.class); private Configuration configuration = new Configuration(); private final long megabyte = 1024L * 1024L; - private final ServerName sn = ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, - ServerName.NON_STARTCODE); + private final ServerName sn = + ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, ServerName.NON_STARTCODE); @Test public void testSimpleTestCase() throws Exception { RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3"); - Admin admin = mockAdmin( - mockRegion("region1", 123), - mockRegion("region3", 1232), - mockRegion("region2", 54321) - ); + Admin admin = mockAdmin(mockRegion("region1", 123), mockRegion("region3", 1232), + mockRegion("region2", 54321)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); @@ -75,40 +72,36 @@ public class TestRegionSizeCalculator { assertEquals(3, calculator.getRegionSizeMap().size()); } - /** - * When size of region in megabytes is larger than largest possible integer there could be - * error caused by lost of precision. - * */ + * When size of region in megabytes is larger than largest possible integer there could be error + * caused by lost of precision. + */ @Test public void testLargeRegion() throws Exception { RegionLocator regionLocator = mockRegionLocator("largeRegion"); - Admin admin = mockAdmin( - mockRegion("largeRegion", Integer.MAX_VALUE) - ); + Admin admin = mockAdmin(mockRegion("largeRegion", Integer.MAX_VALUE)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); - assertEquals(((long) Integer.MAX_VALUE) * megabyte, calculator.getRegionSize("largeRegion".getBytes())); + assertEquals(((long) Integer.MAX_VALUE) * megabyte, + calculator.getRegionSize("largeRegion".getBytes())); } - /** When calculator is disabled, it should return 0 for each request.*/ + /** When calculator is disabled, it should return 0 for each request. */ @Test public void testDisabled() throws Exception { String regionName = "cz.goout:/index.html"; RegionLocator table = mockRegionLocator(regionName); - Admin admin = mockAdmin( - mockRegion(regionName, 999) - ); + Admin admin = mockAdmin(mockRegion(regionName, 999)); - //first request on enabled calculator + // first request on enabled calculator RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); assertEquals(999 * megabyte, calculator.getRegionSize(regionName.getBytes())); - //then disabled calculator. + // then disabled calculator. configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false); RegionSizeCalculator disabledCalculator = new RegionSizeCalculator(table, admin); assertEquals(0 * megabyte, disabledCalculator.getRegionSize(regionName.getBytes())); @@ -118,7 +111,7 @@ public class TestRegionSizeCalculator { /** * Makes some table with given region names. - * */ + */ private RegionLocator mockRegionLocator(String... 
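testLargeRegion above guards against one specific arithmetic trap: region sizes are reported in megabytes as an int, so converting to bytes must widen to long before multiplying. The tiny sketch below demonstrates the difference; the variable names are illustrative.

public class RegionSizeUnitsSketch {
  public static void main(String[] args) {
    final long megabyte = 1024L * 1024L;
    int storefileSizeMb = Integer.MAX_VALUE;          // region size as reported, in MB
    long wrong = storefileSizeMb * 1024 * 1024;       // int arithmetic overflows before widening
    long right = ((long) storefileSizeMb) * megabyte; // widen first, as the test expects
    System.out.println("overflowed=" + wrong + " correct=" + right);
  }
}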
regionNames) throws IOException { RegionLocator mockedTable = Mockito.mock(RegionLocator.class); when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable")); @@ -136,7 +129,7 @@ public class TestRegionSizeCalculator { /** * Creates mock returning RegionLoad info about given servers. - */ + */ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { Admin mockAdmin = Mockito.mock(Admin.class); List regionLoads = new ArrayList<>(); @@ -145,15 +138,14 @@ public class TestRegionSizeCalculator { } when(mockAdmin.getConfiguration()).thenReturn(configuration); when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable"))) - .thenReturn(regionLoads); + .thenReturn(regionLoads); return mockAdmin; } /** * Creates mock of region with given name and size. - * - * @param fileSizeMb number of megabytes occupied by region in file store in megabytes - * */ + * @param fileSizeMb number of megabytes occupied by region in file store in megabytes + */ private RegionMetrics mockRegion(String regionName, int fileSizeMb) { RegionMetrics region = Mockito.mock(RegionMetrics.class); when(region.getRegionName()).thenReturn(regionName.getBytes()); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java index c3abf4d544e..df9d1597857 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -43,25 +44,17 @@ import org.mockito.Mockito; /** * Basic test of {@link RoundRobinTableInputFormat}; i.e. RRTIF. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRoundRobinTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRoundRobinTableInputFormat.class); + HBaseClassTestRule.forClass(TestRoundRobinTableInputFormat.class); private static final int SERVERS_COUNT = 5; - private static final String[] KEYS = { - "aa", "ab", "ac", "ad", "ae", - "ba", "bb", "bc", "bd", "be", - "ca", "cb", "cc", "cd", "ce", - "da", "db", "dc", "dd", "de", - "ea", "eb", "ec", "ed", "ee", - "fa", "fb", "fc", "fd", "fe", - "ga", "gb", "gc", "gd", "ge", - "ha", "hb", "hc", "hd", "he", - "ia", "ib", "ic", "id", "ie", - "ja", "jb", "jc", "jd", "je", "jf" - }; + private static final String[] KEYS = { "aa", "ab", "ac", "ad", "ae", "ba", "bb", "bc", "bd", "be", + "ca", "cb", "cc", "cd", "ce", "da", "db", "dc", "dd", "de", "ea", "eb", "ec", "ed", "ee", "fa", + "fb", "fc", "fd", "fe", "ga", "gb", "gc", "gd", "ge", "ha", "hb", "hc", "hd", "he", "ia", "ib", + "ic", "id", "ie", "ja", "jb", "jc", "jd", "je", "jf" }; /** * Test default behavior. @@ -78,8 +71,8 @@ public class TestRoundRobinTableInputFormat { Arrays.sort(copy.toArray(new InputSplit[0]), new SplitComparator()); // Assert the sort is retained even after passing through SplitComparator. 
for (int i = 0; i < sortedSplits.size(); i++) { - TableSplit sortedTs = (TableSplit)sortedSplits.get(i); - TableSplit copyTs = (TableSplit)copy.get(i); + TableSplit sortedTs = (TableSplit) sortedSplits.get(i); + TableSplit copyTs = (TableSplit) copy.get(i); assertEquals(sortedTs.getEncodedRegionName(), copyTs.getEncodedRegionName()); } } @@ -90,17 +83,17 @@ public class TestRoundRobinTableInputFormat { private List createSplits() { List splits = new ArrayList<>(KEYS.length - 1); for (int i = 0; i < KEYS.length - 1; i++) { - InputSplit split = new TableSplit(TableName.valueOf("test"), new Scan(), - Bytes.toBytes(KEYS[i]), Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), - "", 0); + InputSplit split = + new TableSplit(TableName.valueOf("test"), new Scan(), Bytes.toBytes(KEYS[i]), + Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), "", 0); splits.add(split); } return splits; } private void testDistribution(List list) throws IOException, InterruptedException { - for (int i = 0; i < KEYS.length/SERVERS_COUNT; i++) { - int [] counts = new int[SERVERS_COUNT]; + for (int i = 0; i < KEYS.length / SERVERS_COUNT; i++) { + int[] counts = new int[SERVERS_COUNT]; for (int j = i * SERVERS_COUNT; j < i * SERVERS_COUNT + SERVERS_COUNT; j++) { counts[Integer.parseInt(list.get(j).getLocations()[0]) - 1]++; } @@ -120,21 +113,21 @@ public class TestRoundRobinTableInputFormat { public int compare(InputSplit o1, InputSplit o2) { try { return Long.compare(o1.getLength(), o2.getLength()); - } catch (IOException|InterruptedException e) { + } catch (IOException | InterruptedException e) { throw new RuntimeException("exception in compare", e); } } } /** - * Assert that lengths are descending. RRTIF writes lengths in descending order so any - * subsequent sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps - * our RRTIF ordering. + * Assert that lengths are descending. RRTIF writes lengths in descending order so any subsequent + * sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps our RRTIF + * ordering. */ private void assertLengthDescending(List list) throws IOException, InterruptedException { long previousLength = Long.MAX_VALUE; - for (InputSplit is: list) { + for (InputSplit is : list) { long length = is.getLength(); assertTrue(previousLength + " " + length, previousLength > length); previousLength = length; @@ -165,13 +158,13 @@ public class TestRoundRobinTableInputFormat { } private void checkRetainsBooleanValue(JobContext jobContext, RoundRobinTableInputFormat rrtif, - final boolean b) { - jobContext.getConfiguration(). - setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); + final boolean b) { + jobContext.getConfiguration() + .setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); rrtif.configure(); rrtif.unconfigure(); - String value = jobContext.getConfiguration(). 
- get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); + String value = jobContext.getConfiguration() + .get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); assertEquals(b, Boolean.valueOf(value)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index 5793dfad588..3fcb251392a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -50,7 +49,7 @@ import org.slf4j.LoggerFactory; /** * Test the rowcounter map reduce job. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestRowCounter { @ClassRule @@ -88,133 +87,96 @@ public class TestRowCounter { } /** - * Test a case when no column was specified in command line arguments. - * - * @throws Exception + * Test a case when no column was specified in command line arguments. n */ @Test public void testRowCounterNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runRowCount(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * - * @throws Exception + * Test a case when the column specified in command line arguments is exclusive for few rows. n */ @Test public void testRowCounterExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runRowCount(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * - * @throws Exception + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. n */ @Test public void testRowCounterColumnWithColonInQualifier() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN }; runRowCount(args, 8); } /** - * Test a case when the column specified in command line arguments is not part - * of first KV for a row. - * - * @throws Exception + * Test a case when the column specified in command line arguments is not part of first KV for a + * row. 
n */ @Test public void testRowCounterHiddenColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL2 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 }; runRowCount(args, 10); } - /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows and also a row range filter is specified - * - * @throws Exception + * Test a case when the column specified in command line arguments is exclusive for few rows and + * also a row range filter is specified n */ @Test public void testRowCounterColumnAndRowRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 }; runRowCount(args, 8); } /** - * Test a case when a range is specified with single range of start-end keys - * @throws Exception + * Test a case when a range is specified with single range of start-end keys n */ @Test public void testRowCounterRowSingleRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" }; runRowCount(args, 2); } /** - * Test a case when a range is specified with single range with end key only - * @throws Exception + * Test a case when a range is specified with single range with end key only n */ @Test public void testRowCounterRowSingleRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" }; runRowCount(args, 3); } /** - * Test a case when a range is specified with two ranges where one range is with end key only - * @throws Exception + * Test a case when a range is specified with two ranges where one range is with end key only n */ @Test public void testRowCounterRowMultiRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" }; runRowCount(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys - * @throws Exception + * Test a case when a range is specified with multiple ranges of start-end keys n */ @Test public void testRowCounterRowMultiRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" }; runRowCount(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys; - * one range is filled, another two are not - * @throws Exception + * Test a case when a range is specified with multiple ranges of start-end keys; one range is + * filled, another two are not n */ @Test public void testRowCounterRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runRowCount(args, 2); } @@ -222,20 +184,16 @@ public class TestRowCounter { public void testRowCounter10kRowRange() throws Exception { String tableName = TABLE_NAME + "10k"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = 
TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runRowCount(args, 3); } /** - * Test a case when the timerange is specified with --starttime and --endtime options - * - * @throws Exception + * Test a case when the timerange is specified with --starttime and --endtime options n */ @Test public void testRowCounterTimeRange() throws Exception { @@ -248,7 +206,8 @@ public class TestRowCounter { long ts; // clean up content of TABLE_NAME - Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); + Table table = + TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); ts = EnvironmentEdgeManager.currentTime(); put1.addColumn(family, col1, ts, Bytes.toBytes("val1")); @@ -262,58 +221,43 @@ public class TestRowCounter { table.put(put3); table.close(); - String[] args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + ts }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runRowCount(args, 2); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, + "--starttime=" + (ts - 30 * 1000), "--endtime=" + (ts + 30 * 1000), }; runRowCount(args, 3); } /** * Run the RowCounter map reduce job and verify the row count. - * - * @param args the command line arguments to be used for rowcounter job. - * @param expectedCount the expected row count (result of map reduce job). - * @throws Exception + * @param args the command line arguments to be used for rowcounter job. + * @param expectedCount the expected row count (result of map reduce job). n */ private void runRowCount(String[] args, int expectedCount) throws Exception { RowCounter rowCounter = new RowCounter(); rowCounter.setConf(TEST_UTIL.getConfiguration()); - args = Arrays.copyOf(args, args.length+1); - args[args.length-1]="--expectedCount=" + expectedCount; + args = Arrays.copyOf(args, args.length + 1); + args[args.length - 1] = "--expectedCount=" + expectedCount; long start = EnvironmentEdgeManager.currentTime(); int result = rowCounter.run(args); long duration = EnvironmentEdgeManager.currentTime() - start; LOG.debug("row count duration (ms): " + duration); - assertTrue(result==0); + assertTrue(result == 0); } /** * Run the RowCounter map reduce job and verify the row count. - * - * @param args the command line arguments to be used for rowcounter job. + * @param args the command line arguments to be used for rowcounter job. * @param expectedCount the expected row count (result of map reduce job). * @throws Exception in case of any unexpected error. 
*/ @@ -330,66 +274,50 @@ public class TestRowCounter { @Test public void testCreateSubmittableJobWithArgsNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runCreateSubmittableJobWithArgs(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * + * Test a case when the column specified in command line arguments is exclusive for few rows. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsColumnWithColonInQualifier() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is not part - * of first KV for a row. - * + * Test a case when the column specified in command line arguments is not part of first KV for a + * row. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsHiddenColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL2 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 }; runCreateSubmittableJobWithArgs(args, 10); } - /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows and also a row range filter is specified - * + * Test a case when the column specified in command line arguments is exclusive for few rows and + * also a row range filter is specified * @throws Exception in case of any unexpected error. 
*/ @Test public void testCreateSubmittableJobWithArgsColumnAndRowRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } @@ -399,9 +327,7 @@ public class TestRowCounter { */ @Test public void testCreateSubmittableJobWithArgsRowSingleRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -411,9 +337,7 @@ public class TestRowCounter { */ @Test public void testCreateSubmittableJobWithArgsRowSingleRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 3); } @@ -423,9 +347,7 @@ public class TestRowCounter { */ @Test public void testCreateSubmittableJobWithArgsRowMultiRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" }; runCreateSubmittableJobWithArgs(args, 5); } @@ -435,22 +357,18 @@ public class TestRowCounter { */ @Test public void testCreateSubmittableJobWithArgsRowMultiRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" }; runCreateSubmittableJobWithArgs(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys; - * one range is filled, another two are not + * Test a case when a range is specified with multiple ranges of start-end keys; one range is + * filled, another two are not * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -458,19 +376,16 @@ public class TestRowCounter { public void testCreateSubmittableJobWithArgs10kRowRange() throws Exception { String tableName = TABLE_NAME + "CreateSubmittableJobWithArgs10kRowRange"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runCreateSubmittableJobWithArgs(args, 3); } /** * Test a case when the timerange is specified with --starttime and --endtime options - * * @throws Exception in case of any unexpected error. 
*/ @Test @@ -483,7 +398,7 @@ public class TestRowCounter { long ts; - String tableName = TABLE_NAME_TS_RANGE+"CreateSubmittableJobWithArgs"; + String tableName = TABLE_NAME_TS_RANGE + "CreateSubmittableJobWithArgs"; // clean up content of TABLE_NAME Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM)); @@ -499,41 +414,26 @@ public class TestRowCounter { table.put(put3); table.close(); - String[] args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = + new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, "--endtime=" + ts }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runCreateSubmittableJobWithArgs(args, 2); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + (ts - 30 * 1000), + "--endtime=" + (ts + 30 * 1000), }; runCreateSubmittableJobWithArgs(args, 3); } /** - * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have - * two columns, Few have one. - * - * @param table - * @throws IOException + * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have two columns, Few have + * one. 
nn */ private static void writeRows(Table table, int totalRows, int rowsWithOneCol) throws IOException { final byte[] family = Bytes.toBytes(COL_FAM); @@ -570,7 +470,7 @@ public class TestRowCounter { @Test public void testImportMain() throws Exception { SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); String[] args = {}; try { @@ -602,13 +502,13 @@ public class TestRowCounter { ByteArrayOutputStream data = new ByteArrayOutputStream(); PrintStream stream = new PrintStream(data); System.setOut(stream); - String[] args = {"-h"}; + String[] args = { "-h" }; runRowCount(args, 0); assertUsageContent(data.toString()); - args = new String[]{"--help"}; + args = new String[] { "--help" }; runRowCount(args, 0); assertUsageContent(data.toString()); - }finally { + } finally { System.setOut(oldPrintStream); } } @@ -616,27 +516,27 @@ public class TestRowCounter { @Test public void testInvalidTable() throws Exception { try { - String[] args = {"invalid"}; + String[] args = { "invalid" }; runRowCount(args, 0); fail("RowCounter should had failed with invalid table."); - }catch (Throwable e){ + } catch (Throwable e) { assertTrue(e instanceof AssertionError); } } private void assertUsageContent(String usage) { - assertTrue(usage.contains("usage: hbase rowcounter " - + " [options] [ ...]")); + assertTrue(usage + .contains("usage: hbase rowcounter " + " [options] [ ...]")); assertTrue(usage.contains("Options:\n")); - assertTrue(usage.contains("--starttime= " - + "starting time filter to start counting rows from.\n")); + assertTrue(usage.contains( + "--starttime= " + "starting time filter to start counting rows from.\n")); assertTrue(usage.contains("--endtime= " + "end time filter limit, to only count rows up to this timestamp.\n")); - assertTrue(usage.contains("--range= " - + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); + assertTrue(usage + .contains("--range= " + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); assertTrue(usage.contains("--expectedCount= expected number of rows to be count.\n")); - assertTrue(usage.contains("For performance, " - + "consider the following configuration properties:\n")); + assertTrue( + usage.contains("For performance, " + "consider the following configuration properties:\n")); assertTrue(usage.contains("-Dhbase.client.scanner.caching=100\n")); assertTrue(usage.contains("-Dmapreduce.map.speculative=false\n")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java index b28d8d9e89b..61911670ac9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ import org.junit.experimental.categories.Category; /** * Test of simple partitioner. 
*/ -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestSimpleTotalOrderPartitioner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSimpleTotalOrderPartitioner.class); + HBaseClassTestRule.forClass(TestSimpleTotalOrderPartitioner.class); protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); Configuration conf = TEST_UTIL.getConfiguration(); @@ -48,7 +48,7 @@ public class TestSimpleTotalOrderPartitioner { public void testSplit() throws Exception { String start = "a"; String end = "{"; - SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>(); + SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>(); this.conf.set(SimpleTotalOrderPartitioner.START, start); this.conf.set(SimpleTotalOrderPartitioner.END, end); @@ -69,14 +69,12 @@ public class TestSimpleTotalOrderPartitioner { partition = p.getPartition(q, HConstants.EMPTY_BYTE_ARRAY, 3); assertEquals(2, partition); // What about end and start keys. - ImmutableBytesWritable startBytes = - new ImmutableBytesWritable(Bytes.toBytes(start)); + ImmutableBytesWritable startBytes = new ImmutableBytesWritable(Bytes.toBytes(start)); partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 2); assertEquals(0, partition); partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 3); assertEquals(0, partition); - ImmutableBytesWritable endBytes = - new ImmutableBytesWritable(Bytes.toBytes("z")); + ImmutableBytesWritable endBytes = new ImmutableBytesWritable(Bytes.toBytes("z")); partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 2); assertEquals(1, partition); partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 3); @@ -84,4 +82,3 @@ public class TestSimpleTotalOrderPartitioner { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index caacfc64855..4070da2e0e7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -59,7 +58,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Throwables; public class TestSyncTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncTable.class); + HBaseClassTestRule.forClass(TestSyncTable.class); private static final Logger LOG = LoggerFactory.getLogger(TestSyncTable.class); @@ -80,9 +79,9 @@ public class TestSyncTable { } private static byte[][] generateSplits(int numRows, int numRegions) { - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } return splitRows; } @@ -117,8 +116,8 @@ public class TestSyncTable { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doDeletes=false"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--doDeletes=false"); assertTargetDoDeletesFalse(100, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -140,8 +139,7 @@ public class TestSyncTable { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doPuts=false"); + Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir, "--doPuts=false"); assertTargetDoPutsFalse(70, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -163,8 +161,8 @@ public class TestSyncTable { long current = EnvironmentEdgeManager.currentTime(); writeTestData(sourceTableName, targetTableName, current - 1000, current); hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true"); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--ignoreTimestamps=true"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--ignoreTimestamps=true"); assertEqualTables(90, sourceTableName, targetTableName, true); assertEquals(50, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -179,7 +177,7 @@ public class TestSyncTable { } private void assertEqualTables(int expectedRows, TableName sourceTableName, - TableName targetTableName, boolean ignoreTimestamps) throws Exception { + TableName targetTableName, boolean ignoreTimestamps) throws Exception { Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); @@ -191,27 +189,23 @@ public class TestSyncTable { Result targetRow = targetScanner.next(); LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); + + " cells:" + sourceRow); LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); + + " cells:" + targetRow); if (sourceRow == null) { - Assert.fail("Expected " + expectedRows - + " source rows but only found " + i); + Assert.fail("Expected " + expectedRows + " source rows but only found " + i); } if (targetRow == null) { - Assert.fail("Expected " + expectedRows - + " target rows but only found " + i); + Assert.fail("Expected " + expectedRows + " target rows but only found " + i); } Cell[] sourceCells = sourceRow.rawCells(); Cell[] targetCells = targetRow.rawCells(); if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } for (int j = 0; j < sourceCells.length; j++) { Cell sourceCell = sourceCells[j]; @@ -240,13 +234,13 @@ public class TestSyncTable { } Result sourceRow = sourceScanner.next(); if (sourceRow != null) { - Assert.fail("Source table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(sourceRow.getRow())); + Assert.fail("Source table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(sourceRow.getRow())); } Result targetRow = targetScanner.next(); if (targetRow != null) { - Assert.fail("Target table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(targetRow.getRow())); + Assert.fail("Target table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(targetRow.getRow())); } sourceScanner.close(); targetScanner.close(); @@ -255,7 +249,7 @@ public class TestSyncTable { } private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableName, - TableName targetTableName) throws Exception { + TableName targetTableName) throws Exception { Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); @@ -266,19 +260,17 @@ public class TestSyncTable { int rowsCount = 0; while (targetRow != null) { rowsCount++; - //only compares values for existing rows, skipping rows existing on - //target only that were not deleted given --doDeletes=false + // only compares values for existing rows, skipping rows existing on + // target only that were not deleted given --doDeletes=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { targetRow = targetScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? "null" - : Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? "null" - : Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + + " cells:" + sourceRow); + LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) + + " cells:" + targetRow); Cell[] sourceCells = sourceRow.rawCells(); Cell[] targetCells = targetRow.rawCells(); @@ -287,18 +279,16 @@ public class TestSyncTable { if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + targetRowKey + " should have more cells in " - + "target than in source"); + Assert + .fail("Row " + targetRowKey + " should have more cells in " + "target than in source"); } } else { if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } } for (int j = 0; j < sourceCells.length; j++) { @@ -314,7 +304,7 @@ public class TestSyncTable { if (!CellUtil.matchingQualifier(sourceCell, targetCell)) { Assert.fail("Qualifiers don't match"); } - if (targetRowKey < 80 && targetRowKey >= 90){ + if (targetRowKey < 80 && targetRowKey >= 90) { if (!CellUtil.matchingTimestamp(sourceCell, targetCell)) { Assert.fail("Timestamps don't match"); } @@ -323,16 +313,14 @@ public class TestSyncTable { Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug("Source cell: " + sourceCell + " target cell: " - + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); Throwables.propagate(t); } } targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); @@ -340,7 +328,7 @@ public class TestSyncTable { } private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName, - TableName targetTableName) throws Exception { + TableName targetTableName) throws Exception { Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); @@ -350,22 +338,18 @@ public class TestSyncTable { Result sourceRow = sourceScanner.next(); int rowsCount = 0; - while (targetRow!=null) { - //only compares values for existing rows, skipping rows existing on - //source only that were not added to target given --doPuts=false + while (targetRow != null) { + // only compares values for existing rows, skipping rows existing on + // source only that were not added to target given --doPuts=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { sourceRow = sourceScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? - "null" : - Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? - "null" : - Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + + " cells:" + sourceRow); + LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) + + " cells:" + targetRow); LOG.debug("rowsCount: " + rowsCount); @@ -376,27 +360,26 @@ public class TestSyncTable { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); Assert.fail("There shouldn't exist any rows between 40 and 60, since " - + "Puts are disabled and Deletes are enabled."); + + "Puts are disabled and Deletes are enabled."); } else if (targetRowKey >= 60 && targetRowKey < 70) { if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " shouldn't have same number of cells."); + Assert.fail( + "Row " + Bytes.toInt(sourceRow.getRow()) + " shouldn't have same number of cells."); } } else if (targetRowKey >= 80 && targetRowKey < 90) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); Assert.fail("There should be no rows between 80 and 90 on target, as " - + "these had different timestamps and should had been deleted."); + + "these had different timestamps and should had been deleted."); } else if (targetRowKey >= 90 && targetRowKey < 100) { for (int j = 0; j < sourceCells.length; j++) { Cell sourceCell = sourceCells[j]; Cell targetCell = targetCells[j]; if (CellUtil.matchingValue(sourceCell, targetCell)) { Assert.fail("Cells values should not match for rows between " - + "90 and 100. Target row id: " + (Bytes.toInt(targetRow - .getRow()))); + + "90 and 100. Target row id: " + (Bytes.toInt(targetRow.getRow()))); } } } else { @@ -420,8 +403,7 @@ public class TestSyncTable { Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug( - "Source cell: " + sourceCell + " target cell: " + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); Throwables.propagate(t); } } @@ -430,21 +412,20 @@ public class TestSyncTable { targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); targetTable.close(); } - private Counters syncTables(TableName sourceTableName, TableName targetTableName, - Path testDir, String... options) throws Exception { + private Counters syncTables(TableName sourceTableName, TableName targetTableName, Path testDir, + String... options) throws Exception { SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+3); + String[] args = Arrays.copyOf(options, options.length + 3); args[options.length] = testDir.toString(); - args[options.length+1] = sourceTableName.getNameAsString(); - args[options.length+2] = targetTableName.getNameAsString(); + args[options.length + 1] = sourceTableName.getNameAsString(); + args[options.length + 2] = targetTableName.getNameAsString(); int code = syncTable.run(args); assertEquals("sync table job failed", 0, code); @@ -453,12 +434,12 @@ public class TestSyncTable { } private void hashSourceTable(TableName sourceTableName, Path testDir, String... 
options) - throws Exception { + throws Exception { int numHashFiles = 3; - long batchSize = 100; // should be 2 batches per region + long batchSize = 100; // should be 2 batches per region int scanBatch = 1; HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+5); + String[] args = Arrays.copyOf(options, options.length + 5); args[options.length] = "--batchsize=" + batchSize; args[options.length + 1] = "--numhashfiles=" + numHashFiles; args[options.length + 2] = "--scanbatch=" + scanBatch; @@ -479,7 +460,7 @@ public class TestSyncTable { } private void writeTestData(TableName sourceTableName, TableName targetTableName, - long... timestamps) throws Exception { + long... timestamps) throws Exception { final byte[] family = Bytes.toBytes("family"); final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); @@ -492,14 +473,14 @@ public class TestSyncTable { int targetRegions = 6; if (ArrayUtils.isEmpty(timestamps)) { long current = EnvironmentEdgeManager.currentTime(); - timestamps = new long[]{current,current}; + timestamps = new long[] { current, current }; } - Table sourceTable = TEST_UTIL.createTable(sourceTableName, - family, generateSplits(numRows, sourceRegions)); + Table sourceTable = + TEST_UTIL.createTable(sourceTableName, family, generateSplits(numRows, sourceRegions)); - Table targetTable = TEST_UTIL.createTable(targetTableName, - family, generateSplits(numRows, targetRegions)); + Table targetTable = + TEST_UTIL.createTable(targetTableName, family, generateSplits(numRows, targetRegions)); int rowIndex = 0; // a bunch of identical rows @@ -571,8 +552,8 @@ public class TestSyncTable { sourceTable.put(sourcePut); Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1]+1, column1); - targetPut.addColumn(family, column2, timestamps[1]-1, value2); + targetPut.addColumn(family, column1, timestamps[1] + 1, column1); + targetPut.addColumn(family, column2, timestamps[1] - 1, value2); targetTable.put(targetPut); } // some rows with different values diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index a915d85d5ba..1986200c187 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,14 +71,13 @@ import org.slf4j.LoggerFactory; /** * This tests the TableInputFormat and its recovery semantics - * */ @Category(LargeTests.class) public class TestTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormat.class); + HBaseClassTestRule.forClass(TestTableInputFormat.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableInputFormat.class); @@ -106,22 +105,15 @@ public class TestTableInputFormat { } /** - * Setup a table with two rows and values. - * - * @param tableName - * @return A Table instance for the created table. - * @throws IOException + * Setup a table with two rows and values. n * @return A Table instance for the created table. 
n */ public static Table createTable(byte[] tableName) throws IOException { return createTable(tableName, new byte[][] { FAMILY }); } /** - * Setup a table with two rows and values per column family. - * - * @param tableName - * @return A Table instance for the created table. - * @throws IOException + * Setup a table with two rows and values per column family. n * @return A Table instance for the + * created table. n */ public static Table createTable(byte[] tableName, byte[][] families) throws IOException { Table table = UTIL.createTable(TableName.valueOf(tableName), families); @@ -140,15 +132,14 @@ public class TestTableInputFormat { /** * Verify that the result and key have expected values. - * - * @param r single row result - * @param key the row key - * @param expectedKey the expected key + * @param r single row result + * @param key the row key + * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -157,17 +148,11 @@ public class TestTableInputFormat { } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapreduce API. - * - * @param table - * @throws IOException - * @throws InterruptedException + * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. nnn */ - static void runTestMapreduce(Table table) throws IOException, - InterruptedException { + static void runTestMapreduce(Table table) throws IOException, InterruptedException { org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = - new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); + new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); Scan s = new Scan(); s.setStartRow("aaa".getBytes()); s.setStopRow("zzz".getBytes()); @@ -197,12 +182,9 @@ public class TestTableInputFormat { } /** - * Create a table that IOE's on first scanner next call - * - * @throws IOException + * Create a table that IOE's on first scanner next call n */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -231,13 +213,9 @@ public class TestTableInputFormat { } /** - * Create a table that throws a NotServingRegionException on first scanner - * next call - * - * @throws IOException + * Create a table that throws a NotServingRegionException on first scanner next call n */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -252,9 +230,8 @@ public class TestTableInputFormat { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) - .when(scanner).next(); + doThrow(new NotServingRegionException("Injected 
simulated TimeoutException")) + .when(scanner).next(); return scanner; } @@ -269,66 +246,51 @@ public class TestTableInputFormat { } /** - * Run test assuming no errors using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException + * Run test assuming no errors using newer mapreduce api nn */ @Test - public void testTableRecordReaderMapreduce() throws IOException, - InterruptedException { + public void testTableRecordReaderMapreduce() throws IOException, InterruptedException { Table table = createTable("table1-mr".getBytes()); runTestMapreduce(table); } /** - * Run test assuming Scanner IOException failure using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException + * Run test assuming Scanner IOException failure using newer mapreduce api nn */ @Test - public void testTableRecordReaderScannerFailMapreduce() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduce() throws IOException, InterruptedException { Table htable = createIOEScannerTable("table2-mr".getBytes(), 1); runTestMapreduce(htable); } /** - * Run test assuming Scanner IOException failure using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException + * Run test assuming Scanner IOException failure using newer mapreduce api nn */ @Test(expected = IOException.class) - public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduceTwice() + throws IOException, InterruptedException { Table htable = createIOEScannerTable("table3-mr".getBytes(), 2); runTestMapreduce(htable); } /** - * Run test assuming NotServingRegionException using newer mapreduce api - * - * @throws InterruptedException - * @throws org.apache.hadoop.hbase.DoNotRetryIOException + * Run test assuming NotServingRegionException using newer mapreduce api n * @throws + * org.apache.hadoop.hbase.DoNotRetryIOException */ @Test public void testTableRecordReaderScannerTimeoutMapreduce() - throws IOException, InterruptedException { + throws IOException, InterruptedException { Table htable = createDNRIOEScannerTable("table4-mr".getBytes(), 1); runTestMapreduce(htable); } /** - * Run test assuming NotServingRegionException using newer mapreduce api - * - * @throws InterruptedException - * @throws org.apache.hadoop.hbase.NotServingRegionException + * Run test assuming NotServingRegionException using newer mapreduce api n * @throws + * org.apache.hadoop.hbase.NotServingRegionException */ @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) public void testTableRecordReaderScannerTimeoutMapreduceTwice() - throws IOException, InterruptedException { + throws IOException, InterruptedException { Table htable = createDNRIOEScannerTable("table5-mr".getBytes(), 2); runTestMapreduce(htable); } @@ -338,7 +300,7 @@ public class TestTableInputFormat { */ @Test public void testExtensionOfTableInputFormatBase() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { LOG.info("testing use of an InputFormat taht extends InputFormatBase"); final Table htable = createTable(Bytes.toBytes("exampleTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); @@ -347,9 +309,9 @@ public class TestTableInputFormat { @Test public void testJobConfigurableExtensionOfTableInputFormatBase() - throws IOException, InterruptedException, ClassNotFoundException { - 
LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using JobConfigurable."); + throws IOException, InterruptedException, ClassNotFoundException { + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "using JobConfigurable."); final Table htable = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -357,16 +319,16 @@ public class TestTableInputFormat { @Test public void testDeprecatedExtensionOfTableInputFormatBase() - throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using the approach documented in 0.98."); + throws IOException, InterruptedException, ClassNotFoundException { + LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + + "using the approach documented in 0.98."); final Table htable = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); } void testInputFormat(Class clazz) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { final Job job = MapreduceTestingShim.createJob(UTIL.getConfiguration()); job.setInputFormatClass(clazz); job.setOutputFormatClass(NullOutputFormat.class); @@ -376,34 +338,36 @@ public class TestTableInputFormat { LOG.debug("submitting job."); assertTrue("job failed!", job.waitForCompletion(true)); assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue()); assertEquals("Saw any instances of the filtered out row.", 0, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue()); assertEquals("Saw the wrong number of instances of columnA.", 1, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue()); assertEquals("Saw the wrong number of instances of columnB.", 1, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue()); assertEquals("Saw the wrong count of values for the filtered-for row.", 2, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue()); assertEquals("Saw the wrong count of values for the filtered-out row.", 0, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue()); } public static class ExampleVerifier extends TableMapper { @Override - public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException { for (Cell cell : value.listCells()) { - 
context.getCounter(TestTableInputFormat.class.getName() + ":row", + context + .getCounter(TestTableInputFormat.class.getName() + ":row", Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) - .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":family", + .increment(1l); + context + .getCounter(TestTableInputFormat.class.getName() + ":family", Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) - .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":value", + .increment(1l); + context + .getCounter(TestTableInputFormat.class.getName() + ":value", Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) - .increment(1l); + .increment(1l); } } @@ -418,8 +382,7 @@ public class TestTableInputFormat { Table exampleTable = connection.getTable(TableName.valueOf(("exampleDeprecatedTable"))); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { @@ -436,9 +399,8 @@ public class TestTableInputFormat { } - public static class ExampleJobConfigurableTIF extends TableInputFormatBase - implements JobConfigurable { + implements JobConfigurable { @Override public void configure(JobConf job) { @@ -447,9 +409,8 @@ public class TestTableInputFormat { TableName tableName = TableName.valueOf("exampleJobConfigurableTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); @@ -464,19 +425,17 @@ public class TestTableInputFormat { } } - public static class ExampleTIF extends TableInputFormatBase { @Override protected void initialize(JobContext job) throws IOException { - Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create( - job.getConfiguration())); + Connection connection = + ConnectionFactory.createConnection(HBaseConfiguration.create(job.getConfiguration())); TableName tableName = TableName.valueOf("exampleTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); @@ -489,4 +448,3 @@ public class TestTableInputFormat { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index ee46726d916..2b7be6a80e5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,7 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.IOException; import java.net.Inet6Address; import java.net.InetAddress; @@ -58,12 +59,12 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestTableInputFormatBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatBase.class); + HBaseClassTestRule.forClass(TestTableInputFormatBase.class); @Test public void testReuseRegionSizeCalculator() throws IOException { @@ -89,13 +90,12 @@ public class TestTableInputFormatBase { format.getSplits(context); // should only be 2 despite calling getSplits 4 times - Mockito.verify(format, Mockito.times(2)) - .createRegionSizeCalculator(Mockito.any(), Mockito.any()); + Mockito.verify(format, Mockito.times(2)).createRegionSizeCalculator(Mockito.any(), + Mockito.any()); } @Test - public void testTableInputFormatBaseReverseDNSForIPv6() - throws UnknownHostException { + public void testTableInputFormatBaseReverseDNSForIPv6() throws UnknownHostException { String address = "ipv6.google.com"; String localhost = null; InetAddress addr = null; @@ -107,11 +107,10 @@ public class TestTableInputFormatBase { // google.com is down, we can probably forgive this test. return; } - System.out.println("Should retrun the hostname for this host " + - localhost + " addr : " + addr); + System.out.println("Should retrun the hostname for this host " + localhost + " addr : " + addr); String actualHostName = inputFormat.reverseDNS(addr); - assertEquals("Should retrun the hostname for this host. Expected : " + - localhost + " Actual : " + actualHostName, localhost, actualHostName); + assertEquals("Should retrun the hostname for this host. Expected : " + localhost + " Actual : " + + actualHostName, localhost, actualHostName); } @Test @@ -119,7 +118,7 @@ public class TestTableInputFormatBase { JobContext context = mock(JobContext.class); Configuration conf = HBaseConfiguration.create(); conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, - ConnectionForMergeTesting.class.getName()); + ConnectionForMergeTesting.class.getName()); conf.set(TableInputFormat.INPUT_TABLE, "testTable"); conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true); when(context.getConfiguration()).thenReturn(conf); @@ -129,13 +128,13 @@ public class TestTableInputFormatBase { // split["b", "c"] is excluded, split["o", "p"] and split["p", "q"] are merged, // but split["a", "b"] and split["c", "d"] are not merged. assertEquals(ConnectionForMergeTesting.START_KEYS.length - 1 - 1, - tifExclude.getSplits(context).size()); + tifExclude.getSplits(context).size()); } /** * Subclass of {@link TableInputFormat} to use in {@link #testNonSuccessiveSplitsAreNotMerged}. - * This class overrides {@link TableInputFormatBase#includeRegionInSplit} - * to exclude specific splits. + * This class overrides {@link TableInputFormatBase#includeRegionInSplit} to exclude specific + * splits. */ private static class TableInputFormatForMergeTesting extends TableInputFormat { private byte[] prefixStartKey = Bytes.toBytes("b"); @@ -146,10 +145,11 @@ public class TestTableInputFormatBase { * Exclude regions which contain rows starting with "b". 
*/ @Override - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { - if (Bytes.compareTo(startKey, prefixEndKey) < 0 - && (Bytes.compareTo(prefixStartKey, endKey) < 0 - || Bytes.equals(endKey, HConstants.EMPTY_END_ROW))) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { + if ( + Bytes.compareTo(startKey, prefixEndKey) < 0 && (Bytes.compareTo(prefixStartKey, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_END_ROW)) + ) { return false; } else { return true; @@ -171,20 +171,17 @@ public class TestTableInputFormatBase { } /** - * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. - * This class returns mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, - * and {@link Admin}. + * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. This class returns + * mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, and {@link Admin}. */ private static class ConnectionForMergeTesting implements Connection { - public static final byte[][] SPLITS = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d"), - Bytes.toBytes("e"), Bytes.toBytes("f"), Bytes.toBytes("g"), Bytes.toBytes("h"), - Bytes.toBytes("i"), Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"), - Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"), Bytes.toBytes("p"), - Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"), Bytes.toBytes("t"), - Bytes.toBytes("u"), Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"), - Bytes.toBytes("y"), Bytes.toBytes("z") - }; + public static final byte[][] SPLITS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), + Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"), + Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j"), + Bytes.toBytes("k"), Bytes.toBytes("l"), Bytes.toBytes("m"), Bytes.toBytes("n"), + Bytes.toBytes("o"), Bytes.toBytes("p"), Bytes.toBytes("q"), Bytes.toBytes("r"), + Bytes.toBytes("s"), Bytes.toBytes("t"), Bytes.toBytes("u"), Bytes.toBytes("v"), + Bytes.toBytes("w"), Bytes.toBytes("x"), Bytes.toBytes("y"), Bytes.toBytes("z") }; public static final byte[][] START_KEYS; public static final byte[][] END_KEYS; @@ -215,7 +212,7 @@ public class TestTableInputFormatBase { } ConnectionForMergeTesting(Configuration conf, ExecutorService pool, User user) - throws IOException { + throws IOException { } @Override @@ -258,39 +255,38 @@ public class TestTableInputFormatBase { public RegionLocator getRegionLocator(TableName tableName) throws IOException { final Map locationMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] startKey : START_KEYS) { - HRegionLocation hrl = new HRegionLocation( - RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), + HRegionLocation hrl = + new HRegionLocation(RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), ServerName.valueOf("localhost", 0, 0)); locationMap.put(startKey, hrl); } RegionLocator locator = mock(RegionLocator.class); - when(locator.getRegionLocation(any(byte [].class), anyBoolean())). 
- thenAnswer(new Answer() { + when(locator.getRegionLocation(any(byte[].class), anyBoolean())) + .thenAnswer(new Answer() { @Override public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; return locationMap.get(key); } }); - when(locator.getStartEndKeys()). - thenReturn(new Pair(START_KEYS, END_KEYS)); + when(locator.getStartEndKeys()) + .thenReturn(new Pair(START_KEYS, END_KEYS)); return locator; } public RegionSizeCalculator getRegionSizeCalculator() { RegionSizeCalculator sizeCalculator = mock(RegionSizeCalculator.class); - when(sizeCalculator.getRegionSize(any(byte[].class))). - thenAnswer(new Answer() { - @Override - public Long answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] regionId = (byte [])args[0]; - byte[] startKey = RegionInfo.getStartKey(regionId); - return SIZE_MAP.get(startKey); - } - }); + when(sizeCalculator.getRegionSize(any(byte[].class))).thenAnswer(new Answer() { + @Override + public Long answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] regionId = (byte[]) args[0]; + byte[] startKey = RegionInfo.getStartKey(regionId); + return SIZE_MAP.get(startKey); + } + }); return sizeCalculator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java index a116ecb72fa..aeea1dffbf5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestTableInputFormatScan extends TestTableInputFormatScanBase { */ @Test public void testSpecifiedNumOfMappersMR() - throws InterruptedException, IOException, ClassNotFoundException { + throws InterruptedException, IOException, ClassNotFoundException { testNumOfSplitsMR(2, 52); testNumOfSplitsMR(4, 104); } @@ -61,7 +61,7 @@ public class TestTableInputFormatScan extends TestTableInputFormatScanBase { @Test public void testScanFromConfiguration() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScanFromConfiguration("bba", "bbd", "bbc"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index 1e5fb819079..63a0b7fd495 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +20,7 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -50,7 +50,6 @@ import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce * job to see if that is handed over and done properly too. @@ -61,7 +60,7 @@ public abstract class TestTableInputFormatScanBase { static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); static final TableName TABLE_NAME = TableName.valueOf("scantest"); - static final byte[][] INPUT_FAMILYS = {Bytes.toBytes("content1"), Bytes.toBytes("content2")}; + static final byte[][] INPUT_FAMILYS = { Bytes.toBytes("content1"), Bytes.toBytes("content2") }; static final String KEY_STARTROW = "startRow"; static final String KEY_LASTROW = "stpRow"; @@ -89,31 +88,28 @@ public abstract class TestTableInputFormatScanBase { /** * Pass the key and value to reduce. - * - * @param key The key, here "aaa", "aab" etc. - * @param value The value is the same as the key. - * @param context The task context. + * @param key The key, here "aaa", "aab" etc. + * @param value The value is the same as the key. + * @param context The task context. * @throws IOException When reading the rows fails. */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) - throws IOException, InterruptedException { + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { if (value.size() != 2) { throw new IOException("There should be two input columns"); } - Map>> - cfMap = value.getMap(); + Map>> cfMap = value.getMap(); if (!cfMap.containsKey(INPUT_FAMILYS[0]) || !cfMap.containsKey(INPUT_FAMILYS[1])) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILYS[0]) + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); + throw new IOException("Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILYS[0]) + + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); } String val0 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[0], null)); String val1 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[1], null)); - LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> (" + val0 + ", " + val1 + ")"); + LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> (" + val0 + ", " + + val1 + ")"); context.write(key, key); } } @@ -122,28 +118,25 @@ public abstract class TestTableInputFormatScanBase { * Checks the last and first key seen against the scanner boundaries. 
*/ public static class ScanReducer - extends Reducer { + extends Reducer { private String first = null; private String last = null; - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException ,InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.info("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.info( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; } } - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); @@ -163,9 +156,9 @@ public abstract class TestTableInputFormatScanBase { * Tests an MR Scan initialized from properties set in the Configuration. */ protected void testScanFromConfiguration(String start, String stop, String last) - throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + - "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + throws IOException, InterruptedException, ClassNotFoundException { + String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + + "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); c.set(TableInputFormat.INPUT_TABLE, TABLE_NAME.getNameAsString()); c.set(TableInputFormat.SCAN_COLUMN_FAMILY, @@ -197,9 +190,9 @@ public abstract class TestTableInputFormatScanBase { * Tests a MR scan using specific start and stop rows. */ protected void testScan(String start, String stop, String last) - throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + throws IOException, InterruptedException, ClassNotFoundException { + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? 
stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); Scan scan = new Scan(); @@ -225,13 +218,12 @@ public abstract class TestTableInputFormatScanBase { LOG.info("After map/reduce completion - job " + jobName); } - /** * Tests Number of inputSplits for MR job when specify number of mappers for TableInputFormatXXX * This test does not run MR job */ protected void testNumOfSplits(int splitsPerRegion, int expectedNumOfSplits) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { String jobName = "TestJobForNumOfSplits"; LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -261,7 +253,7 @@ public abstract class TestTableInputFormatScanBase { * Run MR job to check the number of mapper = expectedNumOfSplits */ protected void testNumOfSplitsMR(int splitsPerRegion, int expectedNumOfSplits) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { String jobName = "TestJobForNumOfSplits-MR"; LOG.info("Before map/reduce startup - job " + jobName); JobConf c = new JobConf(TEST_UTIL.getConfiguration()); @@ -311,4 +303,3 @@ public abstract class TestTableInputFormatScanBase { assertNotEquals("The seventh split start key should not be", 4, Bytes.toInt(ts4.getStartRow())); } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java index d7cefd61b14..addcdc898c8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToAPP extends TestTableInputFormatScan */ @Test public void testScanEmptyToAPP() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "app", "apo"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java index 598a345834d..e395b36e2a7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToBBA extends TestTableInputFormatScan */ @Test public void testScanEmptyToBBA() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "bba", "baz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java index 6d3674caad8..f86578712ae 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToBBB extends TestTableInputFormatScan */ @Test public void testScanEmptyToBBB() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "bbb", "bba"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java index f5d4de10a88..ef7b38b21be 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToEmpty extends TestTableInputFormatSc */ @Test public void testScanEmptyToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, null, null); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java index 939fc936f95..f20d8113f78 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToOPP extends TestTableInputFormatScan */ @Test public void testScanEmptyToOPP() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "opp", "opo"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java index 32f768c00fb..7d833eb66a1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java index 5ecb4e60f4e..f6985a3fd77 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java index 7b2ccded7e1..e57051dfd19 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanOPPToEmpty extends TestTableInputFormatScan */ @Test public void testScanOPPToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("opp", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java index 2801f4eb8bf..c8b3394e54b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanYYXToEmpty extends TestTableInputFormatScan */ @Test public void testScanYYXToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("yyx", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java index 97a4998e553..175d10e1f75 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanYYYToEmpty extends TestTableInputFormatScan */ @Test public void testScanYYYToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("yyy", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java index 3d91ff2b7b3..9ce2f0782b2 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanYZYToEmpty extends TestTableInputFormatScan */ @Test public void testScanYZYToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("yzy", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java index 786da1a0204..e1bd1626870 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,22 +51,24 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. 
*/ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableMapReduce extends TestTableMapReduceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduce.class); + HBaseClassTestRule.forClass(TestTableMapReduce.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class); @Override - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce @@ -74,25 +76,18 @@ public class TestTableMapReduce extends TestTableMapReduceBase { static class ProcessContentsMapper extends TableMapper { /** - * Pass the key, and reversed value to reduce - * - * @param key - * @param value - * @param context - * @throws IOException + * Pass the key, and reversed value to reduce nnnn */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) - throws IOException, InterruptedException { + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -115,12 +110,9 @@ public class TestTableMapReduce extends TestTableMapReduceBase { job.setNumReduceTasks(1); Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); - TableMapReduceUtil.initTableMapperJob( - table.getName().getNameAsString(), scan, - ProcessContentsMapper.class, ImmutableBytesWritable.class, - Put.class, job); - TableMapReduceUtil.initTableReducerJob( - table.getName().getNameAsString(), + TableMapReduceUtil.initTableMapperJob(table.getName().getNameAsString(), scan, + ProcessContentsMapper.class, ImmutableBytesWritable.class, Put.class, job); + TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(), IdentityTableReducer.class, job); FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + table.getName().getNameAsString()); @@ -138,21 +130,18 @@ public class TestTableMapReduce extends TestTableMapReduceBase { } finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } /** - * Verify scan counters are emitted from the job - * @param job - * @throws IOException + * Verify scan counters are emitted from the job nn */ private void verifyJobCountersAreEmitted(Job job) throws IOException { Counters counters = job.getCounters(); - Counter counter - = counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); + Counter counter = + counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter); assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index f86f20d6412..536aa5d4cbc 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,9 +43,9 @@ import org.junit.Test; import org.slf4j.Logger; /** - * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of a particular cell, - * and write it back to the table. Implements common components between mapred and mapreduce + * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing on + * our tables is simple - take every row in the table, reverse the value of a particular cell, and + * write it back to the table. Implements common components between mapred and mapreduce * implementations. */ public abstract class TestTableMapReduceBase { @@ -56,10 +55,7 @@ public abstract class TestTableMapReduceBase { protected static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); protected static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - protected static final byte[][] columns = new byte[][] { - INPUT_FAMILY, - OUTPUT_FAMILY - }; + protected static final byte[][] columns = new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }; /** * Retrieve my logger instance. @@ -74,9 +70,8 @@ public abstract class TestTableMapReduceBase { @BeforeClass public static void beforeClass() throws Exception { UTIL.startMiniCluster(); - Table table = - UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, - OUTPUT_FAMILY }); + Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, + new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); UTIL.createTable(TABLE_FOR_NEGATIVE_TESTS, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); } @@ -88,8 +83,7 @@ public abstract class TestTableMapReduceBase { } /** - * Test a map/reduce against a multi-region table - * @throws IOException + * Test a map/reduce against a multi-region table n */ @Test public void testMultiRegionTable() throws IOException { @@ -111,11 +105,10 @@ public abstract class TestTableMapReduceBase { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -157,11 +150,9 @@ public abstract class TestTableMapReduceBase { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. - * @param table Table to scan. - * @throws IOException - * @throws NullPointerException if we failed to find a cell value + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. + * @param table Table to scan. 
n * @throws NullPointerException if we failed to find a cell value */ private void verifyAttempt(final Table table) throws IOException, NullPointerException { Scan scan = new Scan(); @@ -170,18 +161,17 @@ public abstract class TestTableMapReduceBase { try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (getLog().isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); } @@ -194,16 +184,13 @@ public abstract class TestTableMapReduceBase { } } - if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } String first = Bytes.toString(firstValue); if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -213,9 +200,9 @@ public abstract class TestTableMapReduceBase { if (first.compareTo(second) != 0) { if (getLog().isDebugEnabled()) { - getLog().debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + getLog().debug( + "second key is not the reverse of first. row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java index e329e912c3a..3b7392b3ae4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.apache.hadoop.security.UserGroupInformation.loginUserFromKeyta import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.Closeable; import java.io.File; import java.util.Collection; @@ -58,13 +59,13 @@ import org.junit.experimental.categories.Category; /** * Test different variants of initTableMapperJob method */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestTableMapReduceUtil { private static final String HTTP_PRINCIPAL = "HTTP/localhost"; @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); + HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); @After public void after() { @@ -72,8 +73,8 @@ public class TestTableMapReduceUtil { } /* - * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because - * the method depends on an online cluster. + * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because the method + * depends on an online cluster. */ @Test @@ -81,9 +82,8 @@ public class TestTableMapReduceUtil { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); // test - TableMapReduceUtil.initTableMapperJob( - "Table", new Scan(), Import.Importer.class, Text.class, Text.class, job, - false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob("Table", new Scan(), Import.Importer.class, Text.class, + Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -96,9 +96,8 @@ public class TestTableMapReduceUtil { public void testInitTableMapperJob2() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job, false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -111,9 +110,8 @@ public class TestTableMapReduceUtil { public void testInitTableMapperJob3() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -126,9 +124,8 @@ public class TestTableMapReduceUtil { public void testInitTableMapperJob4() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - 
TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job, false); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -137,8 +134,8 @@ public class TestTableMapReduceUtil { assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); } - private static Closeable startSecureMiniCluster( - HBaseTestingUtility util, MiniKdc kdc, String principal) throws Exception { + private static Closeable startSecureMiniCluster(HBaseTestingUtility util, MiniKdc kdc, + String principal) throws Exception { Configuration conf = util.getConfiguration(); SecureTestUtil.enableSecurity(conf); @@ -148,8 +145,8 @@ public class TestTableMapReduceUtil { conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - principal + '@' + kdc.getRealm(), HTTP_PRINCIPAL + '@' + kdc.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, principal + '@' + kdc.getRealm(), + HTTP_PRINCIPAL + '@' + kdc.getRealm()); KerberosName.resetDefaultRealm(); @@ -191,8 +188,7 @@ public class TestTableMapReduceUtil { @Test @SuppressWarnings("unchecked") - public void testInitCredentialsForCluster2() - throws Exception { + public void testInitCredentialsForCluster2() throws Exception { HBaseTestingUtility util1 = new HBaseTestingUtility(); HBaseTestingUtility util2 = new HBaseTestingUtility(); @@ -260,8 +256,7 @@ public class TestTableMapReduceUtil { @Test @SuppressWarnings("unchecked") - public void testInitCredentialsForCluster4() - throws Exception { + public void testInitCredentialsForCluster4() throws Exception { HBaseTestingUtility util1 = new HBaseTestingUtility(); // Assume util1 is insecure cluster // Do not start util1 because cannot boot secured mini cluster and insecure mini cluster at once diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java index 88e40a75c53..5dd9ca43d52 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -50,7 +49,7 @@ public class TestTableRecordReader { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableRecordReader.class); + HBaseClassTestRule.forClass(TestTableRecordReader.class); private static TableName TABLE_NAME = TableName.valueOf("TestTableRecordReader"); @@ -86,12 +85,12 @@ public class TestTableRecordReader { } private static void createTestTable(TableName name, byte[][] rows, byte[][] families, - byte[][] qualifiers, byte[] cellValue) throws IOException { + byte[][] qualifiers, byte[] cellValue) throws IOException { TEST_UTIL.createTable(name, families).put(createPuts(rows, families, qualifiers, cellValue)); } private static List createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers, - byte[] value) throws IOException { + byte[] value) throws IOException { List puts = new ArrayList<>(); for (int row = 0; row < rows.length; row++) { Put put = new Put(rows[row]); @@ -114,11 +113,11 @@ public class TestTableRecordReader { @Test public void test() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table table = conn.getTable(TABLE_NAME)) { + Table table = conn.getTable(TABLE_NAME)) { org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = - new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); + new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); Scan scan = - new Scan().setMaxResultSize(1).setCaching(Integer.MAX_VALUE).setNeedCursorResult(true); + new Scan().setMaxResultSize(1).setCaching(Integer.MAX_VALUE).setNeedCursorResult(true); trr.setScan(scan); trr.setHTable(table); trr.initialize(null, null); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index 188fc1f7070..eca275cf0a9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; @@ -70,12 +70,12 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableSnapshotInputFormat.class); @@ -97,7 +97,6 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa return yyy; } - @Test public void testGetBestLocations() throws IOException { TableSnapshotInputFormatImpl tsif = new TableSnapshotInputFormatImpl(); @@ -107,36 +106,36 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa Assert.assertEquals(null, TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution = new HDFSBlocksDistribution(); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 10); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 7); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 5); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 10); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 7); + 
blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 5); + blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 2); Assert.assertEquals(Lists.newArrayList("h1", "h2"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 3); Assert.assertEquals(Lists.newArrayList("h2", "h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9); + blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 6); + blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 9); Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); @@ -149,8 +148,8 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa public static class TestTableSnapshotMapper extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { // Validate a single row coming from the snapshot, and emit the row key verifyRowFromMap(key, value); context.write(key, NullWritable.get()); @@ -160,16 +159,16 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa public static class TestTableSnapshotReducer extends Reducer { HBaseTestingUtility.SeenRowTracker rowTracker = - new HBaseTestingUtility.SeenRowTracker(bbb, yyy); + new HBaseTestingUtility.SeenRowTracker(bbb, yyy); + @Override protected void reduce(ImmutableBytesWritable key, Iterable values, - Context context) throws IOException, InterruptedException { + Context context) throws IOException, InterruptedException { rowTracker.addRow(key.get()); } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { rowTracker.validate(); } } @@ -184,19 +183,17 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa Job job = new Job(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. 
- Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -208,8 +205,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, true); try { - testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, - true); + testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, true); } finally { conf.unset(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION); } @@ -217,21 +213,19 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa @Override public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, - String snapshotName, Path tmpTableDir) throws Exception { + String snapshotName, Path tmpTableDir) throws Exception { Job job = new Job(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); } @Override - public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) - throws Exception { + public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); Configuration conf = util.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, setLocalityEnabledTo); @@ -242,14 +236,13 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa Scan scan = new Scan(getStartRow(), getEndRow()); // limit the scan if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + 
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow()); @@ -418,7 +411,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa Scan scanWithReadType = new Scan(); scanWithReadType.setReadType(readType); assertEquals(scanWithReadType.getReadType(), - serializeAndReturn(conf, scanWithReadType).getReadType()); + serializeAndReturn(conf, scanWithReadType).getReadType()); } // We should only see the DEFAULT ReadType getting updated to STREAM. Scan scanWithoutReadType = new Scan(); @@ -432,8 +425,8 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa } /** - * Serializes and deserializes the given scan in the same manner that - * TableSnapshotInputFormat does. + * Serializes and deserializes the given scan in the same manner that TableSnapshotInputFormat + * does. */ private Scan serializeAndReturn(Configuration conf, Scan s) throws IOException { conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(s)); @@ -441,23 +434,21 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa } private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits, - byte[] startRow, byte[] stopRow) - throws IOException, InterruptedException { + byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); List splits = tsif.getSplits(job); Assert.assertEquals(expectedNumSplits, splits.size()); HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, - stopRow.length > 0 ? stopRow : Bytes.toBytes("\uffff")); + stopRow.length > 0 ? 
stopRow : Bytes.toBytes("\uffff")); - boolean localityEnabled = - job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + boolean localityEnabled = job.getConfiguration().getBoolean( + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean byRegionLoc = job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, - SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); + SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); for (int i = 0; i < splits.size(); i++) { // validate input split InputSplit split = splits.get(i); @@ -480,7 +471,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa } Scan scan = - TableMapReduceUtil.convertStringToScan(snapshotRegionSplit.getDelegate().getScan()); + TableMapReduceUtil.convertStringToScan(snapshotRegionSplit.getDelegate().getScan()); if (startRow.length > 0) { Assert.assertTrue( Bytes.toStringBinary(startRow) + " should <= " + Bytes.toStringBinary(scan.getStartRow()), @@ -498,7 +489,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class); when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration()); RecordReader rr = - tsif.createRecordReader(split, taskAttemptContext); + tsif.createRecordReader(split, taskAttemptContext); rr.initialize(split, taskAttemptContext); // validate we can read all the data back @@ -517,16 +508,16 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa @Override protected void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableName, - String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, - int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, + int expectedNumSplits, boolean shutdownCluster) throws Exception { doTestWithMapReduce(util, tableName, snapshotName, getStartRow(), getEndRow(), tableDir, numRegions, numSplitsPerRegion, expectedNumSplits, shutdownCluster); } // this is also called by the IntegrationTestTableSnapshotInputFormat public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, - int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { LOG.info("testing with MapReduce"); @@ -545,17 +536,16 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa job.setJarByClass(util.getClass()); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - TestTableSnapshotInputFormat.class); + TestTableSnapshotInputFormat.class); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else 
{ - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir); } job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); @@ -583,12 +573,12 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = Job.getInstance(UTIL.getConfiguration()); Path workingDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, workingDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + workingDir); FileSystem fs = workingDir.getFileSystem(job.getConfiguration()); - Path restorePath = new Path(job.getConfiguration() - .get("hbase.TableSnapshotInputFormat.restore.dir")); + Path restorePath = + new Path(job.getConfiguration().get("hbase.TableSnapshotInputFormat.restore.dir")); Assert.assertTrue(fs.exists(restorePath)); TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName); Assert.assertFalse(fs.exists(restorePath)); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java index efa0a1b7e69..c33f761d17e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,11 +34,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestTableSplit { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSplit.class); + HBaseClassTestRule.forClass(TestTableSplit.class); @Rule public TestName name = new TestName(); @@ -46,11 +46,9 @@ public class TestTableSplit { @Test public void testHashCode() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location"); + "row-start".getBytes(), "row-end".getBytes(), "location"); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location"); + "row-start".getBytes(), "row-end".getBytes(), "location"); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); HashSet set = new HashSet<>(2); @@ -61,15 +59,13 @@ public class TestTableSplit { /** * length of region should not influence hashcode - * */ + */ @Test public void testHashCode_length() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location", 1984); + "row-start".getBytes(), "row-end".getBytes(), "location", 1984); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location", 1982); + "row-start".getBytes(), "row-end".getBytes(), "location", 1982); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); @@ -81,16 +77,14 @@ public class TestTableSplit { /** * Length of region need to be properly serialized. 
- * */ + */ @Test public void testLengthIsSerialized() throws Exception { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location", 666); + "row-start".getBytes(), "row-end".getBytes(), "location", 666); TableSplit deserialized = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start2".getBytes(), - "row-end2".getBytes(), "location1"); + "row-start2".getBytes(), "row-end2".getBytes(), "location1"); ReflectionUtils.copy(new Configuration(), split1, deserialized); Assert.assertEquals(666, deserialized.getLength()); @@ -98,36 +92,26 @@ public class TestTableSplit { @Test public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), "row-start".getBytes(), "row-end".getBytes(), - "location"); - String str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + "row-start".getBytes(), "row-end".getBytes(), "location"); + String str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=)"; Assert.assertEquals(str, split.toString()); - split = - new TableSplit(TableName.valueOf(name.getMethodName()), null, "row-start".getBytes(), - "row-end".getBytes(), "location", "encoded-region-name", 1000L); - str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=encoded-region-name)"; + split = new TableSplit(TableName.valueOf(name.getMethodName()), null, "row-start".getBytes(), + "row-end".getBytes(), "location", "encoded-region-name", 1000L); + str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=encoded-region-name)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null, null, null, 1000L); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=null)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=null)"; Assert.assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 6c4d0b6f06b..4a29eadb9c0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,28 +60,27 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTimeRangeMapRed { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTimeRangeMapRed.class); + HBaseClassTestRule.forClass(TestTimeRangeMapRed.class); private final static Logger log = LoggerFactory.getLogger(TestTimeRangeMapRed.class); - private static final HBaseTestingUtility UTIL = - new HBaseTestingUtility(); + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private Admin admin; - private static final byte [] KEY = Bytes.toBytes("row1"); + private static final byte[] KEY = Bytes.toBytes("row1"); private static final NavigableMap<Long, Boolean> TIMESTAMP = new TreeMap<>(); static { - TIMESTAMP.put((long)1245620000, false); - TIMESTAMP.put((long)1245620005, true); // include - TIMESTAMP.put((long)1245620010, true); // include - TIMESTAMP.put((long)1245620055, true); // include - TIMESTAMP.put((long)1245620100, true); // include - TIMESTAMP.put((long)1245620150, false); - TIMESTAMP.put((long)1245620250, false); + TIMESTAMP.put((long) 1245620000, false); + TIMESTAMP.put((long) 1245620005, true); // include + TIMESTAMP.put((long) 1245620010, true); // include + TIMESTAMP.put((long) 1245620055, true); // include + TIMESTAMP.put((long) 1245620100, true); // include + TIMESTAMP.put((long) 1245620150, false); + TIMESTAMP.put((long) 1245620250, false); } static final long MINSTAMP = 1245620005; static final long MAXSTAMP = 1245620100 + 1; // maxStamp itself is excluded. so increment it.
@@ -106,16 +105,13 @@ public class TestTimeRangeMapRed { } private static class ProcessTimeRangeMapper - extends TableMapper - implements Configurable { + extends TableMapper implements Configurable { private Configuration conf = null; private Table table = null; @Override - public void map(ImmutableBytesWritable key, Result result, - Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result result, Context context) throws IOException { List tsList = new ArrayList<>(); for (Cell kv : result.listCells()) { tsList.add(kv.getTimestamp()); @@ -150,7 +146,7 @@ public class TestTimeRangeMapRed { @Test public void testTimeRangeMapRed() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { final HTableDescriptor desc = new HTableDescriptor(TABLE_NAME); final HColumnDescriptor col = new HColumnDescriptor(FAMILY_NAME); col.setMaxVersions(Integer.MAX_VALUE); @@ -170,8 +166,7 @@ public class TestTimeRangeMapRed { table.close(); } - private void runTestOnTable() - throws IOException, InterruptedException, ClassNotFoundException { + private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { Job job = null; try { job = new Job(UTIL.getConfiguration(), "test123"); @@ -181,16 +176,15 @@ public class TestTimeRangeMapRed { scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.setTimeRange(MINSTAMP, MAXSTAMP); scan.setMaxVersions(); - TableMapReduceUtil.initTableMapperJob(TABLE_NAME, - scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job); + TableMapReduceUtil.initTableMapperJob(TABLE_NAME, scan, ProcessTimeRangeMapper.class, + Text.class, Text.class, job); job.waitForCompletion(true); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } finally { if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -200,11 +194,11 @@ public class TestTimeRangeMapRed { scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.setMaxVersions(1); ResultScanner scanner = table.getScanner(scan); - for (Result r: scanner) { + for (Result r : scanner) { for (Cell kv : r.listCells()) { log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv)) - + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) - + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv))); + + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) + "\t" + kv.getTimestamp() + "\t" + + Bytes.toBoolean(CellUtil.cloneValue(kv))); org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()), Bytes.toBoolean(CellUtil.cloneValue(kv))); } @@ -213,4 +207,3 @@ public class TestTimeRangeMapRed { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java index 48e85183923..70602a37166 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; + import java.util.ArrayList; import java.util.List; import org.apache.hadoop.fs.FileStatus; @@ -32,7 +33,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import 
org.mockito.Mockito; -@Category({ MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestWALInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 49554515817..52bfd8fbc18 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -24,6 +24,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; @@ -71,12 +72,12 @@ import org.mockito.stubbing.Answer; /** * Basic test for the WALPlayer M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) -//TODO : Remove this in 3.0 +@Category({ MapReduceTests.class, LargeTests.class }) +// TODO : Remove this in 3.0 public class TestWALPlayer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALPlayer.class); + HBaseClassTestRule.forClass(TestWALPlayer.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static MiniHBaseCluster cluster; @@ -115,9 +116,9 @@ public class TestWALPlayer { TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); // Copy testing recovered.edits file that is over under hbase-server test resources // up into a dir in our little hdfs cluster here. - String hbaseServerTestResourcesEdits = System.getProperty("test.build.classes") + - "/../../../hbase-server/src/test/resources/" + - TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + String hbaseServerTestResourcesEdits = + System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); assertTrue(new File(hbaseServerTestResourcesEdits).exists()); FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); // Target dir. @@ -125,7 +126,7 @@ public class TestWALPlayer { assertTrue(dfs.mkdirs(targetDir)); dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); assertEquals(0, - ToolRunner.run(new WALPlayer(this.conf), new String [] {targetDir.toString()})); + ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); // I don't know how many edits are in this file for this table... so just check more than 1. 
assertTrue(TEST_UTIL.countRows(tn) > 0); } @@ -157,19 +158,17 @@ public class TestWALPlayer { // replay the WAL, map table 1 to table 2 WAL log = cluster.getRegionServer(0).getWAL(null); log.rollWriter(); - String walInputDir = new Path(cluster.getMaster().getMasterFileSystem() - .getWALRootDir(), HConstants.HREGION_LOGDIR_NAME).toString(); + String walInputDir = new Path(cluster.getMaster().getMasterFileSystem().getWALRootDir(), + HConstants.HREGION_LOGDIR_NAME).toString(); - Configuration configuration= TEST_UTIL.getConfiguration(); + Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName="_test_.name"; + String optionName = "_test_.name"; configuration.set(optionName, "1000"); player.setupTime(configuration, optionName); - assertEquals(1000,configuration.getLong(optionName,0)); + assertEquals(1000, configuration.getLong(optionName, 0)); assertEquals(0, ToolRunner.run(configuration, player, - new String[] {walInputDir, tableName1.getNameAsString(), - tableName2.getNameAsString() })); - + new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); // verify the WAL was player into table 2 Get g = new Get(ROW); @@ -233,7 +232,7 @@ public class TestWALPlayer { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -246,8 +245,8 @@ public class TestWALPlayer { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + - " [ ]")); + assertTrue(data.toString() + .contains("Usage: WALPlayer [options] " + " [ ]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 41f8f351cfd..cfe67e087a1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.List; import java.util.NavigableMap; @@ -71,7 +72,7 @@ public class TestWALRecordReader { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALRecordReader.class); + HBaseClassTestRule.forClass(TestWALRecordReader.class); private static final Logger LOG = LoggerFactory.getLogger(TestWALRecordReader.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -82,7 +83,7 @@ public class TestWALRecordReader { private static Path walRootDir; // visible for TestHLogRecordReader static final TableName tableName = TableName.valueOf(getName()); - private static final byte [] rowName = tableName.getName(); + private static final byte[] rowName = tableName.getName(); // visible for 
TestHLogRecordReader static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); private static final byte[] family = Bytes.toBytes("column"); @@ -145,8 +146,8 @@ public class TestWALRecordReader { edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value)); log.appendData(info, getWalKeyImpl(ts, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts+1, value)); - log.appendData(info, getWalKeyImpl(ts+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value)); + log.appendData(info, getWalKeyImpl(ts + 1, scopes), edit); log.sync(); Threads.sleep(10); LOG.info("Before 1st WAL roll " + log.toString()); @@ -157,17 +158,16 @@ public class TestWALRecordReader { long ts1 = EnvironmentEdgeManager.currentTime(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value)); - log.appendData(info, getWalKeyImpl(ts1+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value)); + log.appendData(info, getWalKeyImpl(ts1 + 1, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1+2, value)); - log.appendData(info, getWalKeyImpl(ts1+2, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value)); + log.appendData(info, getWalKeyImpl(ts1 + 2, scopes), edit); log.sync(); log.shutdown(); walfactory.shutdown(); LOG.info("Closed WAL " + log.toString()); - WALInputFormat input = new WALInputFormat(); Configuration jobConf = new Configuration(conf); jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString()); @@ -178,7 +178,7 @@ public class TestWALRecordReader { assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); - jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1+1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(2, splits.size()); // Both entries from first file are in-range. 
@@ -201,12 +201,12 @@ public class TestWALRecordReader { public void testWALRecordReader() throws Exception { final WALFactory walfactory = new WALFactory(conf, getName()); WAL log = walfactory.getWAL(info); - byte [] value = Bytes.toBytes("value"); + byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - EnvironmentEdgeManager.currentTime(), value)); - long txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), + value)); + long txid = + log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); Thread.sleep(1); // make sure 2nd log gets a later timestamp @@ -214,10 +214,9 @@ public class TestWALRecordReader { log.rollWriter(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), - EnvironmentEdgeManager.currentTime(), value)); - txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), + value)); + txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); walfactory.shutdown(); @@ -240,7 +239,7 @@ public class TestWALRecordReader { // now test basic time ranges: // set an endtime, the 2nd log file can be ignored completely. - jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs-1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs - 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); @@ -270,8 +269,10 @@ public class TestWALRecordReader { for (byte[] column : columns) { assertTrue(reader.nextKeyValue()); Cell cell = reader.getCurrentValue().getCells().get(0); - if (!Bytes.equals(column, 0, column.length, cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength())) { + if ( + !Bytes.equals(column, 0, column.length, cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + ) { assertTrue( "expected [" + Bytes.toString(column) + "], actual [" + Bytes.toString( cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", @@ -283,8 +284,8 @@ public class TestWALRecordReader { } /** - * Create a new reader from the split, match the edits against the passed columns, - * moving WAL to archive in between readings + * Create a new reader from the split, match the edits against the passed columns, moving WAL to + * archive in between readings */ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) throws Exception { WALRecordReader reader = getReader(); @@ -292,8 +293,10 @@ public class TestWALRecordReader { assertTrue(reader.nextKeyValue()); Cell cell = reader.getCurrentValue().getCells().get(0); - if (!Bytes.equals(col1, 0, col1.length, cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())) { + if ( + !Bytes.equals(col1, 0, col1.length, cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + ) { assertTrue( "expected [" + Bytes.toString(col1) + "], actual [" + Bytes.toString( cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", @@ -314,8 +317,10 @@ public class TestWALRecordReader { // TODO: the 
archivedLogLocation to read next key value. assertTrue(reader.nextKeyValue()); cell = reader.getCurrentValue().getCells().get(0); - if (!Bytes.equals(col2, 0, col2.length, cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())) { + if ( + !Bytes.equals(col2, 0, col2.length, cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + ) { assertTrue( "expected [" + Bytes.toString(col2) + "], actual [" + Bytes.toString( cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java index 8925615b5f2..7c02ae85255 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -28,9 +27,9 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; /** - * Dummy mapper used for unit tests to verify that the mapper can be injected. - * This approach would be used if a custom transformation needed to be done after - * reading the input data before writing it to HFiles. + * Dummy mapper used for unit tests to verify that the mapper can be injected. This approach would + * be used if a custom transformation needed to be done after reading the input data before writing + * it to HFiles. */ public class TsvImporterCustomTestMapper extends TsvImporterMapper { @Override @@ -39,12 +38,11 @@ public class TsvImporterCustomTestMapper extends TsvImporterMapper { } /** - * Convert a line of TSV text into an HBase table row after transforming the - * values by multiplying them by 3. + * Convert a line of TSV text into an HBase table row after transforming the values by multiplying + * them by 3. */ @Override - public void map(LongWritable offset, Text value, Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] family = Bytes.toBytes("FAM"); final byte[][] qualifiers = { Bytes.toBytes("A"), Bytes.toBytes("B") }; @@ -53,20 +51,19 @@ public class TsvImporterCustomTestMapper extends TsvImporterMapper { String[] valueTokens = new String(lineBytes, "UTF-8").split("\u001b"); // create the rowKey and Put - ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); Put put = new Put(rowKey.copyBytes()); put.setDurability(Durability.SKIP_WAL); - //The value should look like this: VALUE1 or VALUE2. Let's multiply - //the integer by 3 - for(int i = 1; i < valueTokens.length; i++) { + // The value should look like this: VALUE1 or VALUE2. 
Let's multiply + // the integer by 3 + for (int i = 1; i < valueTokens.length; i++) { String prefix = valueTokens[i].substring(0, "VALUE".length()); String suffix = valueTokens[i].substring("VALUE".length()); String newValue = prefix + Integer.parseInt(suffix) * 3; - KeyValue kv = new KeyValue(rowKey.copyBytes(), family, - qualifiers[i-1], Bytes.toBytes(newValue)); + KeyValue kv = + new KeyValue(rowKey.copyBytes(), family, qualifiers[i - 1], Bytes.toBytes(newValue)); put.add(kv); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java index 850d4abac80..e1f4dcdf9e8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -27,18 +26,17 @@ import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.ParsedLine; import org.apache.hadoop.hbase.util.Bytes; /** - * Just shows a simple example of how the attributes can be extracted and added - * to the puts + * Just shows a simple example of how the attributes can be extracted and added to the puts */ public class TsvImporterCustomTestMapperForOprAttr extends TsvImporterMapper { @Override protected void populatePut(byte[] lineBytes, ParsedLine parsed, Put put, int i) - throws BadTsvLineException, IOException { + throws BadTsvLineException, IOException { KeyValue kv; kv = new KeyValue(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, parsed.getColumnOffset(i), - parsed.getColumnLength(i)); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i)); if (parsed.getIndividualAttributes() != null) { String[] attributes = parsed.getIndividualAttributes(); for (String attr : attributes) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java index 26cee49e89e..71614a29277 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -84,13 +84,13 @@ public class TestCompactionTool { Configuration config = HBaseConfiguration.create(); config.set("fs.defaultFS", defaultFS); int result = ToolRunner.run(config, new CompactionTool(), - new String[]{"-compactOnce", "-major", storePath}); - assertEquals(0,result); + new String[] { "-compactOnce", "-major", storePath }); + assertEquals(0, result); regionDirFiles = fs.listStatus(new Path(storePath)); assertEquals(1, regionDirFiles.length); } - private void putAndFlush(int key) throws Exception{ + private void putAndFlush(int key) throws Exception { Put put = new Put(Bytes.toBytes(key)); put.addColumn(HBaseTestingUtility.fam1, qualifier, Bytes.toBytes("val" + key)); region.put(put); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index 04b766df435..4ccba1578d9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -74,7 +74,7 @@ public class TestVerifyReplication extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplication.class); + HBaseClassTestRule.forClass(TestVerifyReplication.class); private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplication.class); @@ -95,9 +95,11 @@ public class TestVerifyReplication extends TestReplicationBase { public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = + TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { @@ -107,7 +109,7 @@ public class TestVerifyReplication extends TestReplicationBase { } static void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { Job job = new VerifyReplication().createSubmittableJob(new Configuration(CONF1), args); if (job == null) { fail("Job wasn't created, see the log"); @@ -167,9 +169,9 @@ public class TestVerifyReplication extends TestReplicationBase { try { ColumnFamilyDescriptor fam = ColumnFamilyDescriptorBuilder.newBuilder(familyname) - .setMaxVersions(100).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build(); + .setMaxVersions(100).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build(); TableDescriptor table = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(fam).build(); + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(fam).build(); Connection connection1 = ConnectionFactory.createConnection(CONF1); Connection connection2 = ConnectionFactory.createConnection(CONF2); @@ -239,7 +241,7 @@ public class TestVerifyReplication extends TestReplicationBase { } static void checkRestoreTmpDir(Configuration conf, String restoreTmpDir, int expectedCount) - 
throws IOException { + throws IOException { FileSystem fs = FileSystem.get(conf); FileStatus[] subDirectories = fs.listStatus(new Path(restoreTmpDir)); assertNotNull(subDirectories); @@ -249,7 +251,6 @@ public class TestVerifyReplication extends TestReplicationBase { } } - @Test public void testVerifyRepJobWithQuorumAddress() throws Exception { // Populate the tables, at the same time it guarantees that the tables are @@ -374,7 +375,7 @@ public class TestVerifyReplication extends TestReplicationBase { // with a peerTableName along with quorum address (a cluster key) String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - UTIL2.getClusterKey(), tableName.getNameAsString() }; + UTIL2.getClusterKey(), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); UTIL2.deleteTableData(peerTableName); @@ -391,23 +392,23 @@ public class TestVerifyReplication extends TestReplicationBase { FileSystem fs = rootDir.getFileSystem(CONF1); String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(CONF2); FileSystem peerFs = peerRootDir.getFileSystem(CONF2); String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String tmpPath1 = UTIL1.getRandomDir().toString(); String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); @@ -421,7 +422,7 @@ public class TestVerifyReplication extends TestReplicationBase { put = new Put(result.getRow()); Cell firstVal = result.rawCells()[0]; put.addColumn(CellUtil.cloneFamily(firstVal), CellUtil.cloneQualifier(firstVal), - Bytes.toBytes("diff data")); + Bytes.toBytes("diff data")); htable3.put(put); } Delete delete = new Delete(put.getRow()); @@ -429,16 +430,16 @@ public class TestVerifyReplication extends TestReplicationBase { sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + 
Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index 51a0748b0d6..1a715392b27 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -70,7 +70,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationAdjunct.class); + HBaseClassTestRule.forClass(TestVerifyReplicationAdjunct.class); private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplicationAdjunct.class); @@ -90,9 +90,11 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { @BeforeClass public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = + TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { admin2.createTable(peerTable, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); @@ -243,21 +245,21 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { loadData("zzz", row); waitForReplication(NB_ROWS_IN_BATCH * 4, NB_RETRIES * 4); String[] args = - new String[] { "--row-prefixes=prefixrow,secondrow", PEER_ID, tableName.getNameAsString() }; + new String[] { "--row-prefixes=prefixrow,secondrow", PEER_ID, tableName.getNameAsString() }; TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH * 2, 0); } @Test public void testVerifyReplicationSnapshotArguments() { String[] args = - new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() }; + new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() }; assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--sourceSnapshotTmpDir=tmp", "2", tableName.getNameAsString() }; assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=tmp", "2", - tableName.getNameAsString() }; + tableName.getNameAsString() }; assertTrue(Lists.newArrayList(args).toString(), new 
VerifyReplication().doCommandLine(args)); args = new String[] { "--peerSnapshotName=snapshot1", "2", tableName.getNameAsString() }; @@ -267,13 +269,13 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--peerSnapshotName=snapshot1", "--peerSnapshotTmpDir=/tmp/", - "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", - tableName.getNameAsString() }; + "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", + tableName.getNameAsString() }; assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/", - "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs", - "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() }; + "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs", + "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() }; assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java index 1609865d737..2d83eac7762 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; @@ -64,10 +63,10 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; public class TestVerifyReplicationCrossDiffHdfs { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationCrossDiffHdfs.class); + HBaseClassTestRule.forClass(TestVerifyReplicationCrossDiffHdfs.class); private static final Logger LOG = - LoggerFactory.getLogger(TestVerifyReplicationCrossDiffHdfs.class); + LoggerFactory.getLogger(TestVerifyReplicationCrossDiffHdfs.class); private static HBaseTestingUtility util1; private static HBaseTestingUtility util2; @@ -111,16 +110,16 @@ public class TestVerifyReplicationCrossDiffHdfs { private static void createTestingTable(Admin admin) throws IOException { TableDescriptor table = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(100) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(100) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .build(); admin.createTable(table); } private static void addTestingPeer() throws IOException { ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(util2.getClusterKey()).setReplicateAllUserTables(false) - .setTableCFsMap(ImmutableMap.of(TABLE_NAME, ImmutableList.of())).build(); + .setClusterKey(util2.getClusterKey()).setReplicateAllUserTables(false) + .setTableCFsMap(ImmutableMap.of(TABLE_NAME, ImmutableList.of())).build(); util1.getAdmin().addReplicationPeer(PEER_ID, rpc); } @@ -139,7 +138,7 @@ public class TestVerifyReplicationCrossDiffHdfs { results = rs.next(numOfRows); if (results == null || results.length < numOfRows) { LOG.info("Retrying, wait until the peer received all the rows, currentRows:" - + (results == null ? 0 : results.length)); + + (results == null ? 0 : results.length)); Thread.sleep(100); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java index 7df9640b525..48da7a05d60 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; + import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -96,8 +96,8 @@ public class TestVerifyReplicationSecureClusterCredentials { conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), HTTP_PRINCIPAL + '@' + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), + HTTP_PRINCIPAL + '@' + KDC.getRealm()); util.startMiniCluster(); } @@ -112,13 +112,14 @@ public class TestVerifyReplicationSecureClusterCredentials { setupCluster(UTIL2); try (Admin admin = UTIL1.getAdmin()) { - admin.addReplicationPeer("1", ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) - .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) - .putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) - .build()); + admin.addReplicationPeer("1", + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) + .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) + .putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) + .build()); } } @@ -130,10 +131,8 @@ public class TestVerifyReplicationSecureClusterCredentials { @Parameters public static Collection> peer() { - return Arrays.asList( - () -> "1", - () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration()) - ); + return Arrays.asList(() -> "1", + () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())); } @Parameter @@ -143,11 +142,7 @@ public class TestVerifyReplicationSecureClusterCredentials { @SuppressWarnings("unchecked") public void testJobCredentials() throws Exception { Job job = new VerifyReplication().createSubmittableJob( - new Configuration(UTIL1.getConfiguration()), - new String[] { - peer.get(), - "table" - }); + new Configuration(UTIL1.getConfiguration()), new String[] { peer.get(), "table" }); Credentials credentials = job.getCredentials(); Collection> tokens = credentials.getAllTokens(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 3560ca4162b..4a1135b1b10 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -64,12 +64,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshot.class); + HBaseClassTestRule.forClass(TestExportSnapshot.class); private static final Logger LOG = 
LoggerFactory.getLogger(TestExportSnapshot.class); @@ -189,32 +189,30 @@ public class TestExportSnapshot { } private void testExportFileSystemState(final TableName tableName, final byte[] snapshotName, - final byte[] targetName, int filesExpected) throws Exception { - testExportFileSystemState(tableName, snapshotName, targetName, - filesExpected, getHdfsDestinationDir(), false); + final byte[] targetName, int filesExpected) throws Exception { + testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, + getHdfsDestinationDir(), false); } - protected void testExportFileSystemState(final TableName tableName, - final byte[] snapshotName, final byte[] targetName, int filesExpected, - Path copyDir, boolean overwrite) throws Exception { + protected void testExportFileSystemState(final TableName tableName, final byte[] snapshotName, + final byte[] targetName, int filesExpected, Path copyDir, boolean overwrite) throws Exception { testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName, - filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, - overwrite, getBypassRegionPredicate(), true); + filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite, + getBypassRegionPredicate(), true); } /** * Creates destination directory, runs ExportSnapshot() tool, and runs some verifications. */ protected static void testExportFileSystemState(final Configuration conf, - final TableName tableName, - final byte[] snapshotName, final byte[] targetName, final int filesExpected, - final Path srcDir, Path rawTgtDir, final boolean overwrite, - final RegionPredicate bypassregionPredicate, boolean success) throws Exception { + final TableName tableName, final byte[] snapshotName, final byte[] targetName, + final int filesExpected, final Path srcDir, Path rawTgtDir, final boolean overwrite, + final RegionPredicate bypassregionPredicate, boolean success) throws Exception { FileSystem tgtFs = rawTgtDir.getFileSystem(conf); FileSystem srcFs = srcDir.getFileSystem(conf); Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory()); - LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", - tgtFs.getUri(), tgtDir, rawTgtDir, srcFs.getUri(), srcDir); + LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", tgtFs.getUri(), tgtDir, + rawTgtDir, srcFs.getUri(), srcDir); List opts = new ArrayList<>(); opts.add("--snapshot"); opts.add(Bytes.toString(snapshotName)); @@ -242,11 +240,11 @@ public class TestExportSnapshot { // Verify File-System state FileStatus[] rootFiles = tgtFs.listStatus(tgtDir); assertEquals(filesExpected > 0 ? 
2 : 1, rootFiles.length); - for (FileStatus fileStatus: rootFiles) { + for (FileStatus fileStatus : rootFiles) { String name = fileStatus.getPath().getName(); assertTrue(fileStatus.toString(), fileStatus.isDirectory()); - assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) || - name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); + assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) + || name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); } LOG.info("Verified filesystem state"); @@ -263,7 +261,7 @@ public class TestExportSnapshot { * verify if the snapshot folder on file-system 1 match the one on file-system 2 */ protected static void verifySnapshotDir(final FileSystem fs1, final Path root1, - final FileSystem fs2, final Path root2) throws IOException { + final FileSystem fs2, final Path root2) throws IOException { assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2)); } @@ -271,17 +269,17 @@ public class TestExportSnapshot { * Verify if the files exists */ protected static Set verifySnapshot(final Configuration conf, final FileSystem fs, - final Path rootDir, final TableName tableName, final String snapshotName, - final RegionPredicate bypassregionPredicate) throws IOException { - final Path exportedSnapshot = new Path(rootDir, - new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); + final Path rootDir, final TableName tableName, final String snapshotName, + final RegionPredicate bypassregionPredicate) throws IOException { + final Path exportedSnapshot = + new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); final Set snapshotFiles = new HashSet<>(); final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot, - new SnapshotReferenceUtil.SnapshotVisitor() { + new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { if (bypassregionPredicate != null && bypassregionPredicate.evaluate(regionInfo)) { return; } @@ -291,7 +289,7 @@ public class TestExportSnapshot { if (!storeFile.hasReference()) { verifyNonEmptyFile(new Path(exportedArchive, new Path(CommonFSUtils.getTableDir(new Path("./"), tableName), - new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); + new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); } } @@ -309,13 +307,13 @@ public class TestExportSnapshot { } private static Set listFiles(final FileSystem fs, final Path root, final Path dir) - throws IOException { + throws IOException { Set files = new HashSet<>(); LOG.debug("List files in {} in root {} at {}", fs, root, dir); int rootPrefix = root.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString().length(); FileStatus[] list = CommonFSUtils.listStatus(fs, dir); if (list != null) { - for (FileStatus fstat: list) { + for (FileStatus fstat : list) { LOG.debug(Objects.toString(fstat.getPath())); if (fstat.isDirectory()) { files.addAll(listFiles(fs, root, fstat.getPath())); @@ -329,8 +327,8 @@ public class TestExportSnapshot { private Path getHdfsDestinationDir() { Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - Path path = new Path(new Path(rootDir, "export-test"), "export-" + - EnvironmentEdgeManager.currentTime()); + Path path = + new Path(new Path(rootDir, "export-test"), 
"export-" + EnvironmentEdgeManager.currentTime()); LOG.info("HDFS export destination path: " + path); return path; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java index 7238433d4db..55fdbfeec64 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertFalse; + import java.util.Iterator; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -44,19 +45,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but - * the test suite ran too close to the maximum time limit so we split these out. Uses - * facility from TestExportSnapshot where possible. + * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but the + * test suite ran too close to the maximum time limit so we split these out. Uses facility from + * TestExportSnapshot where possible. * @see TestExportSnapshot */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshotAdjunct { private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotAdjunct.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotAdjunct.class); + HBaseClassTestRule.forClass(TestExportSnapshotAdjunct.class); @Rule public final TestName testName = new TestName(); @@ -76,12 +77,11 @@ public class TestExportSnapshotAdjunct { } /** - * Check for references to '/tmp'. We are trying to avoid having references to outside of the - * test data dir when running tests. References outside of the test dir makes it so concurrent - * tests can stamp on each other by mistake. This check is for references to the 'tmp'. - * - * This is a strange place for this test but I want somewhere where the configuration is - * full -- filed w/ hdfs and mapreduce configurations. + * Check for references to '/tmp'. We are trying to avoid having references to outside of the test + * data dir when running tests. References outside of the test dir makes it so concurrent tests + * can stamp on each other by mistake. This check is for references to the 'tmp'. This is a + * strange place for this test but I want somewhere where the configuration is full -- filed w/ + * hdfs and mapreduce configurations. 
*/ private void checkForReferencesToTmpDir() { Configuration conf = TEST_UTIL.getConfiguration(); @@ -127,8 +127,7 @@ public class TestExportSnapshotAdjunct { admin.snapshot(emptySnapshotName, tableName); // Add some rows - SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, - TestExportSnapshot.FAMILY); + SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, TestExportSnapshot.FAMILY); tableNumFiles = admin.getRegions(tableName).size(); // take a snapshot @@ -152,9 +151,8 @@ public class TestExportSnapshotAdjunct { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName), - tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, + TestExportSnapshot.testExportFileSystemState(conf, tableName, Bytes.toBytes(snapshotName), + Bytes.toBytes(snapshotName), tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, true); } @@ -170,8 +168,8 @@ public class TestExportSnapshotAdjunct { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName), - tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, false); + TestExportSnapshot.testExportFileSystemState(conf, tableName, Bytes.toBytes(snapshotName), + Bytes.toBytes(snapshotName), tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, + null, false); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java index d104d830985..71402d0989d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,57 +34,52 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot /** * Test Export Snapshot Tool helpers */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestExportSnapshotHelpers { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotHelpers.class); + HBaseClassTestRule.forClass(TestExportSnapshotHelpers.class); /** - * Verfy the result of getBalanceSplits() method. - * The result are groups of files, used as input list for the "export" mappers. - * All the groups should have similar amount of data. - * - * The input list is a pair of file path and length. - * The getBalanceSplits() function sort it by length, - * and assign to each group a file, going back and forth through the groups. + * Verfy the result of getBalanceSplits() method. The result are groups of files, used as input + * list for the "export" mappers. All the groups should have similar amount of data. The input + * list is a pair of file path and length. 
The getBalanceSplits() function sort it by length, and + assign to each group a file, going back and forth through the groups. */ @Test public void testBalanceSplit() throws Exception { // Create a list of files List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<>(21); for (long i = 0; i <= 20; i++) { - SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() - .setType(SnapshotFileInfo.Type.HFILE) - .setHfile("file-" + i) - .build(); + SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder().setType(SnapshotFileInfo.Type.HFILE) + .setHfile("file-" + i).build(); files.add(new Pair<>(fileInfo, i)); } // Create 5 groups (total size 210) - // group 0: 20, 11, 10, 1 (total size: 42) - // group 1: 19, 12, 9, 2 (total size: 42) - // group 2: 18, 13, 8, 3 (total size: 42) - // group 3: 17, 12, 7, 4 (total size: 42) - // group 4: 16, 11, 6, 5 (total size: 42) + // group 0: 20, 11, 10, 1 (total size: 42) + // group 1: 19, 12, 9, 2 (total size: 42) + // group 2: 18, 13, 8, 3 (total size: 42) + // group 3: 17, 12, 7, 4 (total size: 42) + // group 4: 16, 11, 6, 5 (total size: 42) List<List<Pair<SnapshotFileInfo, Long>>> splits = ExportSnapshot.getBalancedSplits(files, 5); assertEquals(5, splits.size()); - String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"}; + String[] split0 = new String[] { "file-20", "file-11", "file-10", "file-1", "file-0" }; verifyBalanceSplit(splits.get(0), split0, 42); - String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"}; + String[] split1 = new String[] { "file-19", "file-12", "file-9", "file-2" }; verifyBalanceSplit(splits.get(1), split1, 42); - String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"}; + String[] split2 = new String[] { "file-18", "file-13", "file-8", "file-3" }; verifyBalanceSplit(splits.get(2), split2, 42); - String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"}; + String[] split3 = new String[] { "file-17", "file-14", "file-7", "file-4" }; verifyBalanceSplit(splits.get(3), split3, 42); - String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"}; + String[] split4 = new String[] { "file-16", "file-15", "file-6", "file-5" }; verifyBalanceSplit(splits.get(4), split4, 42); } private void verifyBalanceSplit(final List<Pair<SnapshotFileInfo, Long>> split, - final String[] expected, final long expectedSize) { + final String[] expected, final long expectedSize) { assertEquals(expected.length, split.size()); long totalSize = 0; for (int i = 0; i < expected.length; ++i) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java index 2ab9f11adef..60fb387f8cc 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java @@ -41,16 +41,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Test Export Snapshot Tool - * Tests V1 snapshots only. Used to ALSO test v2 but strange failure so separate the tests. - * See companion file for test of v2 snapshot.
* @see TestExportSnapshotV2NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV1NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotV1NoCluster.class); + HBaseClassTestRule.forClass(TestExportSnapshotV1NoCluster.class); private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotV1NoCluster.class); private HBaseCommonTestingUtility testUtil = new HBaseCommonTestingUtility(); @@ -88,36 +87,36 @@ public class TestExportSnapshotV1NoCluster { */ @Test public void testSnapshotWithRefsExportFileSystemState() throws Exception { - final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), - this.fs, testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1("tableWithRefsV1", - "tableWithRefsV1"); + final SnapshotMock snapshotMock = + new SnapshotMock(testUtil.getConfiguration(), this.fs, testDir); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV1("tableWithRefsV1", "tableWithRefsV1"); testSnapshotWithRefsExportFileSystemState(this.fs, builder, testUtil, testDir); } /** - * Generates a couple of regions for the specified SnapshotMock, - * and then it will run the export and verification. + * Generates a couple of regions for the specified SnapshotMock, and then it will run the export + * and verification. */ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs, - SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtility testUtil, Path testDir) - throws Exception { + SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtility testUtil, Path testDir) + throws Exception { Path[] r1Files = builder.addRegion(); Path[] r2Files = builder.addRegion(); builder.commit(); int snapshotFilesCount = r1Files.length + r2Files.length; byte[] snapshotName = Bytes.toBytes(builder.getSnapshotDescription().getName()); TableName tableName = builder.getTableDescriptor().getTableName(); - TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), - tableName, snapshotName, snapshotName, snapshotFilesCount, - testDir, getDestinationDir(fs, testUtil, testDir), false, null, true); + TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), tableName, + snapshotName, snapshotName, snapshotFilesCount, testDir, + getDestinationDir(fs, testUtil, testDir), false, null, true); } static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtility hctu, Path testDir) - throws IOException { - Path path = new Path(new Path(testDir, "export-test"), - "export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(), - fs.getWorkingDirectory()); + throws IOException { + Path path = + new Path(new Path(testDir, "export-test"), "export-" + EnvironmentEdgeManager.currentTime()) + .makeQualified(fs.getUri(), fs.getWorkingDirectory()); LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(), fs.getWorkingDirectory(), testDir); return path; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java index 4cd1dfdfb71..70c598d520f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java @@ -16,7 +16,9 @@ * limitations under the License. */ package org.apache.hadoop.hbase.snapshot; + import static org.junit.Assert.assertTrue; + import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; @@ -36,11 +38,11 @@ import org.slf4j.LoggerFactory; * Test Export Snapshot Tool; tests v2 snapshots. * @see TestExportSnapshotV1NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV2NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotV2NoCluster.class); + HBaseClassTestRule.forClass(TestExportSnapshotV2NoCluster.class); private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotV2NoCluster.class); @@ -61,8 +63,8 @@ public class TestExportSnapshotV2NoCluster { public void testSnapshotWithRefsExportFileSystemState() throws Exception { final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), testDir.getFileSystem(testUtil.getConfiguration()), testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("tableWithRefsV2", - "tableWithRefsV2"); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV2("tableWithRefsV2", "tableWithRefsV2"); TestExportSnapshotV1NoCluster.testSnapshotWithRefsExportFileSystemState(this.fs, builder, this.testUtil, this.testDir); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java index 5560555e9f3..fe380e683db 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
      + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -31,12 +31,12 @@ import org.junit.Ignore; import org.junit.experimental.categories.Category; @Ignore // HBASE-24493 -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotWithTemporaryDirectory.class); + HBaseClassTestRule.forClass(TestExportSnapshotWithTemporaryDirectory.class); @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -54,8 +54,8 @@ public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot Path tmpDir = null; try { FileSystem localFs = FileSystem.getLocal(conf); - tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()). - makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); + tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()) + .makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); } catch (IOException ioe) { throw new RuntimeException(ioe); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java index 4f0d3deebe2..4943b40d6a7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ import org.junit.experimental.categories.Category; * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobExportSnapshot extends TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobExportSnapshot.class); + HBaseClassTestRule.forClass(TestMobExportSnapshot.class); public static void setUpBaseConf(Configuration conf) { TestExportSnapshot.setUpBaseConf(conf); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java index 484f88afecf..2fa686f768f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,12 +31,12 @@ import org.junit.experimental.categories.Category; /** * Reruns TestMobExportSnapshot using MobExportSnapshot in secure mode. 
*/ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobSecureExportSnapshot extends TestMobExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobSecureExportSnapshot.class); + HBaseClassTestRule.forClass(TestMobSecureExportSnapshot.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java index ce1c4cb39a0..a2a588ac572 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,12 +31,12 @@ import org.junit.experimental.categories.Category; /** * Reruns TestExportSnapshot using ExportSnapshot in secure mode. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestSecureExportSnapshot extends TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureExportSnapshot.class); + HBaseClassTestRule.forClass(TestSecureExportSnapshot.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 5e61a4b1268..314ee93bff5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -68,8 +69,8 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; /** * A command-line utility that reads, writes, and verifies data. Unlike - * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, - * and supports simultaneously writing and reading the same set of keys. + * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, and + * supports simultaneously writing and reading the same set of keys. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class LoadTestTool extends AbstractHBaseTool { @@ -94,23 +95,21 @@ public class LoadTestTool extends AbstractHBaseTool { /** Usage string for the load option */ protected static final String OPT_USAGE_LOAD = - ":" + - "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; + ":" + "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the read option */ protected static final String OPT_USAGE_READ = - "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; + "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the update option */ - protected static final String OPT_USAGE_UPDATE = - "[:<#threads=" + DEFAULT_NUM_THREADS - + ">][:<#whether to ignore nonce collisions=0>]"; + protected static final String OPT_USAGE_UPDATE = "[:<#threads=" + + DEFAULT_NUM_THREADS + ">][:<#whether to ignore nonce collisions=0>]"; - protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " + - Arrays.toString(BloomType.values()); + protected static final String OPT_USAGE_BLOOM = + "Bloom filter type, one of " + Arrays.toString(BloomType.values()); - protected static final String OPT_USAGE_COMPRESSION = "Compression type, " + - "one of " + Arrays.toString(Compression.Algorithm.values()); + protected static final String OPT_USAGE_COMPRESSION = + "Compression type, " + "one of " + Arrays.toString(Compression.Algorithm.values()); protected static final String OPT_VERBOSE = "verbose"; @@ -121,12 +120,12 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush."; public static final String OPT_INMEMORY = "in_memory"; - public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; + public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; public static final String OPT_GENERATOR = "generator"; public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool." 
- + " Any args for this class can be passed as colon separated after class name"; + + " Any args for this class can be passed as colon separated after class name"; public static final String OPT_WRITER = "writer"; public static final String OPT_WRITER_USAGE = "The class for executing the write requests"; @@ -157,25 +156,25 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_ENCRYPTION = "encryption"; protected static final String OPT_ENCRYPTION_USAGE = - "Enables transparent encryption on the test table, one of " + - Arrays.toString(Encryption.getSupportedCiphers()); + "Enables transparent encryption on the test table, one of " + + Arrays.toString(Encryption.getSupportedCiphers()); public static final String OPT_NUM_REGIONS_PER_SERVER = "num_regions_per_server"; - protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE - = "Desired number of regions per region server. Defaults to 5."; + protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE = + "Desired number of regions per region server. Defaults to 5."; public static int DEFAULT_NUM_REGIONS_PER_SERVER = 5; public static final String OPT_REGION_REPLICATION = "region_replication"; protected static final String OPT_REGION_REPLICATION_USAGE = - "Desired number of replicas per region"; + "Desired number of replicas per region"; public static final String OPT_REGION_REPLICA_ID = "region_replica_id"; protected static final String OPT_REGION_REPLICA_ID_USAGE = - "Region replica id to do the reads from"; + "Region replica id to do the reads from"; public static final String OPT_MOB_THRESHOLD = "mob_threshold"; protected static final String OPT_MOB_THRESHOLD_USAGE = - "Desired cell size to exceed in bytes that will use the MOB write path"; + "Desired cell size to exceed in bytes that will use the MOB write path"; protected static final long DEFAULT_START_KEY = 0; @@ -222,7 +221,7 @@ public class LoadTestTool extends AbstractHBaseTool { private String superUser; private String userNames; - //This file is used to read authentication information in secure clusters. + // This file is used to read authentication information in secure clusters. private String authnFileName; private int numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER; @@ -232,21 +231,19 @@ public class LoadTestTool extends AbstractHBaseTool { private int mobThreshold = -1; // not set // TODO: refactor LoadTestToolImpl somewhere to make the usage from tests less bad, - // console tool itself should only be used from console. + // console tool itself should only be used from console. 
protected boolean isSkipInit = false; protected boolean isInitOnly = false; protected Cipher cipher = null; - protected String[] splitColonSeparated(String option, - int minNumCols, int maxNumCols) { + protected String[] splitColonSeparated(String option, int minNumCols, int maxNumCols) { String optVal = cmd.getOptionValue(option); String[] cols = optVal.split(COLON); if (cols.length < minNumCols || cols.length > maxNumCols) { - throw new IllegalArgumentException("Expected at least " - + minNumCols + " columns but no more than " + maxNumCols + - " in the colon-separated value '" + optVal + "' of the " + - "-" + option + " option"); + throw new IllegalArgumentException( + "Expected at least " + minNumCols + " columns but no more than " + maxNumCols + + " in the colon-separated value '" + optVal + "' of the " + "-" + option + " option"); } return cols; } @@ -260,22 +257,21 @@ public class LoadTestTool extends AbstractHBaseTool { } /** - * Apply column family options such as Bloom filters, compression, and data - * block encoding. + * Apply column family options such as Bloom filters, compression, and data block encoding. */ - protected void applyColumnFamilyOptions(TableName tableName, - byte[][] columnFamilies) throws IOException { + protected void applyColumnFamilyOptions(TableName tableName, byte[][] columnFamilies) + throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { TableDescriptor tableDesc = admin.getDescriptor(tableName); LOG.info("Disabling table " + tableName); admin.disableTable(tableName); for (byte[] cf : columnFamilies) { ColumnFamilyDescriptor columnDesc = tableDesc.getColumnFamily(cf); boolean isNewCf = columnDesc == null; - ColumnFamilyDescriptorBuilder columnDescBuilder = isNewCf ? - ColumnFamilyDescriptorBuilder.newBuilder(cf) : - ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); + ColumnFamilyDescriptorBuilder columnDescBuilder = isNewCf + ? 
ColumnFamilyDescriptorBuilder.newBuilder(cf) + : ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); if (bloomType != null) { columnDescBuilder.setBloomFilterType(bloomType); } @@ -292,11 +288,8 @@ public class LoadTestTool extends AbstractHBaseTool { byte[] keyBytes = new byte[cipher.getKeyLength()]; Bytes.secureRandom(keyBytes); columnDescBuilder.setEncryptionType(cipher.getName()); - columnDescBuilder.setEncryptionKey( - EncryptionUtil.wrapKey(conf, - User.getCurrent().getShortName(), - new SecretKeySpec(keyBytes, - cipher.getName()))); + columnDescBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, + User.getCurrent().getShortName(), new SecretKeySpec(keyBytes, cipher.getName()))); } if (mobThreshold >= 0) { columnDescBuilder.setMobEnabled(true); @@ -317,8 +310,8 @@ public class LoadTestTool extends AbstractHBaseTool { @Override protected void addOptions() { addOptNoArg("v", OPT_VERBOSE, "Will display a full readout of logs, including ZooKeeper"); - addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " + - "without port numbers"); + addOptWithArg(OPT_ZK_QUORUM, + "ZK quorum as comma-separated host names " + "without port numbers"); addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper"); addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write"); addOptWithArg(OPT_COLUMN_FAMILIES, "The name of the column families to use separated by comma"); @@ -329,20 +322,23 @@ public class LoadTestTool extends AbstractHBaseTool { addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM); addOptWithArg(OPT_BLOOM_PARAM, "the parameter of bloom filter type"); addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION); - addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); - addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " + - "to tolerate before terminating all reader threads. The default is " + - MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); - addOptWithArg(OPT_MULTIGET, "Whether to use multi-gets as opposed to " + - "separate gets for every column in a row"); - addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " + - "reads and writes for concurrent write/read workload. The default " + - "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); + addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, + HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); + addOptWithArg(OPT_MAX_READ_ERRORS, + "The maximum number of read errors " + + "to tolerate before terminating all reader threads. The default is " + + MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); + addOptWithArg(OPT_MULTIGET, + "Whether to use multi-gets as opposed to " + "separate gets for every column in a row"); + addOptWithArg(OPT_KEY_WINDOW, + "The 'key window' to maintain between " + + "reads and writes for concurrent write/read workload. 
The default " + "is " + + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); - addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " + - "separate puts for every column in a row"); - addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " + - "separate updates for every column in a row"); + addOptNoArg(OPT_MULTIPUT, + "Whether to use multi-puts as opposed to " + "separate puts for every column in a row"); + addOptNoArg(OPT_BATCHUPDATE, + "Whether to use batch as opposed to " + "separate updates for every column in a row"); addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY); addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE); addOptWithArg(OPT_WRITER, OPT_WRITER_USAGE); @@ -350,16 +346,14 @@ public class LoadTestTool extends AbstractHBaseTool { addOptWithArg(OPT_READER, OPT_READER_USAGE); addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write"); - addOptWithArg(OPT_START_KEY, "The first key to read/write " + - "(a 0-based index). The default value is " + - DEFAULT_START_KEY + "."); - addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " - + "already exists"); + addOptWithArg(OPT_START_KEY, "The first key to read/write " + + "(a 0-based index). The default value is " + DEFAULT_START_KEY + "."); + addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " + "already exists"); addOptWithArg(NUM_TABLES, "A positive integer number. When a number n is specified, load test " - + "tool will load n table parallely. -tn parameter value becomes " - + "table name prefix. Each table name is in format _1..._n"); + + "tool will load n table parallely. -tn parameter value becomes " + + "table name prefix. Each table name is in format _1..._n"); addOptWithArg(OPT_ENCRYPTION, OPT_ENCRYPTION_USAGE); addOptNoArg(OPT_DEFERRED_LOG_FLUSH, OPT_DEFERRED_LOG_FLUSH_USAGE); @@ -376,22 +370,21 @@ public class LoadTestTool extends AbstractHBaseTool { return new DefaultParser() { @Override public CommandLine parse(Options opts, String[] args, Properties props, boolean stop) - throws ParseException { + throws ParseException { CommandLine cl = super.parse(opts, args, props, stop); - boolean isReadWriteUpdate = cmd.hasOption(OPT_READ) - || cmd.hasOption(OPT_WRITE) - || cmd.hasOption(OPT_UPDATE); + boolean isReadWriteUpdate = + cmd.hasOption(OPT_READ) || cmd.hasOption(OPT_WRITE) || cmd.hasOption(OPT_UPDATE); boolean isInitOnly = cmd.hasOption(OPT_INIT_ONLY); if (!isInitOnly && !isReadWriteUpdate) { throw new MissingOptionException("Must specify either -" + OPT_INIT_ONLY - + " or at least one of -" + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); + + " or at least one of -" + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); } if (isInitOnly && isReadWriteUpdate) { throw new AlreadySelectedException(OPT_INIT_ONLY + " cannot be specified with any of -" - + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); + + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); } if (isReadWriteUpdate && !cmd.hasOption(OPT_NUM_KEYS)) { @@ -407,8 +400,7 @@ public class LoadTestTool extends AbstractHBaseTool { protected void processOptions(CommandLine cmd) { this.cmd = cmd; - tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, - DEFAULT_TABLE_NAME)); + tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME)); if (cmd.hasOption(OPT_COLUMN_FAMILIES)) { String[] list = cmd.getOptionValue(OPT_COLUMN_FAMILIES).split(","); @@ -428,10 +420,9 @@ public class LoadTestTool extends AbstractHBaseTool { deferredLogFlush = 
cmd.hasOption(OPT_DEFERRED_LOG_FLUSH); if (!isInitOnly) { - startKey = parseLong(cmd.getOptionValue(OPT_START_KEY, - String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE); - long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, - Long.MAX_VALUE - startKey); + startKey = parseLong(cmd.getOptionValue(OPT_START_KEY, String.valueOf(DEFAULT_START_KEY)), 0, + Long.MAX_VALUE); + long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, Long.MAX_VALUE - startKey); endKey = startKey + numKeys; isSkipInit = cmd.hasOption(OPT_SKIP_INIT); System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]"); @@ -445,8 +436,7 @@ public class LoadTestTool extends AbstractHBaseTool { int colIndex = 0; minColsPerKey = 1; maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]); - int avgColDataSize = - parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); + int avgColDataSize = parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); minColDataSize = avgColDataSize / 2; maxColDataSize = avgColDataSize * 3 / 2; @@ -462,10 +452,8 @@ public class LoadTestTool extends AbstractHBaseTool { } System.out.println("Multi-puts: " + isMultiPut); - System.out.println("Columns per key: " + minColsPerKey + ".." - + maxColsPerKey); - System.out.println("Data size per column: " + minColDataSize + ".." - + maxColDataSize); + System.out.println("Columns per key: " + minColsPerKey + ".." + maxColsPerKey); + System.out.println("Data size per column: " + minColDataSize + ".." + maxColDataSize); } if (isUpdate) { @@ -496,18 +484,15 @@ public class LoadTestTool extends AbstractHBaseTool { } if (cmd.hasOption(OPT_MAX_READ_ERRORS)) { - maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), - 0, Integer.MAX_VALUE); + maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_KEY_WINDOW)) { - keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), - 0, Integer.MAX_VALUE); + keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_MULTIGET)) { - multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), - 0, Integer.MAX_VALUE); + multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), 0, Integer.MAX_VALUE); } System.out.println("Multi-gets (value of 1 means no multigets): " + multiGetBatchSize); @@ -538,16 +523,15 @@ public class LoadTestTool extends AbstractHBaseTool { private void parseColumnFamilyOptions(CommandLine cmd) { String dataBlockEncodingStr = cmd.getOptionValue(HFileTestUtil.OPT_DATA_BLOCK_ENCODING); - dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null : - DataBlockEncoding.valueOf(dataBlockEncodingStr); + dataBlockEncodingAlgo = + dataBlockEncodingStr == null ? null : DataBlockEncoding.valueOf(dataBlockEncodingStr); String compressStr = cmd.getOptionValue(OPT_COMPRESSION); - compressAlgo = compressStr == null ? Compression.Algorithm.NONE : - Compression.Algorithm.valueOf(compressStr); + compressAlgo = + compressStr == null ? Compression.Algorithm.NONE : Compression.Algorithm.valueOf(compressStr); String bloomStr = cmd.getOptionValue(OPT_BLOOM); - bloomType = bloomStr == null ? BloomType.ROW : - BloomType.valueOf(bloomStr); + bloomType = bloomStr == null ? 
BloomType.ROW : BloomType.valueOf(bloomStr); if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) { if (!cmd.hasOption(OPT_BLOOM_PARAM)) { @@ -570,9 +554,8 @@ public class LoadTestTool extends AbstractHBaseTool { durability = Durability.ASYNC_WAL; } - HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName, - getColumnFamilies(), compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, - regionReplication, durability); + HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName, getColumnFamilies(), + compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, regionReplication, durability); applyColumnFamilyOptions(tableName, getColumnFamilies()); } @@ -634,27 +617,27 @@ public class LoadTestTool extends AbstractHBaseTool { userOwner = User.createUserForTesting(conf, superUser, new String[0]); } } else { - args = clazzAndArgs.length == 1 ? new String[0] : Arrays.copyOfRange(clazzAndArgs, 1, - clazzAndArgs.length); + args = clazzAndArgs.length == 1 + ? new String[0] + : Arrays.copyOfRange(clazzAndArgs, 1, clazzAndArgs.length); } dataGen.initialize(args); } else { // Default DataGenerator is MultiThreadedAction.DefaultDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, families); + minColsPerKey, maxColsPerKey, families); } if (userOwner != null) { LOG.info("Granting permissions for user " + userOwner.getShortName()); - Permission.Action[] actions = { - Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action[] actions = { Permission.Action.ADMIN, Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE }; try { - AccessControlClient.grant(ConnectionFactory.createConnection(conf), - tableName, userOwner.getShortName(), null, null, actions); + AccessControlClient.grant(ConnectionFactory.createConnection(conf), tableName, + userOwner.getShortName(), null, null, actions); } catch (Throwable e) { - LOG.error(HBaseMarkers.FATAL, "Error in granting permission for the user " + - userOwner.getShortName(), e); + LOG.error(HBaseMarkers.FATAL, + "Error in granting permission for the user " + userOwner.getShortName(), e); return EXIT_FAILURE; } } @@ -691,7 +674,7 @@ public class LoadTestTool extends AbstractHBaseTool { if (isUpdate) { if (userOwner != null) { updaterThreads = new MultiThreadedUpdaterWithACL(dataGen, conf, tableName, updatePercent, - userOwner, userNames); + userOwner, userNames); } else { String updaterClass = null; if (cmd.hasOption(OPT_UPDATER)) { @@ -707,8 +690,8 @@ public class LoadTestTool extends AbstractHBaseTool { if (isRead) { if (userOwner != null) { - readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, - userNames); + readerThreads = + new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, userNames); } else { String readerClass = null; if (cmd.hasOption(OPT_READER)) { @@ -725,14 +708,12 @@ public class LoadTestTool extends AbstractHBaseTool { } if (isUpdate && isWrite) { - LOG.info("Concurrent write/update workload: making updaters aware of the " + - "write point"); + LOG.info("Concurrent write/update workload: making updaters aware of the " + "write point"); updaterThreads.linkToWriter(writerThreads); } if (isRead && (isUpdate || isWrite)) { - LOG.info("Concurrent write/read workload: making readers aware of the " + - "write point"); + LOG.info("Concurrent write/read workload: making readers aware of the " + "write point"); readerThreads.linkToWriter(isUpdate ? 
updaterThreads : writerThreads); } @@ -774,8 +755,8 @@ public class LoadTestTool extends AbstractHBaseTool { success = success && updaterThreads.getNumWriteFailures() == 0; } if (isRead) { - success = success && readerThreads.getNumReadErrors() == 0 - && readerThreads.getNumReadFailures() == 0; + success = + success && readerThreads.getNumReadErrors() == 0 && readerThreads.getNumReadFailures() == 0; } return success ? EXIT_SUCCESS : EXIT_FAILURE; } @@ -783,46 +764,46 @@ public class LoadTestTool extends AbstractHBaseTool { private LoadTestDataGenerator getLoadGeneratorInstance(String clazzName) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor(int.class, int.class, int.class, int.class, - byte[][].class); + Constructor constructor = + clazz.getConstructor(int.class, int.class, int.class, int.class, byte[][].class); return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, families); + minColsPerKey, maxColsPerKey, families); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class); + Constructor constructor = + clazz.getConstructor(LoadTestDataGenerator.class, Configuration.class, TableName.class); return (MultiThreadedWriter) constructor.newInstance(dataGen, conf, tableName); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); - return (MultiThreadedUpdater) constructor.newInstance( - dataGen, conf, tableName, updatePercent); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); + return (MultiThreadedUpdater) constructor.newInstance(dataGen, conf, tableName, + updatePercent); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); return (MultiThreadedReader) constructor.newInstance(dataGen, conf, tableName, verifyPercent); } catch (Exception e) { throw new IOException(e); @@ -834,15 +815,12 @@ public class LoadTestTool extends AbstractHBaseTool { } /** - * When NUM_TABLES is specified, the function starts multiple worker threads - * which individually 
start a LoadTestTool instance to load a table. Each - * table name is in format <tn>_<index>. For example, "-tn test -num_tables 2" - * , table names will be "test_1", "test_2" - * + * When NUM_TABLES is specified, the function starts multiple worker threads which individually + * start a LoadTestTool instance to load a table. Each table name is in format <tn>_<index>. + * For example, "-tn test -num_tables 2" , table names will be "test_1", "test_2" * @throws IOException if one of the load tasks is unable to complete */ - private int parallelLoadTables() - throws IOException { + private int parallelLoadTables() throws IOException { // create new command args String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME); String[] newArgs = null; @@ -869,7 +847,7 @@ public class LoadTestTool extends AbstractHBaseTool { List workers = new ArrayList<>(); for (int i = 0; i < numTables; i++) { String[] workerArgs = newArgs.clone(); - workerArgs[tableNameValueIndex] = tableName + "_" + (i+1); + workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1); WorkerThread worker = new WorkerThread(i, workerArgs); workers.add(worker); LOG.info(worker + " starting"); @@ -936,7 +914,7 @@ public class LoadTestTool extends AbstractHBaseTool { } private void addAuthInfoToConf(Properties authConfig, Configuration conf, String owner, - String userList) throws IOException { + String userList) throws IOException { List users = new ArrayList<>(Arrays.asList(userList.split(","))); users.add(owner); for (String user : users) { diff --git a/hbase-mapreduce/src/test/resources/mapred-site.xml b/hbase-mapreduce/src/test/resources/mapred-site.xml index 787ffb75511..b8949fef6a0 100644 --- a/hbase-mapreduce/src/test/resources/mapred-site.xml +++ b/hbase-mapreduce/src/test/resources/mapred-site.xml @@ -31,4 +31,3 @@ -Djava.awt.headless=true - diff --git a/hbase-metrics-api/README.txt b/hbase-metrics-api/README.txt index dfaa29f2e9f..ddba89b506d 100644 --- a/hbase-metrics-api/README.txt +++ b/hbase-metrics-api/README.txt @@ -75,4 +75,4 @@ References 1. https://hbase.apache.org/book.html#hbase.versioning 2. http://metrics.dropwizard.io/ 3. https://hadoop.apache.org/docs/r2.7.2/api/org/apache/hadoop/metrics2/package-summary.html -4. https://issues.apache.org/jira/browse/HBASE-9774 \ No newline at end of file +4. 
https://issues.apache.org/jira/browse/HBASE-9774 diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml index 7865b237136..ff086a6b87e 100644 --- a/hbase-metrics-api/pom.xml +++ b/hbase-metrics-api/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics API HBase Metrics API descriptions - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +105,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java index 6e041590ee3..78d9ade0423 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Counter extends Metric { /** * Increment {@code this} by {@code n}. - * * @param n The amount to increment. */ void increment(long n); @@ -47,7 +46,6 @@ public interface Counter extends Metric { /** * Decrement {@code this} by {@code n}. - * * @param n The amount to decrement. */ void decrement(long n); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java index ba171c2cab2..b20da242629 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import org.apache.yetus.audience.InterfaceStability; /** * A metrics which measures a discrete value. - * * @param The value of the Gauge. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java index 891bc6df2ea..da4ff89c59b 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,14 +30,12 @@ public interface Histogram extends Metric { /** * Adds a new value to the distribution. 
- * * @param value The value to add */ void update(int value); /** * Adds a new value to the distribution. - * * @param value The value to add */ void update(long value); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java index 5f38a005b3e..9217a2af4a4 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Meter extends Metric { /** * Records {@code events} occurrences. - * * @param events Number of occurrences to record. */ void mark(long events); @@ -53,14 +52,13 @@ public interface Meter extends Metric { double getMeanRate(); /** - * Returns the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created. + * Returns the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created. *
<p>
      * This rate has the same exponential decay factor as the one-minute load average in the {@code * top} Unix command. - * - * @return the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created + * @return the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created */ double getOneMinuteRate(); @@ -70,7 +68,6 @@ public interface Meter extends Metric { *
<p>
      * This rate has the same exponential decay factor as the five-minute load average in the {@code * top} Unix command. - * * @return the five-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ @@ -82,7 +79,6 @@ public interface Meter extends Metric { *
<p>
      * This rate has the same exponential decay factor as the fifteen-minute load average in the * {@code top} Unix command. - * * @return the fifteen-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java index 2f6d49e01fc..e79a9f3631a 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java index 33e989cfe01..9e7b13d89c8 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Collection; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -53,8 +50,8 @@ public abstract class MetricRegistries { public abstract void clear(); /** - * Create or return MetricRegistry with the given info. MetricRegistry will only be created - * if current reference count is 0. Otherwise ref counted is incremented, and an existing instance + * Create or return MetricRegistry with the given info. MetricRegistry will only be created if + * current reference count is 0. Otherwise ref counted is incremented, and an existing instance * will be returned. * @param info the info object for the MetricRegistrytry. * @return created or existing MetricRegistry. diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java index edc813d95b9..88c61079630 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.ArrayList; import java.util.List; import java.util.ServiceLoader; - import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -32,8 +29,8 @@ import org.slf4j.LoggerFactory; public final class MetricRegistriesLoader { private static final Logger LOG = LoggerFactory.getLogger(MetricRegistries.class); - private static final String defaultClass - = "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; + private static final String defaultClass = + "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; private MetricRegistriesLoader() { } @@ -64,7 +61,7 @@ public final class MetricRegistriesLoader { return impl; } else if (availableImplementations.isEmpty()) { try { - return ReflectionUtils.newInstance((Class)Class.forName(defaultClass)); + return ReflectionUtils.newInstance((Class) Class.forName(defaultClass)); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } @@ -78,7 +75,7 @@ public final class MetricRegistriesLoader { sb.append(factory.getClass()); } LOG.warn("Found multiple MetricRegistries implementations: " + sb - + ". Using first found implementation: " + availableImplementations.get(0)); + + ". Using first found implementation: " + availableImplementations.get(0)); return availableImplementations.get(0); } } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java index 3bd5f6cd844..b70526e1c5a 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.Optional; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +31,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Timer} used to measure durations and report rates. - * * @param name the name of the timer. * @return An instance of {@link Timer}. */ @@ -40,7 +38,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Histogram} used to measure a distribution of values. - * * @param name The name of the Histogram. * @return An instance of {@link Histogram}. */ @@ -49,7 +46,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Meter} used to measure durations and report distributions (a * combination of a {@link Timer} and a {@link Histogram}. 
- * * @param name The name of the Meter. * @return An instance of {@link Meter}. */ @@ -57,7 +53,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Counter} used to track a mutable number. - * * @param name The name of the Counter * @return An instance of {@link Counter}. */ @@ -66,7 +61,7 @@ public interface MetricRegistry extends MetricSet { /** * Register a {@link Gauge}. The Gauge will be invoked at a period defined by the implementation * of {@link MetricRegistry}. - * @param name The name of the Gauge. + * @param name The name of the Gauge. * @param gauge A callback to compute the current value. * @return the registered gauge, or the existing gauge */ @@ -75,7 +70,7 @@ public interface MetricRegistry extends MetricSet { /** * Registers the {@link Metric} with the given name if there does not exist one with the same * name. Returns the newly registered or existing Metric. - * @param name The name of the Metric. + * @param name The name of the Metric. * @param metric the metric to register * @return the registered metric, or the existing metrid */ @@ -96,7 +91,6 @@ public interface MetricRegistry extends MetricSet { /** * Removes the metric with the given name. - * * @param name the name of the metric * @return true if the metric is removed. */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java index be77c42985d..9d53a8cbf53 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java index c4396bd24d5..d83453f6af8 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.yetus.audience.InterfaceAudience; /** * HBase Metrics are grouped in different MetricRegistry'ies. 
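Taken together, the interfaces touched in this module form a small instrumentation API. A minimal usage sketch, assuming a MetricRegistry obtained elsewhere (for example from a coprocessor environment); only calls referenced in the interfaces of this patch are used, and Gauge is assumed to be the usual single-method getValue() callback.

import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.Gauge;
import org.apache.hadoop.hbase.metrics.Histogram;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

final class MetricsUsageSketch {
  // Register a few metrics and record some sample values; only methods shown in the
  // interfaces above (counter/histogram/meter/register, increment/update/mark) are used.
  static void instrument(MetricRegistry registry) {
    Counter requests = registry.counter("requests");
    Histogram sizes = registry.histogram("requestSize");
    Meter throughput = registry.meter("requestsPerSecond");
    Gauge<Long> queueLength = () -> 0L; // callback computing the current value
    registry.register("queueLength", queueLength);

    requests.increment();   // one more request
    sizes.update(512L);     // record an observed request size
    throughput.mark();      // feeds the one/five/fifteen-minute moving-average rates
  }
}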
All metrics that correspond to a - * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. - * This class holds the name and description and JMX related context names for such group of - * metrics. + * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. This class holds the + * name and description and JMX related context names for such group of metrics. */ @InterfaceAudience.Private public class MetricRegistryInfo { @@ -37,12 +34,8 @@ public class MetricRegistryInfo { protected final String metricsJmxContext; protected final boolean existingSource; - public MetricRegistryInfo( - String metricsName, - String metricsDescription, - String metricsJmxContext, - String metricsContext, - boolean existingSource) { + public MetricRegistryInfo(String metricsName, String metricsDescription, String metricsJmxContext, + String metricsContext, boolean existingSource) { this.metricsName = metricsName; this.metricsDescription = metricsDescription; this.metricsContext = metricsContext; @@ -51,9 +44,8 @@ public class MetricRegistryInfo { } /** - * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ public String getMetricsContext() { @@ -68,16 +60,15 @@ public class MetricRegistryInfo { } /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. With the default context being Hadoop -> HBase */ public String getMetricsJmxContext() { return metricsJmxContext; } /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ public String getMetricsName() { return metricsName; @@ -102,11 +93,7 @@ public class MetricRegistryInfo { @Override public int hashCode() { - return new HashCodeBuilder() - .append(metricsName) - .append(metricsDescription) - .append(metricsContext) - .append(metricsJmxContext) - .toHashCode(); + return new HashCodeBuilder().append(metricsName).append(metricsDescription) + .append(metricsContext).append(metricsJmxContext).toHashCode(); } } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java index 5e1c873ce8b..60d7e9e39dd 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A set of named metrics. 
- * * @see MetricRegistry#registerAll(MetricSet) */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -34,7 +31,6 @@ import org.apache.yetus.audience.InterfaceStability; public interface MetricSet extends Metric { /** * A map of metric names to metrics. - * * @return the metrics */ Map getMetrics(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java index ecb01ad57c0..e3830236069 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,19 +19,16 @@ package org.apache.hadoop.hbase.metrics; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; - import org.apache.yetus.audience.InterfaceAudience; /** - * This is a dummy annotation that forces javac to produce output for - * otherwise empty package-info.java. - * - *

      The result is maven-compiler-plugin can properly identify the scope of - * changed files - * - *

      See more details in - * - * maven-compiler-plugin: incremental compilation broken + * This is a dummy annotation that forces javac to produce output for otherwise empty + * package-info.java. + *

      + * The result is maven-compiler-plugin can properly identify the scope of changed files + *

      + * See more details in + * maven-compiler-plugin: incremental compilation broken */ @Retention(RetentionPolicy.SOURCE) @InterfaceAudience.Private diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java index a7b9869a0d2..26aee2804ee 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -43,7 +40,6 @@ public interface Snapshot { /** * Returns the number of values in the snapshot. - * * @return the number of values */ long getCount(); @@ -57,77 +53,66 @@ public interface Snapshot { /** * Returns the value at the 25th percentile in the distribution. - * * @return the value at the 25th percentile */ long get25thPercentile(); /** * Returns the value at the 75th percentile in the distribution. - * * @return the value at the 75th percentile */ long get75thPercentile(); /** * Returns the value at the 90th percentile in the distribution. - * * @return the value at the 90th percentile */ long get90thPercentile(); /** * Returns the value at the 95th percentile in the distribution. - * * @return the value at the 95th percentile */ long get95thPercentile(); /** * Returns the value at the 98th percentile in the distribution. - * * @return the value at the 98th percentile */ long get98thPercentile(); /** * Returns the value at the 99th percentile in the distribution. - * * @return the value at the 99th percentile */ long get99thPercentile(); /** * Returns the value at the 99.9th percentile in the distribution. - * * @return the value at the 99.9th percentile */ long get999thPercentile(); /** * Returns the median value in the distribution. - * * @return the median value */ long getMedian(); /** * Returns the highest value in the snapshot. - * * @return the highest value */ long getMax(); /** * Returns the arithmetic mean of the values in the snapshot. - * * @return the arithmetic mean */ long getMean(); /** * Returns the lowest value in the snapshot. - * * @return the lowest value */ long getMin(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java index 30c64fb5ce4..d8df01720de 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +31,7 @@ public interface Timer extends Metric { /** * Update the timer with the given duration in given time unit. * @param duration the duration of the event - * @param unit the time unit for the duration + * @param unit the time unit for the duration */ void update(long duration, TimeUnit unit); diff --git a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java index 59f26999bd2..b9df823e756 100644 --- a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java +++ b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestMetricRegistriesLoader { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricRegistriesLoader.class); + HBaseClassTestRule.forClass(TestMetricRegistriesLoader.class); @Test public void testLoadSinleInstance() { @@ -51,8 +51,8 @@ public class TestMetricRegistriesLoader { MetricRegistries loader1 = mock(MetricRegistries.class); MetricRegistries loader2 = mock(MetricRegistries.class); MetricRegistries loader3 = mock(MetricRegistries.class); - MetricRegistries instance = MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, - loader3)); + MetricRegistries instance = + MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, loader3)); // the load() returns the first instance assertEquals(loader1, instance); diff --git a/hbase-metrics/README.txt b/hbase-metrics/README.txt index d80064c2d6f..6216fc8daeb 100644 --- a/hbase-metrics/README.txt +++ b/hbase-metrics/README.txt @@ -1 +1 @@ -See the documentation at hbase-metrics-api/README. \ No newline at end of file +See the documentation at hbase-metrics-api/README. 
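// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: how the hbase-metrics-api types
// reformatted above fit together. Counter#increment, MetricRegistry#counter,
// the Gauge lambda form of register, Timer#update and the MetricRegistryInfo
// constructor all appear in the hunks above; MetricRegistry#timer is assumed
// here to be the matching get-or-create accessor.
// ---------------------------------------------------------------------------
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
import org.apache.hadoop.hbase.metrics.Timer;

final class MetricsApiSketch {
  // Constructor order after the reformat:
  // (name, description, jmxContext, metrics2 context, existingSource).
  static MetricRegistryInfo exampleGroup() {
    return new MetricRegistryInfo("Example", "Example metrics group",
      "Example,sub=Example", "example", false);
  }

  static void recordRequest(MetricRegistry registry, long latencyMs) {
    // Get-or-create a counter and bump it.
    Counter requests = registry.counter("exampleRequests");
    requests.increment();

    // Gauges are registered with a callback that the implementation polls;
    // repeated calls return the already-registered gauge.
    registry.register("exampleInFlight", () -> 0L);

    // Timers take a duration plus its TimeUnit, as in Timer#update above.
    Timer latency = registry.timer("exampleLatency"); // assumed accessor
    latency.update(latencyMs, TimeUnit.MILLISECONDS);
  }
}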
diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml index ee0ec004b94..96821b2693a 100644 --- a/hbase-metrics/pom.xml +++ b/hbase-metrics/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics Implementation HBase Metrics Implementation - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -157,6 +113,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java index 8021c068939..ad30fbe1674 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java index b5c52cf840c..d2723a22e94 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,7 @@ package org.apache.hadoop.hbase.metrics.impl; import com.codahale.metrics.Meter; - import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -39,11 +37,13 @@ public class DropwizardMeter implements org.apache.hadoop.hbase.metrics.Meter { this.meter = Objects.requireNonNull(meter); } - @Override public void mark() { + @Override + public void mark() { this.meter.mark(); } - @Override public void mark(long count) { + @Override + public void mark(long count) { this.meter.mark(count); } diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java index 81544607f5f..b1b47e3904e 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class FastLongHistogram { public static final int DEFAULT_NBINS = 255; public static final double[] DEFAULT_QUANTILES = - new double[]{0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999}; + new double[] { 0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999 }; /** * Bins is a class containing a list of buckets(or bins) for estimation histogram of some data. @@ -105,8 +105,8 @@ public class FastLongHistogram { return this.counts.length - 2; } // compute the position - return 1 + (int) ((value - this.binsMin) * (this.counts.length - 3) / - (this.binsMax - this.binsMin)); + return 1 + + (int) ((value - this.binsMin) * (this.counts.length - 3) / (this.binsMax - this.binsMin)); } @@ -245,7 +245,7 @@ public class FastLongHistogram { /** * Constructor. * @param numOfBins the number of bins for the histogram. A larger value results in more precise - * results but with lower efficiency, and vice versus. + * results but with lower efficiency, and vice versus. */ public FastLongHistogram(int numOfBins) { this.bins = new Bins(numOfBins); @@ -254,9 +254,9 @@ public class FastLongHistogram { /** * Constructor setting the bins assuming a uniform distribution within a range. * @param numOfBins the number of bins for the histogram. A larger value results in more precise - * results but with lower efficiency, and vice versus. - * @param min lower bound of the region, inclusive. - * @param max higher bound of the region, inclusive. + * results but with lower efficiency, and vice versus. + * @param min lower bound of the region, inclusive. + * @param max higher bound of the region, inclusive. */ public FastLongHistogram(int numOfBins, long min, long max) { this(numOfBins); diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java index 2e0aa55808f..c29b267e347 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
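// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: FastLongHistogram (above) spreads
// values over a fixed number of bins, optionally seeded with a [min, max] hint;
// more bins give more precision at some cost, per the constructor javadoc.
// Only the constructors and getNumAtOrBelow appear in these hunks -- the
// add(value, count) recording call used below is an assumption.
// ---------------------------------------------------------------------------
import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;

final class FastLongHistogramSketch {
  static long countAtOrBelow50() {
    FastLongHistogram hist = new FastLongHistogram(100, 0, 100);
    for (long v = 0; v <= 100; v++) {
      hist.add(v, 1); // assumed recording method
    }
    // Bin-based estimate of how many recorded values are <= 50.
    return hist.getNumAtOrBelow(50);
  }
}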
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,9 +22,8 @@ import org.apache.hadoop.hbase.metrics.Snapshot; import org.apache.yetus.audience.InterfaceAudience; /** - * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are - * slow compared to this implementation, so we are using our implementation here. - * See HBASE-15222. + * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are slow + * compared to this implementation, so we are using our implementation here. See HBASE-15222. */ @InterfaceAudience.Private public class HistogramImpl implements Histogram { diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java index 3826e66093b..39da41eeec0 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +21,6 @@ import java.util.Collection; import java.util.Collections; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryFactory; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java index 6f9e16366aa..5ebdf0d479a 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
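// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: HistogramImpl (above) backs the
// Histogram metric with FastLongHistogram instead of a Dropwizard histogram
// (HBASE-15222). The no-arg constructor, update(long) and snapshot() used here
// are assumptions; the percentile getters come from the Snapshot interface
// reformatted earlier in this patch.
// ---------------------------------------------------------------------------
import org.apache.hadoop.hbase.metrics.Snapshot;
import org.apache.hadoop.hbase.metrics.impl.HistogramImpl;

final class HistogramSketch {
  static long p99OfSample() {
    HistogramImpl histogram = new HistogramImpl();
    for (long v = 1; v <= 1000; v++) {
      histogram.update(v);
    }
    Snapshot snapshot = histogram.snapshot();
    // Median, mean, min/max and the fixed percentiles are all read off Snapshot.
    return snapshot.get99thPercentile();
  }
}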
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java index 03c669fee0e..0ecd707b481 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Gauge; import org.apache.hadoop.hbase.metrics.Histogram; @@ -97,7 +96,7 @@ public class MetricRegistryImpl implements MetricRegistry { @Override public Gauge register(String name, Gauge gauge) { - return (Gauge) register(name, (Metric)gauge); + return (Gauge) register(name, (Metric) gauge); } @Override diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java index 63131a100e9..19ec192211a 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,22 +22,23 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; import java.util.stream.Collectors; - import org.apache.yetus.audience.InterfaceAudience; /** - * A map of K to V, but does ref counting for added and removed values. The values are - * not added directly, but instead requested from the given Supplier if ref count == 0. Each put() - * call will increment the ref count, and each remove() will decrement it. The values are removed - * from the map iff ref count == 0. + * A map of K to V, but does ref counting for added and removed values. The values are not added + * directly, but instead requested from the given Supplier if ref count == 0. Each put() call will + * increment the ref count, and each remove() will decrement it. The values are removed from the map + * iff ref count == 0. 
*/ @InterfaceAudience.Private class RefCountingMap { private ConcurrentHashMap> map = new ConcurrentHashMap<>(); + private static class Payload { V v; int refCount; + Payload(V v) { this.v = v; this.refCount = 1; // create with ref count = 1 @@ -46,7 +46,7 @@ class RefCountingMap { } V put(K k, Supplier supplier) { - return ((Payload)map.compute(k, (k1, oldValue) -> { + return ((Payload) map.compute(k, (k1, oldValue) -> { if (oldValue != null) { oldValue.refCount++; return oldValue; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java index 03a8c65915e..3ad560a3d74 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.Timer; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java index 5b5e26f13a8..0d86e2aeb49 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,17 @@ public class TestCounterImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCounterImpl.class); + HBaseClassTestRule.forClass(TestCounterImpl.class); private Counter counter; - @Before public void setup() { + @Before + public void setup() { this.counter = new CounterImpl(); } - @Test public void testCounting() { + @Test + public void testCounting() { counter.increment(); assertEquals(1L, counter.getCount()); counter.increment(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java index 072f18a3b15..9af666b095f 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
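// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: RefCountingMap (above) hands out
// one shared value per key and counts put()/remove() pairs, dropping the value
// only when the count reaches zero. The class is package-private, so this
// mirrors the same-package usage in TestRefCountingMap further down.
// ---------------------------------------------------------------------------
package org.apache.hadoop.hbase.metrics.impl;

final class RefCountingMapSketch {
  static String demo() {
    RefCountingMap<String, String> map = new RefCountingMap<>();
    map.put("foo", () -> "foovalue"); // supplier runs, ref count = 1
    map.put("foo", () -> "ignored");  // existing value kept, ref count = 2
    map.remove("foo");                // ref count = 1, value retained
    map.remove("foo");                // ref count = 0, entry removed
    return map.get("foo");            // null once the last reference is gone
  }
}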
See the NOTICE file * distributed with this work for additional information @@ -34,15 +34,17 @@ public class TestDropwizardMeter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDropwizardMeter.class); + HBaseClassTestRule.forClass(TestDropwizardMeter.class); private Meter meter; - @Before public void setup() { + @Before + public void setup() { this.meter = Mockito.mock(Meter.class); } - @Test public void test() { + @Test + public void test() { DropwizardMeter dwMeter = new DropwizardMeter(this.meter); dwMeter.mark(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java index 120f91169c5..ca5c6a47668 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals; import java.util.Arrays; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -34,12 +33,12 @@ import org.junit.experimental.categories.Category; /** * Testcases for FastLongHistogram. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestFastLongHistogram { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFastLongHistogram.class); + HBaseClassTestRule.forClass(TestFastLongHistogram.class); private static void doTestUniform(FastLongHistogram hist) { long[] VALUES = { 0, 10, 20, 30, 40, 50 }; @@ -97,7 +96,6 @@ public class TestFastLongHistogram { } } - @Test public void testGetNumAtOrBelow() { long[] VALUES = { 1, 10, 20, 30, 40, 50 }; @@ -126,7 +124,6 @@ public class TestFastLongHistogram { assertEquals(601, h.getNumAtOrBelow(Long.MAX_VALUE)); } - @Test public void testSameValues() { FastLongHistogram hist = new FastLongHistogram(100); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java index 52d29fc700c..e1ed9cf6a5b 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,18 +34,17 @@ import org.junit.experimental.categories.Category; public class TestGauge { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGauge.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGauge.class); @Test public void testGetValue() { SimpleGauge gauge = new SimpleGauge(); - assertEquals(0, (long)gauge.getValue()); + assertEquals(0, (long) gauge.getValue()); gauge.setValue(1000L); - assertEquals(1000L, (long)gauge.getValue()); + assertEquals(1000L, (long) gauge.getValue()); } /** @@ -55,7 +54,8 @@ public class TestGauge { private final AtomicLong value = new AtomicLong(0L); - @Override public Long getValue() { + @Override + public Long getValue() { return this.value.get(); } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java index 9be3fcee20f..70d9598570d 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestHistogramImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHistogramImpl.class); + HBaseClassTestRule.forClass(TestHistogramImpl.class); @Test public void testUpdate() { diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java index 1115529a051..56b3f0d6a9e 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestMetricRegistryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricRegistryImpl.class); + HBaseClassTestRule.forClass(TestMetricRegistryImpl.class); private MetricRegistryInfo info; private MetricRegistryImpl registry; @@ -59,7 +59,7 @@ public class TestMetricRegistryImpl { counter.increment(42L); Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -72,7 +72,7 @@ public class TestMetricRegistryImpl { }); Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -81,7 +81,7 @@ public class TestMetricRegistryImpl { registry.register("gaugeLambda", () -> 42L); Optional metric = registry.get("gaugeLambda"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -106,7 +106,7 @@ public class TestMetricRegistryImpl { Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -119,8 +119,7 @@ public class TestMetricRegistryImpl { Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); - + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); Counter c1 = registry.counter("mycounter"); Counter c2 = registry.counter("mycounter"); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java index c5ed1edb9eb..2d29ff17943 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestRefCountingMap { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRefCountingMap.class); + HBaseClassTestRule.forClass(TestRefCountingMap.class); private RefCountingMap map; @@ -59,7 +59,7 @@ public class TestRefCountingMap { @Test public void testPutMulti() { String v1 = map.put("foo", () -> "foovalue"); - String v2 = map.put("foo", () -> "foovalue2"); + String v2 = map.put("foo", () -> "foovalue2"); String v3 = map.put("foo", () -> "foovalue3"); String v = map.get("foo"); @@ -127,7 +127,6 @@ public class TestRefCountingMap { assertEquals(0, map.size()); } - @Test public void testKeySet() { map.put("foo", () -> "foovalue"); @@ -151,6 +150,6 @@ public class TestRefCountingMap { assertEquals(3, values.size()); Lists.newArrayList("foovalue", "foovalue3", "foovalue4").stream() - .forEach(v -> assertTrue(values.contains(v))); + .forEach(v -> assertTrue(values.contains(v))); } } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java index d9d3632b731..1bfa02fc4b6 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestTimerImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTimerImpl.class); + HBaseClassTestRule.forClass(TestTimerImpl.class); private Timer timer; diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml index f04329ddc21..257970c35a7 100644 --- a/hbase-procedure/pom.xml +++ b/hbase-procedure/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -30,35 +30,6 @@ hbase-procedure Apache HBase - Procedure Procedure Framework - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -145,10 +116,39 @@ test + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + - - + + skipProcedureTests @@ -167,8 +167,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 7786f552da3..34fc1594702 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Iterator; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,13 +68,12 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } // ========================================================================== - // Add related + // Add related // ========================================================================== /** - * Add the procedure to the queue. - * NOTE: this method is called with the sched lock held. + * Add the procedure to the queue. NOTE: this method is called with the sched lock held. * @param procedure the Procedure to add - * @param addFront true if the item should be added to the front of the queue + * @param addFront true if the item should be added to the front of the queue */ protected abstract void enqueue(Procedure procedure, boolean addFront); @@ -131,11 +129,10 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } // ========================================================================== - // Poll related + // Poll related // ========================================================================== /** - * Fetch one Procedure from the queue - * NOTE: this method is called with the sched lock held. + * Fetch one Procedure from the queue NOTE: this method is called with the sched lock held. * @return the Procedure to execute, or null if nothing is available. */ protected abstract Procedure dequeue(); @@ -187,18 +184,18 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } // ========================================================================== - // Utils + // Utils // ========================================================================== /** - * Returns the number of elements in this queue. - * NOTE: this method is called with the sched lock held. + * Returns the number of elements in this queue. NOTE: this method is called with the sched lock + * held. * @return the number of elements in this queue. */ protected abstract int queueSize(); /** - * Returns true if there are procedures available to process. - * NOTE: this method is called with the sched lock held. + * Returns true if there are procedures available to process. NOTE: this method is called with the + * sched lock held. * @return true if there are procedures available to process, otherwise false. */ protected abstract boolean queueHasRunnables(); @@ -224,7 +221,7 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } // ============================================================================ - // TODO: Metrics + // TODO: Metrics // ============================================================================ public long getPollCalls() { return pollCalls; @@ -235,13 +232,13 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } // ========================================================================== - // Procedure Events + // Procedure Events // ========================================================================== /** - * Wake up all of the given events. - * Note that we first take scheduler lock and then wakeInternal() synchronizes on the event. - * Access should remain package-private. 
Use ProcedureEvent class to wake/suspend events. + * Wake up all of the given events. Note that we first take scheduler lock and then wakeInternal() + * synchronizes on the event. Access should remain package-private. Use ProcedureEvent class to + * wake/suspend events. * @param events the list of events to wake */ void wakeEvents(ProcedureEvent[] events) { @@ -276,7 +273,7 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } // ========================================================================== - // Internal helpers + // Internal helpers // ========================================================================== protected void schedLock() { schedulerLock.lock(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java index 796a8e47c91..c16b79bde61 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,8 +66,8 @@ class CompletedProcedureCleaner extends ProcedureInMemoryChore> completedMap, - Map nonceKeysToProcIdsMap) { + IdLock procExecutionLock, Map> completedMap, + Map nonceKeysToProcIdsMap) { // set the timeout interval that triggers the periodic-procedure super(conf.getInt(CLEANER_INTERVAL_CONF_KEY, DEFAULT_CLEANER_INTERVAL)); this.completed = completedMap; @@ -138,4 +138,4 @@ class CompletedProcedureCleaner extends ProcedureInMemoryChore { } public boolean isExpired(long now, long evictTtl, long evictAckTtl) { - return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) || - (now - procedure.getLastUpdate()) >= evictTtl; + return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) + || (now - procedure.getLastUpdate()) >= evictTtl; } -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java index 3fc97507860..a00a710a959 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private class DelayedProcedure - extends DelayedUtil.DelayedContainerWithTimestamp> { + extends DelayedUtil.DelayedContainerWithTimestamp> { public DelayedProcedure(Procedure procedure) { super(procedure, procedure.getTimeoutTimestamp()); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java index 40eb22c3b56..63b8be7a479 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class FailedProcedure extends Procedure { } public FailedProcedure(long procId, String procName, User owner, NonceKey nonceKey, - IOException exception) { + IOException exception) { this.procName = procName; setProcId(procId); setState(ProcedureState.ROLLEDBACK); @@ -54,7 +54,7 @@ public class FailedProcedure extends Procedure { @Override protected Procedure[] execute(TEnvironment env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { throw new UnsupportedOperationException(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java index dfe8e7d3c53..5561661d73b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java @@ -21,8 +21,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure - * operation. + * Used internally signaling failed queue of a remote procedure operation. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java index 32b4922a0b1..21350b56c23 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java index bfeb7398fa0..e6b7d7d0397 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.util.function.Function; @@ -142,8 +141,10 @@ public class LockAndQueue implements LockStatus { * @return whether we should wake the procedures waiting on the lock here. */ public boolean releaseExclusiveLock(Procedure proc) { - if (exclusiveLockOwnerProcedure == null || - exclusiveLockOwnerProcedure.getProcId() != proc.getProcId()) { + if ( + exclusiveLockOwnerProcedure == null + || exclusiveLockOwnerProcedure.getProcId() != proc.getProcId() + ) { // We are not the lock owner, it is probably inherited from the parent procedures. return false; } @@ -187,7 +188,7 @@ public class LockAndQueue implements LockStatus { @Override public String toString() { - return "exclusiveLockOwner=" + (hasExclusiveLock() ? getExclusiveLockProcIdOwner() : "NONE") + - ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); + return "exclusiveLockOwner=" + (hasExclusiveLock() ? getExclusiveLockProcIdOwner() : "NONE") + + ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java index 33d2a38c80a..d3723e1a35a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java index 8599af90d38..1b23a5c7373 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum LockType { - EXCLUSIVE, SHARED + EXCLUSIVE, + SHARED } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java index 81d1e721229..1503d8d6710 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -30,9 +29,9 @@ public class LockedResource { private final int sharedLockCount; private final List> waitingProcedures; - public LockedResource(LockedResourceType resourceType, String resourceName, - LockType lockType, Procedure exclusiveLockOwnerProcedure, - int sharedLockCount, List> waitingProcedures) { + public LockedResource(LockedResourceType resourceType, String resourceName, LockType lockType, + Procedure exclusiveLockOwnerProcedure, int sharedLockCount, + List> waitingProcedures) { this.resourceType = resourceType; this.resourceName = resourceName; this.lockType = lockType; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java index 55d195b3920..12f899d7565 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum LockedResourceType { - SERVER, NAMESPACE, TABLE, REGION, PEER, META + SERVER, + NAMESPACE, + TABLE, + REGION, + PEER, + META } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java index d2e13f13536..a6faf501682 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java @@ -20,9 +20,8 @@ package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key AFTER queuing dispatch. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key AFTER queuing dispatch. 
*/ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java index 5cdbcd417de..95265d00a7b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java @@ -20,9 +20,8 @@ package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java index 9deac23e154..502d7ee0b6e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * The target server passed is null. + * Used internally signaling failed queue of a remote procedure operation. The target server passed + * is null. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java index 2d6e065da67..4f0bc6ce6b2 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index c0bbfce583a..ec46e523fc5 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -113,9 +113,9 @@ public abstract class Procedure implements Comparable implements Comparable

      Bypassing a procedure is not like aborting. Aborting a procedure will trigger - * a rollback. And since the {@link #abort(Object)} method is overrideable - * Some procedures may have chosen to ignore the aborting. + * If bypass is set to true, when executing it will return null when {@link #doExecute(Object)} is + * called to finish the procedure and release any locks it may currently hold. The bypass does + * cleanup around the Procedure as far as the Procedure framework is concerned. It does not clean + * any internal state that the Procedure's themselves may have set. That is for the Procedures to + * do themselves when bypass is called. They should override bypass and do their cleanup in the + * overridden bypass method (be sure to call the parent bypass to ensure proper processing). + *

      + *

      + * Bypassing a procedure is not like aborting. Aborting a procedure will trigger a rollback. And + * since the {@link #abort(Object)} method is overrideable Some procedures may have chosen to + * ignore the aborting. */ private volatile boolean bypass = false; @@ -176,13 +176,13 @@ public abstract class Procedure implements Comparable implements Comparable[] execute(TEnvironment env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; /** - * The code to undo what was done by the execute() code. - * It is called when the procedure or one of the sub-procedures failed or an - * abort was requested. It should cleanup all the resources created by - * the execute() call. The implementation must be idempotent since rollback() - * may be called multiple time in case of machine failure in the middle - * of the execution. + * The code to undo what was done by the execute() code. It is called when the procedure or one of + * the sub-procedures failed or an abort was requested. It should cleanup all the resources + * created by the execute() call. The implementation must be idempotent since rollback() may be + * called multiple time in case of machine failure in the middle of the execution. * @param env the environment passed to the ProcedureExecutor - * @throws IOException temporary failure, the rollback will retry later + * @throws IOException temporary failure, the rollback will retry later * @throws InterruptedException the procedure will be added back to the queue and retried later */ - protected abstract void rollback(TEnvironment env) - throws IOException, InterruptedException; + protected abstract void rollback(TEnvironment env) throws IOException, InterruptedException; /** - * The abort() call is asynchronous and each procedure must decide how to deal - * with it, if they want to be abortable. The simplest implementation - * is to have an AtomicBoolean set in the abort() method and then the execute() - * will check if the abort flag is set or not. - * abort() may be called multiple times from the client, so the implementation - * must be idempotent. - * - *

      NOTE: abort() is not like Thread.interrupt(). It is just a notification - * that allows the procedure implementor abort. + * The abort() call is asynchronous and each procedure must decide how to deal with it, if they + * want to be abortable. The simplest implementation is to have an AtomicBoolean set in the + * abort() method and then the execute() will check if the abort flag is set or not. abort() may + * be called multiple times from the client, so the implementation must be idempotent. + *

      + * NOTE: abort() is not like Thread.interrupt(). It is just a notification that allows the + * procedure implementor abort. */ protected abstract boolean abort(TEnvironment env); /** - * The user-level code of the procedure may have some state to - * persist (e.g. input arguments or current position in the processing state) to - * be able to resume on failure. + * The user-level code of the procedure may have some state to persist (e.g. input arguments or + * current position in the processing state) to be able to resume on failure. * @param serializer stores the serializable state */ protected abstract void serializeStateData(ProcedureStateSerializer serializer) throws IOException; /** - * Called on store load to allow the user to decode the previously serialized - * state. + * Called on store load to allow the user to decode the previously serialized state. * @param serializer contains the serialized state */ protected abstract void deserializeStateData(ProcedureStateSerializer serializer) @@ -321,9 +315,9 @@ public abstract class Procedure implements Comparable implements Comparable implements Comparable implements Comparable implements Comparable 0: this; + assert childrenLatch > 0 : this; boolean b = --childrenLatch == 0; if (LOG.isTraceEnabled()) { LOG.trace("CHILD LATCH DECREMENT " + childrenLatch, new Throwable(this.toString())); @@ -871,8 +852,7 @@ public abstract class Procedure implements Comparable implements Comparable implements Comparable stackIndexes) { this.stackIndexes = new int[stackIndexes.size()]; @@ -937,16 +916,17 @@ public abstract class Procedure implements Comparable[] doExecute(TEnvironment env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { try { updateTimestamp(); if (bypass) { @@ -962,8 +942,7 @@ public abstract class Procedure implements Comparable implements Comparable implements Comparable Long getRootProcedureId(Map> procedures, - Procedure proc) { + Procedure proc) { while (proc.hasParent()) { proc = procedures.get(proc.getParentProcId()); if (proc == null) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java index 1b6b93db70c..9d6f9a4965c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
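// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: a minimal single-step Procedure
// built only from the abstract methods whose javadoc is reformatted above
// (execute / rollback / abort / serializeStateData / deserializeStateData).
// The class name and Void environment type are placeholders for the example.
// ---------------------------------------------------------------------------
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

public class NoopSketchProcedure extends Procedure<Void> {
  private final AtomicBoolean aborted = new AtomicBoolean(false);

  @Override
  protected Procedure<Void>[] execute(Void env)
    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    // Do the work here; returning null means no sub-procedures and we are done.
    return null;
  }

  @Override
  protected void rollback(Void env) throws IOException, InterruptedException {
    // Undo execute(); must be idempotent since rollback may run more than once.
  }

  @Override
  protected boolean abort(Void env) {
    // abort() is only a notification; execute() is expected to check the flag.
    return aborted.compareAndSet(false, true);
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // No state to persist in this sketch.
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // No state to restore.
  }
}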
*/ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java index c5f02e950bc..ad42634edb9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,20 +11,18 @@ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUTKey WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.procedure2; import java.util.ArrayDeque; - import org.apache.yetus.audience.InterfaceAudience; /** - * Type class. - * For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque gives - * more understanding that it's a queue of waiting procedures. + * Type class. For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque + * gives more understanding that it's a queue of waiting procedures. */ @InterfaceAudience.Private public class ProcedureDeque extends ArrayDeque { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java index 50034515973..e55f73211d7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @@ -61,12 +60,12 @@ public class ProcedureEvent { } /** - * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the - * event as ready. - * See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not synchronized. + * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the event + * as ready. See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not + * synchronized. */ public void wake(AbstractProcedureScheduler procedureScheduler) { - procedureScheduler.wakeEvents(new ProcedureEvent[]{this}); + procedureScheduler.wakeEvents(new ProcedureEvent[] { this }); } /** @@ -77,7 +76,7 @@ public class ProcedureEvent { * event. */ public synchronized boolean wakeIfSuspended(AbstractProcedureScheduler procedureScheduler, - Procedure proc) { + Procedure proc) { if (suspendedProcedures.stream().anyMatch(p -> p.getProcId() == proc.getProcId())) { wake(procedureScheduler); return true; @@ -89,22 +88,19 @@ public class ProcedureEvent { * Wakes up all the given events and puts the procedures waiting on them back into * ProcedureScheduler queues. 
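As a rough sketch of how these wake calls are typically used from a procedure step: suspendIfNotReady() is the companion ProcedureEvent call (not shown in this hunk), and the event and scheduler accessors on the environment are assumed names, not real API.

  // Inside an execute() step: park this procedure until the event is ready.
  if (env.getInitializedEvent().suspendIfNotReady(this)) {  // assumed accessor returning a ProcedureEvent
    throw new ProcedureSuspendedException();                // hand the worker thread back
  }

  // Later, from whoever completes the awaited work:
  env.getInitializedEvent().wake(env.getProcedureScheduler());  // re-queues every suspended procedure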
*/ - public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent ... events) { + public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent... events) { scheduler.wakeEvents(events); } /** - * Only to be used by ProcedureScheduler implementations. - * Reason: To wake up multiple events, locking sequence is - * schedLock --> synchronized (event) - * To wake up an event, both schedLock() and synchronized(event) are required. - * The order is schedLock() --> synchronized(event) because when waking up multiple events - * simultaneously, we keep the scheduler locked until all procedures suspended on these events - * have been added back to the queue (Maybe it's not required? Evaluate!) - * To avoid deadlocks, we want to keep the locking order same even when waking up single event. - * That's why, {@link #wake(AbstractProcedureScheduler)} above uses the same code path as used - * when waking up multiple events. - * Access should remain package-private. + * Only to be used by ProcedureScheduler implementations. Reason: To wake up multiple events, + * locking sequence is schedLock --> synchronized (event) To wake up an event, both schedLock() + * and synchronized(event) are required. The order is schedLock() --> synchronized(event) because + * when waking up multiple events simultaneously, we keep the scheduler locked until all + * procedures suspended on these events have been added back to the queue (Maybe it's not + * required? Evaluate!) To avoid deadlocks, we want to keep the locking order same even when + * waking up single event. That's why, {@link #wake(AbstractProcedureScheduler)} above uses the + * same code path as used when waking up multiple events. Access should remain package-private. */ synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) { if (ready && !suspendedProcedures.isEmpty()) { @@ -122,8 +118,8 @@ public class ProcedureEvent { } /** - * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it - * here for tests. + * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it here + * for tests. */ public ProcedureDeque getSuspendedProcedures() { return suspendedProcedures; @@ -131,7 +127,7 @@ public class ProcedureEvent { @Override public String toString() { - return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + - ", " + suspendedProcedures; + return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + ", " + + suspendedProcedures; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java index 93cd355c4e0..b52510286d9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index b1dc1a420c8..024c3341adf 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,17 +62,12 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Thread Pool that executes the submitted procedures. - * The executor has a ProcedureStore associated. - * Each operation is logged and on restart the pending procedures are resumed. - * - * Unless the Procedure code throws an error (e.g. invalid user input) - * the procedure will complete (at some point in time), On restart the pending - * procedures are resumed and the once failed will be rolledback. - * - * The user can add procedures to the executor via submitProcedure(proc) - * check for the finished state via isFinished(procId) - * and get the result via getResult(procId) + * Thread Pool that executes the submitted procedures. The executor has a ProcedureStore associated. + * Each operation is logged and on restart the pending procedures are resumed. Unless the Procedure + * code throws an error (e.g. invalid user input) the procedure will complete (at some point in + * time), On restart the pending procedures are resumed and the once failed will be rolledback. The + * user can add procedures to the executor via submitProcedure(proc) check for the finished state + * via isFinished(procId) and get the result via getResult(procId) */ @InterfaceAudience.Private public class ProcedureExecutor { @@ -82,19 +77,19 @@ public class ProcedureExecutor { private static final boolean DEFAULT_CHECK_OWNER_SET = false; public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY = - "hbase.procedure.worker.keep.alive.time.msec"; + "hbase.procedure.worker.keep.alive.time.msec"; private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = TimeUnit.MINUTES.toMillis(1); public static final String EVICT_TTL_CONF_KEY = "hbase.procedure.cleaner.evict.ttl"; static final int DEFAULT_EVICT_TTL = 15 * 60000; // 15min - public static final String EVICT_ACKED_TTL_CONF_KEY ="hbase.procedure.cleaner.acked.evict.ttl"; + public static final String EVICT_ACKED_TTL_CONF_KEY = "hbase.procedure.cleaner.acked.evict.ttl"; static final int DEFAULT_ACKED_EVICT_TTL = 5 * 60000; // 5min /** - * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to - * break PE having it fail at various junctures. When non-null, testing is set to an instance of - * the below internal {@link Testing} class with flags set for the particular test. + * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to break PE + * having it fail at various junctures. When non-null, testing is set to an instance of the below + * internal {@link Testing} class with flags set for the particular test. 
*/ volatile Testing testing = null; @@ -114,8 +109,8 @@ public class ProcedureExecutor { /** * Set when we want to fail AFTER state has been stored into the WAL. Rarely used. HBASE-20978 - * is about a case where memory-state was being set after store to WAL where a crash could - * cause us to get stuck. This flag allows killing at what was a vulnerable time. + * is about a case where memory-state was being set after store to WAL where a crash could cause + * us to get stuck. This flag allows killing at what was a vulnerable time. */ protected volatile boolean killAfterStoreUpdate = false; protected volatile boolean toggleKillAfterStoreUpdate = false; @@ -155,29 +150,31 @@ public class ProcedureExecutor { public interface ProcedureExecutorListener { void procedureLoaded(long procId); + void procedureAdded(long procId); + void procedureFinished(long procId); } /** - * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. - * Once a Root-Procedure completes (success or failure), the result will be added to this map. - * The user of ProcedureExecutor should call getResult(procId) to get the result. + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. Once a + * Root-Procedure completes (success or failure), the result will be added to this map. The user + * of ProcedureExecutor should call getResult(procId) to get the result. */ private final ConcurrentHashMap> completed = new ConcurrentHashMap<>(); /** * Map the the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState. - * The RootProcedureState contains the execution stack of the Root-Procedure, - * It is added to the map by submitProcedure() and removed on procedure completion. + * The RootProcedureState contains the execution stack of the Root-Procedure, It is added to the + * map by submitProcedure() and removed on procedure completion. */ private final ConcurrentHashMap> rollbackStack = new ConcurrentHashMap<>(); /** - * Helper map to lookup the live procedures by ID. - * This map contains every procedure. root-procedures and subprocedures. + * Helper map to lookup the live procedures by ID. This map contains every procedure. + * root-procedures and subprocedures. */ private final ConcurrentHashMap> procedures = new ConcurrentHashMap<>(); @@ -195,34 +192,31 @@ public class ProcedureExecutor { /** * Created in the {@link #init(int, boolean)} method. Destroyed in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private ThreadGroup threadGroup; /** - * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private CopyOnWriteArrayList workerThreads; /** * Created in the {@link #init(int, boolean)} method. 
Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private TimeoutExecutorThread timeoutExecutor; /** * WorkerMonitor check for stuck workers and new worker thread when necessary, for example if * there is no worker to assign meta, it will new worker thread for it, so it is very important. - * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore - * and so on, some tasks may execute for a long time so will block other tasks like - * WorkerMonitor, so use a dedicated thread for executing WorkerMonitor. + * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore and + * so on, some tasks may execute for a long time so will block other tasks like WorkerMonitor, so + * use a dedicated thread for executing WorkerMonitor. */ private TimeoutExecutorThread workerMonitorExecutor; @@ -257,7 +251,7 @@ public class ProcedureExecutor { private final IdLock procExecutionLock = new IdLock(); public ProcedureExecutor(final Configuration conf, final TEnvironment environment, - final ProcedureStore store) { + final ProcedureStore store) { this(conf, environment, store, new SimpleProcedureScheduler()); } @@ -272,8 +266,8 @@ public class ProcedureExecutor { Procedure proc = procedures.get(procId); if (proc != null) { if (proc.isFinished() && proc.hasParent() && isRootFinished(proc)) { - LOG.debug("Procedure {} has already been finished and parent is succeeded," + - " skip force updating", proc); + LOG.debug("Procedure {} has already been finished and parent is succeeded," + + " skip force updating", proc); return; } } else { @@ -299,7 +293,7 @@ public class ProcedureExecutor { } public ProcedureExecutor(final Configuration conf, final TEnvironment environment, - final ProcedureStore store, final ProcedureScheduler scheduler) { + final ProcedureStore store, final ProcedureScheduler scheduler) { this.environment = environment; this.scheduler = scheduler; this.store = store; @@ -399,7 +393,7 @@ public class ProcedureExecutor { } private void loadProcedures(ProcedureIterator procIter, boolean abortOnCorruption) - throws IOException { + throws IOException { // 1. Build the rollback stack int runnableCount = 0; int failedCount = 0; @@ -556,9 +550,9 @@ public class ProcedureExecutor { * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and * ensure a single executor, and start the procedure replay to resume and recover the previous * pending and in-progress procedures. - * @param numThreads number of threads available for procedure execution. + * @param numThreads number of threads available for procedure execution. * @param abortOnCorruption true if you want to abort your service in case a corrupted procedure - * is found on replay. otherwise false. + * is found on replay. otherwise false. 
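As a minimal bootstrap sketch of the lifecycle described above: the environment object and the store construction are placeholders, while init(), startWorkers(), submitProcedure(), isFinished() and getResult() are the calls documented in this class.

  Configuration conf = HBaseConfiguration.create();
  MyEnv env = new MyEnv();                          // invented environment type
  ProcedureStore store = createWalStore(conf);      // assumed helper; e.g. a WAL-backed store
  ProcedureExecutor<MyEnv> executor = new ProcedureExecutor<>(conf, env, store);

  store.start(4);                                   // same worker count for the store
  executor.init(4, true);                           // 4 core workers, abort on corrupted procedures
  executor.startWorkers();

  long procId = executor.submitProcedure(new CleanupTmpDirProcedure());
  while (!executor.isFinished(procId)) {
    Thread.sleep(100);                              // polling only for the sake of the sketch
  }
  Procedure<?> result = executor.getResult(procId);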
*/ public void init(int numThreads, boolean abortOnCorruption) throws IOException { // We have numThreads executor + one timer thread used for timing out @@ -566,7 +560,7 @@ public class ProcedureExecutor { this.corePoolSize = numThreads; this.maxPoolSize = 10 * numThreads; LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}", - corePoolSize, maxPoolSize); + corePoolSize, maxPoolSize); this.threadGroup = new ThreadGroup("PEWorkerGroup"); this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout"); @@ -615,7 +609,7 @@ public class ProcedureExecutor { LOG.trace("Start workers {}", workerThreads.size()); timeoutExecutor.start(); workerMonitorExecutor.start(); - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.start(); } @@ -647,7 +641,7 @@ public class ProcedureExecutor { workerMonitorExecutor.awaitTermination(); // stop the worker threads - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.awaitTermination(); } @@ -656,8 +650,8 @@ public class ProcedureExecutor { try { threadGroup.destroy(); } catch (IllegalThreadStateException e) { - LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", - this.threadGroup, e.getMessage()); + LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", this.threadGroup, + e.getMessage()); // This dumps list of threads on STDOUT. this.threadGroup.list(); } @@ -673,12 +667,12 @@ public class ProcedureExecutor { public void refreshConfiguration(final Configuration conf) { this.conf = conf; - setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, - DEFAULT_WORKER_KEEP_ALIVE_TIME), TimeUnit.MILLISECONDS); + setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, DEFAULT_WORKER_KEEP_ALIVE_TIME), + TimeUnit.MILLISECONDS); } // ========================================================================== - // Accessors + // Accessors // ========================================================================== public boolean isRunning() { return running.get(); @@ -724,7 +718,7 @@ public class ProcedureExecutor { } // ========================================================================== - // Submit/Remove Chores + // Submit/Remove Chores // ========================================================================== /** @@ -753,12 +747,12 @@ public class ProcedureExecutor { } // ========================================================================== - // Nonce Procedure helpers + // Nonce Procedure helpers // ========================================================================== /** * Create a NonceKey from the specified nonceGroup and nonce. * @param nonceGroup the group to use for the {@link NonceKey} - * @param nonce the nonce to use in the {@link NonceKey} + * @param nonce the nonce to use in the {@link NonceKey} * @return the generated NonceKey */ public NonceKey createNonceKey(final long nonceGroup, final long nonce) { @@ -766,13 +760,10 @@ public class ProcedureExecutor { } /** - * Register a nonce for a procedure that is going to be submitted. - * A procId will be reserved and on submitProcedure(), - * the procedure with the specified nonce will take the reserved ProcId. - * If someone already reserved the nonce, this method will return the procId reserved, - * otherwise an invalid procId will be returned. and the caller should procede - * and submit the procedure. - * + * Register a nonce for a procedure that is going to be submitted. 
A procId will be reserved and + * on submitProcedure(), the procedure with the specified nonce will take the reserved ProcId. If + * someone already reserved the nonce, this method will return the procId reserved, otherwise an + * invalid procId will be returned. and the caller should procede and submit the procedure. * @param nonceKey A unique identifier for this operation from the client or process. * @return the procId associated with the nonce, if any otherwise an invalid procId. */ @@ -796,9 +787,10 @@ public class ProcedureExecutor { // we found a registered nonce, but the procedure may not have been submitted yet. // since the client expect the procedure to be submitted, spin here until it is. final boolean traceEnabled = LOG.isTraceEnabled(); - while (isRunning() && - !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) && - nonceKeysToProcIdsMap.containsKey(nonceKey)) { + while ( + isRunning() && !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) + && nonceKeysToProcIdsMap.containsKey(nonceKey) + ) { if (traceEnabled) { LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be submitted"); } @@ -828,16 +820,15 @@ public class ProcedureExecutor { } /** - * If the failure failed before submitting it, we may want to give back the - * same error to the requests with the same nonceKey. - * - * @param nonceKey A unique identifier for this operation from the client or process - * @param procName name of the procedure, used to inform the user + * If the failure failed before submitting it, we may want to give back the same error to the + * requests with the same nonceKey. + * @param nonceKey A unique identifier for this operation from the client or process + * @param procName name of the procedure, used to inform the user * @param procOwner name of the owner of the procedure, used to inform the user * @param exception the failure to report to the user */ public void setFailureResultForNonce(NonceKey nonceKey, String procName, User procOwner, - IOException exception) { + IOException exception) { if (nonceKey == null) { return; } @@ -854,7 +845,7 @@ public class ProcedureExecutor { } // ========================================================================== - // Submit/Abort Procedure + // Submit/Abort Procedure // ========================================================================== /** * Add a new root-procedure to the executor. @@ -866,52 +857,46 @@ public class ProcedureExecutor { } /** - * Bypass a procedure. If the procedure is set to bypass, all the logic in - * execute/rollback will be ignored and it will return success, whatever. - * It is used to recover buggy stuck procedures, releasing the lock resources - * and letting other procedures run. Bypassing one procedure (and its ancestors will - * be bypassed automatically) may leave the cluster in a middle state, e.g. region - * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures, - * the operators may have to do some clean up on hdfs or schedule some assign procedures - * to let region online. DO AT YOUR OWN RISK. + * Bypass a procedure. If the procedure is set to bypass, all the logic in execute/rollback will + * be ignored and it will return success, whatever. It is used to recover buggy stuck procedures, + * releasing the lock resources and letting other procedures run. Bypassing one procedure (and its + * ancestors will be bypassed automatically) may leave the cluster in a middle state, e.g. 
region + * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures, the + * operators may have to do some clean up on hdfs or schedule some assign procedures to let region + * online. DO AT YOUR OWN RISK. *

- * A procedure can be bypassed only if
- * 1. The procedure is in state of RUNNABLE, WAITING, WAITING_TIMEOUT
- * or it is a root procedure without any child.
- * 2. No other worker thread is executing it
- * 3. No child procedure has been submitted
- *
+ * A procedure can be bypassed only if 1. The procedure is in state of RUNNABLE, WAITING,
+ * WAITING_TIMEOUT or it is a root procedure without any child. 2. No other worker thread is
+ * executing it 3. No child procedure has been submitted
+ * <p>

- * If all the requirements are meet, the procedure and its ancestors will be
- * bypassed and persisted to WAL.
- *
+ * If all the requirements are met, the procedure and its ancestors will be bypassed and
+ * persisted to WAL.
+ * <p>

      - * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. - * TODO: What about WAITING_TIMEOUT? - * @param pids the procedure id - * @param lockWait time to wait lock - * @param force if force set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during executing(due to bug, mostly) - * In this case, bypassing the procedure is not enough, since it is already stuck - * there. We need to restart the master after bypassing, and letting the problematic - * procedure to execute wth bypass=true, so in that condition, the procedure can be - * successfully bypassed. + * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. TODO: What + * about WAITING_TIMEOUT? + * @param pids the procedure id + * @param lockWait time to wait lock + * @param force if force set to true, we will bypass the procedure even if it is executing. + * This is for procedures which can't break out during executing(due to bug, + * mostly) In this case, bypassing the procedure is not enough, since it is + * already stuck there. We need to restart the master after bypassing, and + * letting the problematic procedure to execute wth bypass=true, so in that + * condition, the procedure can be successfully bypassed. * @param recursive We will do an expensive search for children of each pid. EXPENSIVE! * @return true if bypass success * @throws IOException IOException */ public List bypassProcedure(List pids, long lockWait, boolean force, - boolean recursive) - throws IOException { + boolean recursive) throws IOException { List result = new ArrayList(pids.size()); - for(long pid: pids) { + for (long pid : pids) { result.add(bypassProcedure(pid, lockWait, force, recursive)); } return result; } boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recursive) - throws IOException { + throws IOException { Preconditions.checkArgument(lockWait > 0, "lockWait should be positive"); final Procedure procedure = getProcedure(pid); if (procedure == null) { @@ -919,16 +904,16 @@ public class ProcedureExecutor { return false; } - LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", - procedure, lockWait, override, recursive); + LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", procedure, lockWait, + override, recursive); IdLock.Entry lockEntry = procExecutionLock.tryLockEntry(procedure.getProcId(), lockWait); if (lockEntry == null && !override) { - LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", lockWait, + procedure, override); return false; } else if (lockEntry == null) { - LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", lockWait, + procedure, override); } try { // check whether the procedure is already finished @@ -942,9 +927,9 @@ public class ProcedureExecutor { // EXPENSIVE. Checks each live procedure of which there could be many!!! // Is there another way to get children of a procedure? LOG.info("Recursive bypass on children of pid={}", procedure.getProcId()); - this.procedures.forEachValue(1 /*Single-threaded*/, + this.procedures.forEachValue(1 /* Single-threaded */, // Transformer - v -> v.getParentProcId() == procedure.getProcId()? 
v: null, + v -> v.getParentProcId() == procedure.getProcId() ? v : null, // Consumer v -> { try { @@ -960,12 +945,13 @@ public class ProcedureExecutor { } // If the procedure has no parent or no child, we are safe to bypass it in whatever state - if (procedure.hasParent() && procedure.getState() != ProcedureState.RUNNABLE + if ( + procedure.hasParent() && procedure.getState() != ProcedureState.RUNNABLE && procedure.getState() != ProcedureState.WAITING - && procedure.getState() != ProcedureState.WAITING_TIMEOUT) { + && procedure.getState() != ProcedureState.WAITING_TIMEOUT + ) { LOG.debug("Bypassing procedures in RUNNABLE, WAITING and WAITING_TIMEOUT states " - + "(with no parent), {}", - procedure); + + "(with no parent), {}", procedure); // Question: how is the bypass done here? return false; } @@ -982,7 +968,7 @@ public class ProcedureExecutor { current = getProcedure(parentID); } - //wake up waiting procedure, already checked there is no child + // wake up waiting procedure, already checked there is no child if (procedure.getState() == ProcedureState.WAITING) { procedure.setState(ProcedureState.RUNNABLE); store.update(procedure); @@ -1005,8 +991,7 @@ public class ProcedureExecutor { // need to restart the master. With the procedure set to bypass, the procedureExecutor // will bypass it and won't get stuck again. LOG.debug("Bypassing {} and its ancestors successfully, but since it is already running, " - + "skipping add to queue", - procedure); + + "skipping add to queue", procedure); } return true; @@ -1019,11 +1004,11 @@ public class ProcedureExecutor { /** * Add a new root-procedure to the executor. - * @param proc the new procedure to execute. + * @param proc the new procedure to execute. * @param nonceKey the registered unique identifier for this operation from the client or process. * @return the procedure id, that can be used to monitor the operation */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "FindBugs is blind to the check-for-null") public long submitProcedure(Procedure proc, NonceKey nonceKey) { Preconditions.checkArgument(lastProcId.get() >= 0); @@ -1107,8 +1092,8 @@ public class ProcedureExecutor { } /** - * Send an abort notification the specified procedure. - * Depending on the procedure implementation the abort can be considered or ignored. + * Send an abort notification the specified procedure. Depending on the procedure implementation + * the abort can be considered or ignored. * @param procId the procedure to abort * @return true if the procedure exists and has received the abort, otherwise false. */ @@ -1117,9 +1102,9 @@ public class ProcedureExecutor { } /** - * Send an abort notification to the specified procedure. - * Depending on the procedure implementation, the abort can be considered or ignored. - * @param procId the procedure to abort + * Send an abort notification to the specified procedure. Depending on the procedure + * implementation, the abort can be considered or ignored. + * @param procId the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if the procedure exists and has received the abort, otherwise false. 
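For comparison, the two recovery paths just described look like this from calling code; the executor handle and procId are assumed to be in scope, while abort() and bypassProcedure() are the methods shown in this hunk.

  // Cooperative: ask the procedure to stop itself; the implementation may ignore it.
  boolean delivered = executor.abort(procId, true);

  // Last resort: skip the remaining execute()/rollback() logic of a stuck procedure.
  // This can leave the cluster in a half-done state, as the warning above says.
  List<Boolean> bypassed = executor.bypassProcedure(
    Arrays.asList(procId), 30_000 /* lockWait ms */, false /* force */, false /* recursive */);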
*/ @@ -1135,7 +1120,7 @@ public class ProcedureExecutor { } // ========================================================================== - // Executor query helpers + // Executor query helpers // ========================================================================== public Procedure getProcedure(final long procId) { return procedures.get(procId); @@ -1159,9 +1144,8 @@ public class ProcedureExecutor { } /** - * Return true if the procedure is finished. - * The state may be "completed successfully" or "failed and rolledback". - * Use getResult() to check the state or get the result data. + * Return true if the procedure is finished. The state may be "completed successfully" or "failed + * and rolledback". Use getResult() to check the state or get the result data. * @param procId the ID of the procedure to check * @return true if the procedure execution is finished, otherwise false. */ @@ -1210,9 +1194,9 @@ public class ProcedureExecutor { /** * Check if the user is this procedure's owner * @param procId the target procedure - * @param user the user - * @return true if the user is the owner of the procedure, - * false otherwise or the owner is unknown. + * @param user the user + * @return true if the user is the owner of the procedure, false otherwise or the owner is + * unknown. */ public boolean isProcedureOwner(long procId, User user) { if (user == null) { @@ -1262,7 +1246,7 @@ public class ProcedureExecutor { } // ========================================================================== - // Listeners helpers + // Listeners helpers // ========================================================================== public void registerListener(ProcedureExecutorListener listener) { this.listeners.add(listener); @@ -1274,7 +1258,7 @@ public class ProcedureExecutor { private void sendProcedureLoadedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureLoaded(procId); } catch (Throwable e) { @@ -1286,7 +1270,7 @@ public class ProcedureExecutor { private void sendProcedureAddedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureAdded(procId); } catch (Throwable e) { @@ -1298,7 +1282,7 @@ public class ProcedureExecutor { private void sendProcedureFinishedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureFinished(procId); } catch (Throwable e) { @@ -1309,7 +1293,7 @@ public class ProcedureExecutor { } // ========================================================================== - // Procedure IDs helpers + // Procedure IDs helpers // ========================================================================== private long nextProcId() { long procId = lastProcId.incrementAndGet(); @@ -1341,7 +1325,7 @@ public class ProcedureExecutor { } // ========================================================================== - // Executions + // Executions // ========================================================================== private void executeProcedure(Procedure proc) { if (proc.isFinished()) { @@ -1577,9 +1561,8 @@ public class ProcedureExecutor { } /** - * Execute the rollback of the procedure step. 
- * It updates the store with the new state (stack index) - * or will remove completly the procedure in case it is a child. + * Execute the rollback of the procedure step. It updates the store with the new state (stack + * index) or will remove completly the procedure in case it is a child. */ private LockState executeRollback(Procedure proc) { try { @@ -1617,36 +1600,38 @@ public class ProcedureExecutor { /** * Executes procedure *

 *   • Calls the doExecute() of the procedure
 *   • If the procedure execution didn't fail (i.e. valid user input)
 *       • ...and returned subprocedures
 *           • The subprocedures are initialized.
 *           • The subprocedures are added to the store
 *           • The subprocedures are added to the runnable queue
 *           • The procedure is now in a WAITING state, waiting for the subprocedures to complete
 *       • ...if there are no subprocedure
 *           • the procedure completed successfully
 *           • if there is a parent (WAITING)
 *           • the parent state will be set to RUNNABLE
 *   • In case of failure
 *       • The store is updated with the new state
 *       • The executor (caller of this method) will start the rollback of the procedure
    • + * */ private void execProcedure(RootProcedureState procStack, - Procedure procedure) { + Procedure procedure) { Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE, - "NOT RUNNABLE! " + procedure.toString()); + "NOT RUNNABLE! " + procedure.toString()); // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException. // The exception is caught below and then we hurry to the exit without disturbing state. The @@ -1697,10 +1682,10 @@ public class ProcedureExecutor { // Yield the current procedure, and make the subprocedure runnable // subprocs may come back 'null'. subprocs = initializeChildren(procStack, procedure, subprocs); - LOG.info("Initialized subprocedures=" + - (subprocs == null? null: - Stream.of(subprocs).map(e -> "{" + e.toString() + "}"). - collect(Collectors.toList()).toString())); + LOG.info("Initialized subprocedures=" + (subprocs == null + ? null + : Stream.of(subprocs).map(e -> "{" + e.toString() + "}").collect(Collectors.toList()) + .toString())); } } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) { LOG.trace("Added to timeoutExecutor {}", procedure); @@ -1716,8 +1701,9 @@ public class ProcedureExecutor { // allows to kill the executor before something is stored to the wal. // useful to test the procedure recovery. - if (testing != null && - testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent())) { + if ( + testing != null && testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent()) + ) { kill("TESTING: Kill BEFORE store update: " + procedure); } @@ -1738,8 +1724,10 @@ public class ProcedureExecutor { return; } // if the procedure is kind enough to pass the slot to someone else, yield - if (procedure.isRunnable() && !suspended && - procedure.isYieldAfterExecutionStep(getEnvironment())) { + if ( + procedure.isRunnable() && !suspended + && procedure.isYieldAfterExecutionStep(getEnvironment()) + ) { yieldProcedure(procedure); return; } @@ -1778,15 +1766,15 @@ public class ProcedureExecutor { } private Procedure[] initializeChildren(RootProcedureState procStack, - Procedure procedure, Procedure[] subprocs) { + Procedure procedure, Procedure[] subprocs) { assert subprocs != null : "expected subprocedures"; final long rootProcId = getRootProcedureId(procedure); for (int i = 0; i < subprocs.length; ++i) { Procedure subproc = subprocs[i]; if (subproc == null) { String msg = "subproc[" + i + "] is null, aborting the procedure"; - procedure.setFailure(new RemoteProcedureException(msg, - new IllegalArgumentIOException(msg))); + procedure + .setFailure(new RemoteProcedureException(msg, new IllegalArgumentIOException(msg))); return null; } @@ -1824,7 +1812,7 @@ public class ProcedureExecutor { } private void countDownChildren(RootProcedureState procStack, - Procedure procedure) { + Procedure procedure) { Procedure parent = procedures.get(procedure.getParentProcId()); if (parent == null) { assert procStack.isRollingback(); @@ -1837,14 +1825,14 @@ public class ProcedureExecutor { // children have completed, move parent to front of the queue. 
store.update(parent); scheduler.addFront(parent); - LOG.info("Finished subprocedure pid={}, resume processing ppid={}", - procedure.getProcId(), parent.getProcId()); + LOG.info("Finished subprocedure pid={}, resume processing ppid={}", procedure.getProcId(), + parent.getProcId()); return; } } private void updateStoreOnExec(RootProcedureState procStack, - Procedure procedure, Procedure[] subprocs) { + Procedure procedure, Procedure[] subprocs) { if (subprocs != null && !procedure.isFailed()) { if (LOG.isTraceEnabled()) { LOG.trace("Stored " + procedure + ", children " + Arrays.toString(subprocs)); @@ -1881,10 +1869,10 @@ public class ProcedureExecutor { private void execCompletionCleanup(Procedure proc) { final TEnvironment env = getEnvironment(); if (proc.hasLock()) { - LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + - " is finished, even if the holdLock is true, arrive here means we have some holes where" + - " we do not release the lock. And the releaseLock below may fail since the procedure may" + - " have already been deleted from the procedure store."); + LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + + " is finished, even if the holdLock is true, arrive here means we have some holes where" + + " we do not release the lock. And the releaseLock below may fail since the procedure may" + + " have already been deleted from the procedure store."); releaseLock(proc, true); } try { @@ -1939,7 +1927,7 @@ public class ProcedureExecutor { } // ========================================================================== - // Worker Thread + // Worker Thread // ========================================================================== private class WorkerThread extends StoppableThread { private final AtomicLong executionStartTime = new AtomicLong(Long.MAX_VALUE); @@ -1958,6 +1946,7 @@ public class ProcedureExecutor { public void sendStopSignal() { scheduler.signalAll(); } + @Override public void run() { long lastUpdate = EnvironmentEdgeManager.currentTime(); @@ -1984,8 +1973,8 @@ public class ProcedureExecutor { procExecutionLock.releaseLockEntry(lockEntry); activeCount = activeExecutorCount.decrementAndGet(); runningCount = store.setRunningProcedureCount(activeCount); - LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), - runningCount, activeCount); + LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), runningCount, + activeCount); this.activeProcedure = null; lastUpdate = EnvironmentEdgeManager.currentTime(); executionStartTime.set(Long.MAX_VALUE); @@ -2002,7 +1991,7 @@ public class ProcedureExecutor { @Override public String toString() { Procedure p = this.activeProcedure; - return getName() + "(pid=" + (p == null? Procedure.NO_PROC_ID: p.getProcId() + ")"); + return getName() + "(pid=" + (p == null ? 
Procedure.NO_PROC_ID : p.getProcId() + ")"); } /** @@ -2041,15 +2030,15 @@ public class ProcedureExecutor { private final class WorkerMonitor extends InlineChore { public static final String WORKER_MONITOR_INTERVAL_CONF_KEY = - "hbase.procedure.worker.monitor.interval.msec"; + "hbase.procedure.worker.monitor.interval.msec"; private static final int DEFAULT_WORKER_MONITOR_INTERVAL = 5000; // 5sec public static final String WORKER_STUCK_THRESHOLD_CONF_KEY = - "hbase.procedure.worker.stuck.threshold.msec"; + "hbase.procedure.worker.stuck.threshold.msec"; private static final int DEFAULT_WORKER_STUCK_THRESHOLD = 10000; // 10sec public static final String WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY = - "hbase.procedure.worker.add.stuck.percentage"; + "hbase.procedure.worker.add.stuck.percentage"; private static final float DEFAULT_WORKER_ADD_STUCK_PERCENTAGE = 0.5f; // 50% stuck private float addWorkerStuckPercentage = DEFAULT_WORKER_ADD_STUCK_PERCENTAGE; @@ -2105,12 +2094,11 @@ public class ProcedureExecutor { } private void refreshConfig() { - addWorkerStuckPercentage = conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, - DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); - timeoutInterval = conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, - DEFAULT_WORKER_MONITOR_INTERVAL); - stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, - DEFAULT_WORKER_STUCK_THRESHOLD); + addWorkerStuckPercentage = + conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); + timeoutInterval = + conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, DEFAULT_WORKER_MONITOR_INTERVAL); + stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, DEFAULT_WORKER_STUCK_THRESHOLD); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java index cd65c1f74ae..f8232cce950 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -23,13 +22,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Special procedure used as a chore. - * Instead of bringing the Chore class in (dependencies reason), - * we reuse the executor timeout thread for this special case. - * - * The assumption is that procedure is used as hook to dispatch other procedures - * or trigger some cleanups. It does not store state in the ProcedureStore. - * this is just for in-memory chore executions. + * Special procedure used as a chore. Instead of bringing the Chore class in (dependencies reason), + * we reuse the executor timeout thread for this special case. The assumption is that procedure is + * used as hook to dispatch other procedures or trigger some cleanups. It does not store state in + * the ProcedureStore. this is just for in-memory chore executions. 
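A hedged sketch of such a chore: the class name, interval and body are invented, while ProcedureInMemoryChore, its periodic() hook and the executor's addChore() registration are the real pieces.

  public class StaleLockReaperChore extends ProcedureInMemoryChore<MyEnv> {
    public StaleLockReaperChore() {
      super(60_000);                      // assumed millisecond constructor: run about once a minute
    }

    @Override
    protected void periodic(MyEnv env) {
      env.releaseExpiredLocks();          // invented helper, illustration only
    }
  }

  // Registered on the timeout thread rather than submitted as a normal procedure:
  executor.addChore(new StaleLockReaperChore());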
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -56,12 +52,10 @@ public abstract class ProcedureInMemoryChore extends Procedure - *
  • Count of submitted procedure instances
- *   • Time histogram for successfully completed procedure instances
- *   • Count of failed procedure instances
- *
- * Please implement this interface to return appropriate metrics.
+ *   • Count of submitted procedure instances
+ *   • Time histogram for successfully completed procedure instances
+ *   • Count of failed procedure instances
    • + * + * Please implement this interface to return appropriate metrics. */ @InterfaceAudience.Private public interface ProcedureMetrics { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java index 72b2b284ca1..f89cac5137c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,8 @@ public interface ProcedureScheduler { void stop(); /** - * In case the class is blocking on poll() waiting for items to be added, - * this method should awake poll() and poll() should return. + * In case the class is blocking on poll() waiting for items to be added, this method should awake + * poll() and poll() should return. */ void signalAll(); @@ -51,7 +51,7 @@ public interface ProcedureScheduler { /** * Inserts the specified element at the front of this queue. - * @param proc the Procedure to add + * @param proc the Procedure to add * @param notify whether need to notify worker */ void addFront(Procedure proc, boolean notify); @@ -69,21 +69,20 @@ public interface ProcedureScheduler { /** * Inserts the specified element at the end of this queue. - * @param proc the Procedure to add + * @param proc the Procedure to add * @param notify whether need to notify worker */ void addBack(Procedure proc, boolean notify); /** - * The procedure can't run at the moment. - * add it back to the queue, giving priority to someone else. + * The procedure can't run at the moment. add it back to the queue, giving priority to someone + * else. * @param proc the Procedure to add back to the list */ void yield(Procedure proc); /** - * The procedure in execution completed. - * This can be implemented to perform cleanups. + * The procedure in execution completed. This can be implemented to perform cleanups. * @param proc the Procedure that completed the execution. */ void completionCleanup(Procedure proc); @@ -102,7 +101,7 @@ public interface ProcedureScheduler { /** * Fetch one Procedure from the queue * @param timeout how long to wait before giving up, in units of unit - * @param unit a TimeUnit determining how to interpret the timeout parameter + * @param unit a TimeUnit determining how to interpret the timeout parameter * @return the Procedure to execute, or null if nothing present. */ Procedure poll(long timeout, TimeUnit unit); @@ -126,9 +125,9 @@ public interface ProcedureScheduler { int size(); /** - * Clear current state of scheduler such that it is equivalent to newly created scheduler. - * Used for testing failure and recovery. To emulate server crash/restart, - * {@link ProcedureExecutor} resets its own state and calls clear() on scheduler. + * Clear current state of scheduler such that it is equivalent to newly created scheduler. Used + * for testing failure and recovery. To emulate server crash/restart, {@link ProcedureExecutor} + * resets its own state and calls clear() on scheduler. 
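A compressed sketch of how a worker loop drives this interface; the real loop in ProcedureExecutor also handles locking, suspension and metrics, so treat this only as an illustration of poll()/yield()/completionCleanup(). The running flag and runOneStep() helper are placeholders.

  scheduler.start();
  while (running) {
    Procedure proc = scheduler.poll(1, TimeUnit.SECONDS);  // null on timeout or stop
    if (proc == null) {
      continue;
    }
    boolean giveUpSlot = runOneStep(proc);       // assumed helper executing a single step
    if (giveUpSlot) {
      scheduler.yield(proc);                     // back of the queue, let someone else run
    } else if (proc.isFinished()) {
      scheduler.completionCleanup(proc);         // scheduler drops any per-procedure state
    }
  }
  scheduler.signalAll();                         // wake workers blocked in poll() at shutdown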
*/ void clear(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java index 216022f1c79..fc4eb1532ee 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.Message; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java index 9f521214f07..95fafae7266 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java index c557c2021b4..4a225161dbf 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,10 +46,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; */ @InterfaceAudience.Private public final class ProcedureUtil { - private ProcedureUtil() { } + private ProcedureUtil() { + } // ========================================================================== - // Reflection helpers to create/validate a Procedure object + // Reflection helpers to create/validate a Procedure object // ========================================================================== private static Procedure newProcedure(String className) throws BadProcedureException { try { @@ -85,18 +86,18 @@ public final class ProcedureUtil { throw new Exception("the " + clazz + " constructor is not public"); } } catch (Exception e) { - throw new BadProcedureException("The procedure class " + proc.getClass().getName() + - " must be accessible and have an empty constructor", e); + throw new BadProcedureException("The procedure class " + proc.getClass().getName() + + " must be accessible and have an empty constructor", e); } } // ========================================================================== - // convert to and from Procedure object + // convert to and from Procedure object // ========================================================================== /** - * A serializer for our Procedures. Instead of the previous serializer, it - * uses the stateMessage list to store the internal state of the Procedures. + * A serializer for our Procedures. Instead of the previous serializer, it uses the stateMessage + * list to store the internal state of the Procedures. 
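Inside a concrete Procedure the serializer is normally fed one protobuf message per call, for example as below; MyStateData is an invented generated message, while serialize() and deserialize() are the ProcedureStateSerializer methods used here.

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    MyStateData state = MyStateData.newBuilder()       // invented proto message
      .setLastProcessedRow(lastProcessedRow)
      .build();
    serializer.serialize(state);
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    MyStateData state = serializer.deserialize(MyStateData.class);
    this.lastProcessedRow = state.getLastProcessedRow();
  }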
*/ private static class StateSerializer implements ProcedureStateSerializer { private final ProcedureProtos.Procedure.Builder builder; @@ -113,8 +114,7 @@ public final class ProcedureUtil { } @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { if (deserializeIndex >= builder.getStateMessageCount()) { throw new IOException("Invalid state message index: " + deserializeIndex); } @@ -129,8 +129,8 @@ public final class ProcedureUtil { } /** - * A serializer (deserializer) for those Procedures which were serialized - * before this patch. It deserializes the old, binary stateData field. + * A serializer (deserializer) for those Procedures which were serialized before this patch. It + * deserializes the old, binary stateData field. */ private static class CompatStateSerializer implements ProcedureStateSerializer { private InputStream inputStream; @@ -146,8 +146,7 @@ public final class ProcedureUtil { @SuppressWarnings("unchecked") @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { Parser parser = (Parser) Internal.getDefaultInstance(clazz).getParserForType(); try { return parser.parseDelimitedFrom(inputStream); @@ -163,16 +162,13 @@ public final class ProcedureUtil { * Used by ProcedureStore implementations. */ public static ProcedureProtos.Procedure convertToProtoProcedure(Procedure proc) - throws IOException { + throws IOException { Preconditions.checkArgument(proc != null); validateClass(proc); final ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder() - .setClassName(proc.getClass().getName()) - .setProcId(proc.getProcId()) - .setState(proc.getState()) - .setSubmittedTime(proc.getSubmittedTime()) - .setLastUpdate(proc.getLastUpdate()); + .setClassName(proc.getClass().getName()).setProcId(proc.getProcId()).setState(proc.getState()) + .setSubmittedTime(proc.getSubmittedTime()).setLastUpdate(proc.getLastUpdate()); if (proc.hasParent()) { builder.setParentId(proc.getParentProcId()); @@ -232,7 +228,7 @@ public final class ProcedureUtil { * it by storing the data only on insert(). 
*/ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) - throws IOException { + throws IOException { // Procedure from class name Procedure proc = newProcedure(proto.getClassName()); @@ -259,9 +255,9 @@ public final class ProcedureUtil { } if (proto.hasException()) { - assert proc.getState() == ProcedureProtos.ProcedureState.FAILED || - proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK : - "The procedure must be failed (waiting to rollback) or rolledback"; + assert proc.getState() == ProcedureProtos.ProcedureState.FAILED + || proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK + : "The procedure must be failed (waiting to rollback) or rolledback"; proc.setFailure(RemoteProcedureException.fromProto(proto.getException())); } @@ -298,11 +294,11 @@ public final class ProcedureUtil { } // ========================================================================== - // convert from LockedResource object + // convert from LockedResource object // ========================================================================== - public static LockServiceProtos.LockedResourceType convertToProtoResourceType( - LockedResourceType resourceType) { + public static LockServiceProtos.LockedResourceType + convertToProtoResourceType(LockedResourceType resourceType) { return LockServiceProtos.LockedResourceType.valueOf(resourceType.name()); } @@ -310,29 +306,27 @@ public final class ProcedureUtil { return LockServiceProtos.LockType.valueOf(lockType.name()); } - public static LockServiceProtos.LockedResource convertToProtoLockedResource( - LockedResource lockedResource) throws IOException { + public static LockServiceProtos.LockedResource + convertToProtoLockedResource(LockedResource lockedResource) throws IOException { LockServiceProtos.LockedResource.Builder builder = - LockServiceProtos.LockedResource.newBuilder(); + LockServiceProtos.LockedResource.newBuilder(); - builder - .setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) - .setResourceName(lockedResource.getResourceName()) - .setLockType(convertToProtoLockType(lockedResource.getLockType())); + builder.setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) + .setResourceName(lockedResource.getResourceName()) + .setLockType(convertToProtoLockType(lockedResource.getLockType())); Procedure exclusiveLockOwnerProcedure = lockedResource.getExclusiveLockOwnerProcedure(); if (exclusiveLockOwnerProcedure != null) { ProcedureProtos.Procedure exclusiveLockOwnerProcedureProto = - convertToProtoProcedure(exclusiveLockOwnerProcedure); + convertToProtoProcedure(exclusiveLockOwnerProcedure); builder.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureProto); } builder.setSharedLockCount(lockedResource.getSharedLockCount()); for (Procedure waitingProcedure : lockedResource.getWaitingProcedures()) { - ProcedureProtos.Procedure waitingProcedureProto = - convertToProtoProcedure(waitingProcedure); + ProcedureProtos.Procedure waitingProcedureProto = convertToProtoProcedure(waitingProcedure); builder.addWaitingProcedures(waitingProcedureProto); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java index 0487ac5b094..bf78916c48f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java @@ 
-1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 296b97b000f..b880043c016 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -36,42 +35,43 @@ import org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout; import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A procedure dispatcher that aggregates and sends after elapsed time or after we hit - * count threshold. Creates its own threadpool to run RPCs with timeout. + * A procedure dispatcher that aggregates and sends after elapsed time or after we hit count + * threshold. Creates its own threadpool to run RPCs with timeout. *
 * <ul>
 * <li>Each server queue has a dispatch buffer</li>
- * <li>Once the dispatch buffer reaches a threshold-size/time we send
- * <li>
+ * <li>Once the dispatch buffer reaches a threshold-size/time we send<li>
 * </ul>
- * <p>
- * Call {@link #start()} and then {@link #submitTask(Runnable)}. When done,
- * call {@link #stop()}.
+ * <p>
      + * Call {@link #start()} and then {@link #submitTask(Runnable)}. When done, call {@link #stop()}. */ @InterfaceAudience.Private public abstract class RemoteProcedureDispatcher> { private static final Logger LOG = LoggerFactory.getLogger(RemoteProcedureDispatcher.class); public static final String THREAD_POOL_SIZE_CONF_KEY = - "hbase.procedure.remote.dispatcher.threadpool.size"; + "hbase.procedure.remote.dispatcher.threadpool.size"; private static final int DEFAULT_THREAD_POOL_SIZE = 128; public static final String DISPATCH_DELAY_CONF_KEY = - "hbase.procedure.remote.dispatcher.delay.msec"; + "hbase.procedure.remote.dispatcher.delay.msec"; private static final int DEFAULT_DISPATCH_DELAY = 150; public static final String DISPATCH_MAX_QUEUE_SIZE_CONF_KEY = - "hbase.procedure.remote.dispatcher.max.queue.size"; + "hbase.procedure.remote.dispatcher.max.queue.size"; private static final int DEFAULT_MAX_QUEUE_SIZE = 32; private final AtomicBoolean running = new AtomicBoolean(false); private final ConcurrentHashMap nodeMap = - new ConcurrentHashMap(); + new ConcurrentHashMap(); private final int operationDelay; private final int queueMaxSize; @@ -92,8 +92,8 @@ public abstract class RemoteProcedureDispatcher operations); + protected abstract void abortPendingOperations(TRemote key, Set operations); /** @@ -237,11 +237,11 @@ public abstract class RemoteProcedureDispatcher { /** - * For building the remote operation. - * May be empty if no need to send remote call. Usually, this means the RemoteProcedure has been - * finished already. This is possible, as we may have already sent the procedure to RS but then - * the rpc connection is broken so the executeProcedures call fails, but the RS does receive the - * procedure and execute it and then report back, before we retry again. + * For building the remote operation. May be empty if no need to send remote call. Usually, this + * means the RemoteProcedure has been finished already. This is possible, as we may have already + * sent the procedure to RS but then the rpc connection is broken so the executeProcedures call + * fails, but the RS does receive the procedure and execute it and then report back, before we + * retry again. 
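As an illustrative aside (not part of this patch): the three keys above are ordinary Configuration settings, so tuning the dispatcher is a matter of overriding them before it is constructed. The values below are arbitrary examples; the key names and defaults (128 threads, 150 ms delay, queue size 32) are the ones shown in this hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class DispatcherTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.procedure.remote.dispatcher.threadpool.size", 64); // default 128
    conf.setInt("hbase.procedure.remote.dispatcher.delay.msec", 200);     // default 150
    conf.setInt("hbase.procedure.remote.dispatcher.max.queue.size", 64);  // default 32
    return conf;
  }
}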
*/ Optional remoteCallBuild(TEnv env, TRemote remote); @@ -263,9 +263,8 @@ public abstract class RemoteProcedureDispatcher, RemoteOperation> buildAndGroupRequestByType(final TEnv env, - final TRemote remote, final Set remoteProcedures) { + final TRemote remote, final Set remoteProcedures) { final ArrayListMultimap, RemoteOperation> requestByType = ArrayListMultimap.create(); for (RemoteProcedure proc : remoteProcedures) { Optional operation = proc.remoteCallBuild(env, remote); @@ -294,12 +293,12 @@ public abstract class RemoteProcedureDispatcher List fetchType( - final ArrayListMultimap, RemoteOperation> requestByType, final Class type) { - return (List)requestByType.removeAll(type); + final ArrayListMultimap, RemoteOperation> requestByType, final Class type) { + return (List) requestByType.removeAll(type); } // ============================================================================================ - // Timeout Helpers + // Timeout Helpers // ============================================================================================ private final class TimeoutExecutorThread extends Thread { private final DelayQueue queue = new DelayQueue(); @@ -311,8 +310,8 @@ public abstract class RemoteProcedureDispatcher 0) { LOG.error("DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting" @@ -349,8 +348,8 @@ public abstract class RemoteProcedureDispatcher 0 && (i % 8) == 0) { - LOG.warn("Waiting termination of thread " + getName() + ", " + - StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); + LOG.warn("Waiting termination of thread " + getName() + ", " + + StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); } } } catch (InterruptedException e) { @@ -360,14 +359,14 @@ public abstract class RemoteProcedureDispatcher - implements RemoteNode { + implements RemoteNode { private Set operations; private final Set dispatchedOperations = new HashSet<>(); @@ -399,7 +398,7 @@ public abstract class RemoteProcedureDispatcher operation.storeInDispatchedQueue()) - .forEach(operation -> dispatchedOperations.add(operation)); + .forEach(operation -> dispatchedOperations.add(operation)); this.operations = null; } } @@ -413,7 +412,7 @@ public abstract class RemoteProcedureDispatcher - * RemoteProcedureException exceptions contain a Throwable as its cause. - * This can be a "regular" exception generated locally or a ProxyThrowable that is a representation - * of the original exception created on original 'remote' source. These ProxyThrowables have their - * their stacks traces and messages overridden to reflect the original 'remote' exception. + * RemoteProcedureException exceptions contain a Throwable as its cause. This can be a "regular" + * exception generated locally or a ProxyThrowable that is a representation of the original + * exception created on original 'remote' source. These ProxyThrowables have their their stacks + * traces and messages overridden to reflect the original 'remote' exception. */ @InterfaceAudience.Private @InterfaceStability.Evolving @SuppressWarnings("serial") public class RemoteProcedureException extends ProcedureException { /** - * Name of the throwable's source such as a host or thread name. Must be non-null. + * Name of the throwable's source such as a host or thread name. Must be non-null. */ private final String source; /** - * Create a new RemoteProcedureException that can be serialized. - * It is assumed that this came form a local source. + * Create a new RemoteProcedureException that can be serialized. 
It is assumed that this came form + * a local source. * @param source the host or thread name of the source - * @param cause the actual cause of the exception + * @param cause the actual cause of the exception */ public RemoteProcedureException(String source, Throwable cause) { super(cause); @@ -66,10 +65,10 @@ public class RemoteProcedureException extends ProcedureException { public Exception unwrapRemoteException() { final Throwable cause = getCause(); if (cause instanceof RemoteException) { - return ((RemoteException)cause).unwrapRemoteException(); + return ((RemoteException) cause).unwrapRemoteException(); } if (cause instanceof Exception) { - return (Exception)cause; + return (Exception) cause; } return new Exception(cause); } @@ -81,7 +80,7 @@ public class RemoteProcedureException extends ProcedureException { public IOException unwrapRemoteIOException() { final Exception cause = unwrapRemoteException(); if (cause instanceof IOException) { - return (IOException)cause; + return (IOException) cause; } return new IOException(cause); } @@ -95,7 +94,7 @@ public class RemoteProcedureException extends ProcedureException { /** * Converts a RemoteProcedureException to an array of bytes. * @param source the name of the external exception source - * @param t the "local" external exception (local) + * @param t the "local" external exception (local) * @return protobuf serialized version of RemoteProcedureException */ public static byte[] serialize(String source, Throwable t) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java index 440f9e7d6ec..9990bdeb430 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,11 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". - * A "Root Procedure" is a Procedure without parent, each subprocedure will be - * added to the "Root Procedure" stack (or rollback-stack). - * - * RootProcedureState is used and managed only by the ProcedureExecutor. - * Long rootProcId = getRootProcedureId(proc); - * rollbackStack.get(rootProcId).acquire(proc) - * rollbackStack.get(rootProcId).release(proc) - * ... + * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". A "Root + * Procedure" is a Procedure without parent, each subprocedure will be added to the "Root Procedure" + * stack (or rollback-stack). RootProcedureState is used and managed only by the ProcedureExecutor. + * Long rootProcId = getRootProcedureId(proc); rollbackStack.get(rootProcId).acquire(proc) + * rollbackStack.get(rootProcId).release(proc) ... 
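Stepping back to the RemoteProcedureException hunks just above, an illustrative sketch (not from this patch) of the wrap/serialize/unwrap cycle that javadoc describes; the host name and the cause are made up.

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;

public final class RemoteErrorSketch {
  public static byte[] wrapAndSerialize() {
    IOException cause = new IOException("flush failed");
    // Tag the failure with the host/thread it came from; the source must be non-null.
    RemoteProcedureException remote =
        new RemoteProcedureException("regionserver-1.example.org", cause);
    // On the receiving side unwrapRemoteIOException() hands back a plain IOException.
    IOException local = remote.unwrapRemoteIOException();
    assert local == cause; // the cause is already an IOException, so it is returned as-is
    // serialize() is the protobuf byte form that actually travels between processes.
    return RemoteProcedureException.serialize("regionserver-1.example.org", cause);
  }
}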
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -45,9 +41,9 @@ class RootProcedureState { private static final Logger LOG = LoggerFactory.getLogger(RootProcedureState.class); private enum State { - RUNNING, // The Procedure is running or ready to run - FAILED, // The Procedure failed, waiting for the rollback executing - ROLLINGBACK, // The Procedure failed and the execution was rolledback + RUNNING, // The Procedure is running or ready to run + FAILED, // The Procedure failed, waiting for the rollback executing + ROLLINGBACK, // The Procedure failed and the execution was rolledback } private Set> subprocs = null; @@ -102,7 +98,7 @@ class RootProcedureState { protected synchronized RemoteProcedureException getException() { if (subprocStack != null) { - for (Procedure proc: subprocStack) { + for (Procedure proc : subprocStack) { if (proc.hasException()) { return proc.getException(); } @@ -137,8 +133,8 @@ class RootProcedureState { } /** - * Called by the ProcedureExecutor after the procedure step is completed, - * to add the step to the rollback list (or procedure stack) + * Called by the ProcedureExecutor after the procedure step is completed, to add the step to the + * rollback list (or procedure stack) */ protected synchronized void addRollbackStep(Procedure proc) { if (proc.isFailed()) { @@ -163,11 +159,10 @@ class RootProcedureState { } /** - * Called on store load by the ProcedureExecutor to load part of the stack. - * - * Each procedure has its own stack-positions. Which means we have to write - * to the store only the Procedure we executed, and nothing else. - * on load we recreate the full stack by aggregating each procedure stack-positions. + * Called on store load by the ProcedureExecutor to load part of the stack. Each procedure has its + * own stack-positions. Which means we have to write to the store only the Procedure we executed, + * and nothing else. on load we recreate the full stack by aggregating each procedure + * stack-positions. */ protected synchronized void loadStack(Procedure proc) { addSubProcedure(proc); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java index 20abf651e30..131128cf04f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,24 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.SequentialProcedureData; /** * A SequentialProcedure describes one step in a procedure chain: + * *

 * <pre>
 *   -> Step 1 -> Step 2 -> Step 3
 * </pre>
      - * The main difference from a base Procedure is that the execute() of a - * SequentialProcedure will be called only once; there will be no second - * execute() call once the children are finished. which means once the child - * of a SequentialProcedure are completed the SequentialProcedure is completed too. + * + * The main difference from a base Procedure is that the execute() of a SequentialProcedure will be + * called only once; there will be no second execute() call once the children are finished. which + * means once the child of a SequentialProcedure are completed the SequentialProcedure is completed + * too. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -41,7 +42,7 @@ public abstract class SequentialProcedure extends Procedure extends Procedure extends ProcedureThe procedure implementor must have an enum of 'states', describing - * the various step of the procedure. - * Once the procedure is running, the procedure-framework will call executeFromState() - * using the 'state' provided by the user. The first call to executeFromState() - * will be performed with 'state = null'. The implementor can jump between - * states using setNextState(MyStateEnum.ordinal()). - * The rollback will call rollbackState() for each state that was executed, in reverse order. + *
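As an illustrative aside (not part of this patch), a minimal subclass showing the contract the StateMachineProcedure javadoc in these hunks describes: an enum of states, executeFromState() advancing via setNextState(), and rollbackState() undoing completed steps in reverse. DemoEnv, the INIT/DO_WORK/FINISH states and the empty bodies are invented; the overridden method names, the Flow values and the protected setNextState(TState) helper are assumed from the class being reformatted here.

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;

public class DemoStateMachineProcedure extends
    StateMachineProcedure<DemoStateMachineProcedure.DemoEnv, DemoStateMachineProcedure.State> {

  /** Hypothetical environment object handed to every step. */
  public static final class DemoEnv {
  }

  /** The enum of 'states' the javadoc above calls for. */
  public enum State {
    INIT,
    DO_WORK,
    FINISH
  }

  @Override
  protected Flow executeFromState(DemoEnv env, State state)
      throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
    if (state == null) {
      state = getInitialState(); // older javadoc notes the first call may pass null
    }
    switch (state) {
      case INIT:
        setNextState(State.DO_WORK); // jump to the next step
        return Flow.HAS_MORE_STATE;
      case DO_WORK:
        setNextState(State.FINISH);
        return Flow.HAS_MORE_STATE;
      case FINISH:
      default:
        return Flow.NO_MORE_STATE;   // no more steps, the procedure is done
    }
  }

  @Override
  protected void rollbackState(DemoEnv env, State state) throws IOException, InterruptedException {
    // Undo whatever the given state did; invoked for each executed state, in reverse order.
  }

  @Override
  protected State getState(int stateId) {
    return State.values()[stateId];
  }

  @Override
  protected int getStateId(State state) {
    return state.ordinal();
  }

  @Override
  protected State getInitialState() {
    return State.INIT;
  }
}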

      + * The procedure implementor must have an enum of 'states', describing the various step of the + * procedure. Once the procedure is running, the procedure-framework will call executeFromState() + * using the 'state' provided by the user. The first call to executeFromState() will be performed + * with 'state = null'. The implementor can jump between states using + * setNextState(MyStateEnum.ordinal()). The rollback will call rollbackState() for each state that + * was executed, in reverse order. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public abstract class StateMachineProcedure - extends Procedure { +public abstract class StateMachineProcedure extends Procedure { private static final Logger LOG = LoggerFactory.getLogger(StateMachineProcedure.class); private static final int EOF_STATE = Integer.MIN_VALUE; @@ -79,11 +76,11 @@ public abstract class StateMachineProcedure /** * called to perform a single step of the specified 'state' of the procedure * @param state state to execute - * @return Flow.NO_MORE_STATE if the procedure is completed, - * Flow.HAS_MORE_STATE if there is another step. + * @return Flow.NO_MORE_STATE if the procedure is completed, Flow.HAS_MORE_STATE if there is + * another step. */ protected abstract Flow executeFromState(TEnvironment env, TState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; /** * called to perform the rollback of the specified state @@ -123,9 +120,9 @@ public abstract class StateMachineProcedure } /** - * By default, the executor will try ro run all the steps of the procedure start to finish. - * Return true to make the executor yield between execution steps to - * give other procedures time to run their steps. + * By default, the executor will try ro run all the steps of the procedure start to finish. Return + * true to make the executor yield between execution steps to give other procedures time to run + * their steps. * @param state the state we are going to execute next. * @return Return true if the executor should yield before the execution of the specified step. * Defaults to return false. @@ -138,8 +135,8 @@ public abstract class StateMachineProcedure * Add a child procedure to execute * @param subProcedure the child procedure */ - protected > void addChildProcedure( - @SuppressWarnings("unchecked") T... subProcedure) { + protected > void + addChildProcedure(@SuppressWarnings("unchecked") T... subProcedure) { if (subProcedure == null) { return; } @@ -162,7 +159,7 @@ public abstract class StateMachineProcedure @Override protected Procedure[] execute(final TEnvironment env) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { updateTimestamp(); try { failIfAborted(); @@ -177,7 +174,7 @@ public abstract class StateMachineProcedure } if (LOG.isTraceEnabled()) { - LOG.trace(state + " " + this + "; cycles=" + this.cycles); + LOG.trace(state + " " + this + "; cycles=" + this.cycles); } // Keep running count of cycles if (getStateId(state) != this.previousState) { @@ -198,15 +195,14 @@ public abstract class StateMachineProcedure subProcList = null; return subProcedures; } - return (isWaiting() || isFailed() || !hasMoreState()) ? null : new Procedure[] {this}; + return (isWaiting() || isFailed() || !hasMoreState()) ? 
null : new Procedure[] { this }; } finally { updateTimestamp(); } } @Override - protected void rollback(final TEnvironment env) - throws IOException, InterruptedException { + protected void rollback(final TEnvironment env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -221,7 +217,7 @@ public abstract class StateMachineProcedure } protected boolean isEofState() { - return stateCount > 0 && states[stateCount-1] == EOF_STATE; + return stateCount > 0 && states[stateCount - 1] == EOF_STATE; } @Override @@ -254,8 +250,8 @@ public abstract class StateMachineProcedure } /** - * Used by the default implementation of abort() to know if the current state can be aborted - * and rollback can be triggered. + * Used by the default implementation of abort() to know if the current state can be aborted and + * rollback can be triggered. */ protected boolean isRollbackSupported(final TState state) { return false; @@ -271,7 +267,7 @@ public abstract class StateMachineProcedure } protected TState getCurrentState() { - return stateCount > 0 ? getState(states[stateCount-1]) : getInitialState(); + return stateCount > 0 ? getState(states[stateCount - 1]) : getInitialState(); } /** @@ -308,8 +304,7 @@ public abstract class StateMachineProcedure } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData.Builder data = StateMachineProcedureData.newBuilder(); for (int i = 0; i < stateCount; ++i) { data.addState(states[i]); @@ -318,8 +313,7 @@ public abstract class StateMachineProcedure } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData data = serializer.deserialize(StateMachineProcedureData.class); stateCount = data.getStateCount(); if (stateCount > 0) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java index b58b571a934..4d0d8941ded 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java index fc917b6f36e..3b99781a558 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,8 +53,7 @@ class TimeoutExecutorThread extends StoppableThread { @Override public void run() { while (executor.isRunning()) { - final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, - TimeUnit.SECONDS); + final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, TimeUnit.SECONDS); if (task == null || task == DelayedUtil.DELAYED_POISON) { // the executor may be shutting down, // and the task is just the shutdown request diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java index b8ddad21866..7a15ebfc494 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java index aba71b95d6d..de44ad5b3df 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -91,4 +91,4 @@ public class InMemoryProcedureIterator implements ProcedureIterator { moveToNext(); return proc; } -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java index 7a9ea1b0d31..fb2a725177d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,4 +26,4 @@ import org.apache.yetus.audience.InterfaceAudience; public interface LeaseRecovery { void recoverFileLease(FileSystem fs, Path path) throws IOException; -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java index 8fbc1473ed7..8a4dd403cd2 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java index c1eaa73230f..7e54cfa9a12 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -85,9 +84,8 @@ public interface ProcedureStore { void reset(); /** - * Returns true if the iterator has more elements. - * (In other words, returns true if next() would return a Procedure - * rather than throwing an exception.) + * Returns true if the iterator has more elements. (In other words, returns true if next() would + * return a Procedure rather than throwing an exception.) * @return true if the iterator has more procedures */ boolean hasNext(); @@ -135,8 +133,8 @@ public interface ProcedureStore { void load(ProcedureIterator procIter) throws IOException; /** - * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to - * the executor, which probably means they are corrupted since some information/link is missing. + * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to the + * executor, which probably means they are corrupted since some information/link is missing. * @param procIter iterator over the procedures not ready to be added to the executor, corrupted */ void handleCorrupted(ProcedureIterator procIter) throws IOException; @@ -178,8 +176,8 @@ public interface ProcedureStore { int getNumThreads(); /** - * Set the number of procedure running. - * This can be used, for example, by the store to know how long to wait before a sync. + * Set the number of procedure running. This can be used, for example, by the store to know how + * long to wait before a sync. * @return how many procedures are running (may not be same as count). 
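As an illustrative aside (not part of this patch), the ProcedureLoader/ProcedureIterator contract in this file boils down to draining the iterator the store hands back on load(); a minimal sketch, assuming only the hasNext()/next() pair documented above (the helper class and method names are invented).

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;

public final class LoaderSketch {
  // What a ProcedureLoader.load()/handleCorrupted() callback typically does with the iterator.
  public static List<Procedure<?>> drain(ProcedureStore.ProcedureIterator procIter)
      throws IOException {
    List<Procedure<?>> loaded = new ArrayList<>();
    while (procIter.hasNext()) {   // true while next() would return a Procedure
      loaded.add(procIter.next()); // returns the next stored procedure
    }
    return loaded;
  }
}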
*/ int setRunningProcedureCount(int count); @@ -201,57 +199,48 @@ public interface ProcedureStore { void load(ProcedureLoader loader) throws IOException; /** - * When a procedure is submitted to the executor insert(proc, null) will be called. - * 'proc' has a 'RUNNABLE' state and the initial information required to start up. - * - * When a procedure is executed and it returns children insert(proc, subprocs) will be called. - * 'proc' has a 'WAITING' state and an update state. - * 'subprocs' are the children in 'RUNNABLE' state with the initial information. - * - * @param proc the procedure to serialize and write to the store. + * When a procedure is submitted to the executor insert(proc, null) will be called. 'proc' has a + * 'RUNNABLE' state and the initial information required to start up. When a procedure is executed + * and it returns children insert(proc, subprocs) will be called. 'proc' has a 'WAITING' state and + * an update state. 'subprocs' are the children in 'RUNNABLE' state with the initial information. + * @param proc the procedure to serialize and write to the store. * @param subprocs the newly created child of the proc. */ void insert(Procedure proc, Procedure[] subprocs); /** - * Serialize a set of new procedures. - * These procedures are freshly submitted to the executor and each procedure - * has a 'RUNNABLE' state and the initial information required to start up. - * + * Serialize a set of new procedures. These procedures are freshly submitted to the executor and + * each procedure has a 'RUNNABLE' state and the initial information required to start up. * @param procs the procedures to serialize and write to the store. */ void insert(Procedure[] procs); /** - * The specified procedure was executed, - * and the new state should be written to the store. + * The specified procedure was executed, and the new state should be written to the store. * @param proc the procedure to serialize and write to the store. */ void update(Procedure proc); /** - * The specified procId was removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procId. + * The specified procId was removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procId. * @param procId the ID of the procedure to remove. */ void delete(long procId); /** - * The parent procedure completed. - * Update the state and mark all the child deleted. + * The parent procedure completed. Update the state and mark all the child deleted. * @param parentProc the parent procedure to serialize and write to the store. * @param subProcIds the IDs of the sub-procedure to remove. */ void delete(Procedure parentProc, long[] subProcIds); /** - * The specified procIds were removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procIds. + * The specified procIds were removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procIds. * @param procIds the IDs of the procedures to remove. 
- * @param offset the array offset from where to start to delete - * @param count the number of IDs to delete + * @param offset the array offset from where to start to delete + * @param count the number of IDs to delete */ void delete(long[] procIds, int offset, int count); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java index a5c04fab200..4efb6d34b5a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,14 +27,13 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public abstract class ProcedureStoreBase implements ProcedureStore { private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList<>(); + new CopyOnWriteArrayList<>(); private final AtomicBoolean running = new AtomicBoolean(false); /** - * Change the state to 'isRunning', - * returns true if the store state was changed, - * false if the store was already in that state. + * Change the state to 'isRunning', returns true if the store state was changed, false if the + * store was already in that state. * @param isRunning the state to set. * @return true if the store state was changed, otherwise false. */ diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java index 4e615b971d8..fc61d41c7f0 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -167,8 +167,8 @@ public final class ProcedureTree { rootEntry); valid = false; } else if (entries.size() > 1) { - LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + - " root procedure is {}", entries, i, maxStackId, rootEntry); + LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + + " root procedure is {}", entries, i, maxStackId, rootEntry); valid = false; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java index 0cdc4804100..89d8cabf9d6 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,4 +48,4 @@ public class ProtoAndProcedure { public ProcedureProtos.Procedure getProto() { return proto; } -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java index 98416a527b8..fdbf183b19f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -415,7 +415,7 @@ class BitSetNode { } catch (ArrayIndexOutOfBoundsException aioobe) { // We've gotten a AIOOBE in here; add detail to help debug. ArrayIndexOutOfBoundsException aioobe2 = - new ArrayIndexOutOfBoundsException("pid=" + procId + ", deleted=" + isDeleted); + new ArrayIndexOutOfBoundsException("pid=" + procId + ", deleted=" + isDeleted); aioobe2.initCause(aioobe); throw aioobe2; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java index dc9d16c41f8..a47b2664a9e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java index 3436e8b7669..eeb5a3a827b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,10 +32,8 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; /** - * Keeps track of live procedures. - * - * It can be used by the ProcedureStore to identify which procedures are already - * deleted/completed to avoid the deserialization step on restart + * Keeps track of live procedures. It can be used by the ProcedureStore to identify which procedures + * are already deleted/completed to avoid the deserialization step on restart * @deprecated Since 2.3.0, will be removed in 4.0.0. Keep here only for rolling upgrading, now we * use the new region based procedure store. */ @@ -48,29 +46,32 @@ class ProcedureStoreTracker { private final TreeMap map = new TreeMap<>(); /** - * If true, do not remove bits corresponding to deleted procedures. Note that this can result - * in huge bitmaps overtime. 
- * Currently, it's set to true only when building tracker state from logs during recovery. During - * recovery, if we are sure that a procedure has been deleted, reading its old update entries - * can be skipped. + * If true, do not remove bits corresponding to deleted procedures. Note that this can result in + * huge bitmaps overtime. Currently, it's set to true only when building tracker state from logs + * during recovery. During recovery, if we are sure that a procedure has been deleted, reading its + * old update entries can be skipped. */ private boolean keepDeletes = false; /** - * If true, it means tracker has incomplete information about the active/deleted procedures. - * It's set to true only when recovering from old logs. See {@link #isDeleted(long)} docs to - * understand it's real use. + * If true, it means tracker has incomplete information about the active/deleted procedures. It's + * set to true only when recovering from old logs. See {@link #isDeleted(long)} docs to understand + * it's real use. */ boolean partial = false; private long minModifiedProcId = Long.MAX_VALUE; private long maxModifiedProcId = Long.MIN_VALUE; - public enum DeleteState { YES, NO, MAYBE } + public enum DeleteState { + YES, + NO, + MAYBE + } public void resetToProto(ProcedureProtos.ProcedureStoreTracker trackerProtoBuf) { reset(); - for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : - trackerProtoBuf.getNodeList()) { + for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : trackerProtoBuf + .getNodeList()) { final BitSetNode node = new BitSetNode(protoNode); map.put(node.getStart(), node); } @@ -182,6 +183,7 @@ class ProcedureStoreTracker { this.minModifiedProcId = min; this.maxModifiedProcId = max; } + /** * This method is used when restarting where we need to rebuild the ProcedureStoreTracker. The * {@link #delete(long)} method above assume that the {@link BitSetNode} exists, but when restart @@ -212,7 +214,7 @@ class ProcedureStoreTracker { } private void setDeleteIf(ProcedureStoreTracker tracker, - BiFunction func) { + BiFunction func) { BitSetNode trackerNode = null; for (BitSetNode node : map.values()) { long minProcId = node.getStart(); @@ -236,8 +238,8 @@ class ProcedureStoreTracker { * @see #setDeletedIfModifiedInBoth(ProcedureStoreTracker) */ public void setDeletedIfDeletedByThem(ProcedureStoreTracker tracker) { - setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) || - node.isDeleted(procId) == DeleteState.YES); + setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) + || node.isDeleted(procId) == DeleteState.YES); } /** @@ -252,7 +254,7 @@ class ProcedureStoreTracker { /** * lookup the node containing the specified procId. - * @param node cached node to check before doing a lookup + * @param node cached node to check before doing a lookup * @param procId the procId to lookup * @return the node that may contains the procId or null */ @@ -288,16 +290,15 @@ class ProcedureStoreTracker { public boolean isModified(long procId) { final Map.Entry entry = map.floorEntry(procId); - return entry != null && entry.getValue().contains(procId) && - entry.getValue().isModified(procId); + return entry != null && entry.getValue().contains(procId) + && entry.getValue().isModified(procId); } /** * If {@link #partial} is false, returns state from the bitmap. If no state is found for - * {@code procId}, returns YES. 
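As an illustrative aside (not part of this patch), the tri-state isDeleted() answer described here is what a reader consults during replay; a minimal sketch of one possible policy. The class and the skip rule are invented, and the sketch pretends to live in the tracker's own package because this ProcedureStoreTracker is package-private.

// Hypothetical helper placed in the tracker's package for visibility.
package org.apache.hadoop.hbase.procedure2.store.wal;

final class TrackerReplaySketch {
  static boolean shouldDeserialize(ProcedureStoreTracker tracker, long procId) {
    ProcedureStoreTracker.DeleteState state = tracker.isDeleted(procId);
    if (state == ProcedureStoreTracker.DeleteState.YES) {
      return false; // known-deleted: its old update entries can be skipped entirely
    }
    // NO means the procedure is live; MAYBE only happens while the tracker is 'partial'
    // (being rebuilt from old logs), and this example errs on the side of deserializing then.
    return true;
  }
}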
- * If partial is true, tracker doesn't have complete view of system state, so it returns MAYBE - * if there is no update for the procedure or if it doesn't have a state in bitmap. Otherwise, - * returns state from the bitmap. + * {@code procId}, returns YES. If partial is true, tracker doesn't have complete view of system + * state, so it returns MAYBE if there is no update for the procedure or if it doesn't have a + * state in bitmap. Otherwise, returns state from the bitmap. */ public DeleteState isDeleted(long procId) { Map.Entry entry = map.floorEntry(procId); @@ -378,8 +379,8 @@ class ProcedureStoreTracker { } /** - * Clears the list of updated procedure ids. This doesn't affect global list of active - * procedure ids. + * Clears the list of updated procedure ids. This doesn't affect global list of active procedure + * ids. */ public void resetModified() { for (Map.Entry entry : map.entrySet()) { @@ -472,17 +473,16 @@ class ProcedureStoreTracker { } // ======================================================================== - // Convert to/from Protocol Buffer. + // Convert to/from Protocol Buffer. // ======================================================================== /** - * Builds - * org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker + * Builds org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker * protocol buffer from current state. */ public ProcedureProtos.ProcedureStoreTracker toProto() throws IOException { ProcedureProtos.ProcedureStoreTracker.Builder builder = - ProcedureProtos.ProcedureStoreTracker.newBuilder(); + ProcedureProtos.ProcedureStoreTracker.newBuilder(); for (Map.Entry entry : map.entrySet()) { builder.addNode(entry.getValue().convert()); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java index 947d5bd9d65..74f3e778107 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ class ProcedureWALFile implements Comparable { tracker.setPartialFlag(true); } - public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, - long startPos, long timestamp) { + public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, long startPos, + long timestamp) { this.fs = fs; this.header = header; this.logFile = logFile; @@ -101,7 +101,7 @@ class ProcedureWALFile implements Comparable { try { stream.seek(trailer.getTrackerPos()); final ProcedureProtos.ProcedureStoreTracker trackerProtoBuf = - ProcedureProtos.ProcedureStoreTracker.parseDelimitedFrom(stream); + ProcedureProtos.ProcedureStoreTracker.parseDelimitedFrom(stream); tracker.resetToProto(trackerProtoBuf); } finally { stream.seek(startPos); @@ -205,7 +205,7 @@ class ProcedureWALFile implements Comparable { return false; } - return compareTo((ProcedureWALFile)o) == 0; + return compareTo((ProcedureWALFile) o) == 0; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java index bc60584126f..e480f7200b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2.store.wal; import java.io.IOException; @@ -73,7 +72,8 @@ final class ProcedureWALFormat { void markCorruptedWAL(ProcedureWALFile log, IOException e); } - private ProcedureWALFormat() {} + private ProcedureWALFormat() { + } /** * Load all the procedures in these ProcedureWALFiles, and rebuild the given {@code tracker} if @@ -87,7 +87,7 @@ final class ProcedureWALFormat { * procedures. */ public static void load(Iterator logs, ProcedureStoreTracker tracker, - Loader loader) throws IOException { + Loader loader) throws IOException { ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker, loader); tracker.setKeepDeletes(true); // Ignore the last log which is current active log. 
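The trailer that writeTrailer()/readTrailer() in the next hunk handle has the layout below, reproduced for reference from the comment lines that hunk removes (spotless reflows the box diagram onto a single line; column alignment here is approximate).

/*
 * +-----------------+
 * | END OF WAL DATA | <---+
 * +-----------------+     |
 * |                 |     |
 * |     Tracker     |     |
 * |                 |     |
 * +-----------------+     |
 * |     version     |     |
 * +-----------------+     |
 * |  TRAILER_MAGIC  |     |
 * +-----------------+     |
 * |      offset     |-----+
 * +-----------------+
 */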
@@ -111,33 +111,22 @@ final class ProcedureWALFormat { } public static void writeHeader(OutputStream stream, ProcedureWALHeader header) - throws IOException { + throws IOException { header.writeDelimitedTo(stream); } /* - * +-----------------+ - * | END OF WAL DATA | <---+ - * +-----------------+ | - * | | | - * | Tracker | | - * | | | - * +-----------------+ | - * | version | | - * +-----------------+ | - * | TRAILER_MAGIC | | - * +-----------------+ | - * | offset |-----+ - * +-----------------+ + * +-----------------+ | END OF WAL DATA | <---+ +-----------------+ | | | | | Tracker | | | | | + * +-----------------+ | | version | | +-----------------+ | | TRAILER_MAGIC | | + * +-----------------+ | | offset |-----+ +-----------------+ */ public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker) - throws IOException { + throws IOException { long offset = stream.getPos(); // Write EOF Entry - ProcedureWALEntry.newBuilder() - .setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF) - .build().writeDelimitedTo(stream); + ProcedureWALEntry.newBuilder().setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF).build() + .writeDelimitedTo(stream); // Write Tracker tracker.toProto().writeDelimitedTo(stream); @@ -148,8 +137,7 @@ final class ProcedureWALFormat { return stream.getPos() - offset; } - public static ProcedureWALHeader readHeader(InputStream stream) - throws IOException { + public static ProcedureWALHeader readHeader(InputStream stream) throws IOException { ProcedureWALHeader header; try { header = ProcedureWALHeader.parseDelimitedFrom(stream); @@ -162,8 +150,8 @@ final class ProcedureWALFormat { } if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) { - throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() + - " expected " + HEADER_VERSION); + throw new InvalidWALDataException( + "Invalid Header version. got " + header.getVersion() + " expected " + HEADER_VERSION); } if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) { @@ -174,7 +162,7 @@ final class ProcedureWALFormat { } public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long startPos, long size) - throws IOException { + throws IOException { // Beginning of the Trailer Jump. 17 = 1 byte version + 8 byte magic + 8 byte offset long trailerPos = size - 17; @@ -185,14 +173,14 @@ final class ProcedureWALFormat { stream.seek(trailerPos); int version = stream.read(); if (version != TRAILER_VERSION) { - throw new InvalidWALDataException("Invalid Trailer version. got " + version + - " expected " + TRAILER_VERSION); + throw new InvalidWALDataException( + "Invalid Trailer version. got " + version + " expected " + TRAILER_VERSION); } long magic = StreamUtils.readLong(stream); if (magic != TRAILER_MAGIC) { - throw new InvalidWALDataException("Invalid Trailer magic. got " + magic + - " expected " + TRAILER_MAGIC); + throw new InvalidWALDataException( + "Invalid Trailer magic. 
got " + magic + " expected " + TRAILER_MAGIC); } long trailerOffset = StreamUtils.readLong(stream); @@ -203,10 +191,8 @@ final class ProcedureWALFormat { throw new InvalidWALDataException("Invalid Trailer begin"); } - ProcedureWALTrailer trailer = ProcedureWALTrailer.newBuilder() - .setVersion(version) - .setTrackerPos(stream.getPos()) - .build(); + ProcedureWALTrailer trailer = + ProcedureWALTrailer.newBuilder().setVersion(version).setTrackerPos(stream.getPos()).build(); return trailer; } @@ -214,8 +200,8 @@ final class ProcedureWALFormat { return ProcedureWALEntry.parseDelimitedFrom(stream); } - public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, - Procedure proc, Procedure[] subprocs) throws IOException { + public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, Procedure proc, + Procedure[] subprocs) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(type); builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc)); @@ -227,23 +213,20 @@ final class ProcedureWALFormat { builder.build().writeDelimitedTo(slot); } - public static void writeInsert(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeInsert(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INIT, proc, null); } public static void writeInsert(ByteSlot slot, Procedure proc, Procedure[] subprocs) - throws IOException { + throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INSERT, proc, subprocs); } - public static void writeUpdate(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeUpdate(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_UPDATE, proc, null); } - public static void writeDelete(ByteSlot slot, long procId) - throws IOException { + public static void writeDelete(ByteSlot slot, long procId) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(ProcedureWALEntry.Type.PROCEDURE_WAL_DELETE); builder.setProcId(procId); @@ -251,7 +234,7 @@ final class ProcedureWALFormat { } public static void writeDelete(ByteSlot slot, Procedure proc, long[] subprocs) - throws IOException { + throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(ProcedureWALEntry.Type.PROCEDURE_WAL_DELETE); builder.setProcId(proc.getProcId()); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java index 31150cad8fb..c7647c72f7d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,11 +56,10 @@ class ProcedureWALFormatReader { private final ProcedureWALFormat.Loader loader; /** - * Global tracker that will be used by the WALProcedureStore after load. - * If the last WAL was closed cleanly we already have a full tracker ready to be used. - * If the last WAL was truncated (e.g. 
master killed) the tracker will be empty - * and the 'partial' flag will be set. In this case, on WAL replay we are going - * to rebuild the tracker. + * Global tracker that will be used by the WALProcedureStore after load. If the last WAL was + * closed cleanly we already have a full tracker ready to be used. If the last WAL was truncated + * (e.g. master killed) the tracker will be empty and the 'partial' flag will be set. In this + * case, on WAL replay we are going to rebuild the tracker. */ private final ProcedureStoreTracker tracker; @@ -79,7 +78,7 @@ class ProcedureWALFormatReader { private long maxProcId = 0; public ProcedureWALFormatReader(final ProcedureStoreTracker tracker, - ProcedureWALFormat.Loader loader) { + ProcedureWALFormat.Loader loader) { this.tracker = tracker; this.loader = loader; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java index 89e32c30880..f11c97c1952 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,13 +63,11 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool { /** * Reads a log file and outputs its contents. - * - * @param conf HBase configuration relevant to this log file - * @param p path of the log file to be read - * @throws IOException IOException + * @param conf HBase configuration relevant to this log file + * @param p path of the log file to be read + * @throws IOException IOException */ - public void processFile(final Configuration conf, final Path p) - throws IOException { + public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { @@ -117,8 +115,7 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool { } } catch (IOException e) { out.println("got an exception while reading the procedure WAL " + e.getMessage()); - } - finally { + } finally { log.close(); } } @@ -146,13 +143,9 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool { } /** - * Pass one or more log file names and formatting options and it will dump out - * a text version of the contents on stdout. - * - * @param args - * Command line arguments - * @throws IOException - * Thrown upon file system errors etc. + * Pass one or more log file names and formatting options and it will dump out a text version of + * the contents on stdout. n * Command line arguments n * Thrown upon file system + * errors etc. 
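As an illustrative aside (not part of this patch), the pretty printer can also be driven programmatically through the processFile(conf, path) method shown in this hunk; the wrapper class, the argument handling and the assumption that the default constructor prints to stdout are all illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALPrettyPrinter;

public final class DumpProcWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path wal = new Path(args[0]); // e.g. a file under the MasterProcWALs directory
    // Prints each WAL entry to stdout, as described above.
    new ProcedureWALPrettyPrinter().processFile(conf, wal);
  }
}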
*/ @Override public int run(final String[] args) throws IOException { @@ -172,19 +165,19 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool { if (files.isEmpty() || cmd.hasOption("h")) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } } catch (ParseException e) { e.printStackTrace(); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } // get configuration, file system, and process the given files for (Path file : files) { processFile(getConf(), file); } - return(0); + return (0); } public static void main(String[] args) throws Exception { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java index 5e1983f4696..6251f4a4a5f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,7 @@ class WALProcedureMap { * See HBASE-18152. */ private static boolean isIncreasing(ProcedureProtos.Procedure current, - ProcedureProtos.Procedure candidate) { + ProcedureProtos.Procedure candidate) { // Check that the procedures we see are 'increasing'. We used to compare // procedure id first and then update time but it can legitimately go backwards if the // procedure is failed or rolled back so that was unreliable. Was going to compare diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index d2d661f956a..42710b29a45 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -99,8 +99,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu * will first be initialized to the oldest file's tracker(which is stored in the trailer), using the * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it * with the tracker of every newer wal files, using the - * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. - * If we find out + * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we find out * that all the modified procedures for the oldest wal file are modified or deleted in newer wal * files, then we can delete it. 
This is because that, every time we call * {@link ProcedureStore#insert(Procedure[])} or {@link ProcedureStore#update(Procedure)}, we will @@ -119,7 +118,6 @@ public class WALProcedureStore extends ProcedureStoreBase { /** Used to construct the name of the log directory for master procedures */ public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs"; - public static final String WAL_COUNT_WARN_THRESHOLD_CONF_KEY = "hbase.procedure.store.wal.warn.threshold"; private static final int DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10; @@ -136,8 +134,7 @@ public class WALProcedureStore extends ProcedureStoreBase { "hbase.procedure.store.wal.wait.before.roll"; private static final int DEFAULT_WAIT_BEFORE_ROLL = 500; - public static final String ROLL_RETRIES_CONF_KEY = - "hbase.procedure.store.wal.max.roll.retries"; + public static final String ROLL_RETRIES_CONF_KEY = "hbase.procedure.store.wal.max.roll.retries"; private static final int DEFAULT_ROLL_RETRIES = 3; public static final String MAX_SYNC_FAILURE_ROLL_CONF_KEY = @@ -158,7 +155,7 @@ public class WALProcedureStore extends ProcedureStoreBase { private static final long DEFAULT_ROLL_THRESHOLD = 32 * 1024 * 1024; // 32M public static final String STORE_WAL_SYNC_STATS_COUNT = - "hbase.procedure.store.wal.sync.stats.count"; + "hbase.procedure.store.wal.sync.stats.count"; private static final int DEFAULT_SYNC_STATS_COUNT = 10; private final LinkedList logs = new LinkedList<>(); @@ -241,14 +238,14 @@ public class WALProcedureStore extends ProcedureStoreBase { } public WALProcedureStore(final Configuration conf, final Path walDir, final Path walArchiveDir, - final LeaseRecovery leaseRecovery) throws IOException { + final LeaseRecovery leaseRecovery) throws IOException { this.conf = conf; this.leaseRecovery = leaseRecovery; this.walDir = walDir; this.walArchiveDir = walArchiveDir; this.fs = CommonFSUtils.getWALFileSystem(conf); - this.enforceStreamCapability = conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, - true); + this.enforceStreamCapability = + conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true); // Create the log directory for the procedure store if (!fs.exists(walDir)) { @@ -258,7 +255,7 @@ public class WALProcedureStore extends ProcedureStoreBase { } // Now that it exists, set the log policy String storagePolicy = - conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); + conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); CommonFSUtils.setStoragePolicy(fs, walDir, storagePolicy); // Create archive dir up front. Rename won't work w/o it up on HDFS. @@ -301,8 +298,8 @@ public class WALProcedureStore extends ProcedureStoreBase { useHsync = conf.getBoolean(USE_HSYNC_CONF_KEY, DEFAULT_USE_HSYNC); // WebUI - syncMetricsQueue = new CircularFifoQueue<>( - conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); + syncMetricsQueue = + new CircularFifoQueue<>(conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); // Init sync thread syncThread = new Thread("WALProcedureStoreSyncThread") { @@ -327,8 +324,8 @@ public class WALProcedureStore extends ProcedureStoreBase { return; } - LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + - (isSyncAborted() ? " (self aborting)" : "")); + LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + + (isSyncAborted() ? 
" (self aborting)" : "")); sendStopSignal(); if (!isSyncAborted()) { try { @@ -348,7 +345,7 @@ public class WALProcedureStore extends ProcedureStoreBase { // Close the old logs // they should be already closed, this is just in case the load fails // and we call start() and then stop() - for (ProcedureWALFile log: logs) { + for (ProcedureWALFile log : logs) { log.close(); } logs.clear(); @@ -403,8 +400,7 @@ public class WALProcedureStore extends ProcedureStoreBase { while (isRunning()) { // Don't sleep before first attempt if (afterFirstAttempt) { - LOG.trace("Sleep {} ms after first lease recovery attempt.", - waitBeforeRoll); + LOG.trace("Sleep {} ms after first lease recovery attempt.", waitBeforeRoll); Threads.sleepWithoutInterrupt(waitBeforeRoll); } else { afterFirstAttempt = true; @@ -511,8 +507,9 @@ public class WALProcedureStore extends ProcedureStoreBase { } // the config says to not cleanup wals on load. - if (!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, - DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) { + if ( + !conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY) + ) { LOG.debug("WALs cleanup on load is not enabled: " + getActiveLogs()); return; } @@ -550,8 +547,8 @@ public class WALProcedureStore extends ProcedureStoreBase { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. - LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + - proc + ", subprocs=" + Arrays.toString(subprocs), e); + LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + proc + + ", subprocs=" + Arrays.toString(subprocs), e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -579,8 +576,8 @@ public class WALProcedureStore extends ProcedureStoreBase { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. - LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: " + - Arrays.toString(procs), e); + LOG.error(HBaseMarkers.FATAL, + "Unable to serialize one of the procedure: " + Arrays.toString(procs), e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -704,10 +701,14 @@ public class WALProcedureStore extends ProcedureStoreBase { slotsCache.offer(slot); } - private enum PushType { INSERT, UPDATE, DELETE }; + private enum PushType { + INSERT, + UPDATE, + DELETE + }; - private long pushData(final PushType type, final ByteSlot slot, - final long procId, final long[] subProcIds) { + private long pushData(final PushType type, final ByteSlot slot, final long procId, + final long[] subProcIds) { if (!isRunning()) { throw new RuntimeException("the store must be running before inserting data"); } @@ -766,8 +767,7 @@ public class WALProcedureStore extends ProcedureStoreBase { return logId; } - private void updateStoreTracker(final PushType type, - final long procId, final long[] subProcIds) { + private void updateStoreTracker(final PushType type, final long procId, final long[] subProcIds) { switch (type) { case INSERT: if (subProcIds == null) { @@ -817,8 +817,8 @@ public class WALProcedureStore extends ProcedureStoreBase { if (LOG.isTraceEnabled()) { float rollTsSec = getMillisFromLastRoll() / 1000.0f; LOG.trace(String.format("Waiting for data. 
flushed=%s (%s/sec)", - StringUtils.humanSize(totalSynced.get()), - StringUtils.humanSize(totalSynced.get() / rollTsSec))); + StringUtils.humanSize(totalSynced.get()), + StringUtils.humanSize(totalSynced.get() / rollTsSec))); } waitCond.await(getMillisToNextPeriodicRoll(), TimeUnit.MILLISECONDS); @@ -841,9 +841,8 @@ public class WALProcedureStore extends ProcedureStoreBase { final float syncedPerSec = totalSyncedToStore / rollSec; if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < syncMaxSlot)) { LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)", - StringUtils.humanTimeDiff(syncWaitMs), slotIndex, - StringUtils.humanSize(totalSyncedToStore), - StringUtils.humanSize(syncedPerSec))); + StringUtils.humanTimeDiff(syncWaitMs), slotIndex, + StringUtils.humanSize(totalSyncedToStore), StringUtils.humanSize(syncedPerSec))); } // update webui circular buffers (TODO: get rid of allocations) @@ -919,7 +918,7 @@ public class WALProcedureStore extends ProcedureStoreBase { } protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots, - final int offset, final int count) throws IOException { + final int offset, final int count) throws IOException { long totalSynced = 0; for (int i = 0; i < count; ++i) { final ByteSlot data = slots[offset + i]; @@ -931,8 +930,8 @@ public class WALProcedureStore extends ProcedureStoreBase { sendPostSyncSignal(); if (LOG.isTraceEnabled()) { - LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + - ", flushed=" + StringUtils.humanSize(totalSynced)); + LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + ", flushed=" + + StringUtils.humanSize(totalSynced)); } return totalSynced; } @@ -1005,7 +1004,7 @@ public class WALProcedureStore extends ProcedureStoreBase { lock.lock(); try { removeInactiveLogs(); - } finally { + } finally { lock.unlock(); } } @@ -1059,11 +1058,8 @@ public class WALProcedureStore extends ProcedureStoreBase { assert lock.isHeldByCurrentThread() : "expected to be the lock owner. " + lock.isLocked(); ProcedureWALHeader header = ProcedureWALHeader.newBuilder() - .setVersion(ProcedureWALFormat.HEADER_VERSION) - .setType(ProcedureWALFormat.LOG_TYPE_STREAM) - .setMinProcId(storeTracker.getActiveMinProcId()) - .setLogId(logId) - .build(); + .setVersion(ProcedureWALFormat.HEADER_VERSION).setType(ProcedureWALFormat.LOG_TYPE_STREAM) + .setMinProcId(storeTracker.getActiveMinProcId()).setLogId(logId).build(); FSDataOutputStream newStream = null; Path newLogFile = null; @@ -1083,11 +1079,11 @@ public class WALProcedureStore extends ProcedureStoreBase { // to provide. final String durability = useHsync ? StreamCapabilities.HSYNC : StreamCapabilities.HFLUSH; if (enforceStreamCapability && !newStream.hasCapability(durability)) { - throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + - " for proper operation during component failures, but the underlying filesystem does " + - "not support doing so. Please check the config value of '" + USE_HSYNC_CONF_KEY + - "' to set the desired level of robustness and ensure the config value of '" + - CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); + throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + + " for proper operation during component failures, but the underlying filesystem does " + + "not support doing so. 
Please check the config value of '" + USE_HSYNC_CONF_KEY + + "' to set the desired level of robustness and ensure the config value of '" + + CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); } try { ProcedureWALFormat.writeHeader(newStream, header); @@ -1112,8 +1108,8 @@ public class WALProcedureStore extends ProcedureStoreBase { if (logs.size() == 2) { buildHoldingCleanupTracker(); } else if (logs.size() > walCountWarnThreshold) { - LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + - " to see if something is stuck.", logs.size(), walCountWarnThreshold); + LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + + " to see if something is stuck.", logs.size(), walCountWarnThreshold); // This is just like what we have done at RS side when there are too many wal files. For RS, // if there are too many wal files, we will find out the wal entries in the oldest file, and // tell the upper layer to flush these regions so the wal entries will be useless and then we @@ -1160,7 +1156,7 @@ public class WALProcedureStore extends ProcedureStoreBase { } // ========================================================================== - // Log Files cleaner helpers + // Log Files cleaner helpers // ========================================================================== private void removeInactiveLogs() throws IOException { // We keep track of which procedures are holding the oldest WAL in 'holdingCleanupTracker'. @@ -1246,7 +1242,7 @@ public class WALProcedureStore extends ProcedureStoreBase { } // ========================================================================== - // FileSystem Log Files helpers + // FileSystem Log Files helpers // ========================================================================== public Path getWALDir() { return this.walDir; @@ -1279,14 +1275,14 @@ public class WALProcedureStore extends ProcedureStoreBase { }; private static final Comparator FILE_STATUS_ID_COMPARATOR = - new Comparator() { - @Override - public int compare(FileStatus a, FileStatus b) { - final long aId = getLogIdFromName(a.getPath().getName()); - final long bId = getLogIdFromName(b.getPath().getName()); - return Long.compare(aId, bId); - } - }; + new Comparator() { + @Override + public int compare(FileStatus a, FileStatus b) { + final long aId = getLogIdFromName(a.getPath().getName()); + final long bId = getLogIdFromName(b.getPath().getName()); + return Long.compare(aId, bId); + } + }; private FileStatus[] getLogFiles() throws IOException { try { @@ -1359,7 +1355,7 @@ public class WALProcedureStore extends ProcedureStoreBase { * Loads given log file and it's tracker. */ private ProcedureWALFile initOldLog(final FileStatus logFile, final Path walArchiveDir) - throws IOException { + throws IOException { final ProcedureWALFile log = new ProcedureWALFile(fs, logFile); if (logFile.getLen() == 0) { LOG.warn("Remove uninitialized log: {}", logFile); @@ -1392,19 +1388,18 @@ public class WALProcedureStore extends ProcedureStoreBase { } /** - * Parses a directory of WALs building up ProcedureState. - * For testing parse and profiling. + * Parses a directory of WALs building up ProcedureState. For testing parse and profiling. * @param args Include pointer to directory of WAL files for a store instance to parse & load. 
*/ - public static void main(String [] args) throws IOException { + public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); if (args == null || args.length != 1) { System.out.println("ERROR: Empty arguments list; pass path to MASTERPROCWALS_DIR."); System.out.println("Usage: WALProcedureStore MASTERPROCWALS_DIR"); System.exit(-1); } - WALProcedureStore store = new WALProcedureStore(conf, new Path(args[0]), null, - new LeaseRecovery() { + WALProcedureStore store = + new WALProcedureStore(conf, new Path(args[0]), null, new LeaseRecovery() { @Override public void recoverFileLease(FileSystem fs, Path path) throws IOException { // no-op @@ -1412,7 +1407,8 @@ public class WALProcedureStore extends ProcedureStoreBase { }); try { store.start(16); - ProcedureExecutor pe = new ProcedureExecutor<>(conf, new Object()/*Pass anything*/, store); + ProcedureExecutor pe = + new ProcedureExecutor<>(conf, new Object()/* Pass anything */, store); pe.init(1, true); } finally { store.stop(true); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java index 3e95de56f25..0a88b3fc206 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2.util; import java.io.IOException; import java.io.OutputStream; import java.util.Arrays; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. - * e.g. you write some data and you want to prepend an header that contains the data len or cksum. - * + * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. e.g. you + * write some data and you want to prepend an header that contains the data len or cksum. * ByteSlot slot = new ByteSlot(); * // write data * slot.write(...); @@ -78,13 +75,13 @@ public class ByteSlot extends OutputStream { public void writeAt(int offset, int b) { head = Math.min(head, offset); - buf[offset] = (byte)b; + buf[offset] = (byte) b; } @Override public void write(int b) { ensureCapacity(size + 1); - buf[size++] = (byte)b; + buf[size++] = (byte) b; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java index fa796ae9742..19c4f8bcb58 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Objects; import java.util.concurrent.DelayQueue; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -30,7 +29,8 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Evolving public final class DelayedUtil { - private DelayedUtil() { } + private DelayedUtil() { + } /** * Add a timeout to a Delay @@ -78,7 +78,7 @@ public final class DelayedUtil { * @return null (if an interrupt) or an instance of E; resets interrupt on calling thread. */ public static E takeWithoutInterrupt(final DelayQueue queue, - final long timeout, final TimeUnit timeUnit) { + final long timeout, final TimeUnit timeUnit) { try { return queue.poll(timeout, timeUnit); } catch (InterruptedException e) { @@ -104,7 +104,7 @@ public final class DelayedUtil { private static long getTimeout(final Delayed o) { assert o instanceof DelayedWithTimeout : "expected DelayedWithTimeout instance, got " + o; - return ((DelayedWithTimeout)o).getTimeout(); + return ((DelayedWithTimeout) o).getTimeout(); } public static abstract class DelayedObject implements DelayedWithTimeout { @@ -146,7 +146,7 @@ public final class DelayedUtil { return false; } - return Objects.equals(getObject(), ((DelayedContainer)other).getObject()); + return Objects.equals(getObject(), ((DelayedContainer) other).getObject()); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java index fddc999bec3..cddfd94d3da 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,8 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Evolving public final class StringUtils { - private StringUtils() {} + private StringUtils() { + } public static String humanTimeDiff(long timeDiff) { if (timeDiff < 1000) { @@ -31,17 +32,17 @@ public final class StringUtils { } StringBuilder buf = new StringBuilder(); - long hours = timeDiff / (60*60*1000); - long rem = (timeDiff % (60*60*1000)); - long minutes = rem / (60*1000); - rem = rem % (60*1000); + long hours = timeDiff / (60 * 60 * 1000); + long rem = (timeDiff % (60 * 60 * 1000)); + long minutes = rem / (60 * 1000); + rem = rem % (60 * 1000); float seconds = rem / 1000.0f; - if (hours != 0){ + if (hours != 0) { buf.append(hours); buf.append(" hrs, "); } - if (minutes != 0){ + if (minutes != 0) { buf.append(minutes); buf.append(" mins, "); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java index 6c66a49c201..926a46e9c56 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -228,8 +228,9 @@ public final class ProcedureTestingUtility { return; } - if (procExecutor.testing.killBeforeStoreUpdate || - procExecutor.testing.toggleKillBeforeStoreUpdate) { + if ( + procExecutor.testing.killBeforeStoreUpdate || procExecutor.testing.toggleKillBeforeStoreUpdate + ) { assertEquals("expected only one executor running during test with kill/restart", 1, procExecutor.getCorePoolSize()); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java index 4f3c443faa9..b8642b675f5 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,12 +36,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestChildProcedures { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestChildProcedures.class); + HBaseClassTestRule.forClass(TestChildProcedures.class); private static final Logger LOG = LoggerFactory.getLogger(TestChildProcedures.class); @@ -109,11 +109,9 @@ public class TestChildProcedures { ProcedureTestingUtility.assertProcNotFailed(procExecutor, procId); } - /** - * Test the state setting that happens after store to WAL; in particular the bit where we - * set the parent runnable again after its children have all completed successfully. - * See HBASE-20978. 
+ * Test the state setting that happens after store to WAL; in particular the bit where we set the + * parent runnable again after its children have all completed successfully. See HBASE-20978. */ @Test public void testChildLoadWithRestartAfterChildSuccess() throws Exception { @@ -170,7 +168,8 @@ public class TestChildProcedures { } public static class TestRootProcedure extends SequentialProcedure { - public TestRootProcedure() {} + public TestRootProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { @@ -194,7 +193,8 @@ public class TestChildProcedures { } public static class TestChildProcedure extends SequentialProcedure { - public TestChildProcedure() {} + public TestChildProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java index 178b0cbc98a..e3965a51aa6 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -103,7 +103,7 @@ public class TestForceUpdateProcedure { @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { EXCHANGER.exchange(Boolean.TRUE); setState(ProcedureState.WAITING_TIMEOUT); setTimeout(Integer.MAX_VALUE); @@ -116,7 +116,7 @@ public class TestForceUpdateProcedure { @SuppressWarnings("unchecked") @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { return new Procedure[] { new NoopProcedure<>(), new WaitingProcedure() }; } } @@ -126,7 +126,7 @@ public class TestForceUpdateProcedure { @SuppressWarnings("unchecked") @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (EXCHANGER.exchange(Boolean.TRUE)) { return new Procedure[] { this }; } else { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java index 9f24403dc7d..58c3bb3277a 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java index 976b718d79b..f4ec5a2ad56 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -40,12 +39,12 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; - -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureBypass { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule - .forClass(TestProcedureBypass.class); + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestProcedureBypass.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureBypass.class); @@ -77,11 +76,9 @@ public class TestProcedureBypass { logDir = new Path(testDir, "proc-logs"); procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir); - procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, - procStore); + procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, procStore); procStore.start(PROCEDURE_EXECUTOR_SLOTS); - ProcedureTestingUtility - .initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); + ProcedureTestingUtility.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); } @Test @@ -89,7 +86,7 @@ public class TestProcedureBypass { final SuspendProcedure proc = new SuspendProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 30000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -100,9 +97,9 @@ public class TestProcedureBypass { final StuckProcedure proc = new StuckProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 1000, true, false)); - //Since the procedure is stuck there, we need to restart the executor to recovery. + // Since the procedure is stuck there, we need to restart the executor to recovery. 
ProcedureTestingUtility.restart(procExecutor); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -113,10 +110,9 @@ public class TestProcedureBypass { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(suspendProcedure.getProcId(), 1000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -125,7 +121,7 @@ public class TestProcedureBypass { @Test public void testBypassingStuckStateMachineProcedure() throws Exception { final StuckStateMachineProcedure proc = - new StuckStateMachineProcedure(procEnv, StuckStateMachineState.START); + new StuckStateMachineProcedure(procEnv, StuckStateMachineState.START); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); // bypass the procedure @@ -141,10 +137,9 @@ public class TestProcedureBypass { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(rootId, 1000, false, true)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -176,8 +171,7 @@ public class TestProcedureBypass { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure throw new ProcedureSuspendedException(); } @@ -201,7 +195,6 @@ public class TestProcedureBypass { } - public static class RootProcedure extends ProcedureTestingUtility.NoopProcedure { private boolean childSpwaned = false; @@ -210,11 +203,10 @@ public class TestProcedureBypass { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { if (!childSpwaned) { childSpwaned = true; - return new Procedure[] {new SuspendProcedure()}; + return new Procedure[] { new SuspendProcedure() }; } else { return null; } @@ -222,14 +214,13 @@ public class TestProcedureBypass { } public static class WaitingTimeoutProcedure - extends ProcedureTestingUtility.NoopProcedure { + extends ProcedureTestingUtility.NoopProcedure { public 
WaitingTimeoutProcedure() { super(); } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure setTimeout(50000); setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); @@ -246,11 +237,13 @@ public class TestProcedureBypass { } public enum StuckStateMachineState { - START, THEN, END + START, + THEN, + END } - public static class StuckStateMachineProcedure extends - ProcedureTestingUtility.NoopStateMachineProcedure { + public static class StuckStateMachineProcedure + extends ProcedureTestingUtility.NoopStateMachineProcedure { private AtomicBoolean stop = new AtomicBoolean(false); public StuckStateMachineProcedure() { @@ -263,7 +256,7 @@ public class TestProcedureBypass { @Override protected Flow executeFromState(TestProcEnv env, StuckStateMachineState tState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { switch (tState) { case START: LOG.info("PHASE 1: START"); @@ -292,5 +285,4 @@ public class TestProcedureBypass { } } - } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java index 82917ea5315..f54af62ef70 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,6 @@ public class TestProcedureCleanup { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestProcedureCleanup.class); - private static final Logger LOG = LoggerFactory.getLogger(TestProcedureCleanup.class); private static final int PROCEDURE_EXECUTOR_SLOTS = 2; @@ -95,8 +94,7 @@ public class TestProcedureCleanup { LOG.info("Begin to execute " + rootProc); // wait until the child procedure arrival htu.waitFor(10000, () -> procExecutor.getProcedures().size() >= 2); - SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor - .getProcedures().get(1); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().get(1); // wait until the suspendProcedure executed suspendProcedure.latch.countDown(); Thread.sleep(100); @@ -188,7 +186,6 @@ public class TestProcedureCleanup { fs.rename(tmpFile, file.getPath()); } - public static final class ExchangeProcedure extends ProcedureTestingUtility.NoopProcedure { private final Exchanger exchanger = new Exchanger<>(); @@ -196,7 +193,7 @@ public class TestProcedureCleanup { @SuppressWarnings("unchecked") @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (exchanger.exchange(Boolean.TRUE)) { return new Procedure[] { this }; } else { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java index f13a46c2f7c..039b25d347e 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,12 +41,12 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int32Value; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureEvents { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureEvents.class); + HBaseClassTestRule.forClass(TestProcedureEvents.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureEvents.class); @@ -81,11 +81,10 @@ public class TestProcedureEvents { } /** - * Tests being able to suspend a Procedure for N timeouts and then failing.s - * Resets the timeout after each elapses. See {@link TestTimeoutEventProcedure} for example - * of how to do this sort of trickery with the ProcedureExecutor; i.e. suspend for a while, - * check for a condition and if not set, suspend again, etc., ultimately failing or succeeding - * eventually. + * Tests being able to suspend a Procedure for N timeouts and then failing.s Resets the timeout + * after each elapses. See {@link TestTimeoutEventProcedure} for example of how to do this sort of + * trickery with the ProcedureExecutor; i.e. suspend for a while, check for a condition and if not + * set, suspend again, etc., ultimately failing or succeeding eventually. 
*/ @Test public void testTimeoutEventProcedure() throws Exception { @@ -110,7 +109,7 @@ public class TestProcedureEvents { } private void testTimeoutEventProcedureDoubleExecution(final boolean killIfSuspended) - throws Exception { + throws Exception { TestTimeoutEventProcedure proc = new TestTimeoutEventProcedure(1000, 3); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExecutor, true); ProcedureTestingUtility.setKillIfSuspended(procExecutor, killIfSuspended); @@ -122,20 +121,19 @@ public class TestProcedureEvents { /** * This Event+Procedure exhibits following behavior: *

<ul>
-   *   <li>
-   *     On procedure execute()
-   *     <ul>
-   *       <li>If had enough timeouts, abort the procedure. Else....</li>
-   *       <li>Suspend the event and add self to its suspend queue</li>
-   *       <li>Go into waiting state</li>
-   *     </ul>
-   *   </li>
-   *   <li>
-   *     On waiting timeout
-   *     <ul>
-   *       <li>Wake the event (which adds this procedure back into scheduler queue), and set own's
-   *       state to RUNNABLE (so can be executed again).</li>
-   *     </ul>
-   *   </li>
+   * <li>On procedure execute()
+   * <ul>
+   * <li>If had enough timeouts, abort the procedure. Else....</li>
+   * <li>Suspend the event and add self to its suspend queue</li>
+   * <li>Go into waiting state</li>
+   * </ul>
+   * </li>
+   * <li>On waiting timeout
+   * <ul>
+   * <li>Wake the event (which adds this procedure back into scheduler queue), and set own's state
+   * to RUNNABLE (so can be executed again).</li>
+   * </ul>
+   * </li>
   * </ul>
      */ public static class TestTimeoutEventProcedure extends NoopProcedure { @@ -144,7 +142,8 @@ public class TestProcedureEvents { private final AtomicInteger ntimeouts = new AtomicInteger(0); private int maxTimeouts = 1; - public TestTimeoutEventProcedure() {} + public TestTimeoutEventProcedure() { + } public TestTimeoutEventProcedure(final int timeoutMsec, final int maxTimeouts) { this.maxTimeouts = maxTimeouts; @@ -190,8 +189,7 @@ public class TestProcedureEvents { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value.Builder ntimeoutsBuilder = Int32Value.newBuilder().setValue(ntimeouts.get()); serializer.serialize(ntimeoutsBuilder.build()); @@ -200,8 +198,7 @@ public class TestProcedureEvents { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value ntimeoutsValue = serializer.deserialize(Int32Value.class); ntimeouts.set(ntimeoutsValue.getValue()); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java index bbe37780d2c..a89f0f64ca7 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,11 +42,11 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecution { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureExecution.class); + HBaseClassTestRule.forClass(TestProcedureExecution.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureExecution.class); @@ -204,7 +204,8 @@ public class TestProcedureExecution { public static class TestFaultyRollback extends SequentialProcedure { private int retries = 0; - public TestFaultyRollback() { } + public TestFaultyRollback() { + } @Override protected Procedure[] execute(Void env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java index 7f130caf4a7..41f521cd29a 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,12 +37,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecutor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureExecutor.class); + HBaseClassTestRule.forClass(TestProcedureExecutor.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureExecutor.class); @@ -155,8 +155,8 @@ public class TestProcedureExecutor { if (procExecutor.getWorkerThreadCount() == expectedThreads) { break; } - LOG.debug("waiting for thread count=" + expectedThreads + - " current=" + procExecutor.getWorkerThreadCount()); + LOG.debug("waiting for thread count=" + expectedThreads + " current=" + + procExecutor.getWorkerThreadCount()); Threads.sleepWithoutInterrupt(250); } return procExecutor.getWorkerThreadCount(); @@ -189,5 +189,6 @@ public class TestProcedureExecutor { } } - private static class TestProcEnv { } + private static class TestProcEnv { + } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java index 75c8d16485b..9e83cc2e9e4 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,12 +36,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureInMemoryChore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureInMemoryChore.class); + HBaseClassTestRule.forClass(TestProcedureInMemoryChore.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureInMemoryChore.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java index 4d9df1a5389..345ee520b5f 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,11 +37,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureMetrics { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureMetrics.class); + HBaseClassTestRule.forClass(TestProcedureMetrics.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureMetrics.class); @@ -204,7 +204,7 @@ public class TestProcedureMetrics { } public ProcedureMetrics(boolean success, boolean yield, int yieldCount, - ProcedureMetrics[] subprocs) { + ProcedureMetrics[] subprocs) { this.success = success; this.yield = yield; this.yieldCount = yieldCount; @@ -218,8 +218,8 @@ public class TestProcedureMetrics { } @Override - protected Procedure[] execute(TestProcEnv env) throws ProcedureYieldException, - ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (this.yield) { if (yieldNum < yieldCount) { yieldNum++; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java index e4d039085da..6ebc51471a7 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,11 +43,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureNonce { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureNonce.class); + HBaseClassTestRule.forClass(TestProcedureNonce.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureNonce.class); @@ -174,8 +174,8 @@ public class TestProcedureNonce { testConcurrentNonceRegistration(false, 890, 55555); } - private void testConcurrentNonceRegistration(final boolean submitProcedure, - final long nonceGroup, final long nonce) throws IOException { + private void testConcurrentNonceRegistration(final boolean submitProcedure, final long nonceGroup, + final long nonce) throws IOException { // register the nonce final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce); @@ -229,8 +229,7 @@ public class TestProcedureNonce { // register the nonce t2BeforeNonceRegisteredLatch.countDown(); - assertFalse("unexpected non registered nonce", - procExecutor.registerNonce(nonceKey) < 0); + assertFalse("unexpected non registered nonce", procExecutor.registerNonce(nonceKey) < 0); } catch (Throwable e) { t2Exception.set(e); } finally { @@ -256,7 +255,8 @@ public class TestProcedureNonce { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -269,7 +269,8 @@ public class TestProcedureNonce { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java index acfd4f4a696..72b7f61471f 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,11 +45,11 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.Int32Value; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureRecovery { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureRecovery.class); + HBaseClassTestRule.forClass(TestProcedureRecovery.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureRecovery.class); @@ -98,7 +98,8 @@ public class TestProcedureRecovery { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -110,7 +111,8 @@ public class TestProcedureRecovery { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { @@ -130,9 +132,8 @@ public class TestProcedureRecovery { step++; Threads.sleepWithoutInterrupt(procSleepInterval); if (isAborted()) { - setFailure(new RemoteProcedureException(getClass().getName(), - new ProcedureAbortedException( - "got an abort at " + getClass().getName() + " step=" + step))); + setFailure(new RemoteProcedureException(getClass().getName(), new ProcedureAbortedException( + "got an abort at " + getClass().getName() + " step=" + step))); return null; } return null; @@ -155,7 +156,7 @@ public class TestProcedureRecovery { boolean aborted = abort.get(); BaseTestStepProcedure proc = this; while (proc.hasParent() && !aborted) { - proc = (BaseTestStepProcedure)procExecutor.getProcedure(proc.getParentProcId()); + proc = (BaseTestStepProcedure) procExecutor.getProcedure(proc.getParentProcId()); aborted = proc.isAborted(); } return aborted; @@ -163,7 +164,8 @@ public class TestProcedureRecovery { } public static class TestMultiStepProcedure extends BaseTestStepProcedure { - public TestMultiStepProcedure() { } + public TestMultiStepProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -172,7 +174,8 @@ public class TestProcedureRecovery { } public static class Step1Procedure extends BaseTestStepProcedure { - public Step1Procedure() { } + public Step1Procedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -182,7 +185,8 @@ public class TestProcedureRecovery { } public static class Step2Procedure extends BaseTestStepProcedure { - public Step2Procedure() { } + public Step2Procedure() { + } } } @@ -294,10 +298,16 @@ public class TestProcedureRecovery { } public static class TestStateMachineProcedure - extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3, DONE } + extends StateMachineProcedure { + enum State { + STATE_1, + STATE_2, + STATE_3, + DONE + } - public TestStateMachineProcedure() {} + public TestStateMachineProcedure() { + } public TestStateMachineProcedure(final boolean testSubmitChildProc) { this.submitChildProc = testSubmitChildProc; @@ -388,16 +398,14 @@ public class TestProcedureRecovery { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { super.serializeStateData(serializer); 
Int32Value.Builder builder = Int32Value.newBuilder().setValue(iResult); serializer.serialize(builder.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { super.deserializeStateData(serializer); Int32Value value = serializer.deserialize(Int32Value.class); iResult = value.getValue(); @@ -515,7 +523,7 @@ public class TestProcedureRecovery { try { FileStatus[] files = fs.listStatus(logDir); if (files != null && files.length > 0) { - for (FileStatus file: files) { + for (FileStatus file : files) { assertTrue(file.toString(), file.isFile()); LOG.debug("log file " + file.getPath() + " size=" + file.getLen()); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java index db54f22f535..c2564eabd72 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.procedure2; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; @@ -51,7 +52,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int64Value; public class TestProcedureReplayOrder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureReplayOrder.class); + HBaseClassTestRule.forClass(TestProcedureReplayOrder.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureReplayOrder.class); @@ -133,7 +134,7 @@ public class TestProcedureReplayOrder { } private void submitProcedures(final int nthreads, final int nprocPerThread, - final Class procClazz) throws Exception { + final Class procClazz) throws Exception { Thread[] submitThreads = new Thread[nthreads]; for (int i = 0; i < submitThreads.length; ++i) { submitThreads[i] = new Thread() { @@ -141,8 +142,8 @@ public class TestProcedureReplayOrder { public void run() { for (int i = 0; i < nprocPerThread; ++i) { try { - procExecutor.submitProcedure((Procedure) - procClazz.getDeclaredConstructor().newInstance()); + procExecutor + .submitProcedure((Procedure) procClazz.getDeclaredConstructor().newInstance()); } catch (Exception e) { LOG.error("unable to instantiate the procedure", e); fail("failure during the proc.newInstance(): " + e.getMessage()); @@ -197,7 +198,8 @@ public class TestProcedureReplayOrder { } @Override - protected void rollback(TestProcedureEnv env) { } + protected void rollback(TestProcedureEnv env) { + } @Override protected boolean abort(TestProcedureEnv env) { @@ -205,15 +207,13 @@ public class TestProcedureReplayOrder { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value.Builder builder = Int64Value.newBuilder().setValue(execId); serializer.serialize(builder.build()); } 
@Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value value = serializer.deserialize(Int64Value.class); execId = value.getValue(); step = 2; @@ -221,7 +221,8 @@ public class TestProcedureReplayOrder { } public static class TestSingleStepProcedure extends TestProcedure { - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { @@ -244,7 +245,8 @@ public class TestProcedureReplayOrder { } public static class TestTwoStepProcedure extends TestProcedure { - public TestTwoStepProcedure() { } + public TestTwoStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java index fc12924244e..21d8efb1d15 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ import org.junit.rules.TestName; public class TestProcedureRollbackAIOOB { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureRollbackAIOOB.class); + HBaseClassTestRule.forClass(TestProcedureRollbackAIOOB.class); private static final HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility(); @@ -53,7 +53,7 @@ public class TestProcedureRollbackAIOOB { @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { latch.await(); if (scheduled) { return null; @@ -67,7 +67,7 @@ public class TestProcedureRollbackAIOOB { @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { setFailure("Inject error", new RuntimeException("Inject error")); return null; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java index f56cdb31b6b..186d3859534 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,11 +34,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSchedulerConcurrency { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureSchedulerConcurrency.class); + HBaseClassTestRule.forClass(TestProcedureSchedulerConcurrency.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureEvents.class); @@ -105,8 +105,10 @@ public class TestProcedureSchedulerConcurrency { } if (wakeCount.get() != oldWakeCount) { lastUpdate = EnvironmentEdgeManager.currentTime(); - } else if (wakeCount.get() >= NRUNS && - (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) { + } else if ( + wakeCount.get() >= NRUNS + && (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD + ) { break; } Threads.sleepWithoutInterrupt(25); @@ -119,7 +121,7 @@ public class TestProcedureSchedulerConcurrency { @Override public void run() { while (true) { - TestProcedureWithEvent proc = (TestProcedureWithEvent)sched.poll(); + TestProcedureWithEvent proc = (TestProcedureWithEvent) sched.poll(); if (proc == null) { continue; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java index 4b887928514..108acd11af6 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,7 +70,7 @@ public class TestProcedureSkipPersistence { @Override protected Procedure[] execute(ProcEnv env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (STEP == 0) { STEP = 1; setTimeout(60 * 60 * 1000); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java index d17c00c620f..94c48647635 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSuspended { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureSuspended.class); + HBaseClassTestRule.forClass(TestProcedureSuspended.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureSuspended.class); @@ -181,8 +181,8 @@ public class TestProcedureSuspended { private AtomicBoolean lock = null; private boolean hasLock = false; - public TestLockProcedure(final AtomicBoolean lock, final String key, - final boolean throwYield, final boolean throwSuspend) { + public TestLockProcedure(final AtomicBoolean lock, final String key, final boolean throwYield, + final boolean throwSuspend) { this.lock = lock; this.key = key; this.throwYield = throwYield; @@ -203,7 +203,7 @@ public class TestProcedureSuspended { @Override protected Procedure[] execute(final TestProcEnv env) - throws ProcedureYieldException, ProcedureSuspendedException { + throws ProcedureYieldException, ProcedureSuspendedException { LOG.info("EXECUTE " + this + " suspend " + (lock != null)); timestamps.add(env.nextTimestamp()); if (triggerRollback) { @@ -259,13 +259,11 @@ public class TestProcedureSuspended { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java index 807614529fc..1ff020d19f1 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,17 +30,18 @@ import org.junit.experimental.categories.Category; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureToString { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureToString.class); + HBaseClassTestRule.forClass(TestProcedureToString.class); /** * A do-nothing environment for BasicProcedure. */ - static class BasicProcedureEnv {}; + static class BasicProcedureEnv { + }; /** * A do-nothing basic procedure just for testing toString. 
@@ -48,8 +49,8 @@ public class TestProcedureToString { static class BasicProcedure extends Procedure { @Override protected Procedure[] execute(BasicProcedureEnv env) - throws ProcedureYieldException, InterruptedException { - return new Procedure [] {this}; + throws ProcedureYieldException, InterruptedException { + return new Procedure[] { this }; } @Override @@ -62,13 +63,11 @@ public class TestProcedureToString { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -106,17 +105,17 @@ public class TestProcedureToString { * Do-nothing SimpleMachineProcedure for checking its toString. */ static class SimpleStateMachineProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { @Override - protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow executeFromState( - BasicProcedureEnv env, ServerCrashState state) - throws ProcedureYieldException, InterruptedException { + protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow + executeFromState(BasicProcedureEnv env, ServerCrashState state) + throws ProcedureYieldException, InterruptedException { return null; } @Override - protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) throws IOException, - InterruptedException { + protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) + throws IOException, InterruptedException { } @Override diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java index 4d57c37ac61..885ba88a832 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,13 +51,14 @@ public class TestProcedureUtil { // check Procedure to protobuf conversion final TestProcedure proc1 = new TestProcedure(10, 1, new byte[] { 65 }); final ProcedureProtos.Procedure proto1 = ProcedureUtil.convertToProtoProcedure(proc1); - final TestProcedure proc2 = (TestProcedure)ProcedureUtil.convertToProcedure(proto1); + final TestProcedure proc2 = (TestProcedure) ProcedureUtil.convertToProcedure(proto1); final ProcedureProtos.Procedure proto2 = ProcedureUtil.convertToProtoProcedure(proc2); assertEquals(false, proto2.hasResult()); assertEquals("Procedure protobuf does not match", proto1, proto2); } public static class TestProcedureNoDefaultConstructor extends TestProcedure { - public TestProcedureNoDefaultConstructor(int x) {} + public TestProcedureNoDefaultConstructor(int x) { + } } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java index 7f44fc31322..3bf6c474114 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java index b076fb906ac..df648413b89 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStateMachineProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStateMachineProcedure.class); + HBaseClassTestRule.forClass(TestStateMachineProcedure.class); private static final Logger LOG = LoggerFactory.getLogger(TestStateMachineProcedure.class); @@ -62,7 +62,7 @@ public class TestStateMachineProcedure { // we are going to serialize the exception in the test, // so the instance comparison will not match - return getMessage().equals(((Exception)other).getMessage()); + return getMessage().equals(((Exception) other).getMessage()); } @Override @@ -179,9 +179,13 @@ public class TestStateMachineProcedure { assertEquals(TEST_FAILURE_EXCEPTION, cause); } - public enum TestSMProcedureState { STEP_1, STEP_2 }; + public enum TestSMProcedureState { + STEP_1, + STEP_2 + }; + public static class TestSMProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { @Override protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { LOG.info("EXEC " + state + " " + this); @@ -227,7 +231,7 @@ public class TestStateMachineProcedure { } public static class TestSMProcedureBadRollback - extends StateMachineProcedure { + extends StateMachineProcedure { @Override protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { LOG.info("EXEC " + state + " " + this); @@ -244,6 +248,7 @@ public class TestStateMachineProcedure { } return Flow.HAS_MORE_STATE; } + @Override protected void rollbackState(TestProcEnv env, TestSMProcedureState state) { LOG.info("ROLLBACK " + state + " " + this); @@ -266,8 +271,7 @@ public class TestStateMachineProcedure { } @Override - protected void rollback(final TestProcEnv env) - throws IOException, InterruptedException { + protected void rollback(final TestProcEnv env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -275,8 +279,8 @@ public class TestStateMachineProcedure { updateTimestamp(); rollbackState(env, getCurrentState()); throw new IOException(); - } catch(IOException e) { - //do nothing for now + } catch (IOException e) { + // do nothing for now } finally { stateCount--; updateTimestamp(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java index e359e5cedfe..f84e1f70940 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,11 +40,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestYieldProcedures { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestYieldProcedures.class); + HBaseClassTestRule.forClass(TestYieldProcedures.class); private static final Logger LOG = LoggerFactory.getLogger(TestYieldProcedures.class); @@ -188,8 +188,12 @@ public class TestYieldProcedures { } public static class TestStateMachineProcedure - extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3 } + extends StateMachineProcedure { + enum State { + STATE_1, + STATE_2, + STATE_3 + } public static class ExecutionInfo { private final boolean rollback; @@ -225,7 +229,7 @@ public class TestYieldProcedures { } public TestStateMachineProcedure(boolean abortOnFinalStep, - boolean throwInterruptOnceOnEachStep) { + boolean throwInterruptOnceOnEachStep) { this.abortOnFinalStep = abortOnFinalStep; this.throwInterruptOnceOnEachStep = throwInterruptOnceOnEachStep; } @@ -236,7 +240,7 @@ public class TestYieldProcedures { @Override protected StateMachineProcedure.Flow executeFromState(TestProcEnv env, State state) - throws InterruptedException { + throws InterruptedException { final long ts = env.nextTimestamp(); LOG.info(getProcId() + " execute step " + state + " ts=" + ts); executionInfo.add(new ExecutionInfo(ts, state, false)); @@ -266,8 +270,7 @@ public class TestYieldProcedures { } @Override - protected void rollbackState(TestProcEnv env, final State state) - throws InterruptedException { + protected void rollbackState(TestProcEnv env, final State state) throws InterruptedException { final long ts = env.nextTimestamp(); LOG.debug(getProcId() + " rollback state " + state + " ts=" + ts); executionInfo.add(new ExecutionInfo(ts, state, true)); @@ -347,13 +350,11 @@ public class TestYieldProcedures { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -364,7 +365,8 @@ public class TestYieldProcedures { private int yieldCalls; private int pollCalls; - public TestScheduler() {} + public TestScheduler() { + } @Override public void addFront(final Procedure proc) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java index d88d93e571f..d937ba6ace1 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -50,24 +51,24 @@ public abstract class ProcedureStorePerformanceEvaluation future : futures) { - long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - - EnvironmentEdgeManager.currentTime(); + long timeout = + start + WORKER_THREADS_TIMEOUT_SEC * 1000 - EnvironmentEdgeManager.currentTime(); failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE)); } } catch (Exception e) { @@ -219,8 +220,8 @@ public abstract class ProcedureStorePerformanceEvaluation[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { return null; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java index eb6446de7ea..852d11f01fe 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -42,24 +42,24 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { protected static final HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility(); // Command line options and defaults. - public static int DEFAULT_NUM_PROCS = 1000000; // 1M - public static Option NUM_PROCS_OPTION = new Option("procs", true, - "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); + public static int DEFAULT_NUM_PROCS = 1000000; // 1M + public static Option NUM_PROCS_OPTION = + new Option("procs", true, "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. 
Default: " + DEFAULT_NUM_WALS); - public static int DEFAULT_STATE_SIZE = 1024; // 1KB - public static Option STATE_SIZE_OPTION = new Option("state_size", true, - "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE - + " bytes"); + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); + public static int DEFAULT_STATE_SIZE = 1024; // 1KB + public static Option STATE_SIZE_OPTION = + new Option("state_size", true, "Size of serialized state in bytes to write on update. Default: " + + DEFAULT_STATE_SIZE + " bytes"); public static int DEFAULT_UPDATES_PER_PROC = 5; public static Option UPDATES_PER_PROC_OPTION = new Option("updates_per_proc", true, - "Number of update states to write for each proc. Default: " + DEFAULT_UPDATES_PER_PROC); + "Number of update states to write for each proc. Default: " + DEFAULT_UPDATES_PER_PROC); public static double DEFAULT_DELETE_PROCS_FRACTION = 0.50; public static Option DELETE_PROCS_FRACTION_OPTION = new Option("delete_procs_fraction", true, - "Fraction of procs for which to write delete state. Distribution of procs chosen for " - + "delete is uniform across all procs. Default: " + DEFAULT_DELETE_PROCS_FRACTION); + "Fraction of procs for which to write delete state. Distribution of procs chosen for " + + "delete is uniform across all procs. Default: " + DEFAULT_DELETE_PROCS_FRACTION); public int numProcs; public int updatesPerProc; @@ -69,7 +69,8 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { static byte[] serializedState; private static class LoadCounter implements ProcedureStore.ProcedureLoader { - public LoadCounter() {} + public LoadCounter() { + } @Override public void setMaxProcId(long maxProcId) { @@ -105,10 +106,10 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { numWals = getOptionAsInt(cmd, NUM_WALS_OPTION.getOpt(), DEFAULT_NUM_WALS); int stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE); serializedState = new byte[stateSize]; - updatesPerProc = getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), - DEFAULT_UPDATES_PER_PROC); - deleteProcsFraction = getOptionAsDouble(cmd, DELETE_PROCS_FRACTION_OPTION.getOpt(), - DEFAULT_DELETE_PROCS_FRACTION); + updatesPerProc = + getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), DEFAULT_UPDATES_PER_PROC); + deleteProcsFraction = + getOptionAsDouble(cmd, DELETE_PROCS_FRACTION_OPTION.getOpt(), DEFAULT_DELETE_PROCS_FRACTION); setupConf(); } @@ -140,7 +141,7 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { Set toBeDeletedProcs = new HashSet<>(); // Add n + 1 entries of the proc id for insert + updates. If proc is chosen for delete, add // extra entry which is marked -ve in the loop after shuffle. - for (int procId = 1; procId <= numProcs; ++procId) { + for (int procId = 1; procId <= numProcs; ++procId) { procStatesSequence.addAll(Collections.nCopies(updatesPerProc + 1, procId)); if (ThreadLocalRandom.current().nextFloat() < deleteProcsFraction) { procStatesSequence.add(procId); @@ -161,7 +162,7 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { private void writeWals() throws IOException { List procStates = shuffleProcWriteSequence(); - TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. + TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. int numProcsPerWal = numWals > 0 ? 
procStates.size() / numWals : Integer.MAX_VALUE; long startTime = EnvironmentEdgeManager.currentTime(); long lastTime = startTime; @@ -179,15 +180,15 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { } if (i > 0 && i % numProcsPerWal == 0) { long currentTime = EnvironmentEdgeManager.currentTime(); - System.out.println("Forcing wall roll. Time taken on last WAL: " + - (currentTime - lastTime) / 1000.0f + " sec"); + System.out.println("Forcing wall roll. Time taken on last WAL: " + + (currentTime - lastTime) / 1000.0f + " sec"); store.rollWriterForTesting(); lastTime = currentTime; } } long timeTaken = EnvironmentEdgeManager.currentTime() - startTime; System.out.println("\n\nDone writing WALs.\nNum procs : " + numProcs + "\nTotal time taken : " - + StringUtils.humanTimeDiff(timeTaken) + "\n\n"); + + StringUtils.humanTimeDiff(timeTaken) + "\n\n"); } private void storeRestart(ProcedureStore.ProcedureLoader loader) throws IOException { @@ -203,11 +204,11 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec"); System.out.println("******************************************"); System.out.println("Raw format for scripts"); - System.out.println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " - + "total_time_ms=%s]", - NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), serializedState.length, - UPDATES_PER_PROC_OPTION.getOpt(), updatesPerProc, DELETE_PROCS_FRACTION_OPTION.getOpt(), - deleteProcsFraction, NUM_WALS_OPTION.getOpt(), numWals, timeTaken)); + System.out + .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", + NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), serializedState.length, + UPDATES_PER_PROC_OPTION.getOpt(), updatesPerProc, DELETE_PROCS_FRACTION_OPTION.getOpt(), + deleteProcsFraction, NUM_WALS_OPTION.getOpt(), numWals, timeTaken)); } public void tearDownProcedureStore() { @@ -216,7 +217,7 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { store.getFileSystem().delete(store.getWALDir(), true); } catch (IOException e) { System.err.println("Error: Couldn't delete log dir. You can delete it manually to free up " - + "disk space. Location: " + store.getWALDir().toString()); + + "disk space. Location: " + store.getWALDir().toString()); System.err.println(e.toString()); } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java index cab44264f29..f9bad936ee2 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +36,8 @@ public class ProcedureWALPerformanceEvaluation // Command line options and defaults. public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. 
Default: " + DEFAULT_NUM_WALS); + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); private long numProcsPerWal = Long.MAX_VALUE; // never roll wall based on this value. private int numWals; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java index 9d897cf878c..251cb39842b 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java index e3064c9ab82..46164be6469 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -33,11 +32,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureStoreTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureStoreTracker.class); + HBaseClassTestRule.forClass(TestProcedureStoreTracker.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureStoreTracker.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java index 3d46883f2de..4bdd3f268a1 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.procedure2.store.wal; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; @@ -43,12 +44,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStressWALProcedureStore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStressWALProcedureStore.class); + HBaseClassTestRule.forClass(TestStressWALProcedureStore.class); private static final Logger LOG = LoggerFactory.getLogger(TestWALProcedureStore.class); @@ -115,7 +116,8 @@ public class TestStressWALProcedureStore { for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) { try { Thread.sleep(0, rand.nextInt(15)); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } procStore.update(proc); } // Delete @@ -136,7 +138,8 @@ public class TestStressWALProcedureStore { assertEquals(1, procStore.getActiveLogs().size()); } - @Ignore @Test // REENABLE after merge of + @Ignore + @Test // REENABLE after merge of // https://github.com/google/protobuf/issues/2228#issuecomment-252058282 public void testEntrySizeLimit() throws Exception { final int NITEMS = 20; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java index c8335eeb7d0..01fc8666dae 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,11 +61,11 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.Int64Value; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestWALProcedureStore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALProcedureStore.class); + HBaseClassTestRule.forClass(TestWALProcedureStore.class); private static final Logger LOG = LoggerFactory.getLogger(TestWALProcedureStore.class); @@ -161,7 +161,7 @@ public class TestWALProcedureStore { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. + assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. } // Delete procedures in sequential order make sure that only the corresponding wal is deleted @@ -176,7 +176,6 @@ public class TestWALProcedureStore { } } - // Test that wal cleaner doesn't create holes in wal files list i.e. it only deletes files if // they are in the starting of the list. 
@Test @@ -189,7 +188,7 @@ public class TestWALProcedureStore { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. + assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. } for (int i = 1; i < procs.length; i++) { @@ -222,18 +221,18 @@ public class TestWALProcedureStore { TestSequentialProcedure p2 = new TestSequentialProcedure(); procStore.insert(p1, null); procStore.insert(p2, null); - procStore.rollWriterForTesting(); // generates first log with p1 + p2 + procStore.rollWriterForTesting(); // generates first log with p1 + p2 ProcedureWALFile log1 = procStore.getActiveLogs().get(0); procStore.update(p2); - procStore.rollWriterForTesting(); // generates second log with p2 + procStore.rollWriterForTesting(); // generates second log with p2 ProcedureWALFile log2 = procStore.getActiveLogs().get(1); procStore.update(p2); - procStore.rollWriterForTesting(); // generates third log with p2 - procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. + procStore.rollWriterForTesting(); // generates third log with p2 + procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. assertEquals(4, procStore.getActiveLogs().size()); procStore.update(p1); - procStore.rollWriterForTesting(); // generates fourth log with p1 - procStore.removeInactiveLogsForTesting(); // Should remove first two logs. + procStore.rollWriterForTesting(); // generates fourth log with p1 + procStore.removeInactiveLogsForTesting(); // Should remove first two logs. assertEquals(3, procStore.getActiveLogs().size()); assertFalse(procStore.getActiveLogs().contains(log1)); assertFalse(procStore.getActiveLogs().contains(log2)); @@ -418,8 +417,8 @@ public class TestWALProcedureStore { assertEquals(0, loader.getCorruptedCount()); } - private static void assertUpdated(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] updatedProcs, final int[] nonUpdatedProcs) { + private static void assertUpdated(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] updatedProcs, final int[] nonUpdatedProcs) { for (int index : updatedProcs) { long procId = procs[index].getProcId(); assertTrue("Procedure id : " + procId, tracker.isModified(procId)); @@ -430,17 +429,17 @@ public class TestWALProcedureStore { } } - private static void assertDeleted(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] deletedProcs, final int[] nonDeletedProcs) { + private static void assertDeleted(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] deletedProcs, final int[] nonDeletedProcs) { for (int index : deletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.YES, + tracker.isDeleted(procId)); } for (int index : nonDeletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.NO, + tracker.isDeleted(procId)); } } @@ -451,13 +450,13 @@ public class TestWALProcedureStore { procs[i] = new TestSequentialProcedure(); } // Log State (I=insert, U=updated, D=delete) - // | log 1 | log 2 | log 3 | - // 0 | I, D | | | - // 1 | I | | | - // 2 | I | D | | - // 3 | I 
| U | | - // 4 | | I | D | - // 5 | | | I | + // | log 1 | log 2 | log 3 | + // 0 | I, D | | | + // 1 | I | | | + // 2 | I | D | | + // 3 | I | U | | + // 4 | | I | D | + // 5 | | | I | procStore.insert(procs[0], null); procStore.insert(procs[1], null); procStore.insert(procs[2], null); @@ -485,7 +484,7 @@ public class TestWALProcedureStore { htu.getConfiguration().setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, false); final LoadCounter loader = new LoadCounter(); storeRestart(loader); - assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 + assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 assertEquals(0, loader.getCorruptedCount()); // Check the Trackers @@ -493,13 +492,16 @@ public class TestWALProcedureStore { LOG.info("WALs " + walFiles); assertEquals(4, walFiles.size()); LOG.info("Checking wal " + walFiles.get(0)); - assertUpdated(walFiles.get(0).getTracker(), procs, new int[]{0, 1, 2, 3}, new int[] {4, 5}); + assertUpdated(walFiles.get(0).getTracker(), procs, new int[] { 0, 1, 2, 3 }, + new int[] { 4, 5 }); LOG.info("Checking wal " + walFiles.get(1)); - assertUpdated(walFiles.get(1).getTracker(), procs, new int[]{2, 3, 4}, new int[] {0, 1, 5}); + assertUpdated(walFiles.get(1).getTracker(), procs, new int[] { 2, 3, 4 }, + new int[] { 0, 1, 5 }); LOG.info("Checking wal " + walFiles.get(2)); - assertUpdated(walFiles.get(2).getTracker(), procs, new int[]{4, 5}, new int[] {0, 1, 2, 3}); + assertUpdated(walFiles.get(2).getTracker(), procs, new int[] { 4, 5 }, + new int[] { 0, 1, 2, 3 }); LOG.info("Checking global tracker "); - assertDeleted(procStore.getStoreTracker(), procs, new int[]{0, 2, 4}, new int[] {1, 3, 5}); + assertDeleted(procStore.getStoreTracker(), procs, new int[] { 0, 2, 4 }, new int[] { 1, 3, 5 }); } @Test @@ -531,17 +533,17 @@ public class TestWALProcedureStore { // Insert root-procedures TestProcedure[] rootProcs = new TestProcedure[10]; for (int i = 1; i <= rootProcs.length; i++) { - rootProcs[i-1] = new TestProcedure(i, 0); - procStore.insert(rootProcs[i-1], null); - rootProcs[i-1].addStackId(0); - procStore.update(rootProcs[i-1]); + rootProcs[i - 1] = new TestProcedure(i, 0); + procStore.insert(rootProcs[i - 1], null); + rootProcs[i - 1].addStackId(0); + procStore.update(rootProcs[i - 1]); } // insert root-child txn procStore.rollWriterForTesting(); for (int i = 1; i <= rootProcs.length; i++) { TestProcedure b = new TestProcedure(rootProcs.length + i, i); - rootProcs[i-1].addStackId(1); - procStore.insert(rootProcs[i-1], new Procedure[] { b }); + rootProcs[i - 1].addStackId(1); + procStore.insert(rootProcs[i - 1], new Procedure[] { b }); } // insert child updates procStore.rollWriterForTesting(); @@ -629,20 +631,19 @@ public class TestWALProcedureStore { assertEquals(procs.length + 1, status.length); // simulate another active master removing the wals - procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, - new LeaseRecovery() { - private int count = 0; + procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, new LeaseRecovery() { + private int count = 0; - @Override - public void recoverFileLease(FileSystem fs, Path path) throws IOException { - if (++count <= 2) { - fs.delete(path, false); - LOG.debug("Simulate FileNotFound at count=" + count + " for " + path); - throw new FileNotFoundException("test file not found " + path); - } - LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException 
{ + if (++count <= 2) { + fs.delete(path, false); + LOG.debug("Simulate FileNotFound at count=" + count + " for " + path); + throw new FileNotFoundException("test file not found " + path); } - }); + LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + } + }); final LoadCounter loader = new LoadCounter(); procStore.start(PROCEDURE_STORE_SLOTS); @@ -656,7 +657,7 @@ public class TestWALProcedureStore { @Test public void testLogFileAlreadyExists() throws IOException { - final boolean[] tested = {false}; + final boolean[] tested = { false }; WALProcedureStore mStore = Mockito.spy(procStore); Answer ans = new Answer() { @@ -806,20 +807,19 @@ public class TestWALProcedureStore { }); } - private LoadCounter restartAndAssert(long maxProcId, long runnableCount, - int completedCount, int corruptedCount) throws Exception { - return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, - runnableCount, completedCount, corruptedCount); + private LoadCounter restartAndAssert(long maxProcId, long runnableCount, int completedCount, + int corruptedCount) throws Exception { + return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, runnableCount, + completedCount, corruptedCount); } - private void corruptLog(final FileStatus logFile, final long dropBytes) - throws IOException { + private void corruptLog(final FileStatus logFile, final long dropBytes) throws IOException { assertTrue(logFile.getLen() > dropBytes); - LOG.debug("corrupt log " + logFile.getPath() + - " size=" + logFile.getLen() + " drop=" + dropBytes); + LOG.debug( + "corrupt log " + logFile.getPath() + " size=" + logFile.getLen() + " drop=" + dropBytes); Path tmpPath = new Path(testDir, "corrupted.log"); InputStream in = fs.open(logFile.getPath()); - OutputStream out = fs.create(tmpPath); + OutputStream out = fs.create(tmpPath); IOUtils.copyBytes(in, out, logFile.getLen() - dropBytes, true); if (!fs.rename(tmpPath, logFile.getPath())) { throw new IOException("Unable to rename"); @@ -856,8 +856,7 @@ public class TestWALProcedureStore { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value.Builder builder = Int64Value.newBuilder().setValue(procId); @@ -866,8 +865,7 @@ public class TestWALProcedureStore { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value value = serializer.deserialize(Int64Value.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java index 0d494fcdd6b..27f280ad13a 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,12 +28,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestDelayedUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDelayedUtil.class); + HBaseClassTestRule.forClass(TestDelayedUtil.class); private static final Logger LOG = LoggerFactory.getLogger(TestDelayedUtil.class); @@ -50,9 +50,8 @@ public class TestDelayedUtil { ZeroDelayContainer o1cb = new ZeroDelayContainer<>(o1); ZeroDelayContainer o2c = new ZeroDelayContainer<>(o2); - ZeroDelayContainer[] items = new ZeroDelayContainer[] { - lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, - }; + ZeroDelayContainer[] items = + new ZeroDelayContainer[] { lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, }; assertContainersEquals(lnull, items, lnull, onull); assertContainersEquals(l10a, items, l10a, l10b); @@ -65,7 +64,7 @@ public class TestDelayedUtil { } private void assertContainersEquals(final ZeroDelayContainer src, - final ZeroDelayContainer[] items, final ZeroDelayContainer... matches) { + final ZeroDelayContainer[] items, final ZeroDelayContainer... matches) { for (int i = 0; i < items.length; ++i) { boolean shouldMatch = false; for (int j = 0; j < matches.length; ++j) { @@ -75,8 +74,8 @@ public class TestDelayedUtil { } } boolean isMatching = src.equals(items[i]); - assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), - shouldMatch, isMatching); + assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), shouldMatch, + isMatching); } } diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index 8b269c8644e..8c116b910ea 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -36,13 +36,28 @@ --> 3.17.3 + + + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + + + junit + junit + test + + org.apache.maven.plugins maven-source-plugin - + maven-assembly-plugin @@ -56,10 +71,10 @@ secondPartTestsExecution - test test + test true @@ -72,10 +87,10 @@ compile-protoc - generate-sources compile + generate-sources com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} true @@ -95,48 +110,48 @@ com.google.code.maven-replacer-plugin replacer 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + true + + + ([^\.])com.google.protobuf + $1org.apache.hbase.thirdparty.com.google.protobuf + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + - process-sources replace + process-sources - - ${basedir}/target/generated-sources/ - - **/*.java - - - true - - - ([^\.])com.google.protobuf - $1org.apache.hbase.thirdparty.com.google.protobuf - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - org.apache.maven.plugins maven-shade-plugin - package shade + package true true @@ -187,21 +202,6 @@ - - - - - org.apache.hbase.thirdparty - hbase-shaded-protobuf - - - junit - junit - test - - @@ -260,9 +260,7 @@ - - com.google.code.maven-replacer-plugin - + 
com.google.code.maven-replacer-plugin replacer [1.5.3,) @@ -271,7 +269,7 @@ - false + false diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java index f8cef893d7d..a864f41fcd6 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,19 +22,20 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; /** - * Helper to convert Exceptions and StackTraces from/to protobuf. - * (see ErrorHandling.proto for the internal of the proto messages) + * Helper to convert Exceptions and StackTraces from/to protobuf. (see ErrorHandling.proto for the + * internal of the proto messages) */ @InterfaceAudience.Private public final class ForeignExceptionUtil { - private ForeignExceptionUtil() { } + private ForeignExceptionUtil() { + } public static Exception toException(final ForeignExceptionMessage eem) { Exception re; @@ -57,8 +58,8 @@ public final class ForeignExceptionUtil { } private static T createException(final Class clazz, - final ForeignExceptionMessage eem) throws ClassNotFoundException, NoSuchMethodException, - InstantiationException, IllegalAccessException, InvocationTargetException { + final ForeignExceptionMessage eem) throws ClassNotFoundException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException { final GenericExceptionMessage gem = eem.getGenericException(); final Class realClass = Class.forName(gem.getClassName()); final Class cls = realClass.asSubclass(clazz); @@ -68,7 +69,7 @@ public final class ForeignExceptionUtil { } private static T setExceptionDetails(final T exception, - final ForeignExceptionMessage eem) { + final ForeignExceptionMessage eem) { final GenericExceptionMessage gem = eem.getGenericException(); final StackTraceElement[] trace = toStackTrace(gem.getTraceList()); exception.setStackTrace(trace); @@ -127,8 +128,7 @@ public final class ForeignExceptionUtil { } /** - * Unwind a serialized array of {@link StackTraceElementMessage}s to a - * {@link StackTraceElement}s. + * Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on * the sender). @@ -140,10 +140,8 @@ public final class ForeignExceptionUtil { StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); - trace[i] = new StackTraceElement( - elem.getDeclaringClass(), elem.getMethodName(), - elem.hasFileName() ? 
elem.getFileName() : null, - elem.getLineNumber()); + trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), + elem.hasFileName() ? elem.getFileName() : null, elem.getLineNumber()); } return trace; } diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index b184ef1b503..9ec28bf4ac5 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -32,6 +32,17 @@ true + + + + com.google.protobuf + protobuf-java + + + org.slf4j + slf4j-api + + @@ -52,10 +63,10 @@ secondPartTestsExecution - test test + test true @@ -68,10 +79,10 @@ compile-protoc - generate-sources compile + generate-sources @@ -83,45 +94,34 @@ com.google.code.maven-replacer-plugin replacer 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + - generate-sources replace + generate-sources - - ${basedir}/target/generated-sources/ - - **/*.java - - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - - - - - com.google.protobuf - protobuf-java - - - org.slf4j - slf4j-api - - @@ -181,9 +181,7 @@ - - com.google.code.maven-replacer-plugin - + com.google.code.maven-replacer-plugin replacer [1.5.3,) @@ -191,7 +189,7 @@ - + diff --git a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java index f10d30f9d4b..0720656aac2 100644 --- a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java +++ b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.google.protobuf; // This is a lie. +package com.google.protobuf; // This is a lie. import org.apache.yetus.audience.InterfaceAudience; /** * Helper class to extract byte arrays from {@link ByteString} without copy. *
<p>
      - * Without this protobufs would force us to copy every single byte array out - * of the objects de-serialized from the wire (which already do one copy, on - * top of the copies the JVM does to go from kernel buffer to C buffer and - * from C buffer to JVM buffer). - * + * Without this protobufs would force us to copy every single byte array out of the objects + * de-serialized from the wire (which already do one copy, on top of the copies the JVM does to go + * from kernel buffer to C buffer and from C buffer to JVM buffer). * @since 0.96.1 */ @InterfaceAudience.Private @@ -49,7 +47,7 @@ public final class HBaseZeroCopyByteString extends LiteralByteString { /** * Wraps a subset of a byte array in a {@link ByteString} without copying it. - * @param array array to be wrapped + * @param array array to be wrapped * @param offset from * @param length length * @return wrapped array @@ -63,15 +61,15 @@ public final class HBaseZeroCopyByteString extends LiteralByteString { /** * Extracts the byte array from the given {@link ByteString} without copy. - * @param buf A buffer from which to extract the array. This buffer must be - * actually an instance of a {@code LiteralByteString}. + * @param buf A buffer from which to extract the array. This buffer must be actually an instance + * of a {@code LiteralByteString}. * @return byte[] representation */ public static byte[] zeroCopyGetBytes(final ByteString buf) { if (buf instanceof LiteralByteString) { return ((LiteralByteString) buf).bytes; } - throw new UnsupportedOperationException("Need a LiteralByteString, got a " - + buf.getClass().getName()); + throw new UnsupportedOperationException( + "Need a LiteralByteString, got a " + buf.getClass().getName()); } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java index 65f1cc67214..fa84a16e577 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.util; +import com.google.protobuf.ByteString; +import com.google.protobuf.HBaseZeroCopyByteString; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.protobuf.ByteString; -import com.google.protobuf.HBaseZeroCopyByteString; - /** * Hack to workaround HBASE-10304 issue that keeps bubbling up when a mapreduce context. */ @@ -41,7 +40,7 @@ public class ByteStringer { // because it makes a copy of the passed in array. static { try { - HBaseZeroCopyByteString.wrap(new byte [0]); + HBaseZeroCopyByteString.wrap(new byte[0]); } catch (IllegalAccessError iae) { USE_ZEROCOPYBYTESTRING = false; LOG.debug("Failed to classload HBaseZeroCopyByteString: " + iae.toString()); @@ -56,14 +55,17 @@ public class ByteStringer { * Wraps a byte array in a {@link ByteString} without copying it. */ public static ByteString wrap(final byte[] array) { - return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array): ByteString.copyFrom(array); + return USE_ZEROCOPYBYTESTRING + ? 
HBaseZeroCopyByteString.wrap(array) + : ByteString.copyFrom(array); } /** * Wraps a subset of a byte array in a {@link ByteString} without copying it. */ public static ByteString wrap(final byte[] array, int offset, int length) { - return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array, offset, length): - ByteString.copyFrom(array, offset, length); + return USE_ZEROCOPYBYTESTRING + ? HBaseZeroCopyByteString.wrap(array, offset, length) + : ByteString.copyFrom(array, offset, length); } } diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index 593a79ae738..67087d453ac 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,27 +30,6 @@ Apache HBase - Replication HBase Replication Support - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -152,14 +130,36 @@ + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + + hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -251,8 +251,7 @@ lifecycle-mapping - - + diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 83421600aa0..6dba30a34c0 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java index 5c21e1e023c..36b958d2fa2 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index f1103b268e9..7cba6618d54 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.replication; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; @@ -99,7 +98,7 @@ public interface ReplicationPeer { /** * @deprecated since 2.1.0 and will be removed in 4.0.0. Use - * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. + * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. * @see #registerPeerConfigListener(ReplicationPeerConfigListener) * @see HBASE-19573 */ @@ -107,4 +106,4 @@ public interface ReplicationPeer { default void trackPeerConfigChanges(ReplicationPeerConfigListener listener) { registerPeerConfigListener(listener); } -} \ No newline at end of file +} diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java index d4d8023ead7..d0bacda6d49 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -24,8 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface ReplicationPeerConfigListener { - /** Callback method for when users update the ReplicationPeerConfig for this peer - * + /** + * Callback method for when users update the ReplicationPeerConfig for this peer * @param rpc The updated ReplicationPeerConfig */ void peerConfigUpdated(ReplicationPeerConfig rpc); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 5e33aa90b27..1bcc667fcce 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -41,12 +41,12 @@ public class ReplicationPeerImpl implements ReplicationPeer { /** * Constructor that takes all the objects required to communicate with the specified peer, except * for the region server addresses. 
- * @param conf configuration object to this peer - * @param id string representation of this peer's identifier + * @param conf configuration object to this peer + * @param id string representation of this peer's identifier * @param peerConfig configuration for the replication peer */ public ReplicationPeerImpl(Configuration conf, String id, boolean peerState, - ReplicationPeerConfig peerConfig) { + ReplicationPeerConfig peerConfig) { this.conf = conf; this.id = id; setPeerState(peerState); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java index 1adda02e631..3f9d67c3d16 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.replication; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,7 +31,7 @@ public interface ReplicationPeerStorage { * @throws ReplicationException if there are errors accessing the storage service. */ void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) - throws ReplicationException; + throws ReplicationException; /** * Remove a replication peer. @@ -51,7 +50,7 @@ public interface ReplicationPeerStorage { * @throws ReplicationException if there are errors accessing the storage service. */ void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException; + throws ReplicationException; /** * Return the peer ids of all replication peers. diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index ea5a7ac4c4a..f852066ed90 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,8 +65,7 @@ public class ReplicationPeers { * Method called after a peer has been connected. It will create a ReplicationPeer to track the * newly connected cluster. 
* @param peerId a short that identifies the cluster - * @return whether a ReplicationPeer was successfully created - * @throws ReplicationException + * @return whether a ReplicationPeer was successfully created n */ public boolean addPeer(String peerId) throws ReplicationException { if (this.peerCache.containsKey(peerId)) { @@ -132,6 +131,6 @@ public class ReplicationPeers { ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId); boolean enabled = peerStorage.isPeerEnabled(peerId); return new ReplicationPeerImpl(ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf), - peerId, enabled, peerConfig); + peerId, enabled, peerConfig); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index cd65f9b3a89..55cdf0d8c9e 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,20 +17,18 @@ */ package org.apache.hadoop.hbase.replication; - import java.util.ArrayList; import java.util.Collections; import java.util.List; - +import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ServerName; /** - * This class is responsible for the parsing logic for a queue id representing a queue. - * It will extract the peerId if it's recovered as well as the dead region servers - * that were part of the queue's history. + * This class is responsible for the parsing logic for a queue id representing a queue. It will + * extract the peerId if it's recovered as well as the dead region servers that were part of the + * queue's history. */ @InterfaceAudience.Private public class ReplicationQueueInfo { @@ -44,8 +41,8 @@ public class ReplicationQueueInfo { private List deadRegionServers = new ArrayList<>(); /** - * The passed queueId will be either the id of the peer or the handling story of that queue - * in the form of id-servername-* + * The passed queueId will be either the id of the peer or the handling story of that queue in the + * form of id-servername-* */ public ReplicationQueueInfo(String queueId) { this.queueId = queueId; @@ -63,10 +60,10 @@ public class ReplicationQueueInfo { * "ip-10-46-221-101.ec2.internal", so we need skip some "-" during parsing for the following * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-... 
*/ - private static void - extractDeadServersFromZNodeString(String deadServerListStr, List result) { + private static void extractDeadServersFromZNodeString(String deadServerListStr, + List result) { - if(deadServerListStr == null || result == null || deadServerListStr.isEmpty()) return; + if (deadServerListStr == null || result == null || deadServerListStr.isEmpty()) return; // valid server name delimiter "-" has to be after "," in a server name int seenCommaCnt = 0; @@ -75,32 +72,32 @@ public class ReplicationQueueInfo { for (int i = 0; i < len; i++) { switch (deadServerListStr.charAt(i)) { - case ',': - seenCommaCnt += 1; - break; - case '-': - if(seenCommaCnt>=2) { - if (i > startIndex) { - String serverName = deadServerListStr.substring(startIndex, i); - if(ServerName.isFullServerName(serverName)){ - result.add(ServerName.valueOf(serverName)); - } else { - LOG.error("Found invalid server name:" + serverName); + case ',': + seenCommaCnt += 1; + break; + case '-': + if (seenCommaCnt >= 2) { + if (i > startIndex) { + String serverName = deadServerListStr.substring(startIndex, i); + if (ServerName.isFullServerName(serverName)) { + result.add(ServerName.valueOf(serverName)); + } else { + LOG.error("Found invalid server name:" + serverName); + } + startIndex = i + 1; } - startIndex = i + 1; + seenCommaCnt = 0; } - seenCommaCnt = 0; - } - break; - default: - break; + break; + default: + break; } } // add tail - if(startIndex < len - 1){ + if (startIndex < len - 1) { String serverName = deadServerListStr.substring(startIndex, len); - if(ServerName.isFullServerName(serverName)){ + if (ServerName.isFullServerName(serverName)) { result.add(ServerName.valueOf(serverName)); } else { LOG.error("Found invalid server name at the end:" + serverName); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java index 59278e9807d..0f95c04b254 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Pair; @@ -36,7 +35,7 @@ public interface ReplicationQueueStorage { /** * Remove a replication queue for a given regionserver. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue. + * @param queueId a String that identifies the queue. */ void removeQueue(ServerName serverName, String queueId) throws ReplicationException; @@ -44,36 +43,36 @@ public interface ReplicationQueueStorage { * Add a new WAL file to the given queue for a given regionserver. If the queue does not exist it * is created. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue. - * @param fileName name of the WAL + * @param queueId a String that identifies the queue. 
+ * @param fileName name of the WAL */ void addWAL(ServerName serverName, String queueId, String fileName) throws ReplicationException; /** * Remove an WAL file from the given queue for a given regionserver. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue. - * @param fileName name of the WAL + * @param queueId a String that identifies the queue. + * @param fileName name of the WAL */ void removeWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException; + throws ReplicationException; /** * Set the current position for a specific WAL in a given queue for a given regionserver. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue - * @param fileName name of the WAL - * @param position the current position in the file. Will ignore if less than or equal to 0. + * @param queueId a String that identifies the queue + * @param fileName name of the WAL + * @param position the current position in the file. Will ignore if less than or equal to 0. * @param lastSeqIds map with {encodedRegionName, sequenceId} pairs for serial replication. */ void setWALPosition(ServerName serverName, String queueId, String fileName, long position, - Map lastSeqIds) throws ReplicationException; + Map lastSeqIds) throws ReplicationException; /** * Read the max sequence id of the specific region for a given peer. For serial replication, we * need the max sequenced id to decide whether we can push the next entries. * @param encodedRegionName the encoded region name - * @param peerId peer id + * @param peerId peer id * @return the max sequence id of the specific region for a given peer. */ long getLastSequenceId(String encodedRegionName, String peerId) throws ReplicationException; @@ -81,7 +80,7 @@ public interface ReplicationQueueStorage { /** * Set the max sequence id of a bunch of regions for a given peer. Will be called when setting up * a serial replication peer. - * @param peerId peer id + * @param peerId peer id * @param lastSeqIds map with {encodedRegionName, sequenceId} pairs for serial replication. */ void setLastSequenceIds(String peerId, Map lastSeqIds) throws ReplicationException; @@ -94,26 +93,26 @@ public interface ReplicationQueueStorage { /** * Remove the max sequence id record for the given peer and regions. - * @param peerId peer id + * @param peerId peer id * @param encodedRegionNames the encoded region names */ void removeLastSequenceIds(String peerId, List encodedRegionNames) - throws ReplicationException; + throws ReplicationException; /** * Get the current position for a specific WAL in a given queue for a given regionserver. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue - * @param fileName name of the WAL + * @param queueId a String that identifies the queue + * @param fileName name of the WAL * @return the current position in the file */ long getWALPosition(ServerName serverName, String queueId, String fileName) - throws ReplicationException; + throws ReplicationException; /** * Get a list of all WALs in the given queue on the given region server. 
* @param serverName the server name of the region server that owns the queue - * @param queueId a String that identifies the queue + * @param queueId a String that identifies the queue * @return a list of WALs */ List getWALsInQueue(ServerName serverName, String queueId) throws ReplicationException; @@ -128,12 +127,12 @@ public interface ReplicationQueueStorage { /** * Change ownership for the queue identified by queueId and belongs to a dead region server. * @param sourceServerName the name of the dead region server - * @param destServerName the name of the target region server - * @param queueId the id of the queue + * @param destServerName the name of the target region server + * @param queueId the id of the queue * @return the new PeerId and A SortedSet of WALs in its queue */ Pair> claimQueue(ServerName sourceServerName, String queueId, - ServerName destServerName) throws ReplicationException; + ServerName destServerName) throws ReplicationException; /** * Remove the record of region server if the queue is empty. @@ -170,8 +169,8 @@ public interface ReplicationQueueStorage { /** * Add new hfile references to the queue. * @param peerId peer cluster id to which the hfiles need to be replicated - * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which - * will be added in the queue } + * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which + * will be added in the queue } * @throws ReplicationException if fails to add a hfile reference */ void addHFileRefs(String peerId, List> pairs) throws ReplicationException; @@ -179,7 +178,7 @@ public interface ReplicationQueueStorage { /** * Remove hfile references from the queue. * @param peerId peer cluster id from which this hfile references needs to be removed - * @param files list of hfile references to be removed + * @param files list of hfile references to be removed */ void removeHFileRefs(String peerId, List files) throws ReplicationException; diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java index 462cfedd0a0..1080b2125c7 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public final class ReplicationStorageFactory { * Create a new {@link ReplicationQueueStorage}. */ public static ReplicationQueueStorage getReplicationQueueStorage(ZKWatcher zk, - Configuration conf) { + Configuration conf) { return new ZKReplicationQueueStorage(zk, conf); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java index fb2b0d517d8..d0f8e872828 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public final class ReplicationUtils { } public static Configuration getPeerClusterConfiguration(ReplicationPeerConfig peerConfig, - Configuration baseConf) throws ReplicationException { + Configuration baseConf) throws ReplicationException { Configuration otherConf; try { otherConf = HBaseConfiguration.createClusterConf(baseConf, peerConfig.getClusterKey()); @@ -59,7 +59,7 @@ public final class ReplicationUtils { } public static void removeAllQueues(ReplicationQueueStorage queueStorage, String peerId) - throws ReplicationException { + throws ReplicationException { for (ServerName replicator : queueStorage.getListOfReplicators()) { List queueIds = queueStorage.getAllQueues(replicator); for (String queueId : queueIds) { @@ -87,7 +87,7 @@ public final class ReplicationUtils { } private static boolean isTableCFsEqual(Map> tableCFs1, - Map> tableCFs2) { + Map> tableCFs2) { if (tableCFs1 == null) { return tableCFs2 == null; } @@ -112,16 +112,16 @@ public final class ReplicationUtils { } public static boolean isNamespacesAndTableCFsEqual(ReplicationPeerConfig rpc1, - ReplicationPeerConfig rpc2) { + ReplicationPeerConfig rpc2) { if (rpc1.replicateAllUserTables() != rpc2.replicateAllUserTables()) { return false; } if (rpc1.replicateAllUserTables()) { - return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) && - isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); + return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) + && isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); } else { - return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) && - isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); + return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) + && isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); } } @@ -135,10 +135,10 @@ public final class ReplicationUtils { } /** - * @deprecated Will be removed in HBase 3. - * Use {@link ReplicationPeerConfig#needToReplicate(TableName)} instead. + * @deprecated Will be removed in HBase 3. Use + * {@link ReplicationPeerConfig#needToReplicate(TableName)} instead. * @param peerConfig configuration for the replication peer cluster - * @param tableName name of the table + * @param tableName name of the table * @return true if the table need replicate to the peer cluster */ @Deprecated diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java index 4b3b70220c3..eeeee4375cf 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -31,12 +30,14 @@ import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; + /** * ZK based replication peer storage. */ @InterfaceAudience.Private public class ZKReplicationPeerStorage extends ZKReplicationStorageBase - implements ReplicationPeerStorage { + implements ReplicationPeerStorage { public static final String PEERS_ZNODE = "zookeeper.znode.replication.peers"; public static final String PEERS_ZNODE_DEFAULT = "peers"; @@ -77,7 +78,7 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase @Override public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) - throws ReplicationException { + throws ReplicationException { try { ZKUtil.createWithParents(zookeeper, peersZNode); ZKUtil.multiOrSequential(zookeeper, @@ -89,7 +90,7 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase false); } catch (KeeperException e) { throw new ReplicationException("Could not add peer with id=" + peerId + ", peerConfif=>" - + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED"), e); + + peerConfig + ", state=" + (enabled ? 
"ENABLED" : "DISABLED"), e); } } @@ -114,13 +115,13 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase @Override public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException { + throws ReplicationException { try { ZKUtil.setData(this.zookeeper, getPeerNode(peerId), ReplicationPeerConfigUtil.toByteArray(peerConfig)); } catch (KeeperException e) { throw new ReplicationException( - "There was a problem trying to save changes to the " + "replication peer " + peerId, e); + "There was a problem trying to save changes to the " + "replication peer " + peerId, e); } } @@ -154,13 +155,13 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase } if (data == null || data.length == 0) { throw new ReplicationException( - "Replication peer config data shouldn't be empty, peerId=" + peerId); + "Replication peer config data shouldn't be empty, peerId=" + peerId); } try { return ReplicationPeerConfigUtil.parsePeerFrom(data); } catch (DeserializationException e) { throw new ReplicationException( - "Failed to parse replication peer config for peer with id=" + peerId, e); + "Failed to parse replication peer config for peer with id=" + peerId, e); } } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java index c51bdfcc283..f3506ad3555 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -80,16 +80,16 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUti */ @InterfaceAudience.Private class ZKReplicationQueueStorage extends ZKReplicationStorageBase - implements ReplicationQueueStorage { + implements ReplicationQueueStorage { private static final Logger LOG = LoggerFactory.getLogger(ZKReplicationQueueStorage.class); public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY = - "zookeeper.znode.replication.hfile.refs"; + "zookeeper.znode.replication.hfile.refs"; public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = "hfile-refs"; public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY = - "zookeeper.znode.replication.regions"; + "zookeeper.znode.replication.regions"; public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT = "regions"; /** @@ -113,7 +113,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase this.queuesZNode = ZNodePaths.joinZNode(replicationZNode, queuesZNodeName); this.hfileRefsZNode = ZNodePaths.joinZNode(replicationZNode, hfileRefsZNodeName); this.regionsZNode = ZNodePaths.joinZNode(replicationZNode, conf - .get(ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY, ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT)); + .get(ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY, ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT)); } @Override @@ -152,20 +152,20 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase * is the peer id. *
<p>
      * @param encodedRegionName the encoded region name. - * @param peerId peer id for replication. + * @param peerId peer id for replication. * @return ZNode path to persist the max sequence id that we've pushed for the given region and * peer. */ String getSerialReplicationRegionPeerNode(String encodedRegionName, String peerId) { if (encodedRegionName == null || encodedRegionName.length() != RegionInfo.MD5_HEX_LENGTH) { throw new IllegalArgumentException( - "Invalid encoded region name: " + encodedRegionName + ", length should be 32."); + "Invalid encoded region name: " + encodedRegionName + ", length should be 32."); } return new StringBuilder(regionsZNode).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) - .toString(); + .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) + .toString(); } @Override @@ -174,37 +174,37 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase ZKUtil.deleteNodeRecursively(zookeeper, getQueueNode(serverName, queueId)); } catch (KeeperException e) { throw new ReplicationException( - "Failed to delete queue (serverName=" + serverName + ", queueId=" + queueId + ")", e); + "Failed to delete queue (serverName=" + serverName + ", queueId=" + queueId + ")", e); } } @Override public void addWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException { + throws ReplicationException { try { ZKUtil.createWithParents(zookeeper, getFileNode(serverName, queueId, fileName)); } catch (KeeperException e) { throw new ReplicationException("Failed to add wal to queue (serverName=" + serverName - + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } } @Override public void removeWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException { + throws ReplicationException { String fileNode = getFileNode(serverName, queueId, fileName); try { ZKUtil.deleteNode(zookeeper, fileNode); } catch (NoNodeException e) { LOG.warn("{} already deleted when removing log", fileNode); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } } private void addLastSeqIdsToOps(String queueId, Map lastSeqIds, - List listOfOps) throws KeeperException, ReplicationException { + List listOfOps) throws KeeperException, ReplicationException { String peerId = new ReplicationQueueInfo(queueId).getPeerId(); for (Entry lastSeqEntry : lastSeqIds.entrySet()) { String path = getSerialReplicationRegionPeerNode(lastSeqEntry.getKey(), peerId); @@ -228,7 +228,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase @Override public void setWALPosition(ServerName serverName, String queueId, String fileName, long position, - Map lastSeqIds) throws ReplicationException { + Map lastSeqIds) throws ReplicationException { try { for (int retry = 0;; retry++) { List listOfOps = 
new ArrayList<>(); @@ -247,13 +247,13 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase } catch (KeeperException.BadVersionException | KeeperException.NodeExistsException e) { LOG.warn( "Bad version(or node exist) when persist the last pushed sequence id to zookeeper " - + "storage, Retry = " + retry + ", serverName=" + serverName + ", queueId=" - + queueId + ", fileName=" + fileName); + + "storage, Retry = " + retry + ", serverName=" + serverName + ", queueId=" + queueId + + ", fileName=" + fileName); } } } catch (KeeperException e) { throw new ReplicationException("Failed to set log position (serverName=" + serverName - + ", queueId=" + queueId + ", fileName=" + fileName + ", position=" + position + ")", e); + + ", queueId=" + queueId + ", fileName=" + fileName + ", position=" + position + ")", e); } } @@ -262,7 +262,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase * that the ZNode does not exist. */ protected Pair getLastSequenceIdWithVersion(String encodedRegionName, - String peerId) throws KeeperException { + String peerId) throws KeeperException { Stat stat = new Stat(); String path = getSerialReplicationRegionPeerNode(encodedRegionName, peerId); byte[] data = ZKUtil.getDataNoWatch(zookeeper, path, stat); @@ -274,25 +274,25 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase return Pair.newPair(ZKUtil.parseWALPositionFrom(data), stat.getVersion()); } catch (DeserializationException de) { LOG.warn("Failed to parse log position (region=" + encodedRegionName + ", peerId=" + peerId - + "), data=" + Bytes.toStringBinary(data)); + + "), data=" + Bytes.toStringBinary(data)); } return Pair.newPair(HConstants.NO_SEQNUM, stat.getVersion()); } @Override public long getLastSequenceId(String encodedRegionName, String peerId) - throws ReplicationException { + throws ReplicationException { try { return getLastSequenceIdWithVersion(encodedRegionName, peerId).getFirst(); } catch (KeeperException e) { throw new ReplicationException("Failed to get last pushed sequence id (encodedRegionName=" - + encodedRegionName + ", peerId=" + peerId + ")", e); + + encodedRegionName + ", peerId=" + peerId + ")", e); } } @Override public void setLastSequenceIds(String peerId, Map lastSeqIds) - throws ReplicationException { + throws ReplicationException { try { // No need CAS and retry here, because it'll call setLastSequenceIds() for disabled peers // only, so no conflict happen. 
@@ -307,7 +307,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase } } catch (KeeperException e) { throw new ReplicationException("Failed to set last sequence ids, peerId=" + peerId - + ", size of lastSeqIds=" + lastSeqIds.size(), e); + + ", size of lastSeqIds=" + lastSeqIds.size(), e); } } @@ -347,33 +347,33 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase @Override public void removeLastSequenceIds(String peerId, List encodedRegionNames) - throws ReplicationException { + throws ReplicationException { try { List listOfOps = encodedRegionNames.stream().map(n -> getSerialReplicationRegionPeerNode(n, peerId)) .map(ZKUtilOp::deleteNodeFailSilent).collect(Collectors.toList()); ZKUtil.multiOrSequential(zookeeper, listOfOps, true); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + - ", encodedRegionNames.size=" + encodedRegionNames.size(), e); + throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + + ", encodedRegionNames.size=" + encodedRegionNames.size(), e); } } @Override public long getWALPosition(ServerName serverName, String queueId, String fileName) - throws ReplicationException { + throws ReplicationException { byte[] bytes; try { bytes = ZKUtil.getData(zookeeper, getFileNode(serverName, queueId, fileName)); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Failed to get log position (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to get log position (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } try { return ZKUtil.parseWALPositionFrom(bytes); } catch (DeserializationException de) { - LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", - serverName, queueId, fileName); + LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", serverName, + queueId, fileName); } // if we can not parse the position, start at the beginning of the wal file again return 0; @@ -386,15 +386,13 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase */ @Override public Pair> claimQueue(ServerName sourceServerName, String queueId, - ServerName destServerName) throws ReplicationException { + ServerName destServerName) throws ReplicationException { LOG.info("Atomically moving {}/{}'s WALs to {}", sourceServerName, queueId, destServerName); try { ZKUtil.createWithParents(zookeeper, getRsNode(destServerName)); } catch (KeeperException e) { - throw new ReplicationException( - "Claim queue queueId=" + queueId + " from " + sourceServerName + " to " + destServerName + - " failed when creating the node for " + destServerName, - e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed when creating the node for " + destServerName, e); } String newQueueId = queueId + "-" + sourceServerName; try { @@ -440,11 +438,11 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase // queue to tell the upper layer that claim nothing. For other types of exception should be // thrown out to notify the upper layer. 
LOG.info("Claim queue queueId={} from {} to {} failed with {}, someone else took the log?", - queueId,sourceServerName, destServerName, e.toString()); + queueId, sourceServerName, destServerName, e.toString()); return new Pair<>(newQueueId, Collections.emptySortedSet()); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Claim queue queueId=" + queueId + " from " + - sourceServerName + " to " + destServerName + " failed", e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed", e); } } @@ -477,21 +475,20 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase } private List getWALsInQueue0(ServerName serverName, String queueId) - throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, - queueId)); + throws KeeperException { + List children = + ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, queueId)); return children != null ? children : Collections.emptyList(); } @Override public List getWALsInQueue(ServerName serverName, String queueId) - throws ReplicationException { + throws ReplicationException { try { return getWALsInQueue0(serverName, queueId); } catch (KeeperException e) { throw new ReplicationException( - "Failed to get wals in queue (serverName=" + serverName + ", queueId=" + queueId + ")", - e); + "Failed to get wals in queue (serverName=" + serverName + ", queueId=" + queueId + ")", e); } } @@ -521,7 +518,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase * Therefore, we must update the cversion of root {@link #queuesZNode} when migrate wal nodes to * other queues. * @see #claimQueue(ServerName, String, ServerName) as an example of updating root - * {@link #queuesZNode} cversion. + * {@link #queuesZNode} cversion. 
*/ @Override public Set getAllWALs() throws ReplicationException { @@ -543,8 +540,8 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase if (v0 == v1) { return wals; } - LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", v0, v1, + retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all wals", e); @@ -569,7 +566,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase } } catch (KeeperException e) { throw new ReplicationException("Failed to add peer " + peerId + " to hfile reference queue.", - e); + e); } } @@ -585,20 +582,20 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase } } catch (KeeperException e) { throw new ReplicationException( - "Failed to remove peer " + peerId + " from hfile reference queue.", e); + "Failed to remove peer " + peerId + " from hfile reference queue.", e); } } @Override public void addHFileRefs(String peerId, List> pairs) - throws ReplicationException { + throws ReplicationException { String peerNode = getHFileRefsPeerNode(peerId); LOG.debug("Adding hfile references {} in queue {}", pairs, peerNode); - List listOfOps = pairs.stream().map(p -> p.getSecond().getName()) - .map(n -> getHFileNode(peerNode, n)) + List listOfOps = + pairs.stream().map(p -> p.getSecond().getName()).map(n -> getHFileNode(peerNode, n)) .map(f -> ZKUtilOp.createAndFailSilent(f, HConstants.EMPTY_BYTE_ARRAY)).collect(toList()); - LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -612,9 +609,9 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase LOG.debug("Removing hfile references {} from queue {}", files, peerNode); List listOfOps = files.stream().map(n -> getHFileNode(peerNode, n)) - .map(ZKUtilOp::deleteNodeFailSilent).collect(toList()); - LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + .map(ZKUtilOp::deleteNodeFailSilent).collect(toList()); + LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -633,13 +630,13 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase return getAllPeersFromHFileRefsQueue0(); } catch (KeeperException e) { throw new ReplicationException("Failed to get list of all peers in hfile references node.", - e); + e); } } private List getReplicableHFiles0(String peerId) throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(this.zookeeper, - getHFileRefsPeerNode(peerId)); + List children = + ZKUtil.listChildrenNoWatch(this.zookeeper, getHFileRefsPeerNode(peerId)); return children != null ? 
children : Collections.emptyList(); } @@ -649,7 +646,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase return getReplicableHFiles0(peerId); } catch (KeeperException e) { throw new ReplicationException("Failed to get list of hfile references for peer " + peerId, - e); + e); } } @@ -683,7 +680,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase return hfileRefs; } LOG.debug("Replication hfile references node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + v0, v1, retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all hfile refs", e); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java index d6e692aef38..51df6a3b18f 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.replication; import java.io.ByteArrayOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -61,7 +60,7 @@ public class ZKReplicationStorageBase { */ protected static byte[] toByteArray(final ReplicationProtos.ReplicationState.State state) { ReplicationProtos.ReplicationState msg = - ReplicationProtos.ReplicationState.newBuilder().setState(state).build(); + ReplicationProtos.ReplicationState.newBuilder().setState(state).build(); // There is no toByteArray on this pb Message? // 32 bytes is default which seems fair enough here. 
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index cf8e97398f5..acbcd7aad0f 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -79,11 +79,11 @@ public abstract class TestReplicationStateBasic { */ rqs.addWAL(server1, "qId1", "trash"); rqs.removeWAL(server1, "qId1", "trash"); - rqs.addWAL(server1,"qId2", "filename1"); - rqs.addWAL(server1,"qId3", "filename2"); - rqs.addWAL(server1,"qId3", "filename3"); - rqs.addWAL(server2,"trash", "trash"); - rqs.removeQueue(server2,"trash"); + rqs.addWAL(server1, "qId2", "filename1"); + rqs.addWAL(server1, "qId3", "filename2"); + rqs.addWAL(server1, "qId3", "filename3"); + rqs.addWAL(server2, "trash", "trash"); + rqs.removeQueue(server2, "trash"); List reps = rqs.getListOfReplicators(); assertEquals(2, reps.size()); @@ -105,10 +105,11 @@ public abstract class TestReplicationStateBasic { } private void removeAllQueues(ServerName serverName) throws ReplicationException { - for (String queue: rqs.getAllQueues(serverName)) { + for (String queue : rqs.getAllQueues(serverName)) { rqs.removeQueue(serverName, queue); } } + @Test public void testReplicationQueues() throws ReplicationException { // Initialize ReplicationPeer so we can add peers (we don't transfer lone queues) @@ -166,7 +167,7 @@ public abstract class TestReplicationStateBasic { assertTrue(rqs.getReplicableHFiles(ID_ONE).isEmpty()); assertEquals(0, rqs.getAllPeersFromHFileRefsQueue().size()); rp.getPeerStorage().addPeer(ID_ONE, - ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true); + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true); rqs.addPeerToHFileRefs(ID_ONE); rqs.addHFileRefs(ID_ONE, files1); assertEquals(1, rqs.getAllPeersFromHFileRefsQueue().size()); @@ -247,7 +248,7 @@ public abstract class TestReplicationStateBasic { assertNumberOfPeers(2); assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(ReplicationUtils - .getPeerClusterConfiguration(rp.getPeerStorage().getPeerConfig(ID_ONE), rp.getConf()))); + .getPeerClusterConfiguration(rp.getPeerStorage().getPeerConfig(ID_ONE), rp.getConf()))); rp.getPeerStorage().removePeer(ID_ONE); rp.removePeer(ID_ONE); assertNumberOfPeers(1); @@ -335,7 +336,7 @@ public abstract class TestReplicationStateBasic { } if (zkTimeoutCount < ZK_MAX_COUNT) { LOG.debug("ConnectedPeerStatus was " + !status + " but expected " + status - + ", sleeping and trying again."); + + ", sleeping and trying again."); Thread.sleep(ZK_SLEEP_INTERVAL); } else { fail("Timed out waiting for ConnectedPeerStatus to be " + status); @@ -363,9 +364,8 @@ public abstract class TestReplicationStateBasic { rqs.addWAL(server3, "qId" + i, "filename" + j); } // Add peers for the corresponding queues so they are not orphans - rp.getPeerStorage().addPeer("qId" + i, - ReplicationPeerConfig.newBuilder(). 
- setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), true); + rp.getPeerStorage().addPeer("qId" + i, ReplicationPeerConfig.newBuilder() + .setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), true); } } } diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 9eb67f9037d..5ece97b2859 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStateZKImpl.class); + HBaseClassTestRule.forClass(TestReplicationStateZKImpl.class); private static Configuration conf; private static HBaseZKTestingUtility utility; @@ -64,13 +64,13 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { } private static String initPeerClusterState(String baseZKNode) - throws IOException, KeeperException { + throws IOException, KeeperException { // Add a dummy region server and set up the cluster id Configuration testConf = new Configuration(conf); testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode); ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null); - String fakeRs = ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, - "hostname1.example.org:1234"); + String fakeRs = + ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, "hostname1.example.org:1234"); ZKUtil.createWithParents(zkw1, fakeRs); ZKClusterId.setClusterId(zkw1, new ClusterId()); return ZKConfig.getZooKeeperClusterKey(testConf); diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index 51a3408c1e3..af2561448a9 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ public class TestZKReplicationPeerStorage { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class); + HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class); private static final HBaseZKTestingUtility UTIL = new HBaseZKTestingUtility(); private static final Random RNG = new Random(); // Seed may be set with Random#setSeed @@ -80,7 +80,7 @@ public class TestZKReplicationPeerStorage { private Set randNamespaces(Random rand) { return Stream.generate(() -> Long.toHexString(rand.nextLong())).limit(rand.nextInt(5)) - .collect(toSet()); + .collect(toSet()); } private Map> randTableCFs(Random rand) { @@ -89,7 +89,7 @@ public class TestZKReplicationPeerStorage { for (int i = 0; i < size; i++) { TableName tn = TableName.valueOf(Long.toHexString(rand.nextLong())); List cfs = Stream.generate(() -> Long.toHexString(rand.nextLong())) - .limit(rand.nextInt(5)).collect(toList()); + .limit(rand.nextInt(5)).collect(toList()); map.put(tn, cfs); } return map; @@ -98,10 +98,10 @@ public class TestZKReplicationPeerStorage { private ReplicationPeerConfig getConfig(int seed) { RNG.setSeed(seed); return ReplicationPeerConfig.newBuilder().setClusterKey(Long.toHexString(RNG.nextLong())) - .setReplicationEndpointImpl(Long.toHexString(RNG.nextLong())) - .setNamespaces(randNamespaces(RNG)).setExcludeNamespaces(randNamespaces(RNG)) - .setTableCFsMap(randTableCFs(RNG)).setReplicateAllUserTables(RNG.nextBoolean()) - .setBandwidth(RNG.nextInt(1000)).build(); + .setReplicationEndpointImpl(Long.toHexString(RNG.nextLong())) + .setNamespaces(randNamespaces(RNG)).setExcludeNamespaces(randNamespaces(RNG)) + .setTableCFsMap(randTableCFs(RNG)).setReplicateAllUserTables(RNG.nextBoolean()) + .setBandwidth(RNG.nextInt(1000)).build(); } private void assertSetEquals(Set expected, Set actual) { @@ -114,7 +114,7 @@ public class TestZKReplicationPeerStorage { } private void assertMapEquals(Map> expected, - Map> actual) { + Map> actual) { if (expected == null || expected.size() == 0) { assertTrue(actual == null || actual.size() == 0); return; @@ -128,8 +128,8 @@ public class TestZKReplicationPeerStorage { } else { assertNotNull(actualCFs); assertEquals(expectedCFs.size(), actualCFs.size()); - for (Iterator expectedIt = expectedCFs.iterator(), actualIt = actualCFs.iterator(); - expectedIt.hasNext();) { + for (Iterator expectedIt = expectedCFs.iterator(), + actualIt = actualCFs.iterator(); expectedIt.hasNext();) { assertEquals(expectedIt.next(), actualIt.next()); } } @@ -205,31 +205,31 @@ public class TestZKReplicationPeerStorage { Configuration conf = UTIL.getConfiguration(); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";"). - concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. 
- updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigSecondKey)); // validates base configs get updated values even if config already present conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";"). - concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); - ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigSecondKey)); } @Test @@ -245,19 +245,19 @@ public class TestZKReplicationPeerStorage { conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. 
- updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); } @@ -274,15 +274,16 @@ public class TestZKReplicationPeerStorage { conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); } @Test public void testPeerNameControl() throws Exception { String clusterKey = "key"; - STORAGE.addPeer("6", ReplicationPeerConfig.newBuilder().setClusterKey(clusterKey).build(), true); + STORAGE.addPeer("6", ReplicationPeerConfig.newBuilder().setClusterKey(clusterKey).build(), + true); try { STORAGE.addPeer("6", ReplicationPeerConfig.newBuilder().setClusterKey(clusterKey).build(), diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java index f56e8ce5063..df8f065dc7c 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestZKReplicationQueueStorage { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZKReplicationQueueStorage.class); + HBaseClassTestRule.forClass(TestZKReplicationQueueStorage.class); private static final HBaseZKTestingUtility UTIL = new HBaseZKTestingUtility(); @@ -247,7 +247,7 @@ public class TestZKReplicationQueueStorage { @Override protected Pair getLastSequenceIdWithVersion(String encodedRegionName, - String peerId) throws KeeperException { + String peerId) throws KeeperException { Pair oldPair = super.getLastSequenceIdWithVersion(encodedRegionName, peerId); if (getLastSeqIdOpIndex < 100) { // Let the ZNode version increase. 
diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml index 6a380cf93cb..5c96f073a52 100644 --- a/hbase-resource-bundle/pom.xml +++ b/hbase-resource-bundle/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -36,15 +36,15 @@ true - + - - maven-assembly-plugin - - true - + + maven-assembly-plugin + + true + org.apache.maven.plugins diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 23530f2fa4d..c9b3f562daf 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-rest Apache HBase - Rest HBase Rest Server - - - - - - ${project.build.directory} - - hbase-webapps/** - - - - - - src/test/resources - - **/** - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-antrun-plugin - - - - generate - generate-sources - - - - - - - - - - - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - - - net.revelc.code - warbucks-maven-plugin - - - - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -300,12 +187,12 @@ --> org.codehaus.jettison jettison - - - stax - stax-api - - + + + stax + stax-api + + @@ -392,6 +279,119 @@ test + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + + + src/test/resources + + **/** + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-antrun-plugin + + + + generate + + run + + generate-sources + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -508,8 +508,8 @@ test - com.google.guava - guava + com.google.guava + guava diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 56bc9297f85..af8b9e303bd 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import org.apache.yetus.audience.InterfaceAudience; @@ -29,7 +28,7 @@ public interface Constants { String VERSION_STRING = "0.0.3"; - int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours + int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours int DEFAULT_LISTEN_PORT = 8080; @@ -83,11 +82,13 @@ public interface Constants { String SCAN_FILTER = "filter"; String SCAN_REVERSED = "reversed"; String SCAN_CACHE_BLOCKS = "cacheblocks"; - String CUSTOM_FILTERS = "hbase.rest.custom.filters"; + String CUSTOM_FILTERS = "hbase.rest.custom.filters"; String ROW_KEYS_PARAM_NAME = "row"; - /** If this query parameter is present when processing row or scanner resources, - it disables server side block caching */ + /** + * If this query parameter is present when processing row or scanner resources, it disables server + * side block caching + */ String NOCACHE_PARAM_NAME = "nocache"; /** Configuration parameter to set rest client connection timeout */ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java index 0a6fd0e1d5a..61dede2ae83 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -43,9 +41,7 @@ public class ExistsResource extends ResourceBase { TableResource tableResource; /** - * Constructor - * @param tableResource - * @throws IOException + * Constructor nn */ public ExistsResource(TableResource tableResource) throws IOException { super(); @@ -53,19 +49,17 @@ public class ExistsResource extends ResourceBase { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF, + MIMETYPE_BINARY }) public Response get(final @Context UriInfo uriInfo) { try { if (!tableResource.exists()) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } ResponseBuilder response = Response.ok(); response.cacheControl(cacheControl); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java index f1b2cea6e95..a16d3530ad7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -import org.apache.hadoop.hbase.rest.MetricsRESTSource; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsREST { @@ -34,23 +30,23 @@ public class MetricsREST { private MetricsRESTSource source; public MetricsREST() { - source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); + source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } - + /** * @param inc How much to add to requests. */ public void incrementRequests(final int inc) { source.incrementRequests(inc); } - + /** * @param inc How much to add to sucessfulGetCount. */ public void incrementSucessfulGetRequests(final int inc) { source.incrementSucessfulGetRequests(inc); } - + /** * @param inc How much to add to sucessfulPutCount. */ @@ -64,7 +60,7 @@ public class MetricsREST { public void incrementFailedPutRequests(final int inc) { source.incrementFailedPutRequests(inc); } - + /** * @param inc How much to add to failedGetCount. */ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 2d097752bd9..68d774e420c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,14 +44,10 @@ public class MultiRowResource extends ResourceBase implements Constants { String[] columns = null; /** - * Constructor - * - * @param tableResource - * @param versions - * @throws java.io.IOException + * Constructor nn * @throws java.io.IOException */ public MultiRowResource(TableResource tableResource, String versions, String columnsStr) - throws IOException { + throws IOException { super(); this.tableResource = tableResource; @@ -87,15 +82,14 @@ public class MultiRowResource extends ResourceBase implements Constants { } } - ResultGenerator generator = - ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(this.tableResource.getName(), + rowSpec, null, !params.containsKey(NOCACHE_PARAM_NAME)); Cell value = null; RowModel rowModel = new RowModel(rowSpec.getRow()); if (generator.hasNext()) { while ((value = generator.next()) != null) { - rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil - .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), + CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); } model.addRow(rowModel); } else { @@ -106,11 +100,10 @@ public class MultiRowResource extends ResourceBase implements Constants { } if (model.getRows().isEmpty()) { - //If no rows found. + // If no rows found. servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("No rows found." + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("No rows found." + CRLF).build(); } else { servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(model).build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java index e1623af96a1..b661e46f928 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,16 +60,14 @@ public class NamespacesInstanceResource extends ResourceBase { boolean queryTables = false; /** - * Constructor for standard NamespaceInstanceResource. - * @throws IOException + * Constructor for standard NamespaceInstanceResource. n */ public NamespacesInstanceResource(String namespace) throws IOException { this(namespace, false); } /** - * Constructor for querying namespace table list via NamespaceInstanceResource. - * @throws IOException + * Constructor for querying namespace table list via NamespaceInstanceResource. n */ public NamespacesInstanceResource(String namespace, boolean queryTables) throws IOException { super(); @@ -83,30 +80,29 @@ public class NamespacesInstanceResource extends ResourceBase { * @param context servlet context * @param uriInfo (JAX-RS context variable) request URL * @return A response containing NamespacesInstanceModel for a namespace descriptions and - * TableListModel for a list of namespace tables. 
+ * TableListModel for a list of namespace tables. */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); // Respond to list of namespace tables requests. - if(queryTables){ + if (queryTables) { TableListModel tableModel = new TableListModel(); - try{ + try { HTableDescriptor[] tables = servlet.getAdmin().listTableDescriptorsByNamespace(namespace); - for(int i = 0; i < tables.length; i++){ + for (int i = 0; i < tables.length; i++) { tableModel.add(new TableModel(tables[i].getTableName().getQualifierAsString())); } servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(tableModel).build(); - }catch(IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); throw new RuntimeException("Cannot retrieve table list for '" + namespace + "'."); } @@ -114,8 +110,7 @@ public class NamespacesInstanceResource extends ResourceBase { // Respond to namespace description requests. try { - NamespacesInstanceModel rowModel = - new NamespacesInstanceModel(servlet.getAdmin(), namespace); + NamespacesInstanceModel rowModel = new NamespacesInstanceModel(servlet.getAdmin(), namespace); servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(rowModel).build(); } catch (IOException e) { @@ -126,42 +121,38 @@ public class NamespacesInstanceResource extends ResourceBase { /** * Build a response for PUT alter namespace with properties specified. - * @param model properties used for alter. + * @param model properties used for alter. * @param uriInfo (JAX-RS context variable) request URL * @return response code. */ @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, true, uriInfo); } /** * Build a response for POST create namespace with properties specified. - * @param model properties used for create. + * @param model properties used for create. * @param uriInfo (JAX-RS context variable) request URL * @return response code. */ @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final NamespacesInstanceModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, false, uriInfo); } - // Check that POST or PUT is valid and then update namespace. private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting, - final UriInfo uriInfo) { + final UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace((updateExisting ? 
"PUT " : "POST ") + uriInfo.getAbsolutePath()); } if (model == null) { try { model = new NamespacesInstanceModel(namespace); - } catch(IOException ioe) { + } catch (IOException ioe) { servlet.getMetrics().incrementFailedPutRequests(1); throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); } @@ -171,7 +162,7 @@ public class NamespacesInstanceResource extends ResourceBase { if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) - .entity("Forbidden" + CRLF).build(); + .entity("Forbidden" + CRLF).build(); } Admin admin = null; @@ -179,25 +170,25 @@ public class NamespacesInstanceResource extends ResourceBase { try { admin = servlet.getAdmin(); namespaceExists = doesNamespaceExist(admin, namespace); - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } // Do not allow creation if namespace already exists. - if(!updateExisting && namespaceExists){ + if (!updateExisting && namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' already exists. Use REST PUT " + - "to alter the existing namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Namespace '" + + namespace + "' already exists. Use REST PUT " + "to alter the existing namespace.") + .build(); } // Do not allow altering if namespace does not exist. - if (updateExisting && !namespaceExists){ + if (updateExisting && !namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exist. Use " + - "REST POST to create the namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity( + "Namespace '" + namespace + "' does not exist. Use " + "REST POST to create the namespace.") + .build(); } return createOrUpdate(model, uriInfo, admin, updateExisting); @@ -205,35 +196,36 @@ public class NamespacesInstanceResource extends ResourceBase { // Do the actual namespace create or alter. private Response createOrUpdate(final NamespacesInstanceModel model, final UriInfo uriInfo, - final Admin admin, final boolean updateExisting) { + final Admin admin, final boolean updateExisting) { NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace); builder.addConfiguration(model.getProperties()); - if(model.getProperties().size() > 0){ + if (model.getProperties().size() > 0) { builder.addConfiguration(model.getProperties()); } NamespaceDescriptor nsd = builder.build(); - try{ - if(updateExisting){ + try { + if (updateExisting) { admin.modifyNamespace(nsd); - }else{ + } else { admin.createNamespace(nsd); } - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } servlet.getMetrics().incrementSucessfulPutRequests(1); - return updateExisting ? Response.ok(uriInfo.getAbsolutePath()).build() : - Response.created(uriInfo.getAbsolutePath()).build(); + return updateExisting + ? 
Response.ok(uriInfo.getAbsolutePath()).build() + : Response.created(uriInfo.getAbsolutePath()).build(); } - private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException{ + private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException { NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); - for(int i = 0; i < nd.length; i++){ - if(nd[i].getName().equals(namespaceName)){ + for (int i = 0; i < nd.length; i++) { + if (nd[i].getName().equals(namespaceName)) { return true; } } @@ -247,23 +239,23 @@ public class NamespacesInstanceResource extends ResourceBase { * @return response code. */ @DELETE - public Response deleteNoBody(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response deleteNoBody(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) - .entity("Forbidden" + CRLF).build(); + .entity("Forbidden" + CRLF).build(); } - try{ + try { Admin admin = servlet.getAdmin(); - if (!doesNamespaceExist(admin, namespace)){ - return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exists. Cannot " + - "drop namespace.").build(); + if (!doesNamespaceExist(admin, namespace)) { + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Namespace '" + namespace + "' does not exists. Cannot " + "drop namespace.") + .build(); } admin.deleteNamespace(namespace); @@ -280,8 +272,8 @@ public class NamespacesInstanceResource extends ResourceBase { * Dispatch to NamespaceInstanceResource for getting list of tables. */ @Path("tables") - public NamespacesInstanceResource getNamespaceInstanceResource( - final @PathParam("tables") String namespace) throws IOException { + public NamespacesInstanceResource + getNamespaceInstanceResource(final @PathParam("tables") String namespace) throws IOException { return new NamespacesInstanceResource(this.namespace, true); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java index e458d463f67..a3c0e2d2f1a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -46,8 +44,7 @@ public class NamespacesResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(NamespacesResource.class); /** - * Constructor - * @throws IOException + * Constructor n */ public NamespacesResource() throws IOException { super(); @@ -60,8 +57,8 @@ public class NamespacesResource extends ResourceBase { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -83,7 +80,7 @@ public class NamespacesResource extends ResourceBase { */ @Path("{namespace}") public NamespacesInstanceResource getNamespaceInstanceResource( - final @PathParam("namespace") String namespace) throws IOException { + final @PathParam("namespace") String namespace) throws IOException { return new NamespacesInstanceResource(namespace); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java index d5e4354e439..f90354b7ab7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Common interface for models capable of supporting protobuf marshalling - * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and - * ProtobufMessageBodyProducer adapters. + * Common interface for models capable of supporting protobuf marshalling and unmarshalling. Hooks + * up to the ProtobufMessageBodyConsumer and ProtobufMessageBodyProducer adapters. */ @InterfaceAudience.Private public interface ProtobufMessageHandler { @@ -38,9 +34,7 @@ public interface ProtobufMessageHandler { /** * Initialize the model from a protobuf representation. * @param message the raw bytes of the protobuf message - * @return reference to self for convenience - * @throws IOException + * @return reference to self for convenience n */ - ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException; + ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java index d1ba5b7dd82..eadd6a9334b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,15 +50,15 @@ public class ProtobufStreamingOutput implements StreamingOutput { this.limit = limit; this.fetchSize = fetchSize; if (LOG.isTraceEnabled()) { - LOG.trace("Created StreamingOutput with content type = " + this.contentType - + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize); + LOG.trace("Created StreamingOutput with content type = " + this.contentType + " user limit : " + + this.limit + " scan fetch size : " + this.fetchSize); } } @Override public void write(OutputStream outStream) throws IOException, WebApplicationException { Result[] rowsToSend; - if(limit < fetchSize){ + if (limit < fetchSize) { rowsToSend = this.resultScanner.next(limit); writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream); } else { @@ -69,7 +69,7 @@ public class ProtobufStreamingOutput implements StreamingOutput { } else { rowsToSend = this.resultScanner.next(this.fetchSize); } - if(rowsToSend.length == 0){ + if (rowsToSend.length == 0) { break; } count = count - rowsToSend.length; @@ -79,9 +79,9 @@ public class ProtobufStreamingOutput implements StreamingOutput { } private void writeToStream(CellSetModel model, String contentType, OutputStream outStream) - throws IOException { + throws IOException { byte[] objectBytes = model.createProtobufOutput(); - outStream.write(Bytes.toBytes((short)objectBytes.length)); + outStream.write(Bytes.toBytes((short) objectBytes.length)); outStream.write(objectBytes); outStream.flush(); if (LOG.isTraceEnabled()) { @@ -96,8 +96,8 @@ public class ProtobufStreamingOutput implements StreamingOutput { RowModel rModel = new RowModel(rowKey); List kvs = rs.listCells(); for (Cell kv : kvs) { - rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv - .getTimestamp(), CellUtil.cloneValue(kv))); + rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), + kv.getTimestamp(), CellUtil.cloneValue(kv))); } cellSetModel.addRow(rModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 954503242da..02f8cb6a678 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.lang.management.ManagementFactory; @@ -88,14 +87,14 @@ public class RESTServer implements Constants { static final String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled"; static final boolean REST_CSRF_ENABLED_DEFAULT = false; boolean restCSRFEnabled = false; - static final String REST_CSRF_CUSTOM_HEADER_KEY ="hbase.rest.csrf.custom.header"; + static final String REST_CSRF_CUSTOM_HEADER_KEY = "hbase.rest.csrf.custom.header"; static final String REST_CSRF_CUSTOM_HEADER_DEFAULT = "X-XSRF-HEADER"; static final String REST_CSRF_METHODS_TO_IGNORE_KEY = "hbase.rest.csrf.methods.to.ignore"; static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login"; static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size"; - static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE -1; + static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE - 1; private static final String PATH_SPEC_ANY = "/*"; @@ -107,8 +106,8 @@ public class RESTServer implements Constants { // HACK, making this static for AuthFilter to get at our configuration. Necessary for unit tests. @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value={"ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL"}, - justification="For testing") + value = { "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL" }, + justification = "For testing") public static Configuration conf = null; private final UserProvider userProvider; private Server server; @@ -122,16 +121,17 @@ public class RESTServer implements Constants { private static void printUsageAndExit(Options options, int exitCode) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("hbase rest start", "", options, - "\nTo run the REST server as a daemon, execute " + - "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", true); + "\nTo run the REST server as a daemon, execute " + + "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", + true); System.exit(exitCode); } void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) { restCSRFEnabled = conf.getBoolean(REST_CSRF_ENABLED_KEY, REST_CSRF_ENABLED_DEFAULT); if (restCSRFEnabled) { - Map restCsrfParams = RestCsrfPreventionFilter - .getFilterParams(conf, "hbase.rest-csrf."); + Map restCsrfParams = + RestCsrfPreventionFilter.getFilterParams(conf, "hbase.rest-csrf."); FilterHolder holder = new FilterHolder(); holder.setName("csrf"); holder.setClassName(RestCsrfPreventionFilter.class.getName()); @@ -141,7 +141,7 @@ public class RESTServer implements Constants { } private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, - Configuration conf) { + Configuration conf) { FilterHolder holder = new FilterHolder(); holder.setName("clickjackingprevention"); holder.setClassName(ClickjackingPreventionFilter.class.getName()); @@ -149,8 +149,8 @@ public class RESTServer implements Constants { ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); } - private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, - Configuration conf, boolean isSecure) { + private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf, + boolean isSecure) { FilterHolder holder = new FilterHolder(); holder.setName("securityheaders"); 
holder.setClassName(SecurityHeadersFilter.class.getName()); @@ -159,13 +159,12 @@ public class RESTServer implements Constants { } // login the server principal (if using secure Hadoop) - private static Pair> loginServerPrincipal( - UserProvider userProvider, Configuration conf) throws Exception { + private static Pair> + loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception { Class containerClass = ServletContainer.class; if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); String keytabFilename = conf.get(REST_KEYTAB_FILE); Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(), REST_KEYTAB_FILE + " should be set if security is enabled"); @@ -181,7 +180,7 @@ public class RESTServer implements Constants { FilterHolder authFilter = new FilterHolder(); authFilter.setClassName(AuthFilter.class.getName()); authFilter.setName("AuthenticationFilter"); - return new Pair<>(authFilter,containerClass); + return new Pair<>(authFilter, containerClass); } } return new Pair<>(null, containerClass); @@ -190,8 +189,8 @@ public class RESTServer implements Constants { private static void parseCommandLine(String[] args, Configuration conf) { Options options = new Options(); options.addOption("p", "port", true, "Port to bind to [default: " + DEFAULT_LISTEN_PORT + "]"); - options.addOption("ro", "readonly", false, "Respond only to GET HTTP " + - "method requests [default: false]"); + options.addOption("ro", "readonly", false, + "Respond only to GET HTTP " + "method requests [default: false]"); options.addOption("i", "infoport", true, "Port for WEB UI"); CommandLine commandLine = null; @@ -250,23 +249,22 @@ public class RESTServer implements Constants { } } - /** * Runs the REST server. */ public synchronized void run() throws Exception { - Pair> pair = loginServerPrincipal( - userProvider, conf); + Pair> pair = + loginServerPrincipal(userProvider, conf); FilterHolder authFilter = pair.getFirst(); Class containerClass = pair.getSecond(); RESTServlet servlet = RESTServlet.getInstance(conf, userProvider); - // Set up the Jersey servlet container for Jetty // The Jackson1Feature is a signal to Jersey that it should use jackson doing json. - // See here: https://stackoverflow.com/questions/39458230/how-register-jacksonfeature-on-clientconfig - ResourceConfig application = new ResourceConfig(). - packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class); + // See here: + // https://stackoverflow.com/questions/39458230/how-register-jacksonfeature-on-clientconfig + ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest") + .register(JacksonJaxbJsonProvider.class); // Using our custom ServletContainer is tremendously important. This is what makes sure the // UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself. 
ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application); @@ -282,23 +280,24 @@ public class RESTServer implements Constants { // Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use // bounded {@link ArrayBlockingQueue} with the given size int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1); - int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); - QueuedThreadPool threadPool = queueSize > 0 ? - new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : - new QueuedThreadPool(maxThreads, minThreads, idleTimeout); + int idleTimeout = + servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); + QueuedThreadPool threadPool = queueSize > 0 + ? new QueuedThreadPool(maxThreads, minThreads, idleTimeout, + new ArrayBlockingQueue<>(queueSize)) + : new QueuedThreadPool(maxThreads, minThreads, idleTimeout); this.server = new Server(threadPool); // Setup JMX - MBeanContainer mbContainer=new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); + MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); server.addEventListener(mbContainer); server.addBean(mbContainer); - String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"); int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080); - int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, - DEFAULT_HTTP_HEADER_CACHE_SIZE); + int httpHeaderCacheSize = + servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, DEFAULT_HTTP_HEADER_CACHE_SIZE); HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSecureScheme("https"); httpConfig.setSecurePort(servicePort); @@ -318,56 +317,55 @@ public class RESTServer implements Constants { SslContextFactory sslCtxFactory = new SslContextFactory(); String keystore = conf.get(REST_SSL_KEYSTORE_STORE); String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE); - String password = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_PASSWORD, null); - String keyPassword = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_KEYPASSWORD, password); + String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null); + String keyPassword = + HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password); sslCtxFactory.setKeyStorePath(keystore); - if(StringUtils.isNotBlank(keystoreType)) { + if (StringUtils.isNotBlank(keystoreType)) { sslCtxFactory.setKeyStoreType(keystoreType); } sslCtxFactory.setKeyStorePassword(password); sslCtxFactory.setKeyManagerPassword(keyPassword); String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE); - if(StringUtils.isNotBlank(trustStore)) { + if (StringUtils.isNotBlank(trustStore)) { sslCtxFactory.setTrustStorePath(trustStore); } String trustStorePassword = HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); - if(StringUtils.isNotBlank(trustStorePassword)) { + if (StringUtils.isNotBlank(trustStorePassword)) { sslCtxFactory.setTrustStorePassword(trustStorePassword); } String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE); - if(StringUtils.isNotBlank(trustStoreType)) { + if (StringUtils.isNotBlank(trustStoreType)) { sslCtxFactory.setTrustStoreType(trustStoreType); } - String[] excludeCiphers = servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_CIPHER_SUITES, 
ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (excludeCiphers.length != 0) { sslCtxFactory.setExcludeCipherSuites(excludeCiphers); } - String[] includeCiphers = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (includeCiphers.length != 0) { sslCtxFactory.setIncludeCipherSuites(includeCiphers); } - String[] excludeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (excludeProtocols.length != 0) { sslCtxFactory.setExcludeProtocols(excludeProtocols); } - String[] includeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (includeProtocols.length != 0) { sslCtxFactory.setIncludeProtocols(includeProtocols); } serverConnector = new ServerConnector(server, - new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), - new HttpConnectionFactory(httpsConfig)); + new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), + new HttpConnectionFactory(httpsConfig)); } else { serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig)); } @@ -384,15 +382,16 @@ public class RESTServer implements Constants { server.setStopAtShutdown(true); // set up context - ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); + ServletContextHandler ctxHandler = + new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); ctxHandler.addServlet(sh, PATH_SPEC_ANY); if (authFilter != null) { ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); } // Load filters from configuration. - String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, - GzipFilter.class.getName()); + String[] filterClasses = + servlet.getConfiguration().getStrings(FILTER_CLASSES, GzipFilter.class.getName()); for (String filter : filterClasses) { filter = filter.trim(); ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); @@ -401,7 +400,7 @@ public class RESTServer implements Constants { addClickjackingPreventionFilter(ctxHandler, conf); addSecurityHeadersFilter(ctxHandler, conf, isSecure); HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration() - .getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT)); + .getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT)); // Put up info server. 
int port = conf.getInt("hbase.rest.info.port", 8085); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index 6c71bb6222e..10b96ec9284 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.ParseFilter; @@ -32,6 +27,9 @@ import org.apache.hadoop.hbase.util.ConnectionCache; import org.apache.hadoop.hbase.util.JvmPauseMonitor; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Singleton class encapsulating global REST servlet state and functions. 
@@ -58,7 +56,7 @@ public class RESTServlet implements Constants { * @return the RESTServlet singleton instance */ public synchronized static RESTServlet getInstance() { - assert(INSTANCE != null); + assert (INSTANCE != null); return INSTANCE; } @@ -70,13 +68,12 @@ public class RESTServlet implements Constants { } /** - * @param conf Existing configuration to use in rest servlet + * @param conf Existing configuration to use in rest servlet * @param userProvider the login user provider - * @return the RESTServlet singleton instance - * @throws IOException + * @return the RESTServlet singleton instance n */ - public synchronized static RESTServlet getInstance(Configuration conf, - UserProvider userProvider) throws IOException { + public synchronized static RESTServlet getInstance(Configuration conf, UserProvider userProvider) + throws IOException { if (INSTANCE == null) { INSTANCE = new RESTServlet(conf, userProvider); } @@ -92,20 +89,17 @@ public class RESTServlet implements Constants { /** * Constructor with existing configuration - * @param conf existing configuration - * @param userProvider the login user provider - * @throws IOException + * @param conf existing configuration + * @param userProvider the login user provider n */ - RESTServlet(final Configuration conf, - final UserProvider userProvider) throws IOException { + RESTServlet(final Configuration conf, final UserProvider userProvider) throws IOException { this.realUser = userProvider.getCurrent().getUGI(); this.conf = conf; registerCustomFilter(conf); int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000); int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000); - connectionCache = new ConnectionCache( - conf, userProvider, cleanInterval, maxIdleTime); + connectionCache = new ConnectionCache(conf, userProvider, cleanInterval, maxIdleTime); if (supportsProxyuser()) { ProxyUsers.refreshSuperUserGroupsConfiguration(conf); } @@ -136,8 +130,7 @@ public class RESTServlet implements Constants { } /** - * Helper method to determine if server should - * only respond to GET HTTP method requests. + * Helper method to determine if server should only respond to GET HTTP method requests. * @return boolean for server read-only state */ boolean isReadOnly() { @@ -166,8 +159,7 @@ public class RESTServlet implements Constants { for (String filterClass : filterList) { String[] filterPart = filterClass.split(":"); if (filterPart.length != 2) { - LOG.warn( - "Invalid filter specification " + filterClass + " - skipping"); + LOG.warn("Invalid filter specification " + filterClass + " - skipping"); } else { ParseFilter.registerFilter(filterPart[0], filterPart[1]); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java index 28cf4cba9fa..d605aec39c8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; +import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; + import java.io.IOException; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -31,11 +31,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; -import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; /** - * REST servlet container. It is used to get the remote request user - * without going through @HttpContext, so that we can minimize code changes. + * REST servlet container. It is used to get the remote request user without going + * through @HttpContext, so that we can minimize code changes. */ @InterfaceAudience.Private public class RESTServletContainer extends ServletContainer { @@ -46,13 +45,12 @@ public class RESTServletContainer extends ServletContainer { } /** - * This container is used only if authentication and - * impersonation is enabled. The remote request user is used - * as a proxy user for impersonation in invoking any REST service. + * This container is used only if authentication and impersonation is enabled. The remote request + * user is used as a proxy user for impersonation in invoking any REST service. */ @Override - public void service(final HttpServletRequest request, - final HttpServletResponse response) throws ServletException, IOException { + public void service(final HttpServletRequest request, final HttpServletResponse response) + throws ServletException, IOException { final HttpServletRequest lowerCaseRequest = toLowerCase(request); final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas"); RESTServlet servlet = RESTServlet.getInstance(); @@ -69,7 +67,7 @@ public class RESTServletContainer extends ServletContainer { // validate the proxy user authorization try { ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf); - } catch(AuthorizationException e) { + } catch (AuthorizationException e) { throw new ServletException(e.getMessage()); } servlet.setEffectiveUser(doAsUserFromQuery); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 784894e2757..21c97302603 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -57,9 +55,7 @@ public class RegionsResource extends ResourceBase { TableResource tableResource; /** - * Constructor - * @param tableResource - * @throws IOException + * Constructor nn */ public RegionsResource(TableResource tableResource) throws IOException { super(); @@ -67,8 +63,8 @@ public class RegionsResource extends ResourceBase { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -98,14 +94,12 @@ public class RegionsResource extends ResourceBase { return response.build(); } catch (TableNotFoundException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java index 11e0949711a..0f00479ff15 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -33,7 +31,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.Response; public class ResourceBase implements Constants { RESTServlet servlet; - Class accessDeniedClazz; + Class accessDeniedClazz; public ResourceBase() throws IOException { servlet = RESTServlet.getInstance(); @@ -42,54 +40,43 @@ public class ResourceBase implements Constants { } catch (ClassNotFoundException e) { } } - + protected Response processException(Throwable exp) { Throwable curr = exp; - if(accessDeniedClazz != null) { - //some access denied exceptions are buried + if (accessDeniedClazz != null) { + // some access denied exceptions are buried while (curr != null) { - if(accessDeniedClazz.isAssignableFrom(curr.getClass())) { + if (accessDeniedClazz.isAssignableFrom(curr.getClass())) { throw new WebApplicationException( - Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } curr = curr.getCause(); } } - //TableNotFound may also be buried one level deep - if (exp instanceof TableNotFoundException || - exp.getCause() instanceof TableNotFoundException) { + // TableNotFound may also be buried one level deep + if (exp instanceof TableNotFoundException || exp.getCause() instanceof TableNotFoundException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } - if (exp instanceof NoSuchColumnFamilyException){ + if (exp instanceof NoSuchColumnFamilyException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RuntimeException) { throw new WebApplicationException( - Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RetriesExhaustedWithDetailsException) { RetriesExhaustedWithDetailsException retryException = - (RetriesExhaustedWithDetailsException) exp; + (RetriesExhaustedWithDetailsException) exp; processException(retryException.getCause(0)); } throw new WebApplicationException( - Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java index 41135a814f3..c73e86603d1 100644 --- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,19 +19,16 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.rest.model.ScannerModel; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public abstract class ResultGenerator implements Iterator { - public static ResultGenerator fromRowSpec(final String table, - final RowSpec rowspec, final Filter filter, final boolean cacheBlocks) - throws IOException { + public static ResultGenerator fromRowSpec(final String table, final RowSpec rowspec, + final Filter filter, final boolean cacheBlocks) throws IOException { if (rowspec.isSingleRow()) { return new RowResultGenerator(table, rowspec, filter, cacheBlocks); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java index 3f5e1e1f6f8..9baf7aa7c04 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
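ResourceBase.processException(), reformatted above, is the shared translation point from client-library exceptions to HTTP statuses: AccessDeniedException becomes 403, TableNotFoundException and NoSuchColumnFamilyException become 404, other RuntimeExceptions become 400, RetriesExhaustedWithDetailsException is unwrapped, and everything else falls through to 503. A hedged sketch of how a resource typically leans on it, assuming the class sits alongside the real resources in org.apache.hadoop.hbase.rest (the class name and operation are placeholders):

package org.apache.hadoop.hbase.rest;

import java.io.IOException;
import org.apache.hbase.thirdparty.javax.ws.rs.core.Response;

// Illustrative subclass; RowResource, ScannerResource, etc. follow this shape.
public class ExampleResource extends ResourceBase {
  public ExampleResource() throws IOException {
  }

  Response doGet() {
    servlet.getMetrics().incrementRequests(1);
    try {
      // ... perform the table operation and build the 200 response ...
      return Response.ok().build();
    } catch (Exception e) {
      servlet.getMetrics().incrementFailedGetRequests(1);
      // Maps AccessDenied->403, TableNotFound/NoSuchColumnFamily->404,
      // RuntimeException->400, anything else->503 (throws WebApplicationException).
      return processException(e);
    }
  }
}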
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -50,8 +48,7 @@ public class RootResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public RootResource() throws IOException { super(); @@ -60,15 +57,15 @@ public class RootResource extends ResourceBase { private final TableListModel getTableList() throws IOException { TableListModel tableList = new TableListModel(); TableName[] tableNames = servlet.getAdmin().listTableNames(); - for (TableName name: tableNames) { + for (TableName name : tableNames) { tableList.add(new TableModel(name.getNameAsString())); } return tableList; } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -86,8 +83,7 @@ public class RootResource extends ResourceBase { } @Path("status/cluster") - public StorageClusterStatusResource getClusterStatusResource() - throws IOException { + public StorageClusterStatusResource getClusterStatusResource() throws IOException { return new StorageClusterStatusResource(); } @@ -97,8 +93,7 @@ public class RootResource extends ResourceBase { } @Path("{table}") - public TableResource getTableResource( - final @PathParam("table") String table) throws IOException { + public TableResource getTableResource(final @PathParam("table") String table) throws IOException { return new TableResource(table); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index 3489177d892..16259c34167 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
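RootResource.getTableList() above simply asks the shared Admin for all table names and wraps them in a TableListModel. The same listing with a plain HBase client, independent of the REST layer (connection settings and the printed output are only illustrative), is roughly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListTables {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
      Admin admin = connection.getAdmin()) {
      for (TableName name : admin.listTableNames()) { // same call RootResource uses
        System.out.println(name.getNameAsString());
      }
    }
  }
}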
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -71,16 +69,10 @@ public class RowResource extends ResourceBase { private boolean returnResult = false; /** - * Constructor - * @param tableResource - * @param rowspec - * @param versions - * @param check - * @param returnResult - * @throws IOException + * Constructor nnnnnn */ - public RowResource(TableResource tableResource, String rowspec, - String versions, String check, String returnResult) throws IOException { + public RowResource(TableResource tableResource, String rowspec, String versions, String check, + String returnResult) throws IOException { super(); this.tableResource = tableResource; this.rowspec = new RowSpec(rowspec); @@ -94,8 +86,7 @@ public class RowResource extends ResourceBase { } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -103,14 +94,12 @@ public class RowResource extends ResourceBase { servlet.getMetrics().incrementRequests(1); MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } int count = 0; CellSetModel model = new CellSetModel(); @@ -143,7 +132,7 @@ public class RowResource extends ResourceBase { @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); // doesn't make sense to use a non specific coordinate as this can only @@ -151,24 +140,22 @@ public class RowResource extends ResourceBase { if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) { servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + - "in the row. Using the 'Accept' header with one of these formats lets you " + - "retrieve the entire row if it has multiple columns: " + - // Same as the @Produces list for the get method. - MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + - MIMETYPE_PROTOBUF + ", " + MIMETYPE_PROTOBUF_IETF + - CRLF).build(); + .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + + "in the row. Using the 'Accept' header with one of these formats lets you " + + "retrieve the entire row if it has multiple columns: " + + // Same as the @Produces list for the get method. 
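The update() method just below converts each posted CellModel into a Cell and adds it to a Put via the CellBuilder chain before calling Table.put(). Extracted from the servlet context, the same construction looks roughly like the sketch here; the class name, row, column and value are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell.Type;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutFromColumnSketch {
  static Put toPut(byte[] row, byte[] column, long timestamp, byte[] value) throws IOException {
    byte[][] parts = CellUtil.parseColumn(column); // splits "family:qualifier"
    if (parts.length != 2) {
      throw new IllegalArgumentException("column must be family:qualifier");
    }
    Put put = new Put(row);
    // Same builder chain the reformatted RowResource.update() uses.
    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
      .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(timestamp).setType(Type.Put)
      .setValue(value).build());
    return put;
  }

  public static void main(String[] args) throws IOException {
    Put put = toPut(Bytes.toBytes("row1"), Bytes.toBytes("cf:a"), System.currentTimeMillis(),
      Bytes.toBytes("v1"));
    System.out.println(put);
  }
}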
+ MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + MIMETYPE_PROTOBUF + ", " + + MIMETYPE_PROTOBUF_IETF + CRLF) + .build(); } MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } Cell value = generator.next(); ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); @@ -185,9 +172,8 @@ public class RowResource extends ResourceBase { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (CHECK_PUT.equalsIgnoreCase(check)) { @@ -199,29 +185,27 @@ public class RowResource extends ResourceBase { } else if (CHECK_INCREMENT.equalsIgnoreCase(check)) { return increment(model); } else if (check != null && check.length() > 0) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Invalid check value '" + check + "'" + CRLF).build(); } Table table = null; try { List rows = model.getRows(); List puts = new ArrayList<>(); - for (RowModel row: rows) { + for (RowModel row : rows) { byte[] key = row.getKey(); if (key == null) { key = rowspec.getRow(); } if (key == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key not specified." + CRLF).build(); } Put put = new Put(key); int i = 0; - for (CellModel cell: row.getCells()) { + for (CellModel cell : row.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -232,24 +216,17 @@ public class RowResource extends ResourceBase { } if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); } puts.add(put); if (LOG.isTraceEnabled()) { @@ -276,14 +253,12 @@ public class RowResource extends ResourceBase { } // This currently supports only update of one row at a time. - Response updateBinary(final byte[] message, final HttpHeaders headers, - final boolean replace) { + Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } Table table = null; try { @@ -308,25 +283,18 @@ public class RowResource extends ResourceBase { } if (column == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } Put put = new Put(row); byte parts[][] = CellUtil.parseColumn(column); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(timestamp) - .setType(Type.Put) - .setValue(message) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(timestamp).setType(Type.Put) + .setValue(message).build()); table = servlet.getTable(tableResource.getName()); table.put(put); if (LOG.isTraceEnabled()) { @@ -349,45 +317,39 @@ public class RowResource extends ResourceBase { } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, true); } @PUT @Consumes(MIMETYPE_BINARY) - public Response putBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response putBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, true); } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, false); } @POST @Consumes(MIMETYPE_BINARY) - public Response postBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response postBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, false); } @@ -400,9 +362,8 @@ public class RowResource extends ResourceBase { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } Delete 
delete = null; if (rowspec.hasTimestamp()) { @@ -411,7 +372,7 @@ public class RowResource extends ResourceBase { delete = new Delete(rowspec.getRow()); } - for (byte[] column: rowspec.getColumns()) { + for (byte[] column : rowspec.getColumns()) { byte[][] split = CellUtil.parseColumn(column); if (rowspec.hasTimestamp()) { if (split.length == 1) { @@ -419,9 +380,8 @@ public class RowResource extends ResourceBase { } else if (split.length == 2) { delete.addColumns(split[0], split[1], rowspec.getTimestamp()); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } else { if (split.length == 1) { @@ -429,9 +389,8 @@ public class RowResource extends ResourceBase { } else if (split.length == 2) { delete.addColumns(split[0], split[1]); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } } @@ -459,9 +418,8 @@ public class RowResource extends ResourceBase { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndPut on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndPut on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -472,7 +430,7 @@ public class RowResource extends ResourceBase { if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); @@ -485,12 +443,10 @@ public class RowResource extends ResourceBase { int cellModelCount = cellModels.size(); if (key == null || cellModelCount <= 1) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response - .status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) - .entity( - "Bad request: Either row key is null or no data found for columns specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity( + "Bad request: Either row key is null or no data found for columns specified." + CRLF) + .build(); } Put put = new Put(key); @@ -503,34 +459,26 @@ public class RowResource extends ResourceBase { // Copy all the cells to the Put request // and track if the check cell's latest value is also sent - for (int i = 0, n = cellModelCount - 1; i < n ; i++) { + for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); - if(Bytes.equals(col, - valueToCheckCell.getColumn())) { + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); + if (Bytes.equals(col, valueToCheckCell.getColumn())) { valueToPutCell = cell; } } @@ -538,16 +486,15 @@ public class RowResource extends ResourceBase { if (valueToPutCell == null) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: The column to put and check do not match." + CRLF).build(); + .entity("Bad request: The column to put and check do not match." + CRLF).build(); } else { retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1]) .ifEquals(valueToCheckCell.getValue()).thenPut(put); } } else { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } if (LOG.isTraceEnabled()) { @@ -555,9 +502,8 @@ public class RowResource extends ResourceBase { } if (!retValue) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Value not Modified" + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulPutRequests(1); @@ -577,9 +523,8 @@ public class RowResource extends ResourceBase { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndDelete on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndDelete on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -590,9 +535,8 @@ public class RowResource extends ResourceBase { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." 
+ CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -601,9 +545,8 @@ public class RowResource extends ResourceBase { } if (key == null) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } List cellModels = rowModel.getCells(); @@ -611,31 +554,29 @@ public class RowResource extends ResourceBase { delete = new Delete(key); boolean retValue; - CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1); + CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount - 1); byte[] valueToDeleteColumn = valueToDeleteCell.getColumn(); if (valueToDeleteColumn == null) { try { valueToDeleteColumn = rowspec.getColumns()[0]; } catch (final ArrayIndexOutOfBoundsException e) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column not specified for check." + CRLF).build(); } } - byte[][] parts ; + byte[][] parts; // Copy all the cells to the Delete request if extra cells are sent - if(cellModelCount > 1) { + if (cellModelCount > 1) { for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } parts = CellUtil.parseColumn(col); @@ -647,10 +588,8 @@ public class RowResource extends ResourceBase { delete.addColumn(parts[0], parts[1], cell.getTimestamp()); } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) - .entity("Bad request: Column to delete incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to delete incorrectly specified." + CRLF).build(); } } } @@ -660,36 +599,33 @@ public class RowResource extends ResourceBase { if (parts[1].length != 0) { // To support backcompat of deleting a cell // if that is the only cell passed to the rest api - if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], parts[1]); } retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1]) - .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); } else { // The case of empty qualifier. 
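Both checkAndPut() above and the checkAndDelete() logic here funnel into the client's CheckAndMutate builder: compare a single column's current value, then apply the Put or Delete atomically. Outside the servlet, with placeholder table, column and value names, the two calls reduce to roughly:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  static void run(Connection connection) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("a");
    try (Table table = connection.getTable(TableName.valueOf("mytable"))) {
      // Put only if cf:a currently equals "old" (same builder chain RowResource uses).
      Put put = new Put(row).addColumn(family, qualifier, Bytes.toBytes("new"));
      boolean putApplied = table.checkAndMutate(row, family).qualifier(qualifier)
        .ifEquals(Bytes.toBytes("old")).thenPut(put);

      // Delete the column only if it still equals "new".
      Delete delete = new Delete(row).addColumns(family, qualifier);
      boolean deleteApplied = table.checkAndMutate(row, family).qualifier(qualifier)
        .ifEquals(Bytes.toBytes("new")).thenDelete(delete);

      System.out.println(putApplied + " " + deleteApplied);
    }
  }
}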
- if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY)); } - retValue = table.checkAndMutate(key, parts[0]) - .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + retValue = table.checkAndMutate(key, parts[0]).ifEquals(valueToDeleteCell.getValue()) + .thenDelete(delete); } } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column to check incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to check incorrectly specified." + CRLF).build(); } if (LOG.isTraceEnabled()) { - LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " - + retValue); + LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " + retValue); } if (!retValue) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity(" Delete check failed." + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulDeleteRequests(1); @@ -709,9 +645,8 @@ public class RowResource extends ResourceBase { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Append on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Append on + * HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -722,9 +657,8 @@ public class RowResource extends ResourceBase { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -733,15 +667,14 @@ public class RowResource extends ResourceBase { } if (key == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } append = new Append(key); append.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -752,16 +685,14 @@ public class RowResource extends ResourceBase { } if (col == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } append.add(parts[0], parts[1], cell.getValue()); } @@ -773,16 +704,15 @@ public class RowResource extends ResourceBase { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Append return empty." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Append return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rRowModel); servlet.getMetrics().incrementSucessfulAppendRequests(1); @@ -805,9 +735,8 @@ public class RowResource extends ResourceBase { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Increment on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Increment + * on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -818,9 +747,8 @@ public class RowResource extends ResourceBase { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -829,15 +757,14 @@ public class RowResource extends ResourceBase { } if (key == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } increment = new Increment(key); increment.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -848,18 +775,17 @@ public class RowResource extends ResourceBase { } if (col == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } - increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue()))); + increment.addColumn(parts[0], parts[1], + Long.parseLong(Bytes.toStringBinary(cell.getValue()))); } if (LOG.isDebugEnabled()) { @@ -870,16 +796,15 @@ public class RowResource extends ResourceBase { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Increment return empty." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Increment return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rowModel); servlet.getMetrics().incrementSucessfulIncrementRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java index f3d48fd49f0..761b5b91a40 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
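The append() and increment() handlers above build client Append and Increment mutations from the posted cells; increment() additionally parses each cell value as a long. A bare-bones client-side equivalent, with placeholder table and column names and a Connection obtained as in the earlier listing sketch, is approximately:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendIncrementSketch {
  static void run(Connection connection) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] family = Bytes.toBytes("cf");
    try (Table table = connection.getTable(TableName.valueOf("mytable"))) {
      Append append = new Append(row);
      append.setReturnResults(true); // mirrors RowResource honouring the returnResult flag
      append.addColumn(family, Bytes.toBytes("log"), Bytes.toBytes("-more"));
      Result appended = table.append(append);

      Increment increment = new Increment(row);
      increment.setReturnResults(true);
      increment.addColumn(family, Bytes.toBytes("counter"), 1L); // increment amount is a long
      Result incremented = table.increment(increment);

      System.out.println(appended + " / " + incremented);
    }
  }
}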
See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; import java.util.Iterator; import java.util.NoSuchElementException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -30,11 +28,8 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.security.AccessDeniedException; - import org.apache.hadoop.util.StringUtils; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,9 +40,8 @@ public class RowResultGenerator extends ResultGenerator { private Iterator valuesI; private Cell cache; - public RowResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public RowResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { try (Table table = RESTServlet.getInstance().getTable(tableName)) { Get get = new Get(rowspec.getRow()); if (rowspec.hasColumns()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java index c510c9ed797..c9993336fa1 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.UnsupportedEncodingException; @@ -26,22 +24,19 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Parses a path based row/column/timestamp specification into its component - * elements. + * Parses a path based row/column/timestamp specification into its component elements. *

      - * */ @InterfaceAudience.Private public class RowSpec { public static final long DEFAULT_START_TIMESTAMP = 0; public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE; - + private byte[] row = HConstants.EMPTY_START_ROW; private byte[] endRow = null; private TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); @@ -62,8 +57,7 @@ public class RowSpec { i = parseQueryParams(path, i); } - private int parseRowKeys(final String path, int i) - throws IllegalArgumentException { + private int parseRowKeys(final String path, int i) throws IllegalArgumentException { String startRow = null, endRow = null; try { StringBuilder sb = new StringBuilder(); @@ -76,10 +70,8 @@ public class RowSpec { String row = startRow = sb.toString(); int idx = startRow.indexOf(','); if (idx != -1) { - startRow = URLDecoder.decode(row.substring(0, idx), - HConstants.UTF8_ENCODING); - endRow = URLDecoder.decode(row.substring(idx + 1), - HConstants.UTF8_ENCODING); + startRow = URLDecoder.decode(row.substring(0, idx), HConstants.UTF8_ENCODING); + endRow = URLDecoder.decode(row.substring(idx + 1), HConstants.UTF8_ENCODING); } else { startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING); } @@ -93,13 +85,11 @@ public class RowSpec { // table scanning if (startRow.charAt(startRow.length() - 1) == '*') { if (endRow != null) - throw new IllegalArgumentException("invalid path: start row "+ - "specified with wildcard"); - this.row = Bytes.toBytes(startRow.substring(0, - startRow.lastIndexOf("*"))); + throw new IllegalArgumentException("invalid path: start row " + "specified with wildcard"); + this.row = Bytes.toBytes(startRow.substring(0, startRow.lastIndexOf("*"))); this.endRow = new byte[this.row.length + 1]; System.arraycopy(this.row, 0, this.endRow, 0, this.row.length); - this.endRow[this.row.length] = (byte)255; + this.endRow[this.row.length] = (byte) 255; } else { this.row = Bytes.toBytes(startRow.toString()); if (endRow != null) { @@ -145,8 +135,7 @@ public class RowSpec { return i; } - private int parseTimestamp(final String path, int i) - throws IllegalArgumentException { + private int parseTimestamp(final String path, int i) throws IllegalArgumentException { if (i >= path.length()) { return i; } @@ -163,8 +152,7 @@ public class RowSpec { i++; } try { - time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -176,8 +164,7 @@ public class RowSpec { i++; } try { - time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -206,8 +193,7 @@ public class RowSpec { } StringBuilder query = new StringBuilder(); try { - query.append(URLDecoder.decode(path.substring(i), - HConstants.UTF8_ENCODING)); + query.append(URLDecoder.decode(path.substring(i), HConstants.UTF8_ENCODING)); } catch (UnsupportedEncodingException e) { // should not happen throw new RuntimeException(e); @@ -234,39 +220,41 @@ public class RowSpec { break; } switch (what) { - case 'm': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + case 'm': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || 
c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxVersions = Integer.parseInt(sb.toString()); } - maxVersions = Integer.parseInt(sb.toString()); - } break; - case 'n': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + break; + case 'n': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxValues = Integer.parseInt(sb.toString()); } - maxValues = Integer.parseInt(sb.toString()); - } break; - default: - throw new IllegalArgumentException("unknown parameter '" + c + "'"); + break; + default: + throw new IllegalArgumentException("unknown parameter '" + c + "'"); } } return i; } - public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, - long startTime, long endTime, int maxVersions) { + public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, long startTime, long endTime, + int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -277,15 +265,16 @@ public class RowSpec { this.maxVersions = maxVersions; } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions, Collection labels) { + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions, Collection labels) { this(startRow, endRow, columns, startTime, endTime, maxVersions); - if(labels != null) { + if (labels != null) { this.labels.addAll(labels); } } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions) { + + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -319,7 +308,7 @@ public class RowSpec { public boolean hasColumns() { return !columns.isEmpty(); } - + public boolean hasLabels() { return !labels.isEmpty(); } @@ -347,7 +336,7 @@ public class RowSpec { public byte[][] getColumns() { return columns.toArray(new byte[columns.size()][]); } - + public List getLabels() { return labels; } @@ -384,11 +373,11 @@ public class RowSpec { result.append(Bytes.toString(row)); } result.append("', endRow => '"); - if (endRow != null) { + if (endRow != null) { result.append(Bytes.toString(endRow)); } result.append("', columns => ["); - for (byte[] col: columns) { + for (byte[] col : columns) { result.append(" '"); result.append(Bytes.toString(col)); result.append("'"); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index 4bbc2cf1126..81ab8e24692 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
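RowSpec above decodes the row/column/timestamp portion of a REST path: an optional end row after a comma, a trailing '*' turning the start row into a prefix range, family:qualifier column selectors, an optional start,end timestamp pair, and ?m= / ?n= query parameters for max versions and max values. The explicit constructor kept in this class can also be exercised directly; the sketch below assumes it lives in the same org.apache.hadoop.hbase.rest package (the class is audience-private) and uses purely illustrative values:

package org.apache.hadoop.hbase.rest;

import org.apache.hadoop.hbase.util.Bytes;

public class RowSpecSketch {
  public static void main(String[] args) {
    byte[][] columns = new byte[][] { Bytes.toBytes("cf:a"), Bytes.toBytes("cf:b") };
    // startRow, endRow, columns, startTime, endTime, maxVersions
    RowSpec spec =
      new RowSpec(Bytes.toBytes("row1"), Bytes.toBytes("row9"), columns, 0L, Long.MAX_VALUE, 3);
    // toString() prints the parsed row range, columns and timestamp range.
    System.out.println(spec);
    System.out.println(spec.hasColumns() + " " + spec.getColumns().length);
  }
}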
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -44,8 +42,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo; @InterfaceAudience.Private public class ScannerInstanceResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerInstanceResource.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerInstanceResource.class); static CacheControl cacheControl; static { @@ -58,29 +55,28 @@ public class ScannerInstanceResource extends ResourceBase { String id = null; int batch = 1; - public ScannerInstanceResource() throws IOException { } + public ScannerInstanceResource() throws IOException { + } - public ScannerInstanceResource(String table, String id, - ResultGenerator generator, int batch) throws IOException { + public ScannerInstanceResource(String table, String id, ResultGenerator generator, int batch) + throws IOException { this.id = id; this.generator = generator; this.batch = batch; } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context UriInfo uriInfo, - @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) { + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows, + final @QueryParam("c") int maxValues) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (generator == null) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } else { // Updated the connection access time for each client next() call RESTServlet.getInstance().getConnectionCache().updateConnectionAccessTime(); @@ -104,15 +100,13 @@ public class ScannerInstanceResource extends ResourceBase { servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) .build(); } catch (IllegalArgumentException e) { Throwable t = e.getCause(); if (t instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } throw e; } @@ -144,9 +138,8 @@ public class ScannerInstanceResource extends ResourceBase { rowKey = CellUtil.cloneRow(value); rowModel = new RowModel(rowKey); } - rowModel.addCell( - new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), - value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), + value.getTimestamp(), CellUtil.cloneValue(value))); } while (--count > 0); model.addRow(rowModel); ResponseBuilder response = Response.ok(model); @@ -159,8 +152,7 @@ public class ScannerInstanceResource extends ResourceBase { @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + - MIMETYPE_BINARY); + 
LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { @@ -173,10 +165,10 @@ public class ScannerInstanceResource extends ResourceBase { } ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); response.cacheControl(cacheControl); - response.header("X-Row", Bytes.toString(Base64.getEncoder().encode( - CellUtil.cloneRow(value)))); - response.header("X-Column", Bytes.toString(Base64.getEncoder().encode( - CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); + response.header("X-Row", + Bytes.toString(Base64.getEncoder().encode(CellUtil.cloneRow(value)))); + response.header("X-Column", Bytes.toString(Base64.getEncoder() + .encode(CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); response.header("X-Timestamp", value.getTimestamp()); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -187,8 +179,7 @@ public class ScannerInstanceResource extends ResourceBase { servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) .build(); } } @@ -200,9 +191,8 @@ public class ScannerInstanceResource extends ResourceBase { } servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (ScannerResource.delete(id)) { servlet.getMetrics().incrementSucessfulDeleteRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java index 785c7c4b34a..1c2929aab7a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import com.fasterxml.jackson.core.JsonParseException; @@ -48,17 +46,15 @@ public class ScannerResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(ScannerResource.class); - static final Map scanners = - Collections.synchronizedMap(new HashMap()); + static final Map scanners = + Collections.synchronizedMap(new HashMap()); TableResource tableResource; /** - * Constructor - * @param tableResource - * @throws IOException + * Constructor nn */ - public ScannerResource(TableResource tableResource)throws IOException { + public ScannerResource(TableResource tableResource) throws IOException { super(); this.tableResource = tableResource; } @@ -73,30 +69,27 @@ public class ScannerResource extends ResourceBase { } } - Response update(final ScannerModel model, final boolean replace, - final UriInfo uriInfo) { + Response update(final ScannerModel model, final boolean replace, final UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } byte[] endRow = model.hasEndRow() ? model.getEndRow() : null; RowSpec spec = null; if (model.getLabels() != null) { spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(), - model.getEndTime(), model.getMaxVersions(), model.getLabels()); + model.getEndTime(), model.getMaxVersions(), model.getLabels()); } else { spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(), - model.getEndTime(), model.getMaxVersions()); + model.getEndTime(), model.getMaxVersions()); } try { Filter filter = ScannerResultGenerator.buildFilterFromModel(model); String tableName = tableResource.getName(); - ScannerResultGenerator gen = - new ScannerResultGenerator(tableName, spec, filter, model.getCaching(), - model.getCacheBlocks()); + ScannerResultGenerator gen = new ScannerResultGenerator(tableName, spec, filter, + model.getCaching(), model.getCacheBlocks()); String id = gen.getID(); ScannerInstanceResource instance = new ScannerInstanceResource(tableName, id, gen, model.getBatch()); @@ -112,26 +105,23 @@ public class ScannerResource extends ResourceBase { LOG.error("Exception occurred while processing " + uriInfo.getAbsolutePath() + " : ", e); servlet.getMetrics().incrementFailedPutRequests(1); if (e instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); - } else if (e instanceof RuntimeException - || e instanceof JsonMappingException | e instanceof JsonParseException) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); + } else if ( + e instanceof RuntimeException + || e instanceof JsonMappingException | e instanceof JsonParseException + ) { + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } @PUT - 
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final ScannerModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -139,10 +129,8 @@ public class ScannerResource extends ResourceBase { } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final ScannerModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("POST " + uriInfo.getAbsolutePath()); } @@ -150,8 +138,8 @@ public class ScannerResource extends ResourceBase { } @Path("{scanner: .+}") - public ScannerInstanceResource getScannerInstanceResource( - final @PathParam("scanner") String id) throws IOException { + public ScannerInstanceResource getScannerInstanceResource(final @PathParam("scanner") String id) + throws IOException { ScannerInstanceResource instance = scanners.get(id); if (instance == null) { servlet.getMetrics().incrementFailedGetRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java index d31b4b1a807..e6a242610d5 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableNotEnabledException; @@ -43,11 +40,9 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class ScannerResultGenerator extends ResultGenerator { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerResultGenerator.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerResultGenerator.class); - public static Filter buildFilterFromModel(final ScannerModel model) - throws Exception { + public static Filter buildFilterFromModel(final ScannerModel model) throws Exception { String filter = model.getFilter(); if (filter == null || filter.length() == 0) { return null; @@ -61,15 +56,13 @@ public class ScannerResultGenerator extends ResultGenerator { private ResultScanner scanner; private Result cached; - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { this(tableName, rowspec, filter, -1, cacheBlocks); } - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final int caching, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final int caching, final boolean cacheBlocks) throws IllegalArgumentException, IOException { Table table = RESTServlet.getInstance().getTable(tableName); try { Scan scan; @@ -80,7 +73,7 @@ public class ScannerResultGenerator extends ResultGenerator { } if (rowspec.hasColumns()) { byte[][] columns = rowspec.getColumns(); - for (byte[] column: columns) { + for (byte[] column : columns) { byte[][] split = CellUtil.parseColumn(column); if (split.length == 1) { scan.addFamily(split[0]); @@ -91,12 +84,12 @@ public class ScannerResultGenerator extends ResultGenerator { } } } - scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); + scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); scan.setMaxVersions(rowspec.getMaxVersions()); if (filter != null) { scan.setFilter(filter); } - if (caching > 0 ) { + if (caching > 0) { scan.setCaching(caching); } scan.setCacheBlocks(cacheBlocks); @@ -105,8 +98,8 @@ public class ScannerResultGenerator extends ResultGenerator { } scanner = table.getScanner(scan); cached = null; - id = Long.toString(EnvironmentEdgeManager.currentTime()) + - Integer.toHexString(scanner.hashCode()); + id = Long.toString(EnvironmentEdgeManager.currentTime()) + + Integer.toHexString(scanner.hashCode()); } finally { table.close(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index b0fc0276018..ee15cecccd3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -63,17 +61,14 @@ public class SchemaResource extends ResourceBase { TableResource tableResource; /** - * Constructor - * @param tableResource - * @throws IOException + * Constructor nn */ public SchemaResource(TableResource tableResource) throws IOException { super(); this.tableResource = tableResource; } - private HTableDescriptor getTableSchema() throws IOException, - TableNotFoundException { + private HTableDescriptor getTableSchema() throws IOException, TableNotFoundException { Table table = servlet.getTable(tableResource.getName()); try { return table.getTableDescriptor(); @@ -83,16 +78,15 @@ public class SchemaResource extends ResourceBase { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ResponseBuilder response = - Response.ok(new TableSchemaModel(getTableSchema())); + ResponseBuilder response = Response.ok(new TableSchemaModel(getTableSchema())); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -103,20 +97,19 @@ public class SchemaResource extends ResourceBase { } private Response replace(final TableName name, final TableSchemaModel model, - final UriInfo uriInfo, final Admin admin) { + final UriInfo uriInfo, final Admin admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { HTableDescriptor htd = new HTableDescriptor(name); - for (Map.Entry e: model.getAny().entrySet()) { + for (Map.Entry e : model.getAny().entrySet()) { htd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } - for (ColumnSchemaModel family: model.getColumns()) { + for (ColumnSchemaModel family : model.getColumns()) { HColumnDescriptor hcd = new HColumnDescriptor(family.getName()); - for (Map.Entry e: family.getAny().entrySet()) { + for (Map.Entry e : family.getAny().entrySet()) { hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } htd.addFamily(hcd); @@ -131,9 +124,8 @@ public class SchemaResource extends ResourceBase { servlet.getMetrics().incrementSucessfulPutRequests(1); } catch (TableExistsException e) { // race, someone else created a table with the same name - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Not modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Not modified" + CRLF).build(); } return Response.created(uriInfo.getAbsolutePath()).build(); } catch (Exception e) { @@ -143,20 +135,19 @@ public class SchemaResource extends ResourceBase { } } - private Response update(final TableName name, final TableSchemaModel model, - final UriInfo uriInfo, final Admin admin) { + private Response update(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, + final Admin 
admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { HTableDescriptor htd = admin.getTableDescriptor(name); admin.disableTable(name); try { - for (ColumnSchemaModel family: model.getColumns()) { + for (ColumnSchemaModel family : model.getColumns()) { HColumnDescriptor hcd = new HColumnDescriptor(family.getName()); - for (Map.Entry e: family.getAny().entrySet()) { + for (Map.Entry e : family.getAny().entrySet()) { hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } if (htd.hasFamily(hcd.getName())) { @@ -166,9 +157,8 @@ public class SchemaResource extends ResourceBase { } } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } finally { admin.enableTable(TableName.valueOf(tableResource.getName())); } @@ -181,7 +171,7 @@ public class SchemaResource extends ResourceBase { } private Response update(final TableSchemaModel model, final boolean replace, - final UriInfo uriInfo) { + final UriInfo uriInfo) { try { TableName name = TableName.valueOf(tableResource.getName()); Admin admin = servlet.getAdmin(); @@ -201,10 +191,8 @@ public class SchemaResource extends ResourceBase { } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -213,10 +201,8 @@ public class SchemaResource extends ResourceBase { } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -224,8 +210,8 @@ public class SchemaResource extends ResourceBase { return update(model, false, uriInfo); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", - justification="Expected") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DE_MIGHT_IGNORE", + justification = "Expected") @DELETE public Response delete(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { @@ -234,13 +220,14 @@ public class SchemaResource extends ResourceBase { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) - .entity("Forbidden" + CRLF).build(); + .entity("Forbidden" + CRLF).build(); } try { Admin admin = servlet.getAdmin(); try { admin.disableTable(TableName.valueOf(tableResource.getName())); - } catch (TableNotEnabledException e) { /* this is what we want anyway */ } + } catch (TableNotEnabledException e) { + /* this is what we want anyway */ } admin.deleteTable(TableName.valueOf(tableResource.getName())); 
servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index 0947058aaa0..85b3b3f6556 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -43,8 +41,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo; @InterfaceAudience.Private public class StorageClusterStatusResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterStatusResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterStatusResource.class); static CacheControl cacheControl; static { @@ -54,54 +51,48 @@ public class StorageClusterStatusResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public StorageClusterStatusResource() throws IOException { super(); } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ClusterMetrics status = servlet.getAdmin().getClusterMetrics( - EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); + ClusterMetrics status = + servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); StorageClusterStatusModel model = new StorageClusterStatusModel(); model.setRegions(status.getRegionCount()); model.setRequests(status.getRequestCount()); model.setAverageLoad(status.getAverageLoad()); - for (Map.Entry entry: status.getLiveServerMetrics().entrySet()) { + for (Map.Entry entry : status.getLiveServerMetrics().entrySet()) { ServerName sn = entry.getKey(); ServerMetrics load = entry.getValue(); StorageClusterStatusModel.Node node = - model.addLiveNode( - sn.getHostname() + ":" + - Integer.toString(sn.getPort()), + model.addLiveNode(sn.getHostname() + ":" + Integer.toString(sn.getPort()), sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE), (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE)); node.setRequests(load.getRequestCount()); - for (RegionMetrics region: load.getRegionMetrics().values()) { - node.addRegion(region.getRegionName(), region.getStoreCount(), - region.getStoreFileCount(), + for (RegionMetrics region : load.getRegionMetrics().values()) { + node.addRegion(region.getRegionName(), region.getStoreCount(), region.getStoreFileCount(), (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE), (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE), (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE), - region.getReadRequestCount(), - region.getWriteRequestCount(), + region.getReadRequestCount(), 
region.getWriteRequestCount(), (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE), (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE), (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE), - region.getCompactingCellCount(), - region.getCompactedCellCount()); + region.getCompactingCellCount(), region.getCompactedCellCount()); } } - for (ServerName name: status.getDeadServerNames()) { + for (ServerName name : status.getDeadServerNames()) { model.addDeadNode(name.toString()); } ResponseBuilder response = Response.ok(model); @@ -110,9 +101,8 @@ public class StorageClusterStatusResource extends ResourceBase { return response.build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java index ffa17e44239..ea7641e54cd 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -37,8 +35,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo; @InterfaceAudience.Private public class StorageClusterVersionResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterVersionResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterVersionResource.class); static CacheControl cacheControl; static { @@ -48,15 +45,14 @@ public class StorageClusterVersionResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public StorageClusterVersionResource() throws IOException { super(); } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -65,17 +61,15 @@ public class StorageClusterVersionResource extends ResourceBase { try { StorageClusterVersionModel model = new StorageClusterVersionModel(); model.setVersion( - servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)) - .getHBaseVersion()); + servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)).getHBaseVersion()); ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + 
.entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index 7f62ef99f1d..c10ad88b4b7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -48,9 +46,7 @@ public class TableResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(TableResource.class); /** - * Constructor - * @param table - * @throws IOException + * Constructor nn */ public TableResource(String table) throws IOException { super(); @@ -63,8 +59,7 @@ public class TableResource extends ResourceBase { } /** - * @return true if the table exists - * @throws IOException + * @return true if the table exists n */ boolean exists() throws IOException { return servlet.getAdmin().tableExists(TableName.valueOf(table)); @@ -92,47 +87,43 @@ public class TableResource extends ResourceBase { @Path("{multiget: multiget.*}") public MultiRowResource getMultipleRowResource(final @QueryParam("v") String versions, - @PathParam("multiget") String path) throws IOException { + @PathParam("multiget") String path) throws IOException { return new MultiRowResource(this, versions, path.replace("multiget", "").replace("/", "")); } @Path("{rowspec: [^*]+}") public RowResource getRowResource( - // We need the @Encoded decorator so Jersey won't urldecode before - // the RowSpec constructor has a chance to parse - final @PathParam("rowspec") @Encoded String rowspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, - final @QueryParam("rr") String returnResult) throws IOException { + // We need the @Encoded decorator so Jersey won't urldecode before + // the RowSpec constructor has a chance to parse + final @PathParam("rowspec") @Encoded String rowspec, final @QueryParam("v") String versions, + final @QueryParam("check") String check, final @QueryParam("rr") String returnResult) + throws IOException { return new RowResource(this, rowspec, versions, check, returnResult); } @Path("{suffixglobbingspec: .*\\*/.+}") public RowResource getRowResourceWithSuffixGlobbing( - // We need the @Encoded decorator so Jersey won't urldecode before - // the RowSpec constructor has a chance to parse - final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, - final @QueryParam("rr") String returnResult) throws IOException { + // We need the @Encoded decorator so Jersey won't urldecode before + // the RowSpec constructor has a chance to parse + final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, + final @QueryParam("v") String versions, final @QueryParam("check") String check, + final @QueryParam("rr") String returnResult) throws IOException { return new RowResource(this, suffixglobbingspec, versions, check, returnResult); } @Path("{scanspec: .*[*]$}") - public TableScanResource getScanResource( - final @PathParam("scanspec") String scanSpec, 
- @DefaultValue(Integer.MAX_VALUE + "") - @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, - @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow, - @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow, - @QueryParam(Constants.SCAN_COLUMN) List column, - @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions, - @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize, - @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime, - @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime, - @DefaultValue("true") @QueryParam(Constants.SCAN_CACHE_BLOCKS) boolean cacheBlocks, - @DefaultValue("false") @QueryParam(Constants.SCAN_REVERSED) boolean reversed, - @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String paramFilter) { + public TableScanResource getScanResource(final @PathParam("scanspec") String scanSpec, + @DefaultValue(Integer.MAX_VALUE + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, + @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow, + @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow, + @QueryParam(Constants.SCAN_COLUMN) List column, + @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions, + @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize, + @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime, + @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime, + @DefaultValue("true") @QueryParam(Constants.SCAN_CACHE_BLOCKS) boolean cacheBlocks, + @DefaultValue("false") @QueryParam(Constants.SCAN_REVERSED) boolean reversed, + @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String paramFilter) { try { Filter prefixFilter = null; Scan tableScan = new Scan(); @@ -146,9 +137,9 @@ public class TableResource extends ResourceBase { } if (LOG.isTraceEnabled()) { LOG.trace("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow - + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime - + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => " - + maxVersions + " Batch Size => " + batchSize); + + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime + + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => " + + maxVersions + " Batch Size => " + batchSize); } Table hTable = RESTServlet.getInstance().getTable(this.table); tableScan.setBatch(batchSize); @@ -159,7 +150,7 @@ public class TableResource extends ResourceBase { } tableScan.setStopRow(Bytes.toBytes(endRow)); for (String col : column) { - byte [][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); + byte[][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); if (parts.length == 1) { if (LOG.isTraceEnabled()) { LOG.trace("Scan family : " + Bytes.toStringBinary(parts[0])); @@ -167,8 +158,8 @@ public class TableResource extends ResourceBase { tableScan.addFamily(parts[0]); } else if (parts.length == 2) { if (LOG.isTraceEnabled()) { - LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) - + " " + Bytes.toStringBinary(parts[1])); + LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) + " " + + Bytes.toStringBinary(parts[1])); } tableScan.addColumn(parts[0], parts[1]); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java index d31a346757b..e30beaa37df 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +47,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.StreamingOutput; import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo; @InterfaceAudience.Private -public class TableScanResource extends ResourceBase { +public class TableScanResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(TableScanResource.class); TableResource tableResource; @@ -93,7 +92,7 @@ public class TableScanResource extends ResourceBase { List kvs = rs.listCells(); for (Cell kv : kvs) { rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - kv.getTimestamp(), CellUtil.cloneValue(kv))); + kv.getTimestamp(), CellUtil.cloneValue(kv))); } count--; if (count == 0) { @@ -108,18 +107,16 @@ public class TableScanResource extends ResourceBase { @GET @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) - public Response getProtobuf( - final @Context UriInfo uriInfo, - final @HeaderParam("Accept") String contentType) { + public Response getProtobuf(final @Context UriInfo uriInfo, + final @HeaderParam("Accept") String contentType) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + - MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); - StreamingOutput stream = new ProtobufStreamingOutput(this.results, contentType, - userRequestedLimit, fetchSize); + StreamingOutput stream = + new ProtobufStreamingOutput(this.results, contentType, userRequestedLimit, fetchSize); servlet.getMetrics().incrementSucessfulScanRequests(1); ResponseBuilder response = Response.ok(stream); response.header("content-type", contentType); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java index e12ff9907b8..8b71f708645 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -55,8 +53,7 @@ public class VersionResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public VersionResource() throws IOException { super(); @@ -69,10 +66,9 @@ public class VersionResource extends ResourceBase { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } @@ -87,8 +83,7 @@ public class VersionResource extends ResourceBase { * Dispatch to StorageClusterVersionResource */ @Path("cluster") - public StorageClusterVersionResource getClusterVersionResource() - throws IOException { + public StorageClusterVersionResource getClusterVersionResource() throws IOException { return new StorageClusterVersionResource(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 7459f8af0ad..ff1fe141bb8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.io.BufferedInputStream; @@ -68,8 +66,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A wrapper around HttpClient which provides some useful function and - * semantics for interacting with the REST gateway. + * A wrapper around HttpClient which provides some useful function and semantics for interacting + * with the REST gateway. */ @InterfaceAudience.Public public class Client { @@ -98,7 +96,7 @@ public class Client { } private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, - Optional trustStore) { + Optional trustStore) { this.cluster = cluster; this.conf = conf; this.sslEnabled = sslEnabled; @@ -111,10 +109,9 @@ public class Client { Constants.DEFAULT_REST_CLIENT_CONN_TIMEOUT); int socketTimeout = this.conf.getInt(Constants.REST_CLIENT_SOCKET_TIMEOUT, Constants.DEFAULT_REST_CLIENT_SOCKET_TIMEOUT); - RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(connTimeout) - .setSocketTimeout(socketTimeout) - .setNormalizeUri(false) // URIs should not be normalized, see HBASE-26903 + RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(connTimeout) + .setSocketTimeout(socketTimeout).setNormalizeUri(false) // URIs should not be normalized, see + // HBASE-26903 .build(); httpClientBuilder.setDefaultRequestConfig(requestConfig); @@ -124,7 +121,7 @@ public class Client { // automatic content compression. 
httpClientBuilder.disableContentCompression(); - if(sslEnabled && trustStore.isPresent()) { + if (sslEnabled && trustStore.isPresent()) { try { SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); @@ -147,7 +144,7 @@ public class Client { /** * Constructor - * @param cluster the cluster definition + * @param cluster the cluster definition * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, boolean sslEnabled) { @@ -156,8 +153,8 @@ public class Client { /** * Constructor - * @param cluster the cluster definition - * @param conf Configuration + * @param cluster the cluster definition + * @param conf Configuration * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { @@ -166,31 +163,28 @@ public class Client { /** * Constructor, allowing to define custom trust store (only for SSL connections) - * - * @param cluster the cluster definition - * @param trustStorePath custom trust store to use for SSL connections + * @param cluster the cluster definition + * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store - * @param trustStoreType type of custom trust store - * + * @param trustStoreType type of custom trust store * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ public Client(Cluster cluster, String trustStorePath, Optional trustStorePassword, - Optional trustStoreType) { + Optional trustStoreType) { this(cluster, HBaseConfiguration.create(), trustStorePath, trustStorePassword, trustStoreType); } /** * Constructor, allowing to define custom trust store (only for SSL connections) - * - * @param cluster the cluster definition - * @param conf Configuration - * @param trustStorePath custom trust store to use for SSL connections + * @param cluster the cluster definition + * @param conf Configuration + * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store - * @param trustStoreType type of custom trust store + * @param trustStoreType type of custom trust store * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ public Client(Cluster cluster, Configuration conf, String trustStorePath, - Optional trustStorePassword, Optional trustStoreType) { + Optional trustStorePassword, Optional trustStoreType) { char[] password = trustStorePassword.map(String::toCharArray).orElse(null); String type = trustStoreType.orElse(KeyStore.getDefaultType()); @@ -201,8 +195,8 @@ public class Client { } catch (KeyStoreException e) { throw new ClientTrustStoreInitializationException("Invalid trust store type: " + type, e); } - try (InputStream inputStream = new BufferedInputStream( - Files.newInputStream(new File(trustStorePath).toPath()))) { + try (InputStream inputStream = + new BufferedInputStream(Files.newInputStream(new File(trustStorePath).toPath()))) { trustStore.load(inputStream, password); } catch (CertificateException | NoSuchAlgorithmException | IOException e) { throw new ClientTrustStoreInitializationException("Trust store load error: " + trustStorePath, @@ -226,9 +220,8 @@ public class Client { } /** - * Add extra headers. These extra headers will be applied to all http - * methods before they are removed. If any header is not used any more, - * client needs to remove it explicitly. + * Add extra headers. 
These extra headers will be applied to all http methods before they are + * removed. If any header is not used any more, client needs to remove it explicitly. */ public void addExtraHeader(final String name, final String value) { extraHeaders.put(name, value); @@ -256,25 +249,23 @@ public class Client { } /** - * Execute a transaction method given only the path. Will select at random - * one of the members of the supplied cluster definition and iterate through - * the list until a transaction can be successfully completed. The - * definition of success here is a complete HTTP transaction, irrespective - * of result code. + * Execute a transaction method given only the path. Will select at random one of the members of + * the supplied cluster definition and iterate through the list until a transaction can be + * successfully completed. The definition of success here is a complete HTTP transaction, + * irrespective of result code. * @param cluster the cluster definition - * @param method the transaction method + * @param method the transaction method * @param headers HTTP header values to send - * @param path the properly urlencoded path - * @return the HTTP response code - * @throws IOException + * @param path the properly urlencoded path + * @return the HTTP response code n */ - public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, - Header[] headers, String path) throws IOException { + public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers, + String path) throws IOException { IOException lastException; if (cluster.nodes.size() < 1) { throw new IOException("Cluster is empty"); } - int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random()); + int start = (int) Math.round((cluster.nodes.size() - 1) * Math.random()); int i = start; do { cluster.lastHost = cluster.nodes.get(i); @@ -317,20 +308,19 @@ public class Client { /** * Execute a transaction method given a complete URI. - * @param method the transaction method + * @param method the transaction method * @param headers HTTP header values to send - * @param uri a properly urlencoded URI - * @return the HTTP response code - * @throws IOException + * @param uri a properly urlencoded URI + * @return the HTTP response code n */ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri) - throws IOException { + throws IOException { // method.setURI(new URI(uri, true)); - for (Map.Entry e: extraHeaders.entrySet()) { + for (Map.Entry e : extraHeaders.entrySet()) { method.addHeader(e.getKey(), e.getValue()); } if (headers != null) { - for (Header header: headers) { + for (Header header : headers) { method.addHeader(header); } } @@ -346,25 +336,24 @@ public class Client { long endTime = EnvironmentEdgeManager.currentTime(); if (LOG.isTraceEnabled()) { - LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + - resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); + LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + + resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); } return resp; } /** - * Execute a transaction method. Will call either executePathOnly - * or executeURI depending on whether a path only is supplied in - * 'path', or if a complete URI is passed instead, respectively. + * Execute a transaction method. 
Will call either executePathOnly or executeURI + * depending on whether a path only is supplied in 'path', or if a complete URI is passed instead, + * respectively. * @param cluster the cluster definition - * @param method the HTTP method + * @param method the HTTP method * @param headers HTTP header values to send - * @param path the properly urlencoded path or URI - * @return the HTTP response code - * @throws IOException + * @param path the properly urlencoded path or URI + * @return the HTTP response code n */ - public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, - String path) throws IOException { + public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path) + throws IOException { if (path.startsWith("/")) { return executePathOnly(cluster, method, headers, path); } @@ -374,7 +363,7 @@ public class Client { /** * Initiate client side Kerberos negotiation with the server. * @param method method to inject the authentication token into. - * @param uri the String to parse as a URL. + * @param uri the String to parse as a URL. * @throws IOException if unknown protocol is found. */ private void negotiate(HttpUriRequest method, String uri) throws IOException { @@ -393,7 +382,7 @@ public class Client { /** * Helper method that injects an authentication token to send with the method. * @param method method to inject the authentication token into. - * @param token authentication token to inject. + * @param token authentication token to inject. */ private void injectToken(HttpUriRequest method, AuthenticatedURL.Token token) { String t = token.toString(); @@ -422,8 +411,7 @@ public class Client { /** * Send a HEAD request * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response head(String path) throws IOException { return head(cluster, path, null); @@ -432,13 +420,11 @@ public class Client { /** * Send a HEAD request * @param cluster the cluster definition - * @param path the path or URI + * @param path the path or URI * @param headers the HTTP headers to include in the request - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response head(Cluster cluster, String path, Header[] headers) - throws IOException { + public Response head(Cluster cluster, String path, Header[] headers) throws IOException { HttpHead method = new HttpHead(path); try { HttpResponse resp = execute(cluster, method, null, path); @@ -451,8 +437,7 @@ public class Client { /** * Send a GET request * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response get(String path) throws IOException { return get(cluster, path); @@ -461,9 +446,8 @@ public class Client { /** * Send a GET request * @param cluster the cluster definition - * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @param path the path or URI + * @return a Response object with response detail n */ public Response get(Cluster cluster, String path) throws IOException { return get(cluster, path, EMPTY_HEADER_ARRAY); @@ -471,10 +455,9 @@ public class Client { /** * Send a GET request - * @param path the path or URI + * @param path the path or URI * @param accept Accept header value - * @return a Response object with 
response detail - * @throws IOException + * @return a Response object with response detail n */ public Response get(String path, String accept) throws IOException { return get(cluster, path, accept); @@ -483,13 +466,11 @@ public class Client { /** * Send a GET request * @param cluster the cluster definition - * @param path the path or URI - * @param accept Accept header value - * @return a Response object with response detail - * @throws IOException + * @param path the path or URI + * @param accept Accept header value + * @return a Response object with response detail n */ - public Response get(Cluster cluster, String path, String accept) - throws IOException { + public Response get(Cluster cluster, String path, String accept) throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Accept", accept); return get(cluster, path, headers); @@ -497,43 +478,37 @@ public class Client { /** * Send a GET request - * @param path the path or URI - * @param headers the HTTP headers to include in the request, - * Accept must be supplied - * @return a Response object with response detail - * @throws IOException + * @param path the path or URI + * @param headers the HTTP headers to include in the request, Accept must be supplied + * @return a Response object with response detail n */ public Response get(String path, Header[] headers) throws IOException { return get(cluster, path, headers); } /** - * Returns the response body of the HTTPResponse, if any, as an array of bytes. - * If response body is not available or cannot be read, returns null - * - * Note: This will cause the entire response body to be buffered in memory. A - * malicious server may easily exhaust all the VM memory. It is strongly - * recommended, to use getResponseAsStream if the content length of the response - * is unknown or reasonably large. - * + * Returns the response body of the HTTPResponse, if any, as an array of bytes. If response body + * is not available or cannot be read, returns null Note: This will cause the entire + * response body to be buffered in memory. A malicious server may easily exhaust all the VM + * memory. It is strongly recommended, to use getResponseAsStream if the content length of the + * response is unknown or reasonably large. * @param resp HttpResponse * @return The response body, null if body is empty - * @throws IOException If an I/O (transport) problem occurs while obtaining the - * response body. + * @throws IOException If an I/O (transport) problem occurs while obtaining the response body. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "NP_LOAD_OF_KNOWN_NULL_VALUE", justification = "null is possible return value") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_LOAD_OF_KNOWN_NULL_VALUE", + justification = "null is possible return value") public static byte[] getResponseBody(HttpResponse resp) throws IOException { if (resp.getEntity() == null) return null; try (InputStream instream = resp.getEntity().getContent()) { if (instream != null) { long contentLength = resp.getEntity().getContentLength(); if (contentLength > Integer.MAX_VALUE) { - //guard integer cast from overflow - throw new IOException("Content too large to be buffered: " + contentLength +" bytes"); + // guard integer cast from overflow + throw new IOException("Content too large to be buffered: " + contentLength + " bytes"); } - ByteArrayOutputStream outstream = new ByteArrayOutputStream( - contentLength > 0 ? 
(int) contentLength : 4*1024); + ByteArrayOutputStream outstream = + new ByteArrayOutputStream(contentLength > 0 ? (int) contentLength : 4 * 1024); byte[] buffer = new byte[4096]; int len; while ((len = instream.read(buffer)) > 0) { @@ -548,61 +523,56 @@ public class Client { /** * Send a GET request - * @param c the cluster definition - * @param path the path or URI + * @param c the cluster definition + * @param path the path or URI * @param headers the HTTP headers to include in the request - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response get(Cluster c, String path, Header[] headers) - throws IOException { + public Response get(Cluster c, String path, Header[] headers) throws IOException { if (httpGet != null) { httpGet.releaseConnection(); } httpGet = new HttpGet(path); HttpResponse resp = execute(c, httpGet, headers, path); - return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), - resp, resp.getEntity() == null ? null : resp.getEntity().getContent()); + return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), resp, + resp.getEntity() == null ? null : resp.getEntity().getContent()); } /** * Send a PUT request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @return a Response object with response detail n */ - public Response put(String path, String contentType, byte[] content) - throws IOException { + public Response put(String path, String contentType, byte[] content) throws IOException { return put(cluster, path, contentType, content); } /** * Send a PUT request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr extra Header to send - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @param extraHdr extra Header to send + * @return a Response object with response detail n */ public Response put(String path, String contentType, byte[] content, Header extraHdr) - throws IOException { + throws IOException { return put(cluster, path, contentType, content, extraHdr); } /** * Send a PUT request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes + * @param content the content bytes * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return put(cluster, path, headers, content); @@ -610,16 +580,16 @@ public class Client { /** * Send a PUT request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr additional Header to send + * @param content the content bytes 
+ * @param extraHdr additional Header to send * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -631,30 +601,25 @@ public class Client { /** * Send a PUT request - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response put(String path, Header[] headers, byte[] content) - throws IOException { + public Response put(String path, Header[] headers, byte[] content) throws IOException { return put(cluster, path, headers, content); } /** * Send a PUT request * @param cluster the cluster definition - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response put(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPut method = new HttpPut(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -669,42 +634,39 @@ public class Client { /** * Send a POST request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @return a Response object with response detail n */ - public Response post(String path, String contentType, byte[] content) - throws IOException { + public Response post(String path, String contentType, byte[] content) throws IOException { return post(cluster, path, contentType, content); } /** * Send a POST request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr additional Header to send - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @param extraHdr additional Header to send + * @return a Response object with response detail n */ public Response post(String path, String contentType, byte[] content, Header extraHdr) - throws IOException { + throws IOException { return post(cluster, path, contentType, content, extraHdr); } /** * Send a POST request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes + * @param content the content bytes * 
@return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return post(cluster, path, headers, content); @@ -712,16 +674,16 @@ public class Client { /** * Send a POST request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr additional Header to send + * @param content the content bytes + * @param extraHdr additional Header to send * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -733,30 +695,25 @@ public class Client { /** * Send a POST request - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response post(String path, Header[] headers, byte[] content) - throws IOException { + public Response post(String path, Header[] headers, byte[] content) throws IOException { return post(cluster, path, headers, content); } /** * Send a POST request * @param cluster the cluster definition - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response post(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPost method = new HttpPost(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -772,8 +729,7 @@ public class Client { /** * Send a DELETE request * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response delete(String path) throws IOException { return delete(cluster, path); @@ -781,10 +737,9 @@ public class Client { /** * Send a DELETE request - * @param path the path or URI + * @param path the path or URI * @param extraHdr additional Header to send - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response delete(String path, Header extraHdr) throws IOException { 
return delete(cluster, path, extraHdr); @@ -793,7 +748,7 @@ public class Client { /** * Send a DELETE request * @param cluster the cluster definition - * @param path the path or URI + * @param path the path or URI * @return a Response object with response detail * @throws IOException for error */ @@ -812,7 +767,7 @@ public class Client { /** * Send a DELETE request * @param cluster the cluster definition - * @param path the path or URI + * @param path the path or URI * @return a Response object with response detail * @throws IOException for error */ @@ -829,7 +784,6 @@ public class Client { } } - public static class ClientTrustStoreInitializationException extends RuntimeException { public ClientTrustStoreInitializationException(String message, Throwable cause) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java index 008470826de..dbb30adbc74 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** - * A list of 'host:port' addresses of HTTP servers operating as a single - * entity, for example multiple redundant web service gateways. + * A list of 'host:port' addresses of HTTP servers operating as a single entity, for example + * multiple redundant web service gateways. */ @InterfaceAudience.Public public class Cluster { - protected List nodes = - Collections.synchronizedList(new ArrayList()); + protected List nodes = Collections.synchronizedList(new ArrayList()); protected String lastHost; /** * Constructor */ - public Cluster() {} + public Cluster() { + } /** * Constructor @@ -99,10 +96,8 @@ public class Cluster { return remove(sb.toString()); } - @Override public String toString() { - return "Cluster{" + - "nodes=" + nodes + - ", lastHost='" + lastHost + '\'' + - '}'; + @Override + public String toString() { + return "Cluster{" + "nodes=" + nodes + ", lastHost='" + lastHost + '\'' + '}'; } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java index 0e91005ab2b..cd0ac33dd79 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,12 +19,9 @@ package org.apache.hadoop.hbase.rest.client; import java.io.IOException; import java.io.InputStream; - import org.apache.http.Header; import org.apache.http.HttpResponse; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +48,7 @@ public class Response { /** * Constructor - * @param code the HTTP response code + * @param code the HTTP response code * @param headers the HTTP response headers */ public Response(int code, Header[] headers) { @@ -61,23 +57,22 @@ public class Response { /** * Constructor - * @param code the HTTP response code + * @param code the HTTP response code * @param headers the HTTP response headers - * @param body the response body, can be null + * @param body the response body, can be null */ public Response(int code, Header[] headers, byte[] body) { this.code = code; this.headers = headers; this.body = body; } - + /** * Constructor. Note: this is not thread-safe - * - * @param code the HTTP response code + * @param code the HTTP response code * @param headers headers the HTTP response headers - * @param resp the response - * @param in Inputstream if the response had one. + * @param resp the response + * @param in Inputstream if the response had one. */ public Response(int code, Header[] headers, HttpResponse resp, InputStream in) { this.code = code; @@ -93,13 +88,12 @@ public class Response { public int getCode() { return code; } - + /** * Gets the input stream instance. - * * @return an instance of InputStream class. */ - public InputStream getStream(){ + public InputStream getStream() { return this.stream; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java index b9b8a006437..c996e75f937 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,18 +44,16 @@ public class AuthFilter extends AuthenticationFilter { private static final int REST_PREFIX_LEN = REST_PREFIX.length(); /** - * Returns the configuration to be used by the authentication filter - * to initialize the authentication handler. - * - * This filter retrieves all HBase configurations and passes those started - * with REST_PREFIX to the authentication handler. It is useful to support - * plugging different authentication handlers. - */ + * Returns the configuration to be used by the authentication filter to initialize the + * authentication handler. This filter retrieves all HBase configurations and passes those started + * with REST_PREFIX to the authentication handler. It is useful to support plugging different + * authentication handlers. 
+ */ @Override - protected Properties getConfiguration( - String configPrefix, FilterConfig filterConfig) throws ServletException { + protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) + throws ServletException { Properties props = super.getConfiguration(configPrefix, filterConfig); - //setting the cookie path to root '/' so it is used for all resources. + // setting the cookie path to root '/' so it is used for all resources. props.setProperty(AuthenticationFilter.COOKIE_PATH, "/"); Configuration conf = null; @@ -70,11 +68,10 @@ public class AuthFilter extends AuthenticationFilter { String name = entry.getKey(); if (name.startsWith(REST_PREFIX)) { String value = entry.getValue(); - if(name.equals(REST_AUTHENTICATION_PRINCIPAL)) { + if (name.equals(REST_AUTHENTICATION_PRINCIPAL)) { try { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); value = SecurityUtil.getServerPrincipal(value, machineName); } catch (IOException ie) { throw new ServletException("Failed to retrieve server principal", ie); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java index f74e10cae74..efb7e2a227a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ package org.apache.hadoop.hbase.rest.filter; import java.io.IOException; import java.util.zip.GZIPInputStream; - import javax.servlet.ReadListener; import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java index 51eba665f3f..db41fbb5b84 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
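For context on the getConfiguration() change above: the filter expands the _HOST placeholder in the configured Kerberos principal using the REST DNS interface/nameserver settings before handing it to the authentication handler. A standalone sketch of that substitution, with a made-up principal; the literal config key names here mirror the REST_DNS_* constants and are an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.security.SecurityUtil;

public class PrincipalSubstitutionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolve the local host name the same way the filter does.
    String machineName = Strings.domainNamePointerToHostName(
      DNS.getDefaultHost(conf.get("hbase.rest.dns.interface", "default"),
        conf.get("hbase.rest.dns.nameserver", "default")));
    // Hypothetical principal; _HOST is replaced with the resolved host name.
    String principal = SecurityUtil.getServerPrincipal("HTTP/_HOST@EXAMPLE.COM", machineName);
    System.out.println(principal);
  }
}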
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; - import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java index 3fa1ad6f857..7c1a4f99547 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ package org.apache.hadoop.hbase.rest.filter; import java.io.IOException; import java.util.zip.GZIPOutputStream; - import javax.servlet.ServletOutputStream; import javax.servlet.WriteListener; import javax.servlet.http.HttpServletResponse; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java index 53a26ea1ac8..41342214100 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.IOException; import java.io.PrintWriter; - import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponseWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -80,7 +76,7 @@ public class GZIPResponseWrapper extends HttpServletResponseWrapper { writer.flush(); } if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } else { getResponse().flushBuffer(); } @@ -90,7 +86,7 @@ public class GZIPResponseWrapper extends HttpServletResponseWrapper { public void reset() { super.reset(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -101,7 +97,7 @@ public class GZIPResponseWrapper extends HttpServletResponseWrapper { public void resetBuffer() { super.resetBuffer(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -129,7 +125,7 @@ public class GZIPResponseWrapper extends HttpServletResponseWrapper { public ServletOutputStream getOutputStream() throws IOException { if (os == null) { if (!response.isCommitted() && compress) { - os = (ServletOutputStream)new GZIPResponseStream(response); + os = (ServletOutputStream) new GZIPResponseStream(response); } else { os = response.getOutputStream(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java index 4ba9eca302d..282e8b100c4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.HashSet; import java.util.Locale; import java.util.Set; import java.util.StringTokenizer; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -33,9 +31,7 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -58,27 +54,27 @@ public class GzipFilter implements Filter { } @Override - public void doFilter(ServletRequest req, ServletResponse rsp, - FilterChain chain) throws IOException, ServletException { - HttpServletRequest request = (HttpServletRequest)req; - HttpServletResponse response = (HttpServletResponse)rsp; + public void doFilter(ServletRequest req, ServletResponse rsp, FilterChain chain) + throws IOException, ServletException { + HttpServletRequest request = (HttpServletRequest) req; + HttpServletResponse response = (HttpServletResponse) rsp; String contentEncoding = request.getHeader("content-encoding"); String acceptEncoding = request.getHeader("accept-encoding"); String contentType = request.getHeader("content-type"); - if ((contentEncoding != null) && - (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { + if ((contentEncoding != null) && (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { request = new GZIPRequestWrapper(request); } - if (((acceptEncoding != null) && - (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) || - ((contentType != null) && mimeTypes.contains(contentType))) { + if ( + ((acceptEncoding != null) && (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) + || ((contentType != null) && mimeTypes.contains(contentType)) + ) { response = new GZIPResponseWrapper(response); } chain.doFilter(request, response); if (response instanceof GZIPResponseWrapper) { OutputStream os = response.getOutputStream(); if (os instanceof GZIPResponseStream) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java index 94eb314e01a..47e67dbea5a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
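Relating the doFilter() logic above to the caller's side: a request whose Content-Encoding is gzip is wrapped in GZIPRequestWrapper, so a client may pre-compress its body, and Accept-Encoding: gzip asks for a compressed response. A rough sketch using the Header[] put overload documented earlier; the gateway address, path and payload are invented for illustration:

import java.io.ByteArrayOutputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

public class GzipRequestSketch {
  public static void main(String[] args) throws Exception {
    // Compress the payload before handing it to the REST client.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    try (GZIPOutputStream gz = new GZIPOutputStream(buf)) {
      gz.write(Bytes.toBytes("{\"Row\":[]}"));     // illustrative body
    }
    Header[] headers = new Header[] {
      new BasicHeader("Content-Type", "application/json"),
      new BasicHeader("Content-Encoding", "gzip"), // triggers GZIPRequestWrapper on the server
      new BasicHeader("Accept-Encoding", "gzip")   // asks for a gzipped response
    };
    Cluster cluster = new Cluster();
    cluster.add("localhost", 8080);                // hypothetical gateway address
    Client client = new Client(cluster);
    Response rsp = client.put("/mytable/myrow/cf:q", headers, buf.toByteArray());
    System.out.println(rsp.getCode());
  }
}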
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -34,36 +33,29 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This filter provides protection against cross site request forgery (CSRF) - * attacks for REST APIs. Enabling this filter on an endpoint results in the - * requirement of all client to send a particular (configurable) HTTP header - * with every request. In the absense of this header the filter will reject the - * attempt as a bad request. + * This filter provides protection against cross site request forgery (CSRF) attacks for REST APIs. + * Enabling this filter on an endpoint results in the requirement of all client to send a particular + * (configurable) HTTP header with every request. In the absense of this header the filter will + * reject the attempt as a bad request. */ @InterfaceAudience.Public public class RestCsrfPreventionFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(RestCsrfPreventionFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(RestCsrfPreventionFilter.class); public static final String HEADER_USER_AGENT = "User-Agent"; - public static final String BROWSER_USER_AGENT_PARAM = - "browser-useragents-regex"; + public static final String BROWSER_USER_AGENT_PARAM = "browser-useragents-regex"; public static final String CUSTOM_HEADER_PARAM = "custom-header"; - public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = - "methods-to-ignore"; - static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; + public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = "methods-to-ignore"; + static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; public static final String HEADER_DEFAULT = "X-XSRF-HEADER"; - static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; - private String headerName = HEADER_DEFAULT; + static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; + private String headerName = HEADER_DEFAULT; private Set methodsToIgnore = null; private Set browserUserAgents; @@ -73,8 +65,7 @@ public class RestCsrfPreventionFilter implements Filter { if (customHeader != null) { headerName = customHeader; } - String customMethodsToIgnore = - filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); + String customMethodsToIgnore = filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); if (customMethodsToIgnore != null) { parseMethodsToIgnore(customMethodsToIgnore); } else { @@ -86,13 +77,14 @@ public class RestCsrfPreventionFilter implements Filter { agents = BROWSER_USER_AGENTS_DEFAULT; } parseBrowserUserAgents(agents); - LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, " + LOG.info(String.format( + "Adding cross-site request forgery (CSRF) protection, " + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s", - headerName, methodsToIgnore, browserUserAgents)); + headerName, methodsToIgnore, browserUserAgents)); } void parseBrowserUserAgents(String userAgents) 
{ - String[] agentsArray = userAgents.split(","); + String[] agentsArray = userAgents.split(","); browserUserAgents = new HashSet<>(); for (String patternString : agentsArray) { browserUserAgents.add(Pattern.compile(patternString)); @@ -106,17 +98,14 @@ public class RestCsrfPreventionFilter implements Filter { } /** - * This method interrogates the User-Agent String and returns whether it - * refers to a browser. If its not a browser, then the requirement for the - * CSRF header will not be enforced; if it is a browser, the requirement will - * be enforced. + * This method interrogates the User-Agent String and returns whether it refers to a browser. If + * its not a browser, then the requirement for the CSRF header will not be enforced; if it is a + * browser, the requirement will be enforced. *

      - * A User-Agent String is considered to be a browser if it matches - * any of the regex patterns from browser-useragent-regex; the default - * behavior is to consider everything a browser that matches the following: - * "^Mozilla.*,^Opera.*". Subclasses can optionally override - * this method to use different behavior. - * + * A User-Agent String is considered to be a browser if it matches any of the regex patterns from + * browser-useragent-regex; the default behavior is to consider everything a browser that matches + * the following: "^Mozilla.*,^Opera.*". Subclasses can optionally override this method to use + * different behavior. * @param userAgent The User-Agent String, or null if there isn't one * @return true if the User-Agent String refers to a browser, false if not */ @@ -134,44 +123,38 @@ public class RestCsrfPreventionFilter implements Filter { } /** - * Defines the minimal API requirements for the filter to execute its - * filtering logic. This interface exists to facilitate integration in - * components that do not run within a servlet container and therefore cannot - * rely on a servlet container to dispatch to the {@link #doFilter} method. - * Applications that do run inside a servlet container will not need to write - * code that uses this interface. Instead, they can use typical servlet - * container configuration mechanisms to insert the filter. + * Defines the minimal API requirements for the filter to execute its filtering logic. This + * interface exists to facilitate integration in components that do not run within a servlet + * container and therefore cannot rely on a servlet container to dispatch to the {@link #doFilter} + * method. Applications that do run inside a servlet container will not need to write code that + * uses this interface. Instead, they can use typical servlet container configuration mechanisms + * to insert the filter. */ public interface HttpInteraction { /** * Returns the value of a header. - * * @param header name of header * @return value of header */ String getHeader(String header); /** - * Returns the method. - * - * @return method + * Returns the method. n */ String getMethod(); /** * Called by the filter after it decides that the request may proceed. - * - * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws IOException if there is an I/O error + * @throws ServletException if the implementation relies on the servlet API and a servlet API + * call has failed */ void proceed() throws IOException, ServletException; /** - * Called by the filter after it decides that the request is a potential - * CSRF attack and therefore must be rejected. - * - * @param code status code to send + * Called by the filter after it decides that the request is a potential CSRF attack and + * therefore must be rejected. + * @param code status code to send * @param message response message * @throws IOException if there is an I/O error */ @@ -180,31 +163,31 @@ public class RestCsrfPreventionFilter implements Filter { /** * Handles an {@link HttpInteraction} by applying the filtering logic. 
- * * @param httpInteraction caller's HTTP interaction - * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws IOException if there is an I/O error + * @throws ServletException if the implementation relies on the servlet API and a servlet API call + * has failed */ public void handleHttpInteraction(HttpInteraction httpInteraction) - throws IOException, ServletException { - if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) || - methodsToIgnore.contains(httpInteraction.getMethod()) || - httpInteraction.getHeader(headerName) != null) { + throws IOException, ServletException { + if ( + !isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) + || methodsToIgnore.contains(httpInteraction.getMethod()) + || httpInteraction.getHeader(headerName) != null + ) { httpInteraction.proceed(); } else { httpInteraction.sendError(HttpServletResponse.SC_BAD_REQUEST, - "Missing Required Header for CSRF Vulnerability Protection"); + "Missing Required Header for CSRF Vulnerability Protection"); } } @Override - public void doFilter(ServletRequest request, ServletResponse response, - final FilterChain chain) throws IOException, ServletException { - final HttpServletRequest httpRequest = (HttpServletRequest)request; - final HttpServletResponse httpResponse = (HttpServletResponse)response; - handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, - httpResponse, chain)); + public void doFilter(ServletRequest request, ServletResponse response, final FilterChain chain) + throws IOException, ServletException { + final HttpServletRequest httpRequest = (HttpServletRequest) request; + final HttpServletResponse httpResponse = (HttpServletResponse) response; + handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, httpResponse, chain)); } @Override @@ -212,15 +195,12 @@ public class RestCsrfPreventionFilter implements Filter { } /** - * Constructs a mapping of configuration properties to be used for filter - * initialization. The mapping includes all properties that start with the - * specified configuration prefix. Property names in the mapping are trimmed - * to remove the configuration prefix. - * - * @param conf configuration to read + * Constructs a mapping of configuration properties to be used for filter initialization. The + * mapping includes all properties that start with the specified configuration prefix. Property + * names in the mapping are trimmed to remove the configuration prefix. + * @param conf configuration to read * @param confPrefix configuration prefix - * @return mapping of configuration properties to be used for filter - * initialization + * @return mapping of configuration properties to be used for filter initialization */ public static Map getFilterParams(Configuration conf, String confPrefix) { Map filterConfigMap = new HashMap<>(); @@ -245,13 +225,12 @@ public class RestCsrfPreventionFilter implements Filter { /** * Creates a new ServletFilterHttpInteraction. 
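Tying handleHttpInteraction() back to callers: once this filter is enabled, a request from a browser-like User-Agent whose method is not in the ignore list must carry the configured header (X-XSRF-HEADER by default) or it is rejected with 400; only the header's presence is checked, not its value. A small sketch using the delete(String, Header) overload documented earlier, with an illustrative path:

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.http.message.BasicHeader;

public class CsrfHeaderSketch {
  public static void main(String[] args) throws Exception {
    Cluster cluster = new Cluster();
    cluster.add("localhost", 8080);           // hypothetical REST gateway
    Client client = new Client(cluster);
    // Presence of the custom header is enough; its value is not inspected.
    Response rsp = client.delete("/mytable/myrow", new BasicHeader("X-XSRF-HEADER", ""));
    System.out.println(rsp.getCode());        // 400 would indicate the header was missing
  }
}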
- * - * @param httpRequest request to process + * @param httpRequest request to process * @param httpResponse response to process - * @param chain filter chain to forward to if HTTP interaction is allowed + * @param chain filter chain to forward to if HTTP interaction is allowed */ public ServletFilterHttpInteraction(HttpServletRequest httpRequest, - HttpServletResponse httpResponse, FilterChain chain) { + HttpServletResponse httpResponse, FilterChain chain) { this.httpRequest = httpRequest; this.httpResponse = httpResponse; this.chain = chain; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index ffb6743f5e8..ae64a009289 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,34 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlValue; - -import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.hadoop.hbase.util.ByteStringer; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a cell. A cell is a single value associated a column and - * optional qualifier, and either the timestamp when it was stored or the user- - * provided timestamp if one was explicitly supplied. + * Representation of a cell. A cell is a single value associated a column and optional qualifier, + * and either the timestamp when it was stored or the user- provided timestamp if one was explicitly + * supplied. * *

        * <complexType name="Cell">
      @@ -59,7 +55,7 @@ import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
        * </complexType>
        * 
      */ -@XmlRootElement(name="Cell") +@XmlRootElement(name = "Cell") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellModel implements ProtobufMessageHandler, Serializable { @@ -80,41 +76,33 @@ public class CellModel implements ProtobufMessageHandler, Serializable { /** * Default constructor */ - public CellModel() {} + public CellModel() { + } /** - * Constructor - * @param column - * @param value + * Constructor nn */ public CellModel(byte[] column, byte[] value) { this(column, HConstants.LATEST_TIMESTAMP, value); } /** - * Constructor - * @param column - * @param qualifier - * @param value + * Constructor nnn */ public CellModel(byte[] column, byte[] qualifier, byte[] value) { this(column, qualifier, HConstants.LATEST_TIMESTAMP, value); } /** - * Constructor from KeyValue - * @param cell + * Constructor from KeyValue n */ public CellModel(org.apache.hadoop.hbase.Cell cell) { - this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil - .cloneValue(cell)); + this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), + CellUtil.cloneValue(cell)); } /** - * Constructor - * @param column - * @param timestamp - * @param value + * Constructor nnn */ public CellModel(byte[] column, long timestamp, byte[] value) { this.column = column; @@ -123,14 +111,9 @@ public class CellModel implements ProtobufMessageHandler, Serializable { } /** - * Constructor - * @param column - * @param qualifier - * @param timestamp - * @param value + * Constructor nnnn */ - public CellModel(byte[] column, byte[] qualifier, long timestamp, - byte[] value) { + public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) { this.column = CellUtil.makeColumn(column, qualifier); this.timestamp = timestamp; this.value = value; @@ -151,8 +134,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable { } /** - * @return true if the timestamp property has been specified by the - * user + * @return true if the timestamp property has been specified by the user */ public boolean hasUserTimestamp() { return timestamp != HConstants.LATEST_TIMESTAMP; @@ -198,8 +180,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Cell.Builder builder = Cell.newBuilder(); ProtobufUtil.mergeFrom(builder, message); setColumn(builder.getColumn().toByteArray()); @@ -222,28 +203,18 @@ public class CellModel implements ProtobufMessageHandler, Serializable { return false; } CellModel cellModel = (CellModel) obj; - return new EqualsBuilder(). - append(column, cellModel.column). - append(timestamp, cellModel.timestamp). - append(value, cellModel.value). - isEquals(); + return new EqualsBuilder().append(column, cellModel.column) + .append(timestamp, cellModel.timestamp).append(value, cellModel.value).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(column). - append(timestamp). - append(value). - toHashCode(); + return new HashCodeBuilder().append(column).append(timestamp).append(value).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("column", column). - append("timestamp", timestamp). - append("value", value). 
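A quick illustration of the CellModel constructors above: the overloads take column family, optional qualifier, optional timestamp and value, with the timestamp defaulting to LATEST_TIMESTAMP until one is supplied. The family/qualifier/value bytes here are invented:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelSketch {
  public static void main(String[] args) {
    // Family + qualifier + value; no user timestamp yet.
    CellModel cell = new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
    System.out.println(cell.hasUserTimestamp());      // false until a timestamp is supplied

    CellModel stamped =
      new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q"), 42L, Bytes.toBytes("v2"));
    System.out.println(stamped.hasUserTimestamp());   // true
    System.out.println(stamped.getTimestamp() != HConstants.LATEST_TIMESTAMP);
  }
}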
- toString(); + return new ToStringBuilder(this).append("column", column).append("timestamp", timestamp) + .append("value", value).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index 7fbfa0109d1..d962c71d314 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,24 +21,21 @@ import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell; import org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet; import org.apache.hadoop.hbase.util.ByteStringer; - import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a grouping of cells. May contain cells from more than - * one row. Encapsulates RowModel and CellModel models. + * Representation of a grouping of cells. May contain cells from more than one row. Encapsulates + * RowModel and CellModel models. * *
        * <complexType name="CellSet">
      @@ -70,13 +66,13 @@ import org.apache.yetus.audience.InterfaceAudience;
        * </complexType>
        * 
      */ -@XmlRootElement(name="CellSet") +@XmlRootElement(name = "CellSet") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellSetModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; - @XmlElement(name="Row") + @XmlElement(name = "Row") private List rows; /** @@ -130,8 +126,7 @@ public class CellSetModel implements Serializable, ProtobufMessageHandler { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { CellSet.Builder builder = CellSet.newBuilder(); ProtobufUtil.mergeFrom(builder, message); for (CellSet.Row row : builder.getRowsList()) { @@ -142,8 +137,7 @@ public class CellSetModel implements Serializable, ProtobufMessageHandler { timestamp = cell.getTimestamp(); } rowModel.addCell( - new CellModel(cell.getColumn().toByteArray(), timestamp, - cell.getData().toByteArray())); + new CellModel(cell.getColumn().toByteArray(), timestamp, cell.getData().toByteArray())); } addRow(rowModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java index 967f6ba2ce3..c15a96d2d67 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,28 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonAnySetter; import java.io.Serializable; import java.util.LinkedHashMap; import java.util.Map; - import javax.xml.bind.annotation.XmlAnyAttribute; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.namespace.QName; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; - -import com.fasterxml.jackson.annotation.JsonAnyGetter; -import com.fasterxml.jackson.annotation.JsonAnySetter; +import org.apache.yetus.audience.InterfaceAudience; /** * Representation of a column family schema. - * + * *
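Since rows only travel inside a CellSetModel (RowModel has no standalone protobuf form, as its createProtobufOutput makes explicit further below), a rough assembly sketch with invented keys and cells, showing the protobuf round trip at the CellSet level:

import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.rest.model.CellSetModel;
import org.apache.hadoop.hbase.rest.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellSetModelSketch {
  public static void main(String[] args) throws Exception {
    RowModel row = new RowModel(Bytes.toBytes("row1"));
    row.addCell(new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));

    CellSetModel cellSet = new CellSetModel();
    cellSet.addRow(row);

    // The CellSet is the unit of protobuf serialization, not the row.
    byte[] wire = cellSet.createProtobufOutput();
    CellSetModel parsed = (CellSetModel) new CellSetModel().getObjectFromMessage(wire);
    System.out.println(parsed.getRows().size());   // 1
  }
}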
        * <complexType name="ColumnSchema">
        *   <attribute name="name" type="string"></attribute>
      @@ -45,7 +40,7 @@ import com.fasterxml.jackson.annotation.JsonAnySetter;
        * </complexType>
        * 
      */ -@XmlRootElement(name="ColumnSchema") +@XmlRootElement(name = "ColumnSchema") @InterfaceAudience.Private public class ColumnSchemaModel implements Serializable { private static final long serialVersionUID = 1L; @@ -58,16 +53,17 @@ public class ColumnSchemaModel implements Serializable { private static QName VERSIONS = new QName(HConstants.VERSIONS); private String name; - private Map attrs = new LinkedHashMap<>(); + private Map attrs = new LinkedHashMap<>(); /** * Default constructor */ - public ColumnSchemaModel() {} + public ColumnSchemaModel() { + } /** * Add an attribute to the column family schema - * @param name the attribute name + * @param name the attribute name * @param value the attribute value */ @JsonAnySetter @@ -81,7 +77,7 @@ public class ColumnSchemaModel implements Serializable { */ public String getAttribute(String name) { Object o = attrs.get(new QName(name)); - return o != null ? o.toString(): null; + return o != null ? o.toString() : null; } /** @@ -97,7 +93,7 @@ public class ColumnSchemaModel implements Serializable { */ @XmlAnyAttribute @JsonAnyGetter - public Map getAny() { + public Map getAny() { return attrs; } @@ -108,7 +104,8 @@ public class ColumnSchemaModel implements Serializable { this.name = name; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -117,7 +114,7 @@ public class ColumnSchemaModel implements Serializable { sb.append("{ NAME => '"); sb.append(name); sb.append('\''); - for (Map.Entry e: attrs.entrySet()) { + for (Map.Entry e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); @@ -138,8 +135,7 @@ public class ColumnSchemaModel implements Serializable { */ public boolean __getBlockcache() { Object o = attrs.get(BLOCKCACHE); - return o != null ? - Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE; + return o != null ? Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE; } /** @@ -147,8 +143,7 @@ public class ColumnSchemaModel implements Serializable { */ public int __getBlocksize() { Object o = attrs.get(BLOCKSIZE); - return o != null ? - Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE; + return o != null ? Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE; } /** @@ -172,8 +167,7 @@ public class ColumnSchemaModel implements Serializable { */ public boolean __getInMemory() { Object o = attrs.get(IN_MEMORY); - return o != null ? - Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY; + return o != null ? Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY; } /** @@ -181,8 +175,7 @@ public class ColumnSchemaModel implements Serializable { */ public int __getTTL() { Object o = attrs.get(TTL); - return o != null ? - Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL; + return o != null ? Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL; } /** @@ -190,8 +183,7 @@ public class ColumnSchemaModel implements Serializable { */ public int __getVersions() { Object o = attrs.get(VERSIONS); - return o != null ? - Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS; + return o != null ? 
Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS; } /** @@ -216,7 +208,7 @@ public class ColumnSchemaModel implements Serializable { * @param value the desired value of the COMPRESSION attribute */ public void __setCompression(String value) { - attrs.put(COMPRESSION, value); + attrs.put(COMPRESSION, value); } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java index af3b0b067a4..aeeff20b041 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; @@ -24,18 +22,15 @@ import java.io.Serializable; import java.util.HashMap; import java.util.List; import java.util.Map; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; -import org.apache.hadoop.hbase.rest.protobuf - .generated.NamespacePropertiesMessage.NamespaceProperties; +import org.apache.hadoop.hbase.rest.protobuf.generated.NamespacePropertiesMessage.NamespaceProperties; +import org.apache.yetus.audience.InterfaceAudience; /** * List a HBase namespace's key/value properties. @@ -47,7 +42,7 @@ import org.apache.hadoop.hbase.rest.protobuf *
    • value: property value
    • * */ -@XmlRootElement(name="NamespaceProperties") +@XmlRootElement(name = "NamespaceProperties") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesInstanceModel implements Serializable, ProtobufMessageHandler { @@ -55,7 +50,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan private static final long serialVersionUID = 1L; // JAX-RS automatically converts Map to XMLAnyElement. - private Map properties = null; + private Map properties = null; @XmlTransient private String namespaceName; @@ -63,12 +58,12 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan /** * Default constructor. Do not use. */ - public NamespacesInstanceModel() {} + public NamespacesInstanceModel() { + } /** * Constructor to use if namespace does not exist in HBASE. - * @param namespaceName the namespace name. - * @throws IOException + * @param namespaceName the namespace name. n */ public NamespacesInstanceModel(String namespaceName) throws IOException { this(null, namespaceName); @@ -76,18 +71,21 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan /** * Constructor - * @param admin the administrative API - * @param namespaceName the namespace name. - * @throws IOException + * @param admin the administrative API + * @param namespaceName the namespace name. n */ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException { this.namespaceName = namespaceName; - if(admin == null) { return; } + if (admin == null) { + return; + } NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName); // For properly formed JSON, if no properties, field has to be null (not just no elements). - if(nd.getConfiguration().isEmpty()){ return; } + if (nd.getConfiguration().isEmpty()) { + return; + } properties = new HashMap<>(); properties.putAll(nd.getConfiguration()); @@ -95,11 +93,11 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan /** * Add property to the namespace. - * @param key attribute name + * @param key attribute name * @param value attribute value */ public void addProperty(String key, String value) { - if(properties == null){ + if (properties == null) { properties = new HashMap<>(); } properties.put(key, value); @@ -108,18 +106,19 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan /** * @return The map of uncategorized namespace properties. 
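As a usage note for the constructors and addProperty() above: when the model is built without an Admin, nothing is fetched from HBase and it simply accumulates key/value properties for later serialization. A sketch with an invented namespace name and property keys:

import org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel;

public class NamespacePropertiesSketch {
  public static void main(String[] args) throws Exception {
    // No Admin supplied: the properties map starts out null and is created lazily.
    NamespacesInstanceModel model = new NamespacesInstanceModel("my_ns");
    model.addProperty("OWNER", "analytics");     // illustrative property
    model.addProperty("quota", "unlimited");     // illustrative property
    System.out.println(model.getNamespaceName());
    System.out.println(model.getProperties());   // map now holds both entries
  }
}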
*/ - public Map getProperties() { - if(properties == null){ + public Map getProperties() { + if (properties == null) { properties = new HashMap<>(); } return properties; } - public String getNamespaceName(){ + public String getNamespaceName() { return namespaceName; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -128,7 +127,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan sb.append("{NAME => \'"); sb.append(namespaceName); sb.append("\'"); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { sb.append(", "); sb.append(entry.getKey()); @@ -144,7 +143,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan @Override public byte[] createProtobufOutput() { NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { String key = entry.getKey(); NamespaceProperties.Property.Builder property = NamespaceProperties.Property.newBuilder(); @@ -161,7 +160,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); builder.mergeFrom(message); List properties = builder.getPropsList(); - for(NamespaceProperties.Property property: properties){ + for (NamespaceProperties.Property property : properties) { addProperty(property.getKey(), property.getValue()); } return this; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java index c0dfa75355e..cfd94d1ebc4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,27 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.NamespacesMessage.Namespaces; - -import com.fasterxml.jackson.annotation.JsonProperty; - +import org.apache.yetus.audience.InterfaceAudience; /** * A list of HBase namespaces. @@ -44,7 +38,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; *
    • Namespace: namespace name
    • * */ -@XmlRootElement(name="Namespaces") +@XmlRootElement(name = "Namespaces") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesModel implements Serializable, ProtobufMessageHandler { @@ -52,18 +46,18 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @JsonProperty("Namespace") - @XmlElement(name="Namespace") + @XmlElement(name = "Namespace") private List namespaces = new ArrayList<>(); /** * Default constructor. Do not use. */ - public NamespacesModel() {} + public NamespacesModel() { + } /** * Constructor - * @param admin the administrative API - * @throws IOException + * @param admin the administrative API n */ public NamespacesModel(Admin admin) throws IOException { NamespaceDescriptor[] nds = admin.listNamespaceDescriptors(); @@ -87,7 +81,8 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler { this.namespaces = namespaces; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index b560f697dea..34f065105d6 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -40,21 +35,20 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a row. A row is a related set of cells, grouped by common - * row key. RowModels do not appear in results by themselves. They are always - * encapsulated within CellSetModels. - * + * Representation of a row. A row is a related set of cells, grouped by common row key. RowModels do + * not appear in results by themselves. They are always encapsulated within CellSetModels. + * *
        * <complexType name="Row">
        *   <sequence>
        *     <element name="key" type="base64Binary"></element>
      - *     <element name="cell" type="tns:Cell" 
      + *     <element name="cell" type="tns:Cell"
        *       maxOccurs="unbounded" minOccurs="1"></element>
        *   </sequence>
        * </complexType>
        * 
      */ -@XmlRootElement(name="Row") +@XmlRootElement(name = "Row") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class RowModel implements ProtobufMessageHandler, Serializable { @@ -65,14 +59,14 @@ public class RowModel implements ProtobufMessageHandler, Serializable { private byte[] key; @JsonProperty("Cell") - @XmlElement(name="Cell") + @XmlElement(name = "Cell") private List cells = new ArrayList<>(); - /** * Default constructor */ - public RowModel() { } + public RowModel() { + } /** * Constructor @@ -81,7 +75,7 @@ public class RowModel implements ProtobufMessageHandler, Serializable { public RowModel(final String key) { this(Bytes.toBytes(key)); } - + /** * Constructor * @param key the row key @@ -93,23 +87,23 @@ public class RowModel implements ProtobufMessageHandler, Serializable { /** * Constructor - * @param key the row key + * @param key the row key * @param cells the cells */ public RowModel(final String key, final List cells) { this(Bytes.toBytes(key), cells); } - + /** * Constructor - * @param key the row key + * @param key the row key * @param cells the cells */ public RowModel(final byte[] key, final List cells) { this.key = key; this.cells = cells; } - + /** * Adds a cell to the list of cells for this row * @param cell the cell @@ -142,16 +136,13 @@ public class RowModel implements ProtobufMessageHandler, Serializable { @Override public byte[] createProtobufOutput() { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override @@ -166,25 +157,16 @@ public class RowModel implements ProtobufMessageHandler, Serializable { return false; } RowModel rowModel = (RowModel) obj; - return new EqualsBuilder(). - append(key, rowModel.key). - append(cells, rowModel.cells). - isEquals(); + return new EqualsBuilder().append(key, rowModel.key).append(cells, rowModel.cells).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(key). - append(cells). - toHashCode(); + return new HashCodeBuilder().append(key).append(cells).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("key", key). - append("cells", cells). - toString(); + return new ToStringBuilder(this).append("key", key).append("cells", cells).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 228d2885ec8..fc59d14d56c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonInclude; @@ -97,7 +95,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; * </complexType> * */ -@XmlRootElement(name="Scanner") +@XmlRootElement(name = "Scanner") @JsonInclude(JsonInclude.Include.NON_NULL) @InterfaceAudience.Private public class ScannerModel implements ProtobufMessageHandler, Serializable { @@ -117,8 +115,8 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { private boolean cacheBlocks = true; /** - * Implement lazily-instantiated singleton as per recipe - * here: http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ + * Implement lazily-instantiated singleton as per recipe here: + * http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ */ private static class JaxbJsonProviderHolder { static final JacksonJaxbJsonProvider INSTANCE = new JacksonJaxbJsonProvider(); @@ -129,9 +127,12 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { @XmlRootElement static class ByteArrayComparableModel { - @XmlAttribute public String type; - @XmlAttribute public String value; - @XmlAttribute public String op; + @XmlAttribute + public String type; + @XmlAttribute + public String value; + @XmlAttribute + public String op; static enum ComparatorType { BinaryComparator, @@ -142,10 +143,10 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { SubstringComparator } - public ByteArrayComparableModel() { } + public ByteArrayComparableModel() { + } - public ByteArrayComparableModel( - ByteArrayComparable comparator) { + public ByteArrayComparableModel(ByteArrayComparable comparator) { String typeName = comparator.getClass().getSimpleName(); ComparatorType type = ComparatorType.valueOf(typeName); this.type = typeName; @@ -156,7 +157,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { break; case BitComparator: this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue())); - this.op = ((BitComparator)comparator).getOperator().toString(); + this.op = ((BitComparator) comparator).getOperator().toString(); break; case NullComparator: break; @@ -180,7 +181,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { break; case BitComparator: comparator = new BitComparator(Base64.getDecoder().decode(value), - BitComparator.BitwiseOp.valueOf(op)); + BitComparator.BitwiseOp.valueOf(op)); break; case NullComparator: comparator = new NullComparator(); @@ -201,26 +202,46 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { // A grab bag of fields, would have been a union if this were C. // These are null by default and will only be serialized if set (non null). 
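Picking up the "grab bag of fields" comment above: the FilterModel fields that follow are serialized only when set, so each supported Filter maps to a small record of its simple class name plus the relevant attributes, with byte[] arguments Base64-encoded. A hedged sketch of what the PrefixFilter branch captures; the JSON shown in the comment is approximate:

import java.util.Base64;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixFilterEncodingSketch {
  public static void main(String[] args) {
    PrefixFilter filter = new PrefixFilter(Bytes.toBytes("abc"));
    String type = filter.getClass().getSimpleName();   // "PrefixFilter"
    String value = Bytes.toString(Base64.getEncoder().encode(filter.getPrefix()));
    // Serialized by the REST API as roughly: {"type":"PrefixFilter","value":"YWJj"}
    System.out.println(type + " " + value);
  }
}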
- @XmlAttribute public String type; - @XmlAttribute public String op; - @XmlElement ByteArrayComparableModel comparator; - @XmlAttribute public String value; - @XmlElement public List filters; - @XmlAttribute public Integer limit; - @XmlAttribute public Integer offset; - @XmlAttribute public String family; - @XmlAttribute public String qualifier; - @XmlAttribute public Boolean ifMissing; - @XmlAttribute public Boolean latestVersion; - @XmlAttribute public String minColumn; - @XmlAttribute public Boolean minColumnInclusive; - @XmlAttribute public String maxColumn; - @XmlAttribute public Boolean maxColumnInclusive; - @XmlAttribute public Boolean dropDependentColumn; - @XmlAttribute public Float chance; - @XmlElement public List prefixes; - @XmlElement private List ranges; - @XmlElement public List timestamps; + @XmlAttribute + public String type; + @XmlAttribute + public String op; + @XmlElement + ByteArrayComparableModel comparator; + @XmlAttribute + public String value; + @XmlElement + public List filters; + @XmlAttribute + public Integer limit; + @XmlAttribute + public Integer offset; + @XmlAttribute + public String family; + @XmlAttribute + public String qualifier; + @XmlAttribute + public Boolean ifMissing; + @XmlAttribute + public Boolean latestVersion; + @XmlAttribute + public String minColumn; + @XmlAttribute + public Boolean minColumnInclusive; + @XmlAttribute + public String maxColumn; + @XmlAttribute + public Boolean maxColumnInclusive; + @XmlAttribute + public Boolean dropDependentColumn; + @XmlAttribute + public Float chance; + @XmlElement + public List prefixes; + @XmlElement + private List ranges; + @XmlElement + public List timestamps; static enum FilterType { ColumnCountGetFilter, @@ -248,7 +269,8 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { WhileMatchFilter } - public FilterModel() { } + public FilterModel() { + } public FilterModel(Filter filter) { String typeName = filter.getClass().getSimpleName(); @@ -256,25 +278,25 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { this.type = typeName; switch (type) { case ColumnCountGetFilter: - this.limit = ((ColumnCountGetFilter)filter).getLimit(); + this.limit = ((ColumnCountGetFilter) filter).getLimit(); break; case ColumnPaginationFilter: - this.limit = ((ColumnPaginationFilter)filter).getLimit(); - this.offset = ((ColumnPaginationFilter)filter).getOffset(); + this.limit = ((ColumnPaginationFilter) filter).getLimit(); + this.offset = ((ColumnPaginationFilter) filter).getOffset(); break; case ColumnPrefixFilter: - byte[] src = ((ColumnPrefixFilter)filter).getPrefix(); + byte[] src = ((ColumnPrefixFilter) filter).getPrefix(); this.value = Bytes.toString(Base64.getEncoder().encode(src)); break; case ColumnRangeFilter: - ColumnRangeFilter crf = (ColumnRangeFilter)filter; + ColumnRangeFilter crf = (ColumnRangeFilter) filter; this.minColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMinColumn())); this.minColumnInclusive = crf.getMinColumnInclusive(); this.maxColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMaxColumn())); this.maxColumnInclusive = crf.getMaxColumnInclusive(); break; case DependentColumnFilter: { - DependentColumnFilter dcf = (DependentColumnFilter)filter; + DependentColumnFilter dcf = (DependentColumnFilter) filter; this.family = Bytes.toString(Base64.getEncoder().encode(dcf.getFamily())); byte[] qualifier = dcf.getQualifier(); if (qualifier != null) { @@ -283,11 +305,12 @@ public class ScannerModel implements ProtobufMessageHandler, 
Serializable { this.op = dcf.getOperator().toString(); this.comparator = new ByteArrayComparableModel(dcf.getComparator()); this.dropDependentColumn = dcf.dropDependentColumn(); - } break; + } + break; case FilterList: - this.op = ((FilterList)filter).getOperator().toString(); + this.op = ((FilterList) filter).getOperator().toString(); this.filters = new ArrayList<>(); - for (Filter child: ((FilterList)filter).getFilters()) { + for (Filter child : ((FilterList) filter).getFilters()) { this.filters.add(new FilterModel(child)); } break; @@ -295,40 +318,38 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { case KeyOnlyFilter: break; case InclusiveStopFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((InclusiveStopFilter)filter).getStopRowKey())); + this.value = Bytes + .toString(Base64.getEncoder().encode(((InclusiveStopFilter) filter).getStopRowKey())); break; case MultipleColumnPrefixFilter: this.prefixes = new ArrayList<>(); - for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) { + for (byte[] prefix : ((MultipleColumnPrefixFilter) filter).getPrefix()) { this.prefixes.add(Bytes.toString(Base64.getEncoder().encode(prefix))); } break; case MultiRowRangeFilter: this.ranges = new ArrayList<>(); - for(RowRange range : ((MultiRowRangeFilter)filter).getRowRanges()) { + for (RowRange range : ((MultiRowRangeFilter) filter).getRowRanges()) { this.ranges.add(new RowRange(range.getStartRow(), range.isStartRowInclusive(), - range.getStopRow(), range.isStopRowInclusive())); + range.getStopRow(), range.isStopRowInclusive())); } break; case PageFilter: - this.value = Long.toString(((PageFilter)filter).getPageSize()); + this.value = Long.toString(((PageFilter) filter).getPageSize()); break; case PrefixFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((PrefixFilter)filter).getPrefix())); + this.value = + Bytes.toString(Base64.getEncoder().encode(((PrefixFilter) filter).getPrefix())); break; case FamilyFilter: case QualifierFilter: case RowFilter: case ValueFilter: - this.op = ((CompareFilter)filter).getOperator().toString(); - this.comparator = - new ByteArrayComparableModel( - ((CompareFilter)filter).getComparator()); + this.op = ((CompareFilter) filter).getOperator().toString(); + this.comparator = new ByteArrayComparableModel(((CompareFilter) filter).getComparator()); break; case RandomRowFilter: - this.chance = ((RandomRowFilter)filter).getChance(); + this.chance = ((RandomRowFilter) filter).getChance(); break; case SingleColumnValueExcludeFilter: case SingleColumnValueFilter: { @@ -339,26 +360,25 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { this.qualifier = Bytes.toString(Base64.getEncoder().encode(qualifier)); } this.op = scvf.getOperator().toString(); - this.comparator = - new ByteArrayComparableModel(scvf.getComparator()); + this.comparator = new ByteArrayComparableModel(scvf.getComparator()); if (scvf.getFilterIfMissing()) { this.ifMissing = true; } if (scvf.getLatestVersionOnly()) { this.latestVersion = true; } - } break; + } + break; case SkipFilter: this.filters = new ArrayList<>(); - this.filters.add(new FilterModel(((SkipFilter)filter).getFilter())); + this.filters.add(new FilterModel(((SkipFilter) filter).getFilter())); break; case TimestampsFilter: - this.timestamps = ((TimestampsFilter)filter).getTimestamps(); + this.timestamps = ((TimestampsFilter) filter).getTimestamps(); break; case WhileMatchFilter: this.filters = new ArrayList<>(); - this.filters.add( - new 
FilterModel(((WhileMatchFilter)filter).getFilter())); + this.filters.add(new FilterModel(((WhileMatchFilter) filter).getFilter())); break; default: throw new RuntimeException("unhandled filter type " + type); @@ -368,105 +388,107 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { public Filter build() { Filter filter; switch (FilterType.valueOf(type)) { - case ColumnCountGetFilter: - filter = new ColumnCountGetFilter(limit); - break; - case ColumnPaginationFilter: - filter = new ColumnPaginationFilter(limit, offset); - break; - case ColumnPrefixFilter: - filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); - break; - case ColumnRangeFilter: - filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), - minColumnInclusive, Base64.getDecoder().decode(maxColumn), - maxColumnInclusive); - break; - case DependentColumnFilter: - filter = new DependentColumnFilter(Base64.getDecoder().decode(family), + case ColumnCountGetFilter: + filter = new ColumnCountGetFilter(limit); + break; + case ColumnPaginationFilter: + filter = new ColumnPaginationFilter(limit, offset); + break; + case ColumnPrefixFilter: + filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); + break; + case ColumnRangeFilter: + filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), minColumnInclusive, + Base64.getDecoder().decode(maxColumn), maxColumnInclusive); + break; + case DependentColumnFilter: + filter = new DependentColumnFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, dropDependentColumn, + CompareOperator.valueOf(op), comparator.build()); + break; + case FamilyFilter: + filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case FilterList: { + List list = new ArrayList<>(filters.size()); + for (FilterModel model : filters) { + list.add(model.build()); + } + filter = new FilterList(FilterList.Operator.valueOf(op), list); + } + break; + case FirstKeyOnlyFilter: + filter = new FirstKeyOnlyFilter(); + break; + case InclusiveStopFilter: + filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); + break; + case KeyOnlyFilter: + filter = new KeyOnlyFilter(); + break; + case MultipleColumnPrefixFilter: { + byte[][] values = new byte[prefixes.size()][]; + for (int i = 0; i < prefixes.size(); i++) { + values[i] = Base64.getDecoder().decode(prefixes.get(i)); + } + filter = new MultipleColumnPrefixFilter(values); + } + break; + case MultiRowRangeFilter: { + filter = new MultiRowRangeFilter(ranges); + } + break; + case PageFilter: + filter = new PageFilter(Long.parseLong(value)); + break; + case PrefixFilter: + filter = new PrefixFilter(Base64.getDecoder().decode(value)); + break; + case QualifierFilter: + filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case RandomRowFilter: + filter = new RandomRowFilter(chance); + break; + case RowFilter: + filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case SingleColumnValueFilter: + filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, - dropDependentColumn, CompareOperator.valueOf(op), comparator.build()); - break; - case FamilyFilter: - filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case FilterList: { - List list = new ArrayList<>(filters.size()); - for (FilterModel model: filters) { - list.add(model.build()); - } - filter = new FilterList(FilterList.Operator.valueOf(op), list); - } break; - case FirstKeyOnlyFilter: - filter = new FirstKeyOnlyFilter(); - break; - case InclusiveStopFilter: - filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); - break; - case KeyOnlyFilter: - filter = new KeyOnlyFilter(); - break; - case MultipleColumnPrefixFilter: { - byte[][] values = new byte[prefixes.size()][]; - for (int i = 0; i < prefixes.size(); i++) { - values[i] = Base64.getDecoder().decode(prefixes.get(i)); - } - filter = new MultipleColumnPrefixFilter(values); - } break; - case MultiRowRangeFilter: { - filter = new MultiRowRangeFilter(ranges); - } break; - case PageFilter: - filter = new PageFilter(Long.parseLong(value)); - break; - case PrefixFilter: - filter = new PrefixFilter(Base64.getDecoder().decode(value)); - break; - case QualifierFilter: - filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case RandomRowFilter: - filter = new RandomRowFilter(chance); - break; - case RowFilter: - filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case SingleColumnValueFilter: - filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing); - } - if (latestVersion != null) { - ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion); - } - break; - case SingleColumnValueExcludeFilter: - filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing); - } - if (latestVersion != null) { - ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion); - } - break; - case SkipFilter: - filter = new SkipFilter(filters.get(0).build()); - break; - case TimestampsFilter: - filter = new TimestampsFilter(timestamps); - break; - case ValueFilter: - filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case WhileMatchFilter: - filter = new WhileMatchFilter(filters.get(0).build()); - break; - default: - throw new RuntimeException("unhandled filter type: " + type); + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SingleColumnValueExcludeFilter: + filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), + qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueExcludeFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueExcludeFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SkipFilter: + filter = new SkipFilter(filters.get(0).build()); + break; + case TimestampsFilter: + filter = new TimestampsFilter(timestamps); + break; + case ValueFilter: + filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case WhileMatchFilter: + filter = new WhileMatchFilter(filters.get(0).build()); + break; + default: + throw new RuntimeException("unhandled filter type: " + type); } return filter; } @@ -475,7 +497,6 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { /** * Get the JacksonJaxbJsonProvider instance; - * * @return A JacksonJaxbJsonProvider. */ private static JacksonJaxbJsonProvider getJasonProvider() { @@ -484,40 +505,38 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { /** * @param s the JSON representation of the filter - * @return the filter - * @throws Exception + * @return the filter n */ public static Filter buildFilter(String s) throws Exception { - FilterModel model = getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).readValue(s, FilterModel.class); + FilterModel model = + getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .readValue(s, FilterModel.class); return model.build(); } /** * @param filter the filter - * @return the JSON representation of the filter - * @throws Exception + * @return the JSON representation of the filter n */ public static String stringifyFilter(final Filter filter) throws Exception { - return getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).writeValueAsString(new FilterModel(filter)); + return getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .writeValueAsString(new FilterModel(filter)); } private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":"); /** - * @param scan the scan specification - * @throws Exception + * @param scan the scan specification n */ public static ScannerModel fromScan(Scan scan) throws Exception { ScannerModel model = new ScannerModel(); model.setStartRow(scan.getStartRow()); model.setEndRow(scan.getStopRow()); - Map> families = scan.getFamilyMap(); + Map> families = scan.getFamilyMap(); if (families != null) { - for (Map.Entry> entry : families.entrySet()) { + for (Map.Entry> entry : families.entrySet()) { if (entry.getValue() != null) { - for (byte[] qualifier: entry.getValue()) { + for (byte[] qualifier : entry.getValue()) { model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier)); } } else { @@ -557,22 +576,22 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { /** * Default constructor */ - public ScannerModel() {} + public ScannerModel() { + } /** * Constructor - * @param startRow the start key of the row-range - * @param endRow the end key of the row-range - * @param columns the columns to scan - * @param batch the number of values to return in batch - * @param caching the number of rows that the scanner will fetch at once - * @param endTime the upper bound on timestamps of values of interest + * @param startRow the start key of the row-range + * @param endRow the end key of the row-range + * @param 
columns the columns to scan + * @param batch the number of values to return in batch + * @param caching the number of rows that the scanner will fetch at once + * @param endTime the upper bound on timestamps of values of interest * @param maxVersions the maximum number of versions to return - * @param filter a filter specification - * (values with timestamps later than this are excluded) + * @param filter a filter specification (values with timestamps later than this are excluded) */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long endTime, int maxVersions, String filter) { + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long endTime, int maxVersions, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -586,19 +605,19 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { /** * Constructor - * @param startRow the start key of the row-range - * @param endRow the end key of the row-range - * @param columns the columns to scan - * @param batch the number of values to return in batch - * @param caching the number of rows that the scanner will fetch at once - * @param startTime the lower bound on timestamps of values of interest - * (values with timestamps earlier than this are excluded) - * @param endTime the upper bound on timestamps of values of interest - * (values with timestamps later than this are excluded) - * @param filter a filter specification + * @param startRow the start key of the row-range + * @param endRow the end key of the row-range + * @param columns the columns to scan + * @param batch the number of values to return in batch + * @param caching the number of rows that the scanner will fetch at once + * @param startTime the lower bound on timestamps of values of interest (values with timestamps + * earlier than this are excluded) + * @param endTime the upper bound on timestamps of values of interest (values with timestamps + * later than this are excluded) + * @param filter a filter specification */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long startTime, long endTime, String filter) { + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long startTime, long endTime, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -624,6 +643,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { public void addLabel(String label) { labels.add(label); } + /** * @return true if a start row was specified */ @@ -657,12 +677,12 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { /** * @return list of columns of interest in column:qualifier format, or empty for all */ - @XmlElement(name="column") + @XmlElement(name = "column") public List getColumns() { return columns; } - @XmlElement(name="labels") + @XmlElement(name = "labels") public List getLabels() { return labels; } @@ -759,7 +779,8 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { } /** - * @param value true if HFile blocks should be cached on the servers for this scan, false otherwise + * @param value true if HFile blocks should be cached on the servers for this scan, false + * otherwise */ public void setCacheBlocks(boolean value) { this.cacheBlocks = value; @@ -802,7 +823,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { if (!Bytes.equals(endRow, 
HConstants.EMPTY_START_ROW)) { builder.setEndRow(ByteStringer.wrap(endRow)); } - for (byte[] column: columns) { + for (byte[] column : columns) { builder.addColumns(ByteStringer.wrap(column)); } if (startTime != 0) { @@ -828,8 +849,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Scanner.Builder builder = Scanner.newBuilder(); ProtobufUtil.mergeFrom(builder, message); if (builder.hasStartRow()) { @@ -838,7 +858,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { if (builder.hasEndRow()) { endRow = builder.getEndRow().toByteArray(); } - for (ByteString column: builder.getColumnsList()) { + for (ByteString column : builder.getColumnsList()) { addColumn(column.toByteArray()); } if (builder.hasBatch()) { @@ -861,7 +881,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { } if (builder.getLabelsList() != null) { List labels = builder.getLabelsList(); - for(String label : labels) { + for (String label : labels) { addLabel(label); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index 4f4276a190a..46bfb9091c3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,23 +18,19 @@ package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -43,8 +38,8 @@ import org.apache.yetus.audience.InterfaceAudience; *

      *

        *
 * <li>regions: the total number of regions served by the cluster</li>
- * <li>requests: the total number of requests per second handled by the
- * cluster in the last reporting interval</li>
+ * <li>requests: the total number of requests per second handled by the cluster in the last
+ * reporting interval</li>
 * <li>averageLoad: the average load of the region servers in the cluster</li>
 * <li>liveNodes: detailed status of the live region servers</li>
 * <li>deadNodes: the names of region servers declared dead</li>
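To make the fields listed above concrete, here is a small sketch (not part of the commit) that populates a StorageClusterStatusModel the same way `getObjectFromMessage(...)` below does, using the `addLiveNode(...)` and `Node.addRegion(...)` signatures reformatted in this file. The server name, start code, and all numeric values are made-up examples, and the no-argument constructor is assumed (it is the usual JAXB default).

```java
import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
import org.apache.hadoop.hbase.util.Bytes;

public class StatusModelSketch {
  public static void main(String[] args) {
    StorageClusterStatusModel status = new StorageClusterStatusModel();
    // addLiveNode(name, startCode, heapSizeMB, maxHeapSizeMB) returns the Node just added.
    StorageClusterStatusModel.Node node =
      status.addLiveNode("rs1.example.com:16020", 1651400000000L, 512, 4096);
    node.setRequests(42);
    // Mirrors the thirteen-argument Node.addRegion(...) call shown in getObjectFromMessage below.
    node.addRegion(Bytes.toBytes("t1,,1651400000000.abcdef."), 1, 2, 10, 5, 1L, 100L, 50L,
      1, 2, 3, 0L, 0L);
    byte[] pb = status.createProtobufOutput(); // e.g. for an application/x-protobuf response
    System.out.println(pb.length);
  }
}
```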
      • @@ -94,7 +89,7 @@ import org.apache.yetus.audience.InterfaceAudience; * </complexType> * */ -@XmlRootElement(name="ClusterStatus") +@XmlRootElement(name = "ClusterStatus") @InterfaceAudience.Private public class StorageClusterStatusModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -141,18 +136,17 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH /** * Constructor - * @param name the region name - * @param stores the number of stores - * @param storefiles the number of store files - * @param storefileSizeMB total size of store files, in MB - * @param memstoreSizeMB total size of memstore, in MB + * @param name the region name + * @param stores the number of stores + * @param storefiles the number of store files + * @param storefileSizeMB total size of store files, in MB + * @param memstoreSizeMB total size of memstore, in MB * @param storefileIndexSizeKB total size of store file indexes, in KB */ - public Region(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, - long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB, - int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { + public Region(byte[] name, int stores, int storefiles, int storefileSizeMB, + int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, + long writeRequestsCount, int rootIndexSizeKB, int totalStaticIndexSizeKB, + int totalStaticBloomSizeKB, long totalCompactingKVs, long currentCompactedKVs) { this.name = name; this.stores = stores; this.storefiles = storefiles; @@ -280,8 +274,7 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH } /** - * @param rootIndexSizeKB The current total size of root-level indexes - * for the region, in KB + * @param rootIndexSizeKB The current total size of root-level indexes for the region, in KB */ public void setRootIndexSizeKB(int rootIndexSizeKB) { this.rootIndexSizeKB = rootIndexSizeKB; @@ -295,32 +288,31 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH } /** - * @param currentCompactedKVs The completed count of key values - * in currently running compaction + * @param currentCompactedKVs The completed count of key values in currently running + * compaction */ public void setCurrentCompactedKVs(long currentCompactedKVs) { this.currentCompactedKVs = currentCompactedKVs; } /** - * @param totalCompactingKVs The total compacting key values - * in currently running compaction + * @param totalCompactingKVs The total compacting key values in currently running compaction */ public void setTotalCompactingKVs(long totalCompactingKVs) { this.totalCompactingKVs = totalCompactingKVs; } /** - * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, - * not just loaded into the block cache, in KB. + * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, not just loaded + * into the block cache, in KB. */ public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) { this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; } /** - * @param totalStaticIndexSizeKB The total size of all index blocks, - * not just the root level, in KB. + * @param totalStaticIndexSizeKB The total size of all index blocks, not just the root level, + * in KB. 
*/ public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) { this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; @@ -380,15 +372,13 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH * Add a region name to the list * @param name the region name */ - public void addRegion(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, - long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB, - int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { - regions.add(new Region(name, stores, storefiles, storefileSizeMB, - memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, - writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, - totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); + public void addRegion(byte[] name, int stores, int storefiles, int storefileSizeMB, + int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, + long writeRequestsCount, int rootIndexSizeKB, int totalStaticIndexSizeKB, + int totalStaticBloomSizeKB, long totalCompactingKVs, long currentCompactedKVs) { + regions.add(new Region(name, stores, storefiles, storefileSizeMB, memstoreSizeMB, + storefileIndexSizeKB, readRequestsCount, writeRequestsCount, rootIndexSizeKB, + totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); } /** @@ -402,11 +392,12 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH /** * Default constructor */ - public Node() {} + public Node() { + } /** * Constructor - * @param name the region server name + * @param name the region server name * @param startCode the region server's start code */ public Node(String name, long startCode) { @@ -449,7 +440,7 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH /** * @return the list of regions served by the region server */ - @XmlElement(name="Region") + @XmlElement(name = "Region") public List getRegions() { return regions; } @@ -513,9 +504,9 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH /** * Add a live node to the cluster representation. 
- * @param name the region server name - * @param startCode the region server's start code - * @param heapSizeMB the current heap size, in MB + * @param name the region server name + * @param startCode the region server's start code + * @param heapSizeMB the current heap size, in MB * @param maxHeapSizeMB the maximum heap size, in MB */ public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) { @@ -588,7 +579,7 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH /** * @return the total number of requests per second handled by the cluster in the last reporting - * interval + * interval */ @XmlAttribute public long getRequests() { @@ -641,9 +632,8 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append(String.format("%d live servers, %d dead servers, " + - "%.4f average load%n%n", liveNodes.size(), deadNodes.size(), - averageLoad)); + sb.append(String.format("%d live servers, %d dead servers, " + "%.4f average load%n%n", + liveNodes.size(), deadNodes.size(), averageLoad)); if (!liveNodes.isEmpty()) { sb.append(liveNodes.size()); sb.append(" live servers\n"); @@ -713,8 +703,7 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH builder.setRequests(requests); builder.setAverageLoad(averageLoad); for (Node node : liveNodes) { - StorageClusterStatus.Node.Builder nodeBuilder = - StorageClusterStatus.Node.newBuilder(); + StorageClusterStatus.Node.Builder nodeBuilder = StorageClusterStatus.Node.newBuilder(); nodeBuilder.setName(node.name); nodeBuilder.setStartCode(node.startCode); nodeBuilder.setRequests(node.requests); @@ -762,25 +751,16 @@ public class StorageClusterStatusModel implements Serializable, ProtobufMessageH for (StorageClusterStatus.Node node : builder.getLiveNodesList()) { long startCode = node.hasStartCode() ? node.getStartCode() : -1; StorageClusterStatusModel.Node nodeModel = - addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), - node.getMaxHeapSizeMB()); + addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), node.getMaxHeapSizeMB()); long requests = node.hasRequests() ? 
node.getRequests() : 0; nodeModel.setRequests(requests); for (StorageClusterStatus.Region region : node.getRegionsList()) { - nodeModel.addRegion( - region.getName().toByteArray(), - region.getStores(), - region.getStorefiles(), - region.getStorefileSizeMB(), - region.getMemStoreSizeMB(), - region.getStorefileIndexSizeKB(), - region.getReadRequestsCount(), - region.getWriteRequestsCount(), - region.getRootIndexSizeKB(), - region.getTotalStaticIndexSizeKB(), - region.getTotalStaticBloomSizeKB(), - region.getTotalCompactingKVs(), - region.getCurrentCompactedKVs()); + nodeModel.addRegion(region.getName().toByteArray(), region.getStores(), + region.getStorefiles(), region.getStorefileSizeMB(), region.getMemStoreSizeMB(), + region.getStorefileIndexSizeKB(), region.getReadRequestsCount(), + region.getWriteRequestsCount(), region.getRootIndexSizeKB(), + region.getTotalStaticIndexSizeKB(), region.getTotalStaticBloomSizeKB(), + region.getTotalCompactingKVs(), region.getCurrentCompactedKVs()); } } for (String node : builder.getDeadNodesList()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java index 584099765c7..e23c6af8177 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.yetus.audience.InterfaceAudience; /** * Simple representation of the version of the storage cluster - * + * *
          * <complexType name="StorageClusterVersion">
          *   <attribute name="version" type="string"></attribute>
          * </complexType>
          * 
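Not part of the commit: a sketch of what the schema above maps to at runtime, marshalling a StorageClusterVersionModel with JAXB to show the `<ClusterVersion Version="..."/>` shape implied by the `@XmlRootElement` and `@XmlAttribute` annotations below. The version string is a made-up example.

```java
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;

public class ClusterVersionSketch {
  public static void main(String[] args) throws Exception {
    StorageClusterVersionModel model = new StorageClusterVersionModel();
    model.setVersion("2.5.0"); // hypothetical cluster version
    Marshaller m = JAXBContext.newInstance(StorageClusterVersionModel.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
    m.marshal(model, System.out); // prints something like <ClusterVersion Version="2.5.0"/>
  }
}
```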
        */ -@XmlRootElement(name="ClusterVersion") +@XmlRootElement(name = "ClusterVersion") @InterfaceAudience.Private public class StorageClusterVersionModel implements Serializable { private static final long serialVersionUID = 1L; @@ -45,7 +41,7 @@ public class StorageClusterVersionModel implements Serializable { /** * @return the storage cluster version */ - @XmlAttribute(name="Version") + @XmlAttribute(name = "Version") public String getVersion() { return version; } @@ -57,7 +53,8 @@ public class StorageClusterVersionModel implements Serializable { this.version = version; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -65,10 +62,9 @@ public class StorageClusterVersionModel implements Serializable { return version; } - //needed for jackson deserialization + // needed for jackson deserialization private static StorageClusterVersionModel valueOf(String value) { - StorageClusterVersionModel versionModel - = new StorageClusterVersionModel(); + StorageClusterVersionModel versionModel = new StorageClusterVersionModel(); versionModel.setVersion(value); return versionModel; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java index cb96901c199..6a73c9227a5 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.hadoop.hbase.util.ByteStringer; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.yetus.audience.InterfaceAudience; /** * Representation of a list of table regions. 
@@ -47,7 +43,7 @@ import org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInf * </complexType> * */ -@XmlRootElement(name="TableInfo") +@XmlRootElement(name = "TableInfo") @InterfaceAudience.Private public class TableInfoModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -58,11 +54,11 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { /** * Default constructor */ - public TableInfoModel() {} + public TableInfoModel() { + } /** - * Constructor - * @param name + * Constructor n */ public TableInfoModel(String name) { this.name = name; @@ -95,7 +91,7 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { /** * @return the regions */ - @XmlElement(name="Region") + @XmlElement(name = "Region") public List getRegions() { return regions; } @@ -114,13 +110,14 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { this.regions = regions; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); - for(TableRegionModel aRegion : regions) { + for (TableRegionModel aRegion : regions) { sb.append(aRegion.toString()); sb.append('\n'); } @@ -131,7 +128,7 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { public byte[] createProtobufOutput() { TableInfo.Builder builder = TableInfo.newBuilder(); builder.setName(name); - for (TableRegionModel aRegion: regions) { + for (TableRegionModel aRegion : regions) { TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder(); regionBuilder.setName(aRegion.getName()); regionBuilder.setId(aRegion.getId()); @@ -144,16 +141,14 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableInfo.Builder builder = TableInfo.newBuilder(); ProtobufUtil.mergeFrom(builder, message); setName(builder.getName()); - for (TableInfo.Region region: builder.getRegionsList()) { - add(new TableRegionModel(builder.getName(), region.getId(), - region.getStartKey().toByteArray(), - region.getEndKey().toByteArray(), - region.getLocation())); + for (TableInfo.Region region : builder.getRegionsList()) { + add( + new TableRegionModel(builder.getName(), region.getId(), region.getStartKey().toByteArray(), + region.getEndKey().toByteArray(), region.getLocation())); } return this; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java index 01ee026627f..7118d159c2e 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlElementRef; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList; +import org.apache.yetus.audience.InterfaceAudience; /** * Simple representation of a list of table names. */ -@XmlRootElement(name="TableList") +@XmlRootElement(name = "TableList") @InterfaceAudience.Private public class TableListModel implements Serializable, ProtobufMessageHandler { @@ -46,7 +42,8 @@ public class TableListModel implements Serializable, ProtobufMessageHandler { /** * Default constructor */ - public TableListModel() {} + public TableListModel() { + } /** * Add the table name model to the list @@ -67,7 +64,7 @@ public class TableListModel implements Serializable, ProtobufMessageHandler { /** * @return the tables */ - @XmlElementRef(name="table") + @XmlElementRef(name = "table") public List getTables() { return tables; } @@ -79,13 +76,14 @@ public class TableListModel implements Serializable, ProtobufMessageHandler { this.tables = tables; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); - for(TableModel aTable : tables) { + for (TableModel aTable : tables) { sb.append(aTable.toString()); sb.append('\n'); } @@ -102,11 +100,10 @@ public class TableListModel implements Serializable, ProtobufMessageHandler { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableList.Builder builder = TableList.newBuilder(); ProtobufUtil.mergeFrom(builder, message); - for (String table: builder.getNameList()) { + for (String table : builder.getNameList()) { this.add(new TableModel(table)); } return this; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java index 4628263e992..75513eadbaa 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.yetus.audience.InterfaceAudience; /** * Simple representation of a table name. - * + * *
          * <complexType name="Table">
          *   <sequence>
        @@ -37,22 +33,22 @@ import org.apache.yetus.audience.InterfaceAudience;
          * </complexType>
          * 
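A brief sketch (not part of the commit) of how the TableModel reformatted below is used together with TableListModel from the previous hunk. The table names are arbitrary examples, and the printed output assumes `TableModel.toString()` returns the table name.

```java
import org.apache.hadoop.hbase.rest.model.TableListModel;
import org.apache.hadoop.hbase.rest.model.TableModel;

public class TableListSketch {
  public static void main(String[] args) {
    TableListModel list = new TableListModel();
    list.add(new TableModel("users"));  // TableModel(String) constructor shown below
    list.add(new TableModel("events"));
    System.out.print(list);             // TableListModel.toString() prints one entry per line
  }
}
```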
        */ -@XmlRootElement(name="table") +@XmlRootElement(name = "table") @InterfaceAudience.Private public class TableModel implements Serializable { private static final long serialVersionUID = 1L; - + private String name; - + /** * Default constructor */ - public TableModel() {} + public TableModel() { + } /** - * Constructor - * @param name + * Constructor n */ public TableModel(String name) { super(); @@ -74,7 +70,8 @@ public class TableModel implements Serializable { this.name = name; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index 2ed5d9d5113..63d955a9929 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,23 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a region of a table and its current location on the - * storage cluster. - * + * Representation of a region of a table and its current location on the storage cluster. + * *
          * <complexType name="TableRegion">
          *   <attribute name="name" type="string"></attribute>
        @@ -43,7 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes;
          *  </complexType>
          * 
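Not part of the commit: a sketch of the five-argument constructor and `getName()` reformatted below, which rebuilds the region name from the table name, start key, and id via `HRegionInfo.createRegionName(...)`. The table, keys, id, and location are made-up examples.

```java
import org.apache.hadoop.hbase.rest.model.TableRegionModel;
import org.apache.hadoop.hbase.util.Bytes;

public class TableRegionSketch {
  public static void main(String[] args) {
    TableRegionModel region = new TableRegionModel("users", 1651400000000L,
      Bytes.toBytes("row-aaa"), Bytes.toBytes("row-zzz"), "rs1.example.com:16020");
    System.out.println(region.getName());     // e.g. users,row-aaa,1651400000000.<encoded>.
    System.out.println(region.getLocation()); // rs1.example.com:16020
  }
}
```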
        */ -@XmlRootElement(name="Region") +@XmlRootElement(name = "Region") @InterfaceAudience.Private public class TableRegionModel implements Serializable { @@ -51,37 +46,36 @@ public class TableRegionModel implements Serializable { private String table; private long id; - private byte[] startKey; + private byte[] startKey; private byte[] endKey; private String location; /** * Constructor */ - public TableRegionModel() {} + public TableRegionModel() { + } /** * Constructor - * @param table the table name - * @param id the encoded id of the region + * @param table the table name + * @param id the encoded id of the region * @param startKey the start key of the region - * @param endKey the end key of the region + * @param endKey the end key of the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey) { this(table, id, startKey, endKey, null); } /** * Constructor - * @param table the table name - * @param id the encoded id of the region + * @param table the table name + * @param id the encoded id of the region * @param startKey the start key of the region - * @param endKey the end key of the region + * @param endKey the end key of the region * @param location the name and port of the region server hosting the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey, String location) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey, String location) { this.table = table; this.id = id; this.startKey = startKey; @@ -94,17 +88,17 @@ public class TableRegionModel implements Serializable { */ @XmlAttribute public String getName() { - byte [] tableNameAsBytes = Bytes.toBytes(this.table); + byte[] tableNameAsBytes = Bytes.toBytes(this.table); TableName tableName = TableName.valueOf(tableNameAsBytes); - byte [] nameAsBytes = HRegionInfo.createRegionName( - tableName, this.startKey, this.id, !tableName.isSystemTable()); + byte[] nameAsBytes = + HRegionInfo.createRegionName(tableName, this.startKey, this.id, !tableName.isSystemTable()); return Bytes.toString(nameAsBytes); } /** * @return the encoded region id */ - @XmlAttribute + @XmlAttribute public long getId() { return id; } @@ -112,7 +106,7 @@ public class TableRegionModel implements Serializable { /** * @return the start key */ - @XmlAttribute + @XmlAttribute public byte[] getStartKey() { return startKey; } @@ -120,7 +114,7 @@ public class TableRegionModel implements Serializable { /** * @return the end key */ - @XmlAttribute + @XmlAttribute public byte[] getEndKey() { return endKey; } @@ -128,7 +122,7 @@ public class TableRegionModel implements Serializable { /** * @return the name and port of the region server hosting the region */ - @XmlAttribute + @XmlAttribute public String getLocation() { return location; } @@ -173,7 +167,8 @@ public class TableRegionModel implements Serializable { this.location = location; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index 9ab259d2c86..80896afa18f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation 
(ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonAnyGetter; import com.fasterxml.jackson.annotation.JsonAnySetter; import com.fasterxml.jackson.annotation.JsonIgnore; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; @@ -29,13 +27,11 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; - import javax.xml.bind.annotation.XmlAnyAttribute; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.namespace.QName; - import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; @@ -61,7 +57,7 @@ import org.apache.yetus.audience.InterfaceAudience; * </complexType> * */ -@XmlRootElement(name="TableSchema") +@XmlRootElement(name = "TableSchema") @InterfaceAudience.Private public class TableSchemaModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -70,17 +66,17 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { private static final QName READONLY = new QName(HTableDescriptor.READONLY); private static final QName TTL = new QName(HColumnDescriptor.TTL); private static final QName VERSIONS = new QName(HConstants.VERSIONS); - private static final QName COMPRESSION = - new QName(HColumnDescriptor.COMPRESSION); + private static final QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION); private String name; - private Map attrs = new LinkedHashMap<>(); + private Map attrs = new LinkedHashMap<>(); private List columns = new ArrayList<>(); /** * Default constructor. */ - public TableSchemaModel() {} + public TableSchemaModel() { + } /** * Constructor @@ -89,16 +85,14 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { public TableSchemaModel(HTableDescriptor htd) { setName(htd.getTableName().getNameAsString()); for (Map.Entry e : htd.getValues().entrySet()) { - addAttribute(Bytes.toString(e.getKey().get()), - Bytes.toString(e.getValue().get())); + addAttribute(Bytes.toString(e.getKey().get()), Bytes.toString(e.getValue().get())); } for (HColumnDescriptor hcd : htd.getFamilies()) { ColumnSchemaModel columnModel = new ColumnSchemaModel(); columnModel.setName(hcd.getNameAsString()); - for (Map.Entry e: - hcd.getValues().entrySet()) { + for (Map.Entry e : hcd.getValues().entrySet()) { columnModel.addAttribute(Bytes.toString(e.getKey().get()), - Bytes.toString(e.getValue().get())); + Bytes.toString(e.getValue().get())); } addColumnFamily(columnModel); } @@ -106,7 +100,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { /** * Add an attribute to the table descriptor - * @param name attribute name + * @param name attribute name * @param value attribute value */ @JsonAnySetter @@ -115,8 +109,8 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { } /** - * Return a table descriptor value as a string. Calls toString() on the - * object stored in the descriptor value map. + * Return a table descriptor value as a string. Calls toString() on the object stored in the + * descriptor value map. 
* @param name the attribute name * @return the attribute value */ @@ -155,14 +149,14 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { */ @XmlAnyAttribute @JsonAnyGetter - public Map getAny() { + public Map getAny() { return attrs; } /** * @return the columns */ - @XmlElement(name="ColumnSchema") + @XmlElement(name = "ColumnSchema") public List getColumns() { return columns; } @@ -181,7 +175,8 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { this.columns = columns; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -190,7 +185,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { sb.append("{ NAME=> '"); sb.append(name); sb.append('\''); - for (Map.Entry e : attrs.entrySet()) { + for (Map.Entry e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); @@ -266,8 +261,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { TableSchema.Builder builder = TableSchema.newBuilder(); builder.setName(name); for (Map.Entry e : attrs.entrySet()) { - TableSchema.Attribute.Builder attrBuilder = - TableSchema.Attribute.newBuilder(); + TableSchema.Attribute.Builder attrBuilder = TableSchema.Attribute.newBuilder(); attrBuilder.setName(e.getKey().getLocalPart()); attrBuilder.setValue(e.getValue().toString()); builder.addAttrs(attrBuilder); @@ -277,8 +271,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder(); familyBuilder.setName(family.getName()); for (Map.Entry e : familyAttrs.entrySet()) { - ColumnSchema.Attribute.Builder attrBuilder = - ColumnSchema.Attribute.newBuilder(); + ColumnSchema.Attribute.Builder attrBuilder = ColumnSchema.Attribute.newBuilder(); attrBuilder.setName(e.getKey().getLocalPart()); attrBuilder.setValue(e.getValue().toString()); familyBuilder.addAttrs(attrBuilder); @@ -301,8 +294,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableSchema.Builder builder = TableSchema.newBuilder(); ProtobufUtil.mergeFrom(builder, message); this.setName(builder.getName()); @@ -322,12 +314,10 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl()); } if (family.hasMaxVersions()) { - familyModel.addAttribute(HConstants.VERSIONS, - family.getMaxVersions()); + familyModel.addAttribute(HConstants.VERSIONS, family.getMaxVersions()); } if (family.hasCompression()) { - familyModel.addAttribute(HColumnDescriptor.COMPRESSION, - family.getCompression()); + familyModel.addAttribute(HColumnDescriptor.COMPRESSION, family.getCompression()); } this.addColumnFamily(familyModel); } @@ -343,7 +333,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { for (Map.Entry e : getAny().entrySet()) { htd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } - for (ColumnSchemaModel column: getColumns()) { + for (ColumnSchemaModel column : getColumns()) { HColumnDescriptor hcd = new HColumnDescriptor(column.getName()); for (Map.Entry e : column.getAny().entrySet()) { hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java index bfee4f20577..41331070907 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; @@ -24,18 +22,16 @@ import java.io.Serializable; import javax.servlet.ServletContext; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.RESTServlet; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; - /** - * A representation of the collection of versions of the REST gateway software - * components. + * A representation of the collection of versions of the REST gateway software components. *
<ul>
 * <li>restVersion: REST gateway revision</li>
 * <li>jvmVersion: the JVM vendor and version information</li>
@@ -44,7 +40,7 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer
 * <li>jerseyVersion: the version of the embedded Jersey framework</li>
 * </ul>
        */ -@XmlRootElement(name="Version") +@XmlRootElement(name = "Version") @InterfaceAudience.Private public class VersionModel implements Serializable, ProtobufMessageHandler { @@ -59,7 +55,8 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * Default constructor. Do not use. */ - public VersionModel() {} + public VersionModel() { + } /** * Constructor @@ -67,12 +64,10 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { */ public VersionModel(ServletContext context) { restVersion = RESTServlet.VERSION_STRING; - jvmVersion = System.getProperty("java.vm.vendor") + ' ' + - System.getProperty("java.version") + '-' + - System.getProperty("java.vm.version"); - osVersion = System.getProperty("os.name") + ' ' + - System.getProperty("os.version") + ' ' + - System.getProperty("os.arch"); + jvmVersion = System.getProperty("java.vm.vendor") + ' ' + System.getProperty("java.version") + + '-' + System.getProperty("java.vm.version"); + osVersion = System.getProperty("os.name") + ' ' + System.getProperty("os.version") + ' ' + + System.getProperty("os.arch"); serverVersion = context.getServerInfo(); jerseyVersion = ServletContainer.class.getPackage().getImplementationVersion(); // Currently, this will always be null because the manifest doesn't have any useful information @@ -82,7 +77,7 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * @return the REST gateway version */ - @XmlAttribute(name="REST") + @XmlAttribute(name = "REST") public String getRESTVersion() { return restVersion; } @@ -90,7 +85,7 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * @return the JVM vendor and version */ - @XmlAttribute(name="JVM") + @XmlAttribute(name = "JVM") public String getJVMVersion() { return jvmVersion; } @@ -98,7 +93,7 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * @return the OS name, version, and hardware architecture */ - @XmlAttribute(name="OS") + @XmlAttribute(name = "OS") public String getOSVersion() { return osVersion; } @@ -106,7 +101,7 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * @return the servlet container version */ - @XmlAttribute(name="Server") + @XmlAttribute(name = "Server") public String getServerVersion() { return serverVersion; } @@ -114,7 +109,7 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * @return the version of the embedded Jersey framework */ - @XmlAttribute(name="Jersey") + @XmlAttribute(name = "Jersey") public String getJerseyVersion() { return jerseyVersion; } @@ -154,7 +149,8 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { this.jerseyVersion = version; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -186,8 +182,7 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Version.Builder builder = Version.newBuilder(); ProtobufUtil.mergeFrom(builder, message); if (builder.hasRestVersion()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java index 3aa81db5f03..e87e516c5cf 100644 --- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider; import java.util.Arrays; @@ -44,10 +42,9 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.ContextResolver; import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * Plumbing for hooking up Jersey's JSON entity body encoding and decoding - * support to JAXB. Modify how the context is created (by using e.g. a - * different configuration builder) to control how JSON is processed and - * created. + * Plumbing for hooking up Jersey's JSON entity body encoding and decoding support to JAXB. Modify + * how the context is created (by using e.g. a different configuration builder) to control how JSON + * is processed and created. */ @Provider @InterfaceAudience.Private @@ -57,23 +54,11 @@ public class JAXBContextResolver implements ContextResolver { private final Set> types; - private final Class[] cTypes = { - CellModel.class, - CellSetModel.class, - ColumnSchemaModel.class, - NamespacesModel.class, - NamespacesInstanceModel.class, - RowModel.class, - ScannerModel.class, - StorageClusterStatusModel.class, - StorageClusterVersionModel.class, - TableInfoModel.class, - TableListModel.class, - TableModel.class, - TableRegionModel.class, - TableSchemaModel.class, - VersionModel.class - }; + private final Class[] cTypes = { CellModel.class, CellSetModel.class, ColumnSchemaModel.class, + NamespacesModel.class, NamespacesInstanceModel.class, RowModel.class, ScannerModel.class, + StorageClusterStatusModel.class, StorageClusterVersionModel.class, TableInfoModel.class, + TableListModel.class, TableModel.class, TableRegionModel.class, TableSchemaModel.class, + VersionModel.class }; @SuppressWarnings("unchecked") public JAXBContextResolver() throws Exception { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java index 9c94611355a..7c3f6f8ea40 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.consumer; import java.io.ByteArrayOutputStream; @@ -39,28 +37,25 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.MessageBodyReader; import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * Adapter for hooking up Jersey content processing dispatch to - * ProtobufMessageHandler interface capable handlers for decoding protobuf input. 
+ * Adapter for hooking up Jersey content processing dispatch to ProtobufMessageHandler interface + * capable handlers for decoding protobuf input. */ @Provider -@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@Consumes({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) @InterfaceAudience.Private -public class ProtobufMessageBodyConsumer - implements MessageBodyReader { - private static final Logger LOG = - LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class); +public class ProtobufMessageBodyConsumer implements MessageBodyReader { + private static final Logger LOG = LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class); @Override - public boolean isReadable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public boolean isReadable(Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { return ProtobufMessageHandler.class.isAssignableFrom(type); } @Override public ProtobufMessageHandler readFrom(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, InputStream inputStream) - throws IOException, WebApplicationException { + Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, + InputStream inputStream) throws IOException, WebApplicationException { ProtobufMessageHandler obj = null; try { obj = type.getDeclaredConstructor().newInstance(); @@ -74,12 +69,11 @@ public class ProtobufMessageBodyConsumer } } while (read > 0); if (LOG.isTraceEnabled()) { - LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + - inputStream); + LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + inputStream); } obj = obj.getObjectFromMessage(baos.toByteArray()); } catch (InstantiationException | NoSuchMethodException | InvocationTargetException - | IllegalAccessException e) { + | IllegalAccessException e) { throw new WebApplicationException(e); } return obj; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java index 9eef5bf3df4..973665f00fa 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.producer; import java.io.IOException; @@ -35,35 +33,31 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.MessageBodyWriter; import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * An adapter between Jersey and Object.toString(). Hooks up plain text output - * to the Jersey content handling framework. - * Jersey will first call getSize() to learn the number of bytes that will be + * An adapter between Jersey and Object.toString(). Hooks up plain text output to the Jersey content + * handling framework. Jersey will first call getSize() to learn the number of bytes that will be * sent, then writeTo to perform the actual I/O. 
*/ @Provider @Produces(Constants.MIMETYPE_TEXT) @InterfaceAudience.Private -public class PlainTextMessageBodyProducer - implements MessageBodyWriter { +public class PlainTextMessageBodyProducer implements MessageBodyWriter { @Override - public boolean isWriteable(Class arg0, Type arg1, Annotation[] arg2, - MediaType arg3) { + public boolean isWriteable(Class arg0, Type arg1, Annotation[] arg2, MediaType arg3) { return true; } @Override - public long getSize(Object object, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public long getSize(Object object, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { // deprecated by JAX-RS 2.0 and ignored by Jersey runtime return -1; } @Override - public void writeTo(Object object, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, OutputStream outStream) - throws IOException, WebApplicationException { + public void writeTo(Object object, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType, MultivaluedMap httpHeaders, OutputStream outStream) + throws IOException, WebApplicationException { outStream.write(Bytes.toBytes(object.toString())); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java index 3f21893470d..1d95e6f343e 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.producer; import java.io.IOException; @@ -35,35 +33,32 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.MessageBodyWriter; import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up - * protobuf output producing methods to the Jersey content handling framework. - * Jersey will first call getSize() to learn the number of bytes that will be - * sent, then writeTo to perform the actual I/O. + * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up protobuf output + * producing methods to the Jersey content handling framework. Jersey will first call getSize() to + * learn the number of bytes that will be sent, then writeTo to perform the actual I/O. 
*/ @Provider -@Produces({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) @InterfaceAudience.Private -public class ProtobufMessageBodyProducer - implements MessageBodyWriter { +public class ProtobufMessageBodyProducer implements MessageBodyWriter { @Override - public boolean isWriteable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public boolean isWriteable(Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { return ProtobufMessageHandler.class.isAssignableFrom(type); } @Override public long getSize(ProtobufMessageHandler m, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + Annotation[] annotations, MediaType mediaType) { // deprecated by JAX-RS 2.0 and ignored by Jersey runtime return -1; } @Override public void writeTo(ProtobufMessageHandler m, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, OutputStream entityStream) - throws IOException, WebApplicationException { + Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, + OutputStream entityStream) throws IOException, WebApplicationException { entityStream.write(m.createProtobufOutput()); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java index 5af8ee2bfaf..4c9b20c6ace 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -27,7 +26,6 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,9 +38,11 @@ public class DummyFilter implements Filter { @Override public void doFilter(ServletRequest paramServletRequest, ServletResponse paramServletResponse, - FilterChain paramFilterChain) throws IOException, ServletException { - if (paramServletRequest instanceof HttpServletRequest - && paramServletResponse instanceof HttpServletResponse) { + FilterChain paramFilterChain) throws IOException, ServletException { + if ( + paramServletRequest instanceof HttpServletRequest + && paramServletResponse instanceof HttpServletResponse + ) { HttpServletRequest request = (HttpServletRequest) paramServletRequest; HttpServletResponse response = (HttpServletResponse) paramServletResponse; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java index 00b28c7534b..e1e52d2c184 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase.rest; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +44,7 @@ public class HBaseRESTTestingUtility { server = new RESTServer(conf); server.run(); - LOG.info("started " + server.getClass().getName() + " on port " + - server.getPort()); + LOG.info("started " + server.getClass().getName() + " on port " + server.getPort()); } public void shutdownServletContainer() { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index fe66d9b0f76..1322eee32b9 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +32,6 @@ import java.util.Random; import java.util.TreeMap; import java.util.regex.Matcher; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataInputStream; @@ -76,7 +74,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Hash; import org.apache.hadoop.hbase.util.MurmurHash; import org.apache.hadoop.hbase.util.Pair; - import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; @@ -93,29 +90,24 @@ import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; import org.apache.hadoop.util.LineReader; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Script used evaluating Stargate performance and scalability. Runs a SG - * client that steps through one of a set of hardcoded tests or 'experiments' - * (e.g. a random reads test, a random writes test, etc.). Pass on the - * command-line which test to run and how many clients are participating in - * this experiment. Run java PerformanceEvaluation --help to - * obtain usage. - * - *

        This class sets up and runs the evaluation programs described in - * Section 7, Performance Evaluation, of the Bigtable - * paper, pages 8-10. - * - *

        If number of clients > 1, we start up a MapReduce job. Each map task - * runs an individual client. Each client does about 1GB of data. + * Script used evaluating Stargate performance and scalability. Runs a SG client that steps through + * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test, a random writes test, + * etc.). Pass on the command-line which test to run and how many clients are participating in this + * experiment. Run java PerformanceEvaluation --help to obtain usage. + *

        + * This class sets up and runs the evaluation programs described in Section 7, Performance + * Evaluation, of the Bigtable paper, + * pages 8-10. + *

        + * If number of clients > 1, we start up a MapReduce job. Each map task runs an individual client. + * Each client does about 1GB of data. */ public class PerformanceEvaluation extends Configured implements Tool { - protected static final Logger LOG = - LoggerFactory.getLogger(PerformanceEvaluation.class); + protected static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class); private static final int DEFAULT_ROW_PREFIX_LENGTH = 16; private static final int ROW_LENGTH = 1000; @@ -151,20 +143,14 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * Regex to parse lines in input file passed to mapreduce task. */ - public static final Pattern LINE_PATTERN = - Pattern.compile("tableName=(\\w+),\\s+" + - "startRow=(\\d+),\\s+" + - "perClientRunRows=(\\d+),\\s+" + - "totalRows=(\\d+),\\s+" + - "clients=(\\d+),\\s+" + - "flushCommits=(\\w+),\\s+" + - "writeToWAL=(\\w+),\\s+" + - "useTags=(\\w+),\\s+" + - "noOfTags=(\\d+)"); + public static final Pattern LINE_PATTERN = Pattern + .compile("tableName=(\\w+),\\s+" + "startRow=(\\d+),\\s+" + "perClientRunRows=(\\d+),\\s+" + + "totalRows=(\\d+),\\s+" + "clients=(\\d+),\\s+" + "flushCommits=(\\w+),\\s+" + + "writeToWAL=(\\w+),\\s+" + "useTags=(\\w+),\\s+" + "noOfTags=(\\d+)"); /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. */ protected enum Counter { /** elapsed time */ @@ -180,33 +166,28 @@ public class PerformanceEvaluation extends Configured implements Tool { public PerformanceEvaluation(final Configuration c) { this.conf = c; - addCommandDescriptor(RandomReadTest.class, "randomRead", - "Run random read test"); + addCommandDescriptor(RandomReadTest.class, "randomRead", "Run random read test"); addCommandDescriptor(RandomSeekScanTest.class, "randomSeekScan", - "Run random seek and scan 100 test"); + "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", - "Run random seek scan with both start and stop row (max 10 rows)"); + "Run random seek scan with both start and stop row (max 10 rows)"); addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100", - "Run random seek scan with both start and stop row (max 100 rows)"); + "Run random seek scan with both start and stop row (max 100 rows)"); addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000", - "Run random seek scan with both start and stop row (max 1000 rows)"); + "Run random seek scan with both start and stop row (max 1000 rows)"); addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", - "Run random seek scan with both start and stop row (max 10000 rows)"); - addCommandDescriptor(RandomWriteTest.class, "randomWrite", - "Run random write test"); - addCommandDescriptor(SequentialReadTest.class, "sequentialRead", - "Run sequential read test"); - addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", - "Run sequential write test"); - addCommandDescriptor(ScanTest.class, "scan", - "Run scan test (read every row)"); + "Run random seek scan with both start and stop row (max 10000 rows)"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, 
"sequentialWrite", "Run sequential write test"); + addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", - "Run scan test using a filter to find a specific row based " + - "on it's value (make sure to use --rows=20)"); + "Run scan test using a filter to find a specific row based " + + "on it's value (make sure to use --rows=20)"); } - protected void addCommandDescriptor(Class cmdClass, - String name, String description) { + protected void addCommandDescriptor(Class cmdClass, String name, + String description) { CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); commands.put(name, cmdDescriptor); } @@ -224,10 +205,9 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * This class works as the InputSplit of Performance Evaluation - * MapReduce InputFormat, and the Record Value of RecordReader. - * Each map task will only read one record from a PeInputSplit, - * the record value is the PeInputSplit itself. + * This class works as the InputSplit of Performance Evaluation MapReduce InputFormat, and the + * Record Value of RecordReader. Each map task will only read one record from a PeInputSplit, the + * record value is the PeInputSplit itself. */ public static class PeInputSplit extends InputSplit implements Writable { private TableName tableName; @@ -241,7 +221,7 @@ public class PerformanceEvaluation extends Configured implements Tool { private int noOfTags; public PeInputSplit(TableName tableName, int startRow, int rows, int totalRows, int clients, - boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags) { + boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags) { this.tableName = tableName; this.startRow = startRow; this.rows = rows; @@ -328,8 +308,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * InputFormat of Performance Evaluation MapReduce job. - * It extends from FileInputFormat, want to use it's methods such as setInputPaths(). + * InputFormat of Performance Evaluation MapReduce job. It extends from FileInputFormat, want to + * use it's methods such as setInputPaths(). 
*/ public static class PeInputFormat extends FileInputFormat { @Override @@ -364,20 +344,13 @@ public class PerformanceEvaluation extends Configured implements Tool { boolean useTags = Boolean.parseBoolean(m.group(8)); int noOfTags = Integer.parseInt(m.group(9)); - LOG.debug("tableName=" + tableName + - " split["+ splitList.size() + "] " + - " startRow=" + startRow + - " rows=" + rows + - " totalRows=" + totalRows + - " clients=" + clients + - " flushCommits=" + flushCommits + - " writeToWAL=" + writeToWAL + - " useTags=" + useTags + - " noOfTags=" + noOfTags); + LOG.debug("tableName=" + tableName + " split[" + splitList.size() + "] " + " startRow=" + + startRow + " rows=" + rows + " totalRows=" + totalRows + " clients=" + clients + + " flushCommits=" + flushCommits + " writeToWAL=" + writeToWAL + " useTags=" + + useTags + " noOfTags=" + noOfTags); - PeInputSplit newSplit = - new PeInputSplit(tableName, startRow, rows, totalRows, clients, - flushCommits, writeToWAL, useTags, noOfTags); + PeInputSplit newSplit = new PeInputSplit(tableName, startRow, rows, totalRows, clients, + flushCommits, writeToWAL, useTags, noOfTags); splitList.add(newSplit); } } @@ -390,7 +363,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) { + TaskAttemptContext context) { return new PeRecordReader(); } @@ -403,7 +376,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override public void initialize(InputSplit split, TaskAttemptContext context) { this.readOver = false; - this.split = (PeInputSplit)split; + this.split = (PeInputSplit) split; } @Override @@ -449,7 +422,7 @@ public class PerformanceEvaluation extends Configured implements Tool { * MapReduce job that runs a performance evaluation client in each map task. */ public static class EvaluationMapTask - extends Mapper { + extends Mapper { /** configuration parameter name that contains the command */ public final static String CMD_KEY = "EvaluationMapTask.command"; @@ -466,10 +439,10 @@ public class PerformanceEvaluation extends Configured implements Tool { // this is required so that extensions of PE are instantiated within the // map reduce task... 
Class peClass = - forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); + forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); try { - this.pe = peClass.getConstructor(Configuration.class) - .newInstance(context.getConfiguration()); + this.pe = + peClass.getConstructor(Configuration.class).newInstance(context.getConfiguration()); } catch (Exception e) { throw new IllegalStateException("Could not instantiate PE instance", e); } @@ -487,16 +460,15 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override protected void map(NullWritable key, PeInputSplit value, final Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Status status = context::setStatus; // Evaluation task pe.tableName = value.getTableName(); - long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(), - value.getRows(), value.getTotalRows(), - value.isFlushCommits(), value.isWriteToWAL(), - value.isUseTags(), value.getNoOfTags(), - ConnectionFactory.createConnection(context.getConfiguration()), status); + long elapsedTime = + this.pe.runOneClient(this.cmd, value.getStartRow(), value.getRows(), value.getTotalRows(), + value.isFlushCommits(), value.isWriteToWAL(), value.isUseTags(), value.getNoOfTags(), + ConnectionFactory.createConnection(context.getConfiguration()), status); // Collect how much time the thing took. Report as map output and // to the ELAPSED_TIME counter. context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime); @@ -521,7 +493,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } byte[][] splits = getSplits(); - for (int i=0; i < splits.length; i++) { + for (int i = 0; i < splits.length; i++) { LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i])); } admin.createTable(tableDescriptor); @@ -553,17 +525,16 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * Generates splits based on total number of rows and specified split regions - * * @return splits : array of byte [] */ - protected byte[][] getSplits() { + protected byte[][] getSplits() { if (this.presplitRegions == 0) { return new byte[0][]; } int numSplitPoints = presplitRegions - 1; byte[][] splits = new byte[numSplitPoints][]; - int jump = this.R / this.presplitRegions; + int jump = this.R / this.presplitRegions; for (int i = 0; i < numSplitPoints; i++) { int rowkey = jump * (1 + i); splits[i] = format(rowkey); @@ -572,12 +543,12 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * We're to run multiple clients concurrently. Setup a mapreduce job. Run - * one map per client. Then run a single reduce to sum the elapsed times. + * We're to run multiple clients concurrently. Setup a mapreduce job. Run one map per client. Then + * run a single reduce to sum the elapsed times. * @param cmd Command to run. 
*/ private void runNIsMoreThanOne(final Class cmd) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { RemoteAdmin remoteAdmin = new RemoteAdmin(new Client(cluster), getConf()); checkTable(remoteAdmin); if (nomapred) { @@ -595,7 +566,7 @@ public class PerformanceEvaluation extends Configured implements Tool { private void doMultipleClients(final Class cmd) throws IOException { final List threads = new ArrayList<>(this.N); final long[] timings = new long[this.N]; - final int perClientRows = R/N; + final int perClientRows = R / N; final TableName tableName = this.tableName; final DataBlockEncoding encoding = this.blockEncoding; final boolean flushCommits = this.flushCommits; @@ -623,13 +594,12 @@ public class PerformanceEvaluation extends Configured implements Tool { pe.useTags = useTags; pe.noOfTags = numTags; try { - long elapsedTime = pe.runOneClient(cmd, index * perClientRows, - perClientRows, R, - flushCommits, writeToWAL, useTags, noOfTags, connection, + long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R, + flushCommits, writeToWAL, useTags, noOfTags, connection, msg -> LOG.info("client-" + getName() + " " + msg)); timings[index] = elapsedTime; - LOG.info("Finished " + getName() + " in " + elapsedTime + - "ms writing " + perClientRows + " rows"); + LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows + + " rows"); } catch (IOException e) { throw new RuntimeException(e); } @@ -650,27 +620,23 @@ public class PerformanceEvaluation extends Configured implements Tool { } } final String test = cmd.getSimpleName(); - LOG.info("[" + test + "] Summary of timings (ms): " - + Arrays.toString(timings)); + LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(timings)); Arrays.sort(timings); long total = 0; for (int i = 0; i < this.N; i++) { total += timings[i]; } - LOG.info("[" + test + "]" - + "\tMin: " + timings[0] + "ms" - + "\tMax: " + timings[this.N - 1] + "ms" - + "\tAvg: " + (total / this.N) + "ms"); + LOG.info("[" + test + "]" + "\tMin: " + timings[0] + "ms" + "\tMax: " + timings[this.N - 1] + + "ms" + "\tAvg: " + (total / this.N) + "ms"); } /** - * Run a mapreduce job. Run as many maps as asked-for clients. - * Before we start up the job, write out an input file with instruction - * per client regards which row they are to start on. + * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write + * out an input file with instruction per client regards which row they are to start on. * @param cmd Command to run. */ private void doMapReduce(final Class cmd) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); Path inputDir = writeInputFile(conf); conf.set(EvaluationMapTask.CMD_KEY, cmd.getName()); @@ -768,8 +734,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation} tests - * This makes the reflection logic a little easier to understand... + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation} tests This + * makes the reflection logic a little easier to understand... 
*/ static class TestOptions { private int startRow; @@ -783,8 +749,8 @@ public class PerformanceEvaluation extends Configured implements Tool { private Connection connection; TestOptions(int startRow, int perClientRunRows, int totalRows, TableName tableName, - boolean flushCommits, boolean writeToWAL, boolean useTags, - int noOfTags, Connection connection) { + boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, + Connection connection) { this.startRow = startRow; this.perClientRunRows = perClientRunRows; this.totalRows = totalRows; @@ -834,17 +800,17 @@ public class PerformanceEvaluation extends Configured implements Tool { } /* - * A test. - * Subclass to particularize what happens per row. + * A test. Subclass to particularize what happens per row. */ static abstract class Test { // Below is make it so when Tests are all running in the one // jvm, that they each have a differently seeded Random. - private static final Random randomSeed = - new Random(EnvironmentEdgeManager.currentTime()); + private static final Random randomSeed = new Random(EnvironmentEdgeManager.currentTime()); + private static long nextRandomSeed() { return randomSeed.nextLong(); } + protected final Random rand = new Random(nextRandomSeed()); protected final int startRow; @@ -859,8 +825,8 @@ public class PerformanceEvaluation extends Configured implements Tool { protected Connection connection; /** - * Note that all subclasses of this class must provide a public contructor - * that has the exact same list of arguments. + * Note that all subclasses of this class must provide a public contructor that has the exact + * same list of arguments. */ Test(final Configuration conf, final TestOptions options, final Status status) { super(); @@ -882,10 +848,10 @@ public class PerformanceEvaluation extends Configured implements Tool { protected int getReportingPeriod() { int period = this.perClientRunRows / 10; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } - abstract void testTakedown() throws IOException; + abstract void testTakedown() throws IOException; /** * Run test @@ -960,7 +926,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } @Override - void testTakedown() throws IOException { + void testTakedown() throws IOException { if (flushCommits) { this.mutator.flush(); } @@ -985,7 +951,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } } @@ -1008,8 +974,8 @@ public class PerformanceEvaluation extends Configured implements Tool { if (i % 100 == 0) { LOG.info(String.format("Scan for key range %s - %s returned %s rows", - Bytes.toString(startAndStopRow.getFirst()), - Bytes.toString(startAndStopRow.getSecond()), count)); + Bytes.toString(startAndStopRow.getFirst()), Bytes.toString(startAndStopRow.getSecond()), + count)); } s.close(); @@ -1026,7 +992,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? 
this.perClientRunRows : period; } } @@ -1089,7 +1055,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } } @@ -1110,8 +1076,8 @@ public class PerformanceEvaluation extends Configured implements Tool { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); } else { put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value); @@ -1177,8 +1143,8 @@ public class PerformanceEvaluation extends Configured implements Tool { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); } else { put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value); @@ -1206,10 +1172,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } protected Scan constructScan(byte[] valuePrefix) { - Filter filter = new SingleColumnValueFilter( - FAMILY_NAME, QUALIFIER_NAME, CompareOperator.EQUAL, - new BinaryComparator(valuePrefix) - ); + Filter filter = new SingleColumnValueFilter(FAMILY_NAME, QUALIFIER_NAME, + CompareOperator.EQUAL, new BinaryComparator(valuePrefix)); Scan scan = new Scan(); scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); scan.setFilter(filter); @@ -1221,31 +1185,31 @@ public class PerformanceEvaluation extends Configured implements Tool { * Format passed integer. * @param number the integer to format * @return Returns zero-prefixed 10-byte wide decimal version of passed number (Does absolute in - * case number is negative). + * case number is negative). 
*/ - public static byte [] format(final int number) { + public static byte[] format(final int number) { byte[] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } public static byte[] generateData(final Random r, int length) { - byte[] b = new byte [length]; + byte[] b = new byte[length]; int i; - for (i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); @@ -1256,7 +1220,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } public static byte[] generateValue(final Random r) { - byte [] b = new byte [ROW_LENGTH]; + byte[] b = new byte[ROW_LENGTH]; r.nextBytes(b); return b; } @@ -1265,33 +1229,32 @@ public class PerformanceEvaluation extends Configured implements Tool { return format(random.nextInt(Integer.MAX_VALUE) % totalRows); } - long runOneClient(final Class cmd, final int startRow, - final int perClientRunRows, final int totalRows, - boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, - Connection connection, final Status status) throws IOException { - status.setStatus("Start " + cmd + " at offset " + startRow + " for " + - perClientRunRows + " rows"); + long runOneClient(final Class cmd, final int startRow, final int perClientRunRows, + final int totalRows, boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, + Connection connection, final Status status) throws IOException { + status + .setStatus("Start " + cmd + " at offset " + startRow + " for " + perClientRunRows + " rows"); long totalElapsedTime; - TestOptions options = new TestOptions(startRow, perClientRunRows, - totalRows, tableName, flushCommits, writeToWAL, useTags, noOfTags, connection); + TestOptions options = new TestOptions(startRow, perClientRunRows, totalRows, tableName, + flushCommits, writeToWAL, useTags, noOfTags, connection); final Test t; try { - Constructor constructor = cmd.getDeclaredConstructor( - Configuration.class, TestOptions.class, Status.class); + Constructor constructor = + cmd.getDeclaredConstructor(Configuration.class, TestOptions.class, Status.class); t = constructor.newInstance(this.conf, options, status); } catch (NoSuchMethodException e) { - throw new IllegalArgumentException("Invalid command class: " + - cmd.getName() + ". It does not provide a constructor as described by" + - "the javadoc comment. Available constructors are: " + - Arrays.toString(cmd.getConstructors())); + throw new IllegalArgumentException("Invalid command class: " + cmd.getName() + + ". It does not provide a constructor as described by" + + "the javadoc comment. 
Available constructors are: " + + Arrays.toString(cmd.getConstructors())); } catch (Exception e) { throw new IllegalStateException("Failed to construct command class", e); } totalElapsedTime = t.test(); - status.setStatus("Finished " + cmd + " in " + totalElapsedTime + - "ms at offset " + startRow + " for " + perClientRunRows + " rows"); + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + startRow + + " for " + perClientRunRows + " rows"); return totalElapsedTime; } @@ -1303,15 +1266,15 @@ public class PerformanceEvaluation extends Configured implements Tool { Client client = new Client(cluster); admin = new RemoteAdmin(client, getConf()); checkTable(admin); - runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL, - this.useTags, this.noOfTags, this.connection, status); + runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL, this.useTags, + this.noOfTags, this.connection, status); } catch (Exception e) { LOG.error("Failed", e); } } private void runTest(final Class cmd) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { if (N == 1) { // If there is only one client and one HRegionServer, we assume nothing // has been set up at all. @@ -1332,30 +1295,30 @@ public class PerformanceEvaluation extends Configured implements Tool { } System.err.println("Usage: java " + this.getClass().getName() + " \\"); System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\"); - System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] " + - "[-D]* "); + System.err.println( + " [--compress=TYPE] [--blockEncoding=TYPE] " + "[-D]* "); System.err.println(); System.err.println("General Options:"); - System.err.println(" nomapred Run multiple clients using threads " + - "(rather than use mapreduce)"); + System.err.println( + " nomapred Run multiple clients using threads " + "(rather than use mapreduce)"); System.err.println(" rows Rows each client runs. Default: One million"); System.err.println(); System.err.println("Table Creation / Write Tests:"); System.err.println(" table Alternate table name. Default: 'TestTable'"); System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); - System.err.println(" flushCommits Used to determine if the test should flush the table. " + - "Default: false"); + System.err.println( + " flushCommits Used to determine if the test should flush the table. " + "Default: false"); System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); - System.err.println(" presplit Create presplit table. Recommended for accurate perf " + - "analysis (see guide). Default: disabled"); - System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + - "Default : false"); - System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true."); + System.err.println(" presplit Create presplit table. Recommended for accurate perf " + + "analysis (see guide). Default: disabled"); + System.err.println( + " usetags Writes tags along with KVs. Use with HFile V3. " + "Default : false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true."); System.err.println(); System.err.println("Read Tests:"); - System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " + - "possible. Not guaranteed that reads are always served from inmemory. 
Default: false"); + System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " + + "possible. Not guaranteed that reads are always served from inmemory. Default: false"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. "); System.err.println(" For example: "); @@ -1368,13 +1331,12 @@ public class PerformanceEvaluation extends Configured implements Tool { } System.err.println(); System.err.println("Args:"); - System.err.println(" nclients Integer. Required. Total number of " + - "clients (and HRegionServers)"); + System.err.println( + " nclients Integer. Required. Total number of " + "clients (and HRegionServers)"); System.err.println(" running: 1 <= value <= 500"); System.err.println("Examples:"); System.err.println(" To run a single evaluation client:"); - System.err.println(" $ hbase " + this.getClass().getName() - + " sequentialWrite 1"); + System.err.println(" $ hbase " + this.getClass().getName() + " sequentialWrite 1"); } private void getArgs(final int start, final String[] args) { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java index e8ea1125e7c..774d7eaba29 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,8 +72,7 @@ public class RowResourceBase { protected static final String VALUE_6 = "6"; protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - protected static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + protected static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); protected static Client client; protected static JAXBContext context; protected static Marshaller xmlMarshaller; @@ -86,16 +85,12 @@ public class RowResourceBase { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(conf); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class); xmlMarshaller = context.createMarshaller(); xmlUnmarshaller = context.createUnmarshaller(); - jsonMapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + jsonMapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); } @AfterClass @@ -124,8 +119,8 @@ public class RowResourceBase { } } - static Response putValuePB(String table, String row, String column, - String value) throws IOException { + static Response putValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -136,39 +131,38 @@ public class RowResourceBase { return putValuePB(path.toString(), table, row, column, value); } - static Response putValuePB(String url, String table, String row, - String column, String 
value) throws IOException { + static Response putValuePB(String url, String table, String row, String column, String value) + throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static void checkValueXML(String url, String table, String row, - String column, String value) throws IOException, JAXBException { + protected static void checkValueXML(String url, String table, String row, String column, + String value) throws IOException, JAXBException { Response response = getValueXML(url); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static void checkValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { Response response = getValueXML(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); @@ -176,12 +170,12 @@ public class RowResourceBase { } protected static void checkIncrementValueXML(String table, String row, String column, long value) - throws IOException, JAXBException { + throws IOException, JAXBException { Response response1 = getValueXML(table, row, column); assertEquals(200, response1.getCode()); assertEquals(Constants.MIMETYPE_XML, response1.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response1.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response1.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); @@ -189,12 +183,12 @@ public class RowResourceBase { } protected static Response getValuePB(String url) throws IOException { - Response response = client.get(url, Constants.MIMETYPE_PROTOBUF); + Response response = client.get(url, Constants.MIMETYPE_PROTOBUF); return response; } - protected static Response putValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected 
static Response putValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -205,23 +199,20 @@ public class RowResourceBase { return putValueXML(path.toString(), table, row, column, value); } - protected static Response putValueXML(String url, String table, String row, - String column, String value) throws IOException, JAXBException { + protected static Response putValueXML(String url, String table, String row, String column, + String value) throws IOException, JAXBException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } - protected static Response getValuePB(String table, String row, String column) - throws IOException { + protected static Response getValuePB(String table, String row, String column) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -232,8 +223,8 @@ public class RowResourceBase { return getValuePB(path.toString()); } - protected static void checkValuePB(String table, String row, String column, - String value) throws IOException { + protected static void checkValuePB(String table, String row, String column, String value) + throws IOException { Response response = getValuePB(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); @@ -245,8 +236,8 @@ public class RowResourceBase { assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkIncrementValuePB(String table, String row, String column, - long value) throws IOException { + protected static void checkIncrementValuePB(String table, String row, String column, long value) + throws IOException { Response response = getValuePB(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); @@ -259,126 +250,115 @@ public class RowResourceBase { } protected static Response checkAndPutValuePB(String url, String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException { + String valueToCheck, String valueToPut, HashMap otherCells) throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToPut))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToPut))); if (otherCells != null) { - for (Map.Entry entry : otherCells.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : otherCells.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // This Cell need to be added as last cell. 
- rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static Response checkAndPutValuePB(String table, String row, - String column, String valueToCheck, String valueToPut) throws IOException { - return checkAndPutValuePB(table,row,column,valueToCheck,valueToPut,null); + protected static Response checkAndPutValuePB(String table, String row, String column, + String valueToCheck, String valueToPut) throws IOException { + return checkAndPutValuePB(table, row, column, valueToCheck, valueToPut, null); } protected static Response checkAndPutValuePB(String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException { + String valueToCheck, String valueToPut, HashMap otherCells) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); path.append('/'); path.append(row); path.append("?check=put"); - return checkAndPutValuePB(path.toString(), table, row, column, - valueToCheck, valueToPut, otherCells); + return checkAndPutValuePB(path.toString(), table, row, column, valueToCheck, valueToPut, + otherCells); } protected static Response checkAndPutValueXML(String url, String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException, JAXBException { + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException, JAXBException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToPut))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToPut))); if (otherCells != null) { - for (Map.Entry entry : otherCells.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : otherCells.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // This Cell need to be added as last cell. 
- rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } protected static Response checkAndPutValueXML(String table, String row, String column, - String valueToCheck, String valueToPut) throws IOException, JAXBException { - return checkAndPutValueXML(table,row,column,valueToCheck,valueToPut, null); + String valueToCheck, String valueToPut) throws IOException, JAXBException { + return checkAndPutValueXML(table, row, column, valueToCheck, valueToPut, null); } - protected static Response checkAndPutValueXML(String table, String row, - String column, String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException, JAXBException { + protected static Response checkAndPutValueXML(String table, String row, String column, + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); path.append('/'); path.append(row); path.append("?check=put"); - return checkAndPutValueXML(path.toString(), table, row, column, - valueToCheck, valueToPut, otherCells); + return checkAndPutValueXML(path.toString(), table, row, column, valueToCheck, valueToPut, + otherCells); } - protected static Response checkAndDeleteXML(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) - throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String url, String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException, JAXBException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } - protected static Response checkAndDeleteXML(String table, String row, - String column, String valueToCheck) throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String table, String row, String column, + String valueToCheck) throws IOException, JAXBException { return checkAndDeleteXML(table, row, column, valueToCheck, null); } - protected static Response checkAndDeleteXML(String table, String row, - String column, String valueToCheck, HashMap cellsToDelete) - 
throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -388,14 +368,13 @@ public class RowResourceBase { return checkAndDeleteXML(path.toString(), table, row, column, valueToCheck, cellsToDelete); } - protected static Response checkAndDeleteJson(String table, String row, - String column, String valueToCheck) throws IOException { + protected static Response checkAndDeleteJson(String table, String row, String column, + String valueToCheck) throws IOException { return checkAndDeleteJson(table, row, column, valueToCheck, null); } - protected static Response checkAndDeleteJson(String table, String row, - String column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + protected static Response checkAndDeleteJson(String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -405,36 +384,33 @@ public class RowResourceBase { return checkAndDeleteJson(path.toString(), table, row, column, valueToCheck, cellsToDelete); } - protected static Response checkAndDeleteJson(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + protected static Response checkAndDeleteJson(String url, String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(url, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(url, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); return response; } protected static Response checkAndDeletePB(String table, String row, String column, String value) - throws IOException { + throws IOException { return checkAndDeletePB(table, row, column, value, null); } - protected static Response checkAndDeletePB(String table, String row, - String column, String value, HashMap cellsToDelete) throws IOException { + protected static Response checkAndDeletePB(String table, String row, String column, String value, + HashMap cellsToDelete) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -443,30 +419,29 @@ public class RowResourceBase { path.append("?check=delete"); return checkAndDeleteValuePB(path.toString(), table, row, column, value, cellsToDelete); } - protected static Response checkAndDeleteValuePB(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + + protected static Response 
checkAndDeleteValuePB(String url, String table, String row, + String column, String valueToCheck, HashMap cellsToDelete) throws IOException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes - .toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static Response getValueXML(String table, String startRow, - String endRow, String column) throws IOException { + protected static Response getValueXML(String table, String startRow, String endRow, String column) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -490,7 +465,7 @@ public class RowResourceBase { } protected static Response deleteValue(String table, String row, String column) - throws IOException { + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -504,7 +479,7 @@ public class RowResourceBase { } protected static Response getValueXML(String table, String row, String column) - throws IOException { + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -515,8 +490,7 @@ public class RowResourceBase { return getValueXML(path.toString()); } - protected static Response deleteRow(String table, String row) - throws IOException { + protected static Response deleteRow(String table, String row) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -527,8 +501,8 @@ public class RowResourceBase { return response; } - protected static Response getValueJson(String table, String row, - String column) throws IOException { + protected static Response getValueJson(String table, String row, String column) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -539,8 +513,8 @@ public class RowResourceBase { return getValueJson(path.toString()); } - protected static void checkValueJSON(String table, String row, String column, - String value) throws IOException { + protected static void checkValueJSON(String table, String row, String column, String value) + throws IOException { Response response = getValueJson(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); @@ -553,13 +527,13 @@ public class RowResourceBase { assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkIncrementValueJSON(String table, String row, String column, - long value) throws IOException { + protected static void checkIncrementValueJSON(String table, String row, String column, long value) + throws IOException { Response response = getValueJson(table, row, column); assertEquals(200, response.getCode()); 
assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); @@ -567,8 +541,8 @@ public class RowResourceBase { assertEquals(Bytes.toLong(cell.getValue()), value); } - protected static Response putValueJson(String table, String row, String column, - String value) throws IOException { + protected static Response putValueJson(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -580,21 +554,19 @@ public class RowResourceBase { } protected static Response putValueJson(String url, String table, String row, String column, - String value) throws IOException { + String value) throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(url, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(url, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); return response; } - protected static Response appendValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response appendValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -604,8 +576,8 @@ public class RowResourceBase { return putValueXML(path.toString(), table, row, column, value); } - protected static Response appendValuePB(String table, String row, String column, - String value) throws IOException { + protected static Response appendValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -615,8 +587,8 @@ public class RowResourceBase { return putValuePB(path.toString(), table, row, column, value); } - protected static Response appendValueJson(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response appendValueJson(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -626,8 +598,8 @@ public class RowResourceBase { return putValueJson(path.toString(), table, row, column, value); } - protected static Response incrementValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response incrementValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -637,8 +609,8 @@ public class RowResourceBase { return putValueXML(path.toString(), table, row, column, value); } 
- protected static Response incrementValuePB(String table, String row, String column, - String value) throws IOException { + protected static Response incrementValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -649,7 +621,7 @@ public class RowResourceBase { } protected static Response incrementValueJson(String table, String row, String column, - String value) throws IOException, JAXBException { + String value) throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java index f4f9c757208..9d9d2f33769 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestDeleteRow extends RowResourceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDeleteRow.class); + HBaseClassTestRule.forClass(TestDeleteRow.class); @Test public void testDeleteNonExistentColumn() throws Exception { @@ -91,11 +91,11 @@ public class TestDeleteRow extends RowResourceBase { response = getValueXML(TABLE, ROW_1, COLUMN_2); assertEquals(404, response.getCode()); - //Delete a row in non existent table + // Delete a row in non existent table response = deleteValue("dummy", ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //Delete non existent column + // Delete non existent column response = deleteValue(TABLE, ROW_1, "dummy"); assertEquals(404, response.getCode()); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java index 42e38fc99a6..fbff87c3d0c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,12 +36,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestGZIPResponseWrapper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGZIPResponseWrapper.class); + HBaseClassTestRule.forClass(TestGZIPResponseWrapper.class); private final HttpServletResponse response = mock(HttpServletResponse.class); private final GZIPResponseWrapper wrapper = new GZIPResponseWrapper(response); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java index e1dec900d49..b2c45e8cbd7 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,14 +44,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestGetAndPutResource extends RowResourceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetAndPutResource.class); + HBaseClassTestRule.forClass(TestGetAndPutResource.class); private static final MetricsAssertHelper METRICS_ASSERT = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); + CompatibilityFactory.getInstance(MetricsAssertHelper.class); @Test public void testForbidden() throws IOException, JAXBException { @@ -146,8 +146,8 @@ public class TestGetAndPutResource extends RowResourceBase { assertEquals(200, response.getCode()); checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap<String,String> otherCells = new HashMap<>(); - otherCells.put(COLUMN_2,VALUE_3); + HashMap<String, String> otherCells = new HashMap<>(); + otherCells.put(COLUMN_2, VALUE_3); // On Success update both the cells response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells); @@ -179,8 +179,8 @@ public class TestGetAndPutResource extends RowResourceBase { assertEquals(200, response.getCode()); checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap<String,String> otherCells = new HashMap<>(); - otherCells.put(COLUMN_2,VALUE_3); + HashMap<String, String> otherCells = new HashMap<>(); + otherCells.put(COLUMN_2, VALUE_3); // On Success update both the cells response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells); @@ -217,9 +217,9 @@ public class TestGetAndPutResource extends RowResourceBase { checkValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3); // Deletes the following columns based on Column1 check - HashMap<String,String> cellsToDelete = new HashMap<>(); - cellsToDelete.put(COLUMN_2,VALUE_2); // Value does not matter - cellsToDelete.put(COLUMN_3,VALUE_3); // Value does not matter + HashMap<String, String> cellsToDelete = new HashMap<>(); + cellsToDelete.put(COLUMN_2, VALUE_2); // Value does not matter + cellsToDelete.put(COLUMN_3, VALUE_3); // Value does not matter // On Success update both the cells response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1, cellsToDelete); @@ -265,7 +265,7 @@ public class 
TestGetAndPutResource extends RowResourceBase { assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type")); assertTrue(Bytes.equals(response.getBody(), body)); boolean foundTimestampHeader = false; - for (Header header: response.getHeaders()) { + for (Header header : response.getHeaders()) { if (header.getName().equals("X-Timestamp")) { foundTimestampHeader = true; break; @@ -280,8 +280,7 @@ public class TestGetAndPutResource extends RowResourceBase { @Test public void testSingleCellGetJSON() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; - Response response = client.put(path, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_4)); + Response response = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_4)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -296,16 +295,13 @@ public class TestGetAndPutResource extends RowResourceBase { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_4); - CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L, - Bytes.toBytes(VALUE_1)); - CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L, - Bytes.toBytes(VALUE_2)); + CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L, Bytes.toBytes(VALUE_1)); + CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L, Bytes.toBytes(VALUE_2)); rowModel.addCell(cellOne); rowModel.addCell(cellTwo); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(path, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(path, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -315,8 +311,8 @@ public class TestGetAndPutResource extends RowResourceBase { assertTrue(cellSet.getRows().size() == 1); assertTrue(cellSet.getRows().get(0).getCells().size() == 1); CellModel cell = cellSet.getRows().get(0).getCells().get(0); - assertEquals(VALUE_2 , Bytes.toString(cell.getValue())); - assertEquals(2L , cell.getTimestamp()); + assertEquals(VALUE_2, Bytes.toString(cell.getValue())); + assertEquals(2L, cell.getTimestamp()); response = deleteRow(TABLE, ROW_4); assertEquals(200, response.getCode()); } @@ -332,18 +328,16 @@ public class TestGetAndPutResource extends RowResourceBase { path.append('/'); path.append(COLUMN_1); Response response; - response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1, - VALUE_1); + response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); } @Test public void testNoSuchCF() throws IOException { - final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA+":"; + final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA + ":"; final String badPath = "/" + TABLE + "/" + ROW_1 + "/" + "BAD"; - Response response = client.post(goodPath, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_1)); + Response response = client.post(goodPath, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1)); assertEquals(200, response.getCode()); assertEquals(200, client.get(goodPath, Constants.MIMETYPE_BINARY).getCode()); assertEquals(404, client.get(badPath, Constants.MIMETYPE_BINARY).getCode()); @@ -352,25 +346,20 @@ public class 
TestGetAndPutResource extends RowResourceBase { @Test public void testMultiCellGetPutXML() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -391,23 +380,19 @@ public class TestGetAndPutResource extends RowResourceBase { @Test public void testMultiCellGetPutPB() throws IOException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); - Response response = client.put(path, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(path, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); // make sure the fake row was not actually created @@ -438,12 +423,12 @@ public class TestGetAndPutResource extends RowResourceBase { } response = getValueXML(TABLE, rows[0], rows[2], COLUMN_1); assertEquals(200, response.getCode()); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertEquals(2, cellSet.getRows().size()); - for (int i = 0; i < cellSet.getRows().size()-1; i++) { + for (int i = 0; i < cellSet.getRows().size() - 1; i++) { RowModel rowModel = cellSet.getRows().get(i); - for 
(CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { assertEquals(COLUMN_1, Bytes.toString(cell.getColumn())); assertEquals(values[i], Bytes.toString(cell.getValue())); } @@ -458,16 +443,14 @@ public class TestGetAndPutResource extends RowResourceBase { public void testInvalidCheckParam() throws IOException, JAXBException { CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); final String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "?check=blah"; - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); assertEquals(400, response.getCode()); } @@ -476,40 +459,33 @@ public class TestGetAndPutResource extends RowResourceBase { String dummyColumn = "doesnot:exist"; CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn), - Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn), Bytes.toBytes(VALUE_1))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); final String path = "/" + TABLE + "/" + ROW_1 + "/" + dummyColumn; - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); assertEquals(404, response.getCode()); } @Test public void testMultiCellGetJson() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(path, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(path, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); // make sure the fake row was not actually created @@ -531,8 +507,7 @@ public class TestGetAndPutResource extends RowResourceBase { @Test public void testMetrics() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; - Response response = client.put(path, Constants.MIMETYPE_BINARY, - 
Bytes.toBytes(VALUE_4)); + Response response = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_4)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -579,7 +554,7 @@ public class TestGetAndPutResource extends RowResourceBase { response = client.get(path, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); CellSetModel cellSet = - (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertTrue(cellSet.getRows().size() == 1); assertTrue(cellSet.getRows().get(0).getCells().size() == 3); List cells = cellSet.getRows().get(0).getCells(); @@ -594,8 +569,10 @@ public class TestGetAndPutResource extends RowResourceBase { private boolean containsCellModel(List cells, String column, String value) { boolean contains = false; for (CellModel cell : cells) { - if (Bytes.toString(cell.getColumn()).equals(column) - && Bytes.toString(cell.getValue()).equals(value)) { + if ( + Bytes.toString(cell.getColumn()).equals(column) + && Bytes.toString(cell.getValue()).equals(value) + ) { contains = true; return contains; } @@ -605,25 +582,20 @@ public class TestGetAndPutResource extends RowResourceBase { @Test public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -639,8 +611,8 @@ public class TestGetAndPutResource extends RowResourceBase { response = client.get(query.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertTrue(cellSet.getRows().size() == 2); response = deleteRow(TABLE, ROW_1); @@ -651,25 +623,20 @@ public class TestGetAndPutResource extends RowResourceBase { @Test public void testSuffixGlobbingXML() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate 
nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -687,8 +654,8 @@ public class TestGetAndPutResource extends RowResourceBase { response = client.get(query.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); List rows = cellSet.getRows(); assertTrue(rows.size() == 2); for (RowModel row : rows) { @@ -706,7 +673,7 @@ public class TestGetAndPutResource extends RowResourceBase { Response response = getValueXML(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -723,7 +690,7 @@ public class TestGetAndPutResource extends RowResourceBase { Response response = getValuePB(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -740,7 +707,7 @@ public class TestGetAndPutResource extends RowResourceBase { Response response = getValueJson(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); putValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -757,14 +724,14 @@ public class TestGetAndPutResource extends RowResourceBase { Response response = getValueXML(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append single cell + // append single cell response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValueXML(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValueXML(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); 
+ Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); @@ -775,14 +742,14 @@ public class TestGetAndPutResource extends RowResourceBase { Response response = getValuePB(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); @@ -793,14 +760,14 @@ public class TestGetAndPutResource extends RowResourceBase { Response response = getValueJson(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java index 32d330564d3..fa3c0a228b8 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,12 +48,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestGzipFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGzipFilter.class); + HBaseClassTestRule.forClass(TestGzipFilter.class); private static final TableName TABLE = TableName.valueOf("TestGzipFilter"); private static final String CFA = "a"; @@ -63,16 +63,14 @@ public class TestGzipFilter { private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1"); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { return; @@ -147,4 +145,3 @@ public class TestGzipFilter { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index 388596b2248..61734734871 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,12 +55,12 @@ import org.junit.runners.Parameterized; import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestMultiRowResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiRowResource.class); + HBaseClassTestRule.forClass(TestMultiRowResource.class); private static final TableName TABLE = TableName.valueOf("TestRowResource"); private static final String CFA = "a"; @@ -103,10 +103,7 @@ public class TestMultiRowResource { extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); @@ -258,8 +255,8 @@ public class TestMultiRowResource { Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); - ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper( - CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); assertEquals(1, cellSet.getRows().size()); assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey())); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java index 804db4342d3..2d09ae0da4a 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -62,24 +62,23 @@ import org.junit.experimental.categories.Category; import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestNamespacesInstanceResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesInstanceResource.class); + HBaseClassTestRule.forClass(TestNamespacesInstanceResource.class); private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; - private static Map<String,String> NAMESPACE1_PROPS = new HashMap<>(); + private static Map<String, String> NAMESPACE1_PROPS = new HashMap<>(); private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; - private static Map<String,String> NAMESPACE2_PROPS = new HashMap<>(); + private static Map<String, String> NAMESPACE2_PROPS = new HashMap<>(); private static String NAMESPACE3 = "TestNamespacesInstanceResource3"; - private static Map<String,String> NAMESPACE3_PROPS = new HashMap<>(); + private static Map<String, String> NAMESPACE3_PROPS = new HashMap<>(); private static String NAMESPACE4 = "TestNamespacesInstanceResource4"; - private static Map<String,String> NAMESPACE4_PROPS = new HashMap<>(); + private static Map<String, String> NAMESPACE4_PROPS = new HashMap<>(); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -91,12 +90,11 @@ public class TestNamespacesInstanceResource { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); testNamespacesInstanceModel = new TestNamespacesInstanceModel(); context = JAXBContext.newInstance(NamespacesInstanceModel.class, TableListModel.class); - jsonMapper = new JacksonJaxbJsonProvider() - .locateMapper(NamespacesInstanceModel.class, MediaType.APPLICATION_JSON_TYPE); + jsonMapper = new JacksonJaxbJsonProvider().locateMapper(NamespacesInstanceModel.class, + MediaType.APPLICATION_JSON_TYPE); NAMESPACE1_PROPS.put("key1", "value1"); NAMESPACE2_PROPS.put("key2a", "value2a"); NAMESPACE2_PROPS.put("key2b", "value2b"); @@ -118,12 +116,11 @@ public class TestNamespacesInstanceResource { } @SuppressWarnings("unchecked") - private static <T> T fromXML(byte[] content) - throws JAXBException { + private static <T> T fromXML(byte[] content) throws JAXBException { return (T) context.createUnmarshaller().unmarshal(new ByteArrayInputStream(content)); } - private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) throws IOException{ + private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) throws IOException { NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); for (NamespaceDescriptor namespaceDescriptor : nd) { if (namespaceDescriptor.getName().equals(namespaceName)) { @@ -133,19 +130,19 @@ public class TestNamespacesInstanceResource { return null; } - private void checkNamespaceProperties(NamespaceDescriptor nd, Map<String,String> testProps){ + private void 
checkNamespaceProperties(NamespaceDescriptor nd, Map<String, String> testProps) { checkNamespaceProperties(nd.getConfiguration(), testProps); } - private void checkNamespaceProperties(Map<String,String> namespaceProps, - Map<String,String> testProps){ + private void checkNamespaceProperties(Map<String, String> namespaceProps, + Map<String, String> testProps) { assertTrue(namespaceProps.size() == testProps.size()); - for (String key: testProps.keySet()) { + for (String key : testProps.keySet()) { assertEquals(testProps.get(key), namespaceProps.get(key)); } } - private void checkNamespaceTables(List<TableModel> namespaceTables, List<String> testTables){ + private void checkNamespaceTables(List<TableModel> namespaceTables, List<String> testTables) { assertEquals(namespaceTables.size(), testTables.size()); for (TableModel namespaceTable : namespaceTables) { String tableName = namespaceTable.getName(); @@ -333,7 +330,7 @@ public class TestNamespacesInstanceResource { jsonString = jsonMapper.writeValueAsString(model2); response = client.post(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); assertEquals(201, response.getCode()); - //check passing null content-type with a payload returns 415 + // check passing null content-type with a payload returns 415 Header[] nullHeaders = null; response = client.post(namespacePath1, nullHeaders, toXML(model1)); assertEquals(415, response.getCode()); @@ -389,23 +386,23 @@ public class TestNamespacesInstanceResource { model4 = testNamespacesInstanceModel.buildTestModel(NAMESPACE4, NAMESPACE4_PROPS); testNamespacesInstanceModel.checkModel(model4, NAMESPACE4, NAMESPACE4_PROPS); - //Defines null headers for use in tests where no body content is provided, so that we set + // Defines null headers for use in tests where no body content is provided, so that we set // no content-type in the request Header[] nullHeaders = null; // Test cannot PUT (alter) non-existent namespace. - response = client.put(namespacePath3, nullHeaders, new byte[]{}); + response = client.put(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); // Test cannot create tables when in read only mode. conf.set("hbase.rest.readonly", "true"); - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); NamespaceDescriptor nd3 = findNamespace(admin, NAMESPACE3); NamespaceDescriptor nd4 = findNamespace(admin, NAMESPACE4); @@ -414,14 +411,14 @@ public class TestNamespacesInstanceResource { conf.set("hbase.rest.readonly", "false"); // Create namespace with no body and binary content type. - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(201, response.getCode()); // Create namespace with protobuf content-type. 
- response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(201, response.getCode()); - //check setting unsupported content-type returns 415 - response = client.post(namespacePath3, Constants.MIMETYPE_BINARY, new byte[]{}); + // check setting unsupported content-type returns 415 + response = client.post(namespacePath3, Constants.MIMETYPE_BINARY, new byte[] {}); assertEquals(415, response.getCode()); // Check that created namespaces correctly. @@ -433,10 +430,10 @@ public class TestNamespacesInstanceResource { checkNamespaceProperties(nd4, NAMESPACE4_PROPS); // Check cannot post tables that already exist. - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); // Check cannot post tables when in read only mode. diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java index 3d0bfc32a9f..4802d230fac 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,18 +43,17 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestNamespacesResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesResource.class); + HBaseClassTestRule.forClass(TestNamespacesResource.class); private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java index fbd80808613..5731dd94fc6 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.rest; import static org.junit.Assert.assertEquals; + import java.io.File; import java.lang.reflect.Method; import java.security.KeyPair; @@ -42,12 +43,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestRESTServerSSL { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRESTServerSSL.class); + HBaseClassTestRule.forClass(TestRESTServerSSL.class); private static final Logger LOG = LoggerFactory.getLogger(TestRESTServerSSL.class); @@ -79,8 +80,8 @@ public class TestRESTServerSSL { initializeAlgorithmId(); keyDir = initKeystoreDir(); KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - X509Certificate serverCertificate = KeyStoreTestUtil.generateCertificate( - "CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); + X509Certificate serverCertificate = + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); generateTrustStore("jks", serverCertificate); generateTrustStore("jceks", serverCertificate); @@ -162,8 +163,6 @@ public class TestRESTServerSSL { assertEquals(200, response.getCode()); } - - private static File initKeystoreDir() { String dataTestDir = TEST_UTIL.getDataTestDir().toString(); File keystoreDir = new File(dataTestDir, TestRESTServerSSL.class.getSimpleName() + "_keys"); @@ -213,7 +212,7 @@ public class TestRESTServerSSL { REST_TEST_UTIL.startServletContainer(conf); Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); sslClient = new Client(localCluster, getTruststoreFilePath(storeType), - Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); + Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java index ba0390d465d..4168c75a7c5 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,16 +32,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestResourceFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResourceFilter.class); + HBaseClassTestRule.forClass(TestResourceFilter.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @BeforeClass @@ -49,8 +48,7 @@ public class TestResourceFilter { TEST_UTIL.getConfiguration().set(Constants.FILTER_CLASSES, DummyFilter.class.getName()); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); } @AfterClass diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java index b8ff62da72b..c5da2f6e4ff 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import java.util.Iterator; import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Marshaller; @@ -67,12 +66,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestScannerResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerResource.class); + HBaseClassTestRule.forClass(TestScannerResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestScannerResource.class); private static final TableName TABLE = TableName.valueOf("TestScannerResource"); @@ -84,8 +83,7 @@ public class TestScannerResource { private static final String COLUMN_2 = CFB + ":2"; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Marshaller marshaller; @@ -95,10 +93,10 @@ public class TestScannerResource { private static Configuration conf; static int insertData(Configuration conf, TableName tableName, String column, double prob) - throws IOException { + throws IOException { Random rng = ThreadLocalRandom.current(); byte[] k = new byte[3]; - byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); + byte[][] 
famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); List puts = new ArrayList<>(); for (byte b1 = 'a'; b1 < 'z'; b1++) { for (byte b2 = 'a'; b2 < 'z'; b2++) { @@ -116,7 +114,7 @@ public class TestScannerResource { } } try (Connection conn = ConnectionFactory.createConnection(conf); - Table table = conn.getTable(tableName)) { + Table table = conn.getTable(tableName)) { table.put(puts); } return puts.size(); @@ -138,8 +136,8 @@ public class TestScannerResource { private static int fullTableScan(ScannerModel model) throws IOException { model.setBatch(100); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -175,12 +173,8 @@ public class TestScannerResource { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class, + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); @@ -220,16 +214,14 @@ public class TestScannerResource { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, - body); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -238,8 +230,8 @@ public class TestScannerResource { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // confirm batch size conformance assertEquals(BATCH_SIZE, countCellSet(cellSet)); @@ -264,16 +256,16 @@ public class TestScannerResource { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", - 
Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -307,16 +299,16 @@ public class TestScannerResource { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -328,9 +320,8 @@ public class TestScannerResource { // verify that data was returned assertTrue(response.getBody().length > 0); // verify that the expected X-headers are present - boolean foundRowHeader = false, foundColumnHeader = false, - foundTimestampHeader = false; - for (Header header: response.getHeaders()) { + boolean foundRowHeader = false, foundColumnHeader = false, foundTimestampHeader = false; + for (Header header : response.getHeaders()) { if (header.getName().equals("X-Row")) { foundRowHeader = true; } else if (header.getName().equals("X-Column")) { @@ -371,8 +362,8 @@ public class TestScannerResource { StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + NONEXISTENT_TABLE + - "/scanner", Constants.MIMETYPE_XML, body); + Response response = + client.put("/" + NONEXISTENT_TABLE + "/scanner", Constants.MIMETYPE_XML, body); String scannerURI = response.getLocation(); assertNotNull(scannerURI); response = client.get(scannerURI, Constants.MIMETYPE_XML); @@ -392,9 +383,8 @@ public class TestScannerResource { String scannerURI = response.getLocation(); assertNotNull(scannerURI); TEST_UTIL.getAdmin().disableTable(TABLE_TO_BE_DISABLED); - response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF); + response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF); assertTrue("got " + response.getCode(), response.getCode() == 410); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 7d548886b98..a6fe3ad3e21 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -77,47 +77,38 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestScannersWithFilters { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannersWithFilters.class); + HBaseClassTestRule.forClass(TestScannersWithFilters.class); private static final Logger LOG = LoggerFactory.getLogger(TestScannersWithFilters.class); private static final TableName TABLE = TableName.valueOf("TestScannersWithFilters"); - private static final byte[][] ROWS_ONE = { - Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"), - Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") - }; + private static final byte[][] ROWS_ONE = { Bytes.toBytes("testRowOne-0"), + Bytes.toBytes("testRowOne-1"), Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") }; - private static final byte[][] ROWS_TWO = { - Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"), - Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") - }; + private static final byte[][] ROWS_TWO = { Bytes.toBytes("testRowTwo-0"), + Bytes.toBytes("testRowTwo-1"), Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") }; - private static final byte[][] FAMILIES = { - Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") - }; + private static final byte[][] FAMILIES = + { Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") }; - private static final byte[][] QUALIFIERS_ONE = { - Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), - Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") - }; + private static final byte[][] QUALIFIERS_ONE = + { Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), + Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") }; - private static final byte[][] QUALIFIERS_TWO = { - Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), - Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") - }; + private static final byte[][] QUALIFIERS_TWO = + { Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), + Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") }; - private static final byte[][] VALUES = { - Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") - }; + private static final byte[][] VALUES = + { Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") }; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Marshaller marshaller; @@ -129,15 +120,11 @@ public class TestScannersWithFilters { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class, - ScannerModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, + ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller 
= context.createUnmarshaller(); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (!admin.tableExists(TABLE)) { HTableDescriptor htd = new HTableDescriptor(TABLE); @@ -220,16 +207,14 @@ public class TestScannersWithFilters { TEST_UTIL.shutdownMiniCluster(); } - private static void verifyScan(Scan s, long expectedRows, long expectedKeys) - throws Exception { + private static void verifyScan(Scan s, long expectedRows, long expectedKeys) throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -238,16 +223,17 @@ public class TestScannersWithFilters { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cells = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cells = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); int rows = cells.getRows().size(); - assertEquals("Scanned too many rows! Only expected " + expectedRows + - " total but scanned " + rows, expectedRows, rows); + assertEquals( + "Scanned too many rows! 
Only expected " + expectedRows + " total but scanned " + rows, + expectedRows, rows); for (RowModel row : cells.getRows()) { int count = row.getCells().size(); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + count, expectedKeys, count); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + count, + expectedKeys, count); } // delete the scanner @@ -255,15 +241,14 @@ public class TestScannersWithFilters { assertEquals(200, response.getCode()); } - private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { + private static void verifyScanFull(Scan s, KeyValue[] kvs) throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -272,8 +257,8 @@ public class TestScannersWithFilters { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // delete the scanner response = client.delete(scannerURI); @@ -294,36 +279,29 @@ public class TestScannersWithFilters { break; } - assertTrue("Scanned too many keys! Only expected " + kvs.length + - " total but already scanned " + (cells.size() + idx), - kvs.length >= idx + cells.size()); - for (CellModel cell: cells) { - assertTrue("Row mismatch", - Bytes.equals(rowModel.getKey(), CellUtil.cloneRow(kvs[idx]))); + assertTrue("Scanned too many keys! 
Only expected " + kvs.length + + " total but already scanned " + (cells.size() + idx), kvs.length >= idx + cells.size()); + for (CellModel cell : cells) { + assertTrue("Row mismatch", Bytes.equals(rowModel.getKey(), CellUtil.cloneRow(kvs[idx]))); byte[][] split = CellUtil.parseColumn(cell.getColumn()); - assertTrue("Family mismatch", - Bytes.equals(split[0], CellUtil.cloneFamily(kvs[idx]))); - assertTrue("Qualifier mismatch", - Bytes.equals(split[1], CellUtil.cloneQualifier(kvs[idx]))); - assertTrue("Value mismatch", - Bytes.equals(cell.getValue(), CellUtil.cloneValue(kvs[idx]))); + assertTrue("Family mismatch", Bytes.equals(split[0], CellUtil.cloneFamily(kvs[idx]))); + assertTrue("Qualifier mismatch", Bytes.equals(split[1], CellUtil.cloneQualifier(kvs[idx]))); + assertTrue("Value mismatch", Bytes.equals(cell.getValue(), CellUtil.cloneValue(kvs[idx]))); idx++; } } - assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, - kvs.length, idx); + assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx); } - private static void verifyScanNoEarlyOut(Scan s, long expectedRows, - long expectedKeys) throws Exception { + private static void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys) + throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -332,8 +310,8 @@ public class TestScannersWithFilters { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // delete the scanner response = client.delete(scannerURI); @@ -353,13 +331,12 @@ public class TestScannersWithFilters { break; } - assertTrue("Scanned too many rows! Only expected " + expectedRows + - " total but already scanned " + (j+1), expectedRows > j); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + cells.size(), expectedKeys, cells.size()); + assertTrue("Scanned too many rows! 
Only expected " + expectedRows + + " total but already scanned " + (j + 1), expectedRows > j); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + cells.size(), + expectedKeys, cells.size()); } - assertEquals("Expected " + expectedRows + " rows but scanned " + j + - " rows", expectedRows, j); + assertEquals("Expected " + expectedRows + " rows but scanned " + j + " rows", expectedRows, j); } @Test @@ -375,7 +352,7 @@ public class TestScannersWithFilters { // One family s = new Scan(); s.addFamily(FAMILIES[0]); - verifyScan(s, expectedRows, expectedKeys/2); + verifyScan(s, expectedRows, expectedKeys / 2); } @Test @@ -391,7 +368,7 @@ public class TestScannersWithFilters { @Test public void testPageFilter() throws Exception { // KVs in first 6 rows - KeyValue [] expectedKVs = { + KeyValue[] expectedKVs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -433,8 +410,7 @@ public class TestScannersWithFilters { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; // Grab all 6 rows long expectedRows = 6; @@ -480,8 +456,7 @@ public class TestScannersWithFilters { // If we just use start/stop row, we get total/2 - 1 rows long expectedRows = (numRows / 2) - 1; long expectedKeys = colsPerRow; - Scan s = new Scan(Bytes.toBytes("testRowOne-0"), - Bytes.toBytes("testRowOne-3")); + Scan s = new Scan(Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-3")); verifyScan(s, expectedRows, expectedKeys); // Now use start row with inclusive stop filter @@ -495,8 +470,7 @@ public class TestScannersWithFilters { // If we just use start/stop row, we get total/2 - 1 rows expectedRows = (numRows / 2) - 1; expectedKeys = colsPerRow; - s = new Scan(Bytes.toBytes("testRowTwo-0"), - Bytes.toBytes("testRowTwo-3")); + s = new Scan(Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-3")); verifyScan(s, expectedRows, expectedKeys); // Now use start row with inclusive stop filter @@ -512,7 +486,7 @@ public class TestScannersWithFilters { long expectedRows = numRows / 2; long expectedKeys = 2; Filter f = new QualifierFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -522,7 +496,7 @@ public class TestScannersWithFilters { expectedRows = numRows / 2; expectedKeys = 2; f = new QualifierFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -531,7 +505,7 @@ public class TestScannersWithFilters { expectedRows = numRows / 2; expectedKeys = 4; f = new QualifierFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -541,7 +515,7 @@ public class TestScannersWithFilters { expectedRows = numRows / 2; expectedKeys = 4; f = 
new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -551,7 +525,7 @@ public class TestScannersWithFilters { expectedRows = numRows / 2; expectedKeys = 4; f = new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -561,19 +535,18 @@ public class TestScannersWithFilters { expectedRows = numRows / 2; expectedKeys = 2; f = new QualifierFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); // Match keys not equal to. Look across rows and fully validate the keys and ordering // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(QUALIFIERS_ONE[2])); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(QUALIFIERS_ONE[2])); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), @@ -609,18 +582,16 @@ public class TestScannersWithFilters { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); // Test across rows and groups with a regex. 
Filter out "test*-2" // Expect 4 keys per row across both groups - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new RegexStringComparator("test.+-2")); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new RegexStringComparator("test.+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { + kvs = new KeyValue[] { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), @@ -650,8 +621,7 @@ public class TestScannersWithFilters { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -660,8 +630,8 @@ public class TestScannersWithFilters { // Match a single row, all keys long expectedRows = 1; long expectedKeys = colsPerRow; - Filter f = new RowFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + Filter f = + new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -669,8 +639,7 @@ public class TestScannersWithFilters { // Match a two rows, one from each group, using regex expectedRows = 2; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator("testRow.+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("testRow.+-2")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -679,8 +648,7 @@ public class TestScannersWithFilters { // Expect all keys in one row expectedRows = 1; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -690,7 +658,7 @@ public class TestScannersWithFilters { expectedRows = 2; expectedKeys = colsPerRow; f = new RowFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -699,8 +667,8 @@ public class TestScannersWithFilters { // Expect all keys in all but one row expectedRows = numRows - 1; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = + new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -710,7 +678,7 @@ public class TestScannersWithFilters { expectedRows = numRows - 1; expectedKeys = colsPerRow; f = new RowFilter(CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -719,8 +687,7 @@ public class TestScannersWithFilters { // Expect all keys in all but two rows expectedRows = numRows - 2; expectedKeys = colsPerRow; - f = new 
RowFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -728,12 +695,12 @@ public class TestScannersWithFilters { // Match rows not equal to testRowTwo-2 // Look across rows and fully validate the keys and ordering // Should see all keys in all rows but testRowTwo-2 - f = new RowFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = + new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -768,19 +735,17 @@ public class TestScannersWithFilters { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); // Test across rows and groups with a regex // Filter out everything that doesn't match "*-2" // Expect all keys in two rows - f = new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { + kvs = new KeyValue[] { // testRowOne-2 new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -794,8 +759,7 @@ public class TestScannersWithFilters { new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; verifyScanFull(s, kvs); } @@ -804,8 +768,8 @@ public class TestScannersWithFilters { // Match group one rows long expectedRows = numRows / 2; long expectedKeys = colsPerRow; - Filter f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + Filter f = + new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -813,8 +777,7 @@ public class TestScannersWithFilters { // Match group two rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -822,8 +785,7 @@ public class TestScannersWithFilters { // Match all values using regex expectedRows = numRows; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new RegexStringComparator("testValue((One)|(Two))")); + f = new ValueFilter(CompareOperator.EQUAL, new 
RegexStringComparator("testValue((One)|(Two))")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -832,8 +794,7 @@ public class TestScannersWithFilters { // Expect group one rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -843,7 +804,7 @@ public class TestScannersWithFilters { expectedRows = numRows; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -853,7 +814,7 @@ public class TestScannersWithFilters { expectedRows = numRows / 2; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -863,7 +824,7 @@ public class TestScannersWithFilters { expectedRows = numRows / 2; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -873,7 +834,7 @@ public class TestScannersWithFilters { expectedRows = numRows; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -882,8 +843,8 @@ public class TestScannersWithFilters { // Expect half rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + f = + new ValueFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -892,11 +853,11 @@ public class TestScannersWithFilters { // Look across rows and fully validate the keys and ordering // Should see all keys in all group two rows f = new ValueFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), @@ -917,8 +878,7 @@ public class TestScannersWithFilters { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -927,11 +887,11 @@ public class TestScannersWithFilters { // Test for qualifier regex: "testQualifierOne-2" // Should only get 
rows from second group, and all keys Filter f = new SkipFilter(new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2")))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2")))); Scan s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), @@ -952,8 +912,7 @@ public class TestScannersWithFilters { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -963,30 +922,22 @@ public class TestScannersWithFilters { // regular expression and substring filters // Use must pass all List filters = new ArrayList<>(3); - filters.add(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("One"))); + filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One"))); Filter f = new FilterList(Operator.MUST_PASS_ALL, filters); Scan s = new Scan(); s.addFamily(FAMILIES[0]); s.setFilter(f); - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) - }; + KeyValue[] kvs = { new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) }; verifyScanFull(s, kvs); // Test getting everything with a MUST_PASS_ONE filter including row, qf, // val, regular expression and substring filters filters.clear(); - filters.add(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+Two.+"))); - filters.add(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("One"))); + filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+Two.+"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One"))); f = new FilterList(Operator.MUST_PASS_ONE, filters); s = new Scan(); s.setFilter(f); @@ -998,14 +949,12 @@ public class TestScannersWithFilters { Scan s = new Scan(); s.setFilter(new FirstKeyOnlyFilter()); // Expected KVs, the first KV from each of the remaining 6 rows - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + KeyValue[] kvs = { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) - }; + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) }; verifyScanFull(s, 
kvs); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java index ccbd4df4e93..a17ebb1789d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -69,11 +69,11 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestScannersWithLabels { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannersWithLabels.class); + HBaseClassTestRule.forClass(TestScannersWithLabels.class); private static final TableName TABLE = TableName.valueOf("TestScannersWithLabels"); private static final String CFA = "a"; @@ -96,7 +96,7 @@ public class TestScannersWithLabels { private static Configuration conf; private static int insertData(TableName tableName, String column, double prob) - throws IOException { + throws IOException { byte[] k = new byte[3]; byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); @@ -105,8 +105,8 @@ public class TestScannersWithLabels { Put put = new Put(Bytes.toBytes("row" + i)); put.setDurability(Durability.SKIP_WAL); put.addColumn(famAndQf[0], famAndQf[1], k); - put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" - + TOPSECRET)); + put.setCellVisibility( + new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" 
+ TOPSECRET)); puts.add(put); } try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { @@ -131,11 +131,10 @@ public class TestScannersWithLabels { @BeforeClass public static void setUpBeforeClass() throws Exception { - SUPERUSER = User.createUserForTesting(conf, "admin", - new String[] { "supergroup" }); + SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); conf = TEST_UTIL.getConfiguration(); - conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, - SimpleScanLabelGenerator.class, ScanLabelGenerator.class); + conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, + ScanLabelGenerator.class); conf.set("hbase.superuser", SUPERUSER.getShortName()); VisibilityTestUtil.enableVisiblityLabels(conf); TEST_UTIL.startMiniCluster(1); @@ -146,7 +145,7 @@ public class TestScannersWithLabels { REST_TEST_UTIL.startServletContainer(conf); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, - ScannerModel.class); + ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); Admin admin = TEST_UTIL.getAdmin(); @@ -237,8 +236,8 @@ public class TestScannersWithLabels { // Respond with 204 as there are no cells to be retrieved assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response - .getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertEquals(5, countCellSet(cellSet)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java index 609ee011e76..191eae70d3f 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,19 +53,18 @@ import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestSchemaResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSchemaResource.class); + HBaseClassTestRule.forClass(TestSchemaResource.class); private static String TABLE1 = "TestSchemaResource1"; private static String TABLE2 = "TestSchemaResource2"; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -93,12 +92,9 @@ public class TestSchemaResource { extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); testTableSchemaModel = new TestTableSchemaModel(); - context = JAXBContext.newInstance( - ColumnSchemaModel.class, - TableSchemaModel.class); + context = JAXBContext.newInstance(ColumnSchemaModel.class, TableSchemaModel.class); } @AfterClass @@ -111,7 +107,7 @@ public class TestSchemaResource { public void tearDown() throws Exception { Admin admin = TEST_UTIL.getAdmin(); - for (String table : new String[] {TABLE1, TABLE2}) { + for (String table : new String[] { TABLE1, TABLE2 }) { TableName t = TableName.valueOf(table); if (admin.tableExists(t)) { admin.disableTable(t); @@ -128,8 +124,7 @@ public class TestSchemaResource { return Bytes.toBytes(writer.toString()); } - private static TableSchemaModel fromXML(byte[] content) - throws JAXBException { + private static TableSchemaModel fromXML(byte[] content) throws JAXBException { return (TableSchemaModel) context.createUnmarshaller() .unmarshal(new ByteArrayInputStream(content)); } @@ -142,7 +137,7 @@ public class TestSchemaResource { Admin admin = TEST_UTIL.getAdmin(); assertFalse("Table " + TABLE1 + " should not exist", - admin.tableExists(TableName.valueOf(TABLE1))); + admin.tableExists(TableName.valueOf(TABLE1))); // create the table model = testTableSchemaModel.buildTestModel(TABLE1); @@ -154,8 +149,8 @@ public class TestSchemaResource { } response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model), extraHdr); - assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), - 201, response.getCode()); + assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), 201, + response.getCode()); // recall the same put operation but in read-only mode conf.set("hbase.rest.readonly", "true"); @@ -213,15 +208,15 @@ public class TestSchemaResource { response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); assertEquals(400, response.getCode()); } - response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput(), extraHdr); - assertEquals("put failed with csrf " + (csrfEnabled ? 
"enabled" : "disabled"), - 201, response.getCode()); + response = + client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput(), extraHdr); + assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), 201, + response.getCode()); // recall the same put operation but in read-only mode conf.set("hbase.rest.readonly", "true"); - response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput(), extraHdr); + response = + client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput(), extraHdr); assertNotNull(extraHdr); assertEquals(403, response.getCode()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java index ef66ac59077..48ef22355e9 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,12 +101,12 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. */ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestSecureRESTServer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureRESTServer.class); + HBaseClassTestRule.forClass(TestSecureRESTServer.class); private static final Logger LOG = LoggerFactory.getLogger(TestSecureRESTServer.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -138,8 +138,7 @@ public class TestSecureRESTServer { /* * Keytabs */ - File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() - + "_keytabs"); + File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() + "_keytabs"); if (keytabDir.exists()) { FileUtils.deleteDirectory(keytabDir); } @@ -175,15 +174,14 @@ public class TestSecureRESTServer { conf.set("hbase.master.keytab.file", serviceKeytab.getAbsolutePath()); conf.set("hbase.unsafe.regionserver.hostname", "localhost"); conf.set("hbase.master.hostname", "localhost"); - HBaseKerberosUtils.setSecuredConfiguration(conf, - SERVICE_PRINCIPAL+ "@" + KDC.getRealm(), SPNEGO_SERVICE_PRINCIPAL+ "@" + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, SERVICE_PRINCIPAL + "@" + KDC.getRealm(), + SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); setHdfsSecuredConfiguration(conf); - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - TokenProvider.class.getName(), AccessController.class.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - AccessController.class.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName(), + AccessController.class.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName()); conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - AccessController.class.getName()); + AccessController.class.getName()); // Enable EXEC permission checking 
conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); conf.set("hbase.superuser", "hbase"); @@ -194,18 +192,15 @@ public class TestSecureRESTServer { UserGroupInformation.setConfiguration(conf); updateKerberosConfiguration(conf, REST_SERVER_PRINCIPAL, SPNEGO_SERVICE_PRINCIPAL, - restServerKeytab); + restServerKeytab); // Start HDFS - TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder() - .numMasters(1) - .numRegionServers(1) - .numZkServers(1) - .build()); + TEST_UTIL.startMiniCluster( + StartMiniClusterOption.builder().numMasters(1).numRegionServers(1).numZkServers(1).build()); // Start REST - UserGroupInformation restUser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - REST_SERVER_PRINCIPAL, restServerKeytab.getAbsolutePath()); + UserGroupInformation restUser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(REST_SERVER_PRINCIPAL, restServerKeytab.getAbsolutePath()); restUser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { @@ -215,18 +210,18 @@ public class TestSecureRESTServer { }); baseUrl = new URL("http://localhost:" + REST_TEST.getServletPort()); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:acl")); // Let the REST server create, read, and write globally - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); superuser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - AccessControlClient.grant( - conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ, Action.WRITE); + AccessControlClient.grant(conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ, + Action.WRITE); } catch (Throwable t) { if (t instanceof Exception) { throw (Exception) t; @@ -268,13 +263,13 @@ public class TestSecureRESTServer { private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception { // Set principal+keytab configuration for HDFS conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, - SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, - SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); // Enable token access for HDFS blocks conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); // Only use HTTPS (required because we aren't using "secure" ports) @@ -293,8 +288,8 @@ public class TestSecureRESTServer { conf.setBoolean("ignore.secure.ports.for.testing", true); } - private static void updateKerberosConfiguration(Configuration conf, - String serverPrincipal, String spnegoPrincipal, File serverKeytab) { + private static void updateKerberosConfiguration(Configuration conf, String serverPrincipal, + String spnegoPrincipal, File serverKeytab) { 
KerberosName.setRules("DEFAULT"); // Enable Kerberos (pre-req) @@ -312,16 +307,15 @@ public class TestSecureRESTServer { private static void instertData() throws IOException, InterruptedException { // Create a table, write a row to it, grant read perms to the client - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); final TableName table = TableName.valueOf("publicTable"); superuser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { TableDescriptor desc = TableDescriptorBuilder.newBuilder(table) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")).build(); conn.getAdmin().createTable(desc); try (Table t = conn.getTable(table)) { Put p = new Put(Bytes.toBytes("a")); @@ -341,21 +335,22 @@ public class TestSecureRESTServer { }); } - public void testProxy(String extraArgs, String PRINCIPAL, File keytab, int responseCode) throws Exception{ - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + public void testProxy(String extraArgs, String PRINCIPAL, File keytab, int responseCode) + throws Exception { + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); final TableName table = TableName.valueOf("publicTable"); // Read that row as the client - Pair pair = getClient(); + Pair pair = getClient(); CloseableHttpClient client = pair.getFirst(); HttpClientContext context = pair.getSecond(); - HttpGet get = new HttpGet(new URL("http://localhost:"+ REST_TEST.getServletPort()).toURI() + HttpGet get = new HttpGet(new URL("http://localhost:" + REST_TEST.getServletPort()).toURI() + "/" + table + "/a" + extraArgs); get.addHeader("Accept", "application/json"); - UserGroupInformation user = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - PRINCIPAL, keytab.getAbsolutePath()); + UserGroupInformation user = + UserGroupInformation.loginUserFromKeytabAndReturnUGI(PRINCIPAL, keytab.getAbsolutePath()); String jsonResponse = user.doAs(new PrivilegedExceptionAction() { @Override public String run() throws Exception { @@ -367,8 +362,9 @@ public class TestSecureRESTServer { } } }); - if(responseCode == HttpURLConnection.HTTP_OK) { - ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + if (responseCode == HttpURLConnection.HTTP_OK) { + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class); assertEquals(1, model.getRows().size()); RowModel row = model.getRows().get(0); @@ -386,12 +382,12 @@ public class TestSecureRESTServer { @Test public void testDoAs() throws Exception { - testProxy("?doAs="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); + testProxy("?doAs=" + CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); } @Test public void testDoas() throws Exception { - testProxy("?doas="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, 
HttpURLConnection.HTTP_OK); + testProxy("?doas=" + CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); } @Test @@ -399,48 +395,44 @@ public class TestSecureRESTServer { testProxy("", WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_FORBIDDEN); } - @Test public void testNegativeAuthorization() throws Exception { - Pair pair = getClient(); + Pair pair = getClient(); CloseableHttpClient client = pair.getFirst(); HttpClientContext context = pair.getSecond(); StringEntity entity = new StringEntity( - "{\"name\":\"test\", \"ColumnSchema\":[{\"name\":\"f\"}]}", ContentType.APPLICATION_JSON); - HttpPut put = new HttpPut("http://localhost:"+ REST_TEST.getServletPort() + "/test/schema"); + "{\"name\":\"test\", \"ColumnSchema\":[{\"name\":\"f\"}]}", ContentType.APPLICATION_JSON); + HttpPut put = new HttpPut("http://localhost:" + REST_TEST.getServletPort() + "/test/schema"); put.setEntity(entity); - - UserGroupInformation unprivileged = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); + UserGroupInformation unprivileged = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); unprivileged.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (CloseableHttpResponse response = client.execute(put, context)) { final int statusCode = response.getStatusLine().getStatusCode(); HttpEntity entity = response.getEntity(); - assertEquals("Got response: "+ EntityUtils.toString(entity), - HttpURLConnection.HTTP_FORBIDDEN, statusCode); + assertEquals("Got response: " + EntityUtils.toString(entity), + HttpURLConnection.HTTP_FORBIDDEN, statusCode); } return null; } }); } - private Pair getClient() { + private Pair getClient() { HttpClientConnectionManager pool = new PoolingHttpClientConnectionManager(); HttpHost host = new HttpHost("localhost", REST_TEST.getServletPort()); - Registry authRegistry = - RegistryBuilder.create().register(AuthSchemes.SPNEGO, - new SPNegoSchemeFactory(true, true)).build(); + Registry authRegistry = RegistryBuilder. 
create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); credentialsProvider.setCredentials(AuthScope.ANY, EmptyCredentials.INSTANCE); AuthCache authCache = new BasicAuthCache(); - CloseableHttpClient client = HttpClients.custom() - .setDefaultAuthSchemeRegistry(authRegistry) - .setConnectionManager(pool).build(); + CloseableHttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) + .setConnectionManager(pool).build(); HttpClientContext context = HttpClientContext.create(); context.setTargetHost(host); @@ -454,10 +446,13 @@ public class TestSecureRESTServer { private static class EmptyCredentials implements Credentials { public static final EmptyCredentials INSTANCE = new EmptyCredentials(); - @Override public String getPassword() { + @Override + public String getPassword() { return null; } - @Override public Principal getUserPrincipal() { + + @Override + public Principal getUserPrincipal() { return null; } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java index 02611dfaf90..6a24d6ac59b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,16 +34,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestSecurityHeadersFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); + HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @After @@ -56,56 +55,53 @@ public class TestSecurityHeadersFilter { public void testDefaultValues() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); String path = "/version/cluster"; Response response = client.get(path); assertThat(response.getCode(), equalTo(200)); assertThat("Header 'X-Content-Type-Options' is missing from Rest response", - response.getHeader("X-Content-Type-Options"), is(not((String)null))); + response.getHeader("X-Content-Type-Options"), is(not((String) null))); assertThat("Header 'X-Content-Type-Options' has invalid default value", - response.getHeader("X-Content-Type-Options"), equalTo("nosniff")); + response.getHeader("X-Content-Type-Options"), equalTo("nosniff")); assertThat("Header 'X-XSS-Protection' is missing from Rest response", - response.getHeader("X-XSS-Protection"), is(not((String)null))); + response.getHeader("X-XSS-Protection"), 
is(not((String) null))); assertThat("Header 'X-XSS-Protection' has invalid default value", - response.getHeader("X-XSS-Protection"), equalTo("1; mode=block")); + response.getHeader("X-XSS-Protection"), equalTo("1; mode=block")); - assertThat("Header 'Strict-Transport-Security' should be missing from Rest response," + - "but it's present", - response.getHeader("Strict-Transport-Security"), is((String)null)); - assertThat("Header 'Content-Security-Policy' should be missing from Rest response," + - "but it's present", - response.getHeader("Content-Security-Policy"), is((String)null)); + assertThat("Header 'Strict-Transport-Security' should be missing from Rest response," + + "but it's present", response.getHeader("Strict-Transport-Security"), is((String) null)); + assertThat( + "Header 'Content-Security-Policy' should be missing from Rest response," + "but it's present", + response.getHeader("Content-Security-Policy"), is((String) null)); } @Test public void testHstsAndCspSettings() throws Exception { TEST_UTIL.getConfiguration().set("hbase.http.filter.hsts.value", - "max-age=63072000;includeSubDomains;preload"); + "max-age=63072000;includeSubDomains;preload"); TEST_UTIL.getConfiguration().set("hbase.http.filter.csp.value", - "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); String path = "/version/cluster"; Response response = client.get(path); assertThat(response.getCode(), equalTo(200)); assertThat("Header 'Strict-Transport-Security' is missing from Rest response", - response.getHeader("Strict-Transport-Security"), is(not((String)null))); + response.getHeader("Strict-Transport-Security"), is(not((String) null))); assertThat("Header 'Strict-Transport-Security' has invalid value", - response.getHeader("Strict-Transport-Security"), - equalTo("max-age=63072000;includeSubDomains;preload")); + response.getHeader("Strict-Transport-Security"), + equalTo("max-age=63072000;includeSubDomains;preload")); assertThat("Header 'Content-Security-Policy' is missing from Rest response", - response.getHeader("Content-Security-Policy"), is(not((String)null))); + response.getHeader("Content-Security-Policy"), is(not((String) null))); assertThat("Header 'Content-Security-Policy' has invalid value", - response.getHeader("Content-Security-Policy"), - equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + response.getHeader("Content-Security-Policy"), + equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index 5e4c2d7d159..28bf14d7a75 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,20 +46,19 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestStatusResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStatusResource.class); + HBaseClassTestRule.forClass(TestStatusResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class); private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1"); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -73,11 +72,11 @@ public class TestStatusResource { assertNotNull(model.getDeadNodes()); assertFalse(model.getLiveNodes().isEmpty()); boolean foundMeta = false; - for (StorageClusterStatusModel.Node node: model.getLiveNodes()) { + for (StorageClusterStatusModel.Node node : model.getLiveNodes()) { assertNotNull(node.getName()); assertTrue(node.getStartCode() > 0L); assertTrue(node.getRequests() >= 0); - for (StorageClusterStatusModel.Node.Region region: node.getRegions()) { + for (StorageClusterStatusModel.Node.Region region : node.getRegions()) { if (Bytes.equals(region.getName(), META_REGION_NAME)) { foundMeta = true; } @@ -116,9 +115,8 @@ public class TestStatusResource { Response response = client.get("/status/cluster", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - StorageClusterStatusModel model = (StorageClusterStatusModel) - context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + StorageClusterStatusModel model = (StorageClusterStatusModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); validate(model); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index b069693b99f..097d3566435 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,12 +58,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestTableResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableResource.class); + HBaseClassTestRule.forClass(TestTableResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableResource.class); @@ -74,8 +74,7 @@ public class TestTableResource { private static List regionMap; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; @@ -83,16 +82,12 @@ public class TestTableResource { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - TableModel.class, - TableInfoModel.class, - TableListModel.class, - TableRegionModel.class); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(TableModel.class, TableInfoModel.class, TableListModel.class, + TableRegionModel.class); TEST_UTIL.createMultiRegionTable(TABLE, Bytes.toBytes(COLUMN_FAMILY), NUM_REGIONS); byte[] k = new byte[3]; - byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN)); + byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN)); List puts = new ArrayList<>(); for (byte b1 = 'a'; b1 < 'z'; b1++) { for (byte b2 = 'a'; b2 < 'z'; b2++) { @@ -110,7 +105,7 @@ public class TestTableResource { Connection connection = TEST_UTIL.getConnection(); - Table table = connection.getTable(TABLE); + Table table = connection.getTable(TABLE); table.put(puts); table.close(); @@ -152,7 +147,7 @@ public class TestTableResource { TableRegionModel region = regions.next(); boolean found = false; LOG.debug("looking for region " + region.getName()); - for (HRegionLocation e: regionMap) { + for (HRegionLocation e : regionMap) { HRegionInfo hri = e.getRegionInfo(); // getRegionNameAsString uses Bytes.toStringBinary which escapes some non-printable // characters @@ -165,9 +160,8 @@ public class TestTableResource { byte[] endKey = hri.getEndKey(); ServerName serverName = e.getServerName(); InetSocketAddress sa = - new InetSocketAddress(serverName.getHostname(), serverName.getPort()); - String location = sa.getHostName() + ":" + - Integer.valueOf(sa.getPort()); + new InetSocketAddress(serverName.getHostname(), serverName.getPort()); + String location = sa.getHostName() + ":" + Integer.valueOf(sa.getPort()); assertEquals(hri.getRegionId(), region.getId()); assertTrue(Bytes.equals(startKey, region.getStartKey())); assertTrue(Bytes.equals(endKey, region.getEndKey())); @@ -191,9 +185,8 @@ public class TestTableResource { Response response = client.get("/", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - TableListModel model = (TableListModel) - context.createUnmarshaller() - .unmarshal(new 
ByteArrayInputStream(response.getBody())); + TableListModel model = (TableListModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); checkTableList(model); } @@ -229,12 +222,11 @@ public class TestTableResource { @Test public void testTableInfoXML() throws IOException, JAXBException { - Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - TableInfoModel model = (TableInfoModel) - context.createUnmarshaller() - .unmarshal(new ByteArrayInputStream(response.getBody())); + TableInfoModel model = (TableInfoModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); checkTableInfo(model); } @@ -271,4 +263,3 @@ public class TestTableResource { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java index f5b6b3e6f66..2e7a4af13b7 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,11 +74,11 @@ import org.xml.sax.XMLReader; import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestTableScan { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableScan.class); + HBaseClassTestRule.forClass(TestTableScan.class); private static final TableName TABLE = TableName.valueOf("TestScanResource"); private static final String CFA = "a"; @@ -93,8 +93,7 @@ public class TestTableScan { private static Configuration conf; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -102,8 +101,7 @@ public class TestTableScan { conf.set(Constants.CUSTOM_FILTERS, "CustomFilter:" + CustomFilter.class.getName()); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (!admin.tableExists(TABLE)) { HTableDescriptor htd = new HTableDescriptor(TABLE); @@ -133,8 +131,7 @@ public class TestTableScan { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=10"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); JAXBContext ctx = 
JAXBContext.newInstance(CellSetModel.class); @@ -144,13 +141,12 @@ public class TestTableScan { assertEquals(10, count); checkRowsNotNull(model); - //Test with no limit. + // Test with no limit. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); model = (CellSetModel) ush.unmarshal(response.getStream()); @@ -158,7 +154,7 @@ public class TestTableScan { assertEquals(expectedRows1, count); checkRowsNotNull(model); - //Test with start and end row. + // Test with start and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -167,8 +163,7 @@ public class TestTableScan { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); model = (CellSetModel) ush.unmarshal(response.getStream()); count = TestScannerResource.countCellSet(model); @@ -179,7 +174,7 @@ public class TestTableScan { assertEquals(24, count); checkRowsNotNull(model); - //Test with start row and limit. + // Test with start row and limit. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -188,8 +183,7 @@ public class TestTableScan { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=15"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); model = (CellSetModel) ush.unmarshal(response.getStream()); @@ -209,24 +203,22 @@ public class TestTableScan { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=2"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); int count = TestScannerResource.countCellSet(model); assertEquals(2, count); checkRowsNotNull(model); - //Test scanning with no limit. + // Test scanning with no limit. 
builder = new StringBuilder(); builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); model = mapper.readValue(response.getStream(), CellSetModel.class); @@ -234,7 +226,7 @@ public class TestTableScan { assertEquals(expectedRows2, count); checkRowsNotNull(model); - //Test with start row and end row. + // Test with start row and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -243,8 +235,7 @@ public class TestTableScan { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); model = mapper.readValue(response.getStream(), CellSetModel.class); RowModel startRow = model.getRows().get(0); @@ -268,12 +259,11 @@ public class TestTableScan { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=10"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - JAXBContext context = JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, - CellModel.class); + JAXBContext context = + JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, CellModel.class); Unmarshaller unmarshaller = context.createUnmarshaller(); final ClientSideCellSetModel.Listener listener = new ClientSideCellSetModel.Listener() { @@ -286,19 +276,19 @@ public class TestTableScan { // install the callback on all ClientSideCellSetModel instances unmarshaller.setListener(new Unmarshaller.Listener() { - @Override - public void beforeUnmarshal(Object target, Object parent) { - if (target instanceof ClientSideCellSetModel) { - ((ClientSideCellSetModel) target).setCellSetModelListener(listener); - } + @Override + public void beforeUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(listener); } + } - @Override - public void afterUnmarshal(Object target, Object parent) { - if (target instanceof ClientSideCellSetModel) { - ((ClientSideCellSetModel) target).setCellSetModelListener(null); - } + @Override + public void afterUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(null); } + } }); // create a new XML parser @@ -314,7 +304,7 @@ public class TestTableScan { @Test public void testStreamingJSON() throws Exception { - //Test with start row and end row. + // Test with start row and end row. 
StringBuilder builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -323,18 +313,17 @@ public class TestTableScan { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); int count = 0; - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); JsonFactory jfactory = new JsonFactory(mapper); JsonParser jParser = jfactory.createJsonParser(response.getStream()); boolean found = false; while (jParser.nextToken() != JsonToken.END_OBJECT) { - if(jParser.getCurrentToken() == JsonToken.START_OBJECT && found) { + if (jParser.getCurrentToken() == JsonToken.START_OBJECT && found) { RowModel row = jParser.readValueAs(RowModel.class); assertNotNull(row.getKey()); for (int i = 0; i < row.getCells().size(); i++) { @@ -362,14 +351,13 @@ public class TestTableScan { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=15"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_PROTOBUF); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); int rowCount = readProtobufStream(response.getStream()); assertEquals(15, rowCount); - //Test with start row and end row. + // Test with start row and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -378,8 +366,7 @@ public class TestTableScan { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_PROTOBUF); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_PROTOBUF); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); rowCount = readProtobufStream(response.getStream()); @@ -387,7 +374,7 @@ public class TestTableScan { } private void checkRowsNotNull(CellSetModel model) { - for (RowModel row: model.getRows()) { + for (RowModel row : model.getRows()) { assertTrue(row.getKey() != null); assertTrue(row.getCells().size() > 0); } @@ -399,7 +386,7 @@ public class TestTableScan { * @return The number of rows in the cell set model. * @throws IOException Signals that an I/O exception has occurred. 
*/ - public int readProtobufStream(InputStream inputStream) throws IOException{ + public int readProtobufStream(InputStream inputStream) throws IOException { DataInputStream stream = new DataInputStream(inputStream); CellSetModel model = null; int rowCount = 0; @@ -434,8 +421,7 @@ public class TestTableScan { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=a:test"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, @@ -457,8 +443,7 @@ public class TestTableScan { builder.append(Constants.SCAN_END_ROW + "=aay"); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("PrefixFilter('aab')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -466,7 +451,7 @@ public class TestTableScan { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("aab", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -474,10 +459,9 @@ public class TestTableScan { StringBuilder builder = new StringBuilder(); builder.append("/abc*"); builder.append("?"); - builder.append(Constants.SCAN_FILTER + "=" - + URLEncoder.encode("QualifierFilter(=,'binary:1')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + builder.append( + Constants.SCAN_FILTER + "=" + URLEncoder.encode("QualifierFilter(=,'binary:1')", "UTF-8")); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -485,7 +469,7 @@ public class TestTableScan { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -494,9 +478,8 @@ public class TestTableScan { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_FILTER + "=" - + URLEncoder.encode("PrefixFilter('abc') AND QualifierFilter(=,'binary:1')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + + URLEncoder.encode("PrefixFilter('abc') AND QualifierFilter(=,'binary:1')", "UTF-8")); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -504,7 +487,7 @@ public class TestTableScan { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new 
String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -515,8 +498,7 @@ public class TestTableScan { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -524,7 +506,7 @@ public class TestTableScan { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -535,8 +517,7 @@ public class TestTableScan { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -565,7 +546,7 @@ public class TestTableScan { assertEquals(24, count); List rowModels = model.getRows().subList(1, count); - //reversed + // reversed builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -590,9 +571,9 @@ public class TestTableScan { RowModel reversedRowModel = reversedRowModels.get(i); assertEquals(new String(rowModel.getKey(), "UTF-8"), - new String(reversedRowModel.getKey(), "UTF-8")); + new String(reversedRowModel.getKey(), "UTF-8")); assertEquals(new String(rowModel.getCells().get(0).getValue(), "UTF-8"), - new String(reversedRowModel.getCells().get(0).getValue(), "UTF-8")); + new String(reversedRowModel.getCells().get(0).getValue(), "UTF-8")); } } @@ -603,12 +584,11 @@ public class TestTableScan { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); int count = TestScannerResource.countCellSet(model); assertEquals(expectedRows3, count); @@ -624,12 +604,11 @@ public class TestTableScan { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), 
Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); model = mapper.readValue(response.getStream(), CellSetModel.class); count = TestScannerResource.countCellSet(model); assertEquals(expectedRows1 + expectedRows3, count); @@ -668,14 +647,13 @@ public class TestTableScan { * This list is not a real list; instead it will notify a listener whenever JAXB has * unmarshalled the next row. */ - @XmlElement(name="Row") + @XmlElement(name = "Row") private List row; static boolean listenerInvoked = false; /** - * Install a listener for row model on this object. If l is null, the listener - * is removed again. + * Install a listener for row model on this object. If l is null, the listener is removed again. */ public void setCellSetModelListener(final Listener l) { row = (l == null) ? null : new ArrayList() { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java index 403f0db011a..542d39ada9b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,18 +47,17 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestVersionResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVersionResource.class); + HBaseClassTestRule.forClass(TestVersionResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestVersionResource.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; @@ -66,11 +65,8 @@ public class TestVersionResource { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - VersionModel.class, - StorageClusterVersionModel.class); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(VersionModel.class, StorageClusterVersionModel.class); } @AfterClass @@ -123,9 +119,8 @@ public class TestVersionResource { Response response = client.get("/version", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - VersionModel model = (VersionModel) - context.createUnmarshaller().unmarshal( - 
new ByteArrayInputStream(response.getBody())); + VersionModel model = (VersionModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); validate(model); LOG.info("success retrieving Stargate version as XML"); } @@ -135,10 +130,9 @@ public class TestVersionResource { Response response = client.get("/version", Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(VersionModel.class, MediaType.APPLICATION_JSON_TYPE); - VersionModel model - = mapper.readValue(response.getBody(), VersionModel.class); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(VersionModel.class, + MediaType.APPLICATION_JSON_TYPE); + VersionModel model = mapper.readValue(response.getBody(), VersionModel.class); validate(model); LOG.info("success retrieving Stargate version as JSON"); } @@ -167,15 +161,12 @@ public class TestVersionResource { } @Test - public void testGetStorageClusterVersionXML() throws IOException, - JAXBException { - Response response = client.get("/version/cluster",Constants.MIMETYPE_XML); + public void testGetStorageClusterVersionXML() throws IOException, JAXBException { + Response response = client.get("/version/cluster", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - StorageClusterVersionModel clusterVersionModel = - (StorageClusterVersionModel) - context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + StorageClusterVersionModel clusterVersionModel = (StorageClusterVersionModel) context + .createUnmarshaller().unmarshal(new ByteArrayInputStream(response.getBody())); assertNotNull(clusterVersionModel); assertNotNull(clusterVersionModel.getVersion()); LOG.info("success retrieving storage cluster version as XML"); @@ -187,12 +178,11 @@ public class TestVersionResource { assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(StorageClusterVersionModel.class, MediaType.APPLICATION_JSON_TYPE); - StorageClusterVersionModel clusterVersionModel - = mapper.readValue(response.getBody(), StorageClusterVersionModel.class); + .locateMapper(StorageClusterVersionModel.class, MediaType.APPLICATION_JSON_TYPE); + StorageClusterVersionModel clusterVersionModel = + mapper.readValue(response.getBody(), StorageClusterVersionModel.class); assertNotNull(clusterVersionModel); assertNotNull(clusterVersionModel.getVersion()); LOG.info("success retrieving storage cluster version as JSON"); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java index ca1566cac37..9e076d433f0 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,23 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.client; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InterruptedIOException; - import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Unmarshaller; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; - import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.rest.Constants; import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; @@ -41,6 +35,7 @@ import org.apache.hadoop.hbase.rest.model.TableListModel; import org.apache.hadoop.hbase.rest.model.TableSchemaModel; import org.apache.hadoop.hbase.rest.model.VersionModel; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class RemoteAdmin { @@ -57,10 +52,7 @@ public class RemoteAdmin { private static volatile Unmarshaller versionClusterUnmarshaller; /** - * Constructor - * - * @param client - * @param conf + * Constructor nn */ public RemoteAdmin(Client client, Configuration conf) { this(client, conf, null); @@ -70,17 +62,14 @@ public class RemoteAdmin { if (versionClusterUnmarshaller == null) { - RemoteAdmin.versionClusterUnmarshaller = JAXBContext.newInstance( - StorageClusterVersionModel.class).createUnmarshaller(); + RemoteAdmin.versionClusterUnmarshaller = + JAXBContext.newInstance(StorageClusterVersionModel.class).createUnmarshaller(); } return RemoteAdmin.versionClusterUnmarshaller; } /** - * Constructor - * @param client - * @param conf - * @param accessToken + * Constructor nnn */ public RemoteAdmin(Client client, Configuration conf, String accessToken) { this.client = client; @@ -100,10 +89,8 @@ public class RemoteAdmin { } /** - * @return string representing the rest api's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @return string representing the rest api's version n * if the endpoint does not exist, there is + * a timeout, or some other general failure mode */ public VersionModel getRestVersion() throws IOException { @@ -118,26 +105,24 @@ public class RemoteAdmin { int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: + case 200: - VersionModel v = new VersionModel(); - return (VersionModel) v.getObjectFromMessage(response.getBody()); - case 404: - throw new IOException("REST version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() - + " returned " + code); + VersionModel v = new VersionModel(); + return (VersionModel) v.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("REST version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " returned " + code); } } throw new IOException("get request to " + 
path.toString() + " timed out"); @@ -145,50 +130,47 @@ public class RemoteAdmin { /** * @return string representing the cluster's version - * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode + * @throws IOException if the endpoint does not exist, there is a timeout, or some other general + * failure mode */ public StorageClusterStatusModel getClusterStatus() throws IOException { - StringBuilder path = new StringBuilder(); + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); path.append('/'); - if (accessToken !=null) { - path.append(accessToken); - path.append('/'); - } + } path.append("status/cluster"); int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - StorageClusterStatusModel s = new StorageClusterStatusModel(); - return (StorageClusterStatusModel) s.getObjectFromMessage(response - .getBody()); - case 404: - throw new IOException("Cluster version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path + " returned " + code); + case 200: + StorageClusterStatusModel s = new StorageClusterStatusModel(); + return (StorageClusterStatusModel) s.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("Cluster version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path + " returned " + code); } } throw new IOException("get request to " + path + " timed out"); } /** - * @return string representing the cluster's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @return string representing the cluster's version n * if the endpoint does not exist, there is + * a timeout, or some other general failure mode */ public StorageClusterVersionModel getClusterVersion() throws IOException { @@ -206,32 +188,30 @@ public class RemoteAdmin { Response response = client.get(path.toString(), Constants.MIMETYPE_XML); code = response.getCode(); switch (code) { - case 200: - try { + case 200: + try { - return (StorageClusterVersionModel) getUnmarsheller().unmarshal( - getInputStream(response)); - } catch (JAXBException jaxbe) { + return (StorageClusterVersionModel) getUnmarsheller() + .unmarshal(getInputStream(response)); + } catch (JAXBException jaxbe) { - throw new IOException( - "Issue parsing StorageClusterVersionModel object in XML form: " - + jaxbe.getLocalizedMessage(), jaxbe); - } - case 404: - throw new IOException("Cluster version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException(path.toString() + " request returned " + code); + throw new IOException("Issue parsing StorageClusterVersionModel object in XML form: " + + jaxbe.getLocalizedMessage(), jaxbe); + } + case 404: + throw new IOException("Cluster version not found"); 
+ case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException(path.toString() + " request returned " + code); } } - throw new IOException("get request to " + path.toString() - + " request timed out"); + throw new IOException("get request to " + path.toString() + " request timed out"); } /** @@ -254,19 +234,19 @@ public class RemoteAdmin { Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - return true; - case 404: - return false; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() + " returned " + code); + case 200: + return true; + case 404: + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " returned " + code); } } throw new IOException("get request to " + path.toString() + " timed out"); @@ -277,8 +257,7 @@ public class RemoteAdmin { * @param desc table descriptor for table * @throws IOException if a remote or network exception occurs */ - public void createTable(HTableDescriptor desc) - throws IOException { + public void createTable(HTableDescriptor desc) throws IOException { TableSchemaModel model = new TableSchemaModel(desc); StringBuilder path = new StringBuilder(); path.append('/'); @@ -291,21 +270,21 @@ public class RemoteAdmin { path.append("schema"); int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.put(path.toString(), Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput()); + Response response = + client.put(path.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); code = response.getCode(); switch (code) { - case 201: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("create request to " + path.toString() + " returned " + code); + case 201: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("create request to " + path.toString() + " returned " + code); } } throw new IOException("create request to " + path.toString() + " timed out"); @@ -325,7 +304,7 @@ public class RemoteAdmin { * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs */ - public void deleteTable(final byte [] tableName) throws IOException { + public void deleteTable(final byte[] tableName) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); if (accessToken != null) { @@ -340,27 +319,25 @@ public class RemoteAdmin { Response response = client.delete(path.toString()); code = response.getCode(); switch (code) { - case 200: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new 
IOException("delete request to " + path.toString() + " returned " + code); + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("delete request to " + path.toString() + " returned " + code); } } throw new IOException("delete request to " + path.toString() + " timed out"); } /** - * @return string representing the cluster's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @return string representing the cluster's version n * if the endpoint does not exist, there is + * a timeout, or some other general failure mode */ public TableListModel getTableList() throws IOException { @@ -375,34 +352,30 @@ public class RemoteAdmin { for (int i = 0; i < maxRetries; i++) { // Response response = client.get(path.toString(), // Constants.MIMETYPE_XML); - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - TableListModel t = new TableListModel(); - return (TableListModel) t.getObjectFromMessage(response.getBody()); - case 404: - throw new IOException("Table list not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() - + " request returned " + code); + case 200: + TableListModel t = new TableListModel(); + return (TableListModel) t.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("Table list not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " request returned " + code); } } - throw new IOException("get request to " + path.toString() - + " request timed out"); + throw new IOException("get request to " + path.toString() + " request timed out"); } /** * Convert the REST server's response to an XML reader. - * * @param response The REST server's response. * @return A reader over the parsed XML document. * @throws IOException If the document fails to parse diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 86de2399953..714b4da2661 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.client; import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.Service; import com.google.protobuf.ServiceException; - import java.io.IOException; import java.io.InterruptedIOException; import java.io.UnsupportedEncodingException; @@ -46,15 +43,11 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; @@ -70,6 +63,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.rest.Constants; @@ -80,6 +74,9 @@ import org.apache.hadoop.hbase.rest.model.ScannerModel; import org.apache.hadoop.hbase.rest.model.TableSchemaModel; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -98,8 +95,8 @@ public class RemoteHTable implements Table { final long sleepTime; @SuppressWarnings("rawtypes") - protected String buildRowSpec(final byte[] row, final Map familyMap, - final long startTime, final long endTime, final int maxVersions) { + protected String buildRowSpec(final byte[] row, final Map familyMap, final long startTime, + final long endTime, final int maxVersions) { StringBuffer sb = new StringBuffer(); sb.append('/'); sb.append(Bytes.toString(name)); @@ -110,15 +107,15 @@ public class RemoteHTable implements Table { Iterator i = familyMap.entrySet().iterator(); sb.append('/'); while (i.hasNext()) { - Map.Entry e = (Map.Entry)i.next(); - Collection quals = (Collection)e.getValue(); + Map.Entry e = (Map.Entry) i.next(); + Collection quals = (Collection) e.getValue(); if (quals == null || quals.isEmpty()) { // this is an unqualified family. 
append the family name and NO ':' - sb.append(toURLEncodedBytes((byte[])e.getKey())); + sb.append(toURLEncodedBytes((byte[]) e.getKey())); } else { Iterator ii = quals.iterator(); while (ii.hasNext()) { - sb.append(toURLEncodedBytes((byte[])e.getKey())); + sb.append(toURLEncodedBytes((byte[]) e.getKey())); Object o = ii.next(); // Puts use byte[] but Deletes use KeyValue if (o instanceof byte[]) { @@ -169,7 +166,7 @@ public class RemoteHTable implements Table { return sb.toString(); } sb.append("?"); - for(int i=0; i results = new ArrayList<>(); - for (RowModel row: model.getRows()) { + for (RowModel row : model.getRows()) { List kvs = new ArrayList<>(row.getCells().size()); - for (CellModel cell: row.getCells()) { + for (CellModel cell : row.getCells()) { byte[][] split = CellUtil.parseColumn(cell.getColumn()); byte[] column = split[0]; byte[] qualifier = null; @@ -198,8 +195,8 @@ public class RemoteHTable implements Table { } else { throw new IllegalArgumentException("Invalid familyAndQualifier provided."); } - kvs.add(new KeyValue(row.getKey(), column, qualifier, - cell.getTimestamp(), cell.getValue())); + kvs + .add(new KeyValue(row.getKey(), column, qualifier, cell.getTimestamp(), cell.getValue())); } results.add(Result.create(kvs)); } @@ -209,11 +206,10 @@ public class RemoteHTable implements Table { protected CellSetModel buildModelFromPut(Put put) { RowModel row = new RowModel(put.getRow()); long ts = put.getTimestamp(); - for (List cells: put.getFamilyCellMap().values()) { - for (Cell cell: cells) { + for (List cells : put.getFamilyCellMap().values()) { + for (Cell cell : cells) { row.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(), - CellUtil.cloneValue(cell))); + ts != HConstants.LATEST_TIMESTAMP ? 
ts : cell.getTimestamp(), CellUtil.cloneValue(cell))); } } CellSetModel model = new CellSetModel(); @@ -272,19 +268,19 @@ public class RemoteHTable implements Table { Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF); int code = response.getCode(); switch (code) { - case 200: - TableSchemaModel schema = new TableSchemaModel(); - schema.getObjectFromMessage(response.getBody()); - return schema.getTableDescriptor(); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("schema request returned " + code); + case 200: + TableSchemaModel schema = new TableSchemaModel(); + schema.getObjectFromMessage(response.getBody()); + return schema.getTableDescriptor(); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("schema request returned " + code); } } throw new IOException("schema request timed out"); @@ -298,8 +294,8 @@ public class RemoteHTable implements Table { @Override public Result get(Get get) throws IOException { TimeRange range = get.getTimeRange(); - String spec = buildRowSpec(get.getRow(), get.getFamilyMap(), - range.getMin(), range.getMax(), get.getMaxVersions()); + String spec = buildRowSpec(get.getRow(), get.getFamilyMap(), range.getMin(), range.getMax(), + get.getMaxVersions()); if (get.getFilter() != null) { LOG.warn("filters not supported on gets"); } @@ -320,12 +316,13 @@ public class RemoteHTable implements Table { int maxVersions = 1; int count = 0; - for(Get g:gets) { + for (Get g : gets) { - if ( count == 0 ) { + if (count == 0) { maxVersions = g.getMaxVersions(); } else if (g.getMaxVersions() != maxVersions) { - LOG.warn("MaxVersions on Gets do not match, using the first in the list ("+maxVersions+")"); + LOG.warn( + "MaxVersions on Gets do not match, using the first in the list (" + maxVersions + ")"); } if (g.getFilter() != null) { @@ -333,7 +330,7 @@ public class RemoteHTable implements Table { } rows[count] = g.getRow(); - count ++; + count++; } String spec = buildMultiRowSpec(rows, maxVersions); @@ -350,7 +347,7 @@ public class RemoteHTable implements Table { CellSetModel model = new CellSetModel(); model.getObjectFromMessage(response.getBody()); Result[] results = buildResultFromModel(model); - if ( results.length > 0) { + if (results.length > 0) { return results; } // fall through @@ -361,7 +358,7 @@ public class RemoteHTable implements Table { try { Thread.sleep(sleepTime); } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } break; default: @@ -397,21 +394,21 @@ public class RemoteHTable implements Table { sb.append('/'); sb.append(toURLEncodedBytes(put.getRow())); for (int i = 0; i < maxRetries; i++) { - Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput()); + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { - case 200: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("put request 
failed with " + code); + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("put request failed with " + code); } } throw new IOException("put request timed out"); @@ -423,24 +420,24 @@ public class RemoteHTable implements Table { // ignores the row specification in the URI // separate puts by row - TreeMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Put put: puts) { + TreeMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Put put : puts) { byte[] row = put.getRow(); List cells = map.get(row); if (cells == null) { cells = new ArrayList<>(); map.put(row, cells); } - for (List l: put.getFamilyCellMap().values()) { + for (List l : put.getFamilyCellMap().values()) { cells.addAll(l); } } // build the cell set CellSetModel model = new CellSetModel(); - for (Map.Entry> e: map.entrySet()) { + for (Map.Entry> e : map.entrySet()) { RowModel row = new RowModel(e.getKey()); - for (Cell cell: e.getValue()) { + for (Cell cell : e.getValue()) { row.addCell(new CellModel(cell)); } model.addRow(row); @@ -452,21 +449,21 @@ public class RemoteHTable implements Table { sb.append(Bytes.toString(name)); sb.append("/$multiput"); // can be any nonexistent row for (int i = 0; i < maxRetries; i++) { - Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput()); + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { - case 200: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("multiput request failed with " + code); + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("multiput request failed with " + code); } } throw new IOException("multiput request timed out"); @@ -474,23 +471,23 @@ public class RemoteHTable implements Table { @Override public void delete(Delete delete) throws IOException { - String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), - delete.getTimestamp(), delete.getTimestamp(), 1); + String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), delete.getTimestamp(), + delete.getTimestamp(), 1); for (int i = 0; i < maxRetries; i++) { Response response = client.delete(spec); int code = response.getCode(); switch (code) { - case 200: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("delete request failed with " + code); + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("delete request failed with " + code); } } throw new IOException("delete request timed out"); @@ -498,7 +495,7 @@ public class RemoteHTable implements Table { @Override public void delete(List deletes) throws IOException { - for (Delete delete: deletes) { + for (Delete delete : deletes) { 
delete(delete); } } @@ -529,22 +526,22 @@ public class RemoteHTable implements Table { sb.append('/'); sb.append("scanner"); for (int i = 0; i < maxRetries; i++) { - Response response = client.post(sb.toString(), - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = + client.post(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { - case 201: - uri = response.getLocation(); - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("scan request failed with " + code); + case 201: + uri = response.getLocation(); + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("scan request failed with " + code); } } throw new IOException("scan request timed out"); @@ -556,26 +553,25 @@ public class RemoteHTable implements Table { sb.append("?n="); sb.append(nbRows); for (int i = 0; i < maxRetries; i++) { - Response response = client.get(sb.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF); int code = response.getCode(); switch (code) { - case 200: - CellSetModel model = new CellSetModel(); - model.getObjectFromMessage(response.getBody()); - return buildResultFromModel(model); - case 204: - case 206: - return null; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("scanner.next request failed with " + code); + case 200: + CellSetModel model = new CellSetModel(); + model.getObjectFromMessage(response.getBody()); + return buildResultFromModel(model); + case 204: + case 206: + return null; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("scanner.next request failed with " + code); } } throw new IOException("scanner.next request timed out"); @@ -664,8 +660,7 @@ public class RemoteHTable implements Table { } @Override - public ResultScanner getScanner(byte[] family, byte[] qualifier) - throws IOException { + public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException { Scan scan = new Scan(); scan.addColumn(family, qualifier); return new Scanner(scan); @@ -677,13 +672,13 @@ public class RemoteHTable implements Table { @Override @Deprecated - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Put put) throws IOException { + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) + throws IOException { return doCheckAndPut(row, family, qualifier, value, put); } - private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Put put) throws IOException { + private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) + throws IOException { // column to check-the-value put.add(new KeyValue(row, family, qualifier, value)); @@ -696,23 +691,23 @@ public class RemoteHTable implements Table { sb.append("?check=put"); for (int i = 0; i < maxRetries; i++) { - 
Response response = client.put(sb.toString(), - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { - case 200: - return true; - case 304: // NOT-MODIFIED - return false; - case 509: - try { - Thread.sleep(sleepTime); - } catch (final InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("checkAndPut request failed with " + code); + case 200: + return true; + case 304: // NOT-MODIFIED + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (final InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("checkAndPut request failed with " + code); } } throw new IOException("checkAndPut request timed out"); @@ -720,26 +715,26 @@ public class RemoteHTable implements Table { @Override @Deprecated - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Put put) throws IOException { + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Put put) throws IOException { throw new IOException("checkAndPut for non-equal comparison not implemented"); } @Override @Deprecated - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOperator compareOp, byte[] value, Put put) throws IOException { + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator compareOp, + byte[] value, Put put) throws IOException { throw new IOException("checkAndPut for non-equal comparison not implemented"); } @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException { + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, + Delete delete) throws IOException { return doCheckAndDelete(row, family, qualifier, value, delete); } - private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException { + private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, + Delete delete) throws IOException { Put put = new Put(row); put.setFamilyCellMap(delete.getFamilyCellMap()); // column to check-the-value @@ -753,23 +748,23 @@ public class RemoteHTable implements Table { sb.append("?check=delete"); for (int i = 0; i < maxRetries; i++) { - Response response = client.put(sb.toString(), - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { - case 200: - return true; - case 304: // NOT-MODIFIED - return false; - case 509: - try { - Thread.sleep(sleepTime); - } catch (final InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("checkAndDelete request failed with " + code); + case 200: + return true; + case 304: // NOT-MODIFIED + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (final InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new 
IOException("checkAndDelete request failed with " + code); } } throw new IOException("checkAndDelete request timed out"); @@ -777,15 +772,15 @@ public class RemoteHTable implements Table { @Override @Deprecated - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Delete delete) throws IOException { + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Delete delete) throws IOException { throw new IOException("checkAndDelete for non-equal comparison not implemented"); } @Override @Deprecated public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOperator compareOp, byte[] value, Delete delete) throws IOException { + CompareOperator compareOp, byte[] value, Delete delete) throws IOException { throw new IOException("checkAndDelete for non-equal comparison not implemented"); } @@ -801,15 +796,15 @@ public class RemoteHTable implements Table { @Override @Deprecated - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, RowMutations rm) throws IOException { + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, RowMutations rm) throws IOException { throw new UnsupportedOperationException("checkAndMutate not implemented"); } @Override @Deprecated public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareOperator compareOp, byte[] value, RowMutations rm) throws IOException { + CompareOperator compareOp, byte[] value, RowMutations rm) throws IOException { throw new UnsupportedOperationException("checkAndMutate not implemented"); } @@ -833,14 +828,14 @@ public class RemoteHTable implements Table { } @Override - public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount) throws IOException { + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) + throws IOException { throw new IOException("incrementColumnValue not supported"); } @Override - public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, Durability durability) throws IOException { + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, + Durability durability) throws IOException { throw new IOException("incrementColumnValue not supported"); } @@ -851,7 +846,7 @@ public class RemoteHTable implements Table { @Override public void batchCallback(List actions, Object[] results, - Batch.Callback callback) throws IOException, InterruptedException { + Batch.Callback callback) throws IOException, InterruptedException { throw new IOException("batchCallback not supported"); } @@ -861,16 +856,15 @@ public class RemoteHTable implements Table { } @Override - public Map coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable) - throws ServiceException, Throwable { + public Map coprocessorService(Class service, byte[] startKey, + byte[] endKey, Batch.Call callable) throws ServiceException, Throwable { throw new UnsupportedOperationException("coprocessorService not implemented"); } @Override - public void coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable, Batch.Callback callback) - throws ServiceException, Throwable { + public void coprocessorService(Class service, byte[] startKey, + byte[] endKey, Batch.Call callable, Batch.Callback callback) + throws 
ServiceException, Throwable { throw new UnsupportedOperationException("coprocessorService not implemented"); } @@ -881,16 +875,15 @@ public class RemoteHTable implements Table { @Override public Map batchCoprocessorService( - Descriptors.MethodDescriptor method, Message request, - byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable { + Descriptors.MethodDescriptor method, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { throw new UnsupportedOperationException("batchCoprocessorService not implemented"); } @Override - public void batchCoprocessorService( - Descriptors.MethodDescriptor method, Message request, - byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) - throws ServiceException, Throwable { + public void batchCoprocessorService(Descriptors.MethodDescriptor method, + Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) + throws ServiceException, Throwable { throw new UnsupportedOperationException("batchCoprocessorService not implemented"); } @@ -963,11 +956,9 @@ public class RemoteHTable implements Table { } /* - * Only a small subset of characters are valid in URLs. - * - * Row keys, column families, and qualifiers cannot be appended to URLs without first URL - * escaping. Table names are ok because they can only contain alphanumeric, ".","_", and "-" - * which are valid characters in URLs. + * Only a small subset of characters are valid in URLs. Row keys, column families, and qualifiers + * cannot be appended to URLs without first URL escaping. Table names are ok because they can only + * contain alphanumeric, ".","_", and "-" which are valid characters in URLs. */ private static String toURLEncodedBytes(byte[] row) { try { @@ -991,8 +982,8 @@ public class RemoteHTable implements Table { @Override public CheckAndMutateBuilder qualifier(byte[] qualifier) { - this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" + - " an empty byte array, or just do not call this method if you want a null qualifier"); + this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. 
Consider using" + + " an empty byte array, or just do not call this method if you want a null qualifier"); return this; } @@ -1003,8 +994,8 @@ public class RemoteHTable implements Table { @Override public CheckAndMutateBuilder ifNotExists() { - throw new UnsupportedOperationException("CheckAndMutate for non-equal comparison " - + "not implemented"); + throw new UnsupportedOperationException( + "CheckAndMutate for non-equal comparison " + "not implemented"); } @Override @@ -1013,8 +1004,8 @@ public class RemoteHTable implements Table { this.value = Preconditions.checkNotNull(value, "value is null"); return this; } else { - throw new UnsupportedOperationException("CheckAndMutate for non-equal comparison " + - "not implemented"); + throw new UnsupportedOperationException( + "CheckAndMutate for non-equal comparison " + "not implemented"); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java index adb5ae6abb6..90bfc2b7142 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,12 +45,12 @@ import org.junit.experimental.categories.Category; /** * Tests {@link RemoteAdmin} retries. */ -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestRemoteAdminRetries { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoteAdminRetries.class); + HBaseClassTestRule.forClass(TestRemoteAdminRetries.class); private static final int SLEEP_TIME = 50; private static final int RETRIES = 3; @@ -78,7 +78,7 @@ public class TestRemoteAdminRetries { } @Test - public void testFailingGetRestVersion() throws Exception { + public void testFailingGetRestVersion() throws Exception { testTimedOutGetCall(new CallExecutor() { @Override public void run() throws Exception { @@ -88,7 +88,7 @@ public class TestRemoteAdminRetries { } @Test - public void testFailingGetClusterStatus() throws Exception { + public void testFailingGetClusterStatus() throws Exception { testTimedOutGetCall(new CallExecutor() { @Override public void run() throws Exception { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java index 6338df83f3f..15219f19104 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,11 +49,11 @@ import org.junit.experimental.categories.Category; /** * Test RemoteHTable retries. 
*/ -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestRemoteHTableRetries { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoteHTableRetries.class); + HBaseClassTestRule.forClass(TestRemoteHTableRetries.class); private static final int SLEEP_TIME = 50; private static final int RETRIES = 3; @@ -75,17 +75,14 @@ public class TestRemoteHTableRetries { Response response = new Response(509); when(client.get(anyString(), anyString())).thenReturn(response); when(client.delete(anyString())).thenReturn(response); - when(client.put(anyString(), anyString(), any())).thenReturn( - response); - when(client.post(anyString(), anyString(), any())).thenReturn( - response); + when(client.put(anyString(), anyString(), any())).thenReturn(response); + when(client.post(anyString(), anyString(), any())).thenReturn(response); Configuration configuration = TEST_UTIL.getConfiguration(); configuration.setInt("hbase.rest.client.max.retries", RETRIES); configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME); - remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(), - "MyTable"); + remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(), "MyTable"); } @After @@ -156,8 +153,8 @@ public class TestRemoteHTableRetries { public void run() throws Exception { Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); - remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenPut(put); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenPut(put); } }); verify(client, times(RETRIES)).put(anyString(), anyString(), any()); @@ -170,9 +167,9 @@ public class TestRemoteHTableRetries { public void run() throws Exception { Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); - Delete delete= new Delete(ROW_1); - remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenDelete(delete); + Delete delete = new Delete(ROW_1); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenDelete(delete); } }); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index 6c904697004..3a19934d749 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,19 +59,19 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestRemoteTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoteTable.class); + HBaseClassTestRule.forClass(TestRemoteTable.class); // Verify that invalid URL characters and arbitrary bytes are escaped when // constructing REST URLs per HBASE-7621. RemoteHTable should support row keys // and qualifiers containing any byte for all table operations. 
private static final String INVALID_URL_CHARS_1 = - "|\"\\^{}\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009\u000B\u000C"; + "|\"\\^{}\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009\u000B\u000C"; - // HColumnDescriptor prevents certain characters in column names. The following + // HColumnDescriptor prevents certain characters in column names. The following // are examples of characters are allowed in column names but are not valid in // URLs. private static final String INVALID_URL_CHARS_2 = "|^{}\u0242"; @@ -80,12 +80,12 @@ public class TestRemoteTable { private static final String VALID_TABLE_NAME_CHARS = "_-."; private static final TableName TABLE = - TableName.valueOf("TestRemoteTable" + VALID_TABLE_NAME_CHARS); + TableName.valueOf("TestRemoteTable" + VALID_TABLE_NAME_CHARS); private static final byte[] ROW_1 = Bytes.toBytes("testrow1" + INVALID_URL_CHARS_1); private static final byte[] ROW_2 = Bytes.toBytes("testrow2" + INVALID_URL_CHARS_1); private static final byte[] ROW_3 = Bytes.toBytes("testrow3" + INVALID_URL_CHARS_1); - private static final byte[] ROW_4 = Bytes.toBytes("testrow4"+ INVALID_URL_CHARS_1); + private static final byte[] ROW_4 = Bytes.toBytes("testrow4" + INVALID_URL_CHARS_1); private static final byte[] COLUMN_1 = Bytes.toBytes("a" + INVALID_URL_CHARS_2); private static final byte[] COLUMN_2 = Bytes.toBytes("b" + INVALID_URL_CHARS_2); @@ -101,8 +101,7 @@ public class TestRemoteTable { private static final long TS_1 = TS_2 - ONE_HOUR; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private RemoteHTable remoteTable; @BeforeClass @@ -112,7 +111,7 @@ public class TestRemoteTable { } @Before - public void before() throws Exception { + public void before() throws Exception { Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { if (admin.isTableEnabled(TABLE)) { @@ -136,9 +135,8 @@ public class TestRemoteTable { put.addColumn(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2); table.put(put); } - remoteTable = new RemoteHTable( - new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())), + remoteTable = + new RemoteHTable(new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), TEST_UTIL.getConfiguration(), TABLE.toBytes()); } @@ -252,7 +250,7 @@ public class TestRemoteTable { get.setMaxVersions(2); result = remoteTable.get(get); int count = 0; - for (Cell kv: result.listCells()) { + for (Cell kv : result.listCells()) { if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_1 == kv.getTimestamp()) { assertTrue(CellUtil.matchingValue(kv, VALUE_1)); // @TS_1 count++; @@ -276,7 +274,7 @@ public class TestRemoteTable { assertEquals(1, results[0].size()); assertEquals(2, results[1].size()); - //Test Versions + // Test Versions gets = new ArrayList<>(2); Get g = new Get(ROW_1); g.setMaxVersions(3); @@ -288,7 +286,7 @@ public class TestRemoteTable { assertEquals(1, results[0].size()); assertEquals(3, results[1].size()); - //404 + // 404 gets = new ArrayList<>(1); gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE"))); results = remoteTable.get(gets); @@ -346,7 +344,7 @@ public class TestRemoteTable { assertTrue(Bytes.equals(VALUE_2, value)); assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable" + VALID_TABLE_NAME_CHARS), - remoteTable.getTableName())); + remoteTable.getTableName())); } @Test @@ -482,7 
+480,7 @@ public class TestRemoteTable { scanner.close(); - scanner = remoteTable.getScanner(COLUMN_1,QUALIFIER_1); + scanner = remoteTable.getScanner(COLUMN_1, QUALIFIER_1); results = scanner.next(4); assertNotNull(results); assertEquals(4, results.length); @@ -507,18 +505,18 @@ public class TestRemoteTable { assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length); Delete delete = new Delete(ROW_1); - remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenDelete(delete); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenDelete(delete); assertFalse(remoteTable.exists(get)); Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); remoteTable.put(put); - assertTrue(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenPut(put)); - assertFalse(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_2).thenPut(put)); + assertTrue(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenPut(put)); + assertFalse(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_2) + .thenPut(put)); } /** @@ -556,7 +554,7 @@ public class TestRemoteTable { * Test a some methods of class Response. */ @Test - public void testResponse(){ + public void testResponse() { Response response = new Response(200); assertEquals(200, response.getCode()); Header[] headers = new Header[2]; @@ -579,7 +577,6 @@ public class TestRemoteTable { /** * Tests keeping a HBase scanner alive for long periods of time. Each call to next() should reset * the ConnectionCache timeout for the scanner's connection. - * * @throws Exception if starting the servlet container or disabling or truncating the table fails */ @Test @@ -602,8 +599,8 @@ public class TestRemoteTable { TEST_UTIL.getHBaseAdmin().disableTable(TABLE); TEST_UTIL.getHBaseAdmin().truncateTable(TABLE, false); - remoteTable = new RemoteHTable( - new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), + remoteTable = + new RemoteHTable(new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), TEST_UTIL.getConfiguration(), TABLE.toBytes()); String row = "testrow"; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java index 26190f66f47..2618ff54180 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,14 +46,14 @@ public class TestXmlParsing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestXmlParsing.class); + HBaseClassTestRule.forClass(TestXmlParsing.class); private static final Logger LOG = LoggerFactory.getLogger(TestXmlParsing.class); @Test public void testParsingClusterVersion() throws Exception { final String xml = "" - + ""; + + ""; Client client = mock(Client.class); RemoteAdmin admin = new RemoteAdmin(client, HBaseConfiguration.create(), null); Response resp = new Response(200, null, Bytes.toBytes(xml)); @@ -66,10 +66,9 @@ public class TestXmlParsing { @Test public void testFailOnExternalEntities() throws Exception { - final String externalEntitiesXml = - "" - + " ] >" - + " &xee;"; + final String externalEntitiesXml = "" + + " ] >" + + " &xee;"; Client client = mock(Client.class); RemoteAdmin admin = new RemoteAdmin(client, HBaseConfiguration.create(), null); Response resp = new Response(200, null, Bytes.toBytes(externalEntitiesXml)); @@ -80,9 +79,9 @@ public class TestXmlParsing { admin.getClusterVersion(); fail("Expected getClusterVersion() to throw an exception"); } catch (IOException e) { - assertEquals("Cause of exception ought to be a failure to parse the stream due to our " + - "invalid external entity. Make sure this isn't just a false positive due to " + - "implementation. see HBASE-19020.", UnmarshalException.class, e.getCause().getClass()); + assertEquals("Cause of exception ought to be a failure to parse the stream due to our " + + "invalid external entity. Make sure this isn't just a false positive due to " + + "implementation. see HBASE-19020.", UnmarshalException.class, e.getCause().getClass()); final String exceptionText = StringUtils.stringifyException(e); final String expectedText = "\"xee\""; LOG.debug("exception text: '" + exceptionText + "'", e); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java index b8305d56a18..82909170857 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestCellModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellModel.class); + HBaseClassTestRule.forClass(TestCellModel.class); private static final long TIMESTAMP = 1245219839331L; private static final byte[] COLUMN = Bytes.toBytes("testcolumn"); @@ -45,11 +45,9 @@ public class TestCellModel extends TestModelBase { public TestCellModel() throws Exception { super(CellModel.class); - AS_XML = - "dGVzdHZhbHVl"; - AS_PB = - "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl"; + AS_XML = "dGVzdHZhbHVl"; + AS_PB = "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl"; AS_JSON = "{\"column\":\"dGVzdGNvbHVtbg==\",\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVl\"}"; @@ -108,4 +106,3 @@ public class TestCellModel extends TestModelBase { assertTrue(StringUtils.contains(cellModel.toString(), expectedColumn)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java index 1d40effb47d..38f0f43c0ce 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestCellSetModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellSetModel.class); + HBaseClassTestRule.forClass(TestCellSetModel.class); private static final byte[] ROW1 = Bytes.toBytes("testrow1"); private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1"); @@ -50,41 +50,30 @@ public class TestCellSetModel extends TestModelBase { public TestCellSetModel() throws Exception { super(CellSetModel.class); - AS_XML = - "" + - "" + - "" + - "dGVzdHZhbHVlMQ==" + - "" + - "" + - "" + - "dGVzdHZhbHVlMg==" + - "" + - "dGVzdHZhbHVlMw==" + - "" + - ""; + AS_XML = "" + "" + + "" + "dGVzdHZhbHVlMQ==" + + "" + "" + + "" + "dGVzdHZhbHVlMg==" + + "" + "dGVzdHZhbHVlMw==" + + "" + ""; - AS_PB = - "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" + - "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" + - "Igp0ZXN0dmFsdWUz"; + AS_PB = "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" + + "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" + + "Igp0ZXN0dmFsdWUz"; - AS_XML = - "" + - "" + - "dGVzdHZhbHVlMQ==" + - "" + - "dGVzdHZhbHVlMg==" + - "dGVzdHZhbHVlMw==" + - ""; + AS_XML = "" + + "" + + "dGVzdHZhbHVlMQ==" + + "" + "dGVzdHZhbHVlMg==" + + "dGVzdHZhbHVlMw==" + + ""; - AS_JSON = - "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," + - "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," + - 
"\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," + - "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," + - "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," + - "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}"; + AS_JSON = "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," + + "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," + + "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," + + "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}"; } @Override @@ -147,4 +136,3 @@ public class TestCellSetModel extends TestModelBase { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java index a52358cbe52..0001abe02d0 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,12 +27,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestColumnSchemaModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestColumnSchemaModel.class); + HBaseClassTestRule.forClass(TestColumnSchemaModel.class); protected static final String COLUMN_NAME = "testcolumn"; protected static final boolean BLOCKCACHE = true; @@ -45,15 +45,13 @@ public class TestColumnSchemaModel extends TestModelBase { public TestColumnSchemaModel() throws Exception { super(ColumnSchemaModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_JSON = - "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," + - "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," + - "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}"; + AS_JSON = "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," + + "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," + + "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}"; } @Override @@ -87,4 +85,3 @@ public class TestColumnSchemaModel extends TestModelBase { public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java index 63124113da5..4cfe70e0639 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,8 +52,7 @@ public abstract class TestModelBase { super(); this.clazz = clazz; context = new JAXBContextResolver().getContext(clazz); - mapper = new JacksonJaxbJsonProvider().locateMapper(clazz, - MediaType.APPLICATION_JSON_TYPE); + mapper = new JacksonJaxbJsonProvider().locateMapper(clazz, MediaType.APPLICATION_JSON_TYPE); } protected abstract T buildTestModel(); @@ -68,19 +67,17 @@ public abstract class TestModelBase { protected String toJSON(T model) throws JAXBException, IOException { StringWriter writer = new StringWriter(); mapper.writeValue(writer, model); -// original marshaller, uncomment this and comment mapper to verify backward compatibility -// ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer); + // original marshaller, uncomment this and comment mapper to verify backward compatibility + // ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer); return writer.toString(); } public T fromJSON(String json) throws JAXBException, IOException { - return (T) - mapper.readValue(json, clazz); + return (T) mapper.readValue(json, clazz); } public T fromXML(String xml) throws JAXBException { - return (T) - context.createUnmarshaller().unmarshal(new StringReader(xml)); + return (T) context.createUnmarshaller().unmarshal(new StringReader(xml)); } @SuppressWarnings("unused") @@ -88,14 +85,12 @@ public abstract class TestModelBase { return model.createProtobufOutput(); } - protected T fromPB(String pb) throws - Exception { - return (T)clazz.getMethod("getObjectFromMessage", byte[].class).invoke( - clazz.getDeclaredConstructor().newInstance(), - Base64.getDecoder().decode(AS_PB)); + protected T fromPB(String pb) throws Exception { + return (T) clazz.getMethod("getObjectFromMessage", byte[].class) + .invoke(clazz.getDeclaredConstructor().newInstance(), Base64.getDecoder().decode(AS_PB)); } - protected abstract void checkModel(T model); + protected abstract void checkModel(T model); @Test public void testBuildModel() throws Exception { @@ -124,7 +119,7 @@ public abstract class TestModelBase { ObjectNode expObj = mapper.readValue(AS_JSON, ObjectNode.class); ObjectNode actObj = mapper.readValue(toJSON(buildTestModel()), ObjectNode.class); assertEquals(expObj, actObj); - } catch(Exception e) { + } catch (Exception e) { assertEquals(AS_JSON, toJSON(buildTestModel())); } } @@ -134,4 +129,3 @@ public abstract class TestModelBase { checkModel(fromJSON(AS_JSON)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java index e7a9188b5e3..831a5642fb6 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,34 +29,33 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestNamespacesInstanceModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesInstanceModel.class); + HBaseClassTestRule.forClass(TestNamespacesInstanceModel.class); - public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); + public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); public static final String NAMESPACE_NAME = "namespaceName"; public TestNamespacesInstanceModel() throws Exception { super(NamespacesInstanceModel.class); - NAMESPACE_PROPERTIES.put("KEY_1","VALUE_1"); - NAMESPACE_PROPERTIES.put("KEY_2","VALUE_2"); - NAMESPACE_PROPERTIES.put("NAME","testNamespace"); + NAMESPACE_PROPERTIES.put("KEY_1", "VALUE_1"); + NAMESPACE_PROPERTIES.put("KEY_2", "VALUE_2"); + NAMESPACE_PROPERTIES.put("NAME", "testNamespace"); - AS_XML = - "" + - "NAMEtestNamespace" + - "KEY_2VALUE_2" + - "KEY_1VALUE_1" + - ""; + AS_XML = "" + + "NAMEtestNamespace" + + "KEY_2VALUE_2" + + "KEY_1VALUE_1" + + ""; AS_PB = "ChUKBE5BTUUSDXRlc3ROYW1lc3BhY2UKEAoFS0VZXzESB1ZBTFVFXzEKEAoFS0VZXzISB1ZBTFVFXzI="; - AS_JSON = "{\"properties\":{\"NAME\":\"testNamespace\"," + - "\"KEY_1\":\"VALUE_1\",\"KEY_2\":\"VALUE_2\"}}"; + AS_JSON = "{\"properties\":{\"NAME\":\"testNamespace\"," + + "\"KEY_1\":\"VALUE_1\",\"KEY_2\":\"VALUE_2\"}}"; } @Override @@ -64,9 +63,9 @@ public class TestNamespacesInstanceModel extends TestModelBase properties) { + public NamespacesInstanceModel buildTestModel(String namespace, Map properties) { NamespacesInstanceModel model = new NamespacesInstanceModel(); - for(String key: properties.keySet()){ + for (String key : properties.keySet()) { model.addProperty(key, properties.get(key)); } return model; @@ -78,12 +77,12 @@ public class TestNamespacesInstanceModel extends TestModelBase properties) { - Map modProperties = model.getProperties(); + Map properties) { + Map modProperties = model.getProperties(); assertEquals(properties.size(), modProperties.size()); // Namespace name comes from REST URI, not properties. assertNotSame(namespace, model.getNamespaceName()); - for(String property: properties.keySet()){ + for (String property : properties.keySet()) { assertEquals(properties.get(property), modProperties.get(property)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java index 5da776ab735..30e0c44bcd8 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestNamespacesModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesModel.class); + HBaseClassTestRule.forClass(TestNamespacesModel.class); public static final String NAMESPACE_NAME_1 = "testNamespace1"; public static final String NAMESPACE_NAME_2 = "testNamespace2"; @@ -42,10 +42,9 @@ public class TestNamespacesModel extends TestModelBase { public TestNamespacesModel() throws Exception { super(NamespacesModel.class); - AS_XML = - "" + - "testNamespace1" + - "testNamespace2"; + AS_XML = "" + + "testNamespace1" + + "testNamespace2"; AS_PB = "Cg50ZXN0TmFtZXNwYWNlMQoOdGVzdE5hbWVzcGFjZTI="; @@ -71,7 +70,7 @@ public class TestNamespacesModel extends TestModelBase { public void checkModel(NamespacesModel model, String... namespaceName) { List namespaces = model.getNamespaces(); assertEquals(namespaceName.length, namespaces.size()); - for(int i = 0; i < namespaceName.length; i++){ + for (int i = 0; i < namespaceName.length; i++) { assertTrue(namespaces.contains(namespaceName[i])); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java index 808b77bc9d6..ad539e12848 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,12 +33,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestRowModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowModel.class); + HBaseClassTestRule.forClass(TestRowModel.class); private static final byte[] ROW1 = Bytes.toBytes("testrow1"); private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1"); @@ -52,9 +52,8 @@ public class TestRowModel extends TestModelBase { + "dGVzdHZhbHVlMQ==" + ""; - AS_JSON = - "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + - "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}"; + AS_JSON = "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + + "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}"; } @Override @@ -79,7 +78,7 @@ public class TestRowModel extends TestModelBase { @Override public void testFromPB() throws Exception { - //do nothing row model has no PB + // do nothing row model has no PB } @Test @@ -103,4 +102,3 @@ public class TestRowModel extends TestModelBase { assertTrue(StringUtils.contains(rowModel.toString(), expectedRowKey)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java index a834ac7d193..a7c87036e4f 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.databind.JsonMappingException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.rest.ScannerResultGenerator; import org.apache.hadoop.hbase.testclassification.RestTests; @@ -32,11 +31,11 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestScannerModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerModel.class); + HBaseClassTestRule.forClass(TestScannerModel.class); private static final String PRIVATE = "private"; private static final String PUBLIC = "public"; @@ -53,21 +52,19 @@ public class TestScannerModel extends TestModelBase { public TestScannerModel() throws Exception { super(ScannerModel.class); AS_XML = "" - + "" - + "Y29sdW1uMQ==Y29sdW1uMjpmb28=" - + "privatepublic" - + ""; + + "" + + "Y29sdW1uMQ==Y29sdW1uMjpmb28=" + + "privatepublic" + ""; AS_JSON = "{\"batch\":100,\"caching\":1000,\"cacheBlocks\":false,\"endRow\":\"enp5eng=\"," - + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\"," - + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"]," - +"\"labels\":[\"private\",\"public\"]" - +"}"; + + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\"," + + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"]," + + "\"labels\":[\"private\",\"public\"]" + "}"; AS_PB = "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mf" - + "JDj/////B0joB1IHcHJpdmF0ZVIGcHVibGljWAA="; + + "JDj/////B0joB1IHcHJpdmF0ZVIGcHVibGljWAA="; } @Override diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java index 68ca6954bac..22cc2230cc5 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,57 +30,53 @@ import org.apache.hadoop.hbase.util.Bytes; import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestStorageClusterStatusModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStorageClusterStatusModel.class); + HBaseClassTestRule.forClass(TestStorageClusterStatusModel.class); public TestStorageClusterStatusModel() throws Exception { super(StorageClusterStatusModel.class); - AS_XML = - "" + - "" + - "" + - "" + - "" + - "" + - ""; + AS_XML = "" + + "" + + "" + + "" + + "" + "" + + ""; - AS_PB = - "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" + - "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" + - "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; + AS_PB = "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" + + "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" + + "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; - - //Using jackson will break json backward compatibilty for this representation - //but the original one was broken as it would only print one Node element - //so the format itself was broken + // Using jackson will break json backward compatibilty for this representation + // but the original one was broken as it would only print one Node element + // so the format itself was broken AS_JSON = - "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," + - "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," + - "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + - "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," + - "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," + - "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245219839331," + - "\"heapSizeMB\":128,\"maxHeapSizeMB\":1024},{\"name\":\"test2\"," + - "\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\",\"stores\":1," + - "\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + - "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," + - "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," + - "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245239331198," + - "\"heapSizeMB\":512,\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}"; + "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," + + "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," + + "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + + "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," + + "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," + + "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245219839331," + + "\"heapSizeMB\":128,\"maxHeapSizeMB\":1024},{\"name\":\"test2\"," + + "\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\",\"stores\":1," + + "\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + + "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," + + 
"\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," + + "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245239331198," + + "\"heapSizeMB\":512,\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}"; } @Override @@ -89,11 +85,11 @@ public class TestStorageClusterStatusModel extends TestModelBase nodes = - model.getLiveNodes().iterator(); + Iterator nodes = model.getLiveNodes().iterator(); StorageClusterStatusModel.Node node = nodes.next(); assertEquals("test1", node.getName()); assertEquals(1245219839331L, node.getStartCode()); assertEquals(128, node.getHeapSizeMB()); assertEquals(1024, node.getMaxHeapSizeMB()); - Iterator regions = - node.getRegions().iterator(); + Iterator regions = node.getRegions().iterator(); StorageClusterStatusModel.Node.Region region = regions.next(); - assertTrue(Bytes.toString(region.getName()).equals( - "hbase:root,,0")); + assertTrue(Bytes.toString(region.getName()).equals("hbase:root,,0")); assertEquals(1, region.getStores()); assertEquals(1, region.getStorefiles()); assertEquals(0, region.getStorefileSizeMB()); @@ -134,8 +127,7 @@ public class TestStorageClusterStatusModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStorageClusterVersionModel.class); + HBaseClassTestRule.forClass(TestStorageClusterVersionModel.class); private static final String VERSION = "0.0.1-testing"; public TestStorageClusterVersionModel() throws Exception { super(StorageClusterVersionModel.class); - AS_XML = - ""+ - ""; + AS_XML = "" + + ""; AS_JSON = "{\"Version\": \"0.0.1-testing\"}"; } @@ -57,7 +56,6 @@ public class TestStorageClusterVersionModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInfoModel.class); + HBaseClassTestRule.forClass(TestTableInfoModel.class); private static final String TABLE = "testtable"; private static final byte[] START_KEY = Bytes.toBytes("abracadbra"); @@ -44,22 +44,19 @@ public class TestTableInfoModel extends TestModelBase { public TestTableInfoModel() throws Exception { super(TableInfoModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_PB = - "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" + - "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY="; + AS_PB = "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" + + "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY="; - AS_JSON = - "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," + - "\"location\":\"testhost:9876\",\"" + - "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + - "startKey\":\"YWJyYWNhZGJyYQ==\"}]}"; + AS_JSON = "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," + + "\"location\":\"testhost:9876\",\"" + + "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}]}"; } @Override @@ -98,4 +95,3 @@ public class TestTableInfoModel extends TestModelBase { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java index eca14978c90..c034f0602dd 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,12 +27,12 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableListModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableListModel.class); + HBaseClassTestRule.forClass(TestTableListModel.class); private static final String TABLE1 = "table1"; private static final String TABLE2 = "table2"; @@ -40,14 +40,12 @@ public class TestTableListModel extends TestModelBase { public TestTableListModel() throws Exception { super(TableListModel.class); - AS_XML = - "

      "; + AS_XML = "
      "; AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz"; - AS_JSON = - "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}"; + AS_JSON = "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}"; } @Override @@ -71,4 +69,3 @@ public class TestTableListModel extends TestModelBase { assertFalse(tables.hasNext()); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java index 4285c9bfbde..3a85b9ce0be 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableRegionModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableRegionModel.class); + HBaseClassTestRule.forClass(TestTableRegionModel.class); private static final String TABLE = "testtable"; private static final byte[] START_KEY = Bytes.toBytes("abracadbra"); @@ -47,21 +47,19 @@ public class TestTableRegionModel extends TestModelBase { super(TableRegionModel.class); AS_XML = - ""; + ""; - AS_JSON = - "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," + - "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + - "startKey\":\"YWJyYWNhZGJyYQ==\"}"; + AS_JSON = "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," + + "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}"; } @Override protected TableRegionModel buildTestModel() { - TableRegionModel model = - new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); + TableRegionModel model = new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); return model; } @@ -71,17 +69,15 @@ public class TestTableRegionModel extends TestModelBase { assertTrue(Bytes.equals(model.getEndKey(), END_KEY)); assertEquals(ID, model.getId()); assertEquals(LOCATION, model.getLocation()); - assertEquals(model.getName(), - TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + - ".ad9860f031282c46ed431d7af8f94aca."); + assertEquals(model.getName(), TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + + ".ad9860f031282c46ed431d7af8f94aca."); } @Test public void testGetName() { TableRegionModel model = buildTestModel(); String modelName = model.getName(); - HRegionInfo hri = new HRegionInfo(TableName.valueOf(TABLE), - START_KEY, END_KEY, false, ID); + HRegionInfo hri = new HRegionInfo(TableName.valueOf(TABLE), START_KEY, END_KEY, false, ID); assertEquals(modelName, hri.getRegionNameAsString()); } @@ -95,7 +91,6 @@ public class TestTableRegionModel extends TestModelBase { @Override public void testFromPB() throws Exception { - //no pb ignore + // no pb ignore } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java index 6b50ab70048..c12288e0209 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,12 +31,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableSchemaModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSchemaModel.class); + HBaseClassTestRule.forClass(TestTableSchemaModel.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableSchemaModel.class); @@ -51,24 +51,21 @@ public class TestTableSchemaModel extends TestModelBase { super(TableSchemaModel.class); testColumnSchemaModel = new TestColumnSchemaModel(); - AS_XML = - "" + - "" + - "" + - ""; + AS_XML = "" + + "" + + "" + + ""; - AS_PB = - "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" + - "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" + - "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" + - "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA"; + AS_PB = "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" + + "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" + + "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" + + "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA"; - AS_JSON = - "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," + - "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," + - "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," + - "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}"; + AS_JSON = "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," + + "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," + + "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," + + "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}"; } @Override @@ -122,4 +119,3 @@ public class TestTableSchemaModel extends TestModelBase { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java index b35295059cf..166a68c5228 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,36 +25,31 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestVersionModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVersionModel.class); + HBaseClassTestRule.forClass(TestVersionModel.class); private static final String REST_VERSION = "0.0.1"; - private static final String OS_VERSION = - "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"; - private static final String JVM_VERSION = - "Sun Microsystems Inc. 1.6.0_13-11.3-b02"; + private static final String OS_VERSION = "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"; + private static final String JVM_VERSION = "Sun Microsystems Inc. 1.6.0_13-11.3-b02"; private static final String JETTY_VERSION = "6.1.14"; private static final String JERSEY_VERSION = "1.1.0-ea"; public TestVersionModel() throws Exception { super(VersionModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_PB = - "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" + - "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE="; + AS_PB = "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" + + "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE="; - AS_JSON = - "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," + - "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" + - "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}"; + AS_JSON = "{\"JVM\":\"Sun Microsystems Inc. 
1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," + + "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" + + "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}"; } @Override @@ -77,4 +72,3 @@ public class TestVersionModel extends TestModelBase { assertEquals(JERSEY_VERSION, model.getJerseyVersion()); } } - diff --git a/hbase-rest/src/test/resources/mapred-site.xml b/hbase-rest/src/test/resources/mapred-site.xml index 787ffb75511..b8949fef6a0 100644 --- a/hbase-rest/src/test/resources/mapred-site.xml +++ b/hbase-rest/src/test/resources/mapred-site.xml @@ -31,4 +31,3 @@ -Djava.awt.headless=true - diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml index f30fad9ef66..0962b24c027 100644 --- a/hbase-rsgroup/pom.xml +++ b/hbase-rsgroup/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-rsgroup Apache HBase - RSGroup Regionserver Groups for HBase - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - @@ -206,6 +166,46 @@ test + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -318,8 +318,7 @@ lifecycle-mapping - - + diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java index c1c591633da..c9d65f1723a 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.client.BalanceResponse; @@ -49,8 +48,8 @@ public interface RSGroupAdmin { void moveServers(Set
      servers, String targetGroup) throws IOException; /** - * Move given set of tables to the specified target RegionServer group. - * This will unassign all of a table's region so it can be reassigned to the correct group. + * Move given set of tables to the specified target RegionServer group. This will unassign all of + * a table's region so it can be reassigned to the correct group. */ void moveTables(Set tables, String targetGroup) throws IOException; @@ -66,7 +65,6 @@ public interface RSGroupAdmin { /** * Balance regions in the given RegionServer group. - * * @return boolean Whether balance ran or not */ default BalanceResponse balanceRSGroup(String groupName) throws IOException { @@ -74,9 +72,8 @@ public interface RSGroupAdmin { } /** - * Balance regions in the given RegionServer group, running based on - * the given {@link BalanceRequest}. - * + * Balance regions in the given RegionServer group, running based on the given + * {@link BalanceRequest}. * @return boolean Whether balance ran or not */ BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException; @@ -94,20 +91,19 @@ public interface RSGroupAdmin { /** * Move given set of servers and tables to the specified target RegionServer group. - * @param servers set of servers to move - * @param tables set of tables to move + * @param servers set of servers to move + * @param tables set of tables to move * @param targetGroup the target group name * @throws IOException if moving the server and tables fail */ - void moveServersAndTables(Set
      servers, Set tables, - String targetGroup) throws IOException; + void moveServersAndTables(Set
      servers, Set tables, String targetGroup) + throws IOException; /** - * Remove decommissioned servers from rsgroup. - * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline - * the server for repairing. Or we need to move some servers to join other clusters. - * So we need to remove these servers from the rsgroup. - * 2. Dead/recovering/live servers will be disallowed. + * Remove decommissioned servers from rsgroup. 1. Sometimes we may find the server aborted due to + * some hardware failure and we must offline the server for repairing. Or we need to move some + * servers to join other clusters. So we need to remove these servers from the rsgroup. 2. + * Dead/recovering/live servers will be disallowed. * @param servers set of servers to remove */ void removeServers(Set
      servers) throws IOException; @@ -121,15 +117,15 @@ public interface RSGroupAdmin { /** * Update RSGroup configuration - * @param groupName the group name + * @param groupName the group name * @param configuration new configuration of the group name to be set * @throws IOException if a remote or network exception occurs */ void updateRSGroupConfig(String groupName, Map configuration) throws IOException; /** - * Update the configuration and trigger an online config change - * on all the regionservers in the RSGroup. + * Update the configuration and trigger an online config change on all the regionservers in the + * RSGroup. * @param groupName the group name * @throws IOException if a remote or network exception occurs */ diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java index 9e5c167cbee..f90f87d9010 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.rsgroup; import com.google.protobuf.ServiceException; - import java.io.IOException; import java.util.ArrayList; import java.util.EnumSet; @@ -26,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -78,8 +76,8 @@ public class RSGroupAdminClient implements RSGroupAdmin { public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { try { GetRSGroupInfoResponse resp = stub.getRSGroupInfo(null, - GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build()); - if(resp.hasRSGroupInfo()) { + GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build()); + if (resp.hasRSGroupInfo()) { return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); } return null; @@ -90,8 +88,8 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { - GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder().setTableName( - ProtobufUtil.toProtoTableName(tableName)).build(); + GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); try { GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request); if (resp.hasRSGroupInfo()) { @@ -106,16 +104,12 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public void moveServers(Set
      servers, String targetGroup) throws IOException { Set hostPorts = Sets.newHashSet(); - for(Address el: servers) { - hostPorts.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); + for (Address el : servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } - MoveServersRequest request = MoveServersRequest.newBuilder() - .setTargetGroup(targetGroup) - .addAllServers(hostPorts) - .build(); + MoveServersRequest request = + MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts).build(); try { stub.moveServers(null, request); } catch (ServiceException e) { @@ -126,7 +120,7 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public void moveTables(Set tables, String targetGroup) throws IOException { MoveTablesRequest.Builder builder = MoveTablesRequest.newBuilder().setTargetGroup(targetGroup); - for(TableName tableName: tables) { + for (TableName tableName : tables) { builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); if (!admin.tableExists(tableName)) { throw new TableNotFoundException(tableName); @@ -160,7 +154,8 @@ public class RSGroupAdminClient implements RSGroupAdmin { } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { try { RSGroupAdminProtos.BalanceRSGroupRequest req = RSGroupProtobufUtil.createBalanceRSGroupRequest(groupName, request); @@ -173,10 +168,10 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public List listRSGroups() throws IOException { try { - List resp = stub.listRSGroupInfos(null, - ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList(); + List resp = stub + .listRSGroupInfos(null, ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList(); List result = new ArrayList<>(resp.size()); - for(RSGroupProtos.RSGroupInfo entry : resp) { + for (RSGroupProtos.RSGroupInfo entry : resp) { result.add(RSGroupProtobufUtil.toGroupInfo(entry)); } return result; @@ -187,12 +182,9 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { - GetRSGroupInfoOfServerRequest request = GetRSGroupInfoOfServerRequest.newBuilder() - .setServer(HBaseProtos.ServerName.newBuilder() - .setHostName(hostPort.getHostname()) - .setPort(hostPort.getPort()) - .build()) - .build(); + GetRSGroupInfoOfServerRequest request = + GetRSGroupInfoOfServerRequest.newBuilder().setServer(HBaseProtos.ServerName.newBuilder() + .setHostName(hostPort.getHostname()).setPort(hostPort.getPort()).build()).build(); try { GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request); if (resp.hasRSGroupInfo()) { @@ -206,16 +198,14 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public void moveServersAndTables(Set
      servers, Set tables, String targetGroup) - throws IOException { + throws IOException { MoveServersAndTablesRequest.Builder builder = - MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup); - for(Address el: servers) { - builder.addServers(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); + MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup); + for (Address el : servers) { + builder.addServers(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } - for(TableName tableName: tables) { + for (TableName tableName : tables) { builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); if (!admin.tableExists(tableName)) { throw new TableNotFoundException(tableName); @@ -231,15 +221,12 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public void removeServers(Set
      servers) throws IOException { Set hostPorts = Sets.newHashSet(); - for(Address el: servers) { - hostPorts.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); + for (Address el : servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } - RemoveServersRequest request = RemoveServersRequest.newBuilder() - .addAllServers(hostPorts) - .build(); + RemoveServersRequest request = + RemoveServersRequest.newBuilder().addAllServers(hostPorts).build(); try { stub.removeServers(null, request); } catch (ServiceException e) { @@ -249,8 +236,7 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public void renameRSGroup(String oldName, String newName) throws IOException { - RenameRSGroupRequest request = RenameRSGroupRequest.newBuilder() - .setOldRsgroupName(oldName) + RenameRSGroupRequest request = RenameRSGroupRequest.newBuilder().setOldRsgroupName(oldName) .setNewRsgroupName(newName).build(); try { stub.renameRSGroup(null, request); @@ -261,13 +247,12 @@ public class RSGroupAdminClient implements RSGroupAdmin { @Override public void updateRSGroupConfig(String groupName, Map configuration) - throws IOException { - UpdateRSGroupConfigRequest.Builder builder = UpdateRSGroupConfigRequest.newBuilder() - .setGroupName(groupName); + throws IOException { + UpdateRSGroupConfigRequest.Builder builder = + UpdateRSGroupConfigRequest.newBuilder().setGroupName(groupName); if (configuration != null) { - configuration.entrySet().forEach(e -> - builder.addConfiguration(NameStringPair.newBuilder().setName(e.getKey()) - .setValue(e.getValue()).build())); + configuration.entrySet().forEach(e -> builder.addConfiguration( + NameStringPair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build())); } try { stub.updateRSGroupConfig(null, builder.build()); @@ -282,10 +267,9 @@ public class RSGroupAdminClient implements RSGroupAdmin { if (rsGroupInfo == null) { throw new IllegalArgumentException("RSGroup does not exist: " + groupName); } - ClusterMetrics status = - admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.SERVERS_NAME)); - List groupServers = status.getServersName().stream().filter( - s -> rsGroupInfo.containsServer(s.getAddress())).collect(Collectors.toList()); + ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.SERVERS_NAME)); + List groupServers = status.getServersName().stream() + .filter(s -> rsGroupInfo.containsServer(s.getAddress())).collect(Collectors.toList()); for (ServerName server : groupServers) { admin.updateConfiguration(server); } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 5488fa81d93..7b22b8a3061 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rsgroup; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.Collections; import java.util.HashSet; @@ -30,7 +28,6 @@ import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; @@ -90,12 +87,13 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.Permission.Action; -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; + // TODO: Encapsulate MasterObserver functions into separate subclass. @CoreCoprocessor @InterfaceAudience.Private @@ -119,11 +117,11 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { throw new IOException("Does not implement HMasterServices"); } - master = ((HasMasterServices)env).getMasterServices(); + master = ((HasMasterServices) env).getMasterServices(); groupInfoManager = RSGroupInfoManagerImpl.getInstance(master); groupAdminServer = new RSGroupAdminServer(master, groupInfoManager); Class clazz = - master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null); + master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null); if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) { throw new IOException("Configured balancer does not support RegionServer groups."); } @@ -154,20 +152,20 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { RSGroupAdminServer getGroupAdminServer() { return groupAdminServer; } + /** - * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. - * This class calls {@link RSGroupAdminServer} for actual work, converts result to protocol - * buffer response, handles exceptions if any occurred and then calls the {@code RpcCallback} with - * the response. + * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. This class calls + * {@link RSGroupAdminServer} for actual work, converts result to protocol buffer response, + * handles exceptions if any occurred and then calls the {@code RpcCallback} with the response. 
*/ private class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { @Override - public void getRSGroupInfo(RpcController controller, - GetRSGroupInfoRequest request, RpcCallback done) { + public void getRSGroupInfo(RpcController controller, GetRSGroupInfoRequest request, + RpcCallback done) { GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder(); String groupName = request.getRSGroupName(); - LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" - + groupName); + LOG.info( + master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName); try { checkPermission("getRSGroupInfo"); RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); @@ -182,11 +180,11 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { @Override public void getRSGroupInfoOfTable(RpcController controller, - GetRSGroupInfoOfTableRequest request, RpcCallback done) { + GetRSGroupInfoOfTableRequest request, RpcCallback done) { GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder(); TableName tableName = ProtobufUtil.toTableName(request.getTableName()); - LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" - + tableName); + LOG.info( + master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName); try { checkPermission("getRSGroupInfoOfTable"); RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName); @@ -201,14 +199,14 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { @Override public void moveServers(RpcController controller, MoveServersRequest request, - RpcCallback done) { + RpcCallback done) { MoveServersResponse.Builder builder = MoveServersResponse.newBuilder(); Set
      hostPorts = Sets.newHashSet(); for (HBaseProtos.ServerName el : request.getServersList()) { hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +" to rsgroup " - + request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); @@ -226,14 +224,14 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { @Override public void moveTables(RpcController controller, MoveTablesRequest request, - RpcCallback done) { + RpcCallback done) { MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder(); Set tables = new HashSet<>(request.getTableNameList().size()); for (TableProtos.TableName tableName : request.getTableNameList()) { tables.add(ProtobufUtil.toTableName(tableName)); } - LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +" to rsgroup " - + request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup " + + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup()); @@ -251,7 +249,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { @Override public void addRSGroup(RpcController controller, AddRSGroupRequest request, - RpcCallback done) { + RpcCallback done) { AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder(); LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); try { @@ -270,10 +268,9 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } @Override - public void removeRSGroup(RpcController controller, - RemoveRSGroupRequest request, RpcCallback done) { - RemoveRSGroupResponse.Builder builder = - RemoveRSGroupResponse.newBuilder(); + public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request, + RpcCallback done) { + RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder(); LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); try { if (master.getMasterCoprocessorHost() != null) { @@ -291,28 +288,29 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } @Override - public void balanceRSGroup(RpcController controller, - BalanceRSGroupRequest request, RpcCallback done) { + public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest request, + RpcCallback done) { BalanceRequest balanceRequest = RSGroupProtobufUtil.toBalanceRequest(request); - BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder() - .setBalanceRan(false); + BalanceRSGroupResponse.Builder builder = + BalanceRSGroupResponse.newBuilder().setBalanceRan(false); - LOG.info(master.getClientIdAuditPrefix() + " balance rsgroup, group=" - + request.getRSGroupName()); + LOG.info( + master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); try { if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(), balanceRequest); + master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(), + balanceRequest); } checkPermission("balanceRSGroup"); - 
BalanceResponse response = groupAdminServer.balanceRSGroup(request.getRSGroupName(), balanceRequest); + BalanceResponse response = + groupAdminServer.balanceRSGroup(request.getRSGroupName(), balanceRequest); RSGroupProtobufUtil.populateBalanceRSGroupResponse(builder, response); if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), - balanceRequest, - response); + balanceRequest, response); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -322,8 +320,8 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } @Override - public void listRSGroupInfos(RpcController controller, - ListRSGroupInfosRequest request, RpcCallback done) { + public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest request, + RpcCallback done) { ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder(); LOG.info(master.getClientIdAuditPrefix() + " list rsgroup"); try { @@ -339,12 +337,11 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { @Override public void getRSGroupInfoOfServer(RpcController controller, - GetRSGroupInfoOfServerRequest request, RpcCallback done) { + GetRSGroupInfoOfServerRequest request, RpcCallback done) { GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder(); - Address hp = Address.fromParts(request.getServer().getHostName(), - request.getServer().getPort()); - LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" - + hp); + Address hp = + Address.fromParts(request.getServer().getHostName(), request.getServer().getPort()); + LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp); try { checkPermission("getRSGroupInfoOfServer"); RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp); @@ -358,8 +355,8 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } @Override - public void moveServersAndTables(RpcController controller, - MoveServersAndTablesRequest request, RpcCallback done) { + public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request, + RpcCallback done) { MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder(); Set
      hostPorts = Sets.newHashSet(); for (HBaseProtos.ServerName el : request.getServersList()) { @@ -369,18 +366,18 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { for (TableProtos.TableName tableName : request.getTableNameList()) { tables.add(ProtobufUtil.toTableName(tableName)); } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts - + " and tables " + tables + " to rsgroup" + request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " and tables " + + tables + " to rsgroup" + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables, - request.getTargetGroup()); + request.getTargetGroup()); } checkPermission("moveServersAndTables"); groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup()); if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables, - request.getTargetGroup()); + request.getTargetGroup()); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -389,17 +386,15 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } @Override - public void removeServers(RpcController controller, - RemoveServersRequest request, - RpcCallback done) { - RemoveServersResponse.Builder builder = - RemoveServersResponse.newBuilder(); + public void removeServers(RpcController controller, RemoveServersRequest request, + RpcCallback done) { + RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder(); Set
      servers = Sets.newHashSet(); for (HBaseProtos.ServerName el : request.getServersList()) { servers.add(Address.fromParts(el.getHostName(), el.getPort())); } - LOG.info(master.getClientIdAuditPrefix() - + " remove decommissioned servers from rsgroup: " + servers); + LOG.info(master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + + servers); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preRemoveServers(servers); @@ -416,13 +411,12 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } @Override - public void renameRSGroup(RpcController controller, - RenameRSGroupRequest request, - RpcCallback done) { + public void renameRSGroup(RpcController controller, RenameRSGroupRequest request, + RpcCallback done) { String oldRSGroup = request.getOldRsgroupName(); String newRSGroup = request.getNewRsgroupName(); - LOG.info("{} rename rsgroup from {} to {}", - master.getClientIdAuditPrefix(), oldRSGroup, newRSGroup); + LOG.info("{} rename rsgroup from {} to {}", master.getClientIdAuditPrefix(), oldRSGroup, + newRSGroup); RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder(); try { @@ -442,13 +436,13 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { @Override public void updateRSGroupConfig(RpcController controller, UpdateRSGroupConfigRequest request, - RpcCallback done) { + RpcCallback done) { UpdateRSGroupConfigResponse.Builder builder = UpdateRSGroupConfigResponse.newBuilder(); String groupName = request.getGroupName(); Map configuration = Maps.newHashMap(); request.getConfigurationList().forEach(p -> configuration.put(p.getName(), p.getValue())); LOG.info("{} update rsgroup {} configuration {}", master.getClientIdAuditPrefix(), groupName, - configuration); + configuration); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration); @@ -467,22 +461,21 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException { String groupName; try { - groupName = - master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString()) + groupName = master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString()) .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); if (groupName == null) { groupName = RSGroupInfo.DEFAULT_GROUP; } } catch (MasterNotRunningException | PleaseHoldException e) { - LOG.info("Master has not initialized yet; temporarily using default RSGroup '" + - RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table"); + LOG.info("Master has not initialized yet; temporarily using default RSGroup '" + + RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table"); groupName = RSGroupInfo.DEFAULT_GROUP; } RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); if (rsGroupInfo == null) { throw new ConstraintException( - "Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist."); + "Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist."); } for (ServerName onlineServer : master.getServerManager().createDestinationServersList()) { @@ -496,8 +489,8 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { void assignTableToGroup(TableDescriptor desc) throws IOException { RSGroupInfo rsGroupInfo = determineRSGroupInfoForTable(desc); if 
(rsGroupInfo == null) { - throw new ConstraintException("Default RSGroup for this table " + desc.getTableName() - + " does not exist."); + throw new ConstraintException( + "Default RSGroup for this table " + desc.getTableName() + " does not exist."); } if (!rsGroupInfo.containsTable(desc.getTableName())) { LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + rsGroupInfo.getName()); @@ -510,10 +503,8 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { ///////////////////////////////////////////////////////////////////////////// @Override - public void preCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException { + public void preCreateTableAction(final ObserverContext ctx, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { if (desc.getTableName().isSystemTable()) { return; } @@ -555,9 +546,8 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { "Default RSGroup for this table " + desc.getTableName() + " does not exist."); } if (!RSGroupUtil.rsGroupHasOnlineServer(master, rsGroupInfo)) { - throw new HBaseIOException( - "No online servers in the rsgroup " + rsGroupInfo.getName() + " which table " + desc - .getTableName().getNameAsString() + " belongs to"); + throw new HBaseIOException("No online servers in the rsgroup " + rsGroupInfo.getName() + + " which table " + desc.getTableName().getNameAsString() + " belongs to"); } } @@ -566,8 +556,8 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { // In case of modify table, when rs group is not changed, move is not required. if (!rsGroupInfo.containsTable(desc.getTableName())) { synchronized (groupInfoManager) { - groupInfoManager - .moveTables(Collections.singleton(desc.getTableName()), rsGroupInfo.getName()); + groupInfoManager.moveTables(Collections.singleton(desc.getTableName()), + rsGroupInfo.getName()); } } } @@ -579,22 +569,23 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { if (rsGroup == null) { // When rs group is set in table descriptor then it must exist throw new ConstraintException( - "Region server group " + optGroupNameOfTable.get() + " does not exist."); + "Region server group " + optGroupNameOfTable.get() + " does not exist."); } else { return rsGroup; } } return groupInfoManager.determineRSGroupInfoForTable(desc.getTableName()); } + // Remove table from its RSGroup. 
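// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of this patch) of the RSGroupAdmin API whose
// formatting is changed above. Assumptions: an open Connection "conn" and Admin "admin",
// the RSGroupAdminClient(Connection) constructor, and placeholder names ("group_a",
// "my_table", "my_ns", "rs1.example.com", 16020).
package org.apache.hadoop.hbase.rsgroup;

import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

public class RSGroupAdminUsageSketch {
  static void example(Connection conn, Admin admin) throws IOException {
    RSGroupAdmin rsGroupAdmin = new RSGroupAdminClient(conn);
    // Move one server and one table to the target group in a single call, so the table's
    // regions are reassigned onto servers of that group.
    rsGroupAdmin.moveServersAndTables(
      Sets.newHashSet(Address.fromParts("rs1.example.com", 16020)),
      Sets.newHashSet(TableName.valueOf("my_table")), "group_a");
    // Rebalance regions within the group; per the Javadoc above, the response reports
    // whether the balancer actually ran.
    BalanceResponse response = rsGroupAdmin.balanceRSGroup("group_a");
    // Push a configuration entry to the group and trigger an online config change on all
    // region servers in the group.
    rsGroupAdmin.updateRSGroupConfig("group_a", Collections.singletonMap("key", "value"));
    // Tables created in a namespace that carries NAMESPACE_DESC_PROP_GROUP are assigned to
    // that group by the coprocessor hooks shown in this file.
    admin.createNamespace(NamespaceDescriptor.create("my_ns")
      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "group_a").build());
  }
}
// ---------------------------------------------------------------------------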
@Override public void postDeleteTable(ObserverContext ctx, - TableName tableName) throws IOException { + TableName tableName) throws IOException { try { RSGroupInfo group = groupAdminServer.getRSGroupInfoOfTable(tableName); if (group != null) { LOG.debug(String.format("Removing deleted table '%s' from rsgroup '%s'", tableName, - group.getName())); + group.getName())); groupAdminServer.moveTables(Sets.newHashSet(tableName), null); } } catch (IOException ex) { @@ -604,34 +595,32 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { @Override public void preCreateNamespace(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException { + NamespaceDescriptor ns) throws IOException { String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); - if(group != null && groupAdminServer.getRSGroupInfo(group) == null) { + if (group != null && groupAdminServer.getRSGroupInfo(group) == null) { throw new ConstraintException("Region server group " + group + " does not exist."); } } @Override public void preModifyNamespace(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException { + NamespaceDescriptor ns) throws IOException { preCreateNamespace(ctx, ns); } @Override public void preCloneSnapshot(ObserverContext ctx, - SnapshotDescription snapshot, TableDescriptor desc) throws IOException { + SnapshotDescription snapshot, TableDescriptor desc) throws IOException { assignTableToGroup(desc); } @Override public void postClearDeadServers(ObserverContext ctx, - List servers, List notClearedServers) - throws IOException { - Set
      clearedServer = servers.stream(). - filter(server -> !notClearedServers.contains(server)). - map(ServerName::getAddress). - collect(Collectors.toSet()); - if(!clearedServer.isEmpty()) { + List servers, List notClearedServers) throws IOException { + Set
      clearedServer = + servers.stream().filter(server -> !notClearedServers.contains(server)) + .map(ServerName::getAddress).collect(Collectors.toSet()); + if (!clearedServer.isEmpty()) { groupAdminServer.removeServers(clearedServer); } } @@ -641,9 +630,9 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } /** - * Returns the active user to which authorization checks should be applied. - * If we are in the context of an RPC call, the remote user is used, - * otherwise the currently logged in user is used. + * Returns the active user to which authorization checks should be applied. If we are in the + * context of an RPC call, the remote user is used, otherwise the currently logged in user is + * used. */ private User getActiveUser() throws IOException { // for non-rpc handling, fallback to system user diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index d25cb5e7780..16d562bbb9f 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,19 +53,20 @@ import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + /** * Service to support Region Server Grouping (HBase-6721). */ @InterfaceAudience.Private public class RSGroupAdminServer implements RSGroupAdmin { private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminServer.class); - public static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " + - "one server in 'default' RSGroup."; + public static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = + "should keep at least " + "one server in 'default' RSGroup."; private MasterServices master; private final RSGroupInfoManager rsGroupInfoManager; @@ -85,20 +86,20 @@ public class RSGroupAdminServer implements RSGroupAdmin { // We are reading across two Maps in the below with out synchronizing across // them; should be safe most of the time. String groupName = rsGroupInfoManager.getRSGroupOfTable(tableName); - return groupName == null? null: rsGroupInfoManager.getRSGroup(groupName); + return groupName == null ? null : rsGroupInfoManager.getRSGroup(groupName); } private void checkOnlineServersOnly(Set
<Address> servers) throws ConstraintException { // This uglyness is because we only have Address, not ServerName. // Online servers are keyed by ServerName. Set<Address>
      onlineServers = new HashSet<>(); - for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { + for (ServerName server : master.getServerManager().getOnlineServers().keySet()) { onlineServers.add(server.getAddress()); } - for (Address address: servers) { + for (Address address : servers) { if (!onlineServers.contains(address)) { throw new ConstraintException( - "Server " + address + " is not an online server in 'default' RSGroup."); + "Server " + address + " is not an online server in 'default' RSGroup."); } } } @@ -123,8 +124,8 @@ public class RSGroupAdminServer implements RSGroupAdmin { */ private List getRegions(final Address server) { LinkedList regions = new LinkedList<>(); - for (Map.Entry el : - master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + for (Map.Entry el : master.getAssignmentManager().getRegionStates() + .getRegionAssignments().entrySet()) { if (el.getValue() == null) { continue; } @@ -134,8 +135,9 @@ public class RSGroupAdminServer implements RSGroupAdmin { } } for (RegionStateNode state : master.getAssignmentManager().getRegionsInTransition()) { - if (state.getRegionLocation() != null && - state.getRegionLocation().getAddress().equals(server)) { + if ( + state.getRegionLocation() != null && state.getRegionLocation().getAddress().equals(server) + ) { addRegion(regions, state.getRegionInfo()); } } @@ -155,21 +157,20 @@ public class RSGroupAdminServer implements RSGroupAdmin { /** * Check servers and tables. - * - * @param servers servers to move - * @param tables tables to move + * @param servers servers to move + * @param tables tables to move * @param targetGroupName target group name * @throws IOException if nulls or if servers and tables not belong to the same group */ private void checkServersAndTables(Set
      servers, Set tables, - String targetGroupName) throws IOException { + String targetGroupName) throws IOException { // Presume first server's source group. Later ensure all servers are from this group. Address firstServer = servers.iterator().next(); RSGroupInfo tmpSrcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer); if (tmpSrcGrp == null) { // Be careful. This exception message is tested for in TestRSGroupsAdmin2... - throw new ConstraintException("Server " + firstServer - + " is either offline or it does not exist."); + throw new ConstraintException( + "Server " + firstServer + " is either offline or it does not exist."); } RSGroupInfo srcGrp = new RSGroupInfo(tmpSrcGrp); @@ -177,11 +178,11 @@ public class RSGroupAdminServer implements RSGroupAdmin { checkOnlineServersOnly(servers); // Ensure all servers are of same rsgroup. - for (Address server: servers) { + for (Address server : servers) { String tmpGroup = rsGroupInfoManager.getRSGroupOfServer(server).getName(); if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move server request should only come from one source " + - "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); + throw new ConstraintException("Move server request should only come from one source " + + "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); } } @@ -189,31 +190,29 @@ public class RSGroupAdminServer implements RSGroupAdmin { for (TableName table : tables) { String tmpGroup = rsGroupInfoManager.getRSGroupOfTable(table); if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move table request should only come from one source " + - "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); + throw new ConstraintException("Move table request should only come from one source " + + "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); } } if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > tables.size()) { - throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() + - " that contains tables without servers to host them."); + throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() + + " that contains tables without servers to host them."); } } /** - * Move every region from servers which are currently located on these servers, - * but should not be located there. - * - * @param movedServers the servers that are moved to new group - * @param movedTables the tables that are moved to new group - * @param srcGrpServers all servers in the source group, excluding the movedServers + * Move every region from servers which are currently located on these servers, but should not be + * located there. + * @param movedServers the servers that are moved to new group + * @param movedTables the tables that are moved to new group + * @param srcGrpServers all servers in the source group, excluding the movedServers * @param targetGroupName the target group * @param sourceGroupName the source group * @throws IOException if any error while moving regions */ private void moveServerRegionsFromGroup(Set
<Address> movedServers, Set<TableName> movedTables, - Set<Address>
srcGrpServers, String targetGroupName, - String sourceGroupName) throws IOException { + Set<Address>
      srcGrpServers, String targetGroupName, String sourceGroupName) throws IOException { // Get server names corresponding to given Addresses List movedServerNames = new ArrayList<>(movedServers.size()); List srcGrpServerNames = new ArrayList<>(srcGrpServers.size()); @@ -236,8 +235,10 @@ public class RSGroupAdminServer implements RSGroupAdmin { for (ServerName server : movedServerNames) { List regionsOnServer = getRegions(server.getAddress()); for (RegionInfo region : regionsOnServer) { - if (!movedTables.contains(region.getTable()) && !srcGrpServers - .contains(getRegionAddress(region))) { + if ( + !movedTables.contains(region.getTable()) + && !srcGrpServers.contains(getRegionAddress(region)) + ) { LOG.info("Moving server region {}, which do not belong to RSGroup {}", region.getShortNameToLog(), targetGroupName); // Move region back to source RSGroup servers @@ -259,8 +260,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { } } } - boolean allRegionsMoved = - waitForRegionMovement(assignmentFutures, sourceGroupName, retry); + boolean allRegionsMoved = waitForRegionMovement(assignmentFutures, sourceGroupName, retry); if (allRegionsMoved && !errorInRegionMove) { LOG.info("All regions from {} are moved back to {}", movedServerNames, sourceGroupName); return; @@ -282,8 +282,8 @@ public class RSGroupAdminServer implements RSGroupAdmin { } /** - * Wait for all the region move to complete. Keep waiting for other region movement - * completion even if some region movement fails. + * Wait for all the region move to complete. Keep waiting for other region movement completion + * even if some region movement fails. */ private boolean waitForRegionMovement(List>> regionMoveFutures, String groupName, int retryCount) { @@ -293,8 +293,10 @@ public class RSGroupAdminServer implements RSGroupAdmin { for (Pair> pair : regionMoveFutures) { try { pair.getSecond().get(); - if (master.getAssignmentManager().getRegionStates(). - getRegionState(pair.getFirst()).isFailedOpen()) { + if ( + master.getAssignmentManager().getRegionStates().getRegionState(pair.getFirst()) + .isFailedOpen() + ) { allRegionsMoved = false; } } catch (InterruptedException e) { @@ -312,7 +314,6 @@ public class RSGroupAdminServer implements RSGroupAdmin { /** * Moves regions of tables which are not on target group servers. 
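For illustration only, a minimal client-side sketch of the table-move path exercised above: it assumes RSGroupAdminClient (constructed from a Connection, as shown later in this patch) exposes the same moveTables call that RSGroupAdminServer implements here; the group and table names are invented.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

public class MoveTableToGroupSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      RSGroupAdmin rsGroupAdmin = new RSGroupAdminClient(conn);
      // Assign an existing user table to an existing RSGroup; the server side then moves any
      // of the table's regions that are not yet hosted on that group's servers.
      rsGroupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("sales_orders")), "group_sales");
    }
  }
}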
- * * @param tables the tables that will move to new group * @param targetGrp the target group * @throws IOException if moving the region fails @@ -325,15 +326,17 @@ public class RSGroupAdminServer implements RSGroupAdmin { targetGrpSevers.add(serverName); } } - //Set true to indicate at least one region movement failed + // Set true to indicate at least one region movement failed boolean errorInRegionMove; int retry = 0; List>> assignmentFutures = new ArrayList<>(); do { errorInRegionMove = false; for (TableName table : tables) { - if (master.getTableStateManager().isTableState(table, TableState.State.DISABLED, - TableState.State.DISABLING)) { + if ( + master.getTableStateManager().isTableState(table, TableState.State.DISABLED, + TableState.State.DISABLING) + ) { LOG.debug("Skipping move regions because the table {} is disabled", table); continue; } @@ -383,8 +386,8 @@ public class RSGroupAdminServer implements RSGroupAdmin { } @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE", - justification="Ignoring complaint because don't know what it is complaining about") + value = "RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE", + justification = "Ignoring complaint because don't know what it is complaining about") @Override public void moveServers(Set
      servers, String targetGroupName) throws IOException { if (servers == null) { @@ -395,7 +398,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { // TODO. Why? Stuff breaks if I equate them. return; } - //check target group + // check target group getAndCheckRSGroupInfo(targetGroupName); // Hold a lock on the manager instance while moving servers to prevent @@ -406,8 +409,8 @@ public class RSGroupAdminServer implements RSGroupAdmin { RSGroupInfo srcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer); if (srcGrp == null) { // Be careful. This exception message is tested for in TestRSGroupsAdmin2... - throw new ConstraintException("Server " + firstServer - + " is either offline or it does not exist."); + throw new ConstraintException( + "Server " + firstServer + " is either offline or it does not exist."); } // Only move online servers (when moving from 'default') or servers from other // groups. This prevents bogus servers from entering groups @@ -418,24 +421,24 @@ public class RSGroupAdminServer implements RSGroupAdmin { checkOnlineServersOnly(servers); } // Ensure all servers are of same rsgroup. - for (Address server: servers) { + for (Address server : servers) { String tmpGroup = rsGroupInfoManager.getRSGroupOfServer(server).getName(); if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move server request should only come from one source " + - "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); + throw new ConstraintException("Move server request should only come from one source " + + "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); } } if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > 0) { - throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() + - " that contains tables without servers to host them."); + throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() + + " that contains tables without servers to host them."); } // MovedServers may be < passed in 'servers'. - Set
<Address> movedServers = rsGroupInfoManager.moveServers(servers, srcGrp.getName(), - targetGroupName); + Set<Address>
      movedServers = + rsGroupInfoManager.moveServers(servers, srcGrp.getName(), targetGroupName); moveServerRegionsFromGroup(movedServers, Collections.emptySet(), - rsGroupInfoManager.getRSGroup(srcGrp.getName()).getServers(), - targetGroupName, srcGrp.getName()); + rsGroupInfoManager.getRSGroup(srcGrp.getName()).getServers(), targetGroupName, + srcGrp.getName()); LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName); } } @@ -453,12 +456,12 @@ public class RSGroupAdminServer implements RSGroupAdmin { // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. synchronized (rsGroupInfoManager) { - if(targetGroup != null) { + if (targetGroup != null) { RSGroupInfo destGroup = rsGroupInfoManager.getRSGroup(targetGroup); - if(destGroup == null) { + if (destGroup == null) { throw new ConstraintException("Target " + targetGroup + " RSGroup does not exist."); } - if(destGroup.getServers().size() < 1) { + if (destGroup.getServers().size() < 1) { throw new ConstraintException("Target RSGroup must have at least one server."); } } @@ -488,15 +491,15 @@ public class RSGroupAdminServer implements RSGroupAdmin { } int tableCount = rsGroupInfo.getTables().size(); if (tableCount > 0) { - throw new ConstraintException("RSGroup " + name + " has " + tableCount + - " tables; you must remove these tables from the rsgroup before " + - "the rsgroup can be removed."); + throw new ConstraintException("RSGroup " + name + " has " + tableCount + + " tables; you must remove these tables from the rsgroup before " + + "the rsgroup can be removed."); } int serverCount = rsGroupInfo.getServers().size(); if (serverCount > 0) { - throw new ConstraintException("RSGroup " + name + " has " + serverCount + - " servers; you must remove these servers from the RSGroup before" + - "the RSGroup can be removed."); + throw new ConstraintException("RSGroup " + name + " has " + serverCount + + " servers; you must remove these servers from the RSGroup before" + + "the RSGroup can be removed."); } for (NamespaceDescriptor ns : master.getClusterSchema().getNamespaces()) { String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); @@ -510,7 +513,8 @@ public class RSGroupAdminServer implements RSGroupAdmin { } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { ServerManager serverManager = master.getServerManager(); LoadBalancer balancer = master.getLoadBalancer(); @@ -523,28 +527,28 @@ public class RSGroupAdminServer implements RSGroupAdmin { } if (getRSGroupInfo(groupName) == null) { - throw new ConstraintException("RSGroup does not exist: "+groupName); + throw new ConstraintException("RSGroup does not exist: " + groupName); } // Only allow one balance run at at time. 
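A hedged sketch of the BalanceRequest/BalanceResponse surface used by balanceRSGroup above (group name invented; the client is assumed to implement the same RSGroupAdmin method as the server code in this hunk): a dry run reports how many moves the balancer would make without executing them.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;

public class DryRunGroupBalanceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      RSGroupAdmin rsGroupAdmin = new RSGroupAdminClient(conn);
      // Compute a balance plan for one group without executing it.
      BalanceRequest request = BalanceRequest.newBuilder()
        .setDryRun(true)
        .setIgnoreRegionsInTransition(false)
        .build();
      BalanceResponse response = rsGroupAdmin.balanceRSGroup("group_sales", request);
      System.out.println("balancer ran: " + response.isBalancerRan() + ", moves calculated: "
        + response.getMovesCalculated() + ", moves executed: " + response.getMovesExecuted());
    }
  }
}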
Map groupRIT = rsGroupGetRegionsInTransition(groupName); if (groupRIT.size() > 0 && !request.isIgnoreRegionsInTransition()) { LOG.debug("Not running balancer because {} region(s) in transition: {}", groupRIT.size(), - StringUtils.abbreviate( - master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), - 256)); + StringUtils.abbreviate( + master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), + 256)); return responseBuilder.build(); } if (serverManager.areDeadServersInProgress()) { LOG.debug("Not running balancer because processing dead regionserver(s): {}", - serverManager.getDeadServers()); + serverManager.getDeadServers()); return responseBuilder.build(); } - //We balance per group instead of per table + // We balance per group instead of per table Map>> assignmentsByTable = - getRSGroupAssignmentsByTable(master.getTableStateManager(), groupName); + getRSGroupAssignmentsByTable(master.getTableStateManager(), groupName); List plans = balancer.balanceCluster(assignmentsByTable); boolean balancerRan = !plans.isEmpty(); @@ -573,7 +577,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { @Override public void moveServersAndTables(Set
      servers, Set tables, String targetGroup) - throws IOException { + throws IOException { if (servers == null || servers.isEmpty()) { throw new ConstraintException("The list of servers to move cannot be null or empty."); } @@ -581,28 +585,27 @@ public class RSGroupAdminServer implements RSGroupAdmin { throw new ConstraintException("The list of tables to move cannot be null or empty."); } - //check target group + // check target group getAndCheckRSGroupInfo(targetGroup); // Hold a lock on the manager instance while moving servers and tables to prevent // another writer changing our state while we are working. synchronized (rsGroupInfoManager) { - //check servers and tables status + // check servers and tables status checkServersAndTables(servers, tables, targetGroup); - //Move servers and tables to a new group. + // Move servers and tables to a new group. String srcGroup = getRSGroupOfServer(servers.iterator().next()).getName(); rsGroupInfoManager.moveServersAndTables(servers, tables, srcGroup, targetGroup); - //move regions on these servers which do not belong to group tables + // move regions on these servers which do not belong to group tables moveServerRegionsFromGroup(servers, tables, - rsGroupInfoManager.getRSGroup(srcGroup).getServers(), - targetGroup, srcGroup); - //move regions of these tables which are not on group servers + rsGroupInfoManager.getRSGroup(srcGroup).getServers(), targetGroup, srcGroup); + // move regions of these tables which are not on group servers modifyOrMoveTables(tables, rsGroupInfoManager.getRSGroup(targetGroup)); } LOG.info("Move servers and tables done. Severs: {}, Tables: {} => {}", servers, tables, - targetGroup); + targetGroup); } @Override @@ -614,7 +617,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. synchronized (rsGroupInfoManager) { - //check the set of servers + // check the set of servers checkForDeadOrOnlineServers(servers); rsGroupInfoManager.removeServers(servers); LOG.info("Remove decommissioned servers {} from RSGroup done", servers); @@ -636,7 +639,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { @Override public void updateRSGroupConfig(String groupName, Map configuration) - throws IOException { + throws IOException { synchronized (rsGroupInfoManager) { rsGroupInfoManager.updateRSGroupConfig(groupName, configuration); } @@ -652,13 +655,13 @@ public class RSGroupAdminServer implements RSGroupAdmin { } private Map rsGroupGetRegionsInTransition(String groupName) - throws IOException { + throws IOException { Map rit = Maps.newTreeMap(); AssignmentManager am = master.getAssignmentManager(); - for(TableName tableName : getRSGroupInfo(groupName).getTables()) { - for(RegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) { + for (TableName tableName : getRSGroupInfo(groupName).getTables()) { + for (RegionInfo regionInfo : am.getRegionStates().getRegionsOfTable(tableName)) { RegionState state = am.getRegionStates().getRegionTransitionState(regionInfo); - if(state != null) { + if (state != null) { rit.put(regionInfo.getEncodedName(), state); } } @@ -674,18 +677,20 @@ public class RSGroupAdminServer implements RSGroupAdmin { * @return A clone of current assignments for this group. 
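For removeServers above, a brief usage sketch (host and port are placeholders): the server-side check rejects any address that is still online or still on the dead-servers list, so only fully decommissioned hosts can be removed this way.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

public class RemoveDecommissionedServersSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      RSGroupAdmin rsGroupAdmin = new RSGroupAdminClient(conn);
      // Drop a decommissioned region server from its RSGroup bookkeeping.
      rsGroupAdmin.removeServers(
        Sets.newHashSet(Address.fromParts("decommissioned-rs.example.com", 16020)));
    }
  }
}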
*/ Map>> getRSGroupAssignmentsByTable( - TableStateManager tableStateManager, String groupName) throws IOException { + TableStateManager tableStateManager, String groupName) throws IOException { Map>> result = Maps.newHashMap(); RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); Map>> assignments = Maps.newHashMap(); for (Map.Entry entry : master.getAssignmentManager().getRegionStates() - .getRegionAssignments().entrySet()) { + .getRegionAssignments().entrySet()) { TableName currTable = entry.getKey().getTable(); ServerName currServer = entry.getValue(); RegionInfo currRegion = entry.getKey(); if (rsGroupInfo.getTables().contains(currTable)) { - if (tableStateManager.isTableState(currTable, TableState.State.DISABLED, - TableState.State.DISABLING)) { + if ( + tableStateManager.isTableState(currTable, TableState.State.DISABLED, + TableState.State.DISABLING) + ) { continue; } if (currRegion.isSplitParent()) { @@ -698,15 +703,15 @@ public class RSGroupAdminServer implements RSGroupAdmin { } Map> serverMap = Maps.newHashMap(); - for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) { - if(rsGroupInfo.getServers().contains(serverName.getAddress())) { + for (ServerName serverName : master.getServerManager().getOnlineServers().keySet()) { + if (rsGroupInfo.getServers().contains(serverName.getAddress())) { serverMap.put(serverName, Collections.emptyList()); } } // add all tables that are members of the group - for(TableName tableName : rsGroupInfo.getTables()) { - if(assignments.containsKey(tableName)) { + for (TableName tableName : rsGroupInfo.getTables()) { + if (assignments.containsKey(tableName)) { result.put(tableName, new HashMap<>()); result.get(tableName).putAll(serverMap); result.get(tableName).putAll(assignments.get(tableName)); @@ -733,19 +738,18 @@ public class RSGroupAdminServer implements RSGroupAdmin { } Set
      deadServers = new HashSet<>(); - for(ServerName server: master.getServerManager().getDeadServers().copyServerNames()) { + for (ServerName server : master.getServerManager().getDeadServers().copyServerNames()) { deadServers.add(server.getAddress()); } - for (Address address: servers) { + for (Address address : servers) { if (onlineServers.contains(address)) { throw new ConstraintException( - "Server " + address + " is an online server, not allowed to remove."); + "Server " + address + " is an online server, not allowed to remove."); } if (deadServers.contains(address)) { - throw new ConstraintException( - "Server " + address + " is on the dead servers list," - + " Maybe it will come back again, not allowed to remove."); + throw new ConstraintException("Server " + address + " is on the dead servers list," + + " Maybe it will come back again, not allowed to remove."); } } } @@ -794,8 +798,8 @@ public class RSGroupAdminServer implements RSGroupAdmin { for (TableDescriptor oldTd : tableDescriptors) { TableDescriptor newTd = TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); - procIds.add(master - .modifyTable(oldTd.getTableName(), newTd, HConstants.NO_NONCE, HConstants.NO_NONCE)); + procIds.add( + master.modifyTable(oldTd.getTableName(), newTd, HConstants.NO_NONCE, HConstants.NO_NONCE)); } return procIds; } @@ -806,8 +810,8 @@ public class RSGroupAdminServer implements RSGroupAdmin { if (proc == null) { continue; } - ProcedureSyncWait - .waitForProcedureToCompleteIOE(master.getMasterProcedureExecutor(), proc, Long.MAX_VALUE); + ProcedureSyncWait.waitForProcedureToCompleteIOE(master.getMasterProcedureExecutor(), proc, + Long.MAX_VALUE); } } } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 5db124e27ee..b33be41f42e 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; import com.google.errorprone.annotations.RestrictedApi; @@ -54,18 +53,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) - * It does region balance based on a table's group membership. - * - * Most assignment methods contain two exclusive code paths: Online - when the group - * table is online and Offline - when it is unavailable. - * - * During Offline, assignments are assigned based on cached information in zookeeper. - * If unavailable (ie bootstrap) then regions are assigned randomly. - * - * Once the GROUP table has been assigned, the balancer switches to Online and will then - * start providing appropriate assignments for user tables. - * + * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) It does + * region balance based on a table's group membership. 
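As context for the RSGroupBasedLoadBalancer introduced in the hunk above, a minimal configuration sketch; it relies on the constants referenced in this patch (FALLBACK_GROUP_ENABLE_KEY, HConstants.HBASE_MASTER_LOADBALANCER_CLASS, CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY), and in a real deployment the same properties would live in hbase-site.xml rather than be set programmatically.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;

public class RsGroupBalancerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Use the group-aware balancer and install the admin endpoint on the master.
    conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      RSGroupBasedLoadBalancer.class.getName());
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, RSGroupAdminEndpoint.class.getName());
    // Optional: let regions of a group with no live servers fall back, first to 'default',
    // then to any group; keep the balancer switch on so misplaced regions are corrected.
    conf.setBoolean(RSGroupBasedLoadBalancer.FALLBACK_GROUP_ENABLE_KEY, true);
    System.out.println(conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS));
  }
}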
Most assignment methods contain two exclusive + * code paths: Online - when the group table is online and Offline - when it is unavailable. During + * Offline, assignments are assigned based on cached information in zookeeper. If unavailable (ie + * bootstrap) then regions are assigned randomly. Once the GROUP table has been assigned, the + * balancer switches to Online and will then start providing appropriate assignments for user + * tables. */ @InterfaceAudience.Private public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { @@ -76,11 +70,9 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { private volatile LoadBalancer internalBalancer; /** - * Set this key to {@code true} to allow region fallback. - * Fallback to the default rsgroup first, then fallback to any group if no online servers in - * default rsgroup. - * Please keep balancer switch on at the same time, which is relied on to correct misplaced - * regions + * Set this key to {@code true} to allow region fallback. Fallback to the default rsgroup first, + * then fallback to any group if no online servers in default rsgroup. Please keep balancer switch + * on at the same time, which is relied on to correct misplaced regions */ public static final String FALLBACK_GROUP_ENABLE_KEY = "hbase.rsgroup.fallback.enable"; @@ -90,7 +82,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { * Used by reflection in {@link org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory}. */ @InterfaceAudience.Private - public RSGroupBasedLoadBalancer() {} + public RSGroupBasedLoadBalancer() { + } // must be called after calling initialize @Override @@ -100,8 +93,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } @Override - public synchronized void updateBalancerLoadInfo(Map>> - loadOfAllTable){ + public synchronized void + updateBalancerLoadInfo(Map>> loadOfAllTable) { internalBalancer.updateBalancerLoadInfo(loadOfAllTable); } @@ -110,7 +103,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public void setRsGroupInfoManager(RSGroupInfoManager rsGroupInfoManager) { this.rsGroupInfoManager = rsGroupInfoManager; } @@ -120,16 +113,16 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { */ @Override public synchronized List balanceCluster( - Map>> loadOfAllTable) throws IOException { + Map>> loadOfAllTable) throws IOException { if (!isOnline()) { - throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME + - " is not online, unable to perform balance"); + throw new ConstraintException( + RSGroupInfoManager.RSGROUP_TABLE_NAME + " is not online, unable to perform balance"); } // Calculate correct assignments and a list of RegionPlan for mis-placed regions - Pair>>, List> - correctedStateAndRegionPlans = correctAssignments(loadOfAllTable); + Pair>>, + List> correctedStateAndRegionPlans = correctAssignments(loadOfAllTable); Map>> correctedLoadOfAllTable = - correctedStateAndRegionPlans.getFirst(); + correctedStateAndRegionPlans.getFirst(); List regionPlans = correctedStateAndRegionPlans.getSecond(); // Add RegionPlan for the regions which have been placed according to the region server group // assignment into the movement list @@ -138,7 +131,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { for (RSGroupInfo rsgroup : 
rsGroupInfoManager.listRSGroups()) { Map>> loadOfTablesInGroup = new HashMap<>(); for (Map.Entry>> entry : correctedLoadOfAllTable - .entrySet()) { + .entrySet()) { TableName tableName = entry.getKey(); String targetRSGroupName = rsGroupInfoManager.getRSGroupOfTable(tableName); if (targetRSGroupName == null) { @@ -167,15 +160,15 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { @Override @NonNull public Map> roundRobinAssignment(List regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { Map> assignments = Maps.newHashMap(); List, List>> pairs = generateGroupAssignments(regions, servers); for (Pair, List> pair : pairs) { Map> result = - this.internalBalancer.roundRobinAssignment(pair.getFirst(), pair.getSecond()); + this.internalBalancer.roundRobinAssignment(pair.getFirst(), pair.getSecond()); result.forEach((server, regionInfos) -> assignments - .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); } return assignments; } @@ -183,19 +176,19 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { @Override @NonNull public Map> retainAssignment(Map regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { try { Map> assignments = new TreeMap<>(); List, List>> pairs = - generateGroupAssignments(Lists.newArrayList(regions.keySet()), servers); + generateGroupAssignments(Lists.newArrayList(regions.keySet()), servers); for (Pair, List> pair : pairs) { List regionList = pair.getFirst(); Map currentAssignmentMap = Maps.newTreeMap(); regionList.forEach(r -> currentAssignmentMap.put(r, regions.get(r))); Map> pairResult = - this.internalBalancer.retainAssignment(currentAssignmentMap, pair.getSecond()); + this.internalBalancer.retainAssignment(currentAssignmentMap, pair.getSecond()); pairResult.forEach((server, rs) -> assignments - .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); } return assignments; } catch (IOException e) { @@ -204,8 +197,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } @Override - public ServerName randomAssignment(RegionInfo region, - List servers) throws HBaseIOException { + public ServerName randomAssignment(RegionInfo region, List servers) + throws HBaseIOException { List, List>> pairs = generateGroupAssignments(Lists.newArrayList(region), servers); List filteredServers = pairs.iterator().next().getSecond(); @@ -243,18 +236,19 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { if (isFallbackEnabled()) { candidates = getFallBackCandidates(servers); } - candidates = (candidates == null || candidates.isEmpty()) ? - Lists.newArrayList(BOGUS_SERVER_NAME) : candidates; + candidates = (candidates == null || candidates.isEmpty()) + ? Lists.newArrayList(BOGUS_SERVER_NAME) + : candidates; result.add(Pair.newPair(fallbackRegions, candidates)); } return result; - } catch(IOException e) { + } catch (IOException e) { throw new HBaseIOException("Failed to generate group assignments", e); } } private List filterOfflineServers(RSGroupInfo RSGroupInfo, - List onlineServers) { + List onlineServers) { if (RSGroupInfo != null) { return filterServers(RSGroupInfo.getServers(), onlineServers); } else { @@ -271,7 +265,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { *

      * TODO: consider using HashSet to pursue O(1) for contains() throughout the calling chain if * needed. - * @param servers the servers + * @param servers the servers * @param onlineServers List of servers which are online. * @return the list */ @@ -286,13 +280,13 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } private Pair>>, List> - correctAssignments(Map>> existingAssignments) - throws IOException { + correctAssignments(Map>> existingAssignments) + throws IOException { // To return Map>> correctAssignments = new HashMap<>(); List regionPlansForMisplacedRegions = new ArrayList<>(); for (Map.Entry>> assignments : existingAssignments - .entrySet()) { + .entrySet()) { TableName tableName = assignments.getKey(); Map> clusterLoad = assignments.getValue(); Map> correctServerRegion = new TreeMap<>(); @@ -310,8 +304,9 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { for (Map.Entry> serverRegionMap : clusterLoad.entrySet()) { ServerName currentHostServer = serverRegionMap.getKey(); List regionInfoList = serverRegionMap.getValue(); - if (targetRSGInfo == null - || !targetRSGInfo.containsServer(currentHostServer.getAddress())) { + if ( + targetRSGInfo == null || !targetRSGInfo.containsServer(currentHostServer.getAddress()) + ) { regionInfoList.forEach(regionInfo -> { regionPlansForMisplacedRegions.add(new RegionPlan(regionInfo, currentHostServer, null)); }); @@ -323,7 +318,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } // Return correct assignments and region movement plan for mis-placed regions together return new Pair>>, List>( - correctAssignments, regionPlansForMisplacedRegions); + correctAssignments, regionPlansForMisplacedRegions); } @Override @@ -354,8 +349,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { Class balancerClass = conf.getClass(HBASE_RSGROUP_LOADBALANCER_CLASS, StochasticLoadBalancer.class, LoadBalancer.class); if (this.getClass().isAssignableFrom(balancerClass)) { - LOG.warn("The internal balancer of RSGroupBasedLoadBalancer cannot be itself, " + - "falling back to the default LoadBalancer class"); + LOG.warn("The internal balancer of RSGroupBasedLoadBalancer cannot be itself, " + + "falling back to the default LoadBalancer class"); balancerClass = LoadBalancerFactory.getDefaultLoadBalancerClass(); } internalBalancer = ReflectionUtils.newInstance(balancerClass); @@ -389,8 +384,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { public synchronized void onConfigurationChange(Configuration conf) { boolean newFallbackEnabled = conf.getBoolean(FALLBACK_GROUP_ENABLE_KEY, false); if (fallbackEnabled != newFallbackEnabled) { - LOG.info("Changing the value of {} from {} to {}", FALLBACK_GROUP_ENABLE_KEY, - fallbackEnabled, newFallbackEnabled); + LOG.info("Changing the value of {} from {} to {}", FALLBACK_GROUP_ENABLE_KEY, fallbackEnabled, + newFallbackEnabled); fallbackEnabled = newFallbackEnabled; } internalBalancer.onConfigurationChange(conf); diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java index e64e2d20847..d2527790b09 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; - import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.net.Address; @@ -31,9 +28,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Interface used to manage RSGroupInfo storage. An implementation - * has the option to support offline mode. - * See {@link RSGroupBasedLoadBalancer} + * Interface used to manage RSGroupInfo storage. An implementation has the option to support offline + * mode. See {@link RSGroupBasedLoadBalancer} */ @InterfaceAudience.Private public interface RSGroupInfoManager { @@ -41,13 +37,13 @@ public interface RSGroupInfoManager { String REASSIGN_WAIT_INTERVAL_KEY = "hbase.rsgroup.reassign.wait"; long DEFAULT_REASSIGN_WAIT_INTERVAL = 30 * 1000L; - //Assigned before user tables + // Assigned before user tables TableName RSGROUP_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); String rsGroupZNode = "rsgroup"; byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i"); - byte[] ROW_KEY = {0}; + byte[] ROW_KEY = { 0 }; void start(); @@ -63,13 +59,13 @@ public interface RSGroupInfoManager { /** * Move servers to a new group. - * @param servers list of servers, must be part of the same group + * @param servers list of servers, must be part of the same group * @param srcGroup groupName being moved from * @param dstGroup groupName being moved to * @return Set of servers moved (May be a subset of {@code servers}). */ Set

<Address> moveServers(Set<Address>
      servers, String srcGroup, String dstGroup) - throws IOException; + throws IOException; /** * Gets the group info of server. @@ -88,9 +84,8 @@ public interface RSGroupInfoManager { /** * Set the group membership of a set of tables - * * @param tableNames set of tables to move - * @param groupName name of group of tables to move to + * @param groupName name of group of tables to move to */ void moveTables(Set tableNames, String groupName) throws IOException; @@ -106,20 +101,19 @@ public interface RSGroupInfoManager { /** * Whether the manager is able to fully return group metadata - * * @return whether the manager is in online mode */ boolean isOnline(); /** * Move servers and tables to a new group. - * @param servers list of servers, must be part of the same group - * @param tables set of tables to move + * @param servers list of servers, must be part of the same group + * @param tables set of tables to move * @param srcGroup groupName being moved from * @param dstGroup groupName being moved to */ - void moveServersAndTables(Set
<Address> servers, Set<TableName> tables, - String srcGroup, String dstGroup) throws IOException; + void moveServersAndTables(Set<Address>
      servers, Set tables, String srcGroup, + String dstGroup) throws IOException; /** * Remove decommissioned servers from rsgroup @@ -143,7 +137,7 @@ public interface RSGroupInfoManager { /** * Update RSGroup configuration - * @param groupName the group name + * @param groupName the group name * @param configuration new configuration of the group name to be set * @throws IOException if a remote or network exception occurs */ diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 143c7b88ed3..08baab22abb 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,6 @@ import java.util.OptionalLong; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -75,14 +74,15 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.hadoop.util.Shell; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; + /** * This is an implementation of {@link RSGroupInfoManager} which makes use of an HBase table as the * persistence store for the group information. It also makes use of zookeeper to store group @@ -155,9 +155,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { return; } - rsgroupMappingScript = new Shell.ShellCommandExecutor( - new String[] { script, "", "" }, null, null, - conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds + rsgroupMappingScript = new Shell.ShellCommandExecutor(new String[] { script, "", "" }, null, + null, conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds ); } @@ -206,8 +205,10 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { @Override public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException { checkGroupName(rsGroupInfo.getName()); - if (rsGroupMap.get(rsGroupInfo.getName()) != null || - rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + if ( + rsGroupMap.get(rsGroupInfo.getName()) != null + || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP) + ) { throw new DoNotRetryIOException("Group already exists: " + rsGroupInfo.getName()); } Map newGroupMap = Maps.newHashMap(rsGroupMap); @@ -241,16 +242,16 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { @Override public synchronized Set
<Address> moveServers(Set<Address>
servers, String srcGroup, - String dstGroup) throws IOException { + String dstGroup) throws IOException { RSGroupInfo src = getRSGroupInfo(srcGroup); RSGroupInfo dst = getRSGroupInfo(dstGroup); Set<Address>
      movedServers = new HashSet<>(); // If destination is 'default' rsgroup, only add servers that are online. If not online, drop // it. If not 'default' group, add server to 'dst' rsgroup EVEN IF IT IS NOT online (could be a // rsgroup of dead servers that are to come back later). - Set
      onlineServers = - dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) ? getOnlineServers(this.masterServices) - : null; + Set
      onlineServers = dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) + ? getOnlineServers(this.masterServices) + : null; for (Address el : servers) { src.removeServer(el); if (onlineServers != null) { @@ -293,7 +294,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { @Override public synchronized void moveTables(Set tableNames, String groupName) - throws IOException { + throws IOException { // Check if rsGroup contains the destination rsgroup if (groupName != null && !rsGroupMap.containsKey(groupName)) { throw new DoNotRetryIOException("Group " + groupName + " does not exist"); @@ -347,7 +348,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { @Override public void moveServersAndTables(Set
      servers, Set tables, String srcGroup, - String dstGroup) throws IOException { + String dstGroup) throws IOException { // get server's group RSGroupInfo srcGroupInfo = getRSGroupInfo(srcGroup); RSGroupInfo dstGroupInfo = getRSGroupInfo(dstGroup); @@ -411,23 +412,21 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { throw new ConstraintException("Group already exists: " + newName); } - Map newGroupMap = Maps.newHashMap(rsGroupMap); + Map newGroupMap = Maps.newHashMap(rsGroupMap); newGroupMap.remove(oldName); - RSGroupInfo newGroup = new RSGroupInfo(newName, - (SortedSet
<Address>) oldGroup.getServers(), oldGroup.getTables()); + RSGroupInfo newGroup = + new RSGroupInfo(newName, (SortedSet<Address>
      ) oldGroup.getServers(), oldGroup.getTables()); newGroupMap.put(newName, newGroup); flushConfig(newGroupMap); } /** - * Will try to get the rsgroup from {@code tableMap} first - * then try to get the rsgroup from {@code script} - * try to get the rsgroup from the {@link NamespaceDescriptor} lastly. - * If still not present, return default group. + * Will try to get the rsgroup from {@code tableMap} first then try to get the rsgroup from + * {@code script} try to get the rsgroup from the {@link NamespaceDescriptor} lastly. If still not + * present, return default group. */ @Override - public RSGroupInfo determineRSGroupInfoForTable(TableName tableName) - throws IOException { + public RSGroupInfo determineRSGroupInfoForTable(TableName tableName) throws IOException { RSGroupInfo groupFromOldRSGroupInfo = getRSGroup(getRSGroupOfTable(tableName)); if (groupFromOldRSGroupInfo != null) { return groupFromOldRSGroupInfo; @@ -442,8 +441,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { ClusterSchema clusterSchema = masterServices.getClusterSchema(); if (clusterSchema == null) { if (TableName.isMetaTableName(tableName)) { - LOG.info("Can not get the namespace rs group config for meta table, since the" + - " meta table is not online yet, will use default group to assign meta first"); + LOG.info("Can not get the namespace rs group config for meta table, since the" + + " meta table is not online yet, will use default group to assign meta first"); } else { LOG.warn("ClusterSchema is null, can only use default rsgroup, should not happen?"); } @@ -460,16 +459,16 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { @Override public void updateRSGroupConfig(String groupName, Map configuration) - throws IOException { + throws IOException { if (RSGroupInfo.DEFAULT_GROUP.equals(groupName)) { // We do not persist anything of default group, therefore, it is not supported to update // default group's configuration which lost once master down. 
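determineRSGroupInfoForTable above falls back to the table's NamespaceDescriptor last; a small sketch of how a namespace advertises its group (namespace, table and group names invented; preCreateNamespace in RSGroupAdminEndpoint rejects the request unless the group already exists).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class NamespaceGroupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = conn.getAdmin()) {
      // Bind the namespace to an RSGroup via the descriptor property read above.
      admin.createNamespace(NamespaceDescriptor.create("sales")
        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "group_sales").build());
      // Tables in the namespace then resolve to 'group_sales' when neither the group table
      // nor the mapping script provides an answer.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("sales", "orders"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build());
    }
  }
}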
- throw new ConstraintException("configuration of " + RSGroupInfo.DEFAULT_GROUP - + " can't be stored persistently"); + throw new ConstraintException( + "configuration of " + RSGroupInfo.DEFAULT_GROUP + " can't be stored persistently"); } RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); new HashSet<>(rsGroupInfo.getConfiguration().keySet()) - .forEach(rsGroupInfo::removeConfiguration); + .forEach(rsGroupInfo::removeConfiguration); configuration.forEach(rsGroupInfo::setConfiguration); flushConfig(); } @@ -477,7 +476,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { List retrieveGroupListFromGroupTable() throws IOException { List rsGroupInfoList = Lists.newArrayList(); try (Table table = conn.getTable(RSGROUP_TABLE_NAME); - ResultScanner scanner = table.getScanner(new Scan())) { + ResultScanner scanner = table.getScanner(new Scan())) { for (Result result;;) { result = scanner.next(); if (result == null) { @@ -554,8 +553,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { // This is added to the last of the list so it overwrites the 'default' rsgroup loaded // from region group table or zk - groupList.add( - new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers(groupList), orphanTables)); + groupList + .add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers(groupList), orphanTables)); // populate the data HashMap newGroupMap = Maps.newHashMap(); @@ -571,7 +570,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { } private synchronized Map flushConfigTable(Map groupMap) - throws IOException { + throws IOException { Map newTableMap = Maps.newHashMap(); List mutations = Lists.newArrayList(); @@ -619,9 +618,12 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { Map oldGroupMap = Maps.newHashMap(rsGroupMap); RSGroupInfo oldDefaultGroup = oldGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); - if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ || - !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables()) - /* compare tables in default group */) { + if ( + !oldGroupMap.equals(newGroupMap) + /* compare both tables and servers in other groups */ || !oldDefaultGroup.getTables() + .equals(newDefaultGroup.getTables()) + /* compare tables in default group */ + ) { throw new IOException("Only servers in default group can be updated during offline mode"); } @@ -680,7 +682,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { * Make changes visible. Caller must be synchronized on 'this'. */ private void resetRSGroupAndTableMaps(Map newRSGroupMap, - Map newTableMap) { + Map newTableMap) { // Make maps Immutable. this.rsGroupMap = Collections.unmodifiableMap(newRSGroupMap); this.tableMap = Collections.unmodifiableMap(newTableMap); @@ -719,7 +721,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { // Called by ServerEventsListenerThread. Presume it has lock on this manager when it runs. private SortedSet
<Address> getDefaultServers(List<RSGroupInfo> rsGroupInfoList) - throws IOException { + throws IOException { // Build a list of servers in other groups than default group, from rsGroupMap Set<Address>
      serversInOtherGroup = new HashSet<>(); for (RSGroupInfo group : rsGroupInfoList) { @@ -866,8 +868,10 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { } // wait for region to be online int tries = 600; - while (!(masterServices.getMasterProcedureExecutor().isFinished(procId)) && - masterServices.getMasterProcedureExecutor().isRunning() && tries > 0) { + while ( + !(masterServices.getMasterProcedureExecutor().isFinished(procId)) + && masterServices.getMasterProcedureExecutor().isRunning() && tries > 0 + ) { try { Thread.sleep(100); } catch (InterruptedException e) { diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java index 6936c721137..8e00e57ae51 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; import java.util.Arrays; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -40,8 +38,8 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; /** - * This script takes an rsgroup as argument and compacts part/all of regions of that table - * based on the table's TTL. + * This script takes an rsgroup as argument and compacts part/all of regions of that table based on + * the table's TTL. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { @@ -54,8 +52,7 @@ public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { } public int compactTTLRegionsOnGroup(Configuration conf, String rsgroup, int concurrency, - long sleep, int numServers, int numRegions, boolean dryRun, boolean skipWait) - throws Exception { + long sleep, int numServers, int numRegions, boolean dryRun, boolean skipWait) throws Exception { Connection conn = ConnectionFactory.createConnection(conf); RSGroupAdmin rsGroupAdmin = new RSGroupAdminClient(conn); @@ -68,7 +65,7 @@ public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { for (TableName tableName : rsGroupInfo.getTables()) { int status = compactRegionsTTLOnTable(conf, tableName.getNameAsString(), concurrency, sleep, - numServers, numRegions, dryRun, skipWait); + numServers, numRegions, dryRun, skipWait); if (status != 0) { LOG.error("Failed to compact table: " + tableName); return status; @@ -80,13 +77,8 @@ public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { protected Options getOptions() { Options options = getCommonOptions(); - options.addOption( - Option.builder("rsgroup") - .required() - .desc("Tables of rsgroup to be compacted") - .hasArg() - .build() - ); + options.addOption(Option.builder("rsgroup").required().desc("Tables of rsgroup to be compacted") + .hasArg().build()); return options; } @@ -100,9 +92,8 @@ public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println( - "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: " - + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } @@ -122,7 +113,7 @@ public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { Configuration conf = getConf(); return compactTTLRegionsOnGroup(conf, rsgroup, concurrency, sleep, numServers, numRegions, - dryRun, skipWait); + dryRun, skipWait); } public static void main(String[] args) throws Exception { diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java index 42472d3cd37..01d1d41cfb7 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
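A hedged sketch of driving the RSGroupMajorCompactionTTL tool above from Java rather than the shell; only the -rsgroup option is taken from this patch, the remaining options come from getCommonOptions() in the MajorCompactorTTL base class, and the group name is invented.

import org.apache.hadoop.hbase.rsgroup.RSGroupMajorCompactionTTL;

public class CompactGroupByTtlSketch {
  public static void main(String[] args) throws Exception {
    // Major-compact TTL-expired data for every table in the given RSGroup.
    RSGroupMajorCompactionTTL.main(new String[] { "-rsgroup", "group_sales" });
  }
}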
*/ - package org.apache.hadoop.hbase.rsgroup; import java.util.ArrayList; @@ -39,66 +38,61 @@ final class RSGroupProtobufUtil { private RSGroupProtobufUtil() { } - static void populateBalanceRSGroupResponse(BalanceRSGroupResponse.Builder responseBuilder, BalanceResponse response) { - responseBuilder - .setBalanceRan(response.isBalancerRan()) + static void populateBalanceRSGroupResponse(BalanceRSGroupResponse.Builder responseBuilder, + BalanceResponse response) { + responseBuilder.setBalanceRan(response.isBalancerRan()) .setMovesCalculated(response.getMovesCalculated()) .setMovesExecuted(response.getMovesExecuted()); } static BalanceResponse toBalanceResponse(BalanceRSGroupResponse response) { - return BalanceResponse.newBuilder() - .setBalancerRan(response.getBalanceRan()) + return BalanceResponse.newBuilder().setBalancerRan(response.getBalanceRan()) .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesCalculated() : 0) .build(); } - static BalanceRSGroupRequest createBalanceRSGroupRequest(String groupName, BalanceRequest request) { - return BalanceRSGroupRequest.newBuilder() - .setRSGroupName(groupName) - .setDryRun(request.isDryRun()) - .setIgnoreRit(request.isIgnoreRegionsInTransition()) - .build(); + static BalanceRSGroupRequest createBalanceRSGroupRequest(String groupName, + BalanceRequest request) { + return BalanceRSGroupRequest.newBuilder().setRSGroupName(groupName) + .setDryRun(request.isDryRun()).setIgnoreRit(request.isIgnoreRegionsInTransition()).build(); } static BalanceRequest toBalanceRequest(BalanceRSGroupRequest request) { - return BalanceRequest.newBuilder() - .setDryRun(request.hasDryRun() && request.getDryRun()) - .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()) - .build(); + return BalanceRequest.newBuilder().setDryRun(request.hasDryRun() && request.getDryRun()) + .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()).build(); } static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { RSGroupInfo rsGroupInfo = new RSGroupInfo(proto.getName()); - for(HBaseProtos.ServerName el: proto.getServersList()) { + for (HBaseProtos.ServerName el : proto.getServersList()) { rsGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort())); } - for(TableProtos.TableName pTableName: proto.getTablesList()) { + for (TableProtos.TableName pTableName : proto.getTablesList()) { rsGroupInfo.addTable(ProtobufUtil.toTableName(pTableName)); } - proto.getConfigurationList().forEach(pair -> - rsGroupInfo.setConfiguration(pair.getName(), pair.getValue())); + proto.getConfigurationList() + .forEach(pair -> rsGroupInfo.setConfiguration(pair.getName(), pair.getValue())); return rsGroupInfo; } static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { List tables = new ArrayList<>(pojo.getTables().size()); - for(TableName arg: pojo.getTables()) { + for (TableName arg : pojo.getTables()) { tables.add(ProtobufUtil.toProtoTableName(arg)); } List hostports = new ArrayList<>(pojo.getServers().size()); - for(Address el: pojo.getServers()) { - hostports.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); + for (Address el : pojo.getServers()) { + hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } - List configuration = pojo.getConfiguration().entrySet() - .stream().map(entry -> NameStringPair.newBuilder() + 
List< + NameStringPair> configuration = + pojo + .getConfiguration().entrySet().stream().map(entry -> NameStringPair.newBuilder() .setName(entry.getKey()).setValue(entry.getValue()).build()) - .collect(Collectors.toList()); - return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()) - .addAllServers(hostports).addAllTables(tables).addAllConfiguration(configuration).build(); + .collect(Collectors.toList()); + return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports) + .addAllTables(tables).addAllConfiguration(configuration).build(); } } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java index e410ec04eba..4e4d88fe23d 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; import org.apache.hadoop.hbase.ServerName; diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java index d091b3cfbde..14540ad898b 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.yetus.audience.InterfaceAudience; /** - * Marker Interface. RSGroups feature will check for a LoadBalancer - * marked with this Interface before it runs. + * Marker Interface. RSGroups feature will check for a LoadBalancer marked with this Interface + * before it runs. */ @InterfaceAudience.Private public interface RSGroupableBalancer extends LoadBalancer { diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java index 4644b66cae5..d55483b5aaa 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,21 +53,19 @@ import org.apache.hadoop.hbase.util.Bytes; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Base UT of RSGroupableBalancer. */ -public class RSGroupableBalancerTestBase extends BalancerTestBase{ +public class RSGroupableBalancerTestBase extends BalancerTestBase { - static String[] groups = new String[] {RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4"}; + static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; static TableName table0 = TableName.valueOf("dt0"); - static TableName[] tables = - new TableName[] { TableName.valueOf("dt1"), - TableName.valueOf("dt2"), - TableName.valueOf("dt3"), - TableName.valueOf("dt4")}; + static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), + TableName.valueOf("dt3"), TableName.valueOf("dt4") }; static List servers; static Map groupMap; static Map tableMap = new HashMap<>(); @@ -77,11 +75,10 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ static Configuration conf = HBaseConfiguration.create(); /** - * Invariant is that all servers of a group have load between floor(avg) and - * ceiling(avg) number of regions. + * Invariant is that all servers of a group have load between floor(avg) and ceiling(avg) number + * of regions. */ - protected void assertClusterAsBalanced( - ArrayListMultimap groupLoadMap) { + protected void assertClusterAsBalanced(ArrayListMultimap groupLoadMap) { for (String gName : groupLoadMap.keySet()) { List groupLoad = groupLoadMap.get(gName); int numServers = groupLoad.size(); @@ -115,10 +112,8 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ /** * All regions have an assignment. */ - protected void assertImmediateAssignment(List regions, - List servers, - Map assignments) - throws IOException { + protected void assertImmediateAssignment(List regions, List servers, + Map assignments) throws IOException { for (RegionInfo region : regions) { assertTrue(assignments.containsKey(region)); ServerName server = assignments.get(region); @@ -128,7 +123,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ assertTrue(StringUtils.isNotEmpty(groupName)); RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); assertTrue("Region is not correctly assigned to group servers.", - gInfo.containsServer(server.getAddress())); + gInfo.containsServer(server.getAddress())); } } @@ -138,21 +133,19 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ * Must meet the following conditions: *
<ul>
 * <li>Every input region has an assignment, and to an online server
- * <li>If a region had an existing assignment to a server with the same
- * address a a currently online server, it will be assigned to it
+ * <li>If a region had an existing assignment to a server with the same address a a currently
+ * online server, it will be assigned to it
 * </ul>
      */ - protected void assertRetainedAssignment( - Map existing, List servers, - Map> assignment) - throws FileNotFoundException, IOException { + protected void assertRetainedAssignment(Map existing, + List servers, Map> assignment) + throws FileNotFoundException, IOException { // Verify condition 1, every region assigned, and to online server Set onlineServerSet = new TreeSet<>(servers); Set assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR); for (Map.Entry> a : assignment.entrySet()) { - assertTrue( - "Region assigned to server that was not listed as online", - onlineServerSet.contains(a.getKey())); + assertTrue("Region assigned to server that was not listed as online", + onlineServerSet.contains(a.getKey())); for (RegionInfo r : a.getValue()) { assignedRegions.add(r); } @@ -170,17 +163,14 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ for (RegionInfo r : a.getValue()) { ServerName oldAssignedServer = existing.get(r); TableName tableName = r.getTable(); - String groupName = - getMockedGroupInfoManager().getRSGroupOfTable(tableName); + String groupName = getMockedGroupInfoManager().getRSGroupOfTable(tableName); assertTrue(StringUtils.isNotEmpty(groupName)); - RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup( - groupName); - assertTrue( - "Region is not correctly assigned to group servers.", - gInfo.containsServer(currentServer.getAddress())); - if (oldAssignedServer != null - && onlineHostNames.contains(oldAssignedServer - .getHostname())) { + RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(currentServer.getAddress())); + if ( + oldAssignedServer != null && onlineHostNames.contains(oldAssignedServer.getHostname()) + ) { // this region was previously assigned somewhere, and that // host is still around, then the host must have been is a // different group. 
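The javadoc reformatted in the hunks above documents the invariants these test helpers assert: every server in a group should end up holding between floor(avg) and ceil(avg) regions, and a region that was previously assigned to a still-online host should stay there. As a quick reference only -- this sketch is not part of the patch, and the class and parameter names below are invented for illustration -- the per-group balance check amounts to:

import java.util.List;
import java.util.Map;

/** Illustrative sketch only: the floor/ceiling balance invariant described in the test javadoc. */
final class GroupBalanceSketch {
  /** Returns true if every server's load sits within [floor(avg), ceil(avg)] for its group. */
  static boolean isBalanced(Map<String, List<Integer>> regionsPerServerByGroup) {
    for (List<Integer> loads : regionsPerServerByGroup.values()) {
      // Average load across the servers of one group.
      double avg = loads.stream().mapToInt(Integer::intValue).sum() / (double) loads.size();
      int floor = (int) Math.floor(avg);
      int ceil = (int) Math.ceil(avg);
      for (int load : loads) {
        if (load < floor || load > ceil) {
          return false; // a server is outside the allowed band
        }
      }
    }
    return true;
  }
}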
@@ -192,8 +182,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ } } - protected String printStats( - ArrayListMultimap groupBasedLoad) { + protected String printStats(ArrayListMultimap groupBasedLoad) { StringBuilder sb = new StringBuilder(); sb.append("\n"); for (String groupName : groupBasedLoad.keySet()) { @@ -206,16 +195,15 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ int totalRegions = 0; sb.append("Per Server Load: \n"); for (ServerAndLoad sLoad : groupLoad) { - sb.append("Server :" + sLoad.getServerName() + " Load : " - + sLoad.getLoad() + "\n"); + sb.append("Server :" + sLoad.getServerName() + " Load : " + sLoad.getLoad() + "\n"); totalRegions += sLoad.getLoad(); } sb.append(" Group Statistics : \n"); float average = (float) totalRegions / numServers; int max = (int) Math.ceil(average); int min = (int) Math.floor(average); - sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg=" - + average + " max=" + max + " min=" + min + "]"); + sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + average + " max=" + max + + " min=" + min + "]"); sb.append("\n"); sb.append("==============================="); sb.append("\n"); @@ -223,34 +211,30 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ return sb.toString(); } - protected ArrayListMultimap convertToGroupBasedMap( - final Map> serversMap) throws IOException { - ArrayListMultimap loadMap = ArrayListMultimap - .create(); + protected ArrayListMultimap + convertToGroupBasedMap(final Map> serversMap) throws IOException { + ArrayListMultimap loadMap = ArrayListMultimap.create(); for (RSGroupInfo gInfo : getMockedGroupInfoManager().listRSGroups()) { Set
      groupServers = gInfo.getServers(); for (Address hostPort : groupServers) { ServerName actual = null; - for(ServerName entry: servers) { - if(entry.getAddress().equals(hostPort)) { + for (ServerName entry : servers) { + if (entry.getAddress().equals(hostPort)) { actual = entry; break; } } List regions = serversMap.get(actual); assertTrue("No load for " + actual, regions != null); - loadMap.put(gInfo.getName(), - new ServerAndLoad(actual, regions.size())); + loadMap.put(gInfo.getName(), new ServerAndLoad(actual, regions.size())); } } return loadMap; } - protected ArrayListMultimap reconcile( - ArrayListMultimap previousLoad, - List plans) { - ArrayListMultimap result = ArrayListMultimap - .create(); + protected ArrayListMultimap + reconcile(ArrayListMultimap previousLoad, List plans) { + ArrayListMultimap result = ArrayListMultimap.create(); result.putAll(previousLoad); if (plans != null) { for (RegionPlan plan : plans) { @@ -263,9 +247,8 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ return result; } - protected void updateLoad( - ArrayListMultimap previousLoad, - final ServerName sn, final int diff) { + protected void updateLoad(ArrayListMultimap previousLoad, + final ServerName sn, final int diff) { for (String groupName : previousLoad.keySet()) { ServerAndLoad newSAL = null; ServerAndLoad oldSAL = null; @@ -297,7 +280,6 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ /** * Generate a list of regions evenly distributed between the tables. - * * @param numRegions The number of regions to be generated. * @return List of RegionInfo. */ @@ -312,21 +294,16 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); int tableIndex = (i + regionIdx) % tables.length; - regions.add(RegionInfoBuilder.newBuilder(tables[tableIndex]) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(regionId++) - .build()); + regions.add(RegionInfoBuilder.newBuilder(tables[tableIndex]).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(regionId++).build()); } return regions; } /** * Generate assigned regions to a given server using group information. - * * @param numRegions the num regions to generate - * @param sn the servername + * @param sn the servername * @return the list of regions * @throws java.io.IOException Signals that an I/O exception has occurred. */ @@ -338,12 +315,8 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ Bytes.putInt(end, 0, (numRegions << 1) + 1); for (int i = 0; i < numRegions; i++) { TableName tableName = getTableName(sn); - regions.add(RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(regionId++) - .build()); + regions.add(RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(regionId++).build()); } return regions; } @@ -361,13 +334,12 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ /** * Construct group info, with each group having at least one server. 
- * * @param servers the servers - * @param groups the groups + * @param groups the groups * @return the map */ - protected static Map constructGroupInfo( - List servers, String[] groups) { + protected static Map constructGroupInfo(List servers, + String[] groups) { assertTrue(servers != null); assertTrue(servers.size() >= groups.length); int index = 0; @@ -381,8 +353,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ Random rand = ThreadLocalRandom.current(); while (index < servers.size()) { int grpIndex = rand.nextInt(groups.length); - groupMap.get(groups[grpIndex]).addServer( - servers.get(index).getAddress()); + groupMap.get(groups[grpIndex]).addServer(servers.get(index).getAddress()); index++; } return groupMap; @@ -433,16 +404,14 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ return groupMap.get(invocation.getArgument(0)); } }); - Mockito.when(gm.listRSGroups()).thenReturn( - Lists.newLinkedList(groupMap.values())); + Mockito.when(gm.listRSGroups()).thenReturn(Lists.newLinkedList(groupMap.values())); Mockito.when(gm.isOnline()).thenReturn(true); - Mockito.when(gm.getRSGroupOfTable(Mockito.any())) - .thenAnswer(new Answer() { - @Override - public String answer(InvocationOnMock invocation) throws Throwable { - return tableMap.get(invocation.getArgument(0)); - } - }); + Mockito.when(gm.getRSGroupOfTable(Mockito.any())).thenAnswer(new Answer() { + @Override + public String answer(InvocationOnMock invocation) throws Throwable { + return tableMap.get(invocation.getArgument(0)); + } + }); return gm; } @@ -450,15 +419,15 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{ TableName tableName = null; RSGroupInfoManager gm = getMockedGroupInfoManager(); RSGroupInfo groupOfServer = null; - for(RSGroupInfo gInfo : gm.listRSGroups()){ - if(gInfo.containsServer(sn.getAddress())){ + for (RSGroupInfo gInfo : gm.listRSGroups()) { + if (gInfo.containsServer(sn.getAddress())) { groupOfServer = gInfo; break; } } - for(TableDescriptor desc : tableDescs){ - if(gm.getRSGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){ + for (TableDescriptor desc : tableDescs) { + if (gm.getRSGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())) { tableName = desc.getTableName(); } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index c2e9e0f34bd..93edc25aec4 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,6 +50,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; + /** * Test RSGroupBasedLoadBalancer with SimpleLoadBalancer as internal balancer */ @@ -57,7 +58,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupBasedLoadBalancer.class); + HBaseClassTestRule.forClass(TestRSGroupBasedLoadBalancer.class); private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupBasedLoadBalancer.class); private static RSGroupBasedLoadBalancer loadBalancer; @@ -76,10 +77,8 @@ public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase { } /** - * Test the load balancing algorithm. - * - * Invariant is that all servers of the group should be hosting either floor(average) or - * ceiling(average) + * Test the load balancing algorithm. Invariant is that all servers of the group should be hosting + * either floor(average) or ceiling(average) */ @Test public void testBalanceCluster() throws Exception { @@ -92,7 +91,7 @@ public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase { ArrayListMultimap list = convertToGroupBasedMap(servers); LOG.info("Mock Cluster : " + printStats(list)); Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(servers); + (Map) mockClusterServersWithTables(servers); List plans = loadBalancer.balanceCluster(LoadOfAllTable); ArrayListMultimap balancedCluster = reconcile(list, plans); LOG.info("Mock Balance : " + printStats(balancedCluster)); @@ -101,34 +100,29 @@ public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase { } /** - * Tests the bulk assignment used during cluster startup. - * - * Round-robin. Should yield a balanced cluster so same invariant as the - * load balancer holds, all servers holding either floor(avg) or + * Tests the bulk assignment used during cluster startup. Round-robin. Should yield a balanced + * cluster so same invariant as the load balancer holds, all servers holding either floor(avg) or * ceiling(avg). 
*/ @Test public void testBulkAssignment() throws Exception { List regions = randomRegions(25); - Map> assignments = loadBalancer - .roundRobinAssignment(regions, servers); - //test empty region/servers scenario - //this should not throw an NPE + Map> assignments = + loadBalancer.roundRobinAssignment(regions, servers); + // test empty region/servers scenario + // this should not throw an NPE loadBalancer.roundRobinAssignment(regions, Collections.emptyList()); - //test regular scenario + // test regular scenario assertTrue(assignments.keySet().size() == servers.size()); for (ServerName sn : assignments.keySet()) { List regionAssigned = assignments.get(sn); for (RegionInfo region : regionAssigned) { TableName tableName = region.getTable(); - String groupName = - getMockedGroupInfoManager().getRSGroupOfTable(tableName); + String groupName = getMockedGroupInfoManager().getRSGroupOfTable(tableName); assertTrue(StringUtils.isNotEmpty(groupName)); - RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup( - groupName); - assertTrue( - "Region is not correctly assigned to group servers.", - gInfo.containsServer(sn.getAddress())); + RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(sn.getAddress())); } } ArrayListMultimap loadMap = convertToGroupBasedMap(assignments); @@ -148,10 +142,10 @@ public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase { inputForTest.put(region, sn); } } - //verify region->null server assignment is handled + // verify region->null server assignment is handled inputForTest.put(randomRegions(1).get(0), null); - Map> newAssignment = loadBalancer - .retainAssignment(inputForTest, servers); + Map> newAssignment = + loadBalancer.retainAssignment(inputForTest, servers); assertRetainedAssignment(inputForTest, servers, newAssignment); } @@ -164,24 +158,24 @@ public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase { onlineServers.addAll(servers); List regions = randomRegions(25); int bogusRegion = 0; - for(RegionInfo region : regions){ + for (RegionInfo region : regions) { String group = tableMap.get(region.getTable()); - if("dg3".equals(group) || "dg4".equals(group)){ + if ("dg3".equals(group) || "dg4".equals(group)) { bogusRegion++; } } Set
<Address> offlineServers = new HashSet
      (); offlineServers.addAll(groupMap.get("dg3").getServers()); offlineServers.addAll(groupMap.get("dg4").getServers()); - for(Iterator it = onlineServers.iterator(); it.hasNext();){ + for (Iterator it = onlineServers.iterator(); it.hasNext();) { ServerName server = it.next(); Address address = server.getAddress(); - if(offlineServers.contains(address)){ + if (offlineServers.contains(address)) { it.remove(); } } - Map> assignments = loadBalancer - .roundRobinAssignment(regions, onlineServers); + Map> assignments = + loadBalancer.roundRobinAssignment(regions, onlineServers); assertEquals(bogusRegion, assignments.get(LoadBalancer.BOGUS_SERVER_NAME).size()); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java index f9b5c9c503f..ee8922ebe85 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,10 +53,10 @@ import org.junit.experimental.categories.Category; */ @Category(LargeTests.class) public class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal - extends RSGroupableBalancerTestBase { + extends RSGroupableBalancerTestBase { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass( - TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule + .forClass(TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.class); private static RSGroupBasedLoadBalancer loadBalancer; @BeforeClass @@ -77,10 +77,10 @@ public class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal } private ServerMetrics mockServerMetricsWithReadRequests(ServerName server, - List regionsOnServer, long readRequestCount) { + List regionsOnServer, long readRequestCount) { ServerMetrics serverMetrics = mock(ServerMetrics.class); Map regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(RegionInfo info : regionsOnServer){ + for (RegionInfo info : regionsOnServer) { RegionMetrics rl = mock(RegionMetrics.class); when(rl.getReadRequestCount()).thenReturn(readRequestCount); when(rl.getWriteRequestCount()).thenReturn(0L); @@ -124,8 +124,8 @@ public class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal // serverC : 0,0,0 // so should move two regions from serverA to serverB & serverC serverMetricsMap = new TreeMap<>(); - serverMetricsMap.put(serverA, mockServerMetricsWithReadRequests(serverA, - regionsOnServerA, 1000)); + serverMetricsMap.put(serverA, + mockServerMetricsWithReadRequests(serverA, regionsOnServerA, 1000)); serverMetricsMap.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsOnServerB, 0)); serverMetricsMap.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsOnServerC, 0)); clusterStatus = mock(ClusterMetrics.class); @@ -133,12 +133,12 @@ public class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal 
loadBalancer.updateClusterMetrics(clusterStatus); Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(clusterState); + (Map) mockClusterServersWithTables(clusterState); List plans = loadBalancer.balanceCluster(LoadOfAllTable); Set regionsMoveFromServerA = new HashSet<>(); Set targetServers = new HashSet<>(); - for(RegionPlan plan : plans) { - if(plan.getSource().equals(serverA)) { + for (RegionPlan plan : plans) { + if (plan.getSource().equals(serverA)) { regionsMoveFromServerA.add(plan.getRegionInfo()); targetServers.add(plan.getDestination()); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java index 93739e722e9..3fe9db23d76 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,8 +58,8 @@ public class TestSCPWithReplicasWithRSGroup extends TestSCPBase { @Test public void testCrashTargetRs() throws Exception { HMaster master = util.getHBaseCluster().getMaster(); - util.waitFor(60000, (Predicate) () -> - master.isInitialized() && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); + util.waitFor(60000, (Predicate) () -> master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); testRecoveryAndDoubleExecution(false, false); } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestDetermineRSGroupInfoForTable.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestDetermineRSGroupInfoForTable.java index 68da177e192..4a9bdce6d80 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestDetermineRSGroupInfoForTable.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestDetermineRSGroupInfoForTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import static org.mockito.Mockito.when; import java.io.IOException; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -78,8 +77,7 @@ public class TestDetermineRSGroupInfoForTable { @BeforeClass public static void setUp() throws Exception { - UTIL.getConfiguration().set( - HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName()); UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, RSGroupAdminEndpoint.class.getName()); @@ -88,21 +86,20 @@ public class TestDetermineRSGroupInfoForTable { admin = UTIL.getAdmin(); rsGroupAdminClient = new RSGroupAdminClient(UTIL.getConnection()); - UTIL.waitFor(60000, (Predicate) () -> - master.isInitialized() && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); + UTIL.waitFor(60000, (Predicate) () -> master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); List cps = - master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class); + master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class); assertTrue(cps.size() > 0); rsGroupInfoManager = cps.get(0).getGroupInfoManager(); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); rsGroupAdminClient.addRSGroup(GROUP_NAME); - rsGroupAdminClient.moveServers( - Collections.singleton(rs.getServerName().getAddress()), GROUP_NAME); + rsGroupAdminClient.moveServers(Collections.singleton(rs.getServerName().getAddress()), + GROUP_NAME); admin.createNamespace(NamespaceDescriptor.create(NAMESPACE_NAME) - .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, GROUP_NAME) - .build()); + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, GROUP_NAME).build()); admin.createNamespace(NamespaceDescriptor.create(OTHER_NAMESPACE_NAME).build()); } @@ -115,8 +112,7 @@ public class TestDetermineRSGroupInfoForTable { @Test public void testByDefault() throws IOException { - RSGroupInfo group = - rsGroupInfoManager.determineRSGroupInfoForTable(TableName.valueOf("tb")); + RSGroupInfo group = rsGroupInfoManager.determineRSGroupInfoForTable(TableName.valueOf("tb")); assertEquals(group.getName(), RSGroupInfo.DEFAULT_GROUP); } @@ -125,8 +121,8 @@ public class TestDetermineRSGroupInfoForTable { RSGroupInfo group = rsGroupInfoManager.determineRSGroupInfoForTable(TABLE_NAME); assertEquals(group.getName(), GROUP_NAME); - group = rsGroupInfoManager.determineRSGroupInfoForTable( - TableName.valueOf(OTHER_NAMESPACE_NAME, "tb")); + group = rsGroupInfoManager + .determineRSGroupInfoForTable(TableName.valueOf(OTHER_NAMESPACE_NAME, "tb")); assertEquals(group.getName(), RSGroupInfo.DEFAULT_GROUP); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java index b862fb0386a..c17efb74b73 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static java.lang.Thread.sleep; import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -45,7 +44,7 @@ public class TestEnableRSGroups { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEnableRSGroups.class); + HBaseClassTestRule.forClass(TestEnableRSGroups.class); protected static final Logger LOG = LoggerFactory.getLogger(TestEnableRSGroups.class); @@ -72,8 +71,7 @@ public class TestEnableRSGroups { LOG.info("stopped master..."); final Configuration conf = TEST_UTIL.getMiniHBaseCluster().getConfiguration(); conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, RSGroupAdminEndpoint.class.getName()); - conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - RSGroupBasedLoadBalancer.class.getName()); + conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName()); TEST_UTIL.getMiniHBaseCluster().startMaster(); TEST_UTIL.getMiniHBaseCluster().waitForActiveAndReadyMaster(60000); @@ -84,7 +82,7 @@ public class TestEnableRSGroups { // wait RSGroupBasedLoadBalancer online RSGroupBasedLoadBalancer loadBalancer = - (RSGroupBasedLoadBalancer) TEST_UTIL.getMiniHBaseCluster().getMaster().getLoadBalancer(); + (RSGroupBasedLoadBalancer) TEST_UTIL.getMiniHBaseCluster().getMaster().getLoadBalancer(); long start = EnvironmentEdgeManager.currentTime(); while (EnvironmentEdgeManager.currentTime() - start <= 60000 && !loadBalancer.isOnline()) { LOG.info("waiting for rsgroup load balancer onLine..."); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java index e427d780c45..c066002427a 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import static org.junit.Assert.assertThrows; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -43,7 +42,7 @@ public class TestRSGroupConfig extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupConfig.class); + HBaseClassTestRule.forClass(TestRSGroupConfig.class); @Rule public TestName name = new TestName(); @@ -88,4 +87,4 @@ public class TestRSGroupConfig extends TestRSGroupsBase { assertEquals(0, configFromGroup.size()); } -} \ No newline at end of file +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java index f685780a14f..e8cc70d2785 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,21 +6,20 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rsgroup; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -36,12 +35,13 @@ import org.junit.After; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; public class TestRSGroupMajorCompactionTTL extends TestMajorCompactorTTL { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupMajorCompactionTTL.class); + HBaseClassTestRule.forClass(TestRSGroupMajorCompactionTTL.class); private final static int NUM_SLAVES_BASE = 6; @@ -58,12 +58,12 @@ public class TestRSGroupMajorCompactionTTL extends TestMajorCompactorTTL { MiniHBaseCluster cluster = utility.getHBaseCluster(); final HMaster master = cluster.getMaster(); - //wait for balancer to come online + // wait for balancer to come online utility.waitFor(60000, new Waiter.Predicate() { @Override public boolean evaluate() { - return master.isInitialized() && - ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); + return master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); } }); admin = utility.getAdmin(); @@ -92,8 +92,8 @@ public class TestRSGroupMajorCompactionTTL extends TestMajorCompactorTTL { } RSGroupMajorCompactionTTL compactor = new RSGroupMajorCompactionTTL(); - compactor.compactTTLRegionsOnGroup(utility.getConfiguration(), - RSGroupInfo.DEFAULT_GROUP, 1, 200, -1, -1, false, false); + compactor.compactTTLRegionsOnGroup(utility.getConfiguration(), RSGroupInfo.DEFAULT_GROUP, 1, + 200, -1, -1, false, false); for (TableName tableName : tableNames) { int numberOfRegions = admin.getRegions(tableName).size(); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java index d626ab364b0..8a72b9a594f 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,10 +51,8 @@ public class TestRSGroupMappingScript { @BeforeClass public static void setupScript() throws Exception { String currentDir = new File("").getAbsolutePath(); - UTIL.getConfiguration().set( - RSGroupMappingScript.RS_GROUP_MAPPING_SCRIPT, - currentDir + "/rsgroup_table_mapping.sh" - ); + UTIL.getConfiguration().set(RSGroupMappingScript.RS_GROUP_MAPPING_SCRIPT, + currentDir + "/rsgroup_table_mapping.sh"); } @Before @@ -81,7 +79,7 @@ public class TestRSGroupMappingScript { pw.close(); } boolean executable = script.setExecutable(true); - LOG.info("Created " + script + ", executable=" + executable); + LOG.info("Created " + script + ", executable=" + executable); verifyScriptContent(script); } @@ -96,23 +94,18 @@ public class TestRSGroupMappingScript { @Test public void testScript() throws Exception { RSGroupMappingScript script = new RSGroupMappingScript(UTIL.getConfiguration()); - TableName testNamespace = - TableName.valueOf("test", "should_be_in_test"); - String rsgroup = script.getRSGroup( - testNamespace.getNamespaceAsString(), testNamespace.getQualifierAsString() - ); + TableName testNamespace = TableName.valueOf("test", "should_be_in_test"); + String rsgroup = + script.getRSGroup(testNamespace.getNamespaceAsString(), testNamespace.getQualifierAsString()); Assert.assertEquals("test", rsgroup); - TableName otherName = - TableName.valueOf("whatever", "oh_foo_should_be_in_other"); + TableName otherName = TableName.valueOf("whatever", "oh_foo_should_be_in_other"); rsgroup = script.getRSGroup(otherName.getNamespaceAsString(), otherName.getQualifierAsString()); Assert.assertEquals("other", rsgroup); - TableName defaultName = - TableName.valueOf("nono", "should_be_in_default"); - rsgroup = script.getRSGroup( - defaultName.getNamespaceAsString(), defaultName.getQualifierAsString() - ); + TableName defaultName = TableName.valueOf("nono", "should_be_in_default"); + rsgroup = + script.getRSGroup(defaultName.getNamespaceAsString(), defaultName.getQualifierAsString()); Assert.assertEquals("default", rsgroup); } @@ -124,4 +117,3 @@ public class TestRSGroupMappingScript { } } - diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupUtil.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupUtil.java index 32d1e9a02f8..eddb7a15388 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupUtil.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -40,12 +39,12 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestRSGroupUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupUtil.class); + HBaseClassTestRule.forClass(TestRSGroupUtil.class); private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupUtil.class); @@ -61,21 +60,20 @@ public class TestRSGroupUtil { @BeforeClass public static void setUp() throws Exception { - UTIL.getConfiguration().set( - HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName()); UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, RSGroupAdminEndpoint.class.getName()); UTIL.startMiniCluster(5); master = UTIL.getMiniHBaseCluster().getMaster(); - UTIL.waitFor(60000, (Predicate) () -> - master.isInitialized() && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); + UTIL.waitFor(60000, (Predicate) () -> master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); rsGroupAdminClient = new RSGroupAdminClient(UTIL.getConnection()); List cps = - master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class); + master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class); assertTrue(cps.size() > 0); rsGroupInfoManager = cps.get(0).getGroupInfoManager(); } @@ -93,7 +91,8 @@ public class TestRSGroupUtil { HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); rsGroupAdminClient.addRSGroup(GROUP_NAME); - rsGroupAdminClient.moveServers(Collections.singleton(rs.getServerName().getAddress()), GROUP_NAME); + rsGroupAdminClient.moveServers(Collections.singleton(rs.getServerName().getAddress()), + GROUP_NAME); rsGroupInfoManager.refresh(); RSGroupInfo rsGroup = rsGroupInfoManager.getRSGroup(GROUP_NAME); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java index 18611b23b11..6bfe9578604 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; @@ -249,8 +248,8 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { if (regionsB == null) { return false; } - return getTableRegionMap().get(tableNameA).size() >= 1 && - getTableRegionMap().get(tableNameB).size() >= 1; + return getTableRegionMap().get(tableNameA).size() >= 1 + && getTableRegionMap().get(tableNameB).size() >= 1; } }); @@ -457,8 +456,7 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { } @Test - public void testNotMoveTableToNullRSGroupWhenCreatingExistingTable() - throws Exception { + public void testNotMoveTableToNullRSGroupWhenCreatingExistingTable() throws Exception { // Trigger TableName tn1 = TableName.valueOf("t1"); TEST_UTIL.createTable(tn1, "cf1"); @@ -470,19 +468,18 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { } // Wait then verify - // Could not verify until the rollback of CreateTableProcedure is done - // (that is, the coprocessor finishes its work), - // or the table is still in the "default" rsgroup even though HBASE-21866 - // is not fixed. + // Could not verify until the rollback of CreateTableProcedure is done + // (that is, the coprocessor finishes its work), + // or the table is still in the "default" rsgroup even though HBASE-21866 + // is not fixed. TEST_UTIL.waitFor(5000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return - (master.getMasterProcedureExecutor().getActiveExecutorCount() == 0); + return (master.getMasterProcedureExecutor().getActiveExecutorCount() == 0); } }); - SortedSet tables - = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables(); + SortedSet tables = + rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables(); assertTrue("Table 't1' must be in 'default' rsgroup", tables.contains(tn1)); // Cleanup @@ -506,9 +503,8 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { final TableName tb1 = TableName.valueOf("testRename"); TEST_UTIL.createTable(tb1, "tr"); rsGroupAdmin.moveTables(Sets.newHashSet(tb1), oldgroup.getName()); - TEST_UTIL.waitFor(1000, - (Waiter.Predicate) () -> - rsGroupAdmin.getRSGroupInfoOfTable(tb1).getServers().size() == 2); + TEST_UTIL.waitFor(1000, (Waiter.Predicate< + Exception>) () -> rsGroupAdmin.getRSGroupInfoOfTable(tb1).getServers().size() == 2); oldgroup = rsGroupAdmin.getRSGroupInfo(oldgroup.getName()); assertEquals(2, oldgroup.getServers().size()); assertEquals(oldgroup.getName(), rsGroupAdmin.getRSGroupInfoOfTable(tb1).getName()); @@ -520,15 +516,13 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { final TableName tb2 = TableName.valueOf("unmovedTable"); TEST_UTIL.createTable(tb2, "ut"); rsGroupAdmin.moveTables(Sets.newHashSet(tb2), normal.getName()); - TEST_UTIL.waitFor(1000, - (Waiter.Predicate) () -> - rsGroupAdmin.getRSGroupInfoOfTable(tb2).getServers().size() == 1); + TEST_UTIL.waitFor(1000, (Waiter.Predicate< + Exception>) () -> rsGroupAdmin.getRSGroupInfoOfTable(tb2).getServers().size() == 1); normal = rsGroupAdmin.getRSGroupInfo(normal.getName()); assertEquals(1, normal.getServers().size()); assertEquals(normal.getName(), rsGroupAdmin.getRSGroupInfoOfTable(tb2).getName()); assertTrue(normal.containsTable(tb2)); - // Rename rsgroup 
rsGroupAdmin.renameRSGroup(oldgroup.getName(), "newgroup"); Set<Address>
      servers = oldgroup.getServers(); @@ -551,14 +545,14 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { assertNotNull(oldGroup); assertEquals(2, oldGroup.getServers().size()); - //Add another RSGroup + // Add another RSGroup String anotherRSGroupName = "anotherRSGroup"; RSGroupInfo anotherGroup = addGroup(anotherRSGroupName, 1); anotherGroup = rsGroupAdmin.getRSGroupInfo(anotherGroup.getName()); assertNotNull(anotherGroup); assertEquals(1, anotherGroup.getServers().size()); - //Rename a non existing RSGroup + // Rename a non existing RSGroup try { rsGroupAdmin.renameRSGroup("nonExistingRSGroup", "newRSGroup1"); fail("ConstraintException was expected."); @@ -566,7 +560,7 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { assertTrue(e.getMessage().contains("does not exist")); } - //Rename to existing group + // Rename to existing group try { rsGroupAdmin.renameRSGroup(oldGroup.getName(), anotherRSGroupName); fail("ConstraintException was expected."); @@ -574,15 +568,15 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase { assertTrue(e.getMessage().contains("Group already exists")); } - //Rename default RSGroup + // Rename default RSGroup try { rsGroupAdmin.renameRSGroup(RSGroupInfo.DEFAULT_GROUP, "newRSGroup2"); fail("ConstraintException was expected."); } catch (ConstraintException e) { - //Do nothing + // Do nothing } - //Rename to default RSGroup + // Rename to default RSGroup try { rsGroupAdmin.renameRSGroup(oldGroup.getName(), RSGroupInfo.DEFAULT_GROUP); fail("ConstraintException was expected."); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java index cbbb86cde37..f9c437c219a 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -138,9 +138,9 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return getTableRegionMap().get(tableName) != null && - getTableRegionMap().get(tableName).size() == 6 && - admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)) + return getTableRegionMap().get(tableName) != null + && getTableRegionMap().get(tableName).size() == 6 + && admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)) .getRegionStatesInTransition().size() < 1; } }); @@ -206,8 +206,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return getNumServers() == rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP) - .getServers().size(); + return getNumServers() + == rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size(); } }); @@ -257,8 +257,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return !master.getServerManager().areDeadServersInProgress() && - cluster.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS; + return !master.getServerManager().areDeadServersInProgress() + && cluster.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS; } }); @@ -266,8 +266,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { rsGroupAdmin.removeServers(Sets.newHashSet(targetServer.getAddress())); fail("Dead servers shouldn't have been successfully removed."); } catch (IOException ex) { - String exp = "Server " + targetServer.getAddress() + " is on the dead servers list," + - " Maybe it will come back again, not allowed to remove."; + String exp = "Server " + targetServer.getAddress() + " is on the dead servers list," + + " Maybe it will come back again, not allowed to remove."; String msg = "Expected '" + exp + "' in exception message: "; assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp)); } @@ -315,8 +315,10 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { ServerName targetServer = null; for (ServerName server : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) .getLiveServerMetrics().keySet()) { - if (!newGroup.containsServer(server.getAddress()) && - !rsGroupAdmin.getRSGroupInfo("master").containsServer(server.getAddress())) { + if ( + !newGroup.containsServer(server.getAddress()) + && !rsGroupAdmin.getRSGroupInfo("master").containsServer(server.getAddress()) + ) { targetServer = server; break; } @@ -364,10 +366,10 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return getTableRegionMap().get(tableName) != null && - getTableRegionMap().get(tableName).size() == 5 && - getTableServerRegionMap().get(tableName).size() == 1 && - admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)) + return getTableRegionMap().get(tableName) != null + && getTableRegionMap().get(tableName).size() == 5 + && getTableServerRegionMap().get(tableName).size() == 1 + && admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)) .getRegionStatesInTransition().size() < 1; } }); @@ -430,8 +432,8 @@ public class TestRSGroupsAdmin2 extends 
TestRSGroupsBase { // test success case, remove one server from default ,keep at least one server if (defaultGroup.getServers().size() > 1) { Address serverInDefaultGroup = defaultGroup.getServers().iterator().next(); - LOG.info("moving server " + serverInDefaultGroup + " from group default to group " + - fooGroup.getName()); + LOG.info("moving server " + serverInDefaultGroup + " from group default to group " + + fooGroup.getName()); rsGroupAdmin.moveServers(Sets.newHashSet(serverInDefaultGroup), fooGroup.getName()); } @@ -442,8 +444,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return getNumServers() == rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP) - .getServers().size(); + return getNumServers() + == rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size(); } }); @@ -480,20 +482,19 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { String rregion = null; ServerName toMoveServer = null; for (ServerName server : assignMap.keySet()) { - rregion = assignMap.get(server).size() > 1 && !newGroup.containsServer(server.getAddress()) ? - assignMap.get(server).get(0) : - null; + rregion = assignMap.get(server).size() > 1 && !newGroup.containsServer(server.getAddress()) + ? assignMap.get(server).get(0) + : null; if (rregion != null) { toMoveServer = server; break; } } assert toMoveServer != null; - RegionInfo ri = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(). - getRegionInfo(Bytes.toBytesBinary(rregion)); - RegionStateNode rsn = - TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getRegionStateNode(ri); + RegionInfo ri = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionInfo(Bytes.toBytesBinary(rregion)); + RegionStateNode rsn = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionStateNode(ri); rsn.setState(RegionState.State.SPLITTING); // start thread to recover region state @@ -539,8 +540,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { @Override public boolean evaluate() { if (changed.get()) { - return master.getAssignmentManager().getRegionsOnServer(movedServer).size() == 0 && !rsn - .getRegionLocation().equals(movedServer); + return master.getAssignmentManager().getRegionsOnServer(movedServer).size() == 0 + && !rsn.getRegionLocation().equals(movedServer); } return false; } @@ -570,20 +571,19 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { String rregion = null; ServerName srcServer = null; for (ServerName server : assignMap.keySet()) { - rregion = assignMap.get(server).size() >= 1 && !newGroup.containsServer(server.getAddress()) ? - assignMap.get(server).get(0) : - null; + rregion = assignMap.get(server).size() >= 1 && !newGroup.containsServer(server.getAddress()) + ? assignMap.get(server).get(0) + : null; if (rregion != null) { srcServer = server; break; } } assert srcServer != null; - RegionInfo ri = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(). 
- getRegionInfo(Bytes.toBytesBinary(rregion)); - RegionStateNode rsn = - TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getRegionStateNode(ri); + RegionInfo ri = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionInfo(Bytes.toBytesBinary(rregion)); + RegionStateNode rsn = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionStateNode(ri); rsn.setState(RegionState.State.SPLITTING); // move table to group diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java index 2e502b6a85c..c2a7ac92d3e 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -127,7 +127,8 @@ public class TestRSGroupsBalance extends TestRSGroupsBase { ServerName first = setupBalanceTest(newGroupName, tableName); - // run the balancer in dry run mode. it should return true, but should not actually move any regions + // run the balancer in dry run mode. it should return true, but should not actually move any + // regions admin.balancerSwitch(true, true); BalanceResponse response = rsGroupAdmin.balanceRSGroup(newGroupName, BalanceRequest.newBuilder().setDryRun(true).build()); @@ -225,8 +226,8 @@ public class TestRSGroupsBalance extends TestRSGroupsBase { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); Map>> assignments = - rsGroupAdminEndpoint.getGroupAdminServer() - .getRSGroupAssignmentsByTable(master.getTableStateManager(), RSGroupInfo.DEFAULT_GROUP); + rsGroupAdminEndpoint.getGroupAdminServer() + .getRSGroupAssignmentsByTable(master.getTableStateManager(), RSGroupInfo.DEFAULT_GROUP); assertFalse(assignments.containsKey(disableTableName)); assertTrue(assignments.containsKey(tableName)); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java index d658ba4a618..7b14a72f04f 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -68,12 +68,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { protected static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsBase.class); - //shared + // shared protected final static String groupPrefix = "Group"; protected final static String tablePrefix = "Group"; protected final static Random rand = new Random(); - //shared, cluster type specific + // shared, cluster type specific protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); protected static Admin admin; protected static HBaseCluster cluster; @@ -84,7 +84,7 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { protected static CPMasterObserver observer; public final static long WAIT_TIMEOUT = 60000; - public final static int NUM_SLAVES_BASE = 4; //number of slaves for the smallest cluster + public final static int NUM_SLAVES_BASE = 4; // number of slaves for the smallest cluster public static int NUM_DEAD_SERVERS = 0; // Per test variables @@ -93,17 +93,14 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { protected TableName tableName; public static void setUpTestBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setFloat( - "hbase.master.balancer.stochastic.tableSkewCost", 6000); - TEST_UTIL.getConfiguration().set( - HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - RSGroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().setFloat("hbase.master.balancer.stochastic.tableSkewCost", 6000); + TEST_UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + RSGroupBasedLoadBalancer.class.getName()); TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - RSGroupAdminEndpoint.class.getName() + "," + CPMasterObserver.class.getName()); + RSGroupAdminEndpoint.class.getName() + "," + CPMasterObserver.class.getName()); TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE - 1); - TEST_UTIL.getConfiguration().setInt( - ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, - NUM_SLAVES_BASE - 1); + TEST_UTIL.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + NUM_SLAVES_BASE - 1); TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); initialize(); } @@ -113,21 +110,21 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { cluster = TEST_UTIL.getHBaseCluster(); master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - //wait for balancer to come online + // wait for balancer to come online TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return master.isInitialized() && - ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); + return master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); } }); admin.balancerSwitch(false, true); rsGroupAdmin = new VerifyingRSGroupAdminClient( - new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration()); + new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration()); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); observer = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName()); - rsGroupAdminEndpoint = (RSGroupAdminEndpoint) - host.findCoprocessor(RSGroupAdminEndpoint.class.getName()); + rsGroupAdminEndpoint = + 
(RSGroupAdminEndpoint) host.findCoprocessor(RSGroupAdminEndpoint.class.getName()); } public static void tearDownAfterClass() throws Exception { @@ -149,20 +146,19 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { deleteNamespaceIfNecessary(); deleteGroups(); - for(ServerName sn : admin.listDecommissionedRegionServers()){ + for (ServerName sn : admin.listDecommissionedRegionServers()) { admin.recommissionRegionServer(sn, null); } assertTrue(admin.listDecommissionedRegionServers().isEmpty()); int missing = NUM_SLAVES_BASE - getNumServers(); - LOG.info("Restoring servers: "+missing); - for(int i=0; i set = new HashSet<>(); - for(Address server: defaultInfo.getServers()) { - if(set.size() == serverCount) { + for (Address server : defaultInfo.getServers()) { + if (set.size() == serverCount) { break; } set.add(server); @@ -215,7 +211,7 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { protected void deleteNamespaceIfNecessary() throws IOException { for (NamespaceDescriptor desc : TEST_UTIL.getAdmin().listNamespaceDescriptors()) { - if(desc.getName().startsWith(tablePrefix)) { + if (desc.getName().startsWith(tablePrefix)) { admin.deleteNamespace(desc.getName()); } } @@ -223,8 +219,8 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { protected void deleteGroups() throws IOException { RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); - for(RSGroupInfo group: groupAdmin.listRSGroups()) { - if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + for (RSGroupInfo group : groupAdmin.listRSGroups()) { + if (!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { groupAdmin.moveTables(group.getTables(), RSGroupInfo.DEFAULT_GROUP); groupAdmin.moveServers(group.getServers(), RSGroupInfo.DEFAULT_GROUP); groupAdmin.removeRSGroup(group.getName()); @@ -234,13 +230,12 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { protected Map> getTableRegionMap() throws IOException { Map> map = Maps.newTreeMap(); - Map>> tableServerRegionMap - = getTableServerRegionMap(); - for(TableName tableName : tableServerRegionMap.keySet()) { - if(!map.containsKey(tableName)) { + Map>> tableServerRegionMap = getTableServerRegionMap(); + for (TableName tableName : tableServerRegionMap.keySet()) { + if (!map.containsKey(tableName)) { map.put(tableName, new LinkedList<>()); } - for(List subset: tableServerRegionMap.get(tableName).values()) { + for (List subset : tableServerRegionMap.get(tableName).values()) { map.get(tableName).addAll(subset); } } @@ -265,8 +260,7 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { // return the real number of region servers, excluding the master embedded region server in 2.0+ protected int getNumServers() throws IOException { - ClusterMetrics status = - admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS)); + ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS)); ServerName masterName = status.getMasterName(); int count = 0; for (ServerName sn : status.getLiveServerMetrics().keySet()) { @@ -335,99 +329,97 @@ public abstract class TestRSGroupsBase extends AbstractTestUpdateConfiguration { @Override public void preMoveServersAndTables(final ObserverContext ctx, - Set
      servers, Set<TableName> tables, String targetGroup) throws IOException { + Set<Address>
      servers, Set<TableName> tables, String targetGroup) throws IOException { preMoveServersAndTables = true; } @Override public void postMoveServersAndTables(final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set<Address>
      servers, Set<TableName> tables, String targetGroup) throws IOException { + Set<Address>
      servers, Set<TableName> tables, String targetGroup) throws IOException { postMoveServersAndTables = true; } @Override - public void preRemoveServers( - final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set<Address>
      servers) throws IOException { + public void preRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, + Set<Address>
      servers) throws IOException { preRemoveServersCalled = true; } @Override - public void postRemoveServers( - final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set<Address>
      servers) throws IOException { + public void postRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, + Set<Address>
      servers) throws IOException { postRemoveServersCalled = true; } @Override public void preRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException { + String name) throws IOException { preRemoveRSGroupCalled = true; } @Override public void postRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException { + String name) throws IOException { postRemoveRSGroupCalled = true; } @Override - public void preAddRSGroup(final ObserverContext ctx, - String name) throws IOException { + public void preAddRSGroup(final ObserverContext ctx, String name) + throws IOException { preAddRSGroupCalled = true; } @Override - public void postAddRSGroup(final ObserverContext ctx, - String name) throws IOException { + public void postAddRSGroup(final ObserverContext ctx, String name) + throws IOException { postAddRSGroupCalled = true; } @Override public void preMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException { + Set tables, String targetGroup) throws IOException { preMoveTablesCalled = true; } @Override public void postMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException { + Set tables, String targetGroup) throws IOException { postMoveTablesCalled = true; } @Override public void preMoveServers(final ObserverContext ctx, - Set
      servers, String targetGroup) throws IOException { + Set<Address>
      servers, String targetGroup) throws IOException { preMoveServersCalled = true; } @Override public void postMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set<Address>
      servers, String targetGroup) throws IOException { + Set<Address>
      servers, String targetGroup) throws IOException { postMoveServersCalled = true; } @Override public void preBalanceRSGroup(final ObserverContext ctx, - String groupName, BalanceRequest request) throws IOException { + String groupName, BalanceRequest request) throws IOException { preBalanceRSGroupCalled = true; } @Override public void postBalanceRSGroup(final ObserverContext ctx, - String groupName, BalanceRequest request, BalanceResponse response) throws IOException { + String groupName, BalanceRequest request, BalanceResponse response) throws IOException { postBalanceRSGroupCalled = true; } @Override - public void preRenameRSGroup(ObserverContext ctx, - String oldName, String newName) throws IOException { + public void preRenameRSGroup(ObserverContext ctx, String oldName, + String newName) throws IOException { preRenameRSGroupCalled = true; } @Override - public void postRenameRSGroup(ObserverContext ctx, - String oldName, String newName) throws IOException { + public void postRenameRSGroup(ObserverContext ctx, String oldName, + String newName) throws IOException { postRenameRSGroupCalled = true; } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java index 316a2651860..028cd4e66b8 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -216,7 +216,7 @@ public class TestRSGroupsBasics extends TestRSGroupsBase { // move region servers from default group to new group final int serverCountToMoveToNewGroup = 3; final RSGroupInfo newGroup = - addGroup(getGroupName(name.getMethodName()), serverCountToMoveToNewGroup); + addGroup(getGroupName(name.getMethodName()), serverCountToMoveToNewGroup); // get the existing dead servers NUM_DEAD_SERVERS = cluster.getClusterMetrics().getDeadServerNames().size(); @@ -235,8 +235,8 @@ public class TestRSGroupsBasics extends TestRSGroupsBase { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return cluster.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS && - !master.getServerManager().areDeadServersInProgress(); + return cluster.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS + && !master.getServerManager().areDeadServersInProgress(); } }); @@ -264,8 +264,7 @@ public class TestRSGroupsBasics extends TestRSGroupsBase { // move region servers from default group to "dead server" group final int serverCountToMoveToDeadServerGroup = 1; - RSGroupInfo deadServerGroup = - addGroup("deadServerGroup", serverCountToMoveToDeadServerGroup); + RSGroupInfo deadServerGroup = addGroup("deadServerGroup", serverCountToMoveToDeadServerGroup); // stop 1 region server in "dead server" group ServerName serverToStop = getServerName(deadServerGroup.getServers().iterator().next()); @@ -284,7 +283,7 @@ public class TestRSGroupsBasics extends TestRSGroupsBase { }); Set
      ServersInDeadServerGroup = - rsGroupAdmin.getRSGroupInfo(deadServerGroup.getName()).getServers(); + rsGroupAdmin.getRSGroupInfo(deadServerGroup.getName()).getServers(); assertEquals(serverCountToMoveToDeadServerGroup, ServersInDeadServerGroup.size()); assertTrue(ServersInDeadServerGroup.contains(serverToStop.getAddress())); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java index bd45e8ccd52..dee907fea62 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Collections; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; @@ -50,7 +49,7 @@ public class TestRSGroupsFallback extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupsFallback.class); + HBaseClassTestRule.forClass(TestRSGroupsFallback.class); protected static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsFallback.class); @@ -88,8 +87,8 @@ public class TestRSGroupsFallback extends TestRSGroupsBase { String groupName = getGroupName(name.getMethodName()); addGroup(groupName, 1); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()) + .build(); admin.createTable(desc); rsGroupAdmin.moveTables(Collections.singleton(tableName), groupName); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); @@ -105,8 +104,8 @@ public class TestRSGroupsFallback extends TestRSGroupsBase { JVMClusterUtil.RegionServerThread t = TEST_UTIL.getMiniHBaseCluster().startRegionServerAndWait(60000); Address startRSAddress = t.getRegionServer().getServerName().getAddress(); - TEST_UTIL.waitFor(3000, () -> rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP) - .containsServer(startRSAddress)); + TEST_UTIL.waitFor(3000, + () -> rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).containsServer(startRSAddress)); assertTrue(master.balance().isBalancerRan()); assertRegionsInGroup(tableName, RSGroupInfo.DEFAULT_GROUP); @@ -121,12 +120,12 @@ public class TestRSGroupsFallback extends TestRSGroupsBase { } private void assertRegionsInGroup(TableName tableName, String group) throws IOException { - ProcedureTestingUtility.waitAllProcedures( - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); + ProcedureTestingUtility + .waitAllProcedures(TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); RSGroupInfo groupInfo = rsGroupAdmin.getRSGroupInfo(group); master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName).forEach(region -> { Address regionOnServer = master.getAssignmentManager().getRegionStates() - .getRegionAssignments().get(region).getAddress(); + .getRegionAssignments().get(region).getAddress(); assertTrue(groupInfo.getServers().contains(regionOnServer)); 
}); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java index 9b6684fd1ff..75edd44b125 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -177,26 +177,25 @@ public class TestRSGroupsKillRS extends TestRSGroupsBase { Set
      servers = rsGroupAdmin.getRSGroupInfo(groupName).getServers(); assertEquals(2, servers.size()); LOG.debug("group servers {}", servers); - for (RegionInfo tr : - master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName)) { - assertTrue(servers.contains( - master.getAssignmentManager().getRegionStates().getRegionAssignments() - .get(tr).getAddress())); + for (RegionInfo tr : master.getAssignmentManager().getRegionStates() + .getRegionsOfTable(tableName)) { + assertTrue(servers.contains(master.getAssignmentManager().getRegionStates() + .getRegionAssignments().get(tr).getAddress())); } // Move a region, to ensure there exists a region whose 'lastHost' is in my_group // ('lastHost' of other regions are in 'default' group) // and check if all table regions are online List gsn = new ArrayList<>(); - for(Address addr : servers){ + for (Address addr : servers) { gsn.add(getServerName(addr)); } assertEquals(2, gsn.size()); - for(Map.Entry entry : - master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()){ - if(entry.getKey().getTable().equals(tableName)){ + for (Map.Entry entry : master.getAssignmentManager().getRegionStates() + .getRegionAssignments().entrySet()) { + if (entry.getKey().getTable().equals(tableName)) { LOG.debug("move region {} from {} to {}", entry.getKey().getRegionNameAsString(), - entry.getValue(), gsn.get(1 - gsn.indexOf(entry.getValue()))); + entry.getValue(), gsn.get(1 - gsn.indexOf(entry.getValue()))); TEST_UTIL.moveRegionAndWait(entry.getKey(), gsn.get(1 - gsn.indexOf(entry.getValue()))); break; } @@ -205,32 +204,32 @@ public class TestRSGroupsKillRS extends TestRSGroupsBase { // case 1: stop all the regionservers in my_group, and restart a regionserver in my_group, // and then check if all table regions are online - for(Address addr : rsGroupAdmin.getRSGroupInfo(groupName).getServers()) { + for (Address addr : rsGroupAdmin.getRSGroupInfo(groupName).getServers()) { TEST_UTIL.getMiniHBaseCluster().stopRegionServer(getServerName(addr)); } // better wait for a while for region reassign sleep(10000); assertEquals(NUM_SLAVES_BASE - gsn.size(), - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); + TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); TEST_UTIL.getMiniHBaseCluster().startRegionServer(gsn.get(0).getHostname(), - gsn.get(0).getPort()); + gsn.get(0).getPort()); assertEquals(NUM_SLAVES_BASE - gsn.size() + 1, - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); + TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); TEST_UTIL.waitTableAvailable(tableName, 30000); // case 2: stop all the regionservers in my_group, and move another // regionserver(from the 'default' group) to my_group, // and then check if all table regions are online - for(JVMClusterUtil.RegionServerThread rst : - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()){ - if(rst.getRegionServer().getServerName().getAddress().equals(gsn.get(0).getAddress())){ + for (JVMClusterUtil.RegionServerThread rst : TEST_UTIL.getMiniHBaseCluster() + .getLiveRegionServerThreads()) { + if (rst.getRegionServer().getServerName().getAddress().equals(gsn.get(0).getAddress())) { TEST_UTIL.getMiniHBaseCluster().stopRegionServer(rst.getRegionServer().getServerName()); break; } } sleep(10000); assertEquals(NUM_SLAVES_BASE - gsn.size(), - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); + TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); ServerName newServer = 
master.getServerManager().getOnlineServersList().get(0); rsGroupAdmin.moveServers(Sets.newHashSet(newServer.getAddress()), groupName); // wait and check if table regions are online @@ -238,7 +237,7 @@ public class TestRSGroupsKillRS extends TestRSGroupsBase { } @Test - public void testLowerMetaGroupVersion() throws Exception{ + public void testLowerMetaGroupVersion() throws Exception { // create a rsgroup and move one regionserver to it String groupName = "meta_group"; int groupRSCount = 1; @@ -249,12 +248,12 @@ public class TestRSGroupsKillRS extends TestRSGroupsBase { toAddTables.add(TableName.META_TABLE_NAME); rsGroupAdmin.moveTables(toAddTables, groupName); assertTrue( - rsGroupAdmin.getRSGroupInfo(groupName).getTables().contains(TableName.META_TABLE_NAME)); + rsGroupAdmin.getRSGroupInfo(groupName).getTables().contains(TableName.META_TABLE_NAME)); // restart the regionserver in meta_group, and lower its version String originVersion = ""; Set
      servers = new HashSet<>(); - for(Address addr : rsGroupAdmin.getRSGroupInfo(groupName).getServers()) { + for (Address addr : rsGroupAdmin.getRSGroupInfo(groupName).getServers()) { servers.add(addr); TEST_UTIL.getMiniHBaseCluster().stopRegionServer(getServerName(addr)); originVersion = master.getRegionServerVersion(getServerName(addr)); @@ -262,7 +261,7 @@ public class TestRSGroupsKillRS extends TestRSGroupsBase { // better wait for a while for region reassign sleep(10000); assertEquals(NUM_SLAVES_BASE - groupRSCount, - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); + TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); Address address = servers.iterator().next(); int majorVersion = VersionInfo.getMajorVersion(originVersion); assertTrue(majorVersion >= 1); @@ -270,13 +269,13 @@ public class TestRSGroupsKillRS extends TestRSGroupsBase { setFinalStatic(Version.class.getField("version"), lowerVersion); TEST_UTIL.getMiniHBaseCluster().startRegionServer(address.getHostName(), address.getPort()); assertEquals(NUM_SLAVES_BASE, - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); + TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); assertTrue(VersionInfo.compareVersion(originVersion, - master.getRegionServerVersion(getServerName(servers.iterator().next()))) > 0); + master.getRegionServerVersion(getServerName(servers.iterator().next()))) > 0); LOG.debug("wait for META assigned..."); // SCP finished, which means all regions assigned too. TEST_UTIL.waitFor(60000, () -> !TEST_UTIL.getHBaseCluster().getMaster().getProcedures().stream() - .filter(p -> (p instanceof ServerCrashProcedure)).findAny().isPresent()); + .filter(p -> (p instanceof ServerCrashProcedure)).findAny().isPresent()); } private static void setFinalStatic(Field field, Object newValue) throws Exception { diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java index 39cf164ecf4..452fd63d3c8 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -92,9 +92,9 @@ public class TestRSGroupsOfflineMode { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return master.isInitialized() && - ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() && - master.getServerManager().getOnlineServersList().size() >= 3; + return master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() + && master.getServerManager().getOnlineServersList().size() >= 3; } }); } @@ -115,8 +115,10 @@ public class TestRSGroupsOfflineMode { String newGroup = "my_group"; RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); groupAdmin.addRSGroup(newGroup); - if (master.getAssignmentManager().getRegionStates().getRegionAssignments() - .containsValue(failoverRS.getServerName())) { + if ( + master.getAssignmentManager().getRegionStates().getRegionAssignments() + .containsValue(failoverRS.getServerName()) + ) { for (RegionInfo regionInfo : hbaseAdmin.getRegions(failoverRS.getServerName())) { hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), failoverRS.getServerName()); } @@ -135,8 +137,8 @@ public class TestRSGroupsOfflineMode { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return groupRS.getNumberOfOnlineRegions() < 1 && - master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount() < 1; + return groupRS.getNumberOfOnlineRegions() < 1 + && master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount() < 1; } }); // Move table to group and wait. @@ -156,10 +158,11 @@ public class TestRSGroupsOfflineMode { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return TEST_UTIL.getHBaseCluster().getMaster() != null && - TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() && - TEST_UTIL.getHBaseCluster().getMaster().isInitialized() && - TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() <= 3; + return TEST_UTIL.getHBaseCluster().getMaster() != null + && TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() + && TEST_UTIL.getHBaseCluster().getMaster().isInitialized() + && TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() + <= 3; } }); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java index 734b3a6e100..a930b71a6e4 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.security.access.SecureTestUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; - import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -51,15 +50,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Performs authorization checks for rsgroup operations, according to different - * levels of authorized users. + * Performs authorization checks for rsgroup operations, according to different levels of authorized + * users. */ -@Category({SecurityTests.class, MediumTests.class}) -public class TestRSGroupsWithACL extends SecureTestUtil{ +@Category({ SecurityTests.class, MediumTests.class }) +public class TestRSGroupsWithACL extends SecureTestUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupsWithACL.class); + HBaseClassTestRule.forClass(TestRSGroupsWithACL.class); private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsWithACL.class); private static TableName TEST_TABLE = TableName.valueOf("testtable1"); @@ -100,8 +99,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ public static void setupBeforeClass() throws Exception { // setup configuration conf = TEST_UTIL.getConfiguration(); - conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - RSGroupBasedLoadBalancer.class.getName()); + conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName()); // Enable security enableSecurity(conf); // Verify enableSecurity sets up what we require @@ -112,11 +110,11 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ TEST_UTIL.startMiniCluster(); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - TEST_UTIL.waitFor(60000, (Predicate) () -> - master.isInitialized() && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); + TEST_UTIL.waitFor(60000, (Predicate) () -> master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline()); - rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster(). 
- getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName()); + rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName()); // Wait for the ACL table to become available TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME); TEST_UTIL.waitUntilAllRegionsAssigned(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME); @@ -132,13 +130,13 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]); USER_GROUP_ADMIN = - User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN }); + User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN }); USER_GROUP_CREATE = - User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE }); + User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE }); USER_GROUP_READ = - User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ }); + User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ }); USER_GROUP_WRITE = - User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE }); + User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE }); systemUserConnection = TEST_UTIL.getConnection(); setUpTableAndUserPermissions(); @@ -150,31 +148,21 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ cfd.setMaxVersions(100); tableBuilder.setColumnFamily(cfd.build()); tableBuilder.setValue(TableDescriptorBuilder.OWNER, USER_OWNER.getShortName()); - createTable(TEST_UTIL, tableBuilder.build(), - new byte[][] { Bytes.toBytes("s") }); + createTable(TEST_UTIL, tableBuilder.build(), new byte[][] { Bytes.toBytes("s") }); // Set up initial grants - grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); + grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); - grantOnTable(TEST_UTIL, USER_RW.getShortName(), - TEST_TABLE, TEST_FAMILY, null, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.READ, Permission.Action.WRITE); // USER_CREATE is USER_RW plus CREATE permissions - grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), - TEST_TABLE, null, null, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), TEST_TABLE, null, null, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); - grantOnTable(TEST_UTIL, USER_RO.getShortName(), - TEST_TABLE, TEST_FAMILY, null, - Permission.Action.READ); + grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.READ); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE); @@ -183,8 +171,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ assertEquals(4, PermissionStorage.getTablePermissions(conf, TEST_TABLE).size()); try { - assertEquals(4, AccessControlClient.getUserPermissions(systemUserConnection, - TEST_TABLE.toString()).size()); + assertEquals(4, + 
AccessControlClient.getUserPermissions(systemUserConnection, TEST_TABLE.toString()).size()); } catch (AssertionError e) { fail(e.getMessage()); } catch (Throwable e) { @@ -219,8 +207,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ coprocessors += "," + currentCoprocessors; } conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, coprocessors); - conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - RSGroupBasedLoadBalancer.class.getName()); + conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName()); } @Test @@ -231,8 +218,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -243,8 +230,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -255,8 +242,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -267,8 +254,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -279,8 +266,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -291,8 +278,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -303,8 +290,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -315,8 +302,8 @@ public class 
TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -327,8 +314,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -339,8 +326,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -351,7 +338,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestTableDescriptorWithRSGroup.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestTableDescriptorWithRSGroup.java index b25daa36ef4..38f04e3089f 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestTableDescriptorWithRSGroup.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestTableDescriptorWithRSGroup.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.List; import java.util.Optional; @@ -45,6 +45,7 @@ import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @Category({ LargeTests.class }) @@ -101,9 +102,8 @@ public class TestTableDescriptorWithRSGroup extends TestRSGroupsBase { // Create table ColumnFamilyDescriptor f1 = ColumnFamilyDescriptorBuilder.newBuilder(familyNameBytes).build(); - TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName).setRegionServerGroup(newGroup) - .setColumnFamily(f1).build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) + .setRegionServerGroup(newGroup).setColumnFamily(f1).build(); admin.createTable(desc, getSpitKeys(5)); TEST_UTIL.waitFor(WAIT_TIMEOUT, (Waiter.Predicate) () -> { @@ -221,9 +221,8 @@ public class TestTableDescriptorWithRSGroup extends TestRSGroupsBase { assertEquals(rsGroup1.getName(), regionServerGroup.get()); // Delete table's original rs group, clone should fail. 
- rsGroupAdmin - .moveServersAndTables(Sets.newHashSet(rsGroup1.getServers()), Sets.newHashSet(clonedTable1), - rsGroup2.getName()); + rsGroupAdmin.moveServersAndTables(Sets.newHashSet(rsGroup1.getServers()), + Sets.newHashSet(clonedTable1), rsGroup2.getName()); rsGroupAdmin.removeRSGroup(rsGroup1.getName()); // Clone Snapshot final TableName clonedTable2 = TableName.valueOf(tableName.getNameAsString() + "_2"); @@ -287,9 +286,8 @@ public class TestTableDescriptorWithRSGroup extends TestRSGroupsBase { TableDescriptor descriptor = admin.getConnection().getTable(tableName).getDescriptor(); RSGroupInfo rsGroup2 = addGroup("rsGroup2", 1); - final TableDescriptor newTableDescriptor = - TableDescriptorBuilder.newBuilder(descriptor).setRegionServerGroup(rsGroup2.getName()) - .removeColumnFamily(familyNameBytes).build(); + final TableDescriptor newTableDescriptor = TableDescriptorBuilder.newBuilder(descriptor) + .setRegionServerGroup(rsGroup2.getName()).removeColumnFamily(familyNameBytes).build(); // Removed family to fail pre-check validation try { @@ -311,8 +309,7 @@ public class TestTableDescriptorWithRSGroup extends TestRSGroupsBase { // Create TableDescriptor without a family so creation fails TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName).setRegionServerGroup(rsGroup1.getName()) - .build(); + TableDescriptorBuilder.newBuilder(tableName).setRegionServerGroup(rsGroup1.getName()).build(); try { admin.createTable(desc, getSpitKeys(5)); fail("Should have thrown DoNotRetryIOException but no exception thrown."); @@ -331,9 +328,8 @@ public class TestTableDescriptorWithRSGroup extends TestRSGroupsBase { assertEquals(0, rsGroup1.getTables().size()); final TableName tableName = TableName.valueOf("hbase:meta"); final TableDescriptor descriptor = admin.getConnection().getTable(tableName).getDescriptor(); - final TableDescriptor newTableDescriptor = - TableDescriptorBuilder.newBuilder(descriptor).setRegionServerGroup(rsGroup1.getName()) - .build(); + final TableDescriptor newTableDescriptor = TableDescriptorBuilder.newBuilder(descriptor) + .setRegionServerGroup(rsGroup1.getName()).build(); admin.modifyTable(newTableDescriptor); final RSGroupInfo rsGroupInfoOfTable = rsGroupAdmin.getRSGroupInfoOfTable(tableName); assertEquals(rsGroup1.getName(), rsGroupInfoOfTable.getName()); @@ -351,9 +347,9 @@ public class TestTableDescriptorWithRSGroup extends TestRSGroupsBase { createTable(table2, null); rsGroupAdmin.moveTables(Sets.newHashSet(tableName), rsGroup1.getName()); assertTrue("RSGroup info is not updated into TableDescriptor when table created", - admin.getConnection().getTable(tableName).getDescriptor().getRegionServerGroup() - .isPresent()); - assertFalse("Table descriptor should not have been updated " + admin.getConnection().getTable(tableName).getDescriptor().getRegionServerGroup().isPresent()); + assertFalse( + "Table descriptor should not have been updated " + "as rs group info was not stored in table descriptor.", admin.getConnection().getTable(table2).getDescriptor().getRegionServerGroup().isPresent()); @@ -361,7 +357,8 @@ public class TestTableDescriptorWithRSGroup extends TestRSGroupsBase { rsGroupAdmin.renameRSGroup(rsGroup1.getName(), rsGroup2); assertEquals(rsGroup2, admin.getConnection().getTable(tableName).getDescriptor().getRegionServerGroup().get()); - assertFalse("Table descriptor should not have been updated " + assertFalse( + "Table descriptor should not have been updated " + "as rs group info was not stored in table descriptor.", 
admin.getConnection().getTable(table2).getDescriptor().getRegionServerGroup().isPresent()); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java index 18d9980f3a6..62996c6b06c 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -98,22 +97,20 @@ public class TestUpdateRSGroupConfiguration extends TestRSGroupsBase { rsGroupAdmin.updateConfiguration(TEST_GROUP); // Check the configuration of the RegionServer in test rsgroup, should be update - Configuration regionServerConfiguration = - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().stream() - .map(JVMClusterUtil.RegionServerThread::getRegionServer) - .filter(regionServer -> - (regionServer.getServerName().getAddress().equals(testRSGroup.getServers().first()))) - .collect(Collectors.toList()).get(0).getConfiguration(); + Configuration regionServerConfiguration = TEST_UTIL.getMiniHBaseCluster() + .getLiveRegionServerThreads().stream().map(JVMClusterUtil.RegionServerThread::getRegionServer) + .filter(regionServer -> (regionServer.getServerName().getAddress() + .equals(testRSGroup.getServers().first()))) + .collect(Collectors.toList()).get(0).getConfiguration(); int custom = regionServerConfiguration.getInt("hbase.custom.config", 0); assertEquals(1000, custom); // Check the configuration of the RegionServer in test2 rsgroup, should not be update - regionServerConfiguration = - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().stream() - .map(JVMClusterUtil.RegionServerThread::getRegionServer) - .filter(regionServer -> - (regionServer.getServerName().getAddress().equals(test2RSGroup.getServers().first()))) - .collect(Collectors.toList()).get(0).getConfiguration(); + regionServerConfiguration = TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads() + .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer) + .filter(regionServer -> (regionServer.getServerName().getAddress() + .equals(test2RSGroup.getServers().first()))) + .collect(Collectors.toList()).get(0).getConfiguration(); custom = regionServerConfiguration.getInt("hbase.custom.config", 0); assertEquals(0, custom); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java index 7b8472d9fd7..e1ca06e5528 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.BalanceRequest; @@ -52,10 +51,10 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin { private RSGroupAdmin wrapped; public VerifyingRSGroupAdminClient(RSGroupAdmin RSGroupAdmin, Configuration conf) - throws IOException { + throws IOException { wrapped = RSGroupAdmin; - table = ConnectionFactory.createConnection(conf) - .getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME); + table = + ConnectionFactory.createConnection(conf).getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME); zkw = new ZKWatcher(conf, this.getClass().getSimpleName(), null); } @@ -94,7 +93,8 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin { } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { return wrapped.balanceRSGroup(groupName, request); } @@ -110,7 +110,7 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin { @Override public void moveServersAndTables(Set
      servers, Set tables, String targetGroup) - throws IOException { + throws IOException { wrapped.moveServersAndTables(servers, tables, targetGroup); verify(); } @@ -129,7 +129,7 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin { @Override public void updateRSGroupConfig(String groupName, Map configuration) - throws IOException { + throws IOException { wrapped.updateRSGroupConfig(groupName, configuration); verify(); } @@ -144,28 +144,25 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin { Set zList = Sets.newHashSet(); for (Result result : table.getScanner(new Scan())) { - RSGroupProtos.RSGroupInfo proto = - RSGroupProtos.RSGroupInfo.parseFrom( - result.getValue( - RSGroupInfoManager.META_FAMILY_BYTES, - RSGroupInfoManager.META_QUALIFIER_BYTES)); + RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(result + .getValue(RSGroupInfoManager.META_FAMILY_BYTES, RSGroupInfoManager.META_QUALIFIER_BYTES)); groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto)); } Assert.assertEquals(Sets.newHashSet(groupMap.values()), - Sets.newHashSet(wrapped.listRSGroups())); + Sets.newHashSet(wrapped.listRSGroups())); try { String groupBasePath = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, "rsgroup"); - for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { + for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { byte[] data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(groupBasePath, znode)); - if(data.length > 0) { + if (data.length > 0) { ProtobufUtil.expectPBMagicPrefix(data); - ByteArrayInputStream bis = new ByteArrayInputStream( - data, ProtobufUtil.lengthOfPBMagic(), data.length); + ByteArrayInputStream bis = + new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); } } Assert.assertEquals(zList.size(), groupMap.size()); - for(RSGroupInfo RSGroupInfo : zList) { + for (RSGroupInfo RSGroupInfo : zList) { Assert.assertTrue(groupMap.get(RSGroupInfo.getName()).equals(RSGroupInfo)); } } catch (KeeperException e) { diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 3955b498178..7d2947fd655 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -36,203 +36,6 @@ true true - - - - - - ${project.build.directory} - - hbase-webapps/** - - - - src/main/resources - - **/** - - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - 2048 - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - - org.apache.hbase - hbase-resource-bundle - ${project.version} - - - - - default - - false - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - org/apache/jute/** - org/apache/zookeeper/** - **/*.jsp - hbase-site.xml - hdfs-site.xml - log4j.properties - mapred-queues.xml - mapred-site.xml - - - - - - maven-antrun-plugin - - - - 
generate - generate-sources - - - - - - - - - - - - - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-jamon - ${project.build.directory}/generated-sources/java - - - - - - - org.jamon - jamon-maven-plugin - - - generate-sources - - translate - - - src/main/jamon - target/generated-jamon - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.hbase.thirdparty @@ -381,7 +184,7 @@ org.glassfish.web javax.servlet.jsp - + javax.servlet.jsp javax.servlet.jsp-api @@ -555,6 +358,203 @@ test + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + src/main/resources + + **/** + + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + 2048 + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + + org.apache.hbase + hbase-resource-bundle + ${project.version} + + + + + default + + false + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org/apache/jute/** + org/apache/zookeeper/** + **/*.jsp + hbase-site.xml + hdfs-site.xml + log4j.properties + mapred-queues.xml + mapred-site.xml + + + + + + maven-antrun-plugin + + + + generate + + run + + generate-sources + + + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-jamon + ${project.build.directory}/generated-sources/java + + + + + + + org.jamon + jamon-maven-plugin + + + + translate + + generate-sources + + src/main/jamon + target/generated-jamon + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -567,10 +567,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -618,15 +618,17 @@ make + + run + compile - run - + - + @@ -646,8 +648,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -737,10 +740,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources @@ -915,10 +892,10 @@ - - false - true - + + false + true + @@ -947,7 +924,7 @@ - + @@ -956,6 +933,31 @@ + + + org.apache.maven.plugins + maven-eclipse-plugin + + + org.jamon.project.jamonnature + + + org.jamon.project.templateBuilder + org.eclipse.jdt.core.javabuilder + org.jamon.project.markerUpdater + + + + .settings/org.jamon.prefs + # now + eclipse.preferences.version=1 + templateSourceDir=src/main/jamon + templateOutputDir=target/generated-jamon + + + + + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java index 1f351c52da2..52bca682c81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,14 +22,11 @@ import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination; import org.apache.yetus.audience.InterfaceAudience; /** - * Implementations of this interface will keep and return to clients - * implementations of classes providing API to execute - * coordinated operations. This interface is client-side, so it does NOT - * include methods to retrieve the particular interface implementations. - * - * For each coarse-grained area of operations there will be a separate - * interface with implementation, providing API for relevant operations - * requiring coordination. + * Implementations of this interface will keep and return to clients implementations of classes + * providing API to execute coordinated operations. This interface is client-side, so it does NOT + * include methods to retrieve the particular interface implementations. For each coarse-grained + * area of operations there will be a separate interface with implementation, providing API for + * relevant operations requiring coordination. */ @InterfaceAudience.Private public interface CoordinatedStateManager { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java index fff68c79100..31dad71c1c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java index 0f0e9380623..d4aa962f2b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,24 +29,19 @@ import org.apache.hadoop.hbase.util.DNS; import org.apache.yetus.audience.InterfaceAudience; /** - * Data structure to describe the distribution of HDFS blocks among hosts. - * - * Adding erroneous data will be ignored silently. + * Data structure to describe the distribution of HDFS blocks among hosts. Adding erroneous data + * will be ignored silently. */ @InterfaceAudience.Private public class HDFSBlocksDistribution { - private Map hostAndWeights = null; + private Map hostAndWeights = null; private long uniqueBlocksTotalWeight = 0; /** - * Stores the hostname and weight for that hostname. - * - * This is used when determining the physical locations of the blocks making - * up a region. - * - * To make a prioritized list of the hosts holding the most data of a region, - * this class is used to count the total weight for each host. The weight is - * currently just the size of the file. + * Stores the hostname and weight for that hostname. 
This is used when determining the physical + * locations of the blocks making up a region. To make a prioritized list of the hosts holding the + * most data of a region, this class is used to count the total weight for each host. The weight + * is currently just the size of the file. */ public static class HostAndWeight { @@ -57,8 +51,8 @@ public class HDFSBlocksDistribution { /** * Constructor - * @param host the host name - * @param weight the weight + * @param host the host name + * @param weight the weight * @param weightForSsd the weight for ssd */ public HostAndWeight(String host, long weight, long weightForSsd) { @@ -69,7 +63,7 @@ public class HDFSBlocksDistribution { /** * add weight - * @param weight the weight + * @param weight the weight * @param weightForSsd the weight for ssd */ public void addWeight(long weight, long weightForSsd) { @@ -104,7 +98,7 @@ public class HDFSBlocksDistribution { public static class WeightComparator implements Comparator { @Override public int compare(HostAndWeight l, HostAndWeight r) { - if(l.getWeight() == r.getWeight()) { + if (l.getWeight() == r.getWeight()) { return l.getHost().compareTo(r.getHost()); } return l.getWeight() < r.getWeight() ? -1 : 1; @@ -124,13 +118,12 @@ public class HDFSBlocksDistribution { */ @Override public synchronized String toString() { - return "number of unique hosts in the distribution=" + - this.hostAndWeights.size(); + return "number of unique hosts in the distribution=" + this.hostAndWeights.size(); } /** * add some weight to a list of hosts, update the value of unique block weight - * @param hosts the list of the host + * @param hosts the list of the host * @param weight the weight */ public void addHostsAndBlockWeight(String[] hosts, long weight) { @@ -139,7 +132,7 @@ public class HDFSBlocksDistribution { /** * add some weight to a list of hosts, update the value of unique block weight - * @param hosts the list of the host + * @param hosts the list of the host * @param weight the weight */ public void addHostsAndBlockWeight(String[] hosts, long weight, StorageType[] storageTypes) { @@ -174,8 +167,8 @@ public class HDFSBlocksDistribution { /** * add some weight to a specific host - * @param host the host name - * @param weight the weight + * @param host the host name + * @param weight the weight * @param weightForSsd the weight for ssd */ private void addHostAndBlockWeight(String host, long weight, long weightForSsd) { @@ -185,7 +178,7 @@ public class HDFSBlocksDistribution { } HostAndWeight hostAndWeight = this.hostAndWeights.get(host); - if(hostAndWeight == null) { + if (hostAndWeight == null) { hostAndWeight = new HostAndWeight(host, weight, weightForSsd); this.hostAndWeights.put(host, hostAndWeight); } else { @@ -196,13 +189,12 @@ public class HDFSBlocksDistribution { /** * @return the hosts and their weights */ - public Map getHostAndWeights() { + public Map getHostAndWeights() { return this.hostAndWeights; } /** - * return the weight for a specific host, that will be the total bytes of all - * blocks on the host + * return the weight for a specific host, that will be the total bytes of all blocks on the host * @param host the host name * @return the weight of the given host */ @@ -210,7 +202,7 @@ public class HDFSBlocksDistribution { long weight = 0; if (host != null) { HostAndWeight hostAndWeight = this.hostAndWeights.get(host); - if(hostAndWeight != null) { + if (hostAndWeight != null) { weight = hostAndWeight.getWeight(); } } @@ -303,10 +295,8 @@ public class HDFSBlocksDistribution { * @param 
otherBlocksDistribution the other hdfs blocks distribution */ public void add(HDFSBlocksDistribution otherBlocksDistribution) { - Map otherHostAndWeights = - otherBlocksDistribution.getHostAndWeights(); - for (Map.Entry otherHostAndWeight: - otherHostAndWeights.entrySet()) { + Map otherHostAndWeights = otherBlocksDistribution.getHostAndWeights(); + for (Map.Entry otherHostAndWeight : otherHostAndWeights.entrySet()) { addHostAndBlockWeight(otherHostAndWeight.getValue().host, otherHostAndWeight.getValue().weight, otherHostAndWeight.getValue().weightForSsd); } @@ -319,7 +309,7 @@ public class HDFSBlocksDistribution { public List getTopHosts() { HostAndWeight[] hostAndWeights = getTopHostsWithWeights(); List topHosts = new ArrayList<>(hostAndWeights.length); - for(HostAndWeight haw : hostAndWeights) { + for (HostAndWeight haw : hostAndWeights) { topHosts.add(haw.getHost()); } return topHosts; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java index 8db0ca272d8..fce64aad2e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java @@ -49,7 +49,7 @@ public class HealthCheckChore extends ScheduledChore { healthChecker.init(healthCheckScript, scriptTimeout); this.threshold = config.getInt(HConstants.HEALTH_FAILURE_THRESHOLD, HConstants.DEFAULT_HEALTH_FAILURE_THRESHOLD); - this.failureWindow = (long)this.threshold * (long)sleepTime; + this.failureWindow = (long) this.threshold * (long) sleepTime; } @Override @@ -59,13 +59,12 @@ public class HealthCheckChore extends ScheduledChore { if (!isHealthy) { boolean needToStop = decideToStop(); if (needToStop) { - this.getStopper().stop("The node reported unhealthy " + threshold - + " number of times consecutively."); + this.getStopper() + .stop("The node reported unhealthy " + threshold + " number of times consecutively."); } // Always log health report. - LOG.info("Health status at " + - StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + " : " + - report.getHealthReport()); + LOG.info("Health status at " + StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + + " : " + report.getHealthReport()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java index e47afd58d68..5e8473d1a3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java @@ -19,17 +19,15 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A utility for executing an external script that checks the health of - * the node. An example script can be found at - * src/main/sh/healthcheck/healthcheck.sh in the - * hbase-examples module. + * A utility for executing an external script that checks the health of the node. An example script + * can be found at src/main/sh/healthcheck/healthcheck.sh in the hbase-examples + * module. */ class HealthChecker { @@ -53,9 +51,8 @@ class HealthChecker { /** * Initialize. 
- * * @param location the location of the health script - * @param timeout the timeout to be used for the health script + * @param timeout the timeout to be used for the health script */ public void init(String location, long timeout) { this.healthCheckScript = location; @@ -63,9 +60,9 @@ class HealthChecker { ArrayList execScript = new ArrayList<>(); execScript.add(healthCheckScript); this.shexec = new ShellCommandExecutor(execScript.toArray(new String[execScript.size()]), null, - null, scriptTimeout); - LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + - ", timeout=" + timeout); + null, scriptTimeout); + LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + ", timeout=" + + timeout); } public HealthReport checkHealth() { @@ -104,24 +101,24 @@ class HealthChecker { return false; } - private String getHealthReport(HealthCheckerExitStatus status){ + private String getHealthReport(HealthCheckerExitStatus status) { String healthReport = null; switch (status) { - case SUCCESS: - healthReport = "Server is healthy."; - break; - case TIMED_OUT: - healthReport = "Health script timed out"; - break; - case FAILED_WITH_EXCEPTION: - healthReport = exceptionStackTrace; - break; - case FAILED_WITH_EXIT_CODE: - healthReport = "Health script failed with exit code."; - break; - case FAILED: - healthReport = shexec.getOutput(); - break; + case SUCCESS: + healthReport = "Server is healthy."; + break; + case TIMED_OUT: + healthReport = "Health script timed out"; + break; + case FAILED_WITH_EXCEPTION: + healthReport = exceptionStackTrace; + break; + case FAILED_WITH_EXIT_CODE: + healthReport = "Health script failed with exit code."; + break; + case FAILED: + healthReport = shexec.getOutput(); + break; } return healthReport; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java index 83882b0cdcc..44498200991 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,7 @@ class HealthReport { } /** - * Gets the status of the region server. - * - * @return HealthCheckerExitStatus + * Gets the status of the region server. n */ HealthCheckerExitStatus getStatus() { return status; @@ -48,9 +46,7 @@ class HealthReport { } /** - * Gets the health report of the region server. - * - * @return String + * Gets the health report of the region server. n */ String getHealthReport() { return healthReport; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java index f8fb4bd9a42..7f86d4de0b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,11 +41,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue - * of the out-of-the-box JMX Agent): - * 1)connector port can share with the registry port if SSL is OFF - * 2)support password authentication - * 3)support subset of SSL (with default configuration) + * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue of the out-of-the-box JMX + * Agent): 1)connector port can share with the registry port if SSL is OFF 2)support password + * authentication 3)support subset of SSL (with default configuration) */ @InterfaceAudience.Private public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { @@ -57,16 +54,15 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { public static final int defRegionserverRMIRegistryPort = 10102; /** - * workaround for HBASE-11146 - * master and regionserver are in 1 JVM in standalone mode - * only 1 JMX instance is allowed, otherwise there is port conflict even if - * we only load regionserver coprocessor on master + * workaround for HBASE-11146 master and regionserver are in 1 JVM in standalone mode only 1 JMX + * instance is allowed, otherwise there is port conflict even if we only load regionserver + * coprocessor on master */ private static JMXConnectorServer JMX_CS = null; private Registry rmiRegistry = null; - public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, - int rmiConnectorPort) throws IOException { + public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, int rmiConnectorPort) + throws IOException { // Build jmxURL StringBuilder url = new StringBuilder(); url.append("service:jmx:rmi://localhost:"); @@ -79,8 +75,7 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { } - public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) - throws IOException { + public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) throws IOException { boolean rmiSSL = false; boolean authenticate = true; String passwordFile = null; @@ -88,19 +83,18 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { System.setProperty("java.rmi.server.randomIDs", "true"); - String rmiSSLValue = System.getProperty("com.sun.management.jmxremote.ssl", - "false"); + String rmiSSLValue = System.getProperty("com.sun.management.jmxremote.ssl", "false"); rmiSSL = Boolean.parseBoolean(rmiSSLValue); String authenticateValue = - System.getProperty("com.sun.management.jmxremote.authenticate", "false"); + System.getProperty("com.sun.management.jmxremote.authenticate", "false"); authenticate = Boolean.parseBoolean(authenticateValue); passwordFile = System.getProperty("com.sun.management.jmxremote.password.file"); accessFile = System.getProperty("com.sun.management.jmxremote.access.file"); - LOG.info("rmiSSL:" + rmiSSLValue + ",authenticate:" + authenticateValue - + ",passwordFile:" + passwordFile + ",accessFile:" + accessFile); + LOG.info("rmiSSL:" + rmiSSLValue + ",authenticate:" + authenticateValue + ",passwordFile:" + + passwordFile + ",accessFile:" + accessFile); // Environment map HashMap jmxEnv = new HashMap<>(); @@ -110,8 +104,8 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { if (rmiSSL) { if (rmiRegistryPort == rmiConnectorPort) { - throw new IOException("SSL is enabled. 
" + - "rmiConnectorPort cannot share with the rmiRegistryPort!"); + throw new IOException( + "SSL is enabled. " + "rmiConnectorPort cannot share with the rmiRegistryPort!"); } csf = new SslRMIClientSocketFactorySecure(); ssf = new SslRMIServerSocketFactorySecure(); @@ -140,7 +134,7 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { try { // Start the JMXListener with the connection string - synchronized(JMXListener.class) { + synchronized (JMXListener.class) { if (JMX_CS != null) { throw new RuntimeException("Started by another thread?"); } @@ -172,7 +166,6 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { } } - @Override public void start(CoprocessorEnvironment env) throws IOException { int rmiRegistryPort = -1; @@ -182,30 +175,27 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { if (env instanceof MasterCoprocessorEnvironment) { // running on Master rmiRegistryPort = - conf.getInt("master" + RMI_REGISTRY_PORT_CONF_KEY, defMasterRMIRegistryPort); + conf.getInt("master" + RMI_REGISTRY_PORT_CONF_KEY, defMasterRMIRegistryPort); rmiConnectorPort = conf.getInt("master" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); LOG.info("Master rmiRegistryPort:" + rmiRegistryPort + ",Master rmiConnectorPort:" - + rmiConnectorPort); + + rmiConnectorPort); } else if (env instanceof RegionServerCoprocessorEnvironment) { // running on RegionServer rmiRegistryPort = - conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, - defRegionserverRMIRegistryPort); - rmiConnectorPort = - conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); - LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort - + ",RegionServer rmiConnectorPort:" + rmiConnectorPort); + conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, defRegionserverRMIRegistryPort); + rmiConnectorPort = conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); + LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort + ",RegionServer rmiConnectorPort:" + + rmiConnectorPort); } else if (env instanceof RegionCoprocessorEnvironment) { LOG.error("JMXListener should not be loaded in Region Environment!"); return; } - synchronized(JMXListener.class) { + synchronized (JMXListener.class) { if (JMX_CS != null) { LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort); - } - else { + } else { startConnectorServer(rmiRegistryPort, rmiConnectorPort); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 1c721eef5c4..bc562b0a532 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,44 +22,38 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; - -import java.util.concurrent.CopyOnWriteArrayList; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This class creates a single process HBase cluster. One thread is created for - * a master and one per region server. - * - * Call {@link #startup()} to start the cluster running and {@link #shutdown()} - * to close it all down. {@link #join} the cluster is you want to wait on - * shutdown completion. - * - *

      Runs master on port 16000 by default. Because we can't just kill the - * process -- not till HADOOP-1700 gets fixed and even then.... -- we need to - * be able to find the master with a remote client to run shutdown. To use a - * port other than 16000, set the hbase.master to a value of 'local:PORT': - * that is 'local', not 'localhost', and the port number the master should use - * instead of 16000. - * - * + * This class creates a single process HBase cluster. One thread is created for a master and one per + * region server. Call {@link #startup()} to start the cluster running and {@link #shutdown()} to + * close it all down. {@link #join} the cluster if you want to wait on shutdown completion. + *

      + * Runs master on port 16000 by default. Because we can't just kill the process -- not till + * HADOOP-1700 gets fixed and even then.... -- we need to be able to find the master with a remote + * client to run shutdown. To use a port other than 16000, set the hbase.master to a value of + * 'local:PORT': that is 'local', not 'localhost', and the port number the master should use instead + * of 16000. */ @InterfaceAudience.Public public class LocalHBaseCluster { private static final Logger LOG = LoggerFactory.getLogger(LocalHBaseCluster.class); private final List masterThreads = new CopyOnWriteArrayList<>(); - private final List regionThreads = new CopyOnWriteArrayList<>(); + private final List regionThreads = + new CopyOnWriteArrayList<>(); private final static int DEFAULT_NO = 1; /** local mode */ public static final String LOCAL = "local"; @@ -73,108 +66,103 @@ public class LocalHBaseCluster { private final Class regionServerClass; /** - * Constructor. - * @param conf - * @throws IOException + * Constructor. nn */ - public LocalHBaseCluster(final Configuration conf) - throws IOException { + public LocalHBaseCluster(final Configuration conf) throws IOException { this(conf, DEFAULT_NO); } /** * Constructor. - * @param conf Configuration to use. Post construction has the master's - * address. - * @param noRegionServers Count of regionservers to start. - * @throws IOException + * @param conf Configuration to use. Post construction has the master's address. + * @param noRegionServers Count of regionservers to start. n */ - public LocalHBaseCluster(final Configuration conf, final int noRegionServers) - throws IOException { + public LocalHBaseCluster(final Configuration conf, final int noRegionServers) throws IOException { this(conf, 1, 0, noRegionServers, getMasterImplementation(conf), - getRegionServerImplementation(conf)); + getRegionServerImplementation(conf)); } /** * Constructor. - * @param conf Configuration to use. Post construction has the active master - * address. - * @param noMasters Count of masters to start. - * @param noRegionServers Count of regionservers to start. - * @throws IOException + * @param conf Configuration to use. Post construction has the active master address. + * @param noMasters Count of masters to start. + * @param noRegionServers Count of regionservers to start. 
n */ - public LocalHBaseCluster(final Configuration conf, final int noMasters, - final int noRegionServers) - throws IOException { + public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers) + throws IOException { this(conf, noMasters, 0, noRegionServers, getMasterImplementation(conf), - getRegionServerImplementation(conf)); + getRegionServerImplementation(conf)); } @SuppressWarnings("unchecked") - private static Class getRegionServerImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - HRegionServer.class); + private static Class + getRegionServerImplementation(final Configuration conf) { + return (Class) conf.getClass(HConstants.REGION_SERVER_IMPL, + HRegionServer.class); } @SuppressWarnings("unchecked") private static Class getMasterImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.MASTER_IMPL, - HMaster.class); + return (Class) conf.getClass(HConstants.MASTER_IMPL, HMaster.class); } public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers, - final Class masterClass, - final Class regionServerClass) throws IOException { + final Class masterClass, + final Class regionServerClass) throws IOException { this(conf, noMasters, 0, noRegionServers, masterClass, regionServerClass); } /** * Constructor. - * @param conf Configuration to use. Post construction has the master's - * address. - * @param noMasters Count of masters to start. - * @param noRegionServers Count of regionservers to start. - * @param masterClass - * @param regionServerClass - * @throws IOException + * @param conf Configuration to use. Post construction has the master's address. + * @param noMasters Count of masters to start. + * @param noRegionServers Count of regionservers to start. nnn */ @SuppressWarnings("unchecked") public LocalHBaseCluster(final Configuration conf, final int noMasters, - final int noAlwaysStandByMasters, final int noRegionServers, - final Class masterClass, - final Class regionServerClass) throws IOException { + final int noAlwaysStandByMasters, final int noRegionServers, + final Class masterClass, + final Class regionServerClass) throws IOException { this.conf = conf; // When active, if a port selection is default then we switch to random if (conf.getBoolean(ASSIGN_RANDOM_PORTS, false)) { - if (conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT) - == HConstants.DEFAULT_MASTER_PORT) { + if ( + conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT) + == HConstants.DEFAULT_MASTER_PORT + ) { LOG.debug("Setting Master Port to random."); conf.set(HConstants.MASTER_PORT, "0"); } - if (conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT) - == HConstants.DEFAULT_REGIONSERVER_PORT) { + if ( + conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT) + == HConstants.DEFAULT_REGIONSERVER_PORT + ) { LOG.debug("Setting RegionServer Port to random."); conf.set(HConstants.REGIONSERVER_PORT, "0"); } // treat info ports special; expressly don't change '-1' (keep off) // in case we make that the default behavior. 
- if (conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 && - conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) - == HConstants.DEFAULT_REGIONSERVER_INFOPORT) { + if ( + conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 + && conf.getInt(HConstants.REGIONSERVER_INFO_PORT, + HConstants.DEFAULT_REGIONSERVER_INFOPORT) == HConstants.DEFAULT_REGIONSERVER_INFOPORT + ) { LOG.debug("Setting RS InfoServer Port to random."); conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); } - if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 && - conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) - == HConstants.DEFAULT_MASTER_INFOPORT) { + if ( + conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 + && conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) + == HConstants.DEFAULT_MASTER_INFOPORT + ) { LOG.debug("Setting Master InfoServer Port to random."); conf.set(HConstants.MASTER_INFO_PORT, "0"); } } - this.masterClass = (Class) - conf.getClass(HConstants.MASTER_IMPL, masterClass); + this.masterClass = + (Class) conf.getClass(HConstants.MASTER_IMPL, masterClass); // Start the HMasters. int i; for (i = 0; i < noMasters; i++) { @@ -186,45 +174,40 @@ public class LocalHBaseCluster { addMaster(c, i + j); } // Start the HRegionServers. - this.regionServerClass = - (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - regionServerClass); + this.regionServerClass = (Class) conf + .getClass(HConstants.REGION_SERVER_IMPL, regionServerClass); for (int j = 0; j < noRegionServers; j++) { addRegionServer(new Configuration(conf), j); } } - public JVMClusterUtil.RegionServerThread addRegionServer() - throws IOException { + public JVMClusterUtil.RegionServerThread addRegionServer() throws IOException { return addRegionServer(new Configuration(conf), this.regionThreads.size()); } @SuppressWarnings("unchecked") - public JVMClusterUtil.RegionServerThread addRegionServer( - Configuration config, final int index) - throws IOException { + public JVMClusterUtil.RegionServerThread addRegionServer(Configuration config, final int index) + throws IOException { // Create each regionserver with its own Configuration instance so each has // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager). 
JVMClusterUtil.RegionServerThread rst = - JVMClusterUtil.createRegionServerThread(config, (Class) conf - .getClass(HConstants.REGION_SERVER_IMPL, this.regionServerClass), index); + JVMClusterUtil.createRegionServerThread(config, (Class) conf + .getClass(HConstants.REGION_SERVER_IMPL, this.regionServerClass), index); this.regionThreads.add(rst); return rst; } - public JVMClusterUtil.RegionServerThread addRegionServer( - final Configuration config, final int index, User user) - throws IOException, InterruptedException { - return user.runAs( - new PrivilegedExceptionAction() { - @Override - public JVMClusterUtil.RegionServerThread run() throws Exception { - return addRegionServer(config, index); - } - }); + public JVMClusterUtil.RegionServerThread addRegionServer(final Configuration config, + final int index, User user) throws IOException, InterruptedException { + return user.runAs(new PrivilegedExceptionAction() { + @Override + public JVMClusterUtil.RegionServerThread run() throws Exception { + return addRegionServer(config, index); + } + }); } public JVMClusterUtil.MasterThread addMaster() throws IOException { @@ -232,36 +215,33 @@ public class LocalHBaseCluster { } public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index) - throws IOException { + throws IOException { // Create each master with its own Configuration instance so each has // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager. JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(c, - (Class) c.getClass(HConstants.MASTER_IMPL, this.masterClass), index); + (Class) c.getClass(HConstants.MASTER_IMPL, this.masterClass), index); this.masterThreads.add(mt); // Refresh the master address config. List masterHostPorts = new ArrayList<>(); - getMasters().forEach(masterThread -> - masterHostPorts.add(masterThread.getMaster().getServerName().getAddress().toString())); + getMasters().forEach(masterThread -> masterHostPorts + .add(masterThread.getMaster().getServerName().getAddress().toString())); conf.set(HConstants.MASTER_ADDRS_KEY, String.join(",", masterHostPorts)); return mt; } - public JVMClusterUtil.MasterThread addMaster( - final Configuration c, final int index, User user) - throws IOException, InterruptedException { - return user.runAs( - new PrivilegedExceptionAction() { - @Override - public JVMClusterUtil.MasterThread run() throws Exception { - return addMaster(c, index); - } - }); + public JVMClusterUtil.MasterThread addMaster(final Configuration c, final int index, User user) + throws IOException, InterruptedException { + return user.runAs(new PrivilegedExceptionAction() { + @Override + public JVMClusterUtil.MasterThread run() throws Exception { + return addMaster(c, index); + } + }); } /** - * @param serverNumber - * @return region server + * n * @return region server */ public HRegionServer getRegionServer(int serverNumber) { return regionThreads.get(serverNumber).getRegionServer(); @@ -275,14 +255,13 @@ public class LocalHBaseCluster { } /** - * @return List of running servers (Some servers may have been killed or - * aborted during lifetime of cluster; these servers are not included in this - * list). + * @return List of running servers (Some servers may have been killed or aborted during lifetime + * of cluster; these servers are not included in this list). 
*/ public List getLiveRegionServers() { List liveServers = new ArrayList<>(); List list = getRegionServers(); - for (JVMClusterUtil.RegionServerThread rst: list) { + for (JVMClusterUtil.RegionServerThread rst : list) { if (rst.isAlive()) liveServers.add(rst); else LOG.info("Not alive " + rst.getName()); } @@ -335,15 +314,14 @@ public class LocalHBaseCluster { } /** - * Gets the current active master, if available. If no active master, returns - * null. + * Gets the current active master, if available. If no active master, returns null. * @return the HMaster for the active master */ public HMaster getActiveMaster() { for (JVMClusterUtil.MasterThread mt : masterThreads) { // Ensure that the current active master is not stopped. // We don't want to return a stopping master as an active master. - if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) { + if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) { return mt.getMaster(); } } @@ -358,14 +336,13 @@ public class LocalHBaseCluster { } /** - * @return List of running master servers (Some servers may have been killed - * or aborted during lifetime of cluster; these servers are not included in - * this list). + * @return List of running master servers (Some servers may have been killed or aborted during + * lifetime of cluster; these servers are not included in this list). */ public List getLiveMasters() { List liveServers = new ArrayList<>(); List list = getMasters(); - for (JVMClusterUtil.MasterThread mt: list) { + for (JVMClusterUtil.MasterThread mt : list) { if (mt.isAlive()) { liveServers.add(mt); } @@ -394,7 +371,7 @@ public class LocalHBaseCluster { masterThread.join(); } catch (InterruptedException e) { LOG.error("Interrupted while waiting for {} to finish. Retrying join", - masterThread.getName(), e); + masterThread.getName(), e); interrupted = true; } } @@ -406,12 +383,11 @@ public class LocalHBaseCluster { } /** - * Wait for Mini HBase Cluster to shut down. - * Presumes you've already called {@link #shutdown()}. + * Wait for Mini HBase Cluster to shut down. Presumes you've already called {@link #shutdown()}. */ public void join() { if (this.regionThreads != null) { - for(Thread t: this.regionThreads) { + for (Thread t : this.regionThreads) { if (t.isAlive()) { try { Threads.threadDumpingIsAlive(t); @@ -453,14 +429,13 @@ public class LocalHBaseCluster { * @return True if a 'local' address in hbase.master value. */ public static boolean isLocal(final Configuration c) { - boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); - return(mode == HConstants.CLUSTER_IS_LOCAL); + boolean mode = + c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); + return (mode == HConstants.CLUSTER_IS_LOCAL); } /** - * Test things basically work. - * @param args - * @throws IOException + * Test things basically work. 
nn */ public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); @@ -469,8 +444,7 @@ public class LocalHBaseCluster { Connection connection = ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); try { - HTableDescriptor htd = - new HTableDescriptor(TableName.valueOf(cluster.getClass().getName())); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cluster.getClass().getName())); admin.createTable(htd); } finally { admin.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java index b294f7be4a0..22edcbfdfa9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java @@ -41,9 +41,9 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A cache of meta region location metadata. Registers a listener on ZK to track changes to the - * meta table znodes. Clients are expected to retry if the meta information is stale. This class - * is thread-safe (a single instance of this class can be shared by multiple threads without race + * A cache of meta region location metadata. Registers a listener on ZK to track changes to the meta + * table znodes. Clients are expected to retry if the meta information is stale. This class is + * thread-safe (a single instance of this class can be shared by multiple threads without race * conditions). */ @InterfaceAudience.Private @@ -61,14 +61,14 @@ public class MetaRegionLocationCache extends ZKListener { private static final int SLEEP_INTERVAL_MS_BETWEEN_RETRIES = 1000; private static final int SLEEP_INTERVAL_MS_MAX = 10000; private final RetryCounterFactory retryCounterFactory = - new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES); + new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES); /** - * Cached meta region locations indexed by replica ID. - * CopyOnWriteArrayMap ensures synchronization during updates and a consistent snapshot during - * client requests. Even though CopyOnWriteArrayMap copies the data structure for every write, - * that should be OK since the size of the list is often small and mutations are not too often - * and we do not need to block client requests while mutations are in progress. + * Cached meta region locations indexed by replica ID. CopyOnWriteArrayMap ensures synchronization + * during updates and a consistent snapshot during client requests. Even though + * CopyOnWriteArrayMap copies the data structure for every write, that should be OK since the size + * of the list is often small and mutations are not too often and we do not need to block client + * requests while mutations are in progress. */ private final CopyOnWriteArrayMap cachedMetaLocations; @@ -132,25 +132,24 @@ public class MetaRegionLocationCache extends ZKListener { // No new meta znodes got added. return; } - for (String znode: znodes) { + for (String znode : znodes) { String path = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode); updateMetaLocation(path, opType); } } /** - * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for - * future updates. + * Gets the HRegionLocation for a given meta replica ID. 
Renews the watch on the znode for future + * updates. * @param replicaId ReplicaID of the region. * @return HRegionLocation for the meta replica. * @throws KeeperException if there is any issue fetching/parsing the serialized data. */ - private HRegionLocation getMetaRegionLocation(int replicaId) - throws KeeperException { + private HRegionLocation getMetaRegionLocation(int replicaId) throws KeeperException { RegionState metaRegionState; try { - byte[] data = ZKUtil.getDataAndWatch(watcher, - watcher.getZNodePaths().getZNodeForReplica(replicaId)); + byte[] data = + ZKUtil.getDataAndWatch(watcher, watcher.getZNodePaths().getZNodeForReplica(replicaId)); metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId); } catch (DeserializationException e) { throw ZKUtil.convert(e); @@ -201,11 +200,10 @@ public class MetaRegionLocationCache extends ZKListener { /** * @return Optional list of HRegionLocations for meta replica(s), null if the cache is empty. - * */ public Optional> getMetaRegionLocations() { ConcurrentNavigableMap snapshot = - cachedMetaLocations.tailMap(cachedMetaLocations.firstKey()); + cachedMetaLocations.tailMap(cachedMetaLocations.firstKey()); if (snapshot.isEmpty()) { // This could be possible if the master has not successfully initialized yet or meta region // is stuck in some weird state. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java index e57471a778f..47f6938652d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -27,27 +26,23 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public interface RegionStateListener { -// TODO: Get rid of this!!!! Ain't there a better way to watch region -// state than introduce a whole new listening mechanism? St.Ack + // TODO: Get rid of this!!!! Ain't there a better way to watch region + // state than introduce a whole new listening mechanism? St.Ack /** * Process region split event. - * - * @param hri An instance of RegionInfo - * @throws IOException + * @param hri An instance of RegionInfo n */ void onRegionSplit(RegionInfo hri) throws IOException; /** * Process region split reverted event. - * * @param hri An instance of RegionInfo * @throws IOException Signals that an I/O exception has occurred. */ void onRegionSplitReverted(RegionInfo hri) throws IOException; /** - * Process region merge event. - * @throws IOException + * Process region merge event. n */ void onRegionMerged(RegionInfo mergedRegion) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index 8963b19ee6e..0476a2acb7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,9 +26,9 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; /** - * Defines a curated set of shared functions implemented by HBase servers (Masters - * and RegionServers). For use internally only. Be judicious adding API. Changes cause ripples - * through the code base. + * Defines a curated set of shared functions implemented by HBase servers (Masters and + * RegionServers). For use internally only. Be judicious adding API. Changes cause ripples through + * the code base. */ @InterfaceAudience.Private public interface Server extends Abortable, Stoppable { @@ -43,10 +43,9 @@ public interface Server extends Abortable, Stoppable { ZKWatcher getZooKeeper(); /** - * Returns a reference to the servers' connection. - * - * Important note: this method returns a reference to Connection which is managed - * by Server itself, so callers must NOT attempt to close connection obtained. + * Returns a reference to the servers' connection. Important note: this method returns a reference + * to Connection which is managed by Server itself, so callers must NOT attempt to close + * connection obtained. */ Connection getConnection(); @@ -54,9 +53,8 @@ public interface Server extends Abortable, Stoppable { /** * Returns a reference to the servers' cluster connection. Prefer {@link #getConnection()}. - * - * Important note: this method returns a reference to Connection which is managed - * by Server itself, so callers must NOT attempt to close connection obtained. + * Important note: this method returns a reference to Connection which is managed by Server + * itself, so callers must NOT attempt to close connection obtained. */ ClusterConnection getClusterConnection(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java index 443c8d2e32c..176b860eec0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase; /** @@ -19,21 +36,20 @@ package org.apache.hadoop.hbase; */ import java.lang.reflect.Field; import java.util.concurrent.atomic.LongAdder; - import org.apache.yetus.audience.InterfaceAudience; /** - * Counters kept by the distributed WAL split log process. - * Used by master and regionserver packages. + * Counters kept by the distributed WAL split log process. Used by master and regionserver packages. 
* @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private public class SplitLogCounters { - private SplitLogCounters() {} + private SplitLogCounters() { + } - //Spnager counters + // Spnager counters public final static LongAdder tot_mgr_log_split_batch_start = new LongAdder(); public final static LongAdder tot_mgr_log_split_batch_success = new LongAdder(); public final static LongAdder tot_mgr_log_split_batch_err = new LongAdder(); @@ -95,7 +111,7 @@ public class SplitLogCounters { for (Field fld : cl.getDeclaredFields()) { /* Guard against source instrumentation. */ if ((!fld.isSynthetic()) && (LongAdder.class.isAssignableFrom(fld.getType()))) { - ((LongAdder)fld.get(null)).reset(); + ((LongAdder) fld.get(null)).reset(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java index ca07fcb1ee3..280ad3b7c47 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +18,22 @@ package org.apache.hadoop.hbase; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * State of a WAL log split during distributed splitting. State is kept up in zookeeper. - * Encapsulates protobuf serialization/deserialization so we don't leak generated pb outside of - * this class. Used by regionserver and master packages. - *

      Immutable + * State of a WAL log split during distributed splitting. State is kept up in zookeeper. + * Encapsulates protobuf serialization/deserialization so we don't leak generated pb outside of this + * class. Used by regionserver and master packages. + *

      + * Immutable * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private @@ -132,7 +133,7 @@ public class SplitLogTask { @Override public boolean equals(Object obj) { if (!(obj instanceof SplitLogTask)) return false; - SplitLogTask other = (SplitLogTask)obj; + SplitLogTask other = (SplitLogTask) obj; return other.state.equals(this.state) && other.originServer.equals(this.originServer); } @@ -145,11 +146,10 @@ public class SplitLogTask { /** * @param data Serialized date to parse. - * @return An SplitLogTaskState instance made of the passed data - * @throws DeserializationException - * @see #toByteArray() + * @return An SplitLogTaskState instance made of the passed data n * @see + * #toByteArray() */ - public static SplitLogTask parseFrom(final byte [] data) throws DeserializationException { + public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(data); try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); @@ -165,9 +165,9 @@ public class SplitLogTask { * @return This instance serialized into a byte array * @see #parseFrom(byte[]) */ - public byte [] toByteArray() { - // First create a pb ServerName. Then create a ByteString w/ the TaskState - // bytes in it. Finally create a SplitLogTaskState passing in the two + public byte[] toByteArray() { + // First create a pb ServerName. Then create a ByteString w/ the TaskState + // bytes in it. Finally create a SplitLogTaskState passing in the two // pbs just created. HBaseProtos.ServerName snpb = ProtobufUtil.toServerName(this.originServer); ZooKeeperProtos.SplitLogTask slts = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java index 5dffb73d3ed..5b089d1f292 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; @@ -32,8 +39,7 @@ public class SslRMIClientSocketFactorySecure extends SslRMIClientSocketFactory { secureProtocols.add(p); } } - socket.setEnabledProtocols(secureProtocols.toArray( - new String[secureProtocols.size()])); + socket.setEnabledProtocols(secureProtocols.toArray(new String[secureProtocols.size()])); return socket; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java index 8a9223675a7..9e4a22cb84b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase; @@ -38,9 +45,8 @@ public class SslRMIServerSocketFactorySecure extends SslRMIServerSocketFactory { public Socket accept() throws IOException { Socket socket = super.accept(); SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); - SSLSocket sslSocket = - (SSLSocket) sslSocketFactory.createSocket(socket, - socket.getInetAddress().getHostName(), socket.getPort(), true); + SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket(socket, + socket.getInetAddress().getHostName(), socket.getPort(), true); sslSocket.setUseClientMode(false); sslSocket.setNeedClientAuth(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index f7e07045f4c..1dc17eff0d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +19,8 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.yetus.audience.InterfaceAudience; /** * Get, remove and modify table descriptors. @@ -66,10 +65,10 @@ public interface TableDescriptors { /** * Add or update descriptor - * @param htd Descriptor to set into TableDescriptors + * @param htd Descriptor to set into TableDescriptors * @param cacheOnly only add the given {@code htd} to cache, without updating the storage. For - * example, when creating table, we will write the descriptor to fs when creating the fs - * layout, so we do not need to update the fs again. + * example, when creating table, we will write the descriptor to fs when creating + * the fs layout, so we do not need to update the fs again. */ void update(TableDescriptor htd, boolean cacheOnly) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java index 6ba719a4acb..5dec53e27a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java index 101c40ebd86..1bae99227c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -23,7 +22,6 @@ import java.util.List; import java.util.NavigableMap; import java.util.NavigableSet; import java.util.concurrent.ConcurrentSkipListMap; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -35,23 +33,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Class servers two purposes: - * - * 1. Broadcast NamespaceDescriptor information via ZK - * (Done by the Master) - * 2. Consume broadcasted NamespaceDescriptor changes - * (Done by the RegionServers) - * + * Class servers two purposes: 1. Broadcast NamespaceDescriptor information via ZK (Done by the + * Master) 2. Consume broadcasted NamespaceDescriptor changes (Done by the RegionServers) */ @InterfaceAudience.Private public class ZKNamespaceManager extends ZKListener { private static final Logger LOG = LoggerFactory.getLogger(ZKNamespaceManager.class); private final String nsZNode; - private final NavigableMap cache; + private final NavigableMap cache; public ZKNamespaceManager(ZKWatcher zkw) throws IOException { super(zkw); @@ -64,7 +58,7 @@ public class ZKNamespaceManager extends ZKListener { try { if (ZKUtil.watchAndCheckExists(watcher, nsZNode)) { List existing = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); if (existing != null) { refreshNodes(existing); } @@ -96,8 +90,8 @@ public class ZKNamespaceManager extends ZKListener { public NavigableSet list() throws IOException { NavigableSet ret = - Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR); - for(NamespaceDescriptor ns: cache.values()) { + Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR); + for (NamespaceDescriptor ns : cache.values()) { ret.add(ns); } return ret; @@ -108,7 +102,7 @@ public class ZKNamespaceManager extends ZKListener { if (nsZNode.equals(path)) { try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); refreshNodes(nodes); } catch (KeeperException ke) { String msg = "Error reading data from zookeeper"; @@ -136,16 +130,15 @@ public class ZKNamespaceManager extends ZKListener { try { byte[] data = ZKUtil.getDataAndWatch(watcher, path); NamespaceDescriptor ns = - ProtobufUtil.toNamespaceDescriptor( - HBaseProtos.NamespaceDescriptor.parseFrom(data)); + ProtobufUtil.toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(data)); cache.put(ns.getName(), ns); } catch (KeeperException ke) { - String msg = "Error reading data from zookeeper for node "+path; + String msg = "Error reading data from zookeeper for node " + path; LOG.error(msg, ke); // only option is to abort watcher.abort(msg, ke); } catch (IOException ioe) { - String msg = "Error deserializing namespace: "+path; + String msg = "Error deserializing namespace: " + path; LOG.error(msg, ioe); watcher.abort(msg, ioe); } @@ -157,13 +150,13 @@ public class ZKNamespaceManager extends ZKListener { if (nsZNode.equals(path)) { 
try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); refreshNodes(nodes); } catch (KeeperException ke) { - LOG.error("Error reading data from zookeeper for path "+path, ke); - watcher.abort("ZooKeeper error get node children for path "+path, ke); + LOG.error("Error reading data from zookeeper for path " + path, ke); + watcher.abort("ZooKeeper error get node children for path " + path, ke); } catch (IOException e) { - LOG.error("Error deserializing namespace child from: "+path, e); + LOG.error("Error deserializing namespace child from: " + path, e); watcher.abort("Error deserializing namespace child from: " + path, e); } } @@ -189,10 +182,10 @@ public class ZKNamespaceManager extends ZKListener { try { ZKUtil.createWithParents(watcher, zNode); ZKUtil.updateExistingNodeData(watcher, zNode, - ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray(), -1); + ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray(), -1); } catch (KeeperException e) { - LOG.error("Failed updating permissions for namespace "+ns.getName(), e); - throw new IOException("Failed updating permissions for namespace "+ns.getName(), e); + LOG.error("Failed updating permissions for namespace " + ns.getName(), e); + throw new IOException("Failed updating permissions for namespace " + ns.getName(), e); } } @@ -203,12 +196,11 @@ public class ZKNamespaceManager extends ZKListener { String namespace = ZKUtil.getNodeName(path); byte[] nodeData = n.getData(); if (LOG.isTraceEnabled()) { - LOG.trace("Updating namespace cache from node " + namespace + " with data: " + - Bytes.toStringBinary(nodeData)); + LOG.trace("Updating namespace cache from node " + namespace + " with data: " + + Bytes.toStringBinary(nodeData)); } NamespaceDescriptor ns = - ProtobufUtil.toNamespaceDescriptor( - HBaseProtos.NamespaceDescriptor.parseFrom(nodeData)); + ProtobufUtil.toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(nodeData)); cache.put(ns.getName(), ns); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index 1cde2fa2484..8b3a45fc84c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.BufferedReader; @@ -37,22 +36,29 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - *

<p>Contains a set of methods for the collaboration between the start/stop scripts and the
- * servers. It allows to delete immediately the znode when the master or the regions server crashes.
- * The region server / master writes a specific file when it starts / becomes main master. When they
- * end properly, they delete the file.</p>
- * <p>In the script, we check for the existence of these files when the program ends. If they still
+ * <p>
+ * Contains a set of methods for the collaboration between the start/stop scripts and the servers.
+ * It allows to delete immediately the znode when the master or the regions server crashes. The
+ * region server / master writes a specific file when it starts / becomes main master. When they end
+ * properly, they delete the file.
+ * </p>
+ * <p>
+ * In the script, we check for the existence of these files when the program ends. If they still
  * exist we conclude that the server crashed, likely without deleting their znode. To have a faster
- * recovery we delete immediately the znode.</p>
- * <p>The strategy depends on the server type. For a region server we store the znode path in the
- * file, and use it to delete it. for a master, as the znode path constant whatever the server, we
- * check its content to make sure that the backup server is not now in charge.</p>
+ * recovery we delete immediately the znode.
+ * </p>
+ * <p>
+ * The strategy depends on the server type. For a region server we store the znode path in the file,
+ * and use it to delete it. for a master, as the znode path constant whatever the server, we check
+ * its content to make sure that the backup server is not now in charge.
+ * </p>
      */ @InterfaceAudience.Private public final class ZNodeClearer { private static final Logger LOG = LoggerFactory.getLogger(ZNodeClearer.class); - private ZNodeClearer() {} + private ZNodeClearer() { + } /** * Logs the errors without failing on exception. @@ -60,8 +66,8 @@ public final class ZNodeClearer { public static void writeMyEphemeralNodeOnDisk(String fileContent) { String fileName = ZNodeClearer.getMyEphemeralNodeFileName(); if (fileName == null) { - LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " + - "on crash by start scripts (Longer MTTR!)"); + LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " + + "on crash by start scripts (Longer MTTR!)"); return; } @@ -69,7 +75,7 @@ public final class ZNodeClearer { try { fstream = new FileWriter(fileName); } catch (IOException e) { - LOG.warn("Can't write znode file "+fileName, e); + LOG.warn("Can't write znode file " + fileName, e); return; } @@ -86,7 +92,7 @@ public final class ZNodeClearer { } } } catch (IOException e) { - LOG.warn("Can't write znode file "+fileName, e); + LOG.warn("Can't write znode file " + fileName, e); } } @@ -95,7 +101,7 @@ public final class ZNodeClearer { */ public static String readMyEphemeralNodeOnDisk() throws IOException { String fileName = getMyEphemeralNodeFileName(); - if (fileName == null){ + if (fileName == null) { throw new FileNotFoundException("No filename; set environment variable HBASE_ZNODE_FILE"); } FileReader znodeFile = new FileReader(fileName); @@ -117,7 +123,7 @@ public final class ZNodeClearer { } /** - * delete the znode file + * delete the znode file */ public static void deleteMyEphemeralNodeOnDisk() { String fileName = getMyEphemeralNodeFileName(); @@ -128,8 +134,8 @@ public final class ZNodeClearer { } /** - * See HBASE-14861. We are extracting master ServerName from rsZnodePath - * example: "/hbase/rs/server.example.com,16020,1448266496481" + * See HBASE-14861. We are extracting master ServerName from rsZnodePath example: + * "/hbase/rs/server.example.com,16020,1448266496481" * @param rsZnodePath from HBASE_ZNODE_FILE * @return String representation of ServerName or null if fails */ @@ -138,7 +144,7 @@ public final class ZNodeClearer { String masterServerName = null; try { String[] rsZnodeParts = rsZnodePath.split("/"); - masterServerName = rsZnodeParts[rsZnodeParts.length -1]; + masterServerName = rsZnodeParts[rsZnodeParts.length - 1]; } catch (IndexOutOfBoundsException e) { LOG.warn("String " + rsZnodePath + " has wrong format", e); } @@ -161,9 +167,9 @@ public final class ZNodeClearer { } /** - * Delete the master znode if its content (ServerName string) is the same - * as the one in the znode file. (env: HBASE_ZNODE_FILE). I case of master-rs - * colloaction we extract ServerName string from rsZnode path.(HBASE-14861) + * Delete the master znode if its content (ServerName string) is the same as the one in the znode + * file. (env: HBASE_ZNODE_FILE). I case of master-rs colloaction we extract ServerName string + * from rsZnode path.(HBASE-14861) * @return true on successful deletion, false otherwise. 
*/ public static boolean clear(Configuration conf) { @@ -172,11 +178,16 @@ public final class ZNodeClearer { ZKWatcher zkw; try { - zkw = new ZKWatcher(tempConf, "clean znode for master", - new Abortable() { - @Override public void abort(String why, Throwable e) {} - @Override public boolean isAborted() { return false; } - }); + zkw = new ZKWatcher(tempConf, "clean znode for master", new Abortable() { + @Override + public void abort(String why, Throwable e) { + } + + @Override + public boolean isAborted() { + return false; + } + }); } catch (IOException e) { LOG.warn("Can't connect to zookeeper to read the master znode", e); return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java index fa081948f3f..51aeabb7b45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; -import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; import java.util.Collection; +import org.apache.hadoop.fs.Path; +import org.apache.yetus.audience.InterfaceAudience; /** * Exception indicating that some files in the requested set could not be archived. @@ -42,9 +40,6 @@ public class FailedArchiveException extends IOException { @Override public String getMessage() { - return new StringBuilder(super.getMessage()) - .append("; files=") - .append(failedFiles) - .toString(); + return new StringBuilder(super.getMessage()).append("; files=").append(failedFiles).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index 6400976bf43..8e666308d4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,13 +67,12 @@ public class HFileArchiver { /** Number of retries in case of fs operation failure */ private static final int DEFAULT_RETRIES_NUMBER = 3; - private static final Function FUNC_FILE_TO_PATH = - new Function() { - @Override - public Path apply(File file) { - return file == null ? null : file.getPath(); - } - }; + private static final Function FUNC_FILE_TO_PATH = new Function() { + @Override + public Path apply(File file) { + return file == null ? null : file.getPath(); + } + }; private static ThreadPoolExecutor archiveExecutor; @@ -85,7 +84,7 @@ public class HFileArchiver { * @return True if the Region exits in the filesystem. 
*/ public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, info); return fs.exists(regionDir); @@ -94,11 +93,11 @@ public class HFileArchiver { /** * Cleans up all the files for a HRegion by archiving the HFiles to the archive directory * @param conf the configuration to use - * @param fs the file system object + * @param fs the file system object * @param info RegionInfo for region to be deleted */ public static void archiveRegion(Configuration conf, FileSystem fs, RegionInfo info) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); archiveRegion(fs, rootDir, CommonFSUtils.getTableDir(rootDir, info.getTable()), FSUtils.getRegionDirFromRootDir(rootDir, info)); @@ -106,22 +105,23 @@ public class HFileArchiver { /** * Remove an entire region from the table directory via archiving the region's hfiles. - * @param fs {@link FileSystem} from which to remove the region - * @param rootdir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) - * @param tableDir {@link Path} to where the table is being stored (for building the archive path) + * @param fs {@link FileSystem} from which to remove the region + * @param rootdir {@link Path} to the root directory where hbase files are stored (for building + * the archive path) + * @param tableDir {@link Path} to where the table is being stored (for building the archive + * path) * @param regionDir {@link Path} to where a region is being stored (for building the archive path) * @return true if the region was successfully deleted. false if the filesystem * operations could not complete. * @throws IOException if the request cannot be completed */ public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir) - throws IOException { + throws IOException { // otherwise, we archive the files // make sure we can archive if (tableDir == null || regionDir == null) { LOG.error("No archive directory could be found because tabledir (" + tableDir - + ") or regiondir (" + regionDir + "was null. Deleting files instead."); + + ") or regiondir (" + regionDir + "was null. Deleting files instead."); if (regionDir != null) { deleteRegionWithoutArchiving(fs, regionDir); } @@ -159,12 +159,12 @@ public class HFileArchiver { // convert the files in the region to a File Stream.of(storeDirs).map(getAsFile).forEachOrdered(toArchive::add); LOG.debug("Archiving " + toArchive); - List failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive, - EnvironmentEdgeManager.currentTime()); + List failedArchive = + resolveAndArchive(fs, regionArchiveDir, toArchive, EnvironmentEdgeManager.currentTime()); if (!failedArchive.isEmpty()) { throw new FailedArchiveException( - "Failed to archive/delete all the files for region:" + regionDir.getName() + " into " + - regionArchiveDir + ". Something is probably awry on the filesystem.", + "Failed to archive/delete all the files for region:" + regionDir.getName() + " into " + + regionArchiveDir + ". Something is probably awry on the filesystem.", failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } // if that was successful, then we delete the region @@ -173,20 +173,20 @@ public class HFileArchiver { /** * Archive the specified regions in parallel. 
- * @param conf the configuration to use - * @param fs {@link FileSystem} from which to remove the region - * @param rootDir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) - * @param tableDir {@link Path} to where the table is being stored (for building the archive - * path) + * @param conf the configuration to use + * @param fs {@link FileSystem} from which to remove the region + * @param rootDir {@link Path} to the root directory where hbase files are stored (for + * building the archive path) + * @param tableDir {@link Path} to where the table is being stored (for building the archive + * path) * @param regionDirList {@link Path} to where regions are being stored (for building the archive - * path) + * path) * @throws IOException if the request cannot be completed */ public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDir, Path tableDir, List regionDirList) throws IOException { List> futures = new ArrayList<>(regionDirList.size()); - for (Path regionDir: regionDirList) { + for (Path regionDir : regionDirList) { Future future = getArchiveExecutor(conf).submit(() -> { archiveRegion(fs, rootDir, tableDir, regionDir); return null; @@ -194,7 +194,7 @@ public class HFileArchiver { futures.add(future); } try { - for (Future future: futures) { + for (Future future : futures) { future.get(); } } catch (InterruptedException e) { @@ -207,8 +207,8 @@ public class HFileArchiver { private static synchronized ThreadPoolExecutor getArchiveExecutor(final Configuration conf) { if (archiveExecutor == null) { int maxThreads = conf.getInt("hbase.hfilearchiver.thread.pool.max", 8); - archiveExecutor = Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, - getThreadFactory()); + archiveExecutor = + Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, getThreadFactory()); // Shutdown this ThreadPool in a shutdown hook Runtime.getRuntime().addShutdownHook(new Thread(() -> archiveExecutor.shutdown())); @@ -235,37 +235,37 @@ public class HFileArchiver { } /** - * Remove from the specified region the store files of the specified column family, - * either by archiving them or outright deletion - * @param fs the filesystem where the store files live - * @param conf {@link Configuration} to examine to determine the archive directory - * @param parent Parent region hosting the store files + * Remove from the specified region the store files of the specified column family, either by + * archiving them or outright deletion + * @param fs the filesystem where the store files live + * @param conf {@link Configuration} to examine to determine the archive directory + * @param parent Parent region hosting the store files * @param tableDir {@link Path} to where the table is being stored (for building the archive path) - * @param family the family hosting the store files + * @param family the family hosting the store files * @throws IOException if the files could not be correctly disposed. 
*/ - public static void archiveFamily(FileSystem fs, Configuration conf, - RegionInfo parent, Path tableDir, byte[] family) throws IOException { + public static void archiveFamily(FileSystem fs, Configuration conf, RegionInfo parent, + Path tableDir, byte[] family) throws IOException { Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family))); archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family); } /** - * Removes from the specified region the store files of the specified column family, - * either by archiving them or outright deletion - * @param fs the filesystem where the store files live - * @param conf {@link Configuration} to examine to determine the archive directory - * @param parent Parent region hosting the store files + * Removes from the specified region the store files of the specified column family, either by + * archiving them or outright deletion + * @param fs the filesystem where the store files live + * @param conf {@link Configuration} to examine to determine the archive directory + * @param parent Parent region hosting the store files * @param familyDir {@link Path} to where the family is being stored - * @param family the family hosting the store files + * @param family the family hosting the store files * @throws IOException if the files could not be correctly disposed. */ - public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, - RegionInfo parent, Path familyDir, byte[] family) throws IOException { + public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, RegionInfo parent, + Path familyDir, byte[] family) throws IOException { FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, familyDir); if (storeFiles == null) { LOG.debug("No files to dispose of in {}, family={}", parent.getRegionNameAsString(), - Bytes.toString(family)); + Bytes.toString(family)); return; } @@ -274,29 +274,29 @@ public class HFileArchiver { Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family); // do the actual archive - List failedArchive = resolveAndArchive(fs, storeArchiveDir, toArchive, - EnvironmentEdgeManager.currentTime()); - if (!failedArchive.isEmpty()){ - throw new FailedArchiveException("Failed to archive/delete all the files for region:" - + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) - + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.", - failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); + List failedArchive = + resolveAndArchive(fs, storeArchiveDir, toArchive, EnvironmentEdgeManager.currentTime()); + if (!failedArchive.isEmpty()) { + throw new FailedArchiveException( + "Failed to archive/delete all the files for region:" + + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) + " into " + + storeArchiveDir + ". 
Something is probably awry on the filesystem.", + failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } } /** * Remove the store files, either by archiving them or outright deletion - * @param conf {@link Configuration} to examine to determine the archive directory - * @param fs the filesystem where the store files live - * @param regionInfo {@link RegionInfo} of the region hosting the store files - * @param family the family hosting the store files + * @param conf {@link Configuration} to examine to determine the archive directory + * @param fs the filesystem where the store files live + * @param regionInfo {@link RegionInfo} of the region hosting the store files + * @param family the family hosting the store files * @param compactedFiles files to be disposed of. No further reading of these files should be - * attempted; otherwise likely to cause an {@link IOException} + * attempted; otherwise likely to cause an {@link IOException} * @throws IOException if the files could not be correctly disposed. */ public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionInfo regionInfo, - Path tableDir, byte[] family, Collection compactedFiles) - throws IOException { + Path tableDir, byte[] family, Collection compactedFiles) throws IOException { Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); archive(fs, regionInfo, family, compactedFiles, storeArchiveDir); } @@ -304,30 +304,29 @@ public class HFileArchiver { /** * Archive recovered edits using existing logic for archiving store files. This is currently only * relevant when hbase.region.archive.recovered.edits is true, as recovered edits shouldn't - * be kept after replay. In theory, we could use very same method available for archiving - * store files, but supporting WAL dir and store files on different FileSystems added the need for - * extra validation of the passed FileSystem instance and the path where the archiving edits - * should be placed. - * @param conf {@link Configuration} to determine the archive directory. - * @param fs the filesystem used for storing WAL files. - * @param regionInfo {@link RegionInfo} a pseudo region representation for the archiving logic. - * @param family a pseudo familiy representation for the archiving logic. + * be kept after replay. In theory, we could use very same method available for archiving store + * files, but supporting WAL dir and store files on different FileSystems added the need for extra + * validation of the passed FileSystem instance and the path where the archiving edits should be + * placed. + * @param conf {@link Configuration} to determine the archive directory. + * @param fs the filesystem used for storing WAL files. + * @param regionInfo {@link RegionInfo} a pseudo region representation for the archiving logic. + * @param family a pseudo familiy representation for the archiving logic. * @param replayedEdits the recovered edits to be archived. * @throws IOException if files can't be achived due to some internal error. 
*/ public static void archiveRecoveredEdits(Configuration conf, FileSystem fs, RegionInfo regionInfo, - byte[] family, Collection replayedEdits) - throws IOException { + byte[] family, Collection replayedEdits) throws IOException { String workingDir = conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR)); - //extra sanity checks for the right FS + // extra sanity checks for the right FS Path path = new Path(workingDir); - if(path.isAbsoluteAndSchemeAuthorityNull()){ - //no schema specified on wal dir value, so it's on same FS as StoreFiles + if (path.isAbsoluteAndSchemeAuthorityNull()) { + // no schema specified on wal dir value, so it's on same FS as StoreFiles path = new Path(conf.get(HConstants.HBASE_DIR)); } - if(path.toUri().getScheme()!=null && !path.toUri().getScheme().equals(fs.getScheme())){ - throw new IOException("Wrong file system! Should be " + path.toUri().getScheme() + - ", but got " + fs.getScheme()); + if (path.toUri().getScheme() != null && !path.toUri().getScheme().equals(fs.getScheme())) { + throw new IOException( + "Wrong file system! Should be " + path.toUri().getScheme() + ", but got " + fs.getScheme()); } path = HFileArchiveUtil.getStoreArchivePathForRootDir(path, regionInfo, family); archive(fs, regionInfo, family, replayedEdits, path); @@ -337,8 +336,9 @@ public class HFileArchiver { Collection compactedFiles, Path storeArchiveDir) throws IOException { // sometimes in testing, we don't have rss, so we need to check for that if (fs == null) { - LOG.warn("Passed filesystem is null, so just deleting files without archiving for {}," + - "family={}", Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family)); + LOG.warn( + "Passed filesystem is null, so just deleting files without archiving for {}," + "family={}", + Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family)); deleteStoreFilesWithoutArchiving(compactedFiles); return; } @@ -350,12 +350,12 @@ public class HFileArchiver { } // build the archive path - if (regionInfo == null || family == null) throw new IOException( - "Need to have a region and a family to archive from."); + if (regionInfo == null || family == null) + throw new IOException("Need to have a region and a family to archive from."); // make sure we don't archive if we can't and that the archive dir exists if (!fs.mkdirs(storeArchiveDir)) { throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" - + Bytes.toString(family) + ", deleting compacted files instead."); + + Bytes.toString(family) + ", deleting compacted files instead."); } // otherwise we attempt to archive the store files @@ -370,31 +370,33 @@ public class HFileArchiver { List failedArchive = resolveAndArchive(fs, storeArchiveDir, storeFiles, EnvironmentEdgeManager.currentTime()); - if (!failedArchive.isEmpty()){ - throw new FailedArchiveException("Failed to archive/delete all the files for region:" + if (!failedArchive.isEmpty()) { + throw new FailedArchiveException( + "Failed to archive/delete all the files for region:" + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family) + " into " + storeArchiveDir + ". 
Something is probably awry on the filesystem.", - failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); + failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } } /** * Archive the store file - * @param fs the filesystem where the store files live + * @param fs the filesystem where the store files live * @param regionInfo region hosting the store files - * @param conf {@link Configuration} to examine to determine the archive directory - * @param tableDir {@link Path} to where the table is being stored (for building the archive path) - * @param family the family hosting the store files - * @param storeFile file to be archived + * @param conf {@link Configuration} to examine to determine the archive directory + * @param tableDir {@link Path} to where the table is being stored (for building the archive + * path) + * @param family the family hosting the store files + * @param storeFile file to be archived * @throws IOException if the files could not be correctly disposed. */ public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInfo regionInfo, - Path tableDir, byte[] family, Path storeFile) throws IOException { + Path tableDir, byte[] family, Path storeFile) throws IOException { Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); // make sure we don't archive if we can't and that the archive dir exists if (!fs.mkdirs(storeArchiveDir)) { throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" - + Bytes.toString(family) + ", deleting compacted files instead."); + + Bytes.toString(family) + ", deleting compacted files instead."); } // do the actual archive @@ -402,27 +404,25 @@ public class HFileArchiver { File file = new FileablePath(fs, storeFile); if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) { throw new IOException("Failed to archive/delete the file for region:" - + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) - + " into " + storeArchiveDir + ". Something is probably awry on the filesystem."); + + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) + " into " + + storeArchiveDir + ". Something is probably awry on the filesystem."); } } /** - * Resolve any conflict with an existing archive file via timestamp-append - * renaming of the existing file and then archive the passed in files. - * @param fs {@link FileSystem} on which to archive the files - * @param baseArchiveDir base archive directory to store the files. If any of - * the files to archive are directories, will append the name of the - * directory to the base archive directory name, creating a parallel - * structure. - * @param toArchive files/directories that need to be archvied - * @param start time the archiving started - used for resolving archive - * conflicts. + * Resolve any conflict with an existing archive file via timestamp-append renaming of the + * existing file and then archive the passed in files. + * @param fs {@link FileSystem} on which to archive the files + * @param baseArchiveDir base archive directory to store the files. If any of the files to archive + * are directories, will append the name of the directory to the base + * archive directory name, creating a parallel structure. + * @param toArchive files/directories that need to be archvied + * @param start time the archiving started - used for resolving archive conflicts. * @return the list of failed to archive files. 
* @throws IOException if an unexpected file operation exception occurred */ private static List resolveAndArchive(FileSystem fs, Path baseArchiveDir, - Collection toArchive, long start) throws IOException { + Collection toArchive, long start) throws IOException { // short circuit if no files to move if (toArchive.isEmpty()) { return Collections.emptyList(); @@ -434,7 +434,7 @@ public class HFileArchiver { if (!fs.exists(baseArchiveDir)) { if (!fs.mkdirs(baseArchiveDir)) { throw new IOException("Failed to create the archive directory:" + baseArchiveDir - + ", quitting archive attempt."); + + ", quitting archive attempt."); } LOG.trace("Created archive directory {}", baseArchiveDir); } @@ -474,15 +474,15 @@ public class HFileArchiver { *
<p>
      * If the same file already exists in the archive, it is moved to a timestamped directory under * the archive directory and the new file is put in its place. - * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles - * @param currentFile {@link Path} to the original HFile that will be archived + * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles + * @param currentFile {@link Path} to the original HFile that will be archived * @param archiveStartTime time the archiving started, to resolve naming conflicts * @return true if the file is successfully archived. false if there was a * problem, but the operation still completed. * @throws IOException on failure to complete {@link FileSystem} operations. */ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, - String archiveStartTime) throws IOException { + String archiveStartTime) throws IOException { // build path as it should be in the archive String filename = currentFile.getName(); Path archiveFile = new Path(archiveDir, filename); @@ -514,14 +514,16 @@ public class HFileArchiver { long curMtime = curStatus.getModificationTime(); long archiveMtime = archiveStatus.getModificationTime(); if (curLen != archiveLen) { - LOG.error("{} already exists in archive with different size than current {}." + LOG.error( + "{} already exists in archive with different size than current {}." + " archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime); - throw new IOException(archiveFile + " already exists in archive with different size" + - " than " + currentFile); + throw new IOException( + archiveFile + " already exists in archive with different size" + " than " + currentFile); } - LOG.error("{} already exists in archive, moving to timestamped backup and overwriting" + LOG.error( + "{} already exists in archive, moving to timestamped backup and overwriting" + " current {}. archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime); @@ -529,12 +531,12 @@ public class HFileArchiver { Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime); if (!fs.rename(archiveFile, backedupArchiveFile)) { LOG.error("Could not rename archive file to backup: " + backedupArchiveFile - + ", deleting existing file in favor of newer."); + + ", deleting existing file in favor of newer."); // try to delete the existing file, if we can't rename it if (!fs.delete(archiveFile, false)) { throw new IOException("Couldn't delete existing archive file (" + archiveFile - + ") or rename it to the backup file (" + backedupArchiveFile - + ") to make room for similarly named file."); + + ") or rename it to the backup file (" + backedupArchiveFile + + ") to make room for similarly named file."); } } else { LOG.info("Backed up archive file from {} to {}.", archiveFile, backedupArchiveFile); @@ -565,8 +567,8 @@ public class HFileArchiver { try { success = currentFile.moveAndClose(archiveFile); } catch (FileNotFoundException fnfe) { - LOG.warn("Failed to archive " + currentFile + - " because it does not exist! Skipping and continuing on.", fnfe); + LOG.warn("Failed to archive " + currentFile + + " because it does not exist! 
Skipping and continuing on.", fnfe); success = true; } catch (IOException e) { LOG.warn("Failed to archive " + currentFile + " on try #" + i, e); @@ -586,12 +588,12 @@ public class HFileArchiver { /** * Without regard for backup, delete a region. Should be used with caution. * @param regionDir {@link Path} to the region to be deleted. - * @param fs FileSystem from which to delete the region + * @param fs FileSystem from which to delete the region * @return true on successful deletion, false otherwise * @throws IOException on filesystem operation failure */ private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir) - throws IOException { + throws IOException { if (fs.delete(regionDir, true)) { LOG.debug("Deleted {}", regionDir); return true; @@ -607,10 +609,10 @@ public class HFileArchiver { *
<p>
      * @param compactedFiles store files to delete from the file system. * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before - * throwing the exception, rather than failing at the first file. + * throwing the exception, rather than failing at the first file. */ private static void deleteStoreFilesWithoutArchiving(Collection compactedFiles) - throws IOException { + throws IOException { LOG.debug("Deleting files without archiving."); List errors = new ArrayList<>(0); for (HStoreFile hsf : compactedFiles) { @@ -654,8 +656,7 @@ public class HFileArchiver { } /** - * Convert the {@link HStoreFile} into something we can manage in the archive - * methods + * Convert the {@link HStoreFile} into something we can manage in the archive methods */ private static class StoreToFile extends FileConverter { public StoreToFile(FileSystem fs) { @@ -692,21 +693,18 @@ public class HFileArchiver { abstract boolean isFile() throws IOException; /** - * @return if this is a directory, returns all the children in the - * directory, otherwise returns an empty list - * @throws IOException + * @return if this is a directory, returns all the children in the directory, otherwise returns + * an empty list n */ abstract Collection getChildren() throws IOException; /** - * close any outside readers of the file - * @throws IOException + * close any outside readers of the file n */ abstract void close() throws IOException; /** - * @return the name of the file (not the full fs path, just the individual - * file name) + * @return the name of the file (not the full fs path, just the individual file name) */ abstract String getName(); @@ -716,10 +714,7 @@ public class HFileArchiver { abstract Path getPath(); /** - * Move the file to the given destination - * @param dest - * @return true on success - * @throws IOException + * Move the file to the given destination n * @return true on success n */ public boolean moveAndClose(Path dest) throws IOException { this.close(); @@ -788,8 +783,7 @@ public class HFileArchiver { } /** - * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem} - * . + * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem} . */ private static class FileableStoreFile extends File { HStoreFile file; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java index 9978f4a67d8..17ecdf1a83c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Connection; @@ -46,11 +45,11 @@ class HFileArchiveManager { private volatile boolean stopped = false; public HFileArchiveManager(Connection connection, Configuration conf) - throws ZooKeeperConnectionException, IOException { - this.zooKeeper = new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), - connection); - this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(), - this.zooKeeper); + throws ZooKeeperConnectionException, IOException { + this.zooKeeper = + new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), connection); + this.archiveZnode = + ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(), this.zooKeeper); } /** @@ -101,7 +100,7 @@ class HFileArchiveManager { * No attempt is made to make sure that backups are successfully created - it is inherently an * asynchronous operation. * @param zooKeeper watcher connection to zk cluster - * @param table table name on which to enable archiving + * @param table table name on which to enable archiving * @throws KeeperException if a ZooKeeper operation fails */ private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { @@ -119,7 +118,7 @@ class HFileArchiveManager { *
<p>
      * Inherently an asynchronous operation. * @param zooKeeper watcher for the ZK cluster - * @param table name of the table to disable + * @param table name of the table to disable * @throws KeeperException if an unexpected ZK connection issues occurs */ private void disable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java index a4daaf01139..80cdc7587a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +36,7 @@ public class HFileArchiveTableMonitor { private final Set archivedTables = new TreeSet<>(); /** - * Set the tables to be archived. Internally adds each table and attempts to - * register it. + * Set the tables to be archived. Internally adds each table and attempts to register it. *
<p>
      * Note: All previous tables will be removed in favor of these tables. * @param tables add each of the tables to be archived. @@ -48,8 +47,7 @@ public class HFileArchiveTableMonitor { } /** - * Add the named table to be those being archived. Attempts to register the - * table + * Add the named table to be those being archived. Attempts to register the table * @param table name of the table to be registered */ public synchronized void addTable(String table) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java index 946f7593d43..b26b6bc4ef9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,10 +35,9 @@ import org.slf4j.LoggerFactory; * currently being archived. *
<p>
      * This only works properly if the - * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} - * is also enabled (it always should be), since it may take a little time - * for the ZK notification to propagate, in which case we may accidentally - * delete some files. + * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} is also enabled (it always + * should be), since it may take a little time for the ZK notification to propagate, in which case + * we may accidentally delete some files. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { @@ -55,7 +54,7 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { if (fStat.isDirectory()) { return true; } - + Path file = fStat.getPath(); // check to see if FileStatus[] deleteStatus = CommonFSUtils.listStatus(this.fs, file, null); @@ -72,8 +71,8 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { String tableName = table.getName(); boolean ret = !archiveTracker.keepHFiles(tableName); - LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + - tableName); + LOG + .debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName); return ret; } catch (IOException e) { LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java index 49b0e827758..1896199ede2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.zookeeper.ZKListener; @@ -49,8 +48,8 @@ public final class TableHFileArchiveTracker extends ZKListener { super(watcher); watcher.registerListener(this); this.monitor = monitor; - this.archiveHFileZNode = ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(), - watcher); + this.archiveHFileZNode = + ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(), watcher); } /** @@ -84,8 +83,8 @@ public final class TableHFileArchiveTracker extends ZKListener { try { addAndReWatchTable(path); } catch (KeeperException e) { - LOG.warn("Couldn't read zookeeper data for table for path:" + path - + ", not preserving a table.", e); + LOG.warn( + "Couldn't read zookeeper data for table for path:" + path + ", not preserving a table.", e); } } @@ -235,11 +234,11 @@ public final class TableHFileArchiveTracker extends ZKListener { * @param conf to read for zookeeper connection information * @return ZooKeeper tracker to monitor for this server if this server should archive hfiles for a * given table - * @throws IOException If a unexpected exception occurs + * @throws IOException If a unexpected exception occurs * @throws ZooKeeperConnectionException if we can't reach zookeeper */ public static TableHFileArchiveTracker create(Configuration conf) - throws ZooKeeperConnectionException, IOException { + throws ZooKeeperConnectionException, IOException { ZKWatcher zkw = new ZKWatcher(conf, "hfileArchiveCleaner", null); return create(zkw, new HFileArchiveTableMonitor()); } @@ -247,13 +246,12 @@ public final class TableHFileArchiveTracker extends ZKListener { /** * Create an archive tracker with the special passed in table monitor. Should only be used in * special cases (e.g. testing) - * @param zkw Watcher for the ZooKeeper cluster that we should track + * @param zkw Watcher for the ZooKeeper cluster that we should track * @param monitor Monitor for which tables need hfile archiving * @return ZooKeeper tracker to monitor for this server if this server should archive hfiles for a * given table */ - private static TableHFileArchiveTracker create(ZKWatcher zkw, - HFileArchiveTableMonitor monitor) { + private static TableHFileArchiveTracker create(ZKWatcher zkw, HFileArchiveTableMonitor monitor) { return new TableHFileArchiveTracker(zkw, monitor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java index bc535b7db80..0388648a972 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public class ZKTableArchiveClient extends Configured { * If the table does not exist, the archiving the table's hfiles is still enabled as a future * table with that name may be created shortly. 
* @param table name of the table to start backing up - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void enableHFileBackupAsync(final byte[] table) throws IOException, KeeperException { @@ -68,7 +68,7 @@ public class ZKTableArchiveClient extends Configured { * Asynchronous operation - some extra HFiles may be retained, in the archive directory after * disable is called, dependent on the latency in zookeeper to the servers. * @param table name of the table stop backing up - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void disableHFileBackup(String table) throws IOException, KeeperException { @@ -83,7 +83,7 @@ public class ZKTableArchiveClient extends Configured { * Asynchronous operation - some extra HFiles may be retained, in the archive directory after * disable is called, dependent on the latency in zookeeper to the servers. * @param table name of the table stop backing up - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void disableHFileBackup(final byte[] table) throws IOException, KeeperException { @@ -97,7 +97,7 @@ public class ZKTableArchiveClient extends Configured { *
<p>
      * Asynchronous operation - some extra HFiles may be retained, in the archive directory after * disable is called, dependent on the latency in zookeeper to the servers. - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void disableHFileBackup() throws IOException, KeeperException { @@ -108,7 +108,7 @@ public class ZKTableArchiveClient extends Configured { * Determine if archiving is enabled (but not necessarily fully propagated) for a table * @param table name of the table to check * @return true if it is, false otherwise - * @throws IOException if a connection to ZooKeeper cannot be established + * @throws IOException if a connection to ZooKeeper cannot be established * @throws KeeperException if a ZooKeeper operation fails */ public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException { @@ -124,7 +124,7 @@ public class ZKTableArchiveClient extends Configured { * Determine if archiving is enabled (but not necessarily fully propagated) for a table * @param table name of the table to check * @return true if it is, false otherwise - * @throws IOException if an unexpected network issue occurs + * @throws IOException if an unexpected network issue occurs * @throws KeeperException if zookeeper can't be reached */ public boolean getArchivingEnabled(String table) throws IOException, KeeperException { @@ -135,20 +135,20 @@ public class ZKTableArchiveClient extends Configured { * @return A new {@link HFileArchiveManager} to manage which tables' hfiles should be archived * rather than deleted. * @throws KeeperException if we can't reach zookeeper - * @throws IOException if an unexpected network issue occurs + * @throws IOException if an unexpected network issue occurs */ - private synchronized HFileArchiveManager createHFileArchiveManager() throws KeeperException, - IOException { + private synchronized HFileArchiveManager createHFileArchiveManager() + throws KeeperException, IOException { return new HFileArchiveManager(this.connection, this.getConf()); } /** - * @param conf conf to read for the base archive node + * @param conf conf to read for the base archive node * @param zooKeeper zookeeper to used for building the full path * @return get the znode for long-term archival of a table for */ public static String getArchiveZNode(Configuration conf, ZKWatcher zooKeeper) { - return ZNodePaths.joinZNode(zooKeeper.getZNodePaths().baseZNode, conf.get( - ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT)); + return ZNodePaths.joinZNode(zooKeeper.getZNodePaths().baseZNode, conf + .get(ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index 1feafc18993..4a8dd1d3ac8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -38,8 +37,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A client scanner for a region opened for read-only on the client side. Assumes region data - * is not changing. + * A client scanner for a region opened for read-only on the client side. Assumes region data is not + * changing. */ @InterfaceAudience.Private public class ClientSideRegionScanner extends AbstractClientScanner { @@ -50,9 +49,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner { RegionScanner scanner; List values; - public ClientSideRegionScanner(Configuration conf, FileSystem fs, - Path rootDir, TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics) - throws IOException { + public ClientSideRegionScanner(Configuration conf, FileSystem fs, Path rootDir, + TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics) throws IOException { // region is immutable, set isolation level scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -67,7 +65,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner { // IndexOnlyLruBlockCache and set a value to HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU"); conf.setIfUnset(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, - String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT)); + String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT)); // don't allow L2 bucket cache for non RS process to avoid unexpected disk usage. conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); region.setBlockCache(BlockCacheFactory.createBlockCache(conf)); @@ -95,7 +93,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner { values.clear(); scanner.nextRaw(values); if (values.isEmpty()) { - //we are done + // we are done return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionServerRegistry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionServerRegistry.java index 93eb7e8b999..a56fc67b8ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionServerRegistry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionServerRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerConnectionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerConnectionUtils.java index b97b212890f..ec34ed6cee3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerConnectionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerConnectionUtils.java @@ -73,14 +73,14 @@ public final class ServerConnectionUtils { private final ClientService.BlockingInterface localClientServiceBlockingInterfaceWrapper; private ShortCircuitingClusterConnection(Configuration conf, User user, ServerName serverName, - AdminService.BlockingInterface admin, ClientService.BlockingInterface client, - ConnectionRegistry registry) throws IOException { + AdminService.BlockingInterface admin, ClientService.BlockingInterface client, + ConnectionRegistry registry) throws IOException { super(conf, null, user, registry); this.serverName = serverName; this.localHostAdmin = admin; this.localHostClient = client; this.localClientServiceBlockingInterfaceWrapper = - new ClientServiceBlockingInterfaceWrapper(this.localHostClient); + new ClientServiceBlockingInterfaceWrapper(this.localHostClient); } @Override @@ -90,15 +90,16 @@ public final class ServerConnectionUtils { @Override public ClientService.BlockingInterface getClient(ServerName sn) throws IOException { - return serverName.equals(sn) ? this.localClientServiceBlockingInterfaceWrapper - : super.getClient(sn); + return serverName.equals(sn) + ? this.localClientServiceBlockingInterfaceWrapper + : super.getClient(sn); } @Override public MasterKeepAliveConnection getMaster() throws IOException { if (this.localHostClient instanceof MasterService.BlockingInterface) { return new ShortCircuitMasterConnection( - (MasterService.BlockingInterface) this.localHostClient); + (MasterService.BlockingInterface) this.localHostClient); } return super.getMaster(); } @@ -116,8 +117,7 @@ public final class ServerConnectionUtils { * surround the scan and get method call,so the {@link RegionScanner} created for the directly * {@link RSRpcServices#get} could be closed immediately,see HBASE-26812 for more. 
*/ - static class ClientServiceBlockingInterfaceWrapper - implements ClientService.BlockingInterface { + static class ClientServiceBlockingInterfaceWrapper implements ClientService.BlockingInterface { private ClientService.BlockingInterface target; @@ -134,7 +134,7 @@ public final class ServerConnectionUtils { @Override public MultiResponse multi(RpcController controller, MultiRequest request) - throws ServiceException { + throws ServiceException { /** * Here is for multiGet */ @@ -145,7 +145,7 @@ public final class ServerConnectionUtils { @Override public ScanResponse scan(RpcController controller, ScanRequest request) - throws ServiceException { + throws ServiceException { return this.doCall(controller, request, (c, r) -> { return target.scan(c, r); }); @@ -156,7 +156,7 @@ public final class ServerConnectionUtils { } private RESPONSE doCall(RpcController controller, REQUEST request, - Operation operation) throws ServiceException { + Operation operation) throws ServiceException { Optional rpcCallOptional = RpcServer.unsetCurrentCall(); try { return operation.call(controller, request); @@ -167,37 +167,37 @@ public final class ServerConnectionUtils { @Override public MutateResponse mutate(RpcController controller, MutateRequest request) - throws ServiceException { + throws ServiceException { return target.mutate(controller, request); } @Override public BulkLoadHFileResponse bulkLoadHFile(RpcController controller, - BulkLoadHFileRequest request) throws ServiceException { + BulkLoadHFileRequest request) throws ServiceException { return target.bulkLoadHFile(controller, request); } @Override public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller, - PrepareBulkLoadRequest request) throws ServiceException { + PrepareBulkLoadRequest request) throws ServiceException { return target.prepareBulkLoad(controller, request); } @Override public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller, - CleanupBulkLoadRequest request) throws ServiceException { + CleanupBulkLoadRequest request) throws ServiceException { return target.cleanupBulkLoad(controller, request); } @Override public CoprocessorServiceResponse execService(RpcController controller, - CoprocessorServiceRequest request) throws ServiceException { + CoprocessorServiceRequest request) throws ServiceException { return target.execService(controller, request); } @Override public CoprocessorServiceResponse execRegionServerService(RpcController controller, - CoprocessorServiceRequest request) throws ServiceException { + CoprocessorServiceRequest request) throws ServiceException { return target.execRegionServerService(controller, request); } } @@ -206,19 +206,18 @@ public final class ServerConnectionUtils { /** * Creates a short-circuit connection that can bypass the RPC layer (serialization, * deserialization, networking, etc..) when talking to a local server. - * @param conf the current configuration - * @param user the user the connection is for + * @param conf the current configuration + * @param user the user the connection is for * @param serverName the local server name - * @param admin the admin interface of the local server - * @param client the client interface of the local server - * @param registry the connection registry to be used, can be null + * @param admin the admin interface of the local server + * @param client the client interface of the local server + * @param registry the connection registry to be used, can be null * @return an short-circuit connection. 
* @throws IOException if IO failure occurred */ public static ClusterConnection createShortCircuitConnection(final Configuration conf, User user, - final ServerName serverName, final AdminService.BlockingInterface admin, - final ClientService.BlockingInterface client, ConnectionRegistry registry) - throws IOException { + final ServerName serverName, final AdminService.BlockingInterface admin, + final ClientService.BlockingInterface client, ConnectionRegistry registry) throws IOException { if (user == null) { user = UserProvider.instantiate(conf).getCurrent(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index 146d76ccf89..2c75f576a25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -42,18 +41,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot * A Scanner which performs a scan over snapshot files. Using this class requires copying the * snapshot to a temporary empty directory, which will copy the snapshot reference files into that * directory. Actual data files are not copied. - * *

      - * This also allows one to run the scan from an - * online or offline hbase cluster. The snapshot files can be exported by using the - * org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, - * to a pure-hdfs cluster, and this scanner can be used to - * run the scan directly over the snapshot files. The snapshot should not be deleted while there - * are open scanners reading from snapshot files. - * + * This also allows one to run the scan from an online or offline hbase cluster. The snapshot files + * can be exported by using the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, to a pure-hdfs + * cluster, and this scanner can be used to run the scan directly over the snapshot files. The + * snapshot should not be deleted while there are open scanners reading from snapshot files. *

      - * An internal RegionScanner is used to execute the {@link Scan} obtained - * from the user for each region in the snapshot. + * An internal RegionScanner is used to execute the {@link Scan} obtained from the user for each + * region in the snapshot. *

      * HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from * snapshot files and data files. HBase also enforces security because all the requests are handled @@ -62,8 +57,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot * permissions to access snapshot and reference files. This means that to run mapreduce over * snapshot files, the job has to be run as the HBase user or the user must have group or other * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from - * snapshot/data files will completely circumvent the access control enforced by HBase. - * See org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat. + * snapshot/data files will completely circumvent the access control enforced by HBase. See + * org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat. */ @InterfaceAudience.Private public class TableSnapshotScanner extends AbstractClientScanner { @@ -80,44 +75,47 @@ public class TableSnapshotScanner extends AbstractClientScanner { private TableDescriptor htd; private final boolean snapshotAlreadyRestored; - private ClientSideRegionScanner currentRegionScanner = null; + private ClientSideRegionScanner currentRegionScanner = null; private int currentRegion = -1; private int numOfCompleteRows = 0; + /** * Creates a TableSnapshotScanner. - * @param conf the configuration - * @param restoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of - * rootDir. The scanner deletes the contents of the directory once the scanner is closed. + * @param conf the configuration + * @param restoreDir a temporary directory to copy the snapshot files into. Current user should + * have write permissions to this directory, and this should not be a + * subdirectory of rootDir. The scanner deletes the contents of the directory + * once the scanner is closed. * @param snapshotName the name of the snapshot to read from - * @param scan a Scan representing scan parameters + * @param scan a Scan representing scan parameters * @throws IOException in case of error */ public TableSnapshotScanner(Configuration conf, Path restoreDir, String snapshotName, Scan scan) - throws IOException { + throws IOException { this(conf, CommonFSUtils.getRootDir(conf), restoreDir, snapshotName, scan); } public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, - String snapshotName, Scan scan) throws IOException { + String snapshotName, Scan scan) throws IOException { this(conf, rootDir, restoreDir, snapshotName, scan, false); } /** * Creates a TableSnapshotScanner. - * @param conf the configuration - * @param rootDir root directory for HBase. - * @param restoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of - * rootdir. The scanner deletes the contents of the directory once the scanner is closed. - * @param snapshotName the name of the snapshot to read from - * @param scan a Scan representing scan parameters + * @param conf the configuration + * @param rootDir root directory for HBase. + * @param restoreDir a temporary directory to copy the snapshot files into. Current + * user should have write permissions to this directory, and this + * should not be a subdirectory of rootdir. 
The scanner deletes the + * contents of the directory once the scanner is closed. + * @param snapshotName the name of the snapshot to read from + * @param scan a Scan representing scan parameters * @param snapshotAlreadyRestored true to indicate that snapshot has been restored. * @throws IOException in case of error */ public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, - String snapshotName, Scan scan, boolean snapshotAlreadyRestored) throws IOException { + String snapshotName, Scan scan, boolean snapshotAlreadyRestored) throws IOException { this.conf = conf; this.snapshotName = snapshotName; this.rootDir = rootDir; @@ -140,7 +138,7 @@ public class TableSnapshotScanner extends AbstractClientScanner { private void openWithoutRestoringSnapshot() throws IOException { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotProtos.SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); List regionManifests = manifest.getRegionManifests(); @@ -150,7 +148,7 @@ public class TableSnapshotScanner extends AbstractClientScanner { regions = new ArrayList<>(regionManifests.size()); regionManifests.stream().map(r -> HRegionInfo.convert(r.getRegionInfo())) - .filter(this::isValidRegion).sorted().forEach(r -> regions.add(r)); + .filter(this::isValidRegion).sorted().forEach(r -> regions.add(r)); htd = manifest.getTableDescriptor(); } @@ -165,7 +163,7 @@ public class TableSnapshotScanner extends AbstractClientScanner { private void openWithRestoringSnapshot() throws IOException { final RestoreSnapshotHelper.RestoreMetaChanges meta = - RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); + RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); final List restoredRegions = meta.getRegionsToAdd(); htd = meta.getTableDescriptor(); @@ -184,8 +182,8 @@ public class TableSnapshotScanner extends AbstractClientScanner { } RegionInfo hri = regions.get(currentRegion); - currentRegionScanner = new ClientSideRegionScanner(conf, fs, - restoreDir, htd, hri, scan, scanMetrics); + currentRegionScanner = + new ClientSideRegionScanner(conf, fs, restoreDir, htd, hri, scan, scanMetrics); if (this.scanMetrics != null) { this.scanMetrics.countOfRegions.incrementAndGet(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java index b68c1e28d69..ec01bc5b51b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
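A usage sketch for the scanner above (snapshot name and restore directory are hypothetical; per the class javadoc the restore directory must be writable, must not live under the HBase root directory, and its contents are cleaned up on close):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableSnapshotScanner;

public final class SnapshotScanExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Path restoreDir = new Path("/tmp/snapshot-restore"); // hypothetical, outside the HBase root dir
    String snapshotName = "my_snapshot";                 // hypothetical snapshot name
    try (TableSnapshotScanner scanner =
      new TableSnapshotScanner(conf, restoreDir, snapshotName, new Scan())) {
      for (Result result = scanner.next(); result != null; result = scanner.next()) {
        System.out.println(result);
      }
    } // close() also cleans up the contents of restoreDir
  }
}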
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** * Class to help with parsing the version info. @@ -38,9 +37,8 @@ public final class VersionInfoUtil { return hasMinimumVersion(getCurrentClientVersionInfo(), major, minor); } - public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, - int major, - int minor) { + public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, int major, + int minor) { if (versionInfo != null) { if (versionInfo.hasVersionMajor() && versionInfo.hasVersionMinor()) { int clientMajor = versionInfo.getVersionMajor(); @@ -74,23 +72,18 @@ public final class VersionInfoUtil { return RpcServer.getCurrentCall().map(RpcCallContext::getClientVersionInfo).orElse(null); } - /** - * @param version - * @return the passed-in version int as a version String - * (e.g. 0x0103004 is 1.3.4) + * n * @return the passed-in version int as a version String (e.g. 0x0103004 is + * 1.3.4) */ public static String versionNumberToString(final int version) { - return String.format("%d.%d.%d", - ((version >> 20) & 0xff), - ((version >> 12) & 0xff), - (version & 0xfff)); + return String.format("%d.%d.%d", ((version >> 20) & 0xff), ((version >> 12) & 0xff), + (version & 0xfff)); } /** - * Pack the full number version in a int. by shifting each component by 8bit, - * except the dot release which has 12bit. - * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * Pack the full number version in a int. by shifting each component by 8bit, except the dot + * release which has 12bit. Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) * @param versionInfo the VersionInfo object to pack * @return the version number as int. (e.g. 0x0103004 is 1.3.4) */ @@ -108,13 +101,12 @@ public final class VersionInfoUtil { return buildVersionNumber(clientMajor, clientMinor, 0); } } - return(0); // no version + return (0); // no version } /** - * Pack the full number version in a int. by shifting each component by 8bit, - * except the dot release which has 12bit. - * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * Pack the full number version in a int. by shifting each component by 8bit, except the dot + * release which has 12bit. 
Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) * @param major version major number * @param minor version minor number * @param patch version patch number @@ -125,8 +117,8 @@ public final class VersionInfoUtil { } /** - * Returns the version components - * Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"] + * Returns the version components Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns + * [4, 5, 6, "SNAPSHOT"] * @return the components of the version string */ private static String[] getVersionComponents(final HBaseProtos.VersionInfo versionInfo) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/RowProcessorClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/RowProcessorClient.java index bfdfacab3b7..db6466b303a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/RowProcessorClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/RowProcessorClient.java @@ -15,31 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.coprocessor; +import com.google.protobuf.Message; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest; import org.apache.hadoop.hbase.regionserver.RowProcessor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; -import com.google.protobuf.Message; /** - * Convenience class that is used to make RowProcessorEndpoint invocations. - * For example usage, refer TestRowProcessorEndpoint - * + * Convenience class that is used to make RowProcessorEndpoint invocations. For example usage, refer + * TestRowProcessorEndpoint */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public class RowProcessorClient { - public static - ProcessRequest getRowProcessorPB(RowProcessor r) - throws IOException { - final ProcessRequest.Builder requestBuilder = - ProcessRequest.newBuilder(); + public static ProcessRequest + getRowProcessorPB(RowProcessor r) throws IOException { + final ProcessRequest.Builder requestBuilder = ProcessRequest.newBuilder(); requestBuilder.setRowProcessorClassName(r.getClass().getName()); S s = r.getRequestData(); if (s != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java index b68a962d15a..50c6dc27657 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java @@ -15,57 +15,52 @@ * See the License for the specific language governing permissions and * limitations under the License. 
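The packing used by versionNumberToString/buildVersionNumber above is plain bit arithmetic: 8 bits each for the major and minor components and 12 bits for the dot release. A standalone illustration:

public final class VersionPackingDemo {
  // Same layout as buildVersionNumber/versionNumberToString: major and minor get
  // 8 bits each, the patch ("dot") release gets the low 12 bits.
  static int pack(int major, int minor, int patch) {
    return (major << 20) | (minor << 12) | patch;
  }

  static String unpack(int version) {
    return String.format("%d.%d.%d", (version >> 20) & 0xff, (version >> 12) & 0xff,
      version & 0xfff);
  }

  public static void main(String[] args) {
    System.out.println(Integer.toHexString(pack(1, 3, 4))); // 103004, i.e. 0x0103004
    System.out.println(Integer.toHexString(pack(2, 1, 0))); // 201000, i.e. 0x0201000
    System.out.println(unpack(0x0103004));                  // 1.3.4
  }
}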
*/ - package org.apache.hadoop.hbase.client.locking; import java.io.IOException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; + /** - * Lock for HBase Entity either a Table, a Namespace, or Regions. - * - * These are remote locks which live on master, and need periodic heartbeats to keep them alive. - * (Once we request the lock, internally an heartbeat thread will be started on the client). - * If master does not receive the heartbeat in time, it'll release the lock and make it available - * to other users. - * - *

      Use {@link LockServiceClient} to build instances. Then call {@link #requestLock()}. - * {@link #requestLock} will contact master to queue the lock and start the heartbeat thread - * which will check lock's status periodically and once the lock is acquired, it will send the - * heartbeats to the master. - * - *

      Use {@link #await} or {@link #await(long, TimeUnit)} to wait for the lock to be acquired. - * Always call {@link #unlock()} irrespective of whether lock was acquired or not. If the lock - * was acquired, it'll be released. If it was not acquired, it is possible that master grants the - * lock in future and the heartbeat thread keeps it alive forever by sending heartbeats. - * Calling {@link #unlock()} will stop the heartbeat thread and cancel the lock queued on master. - * - *

      There are 4 ways in which these remote locks may be released/can be lost: - *

      • Call {@link #unlock}.
      • - *
      • Lock times out on master: Can happen because of network issues, GC pauses, etc. - * Worker thread will call the given abortable as soon as it detects such a situation.
      • + * Lock for HBase Entity either a Table, a Namespace, or Regions. These are remote locks which live + * on master, and need periodic heartbeats to keep them alive. (Once we request the lock, internally + * an heartbeat thread will be started on the client). If master does not receive the heartbeat in + * time, it'll release the lock and make it available to other users. + *

        + * Use {@link LockServiceClient} to build instances. Then call {@link #requestLock()}. + * {@link #requestLock} will contact master to queue the lock and start the heartbeat thread which + * will check lock's status periodically and once the lock is acquired, it will send the heartbeats + * to the master. + *

        + * Use {@link #await} or {@link #await(long, TimeUnit)} to wait for the lock to be acquired. Always + * call {@link #unlock()} irrespective of whether lock was acquired or not. If the lock was + * acquired, it'll be released. If it was not acquired, it is possible that master grants the lock + * in future and the heartbeat thread keeps it alive forever by sending heartbeats. Calling + * {@link #unlock()} will stop the heartbeat thread and cancel the lock queued on master. + *

        + * There are 4 ways in which these remote locks may be released/can be lost: + *

          + *
        • Call {@link #unlock}.
        • + *
        • Lock times out on master: Can happen because of network issues, GC pauses, etc. Worker thread + * will call the given abortable as soon as it detects such a situation.
        • *
        • Fail to contact master: If worker thread can not contact mater and thus fails to send - * heartbeat before the timeout expires, it assumes that lock is lost and calls the - * abortable.
        • + * heartbeat before the timeout expires, it assumes that lock is lost and calls the abortable. *
        • Worker thread is interrupted.
        • *
        - * - * Use example: - * + * Use example: * EntityLock lock = lockServiceClient.*Lock(...., "exampled lock", abortable); * lock.requestLock(); * .... @@ -81,8 +76,7 @@ import org.slf4j.LoggerFactory; public class EntityLock { private static final Logger LOG = LoggerFactory.getLogger(EntityLock.class); - public static final String HEARTBEAT_TIME_BUFFER = - "hbase.client.locks.heartbeat.time.buffer.ms"; + public static final String HEARTBEAT_TIME_BUFFER = "hbase.client.locks.heartbeat.time.buffer.ms"; private final AtomicBoolean locked = new AtomicBoolean(false); private final CountDownLatch latch = new CountDownLatch(1); @@ -102,12 +96,12 @@ public class EntityLock { private Long procId = null; /** - * Abortable.abort() is called when the lease of the lock will expire. - * It's up to the user decide if simply abort the process or handle the loss of the lock - * by aborting the operation that was supposed to be under lock. + * Abortable.abort() is called when the lease of the lock will expire. It's up to the user decide + * if simply abort the process or handle the loss of the lock by aborting the operation that was + * supposed to be under lock. */ - EntityLock(Configuration conf, LockService.BlockingInterface stub, - LockRequest request, Abortable abort) { + EntityLock(Configuration conf, LockService.BlockingInterface stub, LockRequest request, + Abortable abort) { this.stub = stub; this.lockRequest = request; this.abort = abort; @@ -158,10 +152,9 @@ public class EntityLock { } /** - * Sends rpc to the master to request lock. - * The lock request is queued with other lock requests. - * Call {@link #await()} to wait on lock. - * Always call {@link #unlock()} after calling the below, even after error. + * Sends rpc to the master to request lock. The lock request is queued with other lock requests. + * Call {@link #await()} to wait on lock. Always call {@link #unlock()} after calling the below, + * even after error. */ public void requestLock() throws IOException { if (procId == null) { @@ -179,7 +172,7 @@ public class EntityLock { /** * @param timeout in milliseconds. If set to 0, waits indefinitely. * @return true if lock was acquired; and false if waiting time elapsed before lock could be - * acquired. + * acquired. */ public boolean await(long timeout, TimeUnit timeUnit) throws InterruptedException { final boolean result = latch.await(timeout, timeUnit); @@ -188,7 +181,7 @@ public class EntityLock { LOG.info("Acquired " + lockRequestStr); } else { LOG.info(String.format("Failed acquire in %s %s of %s", timeout, timeUnit.toString(), - lockRequestStr)); + lockRequestStr)); } return result; } @@ -227,7 +220,7 @@ public class EntityLock { @Override public void run() { final LockHeartbeatRequest lockHeartbeatRequest = - LockHeartbeatRequest.newBuilder().setProcId(procId).build(); + LockHeartbeatRequest.newBuilder().setProcId(procId).build(); LockHeartbeatResponse response; while (true) { @@ -243,12 +236,13 @@ public class EntityLock { if (!isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { locked.set(true); latch.countDown(); - } else if (isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { - // Lock timed out. - locked.set(false); - abort.abort("Lock timed out.", null); - return; - } + } else + if (isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { + // Lock timed out. 
+ locked.set(false); + abort.abort("Lock timed out.", null); + return; + } try { // If lock not acquired yet, poll faster so we can notify faster. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java index 24f2835af8b..8f3705bab01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.locking; import java.util.List; @@ -35,12 +33,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockS import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType; /** - * Helper class to create "master locks" for namespaces, tables and regions. - * DEV-NOTE: At the moment this class is used only by the RS for MOB, - * to prevent other MOB compaction to conflict. - * The RS has already the stub of the LockService, so we have only one constructor that - * takes the LockService stub. If in the future we are going to use this in other places - * we should add a constructor that from conf or connection, creates the stub. + * Helper class to create "master locks" for namespaces, tables and regions. DEV-NOTE: At the moment + * this class is used only by the RS for MOB, to prevent other MOB compaction to conflict. The RS + * has already the stub of the LockService, so we have only one constructor that takes the + * LockService stub. If in the future we are going to use this in other places we should add a + * constructor that from conf or connection, creates the stub. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -50,30 +47,30 @@ public class LockServiceClient { private final NonceGenerator ng; public LockServiceClient(final Configuration conf, final LockService.BlockingInterface stub, - final NonceGenerator ng) { + final NonceGenerator ng) { this.conf = conf; this.stub = stub; this.ng = ng; } /** - * Create a new EntityLock object to acquire an exclusive or shared lock on a table. - * Internally, the table namespace will also be locked in shared mode. + * Create a new EntityLock object to acquire an exclusive or shared lock on a table. Internally, + * the table namespace will also be locked in shared mode. */ public EntityLock tableLock(final TableName tableName, final boolean exclusive, - final String description, final Abortable abort) { + final String description, final Abortable abort) { LockRequest lockRequest = buildLockRequest(exclusive ? LockType.EXCLUSIVE : LockType.SHARED, - tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce()); + tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } /** - * LocCreate a new EntityLock object to acquire exclusive lock on a namespace. - * Clients can not acquire shared locks on namespace. + * LocCreate a new EntityLock object to acquire exclusive lock on a namespace. 
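Putting LockServiceClient and EntityLock together, a hedged sketch of the request/await/unlock lifecycle (the client, abortable and timeout are assumed to be supplied by the caller, e.g. inside a region server; this is not a public client API):

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;

public final class TableLockExample {
  // Queues an exclusive table lock on the master, waits a bounded time for it,
  // runs the critical section only if the lock was granted, and always unlocks.
  static void runUnderTableLock(LockServiceClient lockClient, TableName table, Abortable abort,
    Runnable criticalSection) throws IOException, InterruptedException {
    EntityLock lock = lockClient.tableLock(table, true, "example exclusive lock", abort);
    lock.requestLock(); // starts the heartbeat thread that keeps the lock alive
    try {
      if (lock.await(30, TimeUnit.SECONDS)) {
        criticalSection.run();
      }
    } finally {
      lock.unlock(); // always unlock, even if the lock was never acquired
    }
  }
}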
Clients can not + * acquire shared locks on namespace. */ public EntityLock namespaceLock(String namespace, String description, Abortable abort) { - LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, - namespace, null, null, description, ng.getNonceGroup(), ng.newNonce()); + LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, namespace, null, null, + description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } @@ -82,21 +79,19 @@ public class LockServiceClient { * Internally, the table and its namespace will also be locked in shared mode. */ public EntityLock regionLock(List regionInfos, String description, Abortable abort) { - LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, - null, null, regionInfos, description, ng.getNonceGroup(), ng.newNonce()); + LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, null, null, regionInfos, + description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } @InterfaceAudience.Private - public static LockRequest buildLockRequest(final LockType type, - final String namespace, final TableName tableName, final List regionInfos, - final String description, final long nonceGroup, final long nonce) { - final LockRequest.Builder builder = LockRequest.newBuilder() - .setLockType(type) - .setNonceGroup(nonceGroup) - .setNonce(nonce); + public static LockRequest buildLockRequest(final LockType type, final String namespace, + final TableName tableName, final List regionInfos, final String description, + final long nonceGroup, final long nonce) { + final LockRequest.Builder builder = + LockRequest.newBuilder().setLockType(type).setNonceGroup(nonceGroup).setNonce(nonce); if (regionInfos != null) { - for (RegionInfo hri: regionInfos) { + for (RegionInfo hri : regionInfos) { builder.addRegionInfo(ProtobufUtil.toRegionInfo(hri)); } } else if (namespace != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java index ddbbb5fc8bd..a6d6940e1e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,22 +20,23 @@ package org.apache.hadoop.hbase.codec; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.ByteBuffInputStream; import org.apache.hadoop.hbase.nio.ByteBuff; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.ExtendedCellBuilder; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos; /** - * Codec that just writes out Cell as a protobuf Cell Message. 
Does not write the mvcc stamp. - * Use a different codec if you want that in the stream. + * Codec that just writes out Cell as a protobuf Cell Message. Does not write the mvcc stamp. Use a + * different codec if you want that in the stream. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class MessageCodec implements Codec { @@ -48,26 +49,27 @@ public class MessageCodec implements Codec { public void write(Cell cell) throws IOException { checkFlushed(); CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder(); - // This copies bytes from Cell to ByteString. I don't see anyway around the copy. + // This copies bytes from Cell to ByteString. I don't see anyway around the copy. // ByteString is final. builder.setRow(UnsafeByteOperations.unsafeWrap(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength())); + cell.getRowLength())); builder.setFamily(UnsafeByteOperations.unsafeWrap(cell.getFamilyArray(), - cell.getFamilyOffset(), - cell.getFamilyLength())); + cell.getFamilyOffset(), cell.getFamilyLength())); builder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength())); + cell.getQualifierOffset(), cell.getQualifierLength())); builder.setTimestamp(cell.getTimestamp()); builder.setCellType(CellProtos.CellType.valueOf(cell.getTypeByte())); builder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); CellProtos.Cell pbcell = builder.build(); pbcell.writeDelimitedTo(this.out); } } static class MessageDecoder extends BaseDecoder { - private final ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + private final ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + MessageDecoder(final InputStream in) { super(in); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java index e5b6f4a166f..de2a470bd5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hbase.constraint; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configured; +import org.apache.yetus.audience.InterfaceAudience; /** - * Base class to use when actually implementing a {@link Constraint}. It takes - * care of getting and setting of configuration for the constraint. + * Base class to use when actually implementing a {@link Constraint}. It takes care of getting and + * setting of configuration for the constraint. 
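A round-trip sketch for the codec above (row, family, qualifier and value are made up): encode one Cell as a protobuf message, then decode it back; the mvcc/sequence id is intentionally not carried across:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.MessageCodec;
import org.apache.hadoop.hbase.util.Bytes;

public final class MessageCodecRoundTrip {
  public static void main(String[] args) throws IOException {
    Codec codec = new MessageCodec();

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(out);
    encoder.write(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"),
      Bytes.toBytes("value")));
    encoder.flush();

    Codec.Decoder decoder = codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
    while (decoder.advance()) {
      Cell cell = decoder.current();
      System.out.println(
        Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
    }
  }
}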
*/ @InterfaceAudience.Private public abstract class BaseConstraint extends Configured implements Constraint { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java index 4a63ec10cb6..8c70ba5f602 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,50 +17,42 @@ */ package org.apache.hadoop.hbase.constraint; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.client.Put; +import org.apache.yetus.audience.InterfaceAudience; /** - * Apply a {@link Constraint} (in traditional database terminology) to a HTable. - * Any number of {@link Constraint Constraints} can be added to the table, in - * any order. + * Apply a {@link Constraint} (in traditional database terminology) to a HTable. Any number of + * {@link Constraint Constraints} can be added to the table, in any order. *

        * A {@link Constraint} must be added to a table before the table is loaded via * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class[])} or - * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, - * org.apache.hadoop.hbase.util.Pair...)} - * (if you want to add a configuration with the {@link Constraint}). Constraints - * will be run in the order that they are added. Further, a Constraint will be - * configured before it is run (on load). + * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, org.apache.hadoop.hbase.util.Pair...)} + * (if you want to add a configuration with the {@link Constraint}). Constraints will be run in the + * order that they are added. Further, a Constraint will be configured before it is run (on load). *

        * See {@link Constraints#enableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} and * {@link Constraints#disableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} for * enabling/disabling of a given {@link Constraint} after it has been added. *

        * If a {@link Put} is invalid, the Constraint should throw some sort of - * {@link org.apache.hadoop.hbase.constraint.ConstraintException}, indicating - * that the {@link Put} has failed. When - * this exception is thrown, not further retries of the {@link Put} are - * attempted nor are any other {@link Constraint Constraints} attempted (the - * {@link Put} is clearly not valid). Therefore, there are performance - * implications in the order in which {@link BaseConstraint Constraints} are - * specified. + * {@link org.apache.hadoop.hbase.constraint.ConstraintException}, indicating that the {@link Put} + * has failed. When this exception is thrown, not further retries of the {@link Put} are attempted + * nor are any other {@link Constraint Constraints} attempted (the {@link Put} is clearly not + * valid). Therefore, there are performance implications in the order in which {@link BaseConstraint + * Constraints} are specified. *

        * If a {@link Constraint} fails to fail the {@link Put} via a - * {@link org.apache.hadoop.hbase.constraint.ConstraintException}, but instead - * throws a {@link RuntimeException}, - * the entire constraint processing mechanism ({@link ConstraintProcessor}) will - * be unloaded from the table. This ensures that the region server is still - * functional, but no more {@link Put Puts} will be checked via - * {@link Constraint Constraints}. + * {@link org.apache.hadoop.hbase.constraint.ConstraintException}, but instead throws a + * {@link RuntimeException}, the entire constraint processing mechanism + * ({@link ConstraintProcessor}) will be unloaded from the table. This ensures that the region + * server is still functional, but no more {@link Put Puts} will be checked via {@link Constraint + * Constraints}. *

        - * Further, {@link Constraint Constraints} should probably not be used to - * enforce cross-table references as it will cause tremendous write slowdowns, - * but it is possible. + * Further, {@link Constraint Constraints} should probably not be used to enforce cross-table + * references as it will cause tremendous write slowdowns, but it is possible. *

        * NOTE: Implementing classes must have a nullary (no-args) constructor - * * @see BaseConstraint * @see Constraints */ @@ -68,15 +60,13 @@ import org.apache.hadoop.hbase.client.Put; public interface Constraint extends Configurable { /** - * Check a {@link Put} to ensure it is valid for the table. If the {@link Put} - * is valid, then just return from the method. Otherwise, throw an - * {@link Exception} specifying what happened. This {@link Exception} is - * propagated back to the client so you can see what caused the {@link Put} to - * fail. + * Check a {@link Put} to ensure it is valid for the table. If the {@link Put} is valid, then just + * return from the method. Otherwise, throw an {@link Exception} specifying what happened. This + * {@link Exception} is propagated back to the client so you can see what caused the {@link Put} + * to fail. * @param p {@link Put} to check - * @throws org.apache.hadoop.hbase.constraint.ConstraintException when the - * {@link Put} does not match the - * constraint. + * @throws org.apache.hadoop.hbase.constraint.ConstraintException when the {@link Put} does not + * match the constraint. */ void check(Put p) throws ConstraintException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java index 51641b91ce6..cb7af0f9d3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Exception that a user defined constraint throws on failure of a - * {@link org.apache.hadoop.hbase.client.Put}. - *

        Does NOT attempt the - * {@link org.apache.hadoop.hbase.client.Put} multiple times, - * since the constraint should fail every time for - * the same {@link org.apache.hadoop.hbase.client.Put} (it should be - * idempotent). + * {@link org.apache.hadoop.hbase.client.Put}. + *

        + * Does NOT attempt the {@link org.apache.hadoop.hbase.client.Put} multiple times, since the + * constraint should fail every time for the same {@link org.apache.hadoop.hbase.client.Put} + * (it should be idempotent). */ @InterfaceAudience.Private public class ConstraintException extends org.apache.hadoop.hbase.DoNotRetryIOException { @@ -36,12 +35,10 @@ public class ConstraintException extends org.apache.hadoop.hbase.DoNotRetryIOExc super(); } - public ConstraintException(String msg) - { + public ConstraintException(String msg) { super(msg); } - - + public ConstraintException(String msg, Throwable cause) { super(msg, cause); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index b0a04c5044a..81ed9592d27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Optional; - import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -32,15 +31,14 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /*** * Processes multiple {@link Constraint Constraints} on a given table. *
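Tying Constraint, ConstraintException and the ConstraintProcessor described next together, a hedged sketch of a user-defined constraint (class name and limit are illustrative) that rejects any Put whose row key exceeds a fixed length:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;

public class RowKeyLengthConstraint extends BaseConstraint {
  private static final int MAX_ROW_KEY_LENGTH = 128; // hypothetical limit

  @Override
  public void check(Put p) throws ConstraintException {
    // Throwing ConstraintException fails the Put without retries; an unexpected
    // RuntimeException would instead unload the whole ConstraintProcessor.
    if (p.getRow().length > MAX_ROW_KEY_LENGTH) {
      throw new ConstraintException(
        "Row key exceeds " + MAX_ROW_KEY_LENGTH + " bytes: " + p.getRow().length);
    }
  }
}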

        - * This is an ease of use mechanism - all the functionality here could be - * implemented on any given system by a coprocessor. + * This is an ease of use mechanism - all the functionality here could be implemented on any given + * system by a coprocessor. */ @InterfaceAudience.Private public class ConstraintProcessor implements RegionCoprocessor, RegionObserver { @@ -70,7 +68,7 @@ public class ConstraintProcessor implements RegionCoprocessor, RegionObserver { // make sure we are on a region server if (!(environment instanceof RegionCoprocessorEnvironment)) { throw new IllegalArgumentException( - "Constraints only act on regions - started in an environment that was not a region"); + "Constraints only act on regions - started in an environment that was not a region"); } RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) environment; TableDescriptor desc = env.getRegion().getTableDescriptor(); @@ -82,15 +80,15 @@ public class ConstraintProcessor implements RegionCoprocessor, RegionObserver { } if (LOG.isInfoEnabled()) { - LOG.info("Finished loading " + constraints.size() - + " user Constraints on table: " + desc.getTableName()); + LOG.info("Finished loading " + constraints.size() + " user Constraints on table: " + + desc.getTableName()); } } @Override - public void prePut(ObserverContext e, Put put, - WALEdit edit, Durability durability) throws IOException { + public void prePut(ObserverContext e, Put put, WALEdit edit, + Durability durability) throws IOException { // check the put against the stored constraints for (Constraint c : constraints) { c.check(put); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java index 759d2520b0b..25182fd457b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,23 +29,22 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.regex.Pattern; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Utilities for adding/removing constraints from a table. *

        * Constraints can be added on table load time, via the {@link HTableDescriptor}. *

        - * NOTE: this class is NOT thread safe. Concurrent setting/enabling/disabling of - * constraints can cause constraints to be run at incorrect times or not at all. + * NOTE: this class is NOT thread safe. Concurrent setting/enabling/disabling of constraints can + * cause constraints to be run at incorrect times or not at all. */ @InterfaceAudience.Private public final class Constraints { @@ -56,8 +55,8 @@ public final class Constraints { private static final Logger LOG = LoggerFactory.getLogger(Constraints.class); private static final String CONSTRAINT_HTD_KEY_PREFIX = "constraint $"; - private static final Pattern CONSTRAINT_HTD_ATTR_KEY_PATTERN = Pattern - .compile(CONSTRAINT_HTD_KEY_PREFIX, Pattern.LITERAL); + private static final Pattern CONSTRAINT_HTD_ATTR_KEY_PATTERN = + Pattern.compile(CONSTRAINT_HTD_KEY_PREFIX, Pattern.LITERAL); // configuration key for if the constraint is enabled private static final String ENABLED_KEY = "_ENABLED"; @@ -74,14 +73,9 @@ public final class Constraints { /** * Enable constraints on a table. *

        - * Currently, if you attempt to add a constraint to the table, then - * Constraints will automatically be turned on. - * - * @param desc - * table description to add the processor - * @throws IOException - * If the {@link ConstraintProcessor} CP couldn't be added to the - * table. + * Currently, if you attempt to add a constraint to the table, then Constraints will automatically + * be turned on. n * table description to add the processor n * If the {@link ConstraintProcessor} + * CP couldn't be added to the table. */ public static void enable(HTableDescriptor desc) throws IOException { // if the CP has already been loaded, do nothing @@ -95,27 +89,19 @@ public final class Constraints { } /** - * Turn off processing constraints for a given table, even if constraints have - * been turned on or added. - * - * @param desc - * {@link HTableDescriptor} where to disable {@link Constraint - * Constraints}. + * Turn off processing constraints for a given table, even if constraints have been turned on or + * added. n * {@link HTableDescriptor} where to disable {@link Constraint Constraints}. */ public static void disable(HTableDescriptor desc) { desc.removeCoprocessor(ConstraintProcessor.class.getName()); } /** - * Remove all {@link Constraint Constraints} that have been added to the table - * and turn off the constraint processing. + * Remove all {@link Constraint Constraints} that have been added to the table and turn off the + * constraint processing. *

        - * All {@link Configuration Configurations} and their associated - * {@link Constraint} are removed. - * - * @param desc - * {@link HTableDescriptor} to remove {@link Constraint Constraints} - * from. + * All {@link Configuration Configurations} and their associated {@link Constraint} are removed. n + * * {@link HTableDescriptor} to remove {@link Constraint Constraints} from. */ public static void remove(HTableDescriptor desc) { // disable constraints @@ -124,8 +110,7 @@ public final class Constraints { // remove all the constraint settings List keys = new ArrayList<>(); // loop through all the key, values looking for constraints - for (Map.Entry e : desc - .getValues().entrySet()) { + for (Map.Entry e : desc.getValues().entrySet()) { String key = Bytes.toString((e.getKey().get())); String[] className = CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(key); if (className.length == 2) { @@ -139,30 +124,24 @@ public final class Constraints { } /** - * Check to see if the Constraint is currently set. - * - * @param desc - * {@link HTableDescriptor} to check - * @param clazz - * {@link Constraint} class to check for. - * @return true if the {@link Constraint} is present, even if it is - * disabled. false otherwise. + * Check to see if the Constraint is currently set. n * {@link HTableDescriptor} to check n * + * {@link Constraint} class to check for. + * @return true if the {@link Constraint} is present, even if it is disabled. + * false otherwise. */ - public static boolean has(HTableDescriptor desc, - Class clazz) { + public static boolean has(HTableDescriptor desc, Class clazz) { return getKeyValueForClass(desc, clazz) != null; } /** * Get the kv {@link Entry} in the descriptor for the specified class - * - * @param desc {@link HTableDescriptor} to read + * @param desc {@link HTableDescriptor} to read * @param clazz To search for - * @return The {@link Pair} of {@literal } in the table, if that class is - * present. {@code NULL} otherwise. + * @return The {@link Pair} of {@literal } in the table, if that class is present. + * {@code NULL} otherwise. */ - private static Pair getKeyValueForClass( - HTableDescriptor desc, Class clazz) { + private static Pair getKeyValueForClass(HTableDescriptor desc, + Class clazz) { // get the serialized version of the constraint String key = serializeConstraintClass(clazz); String value = desc.getValue(key); @@ -173,25 +152,19 @@ public final class Constraints { /** * Add configuration-less constraints to the table. *

        - * This will overwrite any configuration associated with the previous - * constraint of the same class. + * This will overwrite any configuration associated with the previous constraint of the same + * class. *

        - * Each constraint, when added to the table, will have a specific priority, - * dictating the order in which the {@link Constraint} will be run. A - * {@link Constraint} earlier in the list will be run before those later in - * the list. The same logic applies between two Constraints over time (earlier - * added is run first on the regionserver). - * - * @param desc - * {@link HTableDescriptor} to add {@link Constraint Constraints} - * @param constraints - * {@link Constraint Constraints} to add. All constraints are - * considered automatically enabled on add - * @throws IOException - * If constraint could not be serialized/added to table + * Each constraint, when added to the table, will have a specific priority, dictating the order in + * which the {@link Constraint} will be run. A {@link Constraint} earlier in the list will be run + * before those later in the list. The same logic applies between two Constraints over time + * (earlier added is run first on the regionserver). n * {@link HTableDescriptor} to add + * {@link Constraint Constraints} n * {@link Constraint Constraints} to add. All constraints are + * considered automatically enabled on add n * If constraint could not be serialized/added to + * table */ - public static void add(HTableDescriptor desc, - Class... constraints) throws IOException { + public static void add(HTableDescriptor desc, Class... constraints) + throws IOException { // make sure constraints are enabled enable(desc); long priority = getNextPriority(desc); @@ -206,30 +179,20 @@ public final class Constraints { /** * Add constraints and their associated configurations to the table. *

        - * Adding the same constraint class twice will overwrite the first - * constraint's configuration + * Adding the same constraint class twice will overwrite the first constraint's configuration *

        - * Each constraint, when added to the table, will have a specific priority, - * dictating the order in which the {@link Constraint} will be run. A - * {@link Constraint} earlier in the list will be run before those later in - * the list. The same logic applies between two Constraints over time (earlier - * added is run first on the regionserver). - * - * @param desc - * {@link HTableDescriptor} to add a {@link Constraint} - * @param constraints - * {@link Pair} of a {@link Constraint} and its associated - * {@link Configuration}. The Constraint will be configured on load - * with the specified configuration.All constraints are considered - * automatically enabled on add - * @throws IOException - * if any constraint could not be deserialized. Assumes if 1 - * constraint is not loaded properly, something has gone terribly - * wrong and that all constraints need to be enforced. + * Each constraint, when added to the table, will have a specific priority, dictating the order in + * which the {@link Constraint} will be run. A {@link Constraint} earlier in the list will be run + * before those later in the list. The same logic applies between two Constraints over time + * (earlier added is run first on the regionserver). n * {@link HTableDescriptor} to add a + * {@link Constraint} n * {@link Pair} of a {@link Constraint} and its associated + * {@link Configuration}. The Constraint will be configured on load with the specified + * configuration.All constraints are considered automatically enabled on add n * if any constraint + * could not be deserialized. Assumes if 1 constraint is not loaded properly, something has gone + * terribly wrong and that all constraints need to be enforced. */ public static void add(HTableDescriptor desc, - Pair, Configuration>... constraints) - throws IOException { + Pair, Configuration>... constraints) throws IOException { enable(desc); long priority = getNextPriority(desc); for (Pair, Configuration> pair : constraints) { @@ -241,25 +204,15 @@ public final class Constraints { /** * Add a {@link Constraint} to the table with the given configuration *

        - * Each constraint, when added to the table, will have a specific priority, - * dictating the order in which the {@link Constraint} will be run. A - * {@link Constraint} added will run on the regionserver before those added to - * the {@link HTableDescriptor} later. - * - * @param desc - * table descriptor to the constraint to - * @param constraint - * to be added - * @param conf - * configuration associated with the constraint - * @throws IOException - * if any constraint could not be deserialized. Assumes if 1 - * constraint is not loaded properly, something has gone terribly - * wrong and that all constraints need to be enforced. + * Each constraint, when added to the table, will have a specific priority, dictating the order in + * which the {@link Constraint} will be run. A {@link Constraint} added will run on the + * regionserver before those added to the {@link HTableDescriptor} later. n * table descriptor to + * the constraint to n * to be added n * configuration associated with the constraint n * if any + * constraint could not be deserialized. Assumes if 1 constraint is not loaded properly, something + * has gone terribly wrong and that all constraints need to be enforced. */ - public static void add(HTableDescriptor desc, - Class constraint, Configuration conf) - throws IOException { + public static void add(HTableDescriptor desc, Class constraint, + Configuration conf) throws IOException { enable(desc); long priority = getNextPriority(desc); addConstraint(desc, constraint, conf, priority++); @@ -270,37 +223,26 @@ public final class Constraints { /** * Write the raw constraint and configuration to the descriptor. *

- * This method takes care of creating a new configuration based on the passed
- * in configuration and then updating that with enabled and priority of the
- * constraint.
+ * This method takes care of creating a new configuration based on the passed in configuration and
+ * then updating that with enabled and priority of the constraint.
 * <p>
        * When a constraint is added, it is automatically enabled. */ - private static void addConstraint(HTableDescriptor desc, - Class clazz, Configuration conf, long priority) - throws IOException { - writeConstraint(desc, serializeConstraintClass(clazz), - configure(conf, true, priority)); + private static void addConstraint(HTableDescriptor desc, Class clazz, + Configuration conf, long priority) throws IOException { + writeConstraint(desc, serializeConstraintClass(clazz), configure(conf, true, priority)); } /** - * Setup the configuration for a constraint as to whether it is enabled and - * its priority - * - * @param conf - * on which to base the new configuration - * @param enabled - * true if it should be run - * @param priority - * relative to other constraints + * Setup the configuration for a constraint as to whether it is enabled and its priority n * on + * which to base the new configuration n * true if it should be run n * relative to other + * constraints * @return a new configuration, storable in the {@link HTableDescriptor} */ - private static Configuration configure(Configuration conf, boolean enabled, - long priority) { + private static Configuration configure(Configuration conf, boolean enabled, long priority) { // create the configuration to actually be stored // clone if possible, but otherwise just create an empty configuration - Configuration toWrite = conf == null ? new Configuration() - : new Configuration(conf); + Configuration toWrite = conf == null ? new Configuration() : new Configuration(conf); // update internal properties toWrite.setBooleanIfUnset(ENABLED_KEY, enabled); @@ -314,39 +256,29 @@ public final class Constraints { } /** - * Just write the class to a String representation of the class as a key for - * the {@link HTableDescriptor} - * - * @param clazz - * Constraint class to convert to a {@link HTableDescriptor} key + * Just write the class to a String representation of the class as a key for the + * {@link HTableDescriptor} n * Constraint class to convert to a {@link HTableDescriptor} key * @return key to store in the {@link HTableDescriptor} */ - private static String serializeConstraintClass( - Class clazz) { + private static String serializeConstraintClass(Class clazz) { String constraintClazz = clazz.getName(); return CONSTRAINT_HTD_KEY_PREFIX + constraintClazz; } /** - * Write the given key and associated configuration to the - * {@link HTableDescriptor} + * Write the given key and associated configuration to the {@link HTableDescriptor} */ - private static void writeConstraint(HTableDescriptor desc, String key, - Configuration conf) throws IOException { + private static void writeConstraint(HTableDescriptor desc, String key, Configuration conf) + throws IOException { // store the key and conf in the descriptor desc.setValue(key, serializeConfiguration(conf)); } /** - * Write the configuration to a String - * - * @param conf - * to write - * @return String representation of that configuration - * @throws IOException + * Write the configuration to a String n * to write + * @return String representation of that configuration n */ - private static String serializeConfiguration(Configuration conf) - throws IOException { + private static String serializeConfiguration(Configuration conf) throws IOException { // write the configuration out to the data stream ByteArrayOutputStream bos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(bos); @@ -357,14 +289,10 @@ public final class Constraints { } /** - * Read the {@link 
Configuration} stored in the byte stream. - * - * @param bytes - * to read from + * Read the {@link Configuration} stored in the byte stream. n * to read from * @return A valid configuration */ - private static Configuration readConfiguration(byte[] bytes) - throws IOException { + private static Configuration readConfiguration(byte[] bytes) throws IOException { ByteArrayInputStream is = new ByteArrayInputStream(bytes); Configuration conf = new Configuration(false); conf.addResource(is); @@ -372,16 +300,10 @@ public final class Constraints { } /** - * Read in the configuration from the String encoded configuration - * - * @param bytes - * to read from - * @return A valid configuration - * @throws IOException - * if the configuration could not be read + * Read in the configuration from the String encoded configuration n * to read from + * @return A valid configuration n * if the configuration could not be read */ - private static Configuration readConfiguration(String bytes) - throws IOException { + private static Configuration readConfiguration(String bytes) throws IOException { return readConfiguration(Bytes.toBytes(bytes)); } @@ -405,29 +327,19 @@ public final class Constraints { } /** - * Update the configuration for the {@link Constraint}; does not change the - * order in which the constraint is run. - * - * @param desc - * {@link HTableDescriptor} to update - * @param clazz - * {@link Constraint} to update - * @param configuration - * to update the {@link Constraint} with. - * @throws IOException - * if the Constraint was not stored correctly - * @throws IllegalArgumentException - * if the Constraint was not present on this table. + * Update the configuration for the {@link Constraint}; does not change the order in which the + * constraint is run. n * {@link HTableDescriptor} to update n * {@link Constraint} to update n * + * to update the {@link Constraint} with. n * if the Constraint was not stored correctly n * if + * the Constraint was not present on this table. */ - public static void setConfiguration(HTableDescriptor desc, - Class clazz, Configuration configuration) - throws IOException, IllegalArgumentException { + public static void setConfiguration(HTableDescriptor desc, Class clazz, + Configuration configuration) throws IOException, IllegalArgumentException { // get the entry for this class Pair e = getKeyValueForClass(desc, clazz); if (e == null) { - throw new IllegalArgumentException("Constraint: " + clazz.getName() - + " is not associated with this table."); + throw new IllegalArgumentException( + "Constraint: " + clazz.getName() + " is not associated with this table."); } // clone over the configuration elements @@ -445,65 +357,46 @@ public final class Constraints { } /** - * Remove the constraint (and associated information) for the table - * descriptor. - * - * @param desc - * {@link HTableDescriptor} to modify - * @param clazz - * {@link Constraint} class to remove + * Remove the constraint (and associated information) for the table descriptor. n * + * {@link HTableDescriptor} to modify n * {@link Constraint} class to remove */ - public static void remove(HTableDescriptor desc, - Class clazz) { + public static void remove(HTableDescriptor desc, Class clazz) { String key = serializeConstraintClass(clazz); desc.remove(key); } /** - * Enable the given {@link Constraint}. Retains all the information (e.g. - * Configuration) for the {@link Constraint}, but makes sure that it gets - * loaded on the table. 
- * - * @param desc - * {@link HTableDescriptor} to modify - * @param clazz - * {@link Constraint} to enable - * @throws IOException - * If the constraint cannot be properly deserialized + * Enable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the + * {@link Constraint}, but makes sure that it gets loaded on the table. n * + * {@link HTableDescriptor} to modify n * {@link Constraint} to enable n * If the constraint + * cannot be properly deserialized */ - public static void enableConstraint(HTableDescriptor desc, - Class clazz) throws IOException { + public static void enableConstraint(HTableDescriptor desc, Class clazz) + throws IOException { changeConstraintEnabled(desc, clazz, true); } /** - * Disable the given {@link Constraint}. Retains all the information (e.g. - * Configuration) for the {@link Constraint}, but it just doesn't load the - * {@link Constraint} on the table. - * - * @param desc - * {@link HTableDescriptor} to modify - * @param clazz - * {@link Constraint} to disable. - * @throws IOException - * if the constraint cannot be found + * Disable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the + * {@link Constraint}, but it just doesn't load the {@link Constraint} on the table. n * + * {@link HTableDescriptor} to modify n * {@link Constraint} to disable. n * if the constraint + * cannot be found */ - public static void disableConstraint(HTableDescriptor desc, - Class clazz) throws IOException { + public static void disableConstraint(HTableDescriptor desc, Class clazz) + throws IOException { changeConstraintEnabled(desc, clazz, false); } /** - * Change the whether the constraint (if it is already present) is enabled or - * disabled. + * Change the whether the constraint (if it is already present) is enabled or disabled. */ private static void changeConstraintEnabled(HTableDescriptor desc, - Class clazz, boolean enabled) throws IOException { + Class clazz, boolean enabled) throws IOException { // get the original constraint Pair entry = getKeyValueForClass(desc, clazz); if (entry == null) { throw new IllegalArgumentException("Constraint: " + clazz.getName() - + " is not associated with this table. You can't enable it!"); + + " is not associated with this table. You can't enable it!"); } // create a new configuration from that conf @@ -517,19 +410,13 @@ public final class Constraints { } /** - * Check to see if the given constraint is enabled. - * - * @param desc - * {@link HTableDescriptor} to check. - * @param clazz - * {@link Constraint} to check for - * @return true if the {@link Constraint} is present and enabled. - * false otherwise. - * @throws IOException - * If the constraint has improperly stored in the table + * Check to see if the given constraint is enabled. n * {@link HTableDescriptor} to check. n * + * {@link Constraint} to check for + * @return true if the {@link Constraint} is present and enabled. false + * otherwise. n * If the constraint has improperly stored in the table */ - public static boolean enabled(HTableDescriptor desc, - Class clazz) throws IOException { + public static boolean enabled(HTableDescriptor desc, Class clazz) + throws IOException { // get the kv Pair entry = getKeyValueForClass(desc, clazz); // its not enabled so just return false. In fact, its not even present! 
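For reference (not part of the patch): the Constraints helpers whose javadoc is reflowed above are typically driven as below. This is a minimal sketch assuming the public org.apache.hadoop.hbase.constraint API; MyConstraint is a hypothetical constraint written only for this illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.constraint.Constraints;

public class ConstraintsUsageSketch {

  /** Hypothetical constraint used only for the illustration: rejects empty Puts. */
  public static class MyConstraint extends BaseConstraint {
    @Override
    public void check(Put p) throws ConstraintException {
      if (p.isEmpty()) {
        throw new ConstraintException("Put must carry at least one cell");
      }
    }
  }

  public static void main(String[] args) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
    // Register the constraint; as the javadoc above notes, it is enabled automatically
    // and receives the next free priority.
    Constraints.add(desc, MyConstraint.class);
    // Toggle it without losing its stored configuration.
    Constraints.disableConstraint(desc, MyConstraint.class);
    Constraints.enableConstraint(desc, MyConstraint.class);
    // Check whether it is present and enabled.
    System.out.println("enabled: " + Constraints.enabled(desc, MyConstraint.class));
  }
}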
@@ -544,25 +431,18 @@ public final class Constraints { } /** - * Get the constraints stored in the table descriptor - * - * @param desc - * To read from - * @param classloader - * To use when loading classes. If a special classloader is used on a - * region, for instance, then that should be the classloader used to - * load the constraints. This could also apply to unit-testing - * situation, where want to ensure that class is reloaded or not. - * @return List of configured {@link Constraint Constraints} - * @throws IOException - * if any part of reading/arguments fails + * Get the constraints stored in the table descriptor n * To read from n * To use when loading + * classes. If a special classloader is used on a region, for instance, then that should be the + * classloader used to load the constraints. This could also apply to unit-testing situation, + * where want to ensure that class is reloaded or not. + * @return List of configured {@link Constraint Constraints} n * if any part of reading/arguments + * fails */ - static List getConstraints(TableDescriptor desc, - ClassLoader classloader) throws IOException { + static List getConstraints(TableDescriptor desc, ClassLoader classloader) + throws IOException { List constraints = new ArrayList<>(); // loop through all the key, values looking for constraints - for (Map.Entry e : desc - .getValues().entrySet()) { + for (Map.Entry e : desc.getValues().entrySet()) { // read out the constraint String key = Bytes.toString(e.getKey().get()).trim(); String[] className = CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(key); @@ -578,27 +458,25 @@ public final class Constraints { conf = readConfiguration(e.getValue().get()); } catch (IOException e1) { // long that we don't have a valid configuration stored, and move on. - LOG.warn("Corrupted configuration found for key:" + key - + ", skipping it."); + LOG.warn("Corrupted configuration found for key:" + key + ", skipping it."); continue; } // if it is not enabled, skip it if (!conf.getBoolean(ENABLED_KEY, false)) { - if (LOG.isDebugEnabled()) - LOG.debug("Constraint: " + key + " is DISABLED - skipping it"); + if (LOG.isDebugEnabled()) LOG.debug("Constraint: " + key + " is DISABLED - skipping it"); // go to the next constraint continue; } try { // add the constraint, now that we expect it to be valid. 
- Class clazz = classloader.loadClass(key) - .asSubclass(Constraint.class); + Class clazz = + classloader.loadClass(key).asSubclass(Constraint.class); Constraint constraint = clazz.getDeclaredConstructor().newInstance(); constraint.setConf(conf); constraints.add(constraint); - } catch (InvocationTargetException | NoSuchMethodException | ClassNotFoundException | - InstantiationException | IllegalAccessException e1) { + } catch (InvocationTargetException | NoSuchMethodException | ClassNotFoundException + | InstantiationException | IllegalAccessException e1) { throw new IOException(e1); } } @@ -613,7 +491,7 @@ public final class Constraints { public int compare(Constraint c1, Constraint c2) { // compare the priorities of the constraints stored in their configuration return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY), - c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY)); + c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY)); } }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java index 9508321a625..324a6f37b99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coordination; import java.io.IOException; import java.util.Set; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective; @@ -39,7 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; * {@link #checkTaskStillAvailable(String)} Check that task is still there
        * {@link #checkTasks()} check for unassigned tasks and resubmit them * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @InterfaceAudience.Private @Deprecated @@ -53,7 +50,7 @@ public interface SplitLogManagerCoordination { final private Set failedDeletions; public SplitLogManagerDetails(ConcurrentMap tasks, MasterServices master, - Set failedDeletions) { + Set failedDeletions) { this.tasks = tasks; this.master = master; this.failedDeletions = failedDeletions; @@ -124,8 +121,8 @@ public interface SplitLogManagerCoordination { /** * Resubmit the task in case if found unassigned or failed * @param taskName path related to task - * @param task to resubmit - * @param force whether it should be forced + * @param task to resubmit + * @param force whether it should be forced * @return whether it was successful */ @@ -142,8 +139,7 @@ public interface SplitLogManagerCoordination { void deleteTask(String taskName); /** - * Support method to init constants such as timeout. Mostly required for UTs. - * @throws IOException + * Support method to init constants such as timeout. Mostly required for UTs. n */ void init() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java index 5452578a2c2..ff2fbfbe7e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java @@ -1,21 +1,22 @@ - /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.hbase.coordination; + import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -28,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Coordinated operations for {@link SplitLogWorker} and - * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important - * methods for SplitLogWorker:
+ * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important methods for
+ * SplitLogWorker:
 * {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is
 * ready to supply the tasks
        * {@link #taskLoop()} loop for new tasks until the worker is stopped
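As an aside (not part of the patch), the javadoc above implies roughly this call order from the worker's side; the wrapper class below is invented for the illustration, wiring and error handling are elided, and init(...) is assumed to have been invoked when the SplitLogWorker was created.

import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;

/** Illustrative only: rough lifecycle a worker follows against this coordination. */
public class SplitWorkerLoopSketch {
  static void run(SplitLogWorkerCoordination coordination) throws Exception {
    if (coordination.isReady()) {        // coordination engine is ready to supply tasks
      coordination.taskLoop();           // grab and execute split tasks until the worker is stopped
    }
    coordination.stopProcessingTasks();  // shutdown path: stop taking new tasks and exit
  }
}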
@@ -41,7 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 * Important methods for WALSplitterHandler:
        * splitting task has completed. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private @@ -50,16 +51,16 @@ public interface SplitLogWorkerCoordination { /** * Initialize internal values. This method should be used when corresponding SplitLogWorker * instance is created - * @param server instance of RegionServerServices to work with - * @param conf is current configuration. + * @param server instance of RegionServerServices to work with + * @param conf is current configuration. * @param splitTaskExecutor split executor from SplitLogWorker - * @param worker instance of SplitLogWorker + * @param worker instance of SplitLogWorker */ - void init(RegionServerServices server, Configuration conf, - TaskExecutor splitTaskExecutor, SplitLogWorker worker); + void init(RegionServerServices server, Configuration conf, TaskExecutor splitTaskExecutor, + SplitLogWorker worker); /** - * called when Coordination should stop processing tasks and exit + * called when Coordination should stop processing tasks and exit */ void stopProcessingTasks(); @@ -77,8 +78,8 @@ public interface SplitLogWorkerCoordination { /** * marks log file as corrupted * @param rootDir where to find the log - * @param name of the log - * @param fs file system + * @param name of the log + * @param fs file system */ void markCorrupted(Path rootDir, String name, FileSystem fs); @@ -109,10 +110,10 @@ public interface SplitLogWorkerCoordination { /** * Notify coordination engine that splitting task has completed. - * @param slt See {@link SplitLogTask} - * @param ctr counter to be updated + * @param slt See {@link SplitLogTask} + * @param ctr counter to be updated * @param splitTaskDetails details about log split task (specific to coordination engine being - * used). + * used). */ void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails splitTaskDetails); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index b6256ad7005..d08a7238e81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -1,5 +1,5 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one +/* + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coordination; import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK; @@ -28,7 +27,6 @@ import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -60,12 +58,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * ZooKeeper based implementation of - * {@link SplitLogManagerCoordination} + * ZooKeeper based implementation of {@link SplitLogManagerCoordination} */ @InterfaceAudience.Private -public class ZKSplitLogManagerCoordination extends ZKListener implements - SplitLogManagerCoordination { +public class ZKSplitLogManagerCoordination extends ZKListener + implements SplitLogManagerCoordination { public static final int DEFAULT_TIMEOUT = 120000; public static final int DEFAULT_ZK_RETRIES = 3; @@ -121,8 +118,8 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements public int remainingTasksInCoordination() { int count = 0; try { - List tasks = ZKUtil.listChildrenNoWatch(watcher, - watcher.getZNodePaths().splitLogZNode); + List tasks = + ZKUtil.listChildrenNoWatch(watcher, watcher.getZNodePaths().splitLogZNode); if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { @@ -142,8 +139,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements * It is possible for a task to stay in UNASSIGNED state indefinitely - say SplitLogManager wants * to resubmit a task. It forces the task to UNASSIGNED state but it dies before it could create * the RESCAN task node to signal the SplitLogWorkers to pick up the task. To prevent this - * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. - * @param path + * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. n */ private void handleUnassignedTask(String path) { if (ZKSplitLog.isRescanNode(watcher, path)) { @@ -177,13 +173,13 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements // finished the task. This allows to continue if the worker cannot actually handle it, // for any reason. final long time = EnvironmentEdgeManager.currentTime() - task.last_update; - final boolean alive = - details.getMaster().getServerManager() != null ? details.getMaster().getServerManager() - .isServerOnline(task.cur_worker_name) : true; + final boolean alive = details.getMaster().getServerManager() != null + ? 
details.getMaster().getServerManager().isServerOnline(task.cur_worker_name) + : true; if (alive && time < timeout) { LOG.trace("Skipping the resubmit of " + task.toString() + " because the server " - + task.cur_worker_name + " is not marked as dead, we waited for " + time - + " while the timeout is " + timeout); + + task.cur_worker_name + " is not marked as dead, we waited for " + time + + " while the timeout is " + timeout); return false; } @@ -192,7 +188,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements task.resubmitThresholdReached = true; SplitLogCounters.tot_mgr_resubmit_threshold_reached.increment(); LOG.info("Skipping resubmissions of task " + path + " because threshold " - + resubmitThreshold + " reached"); + + resubmitThreshold + " reached"); } return false; } @@ -219,7 +215,6 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements return true; } - @Override public void checkTasks() { rescan(Long.MAX_VALUE); @@ -237,11 +232,9 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements // Since the TimeoutMonitor will keep resubmitting UNASSIGNED tasks // therefore this behavior is safe. SplitLogTask slt = new SplitLogTask.Done(this.details.getServerName()); - this.watcher - .getRecoverableZooKeeper() - .getZooKeeper() - .create(ZKSplitLog.getRescanNode(watcher), slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, - CreateMode.EPHEMERAL_SEQUENTIAL, new CreateRescanAsyncCallback(), Long.valueOf(retries)); + this.watcher.getRecoverableZooKeeper().getZooKeeper().create(ZKSplitLog.getRescanNode(watcher), + slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL, + new CreateRescanAsyncCallback(), Long.valueOf(retries)); } @Override @@ -252,11 +245,8 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements @Override public void checkTaskStillAvailable(String path) { // A negative retry count will lead to ignoring all error processing. - this.watcher - .getRecoverableZooKeeper() - .getZooKeeper() - .getData(path, this.watcher, new GetDataAsyncCallback(), - Long.valueOf(-1) /* retry count */); + this.watcher.getRecoverableZooKeeper().getZooKeeper().getData(path, this.watcher, + new GetDataAsyncCallback(), Long.valueOf(-1) /* retry count */); SplitLogCounters.tot_mgr_get_data_queued.increment(); } @@ -265,8 +255,8 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements // Once a task znode is ready for delete, that is it is in the TASK_DONE // state, then no one should be writing to it anymore. That is no one // will be updating the znode version any more. - this.watcher.getRecoverableZooKeeper().getZooKeeper() - .delete(path, -1, new DeleteAsyncCallback(), retries); + this.watcher.getRecoverableZooKeeper().getZooKeeper().delete(path, -1, + new DeleteAsyncCallback(), retries); } private void deleteNodeSuccess(String path) { @@ -307,13 +297,13 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements /** * Helper function to check whether to abandon retries in ZooKeeper AsyncCallback functions * @param statusCode integer value of a ZooKeeper exception code - * @param action description message about the retried action + * @param action description message about the retried action * @return true when need to abandon retries otherwise false */ private boolean needAbandonRetries(int statusCode, String action) { if (statusCode == KeeperException.Code.SESSIONEXPIRED.intValue()) { LOG.error("ZK session expired. Master is expected to shut down. 
Abandoning retries for " - + "action=" + action); + + "action=" + action); return true; } return false; @@ -339,13 +329,13 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements } private void getDataSetWatch(String path, Long retry_count) { - this.watcher.getRecoverableZooKeeper().getZooKeeper() - .getData(path, this.watcher, new GetDataAsyncCallback(), retry_count); + this.watcher.getRecoverableZooKeeper().getZooKeeper().getData(path, this.watcher, + new GetDataAsyncCallback(), retry_count); SplitLogCounters.tot_mgr_get_data_queued.increment(); } private void getDataSetWatchSuccess(String path, byte[] data, int version) - throws DeserializationException { + throws DeserializationException { if (data == null) { if (version == Integer.MIN_VALUE) { // assume all done. The task znode suddenly disappeared. @@ -382,8 +372,8 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements LOG.info("Task " + path + " entered state=" + slt.toString()); resubmitOrFail(path, CHECK); } else { - LOG.error(HBaseMarkers.FATAL, "logic error - unexpected zk state for path = " - + path + " data = " + slt.toString()); + LOG.error(HBaseMarkers.FATAL, + "logic error - unexpected zk state for path = " + path + " data = " + slt.toString()); setDone(path, FAILURE); } } @@ -466,15 +456,15 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements private void lookForOrphans() { List orphans; try { - orphans = ZKUtil.listChildrenNoWatch(this.watcher, - this.watcher.getZNodePaths().splitLogZNode); + orphans = + ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.getZNodePaths().splitLogZNode); if (orphans == null) { LOG.warn("Could not get children of " + this.watcher.getZNodePaths().splitLogZNode); return; } } catch (KeeperException e) { LOG.warn("Could not get children of " + this.watcher.getZNodePaths().splitLogZNode + " " - + StringUtils.stringifyException(e)); + + StringUtils.stringifyException(e)); return; } int rescan_nodes = 0; @@ -491,7 +481,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements getDataSetWatch(nodepath, zkretries); } LOG.info("Found " + (orphans.size() - rescan_nodes) + " orphan tasks and " + rescan_nodes - + " rescan nodes"); + + " rescan nodes"); } @Override @@ -509,15 +499,14 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements private boolean resubmit(String path, int version) { try { // blocking zk call but this is done from the timeout thread - SplitLogTask slt = - new SplitLogTask.Unassigned(this.details.getServerName()); + SplitLogTask slt = new SplitLogTask.Unassigned(this.details.getServerName()); if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) { LOG.debug("Failed to resubmit task " + path + " version changed"); return false; } } catch (NoNodeException e) { LOG.warn("Failed to resubmit because znode doesn't exist " + path - + " task done (or forced done by removing the znode)"); + + " task done (or forced done by removing the znode)"); try { getDataSetWatchSuccess(path, null, Integer.MIN_VALUE); } catch (DeserializationException e1) { @@ -536,12 +525,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements return true; } - /** * {@link org.apache.hadoop.hbase.master.SplitLogManager} can use objects implementing this * interface to finish off a partially done task by - * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a - * serialization point at the end of the task processing. 
Must be restartable and idempotent. + * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a serialization + * point at the end of the task processing. Must be restartable and idempotent. */ public interface TaskFinisher { /** @@ -563,9 +551,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements * partially done tasks are present. taskname is the name of the task that was put up in * zookeeper. *

        - * @param workerName - * @param taskname - * @return DONE if task completed successfully, ERR otherwise + * nn * @return DONE if task completed successfully, ERR otherwise */ Status finish(ServerName workerName, String taskname); } @@ -596,7 +582,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements } else { Long retry_count = (Long) ctx; LOG.warn("Create rc=" + KeeperException.Code.get(rc) + " for " + path - + " remaining retries=" + retry_count); + + " remaining retries=" + retry_count); if (retry_count == 0) { SplitLogCounters.tot_mgr_node_create_err.increment(); createNodeFailure(path); @@ -636,11 +622,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements if (retry_count < 0) { LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path - + ". Ignoring error. No error handling. No retrying."); + + ". Ignoring error. No error handling. No retrying."); return; } - LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path - + " remaining retries=" + retry_count); + LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path + " remaining retries=" + + retry_count); if (retry_count == 0) { SplitLogCounters.tot_mgr_get_data_err.increment(); getDataSetWatchFailure(path); @@ -677,7 +663,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements SplitLogCounters.tot_mgr_node_delete_err.increment(); Long retry_count = (Long) ctx; LOG.warn("Delete rc=" + KeeperException.Code.get(rc) + " for " + path - + " remaining retries=" + retry_count); + + " remaining retries=" + retry_count); if (retry_count == 0) { LOG.warn("Delete failed " + path); details.getFailedDeletions().add(path); @@ -688,8 +674,8 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements return; } else { LOG.info(path + " does not exist. Either was created but deleted behind our" - + " back by another pending delete OR was deleted" - + " in earlier retry rounds. zkretries = " + ctx); + + " back by another pending delete OR was deleted" + + " in earlier retry rounds. zkretries = " + ctx); } } else { LOG.debug("Deleted " + path); @@ -715,7 +701,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements } Long retry_count = (Long) ctx; LOG.warn("rc=" + KeeperException.Code.get(rc) + " for " + path + " remaining retries=" - + retry_count); + + retry_count); if (retry_count == 0) { createRescanFailure(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 323e5752ace..8f3d8fdd6e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter (see SplitWALManager) which doesn't use this zk-based coordinator. + * distributed WAL splitter (see SplitWALManager) which doesn't use this zk-based + * coordinator. 
*/ @Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -38,8 +39,8 @@ public class ZkCoordinatedStateManager implements CoordinatedStateManager { public ZkCoordinatedStateManager(Server server) { this.watcher = server.getZooKeeper(); splitLogWorkerCoordination = new ZkSplitLogWorkerCoordination(server.getServerName(), watcher); - splitLogManagerCoordination = new ZKSplitLogManagerCoordination(server.getConfiguration(), - watcher); + splitLogManagerCoordination = + new ZKSplitLogManagerCoordination(server.getConfiguration(), watcher); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 07e751716bf..1b255b25e17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coordination; import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER; @@ -59,20 +57,18 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * ZooKeeper based implementation of {@link SplitLogWorkerCoordination} - * It listen for changes in ZooKeeper and - * + * ZooKeeper based implementation of {@link SplitLogWorkerCoordination} It listen for changes in + * ZooKeeper and */ @InterfaceAudience.Private -public class ZkSplitLogWorkerCoordination extends ZKListener implements - SplitLogWorkerCoordination { +public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLogWorkerCoordination { private static final Logger LOG = LoggerFactory.getLogger(ZkSplitLogWorkerCoordination.class); private static final int checkInterval = 5000; // 5 seconds private static final int FAILED_TO_OWN_TASK = -1; - private SplitLogWorker worker; + private SplitLogWorker worker; private TaskExecutor splitTaskExecutor; @@ -132,17 +128,16 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements * Override setter from {@link SplitLogWorkerCoordination} */ @Override - public void init(RegionServerServices server, Configuration conf, - TaskExecutor splitExecutor, SplitLogWorker worker) { + public void init(RegionServerServices server, Configuration conf, TaskExecutor splitExecutor, + SplitLogWorker worker) { this.server = server; this.worker = worker; this.splitTaskExecutor = splitExecutor; maxConcurrentTasks = - conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER); - reportPeriod = - conf.getInt("hbase.splitlog.report.period", - conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, - ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3); + conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER); + reportPeriod = conf.getInt("hbase.splitlog.report.period", + conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, + ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3); } /* Support functions for ZooKeeper async callback */ @@ -165,8 +160,8 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements } public 
void getDataSetWatchAsync() { - watcher.getRecoverableZooKeeper().getZooKeeper() - .getData(currentTask, watcher, new GetDataAsyncCallback(), null); + watcher.getRecoverableZooKeeper().getZooKeeper().getData(currentTask, watcher, + new GetDataAsyncCallback(), null); SplitLogCounters.tot_wkr_get_data_queued.increment(); } @@ -189,10 +184,12 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements // UNASSIGNED because by the time this worker sets the data watch // the node might have made two transitions - from owned by this // worker to unassigned to owned by another worker - if (!slt.isOwned(serverName) && !slt.isDone(serverName) && !slt.isErr(serverName) - && !slt.isResigned(serverName)) { + if ( + !slt.isOwned(serverName) && !slt.isDone(serverName) && !slt.isErr(serverName) + && !slt.isResigned(serverName) + ) { LOG.info("task " + taskpath + " preempted from " + serverName - + ", current task state and owner=" + slt.toString()); + + ", current task state and owner=" + slt.toString()); worker.stopTask(); } } @@ -241,7 +238,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements } currentVersion = - attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion()); + attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion()); if (currentVersion < 0) { SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment(); return false; @@ -249,7 +246,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements if (ZKSplitLog.isRescanNode(watcher, currentTask)) { ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = - new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); + new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); splitTaskDetails.setTaskNode(currentTask); splitTaskDetails.setCurTaskZKVersion(new MutableInt(currentVersion)); @@ -285,7 +282,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements /** * Submit a log split task to executor service - * @param curTask task to submit + * @param curTask task to submit * @param curTaskZKVersion current version of task */ void submitTask(final String curTask, final int curTaskZKVersion, final int reportPeriod) { @@ -300,8 +297,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements if ((t - last_report_at) > reportPeriod) { last_report_at = t; int latestZKVersion = - attemptToOwnTask(false, watcher, server.getServerName(), curTask, - zkVersion.intValue()); + attemptToOwnTask(false, watcher, server.getServerName(), curTask, zkVersion.intValue()); if (latestZKVersion < 0) { LOG.warn("Failed to heartbeat the task" + curTask); return false; @@ -312,13 +308,12 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements } }; ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = - new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); + new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); splitTaskDetails.setTaskNode(curTask); splitTaskDetails.setCurTaskZKVersion(zkVersion); - WALSplitterHandler hsh = - new WALSplitterHandler(server, this, splitTaskDetails, reporter, - this.tasksInProgress, splitTaskExecutor); + WALSplitterHandler hsh = new WALSplitterHandler(server, this, splitTaskDetails, reporter, + this.tasksInProgress, splitTaskExecutor); server.getExecutorService().submit(hsh); } @@ -335,15 +330,15 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements * This method is also used to periodically heartbeat the task progress by transitioning the node 
 * from OWNED to OWNED.
 * <p>
        - * @param isFirstTime shows whther it's the first attempt. - * @param zkw zk wathcer - * @param server name - * @param task to own + * @param isFirstTime shows whther it's the first attempt. + * @param zkw zk wathcer + * @param server name + * @param task to own * @param taskZKVersion version of the task in zk * @return non-negative integer value when task can be owned by current region server otherwise -1 */ - protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, - ServerName server, String task, int taskZKVersion) { + protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, ServerName server, + String task, int taskZKVersion) { int latestZKVersion = FAILED_TO_OWN_TASK; try { SplitLogTask slt = new SplitLogTask.Owned(server); @@ -368,7 +363,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements } } catch (InterruptedException e1) { LOG.warn("Interrupted while trying to assert ownership of " + task + " " - + StringUtils.stringifyException(e1)); + + StringUtils.stringifyException(e1)); Thread.currentThread().interrupt(); } SplitLogCounters.tot_wkr_task_heartbeat_failed.increment(); @@ -381,8 +376,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements * in a cluster. *

        * Synchronization using taskReadySeq ensures that it will try to grab every task - * that has been put up - * @throws InterruptedException + * that has been put up n */ @Override public void taskLoop() throws InterruptedException { @@ -392,7 +386,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements paths = getTaskList(); if (paths == null) { LOG.warn("Could not get tasks, did someone remove " + watcher.getZNodePaths().splitLogZNode - + " ... worker thread exiting."); + + " ... worker thread exiting."); return; } // shuffle the paths to prevent different split log worker start from the same log file after @@ -418,8 +412,8 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements int idx = (i + offset) % paths.size(); // don't call ZKSplitLog.getNodeName() because that will lead to // double encoding of the path name - taskGrabbed |= grabTask(ZNodePaths.joinZNode( - watcher.getZNodePaths().splitLogZNode, paths.get(idx))); + taskGrabbed |= + grabTask(ZNodePaths.joinZNode(watcher.getZNodePaths().splitLogZNode, paths.get(idx))); break; } else { if (LOG.isTraceEnabled()) { @@ -453,8 +447,8 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements // it will come out if worker thread exited. while (!shouldStop) { try { - childrenPaths = ZKUtil.listChildrenAndWatchForNewChildren(watcher, - watcher.getZNodePaths().splitLogZNode); + childrenPaths = + ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.getZNodePaths().splitLogZNode); if (childrenPaths != null) { return childrenPaths; } @@ -462,7 +456,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements LOG.warn("Could not get children of znode " + watcher.getZNodePaths().splitLogZNode, e); } LOG.debug("Retry listChildren of znode " + watcher.getZNodePaths().splitLogZNode - + " after sleep for " + sleepTime + "ms!"); + + " after sleep for " + sleepTime + "ms!"); Thread.sleep(sleepTime); } return childrenPaths; @@ -480,12 +474,13 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements result = ZKUtil.checkExists(watcher, watcher.getZNodePaths().splitLogZNode); } catch (KeeperException e) { // ignore - LOG.warn("Exception when checking for " + watcher.getZNodePaths().splitLogZNode - + " ... retrying", e); + LOG.warn( + "Exception when checking for " + watcher.getZNodePaths().splitLogZNode + " ... retrying", + e); } if (result == -1) { LOG.info(watcher.getZNodePaths().splitLogZNode - + " znode does not exist, waiting for master to create"); + + " znode does not exist, waiting for master to create"); Thread.sleep(1000); } return (result != -1); @@ -506,7 +501,6 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements watcher.unregisterListener(this); } - @Override public void stopProcessingTasks() { this.shouldStop = true; @@ -542,9 +536,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements */ /** * endTask() can fail and the only way to recover out of it is for the - * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. - * @param slt - * @param ctr + * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. 
nn */ @Override public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) { @@ -558,7 +550,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements return; } LOG.warn("failed to transistion task " + task + " to end state " + slt - + " because of version mismatch "); + + " because of version mismatch "); } catch (KeeperException.BadVersionException bve) { LOG.warn("transisition task " + task + " to " + slt + " failed because of version mismatch", bve); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java index 2818dcd675f..b8cabe8cfed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,9 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -27,8 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; - /** * Encapsulation of the environment of each coprocessor */ @@ -48,10 +45,11 @@ public class BaseEnvironment implements CoprocessorEnviro /** * Constructor - * @param impl the coprocessor instance + * @param impl the coprocessor instance * @param priority chaining priority */ - public BaseEnvironment(final C impl, final int priority, final int seq, final Configuration conf) { + public BaseEnvironment(final C impl, final int priority, final int seq, + final Configuration conf) { this.impl = impl; this.classLoader = impl.getClass().getClassLoader(); this.priority = priority; @@ -62,8 +60,7 @@ public class BaseEnvironment implements CoprocessorEnviro /** Initialize the environment */ public void startup() throws IOException { - if (state == Coprocessor.State.INSTALLED || - state == Coprocessor.State.STOPPED) { + if (state == Coprocessor.State.INSTALLED || state == Coprocessor.State.STOPPED) { state = Coprocessor.State.STARTING; Thread currentThread = Thread.currentThread(); ClassLoader hostClassLoader = currentThread.getContextClassLoader(); @@ -75,8 +72,8 @@ public class BaseEnvironment implements CoprocessorEnviro currentThread.setContextClassLoader(hostClassLoader); } } else { - LOG.warn("Not starting coprocessor " + impl.getClass().getName() + - " because not inactive (state=" + state.toString() + ")"); + LOG.warn("Not starting coprocessor " + impl.getClass().getName() + + " because not inactive (state=" + state.toString() + ")"); } } @@ -91,13 +88,13 @@ public class BaseEnvironment implements CoprocessorEnviro impl.stop(this); state = 
Coprocessor.State.STOPPED; } catch (IOException ioe) { - LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe); + LOG.error("Error stopping coprocessor " + impl.getClass().getName(), ioe); } finally { currentThread.setContextClassLoader(hostClassLoader); } } else { - LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+ - " because not active (state="+state.toString()+")"); + LOG.warn("Not stopping coprocessor " + impl.getClass().getName() + + " because not active (state=" + state.toString() + ")"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java index ef91bf264fc..108590b36a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,57 +17,51 @@ */ package org.apache.hadoop.hbase.coprocessor; -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.Collections; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest; -import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessResponse; -import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService; -import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.RowProcessor; - import com.google.protobuf.ByteString; import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Collections; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessResponse; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RowProcessor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * This class demonstrates how to implement atomic read-modify-writes - * using {@link Region#processRowsWithLocks} and Coprocessor endpoints. + * This class demonstrates how to implement atomic read-modify-writes using + * {@link Region#processRowsWithLocks} and Coprocessor endpoints. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public abstract class BaseRowProcessorEndpoint -extends RowProcessorService implements RegionCoprocessor { + extends RowProcessorService implements RegionCoprocessor { private RegionCoprocessorEnvironment env; + /** - * Pass a processor to region to process multiple rows atomically. - * - * The RowProcessor implementations should be the inner classes of your - * RowProcessorEndpoint. This way the RowProcessor can be class-loaded with - * the Coprocessor endpoint together. - * - * See {@code TestRowProcessorEndpoint} for example. - * - * The request contains information for constructing processor - * (see {@link #constructRowProcessorFromRequest}. The processor object defines - * the read-modify-write procedure. + * Pass a processor to region to process multiple rows atomically. The RowProcessor + * implementations should be the inner classes of your RowProcessorEndpoint. This way the + * RowProcessor can be class-loaded with the Coprocessor endpoint together. See + * {@code TestRowProcessorEndpoint} for example. The request contains information for constructing + * processor (see {@link #constructRowProcessorFromRequest}. The processor object defines the + * read-modify-write procedure. */ @Override public void process(RpcController controller, ProcessRequest request, - RpcCallback done) { + RpcCallback done) { ProcessResponse resultProto = null; try { - RowProcessor processor = constructRowProcessorFromRequest(request); + RowProcessor processor = constructRowProcessorFromRequest(request); Region region = env.getRegion(); long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE; @@ -90,17 +84,17 @@ extends RowProcessorService implements RegionCoprocessor { /** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. 
* @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} + * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } @@ -112,16 +106,15 @@ extends RowProcessorService implements RegionCoprocessor { } @SuppressWarnings("unchecked") - RowProcessor constructRowProcessorFromRequest(ProcessRequest request) - throws IOException { + RowProcessor constructRowProcessorFromRequest(ProcessRequest request) throws IOException { String className = request.getRowProcessorClassName(); Class cls; try { cls = Class.forName(className); - RowProcessor ci = (RowProcessor) cls.getDeclaredConstructor().newInstance(); + RowProcessor ci = (RowProcessor) cls.getDeclaredConstructor().newInstance(); if (request.hasRowProcessorInitializerMessageName()) { - Class imn = Class.forName(request.getRowProcessorInitializerMessageName()) - .asSubclass(Message.class); + Class imn = + Class.forName(request.getRowProcessorInitializerMessageName()).asSubclass(Message.class); Method m; try { m = imn.getMethod("parseFrom", ByteString.class); @@ -132,7 +125,7 @@ extends RowProcessorService implements RegionCoprocessor { } S s; try { - s = (S)m.invoke(null,request.getRowProcessorInitializerMessage()); + s = (S) m.invoke(null, request.getRowProcessorInitializerMessage()); } catch (IllegalArgumentException e) { throw new IOException(e); } catch (InvocationTargetException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java index b69a7270371..8e571c2403e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,55 +15,53 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Coprocessors implement this interface to observe and mediate bulk load operations. - *

- * <br><br>
- *
- * <h3>Exception Handling</h3>
- * For all functions, exception handling is done as follows:
+ * Coprocessors implement this interface to observe and mediate bulk load operations. <br>
+ * <br>
+ * <h3>Exception Handling</h3> For all functions, exception handling is done as follows:
  * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- *         the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- *         {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
  * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface BulkLoadObserver { - /** - * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. - * It can't bypass the default action, e.g., ctx.bypass() won't have effect. - * If you need to get the region or table name, get it from the - * ctx as follows: code>ctx.getEnvironment().getRegion(). Use - * getRegionInfo to fetch the encodedName and use getTableDescriptor() to get the tableName. - * @param ctx the environment to interact with the framework and master - */ - default void prePrepareBulkLoad(ObserverContext ctx) - throws IOException {} + /** + * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. It can't bypass the + * default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table + * name, get it from the ctx as follows: + * code>ctx.getEnvironment().getRegion(). Use getRegionInfo to fetch the encodedName + * and use getTableDescriptor() to get the tableName. + * @param ctx the environment to interact with the framework and master + */ + default void prePrepareBulkLoad(ObserverContext ctx) + throws IOException { + } - /** - * Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. - * It can't bypass the default action, e.g., ctx.bypass() won't have effect. - * If you need to get the region or table name, get it from the - * ctx as follows: code>ctx.getEnvironment().getRegion(). Use - * getRegionInfo to fetch the encodedName and use getTableDescriptor() to get the tableName. - * @param ctx the environment to interact with the framework and master - */ - default void preCleanupBulkLoad(ObserverContext ctx) - throws IOException {} + /** + * Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. It can't bypass the + * default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table + * name, get it from the ctx as follows: + * code>ctx.getEnvironment().getRegion(). Use getRegionInfo to fetch the encodedName + * and use getTableDescriptor() to get the tableName. + * @param ctx the environment to interact with the framework and master + */ + default void preCleanupBulkLoad(ObserverContext ctx) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 319936d9ebf..cc8977f4581 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -31,10 +30,6 @@ import java.util.UUID; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; @@ -46,43 +41,42 @@ import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; import org.apache.hadoop.hbase.util.SortedList; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Strings; /** - * Provides the common setup framework and runtime services for coprocessor - * invocation from HBase services. + * Provides the common setup framework and runtime services for coprocessor invocation from HBase + * services. * @param type of specific coprocessor this host will handle - * @param type of specific coprocessor environment this host requires. - * provides + * @param type of specific coprocessor environment this host requires. provides */ @InterfaceAudience.Private public abstract class CoprocessorHost> { - public static final String REGION_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.region.classes"; + public static final String REGION_COPROCESSOR_CONF_KEY = "hbase.coprocessor.region.classes"; public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.regionserver.classes"; + "hbase.coprocessor.regionserver.classes"; public static final String USER_REGION_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.user.region.classes"; - public static final String MASTER_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.master.classes"; - public static final String WAL_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.wal.classes"; + "hbase.coprocessor.user.region.classes"; + public static final String MASTER_COPROCESSOR_CONF_KEY = "hbase.coprocessor.master.classes"; + public static final String WAL_COPROCESSOR_CONF_KEY = "hbase.coprocessor.wal.classes"; public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; public static final boolean DEFAULT_ABORT_ON_ERROR = true; public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; - public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = - "hbase.coprocessor.user.enabled"; + public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.user.enabled"; public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; public static final String SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR = - "hbase.skip.load.duplicate.table.coprocessor"; + "hbase.skip.load.duplicate.table.coprocessor"; public static final boolean DEFAULT_SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR = false; private static final Logger LOG = LoggerFactory.getLogger(CoprocessorHost.class); protected Abortable abortable; /** Ordered set of loaded coprocessors with lock */ protected final SortedList coprocEnvironments = - new SortedList<>(new EnvironmentPriorityComparator()); + new SortedList<>(new EnvironmentPriorityComparator()); protected Configuration conf; // unique file prefix to use for local copies of jars when classloading protected String pathPrefix; 
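For orientation while reading this hunk: the constants reformatted above are the configuration keys that CoprocessorHost reads in loadSystemCoprocessors(). A minimal sketch of wiring a system-level region coprocessor through them, assuming a hypothetical observer class com.example.MyRegionObserver on the region server classpath (CoprocessorHost is audience-private, so treat this purely as an illustration of the keys):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class CoprocessorConfigExample {
  public static Configuration buildConf() {
    Configuration conf = HBaseConfiguration.create();
    // Keep coprocessor loading enabled (these are the defaults).
    conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true);
    conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, true);
    // Register a system region coprocessor by fully-qualified class name
    // (hypothetical class; it must be on the region server classpath).
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "com.example.MyRegionObserver");
    // Abort the server on unexpected coprocessor errors (default is true).
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
    return conf;
  }
}

The same pattern applies to MASTER_COPROCESSOR_CONF_KEY and WAL_COPROCESSOR_CONF_KEY for master- and WAL-scoped coprocessors.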
@@ -94,15 +88,13 @@ public abstract class CoprocessorHost coprocessorNames = - Collections.synchronizedSet(new HashSet()); + private static Set coprocessorNames = Collections.synchronizedSet(new HashSet()); public static Set getLoadedCoprocessors() { synchronized (coprocessorNames) { @@ -111,27 +103,25 @@ public abstract class CoprocessorHost getCoprocessors() { Set returnValue = new TreeSet<>(); - for (E e: coprocEnvironments) { + for (E e : coprocEnvironments) { returnValue.add(e.getInstance().getClass().getSimpleName()); } return returnValue; } /** - * Load system coprocessors once only. Read the class names from configuration. - * Called by constructor. + * Load system coprocessors once only. Read the class names from configuration. Called by + * constructor. */ protected void loadSystemCoprocessors(Configuration conf, String confKey) { - boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, - DEFAULT_COPROCESSORS_ENABLED); + boolean coprocessorsEnabled = + conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, DEFAULT_COPROCESSORS_ENABLED); if (!coprocessorsEnabled) { return; } @@ -140,8 +130,7 @@ public abstract class CoprocessorHost implClass; - LOG.debug("Loading coprocessor class " + className + " with path " + - path + " and priority " + priority); + LOG.debug("Loading coprocessor class " + className + " with path " + path + " and priority " + + priority); boolean skipLoadDuplicateCoprocessor = conf.getBoolean(SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR, DEFAULT_SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR); @@ -243,19 +231,19 @@ public abstract class CoprocessorHost implClass, int priority, Configuration conf) - throws IOException { + throws IOException { E env = checkAndLoadInstance(implClass, priority, conf); coprocEnvironments.add(env); } /** * @param implClass Implementation class - * @param priority priority - * @param conf configuration + * @param priority priority + * @param conf configuration * @throws java.io.IOException Exception */ public E checkAndLoadInstance(Class implClass, int priority, Configuration conf) - throws IOException { + throws IOException { // create the instance C impl; try { @@ -288,7 +276,7 @@ public abstract class CoprocessorHost implClass) - throws InstantiationException, IllegalAccessException; + throws InstantiationException, IllegalAccessException; public void shutdown(E e) { assert e instanceof BaseEnvironment; @@ -328,9 +316,11 @@ public abstract class CoprocessorHost T findCoprocessor(Class cls) { - for (E env: coprocEnvironments) { + for (E env : coprocEnvironments) { if (cls.isAssignableFrom(env.getInstance().getClass())) { return (T) env.getInstance(); } @@ -354,12 +344,12 @@ public abstract class CoprocessorHost List findCoprocessors(Class cls) { ArrayList ret = new ArrayList<>(); - for (E env: coprocEnvironments) { + for (E env : coprocEnvironments) { C cp = env.getInstance(); - if(cp != null) { + if (cp != null) { if (cls.isAssignableFrom(cp.getClass())) { - ret.add((T)cp); + ret.add((T) cp); } } } @@ -372,9 +362,11 @@ public abstract class CoprocessorHost { @Override - public int compare(final CoprocessorEnvironment env1, - final CoprocessorEnvironment env2) { + public int compare(final CoprocessorEnvironment env1, final CoprocessorEnvironment env2) { if (env1.getPriority() < env2.getPriority()) { return -1; } else if (env1.getPriority() > env2.getPriority()) { @@ -436,18 +426,15 @@ public abstract class CoprocessorHost> legacyWarning = - new ConcurrentSkipListSet<>( - new Comparator>() { - @Override - public int compare(Class c1, 
Class c2) { - if (c1.equals(c2)) { - return 0; - } - return c1.getName().compareTo(c2.getName()); - } - }); + new ConcurrentSkipListSet<>(new Comparator>() { + @Override + public int compare(Class c1, Class c2) { + if (c1.equals(c2)) { + return 0; + } + return c1.getName().compareTo(c2.getName()); + } + }); /** * Implementations defined function to get an observer of type {@code O} from a coprocessor of - * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each - * observer they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for - * each of RegionObserver, EndpointObserver and BulkLoadObserver. - * These getters are used by {@code ObserverOperation} to get appropriate observer from the - * coprocessor. + * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each observer + * they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for each of + * RegionObserver, EndpointObserver and BulkLoadObserver. These getters are used by + * {@code ObserverOperation} to get appropriate observer from the coprocessor. */ @FunctionalInterface - public interface ObserverGetter extends Function> {} + public interface ObserverGetter extends Function> { + } private abstract class ObserverOperation extends ObserverContextImpl { ObserverGetter observerGetter; @@ -532,12 +517,14 @@ public abstract class CoprocessorHost observerGetter, User user, boolean bypassable) { - super(user != null? user: RpcServer.getRequestUser().orElse(null), bypassable); + super(user != null ? user : RpcServer.getRequestUser().orElse(null), bypassable); this.observerGetter = observerGetter; } abstract void callObserver() throws IOException; - protected void postEnvCall() {} + + protected void postEnvCall() { + } } // Can't derive ObserverOperation from ObserverOperationWithResult (R = Void) because then all @@ -555,15 +542,14 @@ public abstract class CoprocessorHost observerGetter, User user, - boolean bypassable) { + boolean bypassable) { super(observerGetter, user, bypassable); } /** * In case of coprocessors which have many kinds of observers (for eg, {@link RegionCoprocessor} - * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all - * observers, in which case they will return null for that observer's getter. - * We simply ignore such cases. + * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all observers, + * in which case they will return null for that observer's getter. We simply ignore such cases. 
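The ObserverGetter plumbing above is what lets a single coprocessor expose only the observer interfaces it actually implements; the host simply skips observers whose getter is empty. A minimal sketch under that assumption, using a hypothetical AuditingCoprocessor that only needs region-level hooks:

import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

// Hypothetical coprocessor: implements only RegionObserver, so the endpoint and
// bulk-load observer getters keep their default empty Optional and are ignored.
public class AuditingCoprocessor implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }
}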
*/ @Override void callObserver() throws IOException { @@ -584,17 +570,16 @@ public abstract class CoprocessorHost observerGetter, R result, - boolean bypassable) { + boolean bypassable) { this(observerGetter, result, null, bypassable); } - public ObserverOperationWithResult(ObserverGetter observerGetter, R result, - User user) { + public ObserverOperationWithResult(ObserverGetter observerGetter, R result, User user) { this(observerGetter, result, user, false); } private ObserverOperationWithResult(ObserverGetter observerGetter, R result, User user, - boolean bypassable) { + boolean bypassable) { super(observerGetter, user, bypassable); this.result = result; } @@ -620,18 +605,18 @@ public abstract class CoprocessorHost R execOperationWithResult( - final ObserverOperationWithResult observerOperation) throws IOException { + final ObserverOperationWithResult observerOperation) throws IOException { boolean bypass = execOperation(observerOperation); R result = observerOperation.getResult(); - return bypass == observerOperation.isBypassable()? result: null; + return bypass == observerOperation.isBypassable() ? result : null; } /** * @return True if we are to bypass (Can only be true if - * ObserverOperation#isBypassable(). + * ObserverOperation#isBypassable(). */ protected boolean execOperation(final ObserverOperation observerOperation) - throws IOException { + throws IOException { boolean bypass = false; if (observerOperation == null) { return bypass; @@ -662,19 +647,18 @@ public abstract class CoprocessorHost + * Coprocessor classes can be configured in any order, based on that priority is set and chained + * in a sorted order. Should be used preStop*() hooks i.e. when master/regionserver is going down. + * This function first calls coprocessor methods (using ObserverOperation.call()) and then + * shutdowns the environment in postEnvCall().
        * Need to execute all coprocessor methods first then postEnvCall(), otherwise some coprocessors * may remain shutdown if any exception occurs during next coprocessor execution which prevent * master/regionserver stop or cluster shutdown. (Refer: * HBASE-16663 - * @return true if bypaas coprocessor execution, false if not. - * @throws IOException + * @return true if bypaas coprocessor execution, false if not. n */ protected boolean execShutdown(final ObserverOperation observerOperation) - throws IOException { + throws IOException { if (observerOperation == null) return false; boolean bypass = false; List envs = coprocEnvironments.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java index f6102290dec..1e07d3288cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.Service; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; /** - * Coprocessor endpoints providing protobuf services should implement this - * interface and return the {@link Service} instance via {@link #getService()}. + * Coprocessor endpoints providing protobuf services should implement this interface and return the + * {@link Service} instance via {@link #getService()}. * @deprecated Since 2.0. Will be removed in 3.0 */ @Deprecated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorServiceBackwardCompatiblity.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorServiceBackwardCompatiblity.java index 23c63a22496..2b2cf042d83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorServiceBackwardCompatiblity.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorServiceBackwardCompatiblity.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,26 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.Service; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.Collections; +import org.apache.yetus.audience.InterfaceAudience; /** * Classes to help maintain backward compatibility with now deprecated {@link CoprocessorService} - * and {@link SingletonCoprocessorService}. - * From 2.0 onwards, implementors of coprocessor service should also implement the relevant - * coprocessor class (For eg {@link MasterCoprocessor} for coprocessor service in master), and - * override get*Service() method to return the {@link com.google.protobuf.Service} object. - * To maintain backward compatibility with 1.0 implementation, we'll wrap implementation of - * CoprocessorService/SingletonCoprocessorService in the new - * {Master, Region, RegionServer}Coprocessor class. - * Since there is no backward compatibility guarantee for Observers, we leave get*Observer() to - * default which returns null. - * This approach to maintain backward compatibility seems cleaner and more explicit. + * and {@link SingletonCoprocessorService}. From 2.0 onwards, implementors of coprocessor service + * should also implement the relevant coprocessor class (For eg {@link MasterCoprocessor} for + * coprocessor service in master), and override get*Service() method to return the + * {@link com.google.protobuf.Service} object. To maintain backward compatibility with 1.0 + * implementation, we'll wrap implementation of CoprocessorService/SingletonCoprocessorService in + * the new {Master, Region, RegionServer}Coprocessor class. Since there is no backward compatibility + * guarantee for Observers, we leave get*Observer() to default which returns null. This approach to + * maintain backward compatibility seems cleaner and more explicit. */ @InterfaceAudience.Private @Deprecated @@ -83,4 +78,3 @@ public class CoprocessorServiceBackwardCompatiblity { } } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java index 0eb5e156b7b..e73523af47a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,20 +17,18 @@ */ package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; - import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import org.apache.yetus.audience.InterfaceAudience; /** - * Marker annotation that denotes Coprocessors that are core to HBase. - * A Core Coprocessor is a CP that realizes a core HBase feature. Features are sometimes - * implemented first as a Coprocessor to prove viability. The idea is that once proven, they then - * migrate to core. Meantime, HBase Core Coprocessors get this annotation. No other Coprocessors - * can carry this annotation. + * Marker annotation that denotes Coprocessors that are core to HBase. A Core Coprocessor is a CP + * that realizes a core HBase feature. Features are sometimes implemented first as a Coprocessor to + * prove viability. 
The idea is that once proven, they then migrate to core. Meantime, HBase Core + * Coprocessors get this annotation. No other Coprocessors can carry this annotation. */ // Core Coprocessors are generally naughty making use of HBase internals doing accesses no // Coprocessor should be up to so we mark these special Coprocessors with this annotation and on @@ -42,4 +39,5 @@ import java.lang.annotation.Target; @InterfaceAudience.Private @Retention(RetentionPolicy.RUNTIME) // This Annotation is not @Documented because I don't want users figuring out its mechanics. -public @interface CoreCoprocessor {} +public @interface CoreCoprocessor { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java index 0fa5ea9e6ce..26166ad3f5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java @@ -7,43 +7,38 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.Message; import com.google.protobuf.Service; - import java.io.IOException; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Coprocessors implement this interface to observe and mediate endpoint invocations - * on a region. - *

- * <br><br>
- *
- * <h3>Exception Handling</h3>
- * For all functions, exception handling is done as follows:
+ * Coprocessors implement this interface to observe and mediate endpoint invocations on a region.
+ * <br>
+ * <h3>Exception Handling</h3> For all functions, exception handling is done as follows:
  * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- *         the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- *         {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
  * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -51,35 +46,35 @@ import org.apache.yetus.audience.InterfaceStability; public interface EndpointObserver { /** - * Called before an Endpoint service method is invoked. - * The request message can be altered by returning a new instance. Throwing an - * exception will abort the invocation. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * @param ctx the environment provided by the region server - * @param service the endpoint service - * @param request Request message expected by given {@code Service}'s method (by the name - * {@code methodName}). + * Called before an Endpoint service method is invoked. The request message can be altered by + * returning a new instance. Throwing an exception will abort the invocation. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param service the endpoint service + * @param request Request message expected by given {@code Service}'s method (by the name + * {@code methodName}). * @param methodName the invoked service method * @return the possibly modified message */ default Message preEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request) throws IOException { + Service service, String methodName, Message request) throws IOException { return request; } /** - * Called after an Endpoint service method is invoked. The response message can be - * altered using the builder. - * @param ctx the environment provided by the region server - * @param service the endpoint service - * @param methodName the invoked service method - * @param request Request message expected by given {@code Service}'s method (by the name - * {@code methodName}). + * Called after an Endpoint service method is invoked. The response message can be altered using + * the builder. + * @param ctx the environment provided by the region server + * @param service the endpoint service + * @param methodName the invoked service method + * @param request Request message expected by given {@code Service}'s method (by the name + * {@code methodName}). * @param responseBuilder Builder for final response to the client, with original response from - * Service's method merged into it. + * Service's method merged into it. */ default void postEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request, Message.Builder responseBuilder) - throws IOException {} + Service service, String methodName, Message request, Message.Builder responseBuilder) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java index 595e2d7765f..2682b78fd51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java @@ -21,11 +21,11 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; /** - * Mark a class that it has a MasterServices accessor. - * Temporary hack until core Coprocesssors are integrated. + * Mark a class that it has a MasterServices accessor. Temporary hack until core Coprocesssors are + * integrated. 
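The two EndpointObserver hooks reformatted above wrap every coprocessor endpoint RPC on a region: preEndpointInvocation may substitute the request, postEndpointInvocation may adjust the response builder. A minimal sketch of a logging observer built on those hooks (the class name LoggingEndpointObserver is hypothetical):

import com.google.protobuf.Message;
import com.google.protobuf.Service;
import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical observer that logs every endpoint invocation on a region.
public class LoggingEndpointObserver implements RegionCoprocessor, EndpointObserver {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingEndpointObserver.class);

  @Override
  public Optional<EndpointObserver> getEndpointObserver() {
    return Optional.of(this);
  }

  @Override
  public Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
    Service service, String methodName, Message request) throws IOException {
    LOG.debug("Invoking {}.{}", service.getDescriptorForType().getFullName(), methodName);
    // Return the request unchanged; a new instance could be substituted here.
    return request;
  }

  @Override
  public void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
    Service service, String methodName, Message request, Message.Builder responseBuilder)
    throws IOException {
    LOG.debug("Completed {}.{}", service.getDescriptorForType().getFullName(), methodName);
  }
}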
* @see CoreCoprocessor * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * facility as CoreCoprocessors are integated into core. */ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java index 89a2c729464..cef03390acb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java @@ -21,11 +21,11 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.yetus.audience.InterfaceAudience; /** - * Mark a class that it has a RegionServiceServices accessor. - * Temporary hack until core Coprocesssors are integrated. + * Mark a class that it has a RegionServiceServices accessor. Temporary hack until core + * Coprocesssors are integrated. * @see CoreCoprocessor * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * facility as CoreCoprocessors are integated into core. */ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java index d940385ffae..a288a4dd869 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface MasterCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index cc72871b672..c83b9da4308 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -39,48 +36,44 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironmentDo not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. - * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *

        Be aware that operations that make use of this Connection are executed as the RegionServer + *

        + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + * operations as this User (See {@link #createConnection(Configuration)}} to run as other than the + * RegionServer User). + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -90,9 +83,10 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

        + *

        + * See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples of how + * metrics can be instantiated and used. + *

        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForMaster(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index bf667eb8ea2..9ffbd802054 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -44,31 +43,27 @@ import org.apache.hadoop.hbase.security.access.UserPermission; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; - /** * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.master.HMaster} process. - *

        - * + * {@link org.apache.hadoop.hbase.master.HMaster} process.
        + *
        * Since most implementations will be interested in only a subset of hooks, this class uses * 'default' functions to avoid having to add unnecessary overrides. When the functions are - * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. - * It is done in a way that these default definitions act as no-op. So our suggestion to - * implementation would be to not call these 'default' methods from overrides. - *

        - * - *

        Exception Handling

        - * For all functions, exception handling is done as follows: + * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. It + * is done in a way that these default definitions act as no-op. So our suggestion to implementation + * would be to not call these 'default' methods from overrides.
        + *
        + *

        Exception Handling

        For all functions, exception handling is done as follows: *
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- *         the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- *         {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
  * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -78,225 +73,216 @@ public interface MasterObserver { /** * Called before we create the region infos for this table. Called as part of create table RPC * call. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param desc the TableDescriptor for the table * @return the TableDescriptor used to create the table. Default is the one passed in. Return * {@code null} means cancel the creation. */ default TableDescriptor preCreateTableRegionsInfos( - final ObserverContext ctx, TableDescriptor desc) - throws IOException { + final ObserverContext ctx, TableDescriptor desc) + throws IOException { return desc; } /** - * Called before a new table is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create - * table RPC call. - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called + * as part of create table RPC call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void preCreateTable(final ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException {} + TableDescriptor desc, RegionInfo[] regions) throws IOException { + } /** - * Called after the createTable operation has been requested. Called as part - * of create table RPC call. - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called after the createTable operation has been requested. Called as part of create table RPC + * call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void postCreateTable(final ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException {} + TableDescriptor desc, RegionInfo[] regions) throws IOException { + } /** - * Called before a new table is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create - * table procedure and it is async to the create RPC call. - * - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called + * as part of create table procedure and it is async to the create RPC call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ - default void preCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException {} + default void preCreateTableAction(final ObserverContext ctx, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { + } /** - * Called after the createTable operation has been requested. Called as part - * of create table RPC call. Called as part of create table procedure and - * it is async to the create RPC call. 
- * - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called after the createTable operation has been requested. Called as part of create table RPC + * call. Called as part of create table procedure and it is async to the create RPC call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void postCompletedCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException {} + final ObserverContext ctx, final TableDescriptor desc, + final RegionInfo[] regions) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table RPC call. - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preDeleteTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called after the deleteTable operation has been requested. Called as part - * of delete table RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the deleteTable operation has been requested. Called as part of delete table RPC + * call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postDeleteTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table procedure and - * it is async to the delete RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table procedure and it is async to the delete RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preDeleteTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + default void preDeleteTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table procedure and it is async to the - * delete RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table procedure and it is async to the delete RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedDeleteTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. 
Called as part of truncate table RPC call. - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preTruncateTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called after the truncateTable operation has been requested. Called as part - * of truncate table RPC call. - * The truncate is synchronous, so this method will be called when the - * truncate operation is terminated. - * @param ctx the environment to interact with the framework and master + * Called after the truncateTable operation has been requested. Called as part of truncate table + * RPC call. The truncate is synchronous, so this method will be called when the truncate + * operation is terminated. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postTruncateTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. Called as part of truncate table procedure and it is async - * to the truncate RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table procedure and it is async to the truncate RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preTruncateTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + default void preTruncateTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. Called as part of truncate table procedure and it is async to the - * truncate RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table procedure and it is async to the truncate RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedTruncateTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** - * Called prior to modifying a table's properties. Called as part of modify - * table RPC call. - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called prior to modifying a table's properties. Called as part of modify table RPC call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param newDescriptor after modify operation, table will have this descriptor * @deprecated Since 2.1. Will be removed in 3.0. 
*/ @Deprecated default void preModifyTable(final ObserverContext ctx, - final TableName tableName, TableDescriptor newDescriptor) throws IOException {} + final TableName tableName, TableDescriptor newDescriptor) throws IOException { + } /** - * Called prior to modifying a table's properties. Called as part of modify - * table RPC call. - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called prior to modifying a table's properties. Called as part of modify table RPC call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table - * @param newDescriptor after modify operation, table will have this descriptor + * @param newDescriptor after modify operation, table will have this descriptor */ default TableDescriptor preModifyTable(final ObserverContext ctx, - final TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) - throws IOException { + final TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) + throws IOException { return newDescriptor; } /** - * Called after the modifyTable operation has been requested. Called as part - * of modify table RPC call. - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called after the modifyTable operation has been requested. Called as part of modify table RPC + * call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table * @deprecated Since 2.1. Will be removed in 3.0. */ @Deprecated default void postModifyTable(final ObserverContext ctx, - final TableName tableName, TableDescriptor currentDescriptor) throws IOException {} + final TableName tableName, TableDescriptor currentDescriptor) throws IOException { + } /** - * Called after the modifyTable operation has been requested. Called as part - * of modify table RPC call. - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table - * @param oldDescriptor descriptor of table before modify operation happened + * Called after the modifyTable operation has been requested. Called as part of modify table RPC + * call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table + * @param oldDescriptor descriptor of table before modify operation happened * @param currentDescriptor current TableDescriptor of the table */ default void postModifyTable(final ObserverContext ctx, - final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) + final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) throws IOException { postModifyTable(ctx, tableName, currentDescriptor); } /** - * Called prior to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. - * - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called prior to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. 
+ * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param newDescriptor after modify operation, table will have this descriptor * @deprecated Since 2.1. Will be removed in 3.0. */ @Deprecated - default void preModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor newDescriptor) throws IOException {} + default void preModifyTableAction(final ObserverContext ctx, + final TableName tableName, final TableDescriptor newDescriptor) throws IOException { + } /** - * Called prior to modifying a table's store file tracker. Called as part of modify - * table store file tracker RPC call. - * @param ctx the environment to interact with the framework and master + * Called prior to modifying a table's store file tracker. Called as part of modify table store + * file tracker RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param dstSFT the store file tracker + * @param dstSFT the store file tracker * @return the store file tracker */ default String preModifyTableStoreFileTracker( @@ -306,23 +292,24 @@ public interface MasterObserver { } /** - * Called after modifying a table's store file tracker. Called as part of modify - * table store file tracker RPC call. - * @param ctx the environment to interact with the framework and master + * Called after modifying a table's store file tracker. Called as part of modify table store file + * tracker RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param dstSFT the store file tracker + * @param dstSFT the store file tracker */ default void postModifyTableStoreFileTracker( final ObserverContext ctx, final TableName tableName, - String dstSFT) throws IOException {} + String dstSFT) throws IOException { + } /** * Called prior to modifying a family's store file tracker. Called as part of modify family store * file tracker RPC call. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param family the column family - * @param dstSFT the store file tracker + * @param family the column family + * @param dstSFT the store file tracker * @return the store file tracker */ default String preModifyColumnFamilyStoreFileTracker( @@ -332,1259 +319,1306 @@ public interface MasterObserver { } /** - * Called after modifying a family store file tracker. Called as part of modify family store - * file tracker RPC call. - * @param ctx the environment to interact with the framework and master + * Called after modifying a family store file tracker. Called as part of modify family store file + * tracker RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param family the column family - * @param dstSFT the store file tracker + * @param family the column family + * @param dstSFT the store file tracker */ default void postModifyColumnFamilyStoreFileTracker( final ObserverContext ctx, final TableName tableName, - final byte[] family, String dstSFT) throws IOException {} + final byte[] family, String dstSFT) throws IOException { + } /** - * Called prior to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. 
- * - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called prior to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table - * @param newDescriptor after modify operation, table will have this descriptor + * @param newDescriptor after modify operation, table will have this descriptor */ - default void preModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor currentDescriptor, - final TableDescriptor newDescriptor) throws IOException { + default void preModifyTableAction(final ObserverContext ctx, + final TableName tableName, final TableDescriptor currentDescriptor, + final TableDescriptor newDescriptor) throws IOException { preModifyTableAction(ctx, tableName, newDescriptor); } /** - * Called after to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. - * - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called after to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table * @deprecated Since 2.1. Will be removed in 3.0. */ @Deprecated default void postCompletedModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor currentDescriptor) throws IOException {} + final ObserverContext ctx, final TableName tableName, + final TableDescriptor currentDescriptor) throws IOException { + } /** - * Called after to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. - * - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table - * @param oldDescriptor descriptor of table before modify operation happened + * Called after to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table + * @param oldDescriptor descriptor of table before modify operation happened * @param currentDescriptor current TableDescriptor of the table */ default void postCompletedModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor oldDescriptor, - final TableDescriptor currentDescriptor) throws IOException { + final ObserverContext ctx, final TableName tableName, + final TableDescriptor oldDescriptor, final TableDescriptor currentDescriptor) + throws IOException { postCompletedModifyTableAction(ctx, tableName, currentDescriptor); } /** - * Called prior to enabling a table. Called as part of enable table RPC call. - * @param ctx the environment to interact with the framework and master + * Called prior to enabling a table. Called as part of enable table RPC call. 
+ * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preEnableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called after the enableTable operation has been requested. Called as part - * of enable table RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the enableTable operation has been requested. Called as part of enable table RPC + * call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postEnableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called prior to enabling a table. Called as part of enable table procedure - * and it is async to the enable table RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called prior to enabling a table. Called as part of enable table procedure and it is async to + * the enable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preEnableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + default void preEnableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after the enableTable operation has been requested. Called as part - * of enable table procedure and it is async to the enable table RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after the enableTable operation has been requested. Called as part of enable table + * procedure and it is async to the enable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedEnableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** - * Called prior to disabling a table. Called as part of disable table RPC - * call. - * @param ctx the environment to interact with the framework and master + * Called prior to disabling a table. Called as part of disable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preDisableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called after the disableTable operation has been requested. Called as part - * of disable table RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the disableTable operation has been requested. Called as part of disable table RPC + * call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postDisableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called prior to disabling a table. Called as part of disable table procedure - * and it is asyn to the disable table RPC call. 
- * - * @param ctx the environment to interact with the framework and master + * Called prior to disabling a table. Called as part of disable table procedure and it is asyn to + * the disable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preDisableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + default void preDisableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after the disableTable operation has been requested. Called as part - * of disable table procedure and it is asyn to the disable table RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after the disableTable operation has been requested. Called as part of disable table + * procedure and it is asyn to the disable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedDisableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** * Called before a abortProcedure request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param procId the Id of the procedure */ - default void preAbortProcedure( - ObserverContext ctx, final long procId) throws IOException {} + default void preAbortProcedure(ObserverContext ctx, + final long procId) throws IOException { + } /** * Called after a abortProcedure request has been processed. * @param ctx the environment to interact with the framework and master */ default void postAbortProcedure(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before a getProcedures request has been processed. * @param ctx the environment to interact with the framework and master */ default void preGetProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after a getProcedures request has been processed. * @param ctx the environment to interact with the framework and master */ default void postGetProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before a getLocks request has been processed. * @param ctx the environment to interact with the framework and master * @throws IOException if something went wrong */ - default void preGetLocks(ObserverContext ctx) - throws IOException {} + default void preGetLocks(ObserverContext ctx) throws IOException { + } /** * Called after a getLocks request has been processed. * @param ctx the environment to interact with the framework and master * @throws IOException if something went wrong */ - default void postGetLocks( - ObserverContext ctx) throws IOException {} + default void postGetLocks(ObserverContext ctx) throws IOException { + } /** * Called prior to moving a given region from one region server to another. 
- * @param ctx the environment to interact with the framework and master - * @param region the RegionInfo - * @param srcServer the source ServerName + * @param ctx the environment to interact with the framework and master + * @param region the RegionInfo + * @param srcServer the source ServerName * @param destServer the destination ServerName */ default void preMove(final ObserverContext ctx, - final RegionInfo region, final ServerName srcServer, - final ServerName destServer) - throws IOException {} + final RegionInfo region, final ServerName srcServer, final ServerName destServer) + throws IOException { + } /** * Called after the region move has been requested. - * @param ctx the environment to interact with the framework and master - * @param region the RegionInfo - * @param srcServer the source ServerName + * @param ctx the environment to interact with the framework and master + * @param region the RegionInfo + * @param srcServer the source ServerName * @param destServer the destination ServerName */ default void postMove(final ObserverContext ctx, - final RegionInfo region, final ServerName srcServer, - final ServerName destServer) - throws IOException {} + final RegionInfo region, final ServerName srcServer, final ServerName destServer) + throws IOException { + } /** * Called prior to assigning a specific region. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionInfo the regionInfo of the region */ default void preAssign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region assignment has been requested. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionInfo the regionInfo of the region */ default void postAssign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called prior to unassigning a given region. - * @param ctx the environment to interact with the framework and master - * @param regionInfo - * @param force whether to force unassignment or not - * @deprecated in 2.4.0. replaced by preUnassign(ctx, regionInfo). removed in hbase 3. - * until then safe to either leave implementation here or move it - * to the new method. default impl of that method calls this one. + * @param ctx the environment to interact with the framework and master n * @param force whether + * to force unassignment or not + * @deprecated in 2.4.0. replaced by preUnassign(ctx, regionInfo). removed in hbase 3. until then + * safe to either leave implementation here or move it to the new method. default impl + * of that method calls this one. */ default void preUnassign(final ObserverContext ctx, - final RegionInfo regionInfo, final boolean force) throws IOException {} + final RegionInfo regionInfo, final boolean force) throws IOException { + } /** * Called prior to unassigning a given region. 
- * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void preUnassign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException { + final RegionInfo regionInfo) throws IOException { preUnassign(ctx, regionInfo, false); } /** * Called after the region unassignment has been requested. - * @param ctx the environment to interact with the framework and master - * @param regionInfo - * @param force whether to force unassignment or not - * @deprecated in 2.4.0. replaced by postUnassign(ctx, regionInfo). removed in hbase 3. - * until then safe to either leave implementation here or move it - * to the new method. default impl of that method calls this one. + * @param ctx the environment to interact with the framework and master n * @param force whether + * to force unassignment or not + * @deprecated in 2.4.0. replaced by postUnassign(ctx, regionInfo). removed in hbase 3. until then + * safe to either leave implementation here or move it to the new method. default impl + * of that method calls this one. */ default void postUnassign(final ObserverContext ctx, - final RegionInfo regionInfo, final boolean force) throws IOException {} + final RegionInfo regionInfo, final boolean force) throws IOException { + } /** * Called after the region unassignment has been requested. - * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void postUnassign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException { + final RegionInfo regionInfo) throws IOException { postUnassign(ctx, regionInfo, false); } /** * Called prior to marking a given region as offline. - * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void preRegionOffline(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region has been marked offline. - * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void postRegionOffline(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** - * Called prior to requesting rebalancing of the cluster regions, though after - * the initial checks for regions in transition and the balance switch flag. - * @param ctx the environment to interact with the framework and master + * Called prior to requesting rebalancing of the cluster regions, though after the initial checks + * for regions in transition and the balance switch flag. + * @param ctx the environment to interact with the framework and master * @param request the request used to trigger the balancer */ - default void preBalance(final ObserverContext ctx, BalanceRequest request) - throws IOException {} + default void preBalance(final ObserverContext ctx, + BalanceRequest request) throws IOException { + } /** * Called after the balancing plan has been submitted. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param request the request used to trigger the balance - * @param plans the RegionPlans which master has executed. RegionPlan serves as hint - * as for the final destination for the underlying region but may not represent the - * final state of assignment + * @param plans the RegionPlans which master has executed. RegionPlan serves as hint as for the + * final destination for the underlying region but may not represent the final + * state of assignment */ - default void postBalance(final ObserverContext ctx, BalanceRequest request, List plans) - throws IOException {} + default void postBalance(final ObserverContext ctx, + BalanceRequest request, List plans) throws IOException { + } /** - * Called prior to setting split / merge switch - * Supports Coprocessor 'bypass'. - * @param ctx the coprocessor instance's environment - * @param newValue the new value submitted in the call + * Called prior to setting split / merge switch Supports Coprocessor 'bypass'. + * @param ctx the coprocessor instance's environment + * @param newValue the new value submitted in the call * @param switchType type of switch */ default void preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException {} + final boolean newValue, final MasterSwitchType switchType) throws IOException { + } /** * Called after setting split / merge switch - * @param ctx the coprocessor instance's environment - * @param newValue the new value submitted in the call + * @param ctx the coprocessor instance's environment + * @param newValue the new value submitted in the call * @param switchType type of switch */ default void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException {} + final boolean newValue, final MasterSwitchType switchType) throws IOException { + } /** * Called before the split region procedure is called. - * @param c the environment to interact with the framework and master + * @param c the environment to interact with the framework and master * @param tableName the table where the region belongs to - * @param splitRow split point + * @param splitRow split point */ - default void preSplitRegion( - final ObserverContext c, - final TableName tableName, - final byte[] splitRow) - throws IOException {} + default void preSplitRegion(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { + } /** * Called before the region is split. - * @param c the environment to interact with the framework and master + * @param c the environment to interact with the framework and master * @param tableName the table where the region belongs to - * @param splitRow split point + * @param splitRow split point */ - default void preSplitRegionAction( - final ObserverContext c, - final TableName tableName, - final byte[] splitRow) - throws IOException {} + default void preSplitRegionAction(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { + } /** * Called after the region is split. 
- * @param c the environment to interact with the framework and master + * @param c the environment to interact with the framework and master * @param regionInfoA the left daughter region * @param regionInfoB the right daughter region */ - default void postCompletedSplitRegionAction( - final ObserverContext c, - final RegionInfo regionInfoA, - final RegionInfo regionInfoB) throws IOException {} + default void postCompletedSplitRegionAction(final ObserverContext c, + final RegionInfo regionInfoA, final RegionInfo regionInfoB) throws IOException { + } /** * This will be called before update META step as part of split transaction. - * @param ctx the environment to interact with the framework and master - * @param splitKey - * @param metaEntries + * @param ctx the environment to interact with the framework and master nn */ default void preSplitRegionBeforeMETAAction( - final ObserverContext ctx, - final byte[] splitKey, - final List metaEntries) throws IOException {} - + final ObserverContext ctx, final byte[] splitKey, + final List metaEntries) throws IOException { + } /** * This will be called after update META step as part of split transaction * @param ctx the environment to interact with the framework and master */ default void preSplitRegionAfterMETAAction( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after the roll back of the split region is completed * @param ctx the environment to interact with the framework and master */ default void postRollBackSplitRegionAction( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * Called before the regions merge. * @param ctx the environment to interact with the framework and master */ - default void preMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + default void preMergeRegionsAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * called after the regions merge. * @param ctx the environment to interact with the framework and master */ default void postCompletedMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - final RegionInfo mergedRegion) throws IOException {} + final ObserverContext ctx, final RegionInfo[] regionsToMerge, + final RegionInfo mergedRegion) throws IOException { + } /** * This will be called before update META step as part of regions merge transaction. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param metaEntries mutations to execute on hbase:meta atomically with regions merge updates. - * Any puts or deletes to execute on hbase:meta can be added to the mutations. + * Any puts or deletes to execute on hbase:meta can be added to the mutations. */ - default void preMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - @MetaMutationAnnotation List metaEntries) throws IOException {} + default void preMergeRegionsCommitAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge, @MetaMutationAnnotation List metaEntries) + throws IOException { + } /** * This will be called after META step as part of regions merge transaction. 
* @param ctx the environment to interact with the framework and master */ - default void postMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - final RegionInfo mergedRegion) throws IOException {} + default void postMergeRegionsCommitAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion) throws IOException { + } /** * This will be called after the roll back of the regions merge. * @param ctx the environment to interact with the framework and master */ default void postRollBackMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + final ObserverContext ctx, final RegionInfo[] regionsToMerge) + throws IOException { + } /** * Called prior to modifying the flag used to enable/disable region balancing. * @param ctx the coprocessor instance's environment */ default void preBalanceSwitch(final ObserverContext ctx, - final boolean newValue) throws IOException {} + final boolean newValue) throws IOException { + } /** * Called after the flag to enable/disable balancing has changed. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param oldValue the previously set balanceSwitch value * @param newValue the newly set balanceSwitch value */ default void postBalanceSwitch(final ObserverContext ctx, - final boolean oldValue, final boolean newValue) throws IOException {} + final boolean oldValue, final boolean newValue) throws IOException { + } /** * Called prior to shutting down the full HBase cluster, including this * {@link org.apache.hadoop.hbase.master.HMaster} process. */ default void preShutdown(final ObserverContext ctx) - throws IOException {} - + throws IOException { + } /** - * Called immediately prior to stopping this - * {@link org.apache.hadoop.hbase.master.HMaster} process. + * Called immediately prior to stopping this {@link org.apache.hadoop.hbase.master.HMaster} + * process. */ default void preStopMaster(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** - * Called immediately after an active master instance has completed - * initialization. Will not be called on standby master instances unless - * they take over the active role. + * Called immediately after an active master instance has completed initialization. Will not be + * called on standby master instances unless they take over the active role. */ default void postStartMaster(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Call before the master initialization is set to true. * {@link org.apache.hadoop.hbase.master.HMaster} process. */ default void preMasterInitialization(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** - * Called before a new snapshot is taken. - * Called as part of snapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called before a new snapshot is taken. Called as part of snapshot RPC call. 
+ * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void preSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called after the snapshot operation has been requested. - * Called as part of snapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called after the snapshot operation has been requested. Called as part of snapshot RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void postSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** * Called after the snapshot operation has been completed. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void postCompletedSnapshotAction(ObserverContext ctx, - SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { + SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { } /** * Called before listSnapshots request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to list */ default void preListSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** * Called after listSnapshots request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to list */ default void postListSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** - * Called before a snapshot is cloned. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called before a snapshot is cloned. Called as part of restoreSnapshot RPC call. 
+ * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to create */ default void preCloneSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called after a snapshot clone operation has been requested. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called after a snapshot clone operation has been requested. Called as part of restoreSnapshot + * RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the v of the table to create */ default void postCloneSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called before a snapshot is restored. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called before a snapshot is restored. Called as part of restoreSnapshot RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to restore */ default void preRestoreSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called after a snapshot restore operation has been requested. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called after a snapshot restore operation has been requested. Called as part of restoreSnapshot + * RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to restore */ default void postRestoreSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called before a snapshot is deleted. - * Called as part of deleteSnapshot RPC call. - * @param ctx the environment to interact with the framework and master + * Called before a snapshot is deleted. Called as part of deleteSnapshot RPC call. + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete */ default void preDeleteSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** - * Called after the delete snapshot operation has been requested. 
- * Called as part of deleteSnapshot RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the delete snapshot operation has been requested. Called as part of deleteSnapshot + * RPC call. + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete */ default void postDeleteSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** * Called before a getTableDescriptors request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableNamesList the list of table names, or null if querying for all - * @param descriptors an empty list, can be filled with what to return in coprocessor - * @param regex regular expression used for filtering the table names + * @param descriptors an empty list, can be filled with what to return in coprocessor + * @param regex regular expression used for filtering the table names */ default void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException {} + List tableNamesList, List descriptors, String regex) + throws IOException { + } /** * Called after a getTableDescriptors request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableNamesList the list of table names, or null if querying for all - * @param descriptors the list of descriptors about to be returned - * @param regex regular expression used for filtering the table names + * @param descriptors the list of descriptors about to be returned + * @param regex regular expression used for filtering the table names */ default void postGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException {} + List tableNamesList, List descriptors, String regex) + throws IOException { + } /** * Called before a getTableNames request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors an empty list, can be filled with what to return by coprocessor - * @param regex regular expression used for filtering the table names + * @param regex regular expression used for filtering the table names */ default void preGetTableNames(ObserverContext ctx, - List descriptors, String regex) throws IOException {} + List descriptors, String regex) throws IOException { + } /** * Called after a getTableNames request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned - * @param regex regular expression used for filtering the table names + * @param regex regular expression used for filtering the table names */ default void postGetTableNames(ObserverContext ctx, - List descriptors, String regex) throws IOException {} - - + List descriptors, String regex) throws IOException { + } /** - * Called before a new namespace is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. 
+ * Called before a new namespace is created by {@link org.apache.hadoop.hbase.master.HMaster}. * @param ctx the environment to interact with the framework and master - * @param ns the NamespaceDescriptor for the table + * @param ns the NamespaceDescriptor for the table */ default void preCreateNamespace(final ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } + /** * Called after the createNamespace operation has been requested. * @param ctx the environment to interact with the framework and master - * @param ns the NamespaceDescriptor for the table + * @param ns the NamespaceDescriptor for the table */ default void postCreateNamespace(final ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * namespace - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a namespace + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace */ default void preDeleteNamespace(final ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called after the deleteNamespace operation has been requested. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace */ default void postDeleteNamespace(final ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called prior to modifying a namespace's properties. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param newNsDescriptor after modify operation, namespace will have this descriptor * @deprecated Since 2.1. Will be removed in 3.0. */ @Deprecated default void preModifyNamespace(final ObserverContext ctx, - NamespaceDescriptor newNsDescriptor) throws IOException {} + NamespaceDescriptor newNsDescriptor) throws IOException { + } /** * Called prior to modifying a namespace's properties. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param currentNsDescriptor current NamespaceDescriptor of the namespace - * @param newNsDescriptor after modify operation, namespace will have this descriptor + * @param newNsDescriptor after modify operation, namespace will have this descriptor */ default void preModifyNamespace(final ObserverContext ctx, - NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor) + NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor) throws IOException { preModifyNamespace(ctx, newNsDescriptor); } /** * Called after the modifyNamespace operation has been requested. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param currentNsDescriptor current NamespaceDescriptor of the namespace * @deprecated Since 2.1. Will be removed in 3.0. 
*/ @Deprecated default void postModifyNamespace(final ObserverContext ctx, - NamespaceDescriptor currentNsDescriptor) throws IOException {} + NamespaceDescriptor currentNsDescriptor) throws IOException { + } /** * Called after the modifyNamespace operation has been requested. - * @param ctx the environment to interact with the framework and master - * @param oldNsDescriptor descriptor of namespace before modify operation happened + * @param ctx the environment to interact with the framework and master + * @param oldNsDescriptor descriptor of namespace before modify operation happened * @param currentNsDescriptor current NamespaceDescriptor of the namespace */ default void postModifyNamespace(final ObserverContext ctx, - NamespaceDescriptor oldNsDescriptor, NamespaceDescriptor currentNsDescriptor) + NamespaceDescriptor oldNsDescriptor, NamespaceDescriptor currentNsDescriptor) throws IOException { postModifyNamespace(ctx, currentNsDescriptor); } /** * Called before a getNamespaceDescriptor request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace */ default void preGetNamespaceDescriptor(ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called after a getNamespaceDescriptor request has been processed. * @param ctx the environment to interact with the framework and master - * @param ns the NamespaceDescriptor + * @param ns the NamespaceDescriptor */ default void postGetNamespaceDescriptor(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } /** * Called before a listNamespaces request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespaces an empty list, can be filled with what to return if bypassing * @throws IOException if something went wrong */ default void preListNamespaces(ObserverContext ctx, - List namespaces) throws IOException {} + List namespaces) throws IOException { + } /** * Called after a listNamespaces request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespaces the list of namespaces about to be returned * @throws IOException if something went wrong */ default void postListNamespaces(ObserverContext ctx, - List namespaces) throws IOException {}; + List namespaces) throws IOException { + }; /** * Called before a listNamespaceDescriptors request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors an empty list, can be filled with what to return by coprocessor */ default void preListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException {} + List descriptors) throws IOException { + } /** * Called after a listNamespaceDescriptors request has been processed. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned */ default void postListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException {} - + List descriptors) throws IOException { + } /** * Called before the table memstore is flushed to disk. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preTableFlush(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called after the table memstore is flushed to disk. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postTableFlush(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called before the quota for the user is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param userName the name of user - * @param quotas the current quota for the user + * @param quotas the current quota for the user */ default void preSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException {} + final String userName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the user is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param userName the name of user - * @param quotas the resulting quota for the user + * @param quotas the resulting quota for the user */ default void postSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException {} + final String userName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the user on the specified table is stored. - * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param tableName the name of the table - * @param quotas the current quota for the user on the table + * @param quotas the current quota for the user on the table */ - default void preSetUserQuota( - final ObserverContext ctx, final String userName, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + default void preSetUserQuota(final ObserverContext ctx, + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called after the quota for the user on the specified table is stored. 
- * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param tableName the name of the table - * @param quotas the resulting quota for the user on the table + * @param quotas the resulting quota for the user on the table */ - default void postSetUserQuota( - final ObserverContext ctx, final String userName, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + default void postSetUserQuota(final ObserverContext ctx, + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called before the quota for the user on the specified namespace is stored. - * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param namespace the name of the namespace - * @param quotas the current quota for the user on the namespace + * @param quotas the current quota for the user on the namespace */ - default void preSetUserQuota( - final ObserverContext ctx, final String userName, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + default void preSetUserQuota(final ObserverContext ctx, + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called after the quota for the user on the specified namespace is stored. - * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param namespace the name of the namespace - * @param quotas the resulting quota for the user on the namespace + * @param quotas the resulting quota for the user on the namespace */ - default void postSetUserQuota( - final ObserverContext ctx, final String userName, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + default void postSetUserQuota(final ObserverContext ctx, + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called before the quota for the table is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param quotas the current quota for the table + * @param quotas the current quota for the table */ default void preSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the table is stored. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param quotas the resulting quota for the table + * @param quotas the resulting quota for the table */ default void postSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the namespace is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace - * @param quotas the current quota for the namespace + * @param quotas the current quota for the namespace */ default void preSetNamespaceQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + final String namespace, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the namespace is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace - * @param quotas the resulting quota for the namespace + * @param quotas the resulting quota for the namespace */ default void postSetNamespaceQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + final String namespace, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the region server is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionServer the name of the region server - * @param quotas the current quota for the region server + * @param quotas the current quota for the region server */ default void preSetRegionServerQuota(final ObserverContext ctx, - final String regionServer, final GlobalQuotaSettings quotas) throws IOException {} + final String regionServer, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the region server is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionServer the name of the region server - * @param quotas the resulting quota for the region server + * @param quotas the resulting quota for the region server */ default void postSetRegionServerQuota(final ObserverContext ctx, - final String regionServer, final GlobalQuotaSettings quotas) throws IOException {} + final String regionServer, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before merge regions request. - * @param ctx coprocessor environment + * @param ctx coprocessor environment * @param regionsToMerge regions to be merged */ - default void preMergeRegions( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + default void preMergeRegions(final ObserverContext ctx, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * called after merge regions request. 
- * @param c coprocessor environment + * @param c coprocessor environment * @param regionsToMerge regions to be merged */ - default void postMergeRegions( - final ObserverContext c, - final RegionInfo[] regionsToMerge) throws IOException {} + default void postMergeRegions(final ObserverContext c, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * Called before servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup destination group */ default void preMoveServersAndTables(final ObserverContext ctx, - Set
<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {} + Set
<Address> servers, Set<TableName> tables, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup name of group */ default void postMoveServersAndTables(final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set
<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {} + Set
<Address> servers, Set<TableName> tables, String targetGroup) throws IOException { + } /** * Called before servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup destination group */ default void preMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set
<Address> servers, String targetGroup) throws IOException {} + Set
<Address> servers, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup name of group */ default void postMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set
<Address> servers, String targetGroup) throws IOException {} + Set<Address>
        servers, String targetGroup) throws IOException { + } /** * Called before tables are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param tables set of tables to move + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move * @param targetGroup name of group */ default void preMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException {} + Set tables, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param tables set of tables to move + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move * @param targetGroup name of group */ default void postMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException {} + Set tables, String targetGroup) throws IOException { + } /** * Called before a new region server group is added - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param name group name */ - default void preAddRSGroup(final ObserverContext ctx, - String name) throws IOException {} + default void preAddRSGroup(final ObserverContext ctx, String name) + throws IOException { + } /** * Called after a new region server group is added - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param name group name */ - default void postAddRSGroup(final ObserverContext ctx, - String name) throws IOException {} + default void postAddRSGroup(final ObserverContext ctx, String name) + throws IOException { + } /** * Called before a region server group is removed - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param name group name */ default void preRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException {} - - /** - * Called after a region server group is removed - * @param ctx the environment to interact with the framework and master - * @param name group name - */ - default void postRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException {} - - /** - * Called before a region server group is removed - * @param ctx the environment to interact with the framework and master - * @param groupName group name - */ - default void preBalanceRSGroup(final ObserverContext ctx, - String groupName, BalanceRequest request) throws IOException { + String name) throws IOException { } /** * Called after a region server group is removed - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master + * @param name group name + */ + default void postRemoveRSGroup(final ObserverContext ctx, + String name) throws IOException { + } + + /** + * Called before a region server group is removed + * @param ctx the environment to interact with the framework and master * @param groupName group name - * @param request the request sent to the balancer - * @param response the response returned by the balancer + */ + default void preBalanceRSGroup(final ObserverContext ctx, + String groupName, BalanceRequest request) throws IOException { + } + + 
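All of the MasterObserver hooks touched by this hunk are default no-op methods, so an implementation only overrides the calls it needs. As a hedged illustration of the rsgroup hooks reformatted above, the following sketch (the class name and the naming policy are invented for this example and are not part of the patch) vetoes badly named groups in preAddRSGroup:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Hypothetical observer that rejects rsgroup names outside a site naming convention. */
public class RSGroupNamingObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    // Expose this class as the MasterObserver for the coprocessor host.
    return Optional.of(this);
  }

  @Override
  public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
    throws IOException {
    // Example policy only: allow lower-case alphanumeric group names.
    if (!name.matches("[a-z0-9_]+")) {
      throw new IOException("Rejected rsgroup name: " + name);
    }
  }
}

Such an observer would typically be loaded on the master through the hbase.coprocessor.master.classes property; throwing from a pre-hook aborts the corresponding admin operation.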
/** + * Called after a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param groupName group name + * @param request the request sent to the balancer + * @param response the response returned by the balancer */ default void postBalanceRSGroup(final ObserverContext ctx, - String groupName, BalanceRequest request, BalanceResponse response) throws IOException { + String groupName, BalanceRequest request, BalanceResponse response) throws IOException { } /** * Called before servers are removed from rsgroup - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param servers set of decommissioned servers to remove */ - default void preRemoveServers( - final ObserverContext ctx, - Set
<Address> servers) throws IOException {} + default void preRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, + Set
<Address> servers) throws IOException { + } /** * Called after servers are removed from rsgroup - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param servers set of servers to remove */ - default void postRemoveServers( - final ObserverContext<MasterCoprocessorEnvironment> ctx, - Set
<Address> servers) throws IOException {} + default void postRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, + Set<Address>
        servers) throws IOException { + } /** * Called before add a replication peer - * @param ctx the environment to interact with the framework and master - * @param peerId a short name that identifies the peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer */ default void preAddReplicationPeer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException {} + String peerId, ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called after add a replication peer - * @param ctx the environment to interact with the framework and master - * @param peerId a short name that identifies the peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer */ default void postAddReplicationPeer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException {} + String peerId, ReplicationPeerConfig peerConfig) throws IOException { + } /** - * Called before remove a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before remove a replication peer n * @param peerId a short name that identifies the peer */ default void preRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after remove a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after remove a replication peer n * @param peerId a short name that identifies the peer */ default void postRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called before enable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before enable a replication peer n * @param peerId a short name that identifies the peer */ default void preEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after enable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after enable a replication peer n * @param peerId a short name that identifies the peer */ default void postEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called before disable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before disable a replication peer n * @param peerId a short name that identifies the + * peer */ default void preDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after disable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after disable a replication peer n * @param peerId a short name that identifies the peer */ default void postDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called before get the configured ReplicationPeerConfig for the specified peer - * @param ctx - * @param peerId a short name that identifies 
the peer + * Called before get the configured ReplicationPeerConfig for the specified peer n * @param peerId + * a short name that identifies the peer */ default void preGetReplicationPeerConfig(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after get the configured ReplicationPeerConfig for the specified peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after get the configured ReplicationPeerConfig for the specified peer n * @param peerId + * a short name that identifies the peer */ - default void postGetReplicationPeerConfig( - final ObserverContext ctx, String peerId) throws IOException {} + default void postGetReplicationPeerConfig(final ObserverContext ctx, + String peerId) throws IOException { + } /** - * Called before update peerConfig for the specified peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before update peerConfig for the specified peer n * @param peerId a short name that + * identifies the peer */ default void preUpdateReplicationPeerConfig( - final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException {} + final ObserverContext ctx, String peerId, + ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called after update peerConfig for the specified peer - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param peerId a short name that identifies the peer */ default void postUpdateReplicationPeerConfig( - final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException {} + final ObserverContext ctx, String peerId, + ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called before list replication peers. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regex The regular expression to match peer id */ default void preListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException {} + String regex) throws IOException { + } /** * Called after list replication peers. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regex The regular expression to match peer id */ default void postListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException {} + String regex) throws IOException { + } /** * Called before new LockProcedure is queued. * @param ctx the environment to interact with the framework and master */ default void preRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException {} + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + } /** * Called after new LockProcedure is queued. * @param ctx the environment to interact with the framework and master */ default void postRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException {} + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + } /** * Called before heartbeat to a lock. 
* @param ctx the environment to interact with the framework and master */ - default void preLockHeartbeat(ObserverContext ctx, - TableName tn, String description) throws IOException {} + default void preLockHeartbeat(ObserverContext ctx, TableName tn, + String description) throws IOException { + } /** * Called after heartbeat to a lock. * @param ctx the environment to interact with the framework and master */ default void postLockHeartbeat(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before get cluster status. */ default void preGetClusterMetrics(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after get cluster status. */ default void postGetClusterMetrics(ObserverContext ctx, - ClusterMetrics status) throws IOException {} + ClusterMetrics status) throws IOException { + } /** * Called before clear dead region servers. */ default void preClearDeadServers(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after clear dead region servers. */ default void postClearDeadServers(ObserverContext ctx, - List servers, List notClearedServers) - throws IOException {} + List servers, List notClearedServers) throws IOException { + } /** * Called before decommission region servers. */ default void preDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException {} + List servers, boolean offload) throws IOException { + } /** * Called after decommission region servers. */ default void postDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException {} + List servers, boolean offload) throws IOException { + } /** * Called before list decommissioned region servers. */ default void preListDecommissionedRegionServers(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after list decommissioned region servers. */ - default void postListDecommissionedRegionServers(ObserverContext ctx) - throws IOException {} + default void postListDecommissionedRegionServers( + ObserverContext ctx) throws IOException { + } /** * Called before recommission region server. */ default void preRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException {} + ServerName server, List encodedRegionNames) throws IOException { + } /** * Called after recommission region server. */ default void postRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException {} + ServerName server, List encodedRegionNames) throws IOException { + } /** * Called before switching rpc throttle enabled state. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param enable the rpc throttle value */ default void preSwitchRpcThrottle(final ObserverContext ctx, - final boolean enable) throws IOException { + final boolean enable) throws IOException { } /** * Called after switching rpc throttle enabled state. 
- * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param oldValue the previously rpc throttle value * @param newValue the newly rpc throttle value */ default void postSwitchRpcThrottle(final ObserverContext ctx, - final boolean oldValue, final boolean newValue) throws IOException { + final boolean oldValue, final boolean newValue) throws IOException { } /** @@ -1592,104 +1626,104 @@ public interface MasterObserver { * @param ctx the coprocessor instance's environment */ default void preIsRpcThrottleEnabled(final ObserverContext ctx) - throws IOException { + throws IOException { } /** * Called after getting if is rpc throttle enabled. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param rpcThrottleEnabled the rpc throttle enabled value */ default void postIsRpcThrottleEnabled(final ObserverContext ctx, - final boolean rpcThrottleEnabled) throws IOException { + final boolean rpcThrottleEnabled) throws IOException { } /** * Called before switching exceed throttle quota state. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param enable the exceed throttle quota value */ default void preSwitchExceedThrottleQuota(final ObserverContext ctx, - final boolean enable) throws IOException { + final boolean enable) throws IOException { } /** * Called after switching exceed throttle quota state. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param oldValue the previously exceed throttle quota value * @param newValue the newly exceed throttle quota value */ default void postSwitchExceedThrottleQuota( - final ObserverContext ctx, final boolean oldValue, - final boolean newValue) throws IOException { + final ObserverContext ctx, final boolean oldValue, + final boolean newValue) throws IOException { } /** * Called before granting user permissions. - * @param ctx the coprocessor instance's environment - * @param userPermission the user and permissions + * @param ctx the coprocessor instance's environment + * @param userPermission the user and permissions * @param mergeExistingPermissions True if merge with previous granted permissions */ default void preGrant(ObserverContext ctx, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { } /** * Called after granting user permissions. - * @param ctx the coprocessor instance's environment - * @param userPermission the user and permissions + * @param ctx the coprocessor instance's environment + * @param userPermission the user and permissions * @param mergeExistingPermissions True if merge with previous granted permissions */ default void postGrant(ObserverContext ctx, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { } /** * Called before revoking user permissions. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param userPermission the user and permissions */ default void preRevoke(ObserverContext ctx, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { } /** * Called after revoking user permissions. 
- * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param userPermission the user and permissions */ default void postRevoke(ObserverContext ctx, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { } /** * Called before getting user permissions. - * @param ctx the coprocessor instance's environment - * @param userName the user name, null if get all user permissions + * @param ctx the coprocessor instance's environment + * @param userName the user name, null if get all user permissions * @param namespace the namespace, null if don't get namespace permission * @param tableName the table name, null if don't get table permission - * @param family the table column family, null if don't get table family permission + * @param family the table column family, null if don't get table family permission * @param qualifier the table column qualifier, null if don't get table qualifier permission * @throws IOException if something went wrong */ default void preGetUserPermissions(ObserverContext ctx, - String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) - throws IOException { + String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) + throws IOException { } /** * Called after getting user permissions. - * @param ctx the coprocessor instance's environment - * @param userName the user name, null if get all user permissions + * @param ctx the coprocessor instance's environment + * @param userName the user name, null if get all user permissions * @param namespace the namespace, null if don't get namespace permission * @param tableName the table name, null if don't get table permission - * @param family the table column family, null if don't get table family permission + * @param family the table column family, null if don't get table family permission * @param qualifier the table column qualifier, null if don't get table qualifier permission * @throws IOException if something went wrong */ default void postGetUserPermissions(ObserverContext ctx, - String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) - throws IOException { + String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) + throws IOException { } /* @@ -1699,56 +1733,58 @@ public interface MasterObserver { * @param permissions the permission list */ default void preHasUserPermissions(ObserverContext ctx, - String userName, List permissions) throws IOException { + String userName, List permissions) throws IOException { } /** * Called after checking if user has permissions. - * @param ctx the coprocessor instance's environment - * @param userName the user name + * @param ctx the coprocessor instance's environment + * @param userName the user name * @param permissions the permission list */ default void postHasUserPermissions(ObserverContext ctx, - String userName, List permissions) throws IOException { + String userName, List permissions) throws IOException { } /** * Called before rename rsgroup. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param oldName old rsgroup name * @param newName new rsgroup name * @throws IOException on failure */ default void preRenameRSGroup(final ObserverContext ctx, - final String oldName, final String newName) throws IOException { + final String oldName, final String newName) throws IOException { } /** * Called after rename rsgroup. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param oldName old rsgroup name * @param newName new rsgroup name * @throws IOException on failure */ default void postRenameRSGroup(final ObserverContext ctx, - final String oldName, final String newName) throws IOException { + final String oldName, final String newName) throws IOException { } /** * Called before update rsgroup config. - * @param ctx the environment to interact with the framework and master - * @param groupName the group name + * @param ctx the environment to interact with the framework and master + * @param groupName the group name * @param configuration new configuration of the group name to be set */ default void preUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) throws IOException {} + final String groupName, final Map configuration) throws IOException { + } /** * Called after update rsgroup config. - * @param ctx the environment to interact with the framework and master - * @param groupName the group name + * @param ctx the environment to interact with the framework and master + * @param groupName the group name * @param configuration new configuration of the group name to be set */ default void postUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) throws IOException {} + final String groupName, final Map configuration) throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java index 4acec8c0956..499f8e4e31f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -40,6 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LossyCounting; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** @@ -61,15 +60,14 @@ public class MetaTableMetrics implements RegionCoprocessor { private Set metrics = ConcurrentHashMap.newKeySet(); enum MetaTableOps { - GET, PUT, DELETE, + GET, + PUT, + DELETE, } private ImmutableMap, MetaTableOps> opsNameMap = - ImmutableMap., MetaTableOps>builder() - .put(Put.class, MetaTableOps.PUT) - .put(Get.class, MetaTableOps.GET) - .put(Delete.class, MetaTableOps.DELETE) - .build(); + ImmutableMap., MetaTableOps> builder().put(Put.class, MetaTableOps.PUT) + .put(Get.class, MetaTableOps.GET).put(Delete.class, MetaTableOps.DELETE).build(); class ExampleRegionObserverMeta implements RegionCoprocessor, RegionObserver { @@ -80,23 +78,23 @@ public class MetaTableMetrics implements RegionCoprocessor { @Override public void preGetOp(ObserverContext e, Get get, - List results) throws IOException { + List results) throws IOException { registerAndMarkMetrics(e, get); } @Override public void prePut(ObserverContext e, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { registerAndMarkMetrics(e, put); } @Override public void preDelete(ObserverContext e, Delete delete, - WALEdit edit, Durability durability) { + WALEdit edit, Durability durability) { registerAndMarkMetrics(e, delete); } - private void registerAndMarkMetrics(ObserverContext e, Row row){ + private void registerAndMarkMetrics(ObserverContext e, Row row) { if (!active || !isMetaTableOp(e)) { return; } @@ -122,7 +120,7 @@ public class MetaTableMetrics implements RegionCoprocessor { /** * Get regionId from Ops such as: get, put, delete. - * @param op such as get, put or delete. + * @param op such as get, put or delete. 
*/ private String getRegionIdFromOp(Row op) { final String tableRowKey = Bytes.toString(op.getRow()); @@ -134,8 +132,7 @@ public class MetaTableMetrics implements RegionCoprocessor { } private boolean isMetaTableOp(ObserverContext e) { - return TableName.META_TABLE_NAME - .equals(e.getEnvironment().getRegionInfo().getTable()); + return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable()); } private void clientMetricRegisterAndMark() { @@ -193,7 +190,7 @@ public class MetaTableMetrics implements RegionCoprocessor { if (requestMeter.isEmpty()) { return; } - if(!registry.get(requestMeter).isPresent()){ + if (!registry.get(requestMeter).isPresent()) { metrics.add(requestMeter); } registry.meter(requestMeter).mark(); @@ -266,10 +263,12 @@ public class MetaTableMetrics implements RegionCoprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { observer = new ExampleRegionObserverMeta(); - if (env instanceof RegionCoprocessorEnvironment + if ( + env instanceof RegionCoprocessorEnvironment && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() - .equals(TableName.META_TABLE_NAME)) { + .equals(TableName.META_TABLE_NAME) + ) { RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env; registry = regionCoprocessorEnv.getMetricRegistryForRegionServer(); LossyCounting.LossyCountingListener listener = key -> { @@ -287,7 +286,7 @@ public class MetaTableMetrics implements RegionCoprocessor { @Override public void stop(CoprocessorEnvironment env) throws IOException { // since meta region can move around, clear stale metrics when stop. - for(String metric:metrics){ + for (String metric : metrics) { registry.remove(metric); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java index a77a0fe31f0..2ecf63263e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility class for tracking metrics for various types of coprocessors. 
Each coprocessor instance @@ -36,49 +33,42 @@ public class MetricsCoprocessor { // Master coprocessor metrics private static final String MASTER_COPROC_METRICS_NAME = "Coprocessor.Master"; private static final String MASTER_COPROC_METRICS_CONTEXT = "master"; - private static final String MASTER_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase MasterObservers"; - private static final String MASTER_COPROC_METRICS_JMX_CONTEXT - = "Master,sub=" + MASTER_COPROC_METRICS_NAME; + private static final String MASTER_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase MasterObservers"; + private static final String MASTER_COPROC_METRICS_JMX_CONTEXT = + "Master,sub=" + MASTER_COPROC_METRICS_NAME; // RegionServer coprocessor metrics private static final String RS_COPROC_METRICS_NAME = "Coprocessor.RegionServer"; private static final String RS_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String RS_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase RegionServerObservers"; - private static final String RS_COPROC_METRICS_JMX_CONTEXT = "RegionServer,sub=" - + RS_COPROC_METRICS_NAME; + private static final String RS_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase RegionServerObservers"; + private static final String RS_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + RS_COPROC_METRICS_NAME; // Region coprocessor metrics private static final String REGION_COPROC_METRICS_NAME = "Coprocessor.Region"; private static final String REGION_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String REGION_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase RegionObservers"; - private static final String REGION_COPROC_METRICS_JMX_CONTEXT - = "RegionServer,sub=" + REGION_COPROC_METRICS_NAME; + private static final String REGION_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase RegionObservers"; + private static final String REGION_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + REGION_COPROC_METRICS_NAME; // WAL coprocessor metrics private static final String WAL_COPROC_METRICS_NAME = "Coprocessor.WAL"; private static final String WAL_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String WAL_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase WALObservers"; - private static final String WAL_COPROC_METRICS_JMX_CONTEXT - = "RegionServer,sub=" + WAL_COPROC_METRICS_NAME; + private static final String WAL_COPROC_METRICS_DESCRIPTION = "Metrics about HBase WALObservers"; + private static final String WAL_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + WAL_COPROC_METRICS_NAME; private static String suffix(String metricName, String cpName) { - return new StringBuilder(metricName) - .append(".") - .append("CP_") - .append(cpName) - .toString(); + return new StringBuilder(metricName).append(".").append("CP_").append(cpName).toString(); } static MetricRegistryInfo createRegistryInfoForMasterCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(MASTER_COPROC_METRICS_NAME, clazz), - MASTER_COPROC_METRICS_DESCRIPTION, - suffix(MASTER_COPROC_METRICS_JMX_CONTEXT, clazz), - MASTER_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(MASTER_COPROC_METRICS_NAME, clazz), + MASTER_COPROC_METRICS_DESCRIPTION, suffix(MASTER_COPROC_METRICS_JMX_CONTEXT, clazz), + MASTER_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForMasterCoprocessor(String clazz) { @@ -86,11 +76,9 @@ public class MetricsCoprocessor { } static MetricRegistryInfo createRegistryInfoForRSCoprocessor(String clazz) { - return new 
MetricRegistryInfo( - suffix(RS_COPROC_METRICS_NAME, clazz), - RS_COPROC_METRICS_DESCRIPTION, - suffix(RS_COPROC_METRICS_JMX_CONTEXT, clazz), - RS_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(RS_COPROC_METRICS_NAME, clazz), + RS_COPROC_METRICS_DESCRIPTION, suffix(RS_COPROC_METRICS_JMX_CONTEXT, clazz), + RS_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForRSCoprocessor(String clazz) { @@ -98,11 +86,9 @@ public class MetricsCoprocessor { } public static MetricRegistryInfo createRegistryInfoForRegionCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(REGION_COPROC_METRICS_NAME, clazz), - REGION_COPROC_METRICS_DESCRIPTION, - suffix(REGION_COPROC_METRICS_JMX_CONTEXT, clazz), - REGION_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(REGION_COPROC_METRICS_NAME, clazz), + REGION_COPROC_METRICS_DESCRIPTION, suffix(REGION_COPROC_METRICS_JMX_CONTEXT, clazz), + REGION_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForRegionCoprocessor(String clazz) { @@ -110,11 +96,9 @@ public class MetricsCoprocessor { } public static MetricRegistryInfo createRegistryInfoForWALCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(WAL_COPROC_METRICS_NAME, clazz), - WAL_COPROC_METRICS_DESCRIPTION, - suffix(WAL_COPROC_METRICS_JMX_CONTEXT, clazz), - WAL_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(WAL_COPROC_METRICS_NAME, clazz), + WAL_COPROC_METRICS_DESCRIPTION, suffix(WAL_COPROC_METRICS_JMX_CONTEXT, clazz), + WAL_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForWALCoprocessor(String clazz) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java index bdedb9e4840..4efc610f72d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.coprocessor; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -24,7 +27,6 @@ import java.util.Collections; import java.util.List; import java.util.SortedSet; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -56,24 +58,15 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - /** * This class implements atomic multi row transactions using * {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} and Coprocessor - * endpoints. We can also specify some conditions to perform conditional update. - * - * Defines a protocol to perform multi row transactions. - * See {@link MultiRowMutationEndpoint} for the implementation. + * endpoints. We can also specify some conditions to perform conditional update. Defines a protocol + * to perform multi row transactions. See {@link MultiRowMutationEndpoint} for the implementation. *
        - * See - * {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} - * for details and limitations. - *
        - * Example: - * + * See {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} for details and + * limitations.
        + * Example: * Put p = new Put(row1); * Delete d = new Delete(row2); * Increment i = new Increment(row3); @@ -113,7 +106,7 @@ public class MultiRowMutationEndpoint extends MultiRowMutationService implements @Override public void mutateRows(RpcController controller, MutateRowsRequest request, - RpcCallback done) { + RpcCallback done) { boolean matches = true; List rowLocks = null; try { @@ -131,8 +124,7 @@ public class MultiRowMutationEndpoint extends MultiRowMutationService implements for (Mutation m : mutations) { // check whether rows are in range for this region if (!HRegion.rowIsInRange(regionInfo, m.getRow())) { - String msg = "Requested row out of range '" - + Bytes.toStringBinary(m.getRow()) + "'"; + String msg = "Requested row out of range '" + Bytes.toStringBinary(m.getRow()) + "'"; if (rowsToLock.isEmpty()) { // if this is the first row, region might have moved, // allow client to retry @@ -208,8 +200,9 @@ public class MultiRowMutationEndpoint extends MultiRowMutationService implements comparator = ProtobufUtil.toComparator(condition.getComparator()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = condition.hasTimeRange() + ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); Get get = new Get(row); if (family != null) { @@ -251,9 +244,8 @@ public class MultiRowMutationEndpoint extends MultiRowMutationService implements private void checkFamily(Region region, byte[] family) throws NoSuchColumnFamilyException { if (!region.getTableDescriptor().hasColumnFamily(family)) { - throw new NoSuchColumnFamilyException( - "Column family " + Bytes.toString(family) + " does not exist in region " + this - + " in table " + region.getTableDescriptor()); + throw new NoSuchColumnFamilyException("Column family " + Bytes.toString(family) + + " does not exist in region " + this + " in table " + region.getTableDescriptor()); } } @@ -284,17 +276,17 @@ public class MultiRowMutationEndpoint extends MultiRowMutationService implements /** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. 
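The reflowed class javadoc above keeps the original client-side example for this endpoint. Expanded into a compilable sketch it looks roughly like the following; the table, family, qualifier and row names are placeholders, and the non-shaded protobuf stubs that this endpoint is generated against are assumed to be on the classpath:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.util.Bytes;

/** Atomically applies a Put and a Delete to two rows hosted by the same region. */
public class MultiRowMutationClientSketch {
  public static void mutateTwoRows(Connection conn) throws Exception {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    Put p = new Put(row1).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    Delete d = new Delete(row2);

    // Bundle both mutations into one endpoint request.
    MutateRowsRequest.Builder mrm = MutateRowsRequest.newBuilder();
    mrm.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, p));
    mrm.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, d));

    try (Table table = conn.getTable(TableName.valueOf("example_table"))) {
      // The endpoint runs in the region that hosts row1; row2 must live in the same region.
      CoprocessorRpcChannel channel = table.coprocessorService(row1);
      MultiRowMutationService.BlockingInterface service =
        MultiRowMutationService.newBlockingStub(channel);
      service.mutateRows(null, mrm.build());
    }
  }
}

This mirrors the javadoc example; the only design point worth underlining is the one the javadoc also makes, namely that every row in the request must be in range for the single region the endpoint is invoked on.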
* @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} + * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java index c756926fb21..c0fd791bcef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java @@ -17,23 +17,22 @@ */ package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - /** * Carries the execution state for a given invocation of an Observer coprocessor - * ({@link RegionObserver}, {@link MasterObserver}, or {@link WALObserver}) - * method. The same ObserverContext instance is passed sequentially to all loaded - * coprocessors for a given Observer method trigger, with the - * CoprocessorEnvironment reference set appropriately for each Coprocessor type: - * e.g. the RegionCoprocessorEnvironment is passed to RegionCoprocessors, and so on. - * @param The {@link CoprocessorEnvironment} subclass applicable to the - * revelant Observer interface. + * ({@link RegionObserver}, {@link MasterObserver}, or {@link WALObserver}) method. The same + * ObserverContext instance is passed sequentially to all loaded coprocessors for a given Observer + * method trigger, with the CoprocessorEnvironment reference set appropriately for each + * Coprocessor type: e.g. the RegionCoprocessorEnvironment is passed to RegionCoprocessors, and so + * on. + * @param The {@link CoprocessorEnvironment} subclass applicable to the revelant Observer + * interface. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -47,29 +46,29 @@ public interface ObserverContext { * Coprocessor invocations, only on a small subset of methods, mostly preXXX calls in * RegionObserver. Check javadoc on the pertinent Coprocessor Observer to see if * bypass is supported. - *

        This behavior of honoring only a subset of methods is new since hbase-2.0.0. - *

        Where bypass is supported what is being bypassed is all of the core code - * implementing the remainder of the operation. In order to understand what - * calling bypass() will skip, a coprocessor implementer should read and - * understand all of the remaining code and its nuances. Although this - * is good practice for coprocessor developers in general, it demands a lot. - * What is skipped is extremely version dependent. The core code will vary, perhaps significantly, - * even between point releases. We do not provide the promise of consistent behavior even between - * point releases for the bypass semantic. To achieve - * that we could not change any code between hook points. Therefore the - * coprocessor implementer becomes an HBase core developer in practice as soon - * as they rely on bypass(). Every release of HBase may break the assumption - * that the replacement for the bypassed code takes care of all necessary - * skipped concerns. Because those concerns can change at any point, such an - * assumption is never safe.

        - *

        As of hbase2, when bypass has been set, we will NOT call any Coprocessors follow the - * bypassing Coprocessor; we cut short the processing and return the bypassing Coprocessors - * response (this used be a separate 'complete' option that has been folded into the - * 'bypass' in hbase2.

        + *

        + * This behavior of honoring only a subset of methods is new since hbase-2.0.0. + *

        + * Where bypass is supported what is being bypassed is all of the core code implementing the + * remainder of the operation. In order to understand what calling bypass() will skip, a + * coprocessor implementer should read and understand all of the remaining code and its nuances. + * Although this is good practice for coprocessor developers in general, it demands a lot. What is + * skipped is extremely version dependent. The core code will vary, perhaps significantly, even + * between point releases. We do not provide the promise of consistent behavior even between point + * releases for the bypass semantic. To achieve that we could not change any code between hook + * points. Therefore the coprocessor implementer becomes an HBase core developer in practice as + * soon as they rely on bypass(). Every release of HBase may break the assumption that the + * replacement for the bypassed code takes care of all necessary skipped concerns. Because those + * concerns can change at any point, such an assumption is never safe. + *

        + *

        + * As of hbase2, when bypass has been set, we will NOT call any Coprocessors follow the bypassing + * Coprocessor; we cut short the processing and return the bypassing Coprocessors response (this + * used be a separate 'complete' option that has been folded into the 'bypass' in hbase2. + *

        */ void bypass(); - /** * Returns the active user for the coprocessor call. If an explicit {@code User} instance was * provided to the constructor, that will be returned, otherwise if we are in the context of an diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java index b5370db9a9d..85cdbbf0689 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java @@ -69,7 +69,7 @@ public class ObserverContextImpl implements Ob /** * @return {@code true}, if {@link ObserverContext#bypass()} was called by one of the loaded - * coprocessors, {@code false} otherwise. + * coprocessors, {@code false} otherwise. */ public boolean shouldBypass() { if (!isBypassable()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java index d7705ef25b7..4b85cba2940 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java @@ -34,12 +34,10 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.yetus.audience.InterfaceAudience; - /** * Wraps a Configuration to make it read-only. */ @@ -292,7 +290,7 @@ public class ReadOnlyConfiguration extends Configuration { @Override public InetSocketAddress getSocketAddr(String hostProperty, String addressProperty, - String defaultAddressValue, int defaultPort) { + String defaultAddressValue, int defaultPort) { return conf.getSocketAddr(hostProperty, addressProperty, defaultAddressValue, defaultPort); } @@ -308,7 +306,7 @@ public class ReadOnlyConfiguration extends Configuration { @Override public InetSocketAddress updateConnectAddr(String hostProperty, String addressProperty, - String defaultAddressValue, InetSocketAddress addr) { + String defaultAddressValue, InetSocketAddress addr) { throw new UnsupportedOperationException("Read-only Configuration"); } @@ -339,7 +337,7 @@ public class ReadOnlyConfiguration extends Configuration { @Override public Class getClass(String name, Class defaultValue, - Class xface) { + Class xface) { return conf.getClass(name, defaultValue, xface); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java index 16c6d399040..15d6cba0de2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. 
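Tying the reflowed bypass() javadoc above to code: a hook that supports bypass simply calls ctx.bypass() and takes responsibility for the whole operation itself, after which neither the remaining coprocessors nor the core code run. A minimal sketch against the RegionObserver prePut hook follows; the "quarantine:" row prefix and the class name are invented for illustration, and whether a given hook honors bypass must be checked against that hook's javadoc as described above:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

/** Hypothetical observer that silently drops writes under a quarantined row prefix. */
public class QuarantineObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
    Durability durability) throws IOException {
    // Example policy only: skip the core put for rows under the "quarantine:" prefix.
    if (Bytes.startsWith(put.getRow(), Bytes.toBytes("quarantine:"))) {
      // bypass() short-circuits the remaining coprocessors and the core code for this hook;
      // as the javadoc above stresses, it is honored only by the hooks that document support.
      c.bypass();
    }
  }
}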
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index 162b41cfdb4..97d572d3727 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -111,14 +109,15 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleRegionObserverWithMetrics class in the hbase-examples modules to see examples of how - * metrics can be instantiated and used.

        + * metrics tracked at this level will be shared by all the coprocessor instances of the same class + * in the same region server process. Note that there will be one region coprocessor environment + * per region in the server, but all of these instances will share the same MetricRegistry. The + * metric instances (like Counter, Timer, etc) will also be shared among all of the region + * coprocessor instances. + *

        + * See ExampleRegionObserverWithMetrics class in the hbase-examples modules to see examples of how + * metrics can be instantiated and used. + *

        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ // Note: we are not exposing getMetricRegistryForRegion(). per-region metrics are already costly diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index a4f3f08261d..be382d3a55c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -7,23 +7,20 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -106,158 +103,171 @@ import org.apache.yetus.audience.InterfaceStability; public interface RegionObserver { /** Mutation type for postMutationBeforeWAL hook */ enum MutationType { - APPEND, INCREMENT + APPEND, + INCREMENT } /** * Called before the region is reported as open to the master. * @param c the environment provided by the region server */ - default void preOpen(ObserverContext c) throws IOException {} + default void preOpen(ObserverContext c) throws IOException { + } /** * Called after the region is reported as open to the master. * @param c the environment provided by the region server */ - default void postOpen(ObserverContext c) {} + default void postOpen(ObserverContext c) { + } /** * Called before the memstore is flushed to disk. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param tracker tracker used to track the life cycle of a flush */ default void preFlush(final ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException {} + FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before we open store scanner for flush. You can use the {@code options} to change max * versions and TTL for the scanner being opened. 
- * @param c the environment provided by the region server - * @param store the store where flush is being requested + * @param c the environment provided by the region server + * @param store the store where flush is being requested * @param options used to change max versions and TTL for the scanner being opened */ default void preFlushScannerOpen(ObserverContext c, Store store, - ScanOptions options,FlushLifeCycleTracker tracker) throws IOException {} + ScanOptions options, FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before a Store's memstore is flushed to disk. - * @param c the environment provided by the region server - * @param store the store where flush is being requested + * @param c the environment provided by the region server + * @param store the store where flush is being requested * @param scanner the scanner over existing data used in the memstore * @param tracker tracker used to track the life cycle of a flush * @return the scanner to use during flush. Should not be {@code null} unless the implementation * is writing new store files on its own. */ default InternalScanner preFlush(ObserverContext c, Store store, - InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException { + InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException { return scanner; } /** * Called after the memstore is flushed to disk. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param tracker tracker used to track the life cycle of a flush * @throws IOException if an error occurred on the coprocessor */ default void postFlush(ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException {} + FlushLifeCycleTracker tracker) throws IOException { + } /** * Called after a Store's memstore is flushed to disk. - * @param c the environment provided by the region server - * @param store the store being flushed + * @param c the environment provided by the region server + * @param store the store being flushed * @param resultFile the new store file written out during compaction - * @param tracker tracker used to track the life cycle of a flush + * @param tracker tracker used to track the life cycle of a flush */ default void postFlush(ObserverContext c, Store store, - StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException {} + StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before in memory compaction started. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param store the store where in memory compaction is being requested */ default void preMemStoreCompaction(ObserverContext c, Store store) - throws IOException {} + throws IOException { + } /** * Called before we open store scanner for in memory compaction. You can use the {@code options} * to change max versions and TTL for the scanner being opened. Notice that this method will only * be called when you use {@code eager} mode. For {@code basic} mode we will not drop any cells * thus we do not open a store scanner. 
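Editor's note: a hedged sketch of the two pre*ScannerOpen hooks documented above, widening what the flush (and eager in-memory compaction) scanner is allowed to keep. It assumes ScanOptions exposes setMaxVersions(int) and setTTL(long) as the javadoc implies; the class name is illustrative.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.ScanOptions;
import org.apache.hadoop.hbase.regionserver.Store;

public class KeepEverythingOnFlushObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
    // Keep every version and never expire cells while writing the flush file.
    options.setMaxVersions(Integer.MAX_VALUE);
    options.setTTL(HConstants.FOREVER);
  }

  @Override
  public void preMemStoreCompactionCompactScannerOpen(
    ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
    throws IOException {
    // Same policy for eager in-memory compaction (the only mode where this hook fires).
    options.setMaxVersions(Integer.MAX_VALUE);
    options.setTTL(HConstants.FOREVER);
  }
}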
- * @param c the environment provided by the region server - * @param store the store where in memory compaction is being requested + * @param c the environment provided by the region server + * @param store the store where in memory compaction is being requested * @param options used to change max versions and TTL for the scanner being opened */ default void preMemStoreCompactionCompactScannerOpen( - ObserverContext c, Store store, ScanOptions options) - throws IOException {} + ObserverContext c, Store store, ScanOptions options) + throws IOException { + } /** * Called before we do in memory compaction. Notice that this method will only be called when you * use {@code eager} mode. For {@code basic} mode we will not drop any cells thus there is no * {@link InternalScanner}. - * @param c the environment provided by the region server - * @param store the store where in memory compaction is being executed + * @param c the environment provided by the region server + * @param store the store where in memory compaction is being executed * @param scanner the scanner over existing data used in the memstore segments being compact * @return the scanner to use during in memory compaction. Must be non-null. */ default InternalScanner preMemStoreCompactionCompact( - ObserverContext c, Store store, InternalScanner scanner) - throws IOException { + ObserverContext c, Store store, InternalScanner scanner) + throws IOException { return scanner; } /** * Called after the in memory compaction is finished. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param store the store where in memory compaction is being executed */ default void postMemStoreCompaction(ObserverContext c, Store store) - throws IOException {} + throws IOException { + } /** * Called prior to selecting the {@link StoreFile StoreFiles} to compact from the list of * available candidates. To alter the files used for compaction, you may mutate the passed in list * of candidates. If you remove all the candidates then the compaction will be canceled. - *

        Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed - * the passed in candidates. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server - * @param store the store where compaction is being requested + *

        + * Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed the + * passed in candidates. If 'bypass' is set, we skip out on calling any subsequent + * chained coprocessors. + * @param c the environment provided by the region server + * @param store the store where compaction is being requested * @param candidates the store files currently available for compaction - * @param tracker tracker used to track the life cycle of a compaction + * @param tracker tracker used to track the life cycle of a compaction */ default void preCompactSelection(ObserverContext c, Store store, - List candidates, CompactionLifeCycleTracker tracker) - throws IOException {} + List candidates, CompactionLifeCycleTracker tracker) throws IOException { + } /** * Called after the {@link StoreFile}s to compact have been selected from the available * candidates. - * @param c the environment provided by the region server - * @param store the store being compacted + * @param c the environment provided by the region server + * @param store the store being compacted * @param selected the store files selected to compact - * @param tracker tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void postCompactSelection(ObserverContext c, Store store, - List selected, CompactionLifeCycleTracker tracker, - CompactionRequest request) {} + List selected, CompactionLifeCycleTracker tracker, + CompactionRequest request) { + } /** - * Called before we open store scanner for compaction. You can use the {@code options} to change max - * versions and TTL for the scanner being opened. - * @param c the environment provided by the region server - * @param store the store being compacted + * Called before we open store scanner for compaction. You can use the {@code options} to change + * max versions and TTL for the scanner being opened. + * @param c the environment provided by the region server + * @param store the store being compacted * @param scanType type of Scan - * @param options used to change max versions and TTL for the scanner being opened - * @param tracker tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param options used to change max versions and TTL for the scanner being opened + * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void preCompactScannerOpen(ObserverContext c, Store store, - ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException {} + ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { + } /** * Called prior to writing the {@link StoreFile}s selected for compaction into a new @@ -267,130 +277,135 @@ public interface RegionObserver { * {@link InternalScanner} with a custom implementation that is returned from this method. The * custom scanner can then inspect {@link org.apache.hadoop.hbase.Cell}s from the wrapped scanner, * applying its own policy to what gets written. 
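Editor's note: a hedged sketch of the preCompact pattern described above. The compaction scanner is wrapped so the coprocessor can inspect, and here drop, cells before they are written into the new store file. The "doomed" qualifier and class name are illustrative assumptions.

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;

public class DropDoomedCellsObserver implements RegionCoprocessor, RegionObserver {

  private static final byte[] DOOMED = Bytes.toBytes("doomed");

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
    // Delegate to the original scanner, then filter what it produced for this batch.
    return new InternalScanner() {
      @Override
      public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
        boolean more = scanner.next(result, scannerContext);
        result.removeIf(cell -> Bytes.equals(CellUtil.cloneQualifier(cell), DOOMED));
        return more;
      }

      @Override
      public void close() throws IOException {
        scanner.close();
      }
    };
  }
}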
- * @param c the environment provided by the region server - * @param store the store being compacted - * @param scanner the scanner over existing data used in the store file rewriting + * @param c the environment provided by the region server + * @param store the store being compacted + * @param scanner the scanner over existing data used in the store file rewriting * @param scanType type of Scan - * @param tracker tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction * @return the scanner to use during compaction. Should not be {@code null} unless the * implementation is writing new store files on its own. */ default InternalScanner preCompact(ObserverContext c, Store store, - InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { + InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { return scanner; } /** * Called after compaction has completed and the new store file has been moved in to place. - * @param c the environment provided by the region server - * @param store the store being compacted + * @param c the environment provided by the region server + * @param store the store being compacted * @param resultFile the new store file written out during compaction - * @param tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void postCompact(ObserverContext c, Store store, - StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) - throws IOException {} + StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) + throws IOException { + } /** * Called before the region is reported as closed to the master. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param abortRequested true if the region server is aborting */ default void preClose(ObserverContext c, boolean abortRequested) - throws IOException {} + throws IOException { + } /** * Called after the region is reported as closed to the master. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param abortRequested true if the region server is aborting */ - default void postClose(ObserverContext c, boolean abortRequested) {} + default void postClose(ObserverContext c, boolean abortRequested) { + } /** * Called before the client performs a Get *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server - * @param get the Get request - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be used if default processing - * is not bypassed. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. + * @param c the environment provided by the region server + * @param get the Get request + * @param result The result to return to the client if default processing is bypassed. Can be + * modified. Will not be used if default processing is not bypassed. */ default void preGetOp(ObserverContext c, Get get, List result) - throws IOException {} + throws IOException { + } /** * Called after the client performs a Get *

        - * Note: Do not retain references to any Cells in 'result' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param get the Get request + * Note: Do not retain references to any Cells in 'result' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param get the Get request * @param result the result to return to the client, modify as necessary */ default void postGetOp(ObserverContext c, Get get, - List result) throws IOException {} + List result) throws IOException { + } /** * Called before the client tests for existence using a Get. *
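Editor's note: a hedged sketch of preGetOp with bypass, as documented above. For one synthetic row the coprocessor fills the result list itself and skips the default read path; the sentinel row, family, qualifier, and value are illustrative assumptions.

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;

public class SyntheticRowObserver implements RegionCoprocessor, RegionObserver {

  private static final byte[] SENTINEL_ROW = Bytes.toBytes("!status");

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
    List<Cell> result) throws IOException {
    if (Bytes.equals(get.getRow(), SENTINEL_ROW)) {
      result.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(SENTINEL_ROW)
        .setFamily(Bytes.toBytes("info"))
        .setQualifier(Bytes.toBytes("state"))
        .setTimestamp(System.currentTimeMillis())
        .setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("OK"))
        .build());
      // Skip the default read and any remaining coprocessors in the chain.
      c.bypass();
    }
  }
}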

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server - * @param get the Get request + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. + * @param c the environment provided by the region server + * @param get the Get request * @param exists the result returned by the region server * @return the value to return to the client if bypassing default processing */ default boolean preExists(ObserverContext c, Get get, - boolean exists) throws IOException { + boolean exists) throws IOException { return exists; } /** * Called after the client tests for existence using a Get. - * @param c the environment provided by the region server - * @param get the Get request + * @param c the environment provided by the region server + * @param get the Get request * @param exists the result returned by the region server * @return the result to return to the client */ default boolean postExists(ObserverContext c, Get get, - boolean exists) throws IOException { + boolean exists) throws IOException { return exists; } /** * Called before the client stores a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object - * @param edit The WALEdit object that will be written to the wal + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object + * @param edit The WALEdit object that will be written to the wal * @param durability Persistence guarantee for this Put * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #prePut(ObserverContext, Put, WALEdit)} instead. + * {@link #prePut(ObserverContext, Put, WALEdit)} instead. */ @Deprecated default void prePut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException {} + Durability durability) throws IOException { + } /** * Called before the client stores a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object * @param edit The WALEdit object that will be written to the wal */ default void prePut(ObserverContext c, Put put, WALEdit edit) @@ -401,26 +416,27 @@ public interface RegionObserver { /** * Called after the client stores a value. *
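Editor's note: a hedged sketch of the non-deprecated prePut(ObserverContext, Put, WALEdit) overload that the deprecation notes above point to. It rejects client writes that touch a reserved column; the family and qualifier names are illustrative assumptions.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class ReservedColumnGuard implements RegionCoprocessor, RegionObserver {

  private static final byte[] FAMILY = Bytes.toBytes("meta");
  private static final byte[] RESERVED = Bytes.toBytes("_internal");

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit)
    throws IOException {
    // Throwing aborts this mutation and the error is reported back to the client.
    if (put.has(FAMILY, RESERVED)) {
      throw new DoNotRetryIOException("Column meta:_internal is managed by the server");
    }
  }
}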

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object - * @param edit The WALEdit object for the wal + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object + * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Put * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #postPut(ObserverContext, Put, WALEdit)} instead. + * {@link #postPut(ObserverContext, Put, WALEdit)} instead. */ @Deprecated default void postPut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException {} + Durability durability) throws IOException { + } /** * Called after the client stores a value. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object * @param edit The WALEdit object for the wal */ default void postPut(ObserverContext c, Put put, WALEdit edit) @@ -431,33 +447,34 @@ public interface RegionObserver { /** * Called before the client deletes a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param delete The Delete object - * @param edit The WALEdit object for the wal + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param delete The Delete object + * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Delete * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #preDelete(ObserverContext, Delete, WALEdit)} instead. + * {@link #preDelete(ObserverContext, Delete, WALEdit)} instead. */ @Deprecated default void preDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException {} + WALEdit edit, Durability durability) throws IOException { + } /** * Called before the client deletes a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param delete The Delete object - * @param edit The WALEdit object for the wal + * @param edit The WALEdit object for the wal */ default void preDelete(ObserverContext c, Delete delete, WALEdit edit) throws IOException { @@ -467,45 +484,47 @@ public interface RegionObserver { /** * Called before the server updates the timestamp for version delete with latest timestamp. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. + * @param c the environment provided by the region server * @param mutation - the parent mutation associated with this delete cell - * @param cell - The deleteColumn with latest version cell - * @param byteNow - timestamp bytes - * @param get - the get formed using the current cell's row. Note that the get does not specify - * the family and qualifier - * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced - * with something that doesn't expose IntefaceAudience.Private classes. + * @param cell - The deleteColumn with latest version cell + * @param byteNow - timestamp bytes + * @param get - the get formed using the current cell's row. Note that the get does not + * specify the family and qualifier + * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with + * something that doesn't expose IntefaceAudience.Private classes. */ @Deprecated default void prePrepareTimeStampForDeleteVersion(ObserverContext c, - Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException {} + Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException { + } /** * Called after the client deletes a value. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param delete The Delete object - * @param edit The WALEdit object for the wal + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param delete The Delete object + * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Delete * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #postDelete(ObserverContext, Delete, WALEdit)} instead. + * {@link #postDelete(ObserverContext, Delete, WALEdit)} instead. */ @Deprecated default void postDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException {} + WALEdit edit, Durability durability) throws IOException { + } /** * Called after the client deletes a value. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param delete The Delete object - * @param edit The WALEdit object for the wal + * @param edit The WALEdit object for the wal */ default void postDelete(ObserverContext c, Delete delete, WALEdit edit) throws IOException { @@ -515,113 +534,113 @@ public interface RegionObserver { /** * This will be called for every batch mutation operation happening at the server. This will be * called after acquiring the locks on the mutating rows and after applying the proper timestamp - * for each Mutation at the server. The batch may contain Put/Delete/Increment/Append. By - * setting OperationStatus of Mutations + * for each Mutation at the server. The batch may contain Put/Delete/Increment/Append. By setting + * OperationStatus of Mutations * ({@link MiniBatchOperationInProgress#setOperationStatus(int, OperationStatus)}), * {@link RegionObserver} can make Region to skip these Mutations. *

        - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param miniBatchOp batch of Mutations getting applied to region. */ default void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException {} + MiniBatchOperationInProgress miniBatchOp) throws IOException { + } /** - * This will be called after applying a batch of Mutations on a region. The Mutations are added - * to memstore and WAL. The difference of this one with - * {@link #postPut(ObserverContext, Put, WALEdit)} - * and {@link #postDelete(ObserverContext, Delete, WALEdit)} - * and {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} - * and {@link #postAppend(ObserverContext, Append, Result, WALEdit)} is - * this hook will be executed before the mvcc transaction completion. + * This will be called after applying a batch of Mutations on a region. The Mutations are added to + * memstore and WAL. The difference of this one with + * {@link #postPut(ObserverContext, Put, WALEdit)} and + * {@link #postDelete(ObserverContext, Delete, WALEdit)} and + * {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} and + * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} is this hook will be executed + * before the mvcc transaction completion. *
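Editor's note: a hedged sketch of the preBatchMutate pattern described above, marking individual mutations with an OperationStatus so the region skips them. The choice of OperationStatus.SUCCESS as the "treat as already applied" status, and the row prefix, are assumptions to verify against the running version.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipShadowRowsObserver implements RegionCoprocessor, RegionObserver {

  private static final byte[] SHADOW_PREFIX = Bytes.toBytes("shadow:");

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    for (int i = 0; i < miniBatchOp.size(); i++) {
      Mutation m = miniBatchOp.getOperation(i);
      if (Bytes.startsWith(m.getRow(), SHADOW_PREFIX)) {
        // Mark as handled so the region does not apply this mutation itself.
        miniBatchOp.setOperationStatus(i, OperationStatus.SUCCESS);
      }
    }
  }
}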

        - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param miniBatchOp batch of Mutations applied to region. Coprocessors are discouraged from * manipulating its state. */ // Coprocessors can do a form of bypass by changing state in miniBatchOp. default void postBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException {} + MiniBatchOperationInProgress miniBatchOp) throws IOException { + } /** * This will be called for region operations where read lock is acquired in - * {@link Region#startRegionOperation()}. - * @param ctx - * @param operation The operation is about to be taken on the region + * {@link Region#startRegionOperation()}. n * @param operation The operation is about to be taken + * on the region */ default void postStartRegionOperation(ObserverContext ctx, - Operation operation) throws IOException {} + Operation operation) throws IOException { + } /** - * Called after releasing read lock in {@link Region#closeRegionOperation()}. - * @param ctx - * @param operation + * Called after releasing read lock in {@link Region#closeRegionOperation()}. nn */ default void postCloseRegionOperation(ObserverContext ctx, - Operation operation) throws IOException {} + Operation operation) throws IOException { + } /** - * Called after the completion of batch put/delete/increment/append and will be called even if - * the batch operation fails. + * Called after the completion of batch put/delete/increment/append and will be called even if the + * batch operation fails. *

        - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param ctx - * @param miniBatchOp - * @param success true if batch operation is successful otherwise false. + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. nn * @param success true if + * batch operation is successful otherwise false. */ default void postBatchMutateIndispensably(ObserverContext ctx, - MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException {} + MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException { + } /** * Called before checkAndPut. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param put data to put if check succeeds - * @param result the default value of the result + * @param put data to put if check succeeds + * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPut(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, - boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, + boolean result) throws IOException { return result; } /** * Called before checkAndPut. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter - * @param put data to put if check succeeds + * @param put data to put if check succeeds * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPut(ObserverContext c, byte[] row, @@ -632,58 +651,56 @@ public interface RegionObserver { /** * Called before checkAndPut but after acquiring rowlock. *

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param put data to put if check succeeds - * @param result the default value of the result + * @param put data to put if check succeeds + * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPutAfterRowLock(ObserverContext c, - byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - ByteArrayComparable comparator, Put put, boolean result) throws IOException { + byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Put put, boolean result) throws IOException { return result; } /** * Called before checkAndPut but after acquiring rowlock. *

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter - * @param put data to put if check succeeds + * @param put data to put if check succeeds * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPutAfterRowLock(ObserverContext c, @@ -694,42 +711,42 @@ public interface RegionObserver { /** * Called after checkAndPut *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param put data to put if check succeeds - * @param result from the checkAndPut + * @param put data to put if check succeeds + * @param result from the checkAndPut * @return the possibly transformed return value to return to client - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndPut(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, - boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, + boolean result) throws IOException { return result; } /** * Called after checkAndPut *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter - * @param put data to put if check succeeds + * @param put data to put if check succeeds * @param result from the checkAndPut * @return the possibly transformed return value to return to client - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndPut(ObserverContext c, byte[] row, @@ -740,48 +757,48 @@ public interface RegionObserver { /** * Called before checkAndDelete. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param delete delete to commit if check succeeds - * @param result the default value of the result + * @param delete delete to commit if check succeeds + * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDelete(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { return result; } /** * Called before checkAndDelete. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter column family * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDelete(ObserverContext c, byte[] row, @@ -792,58 +809,56 @@ public interface RegionObserver { /** * Called before checkAndDelete but after acquiring rowock. *

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param delete delete to commit if check succeeds - * @param result the default value of the result + * @param delete delete to commit if check succeeds + * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDeleteAfterRowLock(ObserverContext c, - byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - ByteArrayComparable comparator, Delete delete, boolean result) throws IOException { + byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { return result; } /** * Called before checkAndDelete but after acquiring rowock. *

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDeleteAfterRowLock(ObserverContext c, @@ -854,42 +869,42 @@ public interface RegionObserver { /** * Called after checkAndDelete *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param delete delete to commit if check succeeds - * @param result from the CheckAndDelete + * @param delete delete to commit if check succeeds + * @param result from the CheckAndDelete * @return the possibly transformed returned value to return to client - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndDelete(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { return result; } /** * Called after checkAndDelete *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter * @param delete delete to commit if check succeeds * @param result from the CheckAndDelete * @return the possibly transformed returned value to return to client - * * @deprecated since 2.4.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndDelete(ObserverContext c, byte[] row, @@ -900,14 +915,14 @@ public interface RegionObserver { /** * Called before checkAndMutate *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object - * @param result the default value of the result + * @param result the default value of the result * @return the return value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -944,18 +959,18 @@ public interface RegionObserver { /** * Called before checkAndDelete but after acquiring rowlock. *
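Editor's note: a hedged sketch of the replacement hook that the deprecation notes above point to, preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult). Conditional mutations on a protected row prefix are answered with a failed CheckAndMutateResult and bypassed; the prefix and class name are illustrative assumptions.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;

public class DenyCheckAndMutateObserver implements RegionCoprocessor, RegionObserver {

  private static final byte[] PROTECTED_PREFIX = Bytes.toBytes("sys:");

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public CheckAndMutateResult preCheckAndMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException {
    if (Bytes.startsWith(checkAndMutate.getRow(), PROTECTED_PREFIX)) {
      // The returned value is handed back to the client once default processing is bypassed.
      c.bypass();
      return new CheckAndMutateResult(false, null);
    }
    return result;
  }
}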

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object - * @param result the default value of the result + * @param result the default value of the result * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -965,13 +980,13 @@ public interface RegionObserver { if (checkAndMutate.getAction() instanceof Put) { boolean success; if (checkAndMutate.hasFilter()) { - success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); - } else { - success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), + success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); + } else { + success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), checkAndMutate.getFamily(), + checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Put) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } else if (checkAndMutate.getAction() instanceof Delete) { @@ -981,9 +996,9 @@ public interface RegionObserver { checkAndMutate.getFilter(), (Delete) checkAndMutate.getAction(), result.isSuccess()); } else { success = preCheckAndDeleteAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), - (Delete) checkAndMutate.getAction(), result.isSuccess()); + checkAndMutate.getFamily(), checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Delete) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } @@ -993,11 +1008,11 @@ public interface RegionObserver { /** * Called after checkAndMutate *

        - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object - * @param result from the checkAndMutate + * @param result from the checkAndMutate * @return the possibly transformed returned value to return to client * @throws IOException if an error occurred on the coprocessor */ @@ -1006,25 +1021,25 @@ public interface RegionObserver { if (checkAndMutate.getAction() instanceof Put) { boolean success; if (checkAndMutate.hasFilter()) { - success = postCheckAndPut(c, checkAndMutate.getRow(), - checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); - } else { - success = postCheckAndPut(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), + success = postCheckAndPut(c, checkAndMutate.getRow(), checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); + } else { + success = postCheckAndPut(c, checkAndMutate.getRow(), checkAndMutate.getFamily(), + checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Put) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } else if (checkAndMutate.getAction() instanceof Delete) { boolean success; if (checkAndMutate.hasFilter()) { - success = postCheckAndDelete(c, checkAndMutate.getRow(), - checkAndMutate.getFilter(), (Delete) checkAndMutate.getAction(), result.isSuccess()); - } else { - success = postCheckAndDelete(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), + success = postCheckAndDelete(c, checkAndMutate.getRow(), checkAndMutate.getFilter(), (Delete) checkAndMutate.getAction(), result.isSuccess()); + } else { + success = postCheckAndDelete(c, checkAndMutate.getRow(), checkAndMutate.getFamily(), + checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Delete) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } @@ -1034,16 +1049,16 @@ public interface RegionObserver { /** * Called before Append. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @return result to return to the client if bypassing default processing * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #preAppend(ObserverContext, Append, WALEdit)} instead. + * {@link #preAppend(ObserverContext, Append, WALEdit)} instead. */ @Deprecated default Result preAppend(ObserverContext c, Append append) @@ -1054,14 +1069,14 @@ public interface RegionObserver { /** * Called before Append. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object - * @param edit The WALEdit object that will be written to the wal + * @param edit The WALEdit object that will be written to the wal * @return result to return to the client if bypassing default processing */ default Result preAppend(ObserverContext c, Append append, @@ -1072,20 +1087,20 @@ public interface RegionObserver { /** * Called before Append but after acquiring rowlock. *
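A short sketch of the non-deprecated preAppend overload that takes a WALEdit, matching the signature in the hunk above. The class name and the attribute it sets are illustrative only.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

/** Illustrative only: tags every Append with an attribute before default processing runs. */
public class AppendTaggingObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append,
    WALEdit edit) throws IOException {
    // Mutation#setAttribute is the standard client API; the attribute name is hypothetical.
    append.setAttribute("observed-by", Bytes.toBytes(getClass().getSimpleName()));
    // Returning null means "do not bypass": the region server performs the Append as usual.
    return null;
  }
}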

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @return result to return to the client if bypassing default processing * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. + * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. */ @Deprecated default Result preAppendAfterRowLock(ObserverContext c, @@ -1096,14 +1111,14 @@ public interface RegionObserver { /** * Called after Append *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @param result the result returned by increment * @return the result to return to the client * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} instead. + * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} instead. */ @Deprecated default Result postAppend(ObserverContext c, Append append, @@ -1114,12 +1129,12 @@ public interface RegionObserver { /** * Called after Append *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @param result the result returned by increment - * @param edit The WALEdit object for the wal + * @param edit The WALEdit object for the wal * @return the result to return to the client */ default Result postAppend(ObserverContext c, Append append, @@ -1130,16 +1145,16 @@ public interface RegionObserver { /** * Called before Increment. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object * @return result to return to the client if bypassing default processing * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #preIncrement(ObserverContext, Increment, WALEdit)} instead. + * {@link #preIncrement(ObserverContext, Increment, WALEdit)} instead. */ @Deprecated default Result preIncrement(ObserverContext c, Increment increment) @@ -1150,14 +1165,14 @@ public interface RegionObserver { /** * Called before Increment. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object - * @param edit The WALEdit object that will be written to the wal + * @param edit The WALEdit object that will be written to the wal * @return result to return to the client if bypassing default processing */ default Result preIncrement(ObserverContext c, Increment increment, @@ -1168,21 +1183,20 @@ public interface RegionObserver { /** * Called before Increment but after acquiring rowlock. *
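For the preIncrement overload above, a hedged sketch of a guard that rejects increments on a reserved row prefix. The prefix convention and class name are made up; the signature and the IOException-to-client behaviour follow the javadoc in this patch.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

/** Illustrative only: rejects increments against a reserved row prefix. */
public class IncrementGuardObserver implements RegionCoprocessor, RegionObserver {

  private static final byte[] RESERVED_PREFIX = Bytes.toBytes("sys:"); // hypothetical convention

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment,
    WALEdit edit) throws IOException {
    if (Bytes.startsWith(increment.getRow(), RESERVED_PREFIX)) {
      // Per the observer javadoc, IOExceptions thrown here are reported back to the client.
      throw new DoNotRetryIOException("Increments against reserved rows are not allowed");
    }
    return null; // null = no bypass, default processing continues
  }
}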

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object * @return result to return to the client if bypassing default processing * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. + * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. */ @Deprecated default Result preIncrementAfterRowLock(ObserverContext c, @@ -1195,12 +1209,12 @@ public interface RegionObserver { *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object - * @param result the result returned by increment + * @param result the result returned by increment * @return the result to return to the client * @deprecated since 2.5.0 and will be removed in 4.0.0. Use - * {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} instead. + * {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} instead. */ @Deprecated default Result postIncrement(ObserverContext c, Increment increment, @@ -1213,10 +1227,10 @@ public interface RegionObserver { *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object - * @param result the result returned by increment - * @param edit The WALEdit object for the wal + * @param result the result returned by increment + * @param edit The WALEdit object for the wal * @return the result to return to the client */ default Result postIncrement(ObserverContext c, Increment increment, @@ -1229,11 +1243,11 @@ public interface RegionObserver { *

        * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param scan the Scan specification */ default void preScannerOpen(ObserverContext c, Scan scan) - throws IOException { + throws IOException { } /** @@ -1241,35 +1255,34 @@ public interface RegionObserver { *
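A small sketch against preScannerOpen as declared above: it only adjusts the client Scan before the scanner is built. The caching cap is an arbitrary illustration.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

/** Illustrative only: caps the caching hint of every scan opened against this region. */
public class ScanTuningObserver implements RegionCoprocessor, RegionObserver {

  private static final int MAX_CACHING = 500; // hypothetical limit

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan)
    throws IOException {
    // The Scan can be adjusted here; note that store-level limits such as max versions are not
    // widened by this hook alone (see the preStoreScannerOpen discussion further down).
    if (scan.getCaching() < 0 || scan.getCaching() > MAX_CACHING) {
      scan.setCaching(MAX_CACHING);
    }
  }
}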

        * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param scan the Scan specification - * @param s if not null, the base scanner + * @param s if not null, the base scanner * @return the scanner instance to use */ default RegionScanner postScannerOpen(ObserverContext c, Scan scan, - RegionScanner s) throws IOException { + RegionScanner s) throws IOException { return s; } /** * Called before the client asks for the next row on a scanner. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param s the scanner - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be returned if default processing - * is not bypassed. - * @param limit the maximum number of results to return + * @param c the environment provided by the region server + * @param s the scanner + * @param result The result to return to the client if default processing is bypassed. Can be + * modified. Will not be returned if default processing is not bypassed. + * @param limit the maximum number of results to return * @param hasNext the 'has more' indication * @return 'has more' indication that should be sent to client */ default boolean preScannerNext(ObserverContext c, InternalScanner s, - List result, int limit, boolean hasNext) throws IOException { + List result, int limit, boolean hasNext) throws IOException { return hasNext; } @@ -1278,15 +1291,15 @@ public interface RegionObserver { *

        * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param s the scanner - * @param result the result to return to the client, can be modified - * @param limit the maximum number of results to return + * @param c the environment provided by the region server + * @param s the scanner + * @param result the result to return to the client, can be modified + * @param limit the maximum number of results to return * @param hasNext the 'has more' indication * @return 'has more' indication that should be sent to client */ default boolean postScannerNext(ObserverContext c, - InternalScanner s, List result, int limit, boolean hasNext) throws IOException { + InternalScanner s, List result, int limit, boolean hasNext) throws IOException { return hasNext; } @@ -1294,46 +1307,46 @@ public interface RegionObserver { * This will be called by the scan flow when the current scanned row is being filtered out by the * filter. The filter may be filtering out the row via any of the below scenarios *

- * <ol>
- * <li>boolean filterRowKey(byte [] buffer, int offset, int length) returning true</li>
- * <li>boolean filterRow() returning true</li>
- * <li>default void filterRow(List&lt;KeyValue&gt; kvs) removing all the kvs from
- * the passed List</li>
+ * <ol>
+ * <li>boolean filterRowKey(byte [] buffer, int offset, int length) returning
+ * true</li>
+ * <li>boolean filterRow() returning true</li>
+ * <li>default void filterRow(List&lt;KeyValue&gt; kvs) removing all the kvs from the
+ * passed List</li>
 * </ol>

        * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param s the scanner + * @param c the environment provided by the region server + * @param s the scanner * @param curRowCell The cell in the current row which got filtered out - * @param hasMore the 'has more' indication + * @param hasMore the 'has more' indication * @return whether more rows are available for the scanner or not */ default boolean postScannerFilterRow(ObserverContext c, - InternalScanner s, Cell curRowCell, boolean hasMore) throws IOException { + InternalScanner s, Cell curRowCell, boolean hasMore) throws IOException { return hasMore; } /** * Called before the client closes a scanner. *
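A sketch of postScannerFilterRow as declared above, which observes filtered-out rows without altering scan behaviour. The counter is illustrative.

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.LongAdder;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

/** Illustrative only: counts rows that the scan filter dropped, without changing behaviour. */
public class FilteredRowCountingObserver implements RegionCoprocessor, RegionObserver {

  private final LongAdder filteredRows = new LongAdder();

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public boolean postScannerFilterRow(ObserverContext<RegionCoprocessorEnvironment> c,
    InternalScanner s, Cell curRowCell, boolean hasMore) throws IOException {
    // Do not hold on to curRowCell beyond this call; copy it if it must outlive the invocation.
    filteredRows.increment();
    return hasMore; // keep the scanner's own notion of whether more rows are available
  }
}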

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. * @param c the environment provided by the region server * @param s the scanner */ default void preScannerClose(ObserverContext c, InternalScanner s) - throws IOException {} + throws IOException { + } /** * Called after the client closes a scanner. * @param ctx the environment provided by the region server - * @param s the scanner + * @param s the scanner */ default void postScannerClose(ObserverContext ctx, - InternalScanner s) throws IOException {} + InternalScanner s) throws IOException { + } /** * Called before a store opens a new scanner. @@ -1346,136 +1359,134 @@ public interface RegionObserver { * {@code preScannerOpen}, but if the max versions config on the Store is 1, then you still can * only read 1 version. You need also to inject here to change the max versions to 10 if you want * to get more versions. - * @param ctx the environment provided by the region server - * @param store the store which we want to get scanner from + * @param ctx the environment provided by the region server + * @param store the store which we want to get scanner from * @param options used to change max versions and TTL for the scanner being opened * @see #preFlushScannerOpen(ObserverContext, Store, ScanOptions, FlushLifeCycleTracker) * @see #preCompactScannerOpen(ObserverContext, Store, ScanType, ScanOptions, * CompactionLifeCycleTracker, CompactionRequest) */ default void preStoreScannerOpen(ObserverContext ctx, Store store, - ScanOptions options) throws IOException {} + ScanOptions options) throws IOException { + } /** - * Called before replaying WALs for this region. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * @param ctx the environment provided by the region server - * @param info the RegionInfo for this region + * Called before replaying WALs for this region. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param info the RegionInfo for this region * @param edits the file of recovered edits */ // todo: what about these? default void preReplayWALs(ObserverContext ctx, - RegionInfo info, Path edits) throws IOException {} + RegionInfo info, Path edits) throws IOException { + } /** * Called after replaying WALs for this region. - * @param ctx the environment provided by the region server - * @param info the RegionInfo for this region + * @param ctx the environment provided by the region server + * @param info the RegionInfo for this region * @param edits the file of recovered edits */ default void postReplayWALs(ObserverContext ctx, - RegionInfo info, Path edits) throws IOException {} + RegionInfo info, Path edits) throws IOException { + } /** - * Called before a {@link WALEdit} - * replayed for this region. + * Called before a {@link WALEdit} replayed for this region. * @param ctx the environment provided by the region server */ default void preWALRestore(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called after a {@link WALEdit} - * replayed for this region. 
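Building on the preStoreScannerOpen discussion above (the "change the max versions to 10" scenario), a hedged sketch; it assumes ScanOptions exposes a setMaxVersions(int) mutator.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.ScanOptions;
import org.apache.hadoop.hbase.regionserver.Store;

/** Illustrative only: widens the store-level max versions so scans can read more versions. */
public class WideVersionsObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
    ScanOptions options) throws IOException {
    // Mirrors the "max versions to 10" example in the javadoc above; assumes a
    // setMaxVersions(int) mutator on ScanOptions.
    options.setMaxVersions(10);
  }
}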
+ * Called after a {@link WALEdit} replayed for this region. * @param ctx the environment provided by the region server */ default void postWALRestore(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called before bulkLoadHFile. Users can create a StoreFile instance to - * access the contents of a HFile. - * - * @param ctx the environment provided by the region server - * @param familyPaths pairs of { CF, HFile path } submitted for bulk load. Adding - * or removing from this list will add or remove HFiles to be bulk loaded. + * Called before bulkLoadHFile. Users can create a StoreFile instance to access the contents of a + * HFile. + * @param ctx the environment provided by the region server + * @param familyPaths pairs of { CF, HFile path } submitted for bulk load. Adding or removing from + * this list will add or remove HFiles to be bulk loaded. */ default void preBulkLoadHFile(ObserverContext ctx, - List> familyPaths) throws IOException {} + List> familyPaths) throws IOException { + } /** * Called before moving bulk loaded hfile to region directory. - * - * @param ctx the environment provided by the region server + * @param ctx the environment provided by the region server * @param family column family - * @param pairs List of pairs of { HFile location in staging dir, HFile path in region dir } - * Each pair are for the same hfile. + * @param pairs List of pairs of { HFile location in staging dir, HFile path in region dir } Each + * pair are for the same hfile. */ default void preCommitStoreFile(ObserverContext ctx, byte[] family, - List> pairs) throws IOException {} + List> pairs) throws IOException { + } /** * Called after moving bulk loaded hfile to region directory. - * - * @param ctx the environment provided by the region server - * @param family column family + * @param ctx the environment provided by the region server + * @param family column family * @param srcPath Path to file before the move * @param dstPath Path to file after the move */ default void postCommitStoreFile(ObserverContext ctx, byte[] family, - Path srcPath, Path dstPath) throws IOException {} - - /** - * Called after bulkLoadHFile. - * - * @param ctx the environment provided by the region server - * @param stagingFamilyPaths pairs of { CF, HFile path } submitted for bulk load - * @param finalPaths Map of CF to List of file paths for the loaded files - * if the Map is not null, the bulkLoad was successful. Otherwise the bulk load failed. - * bulkload is done by the time this hook is called. - */ - default void postBulkLoadHFile(ObserverContext ctx, - List> stagingFamilyPaths, Map> finalPaths) - throws IOException { + Path srcPath, Path dstPath) throws IOException { } /** - * Called before creation of Reader for a store file. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * - * @param ctx the environment provided by the region server - * @param fs fileystem to read from - * @param p path to the file - * @param in {@link FSDataInputStreamWrapper} - * @param size Full size of the file - * @param cacheConf - * @param r original reference file. This will be not null only when reading a split file. + * Called after bulkLoadHFile. 
+ * @param ctx the environment provided by the region server + * @param stagingFamilyPaths pairs of { CF, HFile path } submitted for bulk load + * @param finalPaths Map of CF to List of file paths for the loaded files if the Map is + * not null, the bulkLoad was successful. Otherwise the bulk load + * failed. bulkload is done by the time this hook is called. + */ + default void postBulkLoadHFile(ObserverContext ctx, + List> stagingFamilyPaths, Map> finalPaths) + throws IOException { + } + + /** + * Called before creation of Reader for a store file. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param fs fileystem to read from + * @param p path to the file + * @param in {@link FSDataInputStreamWrapper} + * @param size Full size of the file n * @param r original reference file. This will be not null + * only when reading a split file. * @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain - * @return a Reader instance to use instead of the base reader if overriding - * default behavior, null otherwise + * @return a Reader instance to use instead of the base reader if overriding default behavior, + * null otherwise * @deprecated For Phoenix only, StoreFileReader is not a stable interface. */ @Deprecated // Passing InterfaceAudience.Private args FSDataInputStreamWrapper, CacheConfig and Reference. // This is fine as the hook is deprecated any way. default StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, - FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, StoreFileReader reader) throws IOException { + FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, + Reference r, StoreFileReader reader) throws IOException { return reader; } /** * Called after the creation of Reader for a store file. - * - * @param ctx the environment provided by the region server - * @param fs fileystem to read from - * @param p path to the file - * @param in {@link FSDataInputStreamWrapper} - * @param size Full size of the file - * @param cacheConf - * @param r original reference file. This will be not null only when reading a split file. + * @param ctx the environment provided by the region server + * @param fs fileystem to read from + * @param p path to the file + * @param in {@link FSDataInputStreamWrapper} + * @param size Full size of the file n * @param r original reference file. This will be not null + * only when reading a split file. * @param reader the base reader instance * @return The reader to use * @deprecated For Phoenix only, StoreFileReader is not a stable interface. @@ -1484,106 +1495,100 @@ public interface RegionObserver { // Passing InterfaceAudience.Private args FSDataInputStreamWrapper, CacheConfig and Reference. // This is fine as the hook is deprecated any way. default StoreFileReader postStoreFileReaderOpen(ObserverContext ctx, - FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, StoreFileReader reader) throws IOException { + FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, + Reference r, StoreFileReader reader) throws IOException { return reader; } /** - * Called after a new cell has been created during an increment operation, but before - * it is committed to the WAL or memstore. 
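A sketch against the bulk load hooks above. The generic parameters are not visible in the reformatted hunks, so List<Pair<byte[], String>> is assumed for familyPaths; the logging is illustrative.

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative only: logs the { column family, HFile path } pairs submitted for bulk load. */
public class BulkLoadAuditObserver implements RegionCoprocessor, RegionObserver {

  private static final Logger LOG = LoggerFactory.getLogger(BulkLoadAuditObserver.class);

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
    List<Pair<byte[], String>> familyPaths) throws IOException {
    // Entries may also be added to or removed from familyPaths here, as the javadoc above notes.
    for (Pair<byte[], String> familyPath : familyPaths) {
      LOG.info("Bulk load request: family={}, hfile={}", Bytes.toString(familyPath.getFirst()),
        familyPath.getSecond());
    }
  }
}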
- * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * @param ctx the environment provided by the region server - * @param opType the operation type + * Called after a new cell has been created during an increment operation, but before it is + * committed to the WAL or memstore. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param opType the operation type * @param mutation the current mutation - * @param oldCell old cell containing previous value - * @param newCell the new cell containing the computed value + * @param oldCell old cell containing previous value + * @param newCell the new cell containing the computed value * @return the new cell, possibly changed * @deprecated since 2.2.0 and will be removedin 4.0.0. Use - * {@link #postIncrementBeforeWAL(ObserverContext, Mutation, List)} or - * {@link #postAppendBeforeWAL(ObserverContext, Mutation, List)} instead. + * {@link #postIncrementBeforeWAL(ObserverContext, Mutation, List)} or + * {@link #postAppendBeforeWAL(ObserverContext, Mutation, List)} instead. * @see #postIncrementBeforeWAL(ObserverContext, Mutation, List) * @see #postAppendBeforeWAL(ObserverContext, Mutation, List) * @see HBASE-21643 */ @Deprecated default Cell postMutationBeforeWAL(ObserverContext ctx, - MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException { + MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException { return newCell; } /** * Called after a list of new cells has been created during an increment operation, but before * they are committed to the WAL or memstore. - * * @param ctx the environment provided by the region server * @param mutation the current mutation - * @param cellPairs a list of cell pair. The first cell is old cell which may be null. - * And the second cell is the new cell. + * @param cellPairs a list of cell pair. The first cell is old cell which may be null. And the + * second cell is the new cell. * @return a list of cell pair, possibly changed. */ default List> postIncrementBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs.add(new Pair<>(pair.getFirst(), - postMutationBeforeWAL(ctx, MutationType.INCREMENT, mutation, pair.getFirst(), - pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), postMutationBeforeWAL(ctx, MutationType.INCREMENT, + mutation, pair.getFirst(), pair.getSecond()))); } return resultPairs; } /** - * Called after a list of new cells has been created during an append operation, but before - * they are committed to the WAL or memstore. - * + * Called after a list of new cells has been created during an append operation, but before they + * are committed to the WAL or memstore. * @param ctx the environment provided by the region server * @param mutation the current mutation - * @param cellPairs a list of cell pair. The first cell is old cell which may be null. - * And the second cell is the new cell. + * @param cellPairs a list of cell pair. The first cell is old cell which may be null. And the + * second cell is the new cell. * @return a list of cell pair, possibly changed. 
*/ default List> postAppendBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs.add(new Pair<>(pair.getFirst(), - postMutationBeforeWAL(ctx, MutationType.APPEND, mutation, pair.getFirst(), - pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), postMutationBeforeWAL(ctx, MutationType.APPEND, + mutation, pair.getFirst(), pair.getSecond()))); } return resultPairs; } /** - * Called after the ScanQueryMatcher creates ScanDeleteTracker. Implementing - * this hook would help in creating customised DeleteTracker and returning - * the newly created DeleteTracker + * Called after the ScanQueryMatcher creates ScanDeleteTracker. Implementing this hook would help + * in creating customised DeleteTracker and returning the newly created DeleteTracker *
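A sketch of postIncrementBeforeWAL as shown above; it inspects the old/new cell pairs and returns them unchanged, which preserves default behaviour. Class and log names are illustrative.

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative only: observes the old/new cell pairs computed for an increment. */
public class IncrementInspectionObserver implements RegionCoprocessor, RegionObserver {

  private static final Logger LOG = LoggerFactory.getLogger(IncrementInspectionObserver.class);

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public List<Pair<Cell, Cell>> postIncrementBeforeWAL(
    ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
    List<Pair<Cell, Cell>> cellPairs) throws IOException {
    // The first cell of each pair is the previous value (possibly null), the second is the newly
    // computed one; returning the list unchanged keeps default behaviour.
    LOG.debug("Increment produced {} cell(s) for row {}", cellPairs.size(),
      Bytes.toStringBinary(mutation.getRow()));
    return cellPairs;
  }
}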

        * Warn: This is used by internal coprocessors. Should not be implemented by user coprocessors - * @param ctx the environment provided by the region server + * @param ctx the environment provided by the region server * @param delTracker the deleteTracker that is created by the QueryMatcher * @return the Delete Tracker * @deprecated Since 2.0 with out any replacement and will be removed in 3.0 */ @Deprecated default DeleteTracker postInstantiateDeleteTracker( - ObserverContext ctx, DeleteTracker delTracker) - throws IOException { + ObserverContext ctx, DeleteTracker delTracker) + throws IOException { return delTracker; } /** * Called just before the WAL Entry is appended to the WAL. Implementing this hook allows - * coprocessors to add extended attributes to the WALKey that then get persisted to the - * WAL, and are available to replication endpoints to use in processing WAL Entries. + * coprocessors to add extended attributes to the WALKey that then get persisted to the WAL, and + * are available to replication endpoints to use in processing WAL Entries. * @param ctx the environment provided by the region server * @param key the WALKey associated with a particular append to a WAL */ default void preWALAppend(ObserverContext ctx, WALKey key, - WALEdit edit) - throws IOException { + WALEdit edit) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java index 66d8113a87a..60bee538c16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
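For preWALAppend above, a hedged sketch that stamps WAL entries with an extended attribute. It assumes WALKey offers addExtendedAttribute(String, byte[]), which is the mechanism the hook's javadoc describes; the attribute key and value are made up.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

/** Illustrative only: stamps every WAL entry with an extended attribute before it is appended. */
public class WalTaggingObserver implements RegionCoprocessor, RegionObserver {

  private static final String ORIGIN_ATTRIBUTE = "origin-cluster"; // hypothetical attribute key

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> ctx, WALKey key,
    WALEdit edit) throws IOException {
    // Assumes WALKey#addExtendedAttribute; the attribute then travels with the WAL entry and is
    // visible to replication endpoints, per the javadoc above.
    key.addExtendedAttribute(ORIGIN_ATTRIBUTE, Bytes.toBytes("cluster-a"));
  }
}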
*/ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionServerCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java index 4a5d69a17aa..4b2e8b5791b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -33,7 +31,7 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionServerCoprocessorEnvironment - extends CoprocessorEnvironment { + extends CoprocessorEnvironment { /** * @return Hosting Server's ServerName */ @@ -45,48 +43,44 @@ public interface RegionServerCoprocessorEnvironment OnlineRegions getOnlineRegions(); /** - * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. - * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *

        Be aware that operations that make use of this Connection are executed as the RegionServer + *

        + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + * operations as this User (See {@link #createConnection(Configuration)}} to run as other than the + * RegionServer User). + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -96,9 +90,10 @@ public interface RegionServerCoprocessorEnvironment /** * Returns a MetricRegistry that can be used to track metrics at the region server level. - * - *
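Tying the getConnection()/createConnection() guidance above together, a sketch of a RegionServerCoprocessor that creates one Connection at start() and closes it at stop(); all names are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;

/** Illustrative only: creates one Connection at start() and reuses it until stop(). */
public class ConnectionCachingCoprocessor implements RegionServerCoprocessor {

  private Connection connection;

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionServerCoprocessorEnvironment) {
      RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
      // Creating a Connection is heavy-weight: create once here and reuse, as the javadoc
      // above recommends, instead of creating one per request.
      connection = rsEnv.createConnection(rsEnv.getConfiguration());
    }
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    if (connection != null) {
      connection.close(); // this coprocessor owns the connection it created
    }
  }
}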

        See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

        + *

        + * See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples of how + * metrics can be instantiated and used. + *

        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForRegionServer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java index f3ccd9d3638..dc37ac324eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.yetus.audience.InterfaceAudience; @@ -27,27 +25,24 @@ import org.apache.yetus.audience.InterfaceStability; /** * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process. - * - * Since most implementations will be interested in only a subset of hooks, this class uses - * 'default' functions to avoid having to add unnecessary overrides. When the functions are - * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. - * It is done in a way that these default definitions act as no-op. So our suggestion to - * implementation would be to not call these 'default' methods from overrides. - *
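Relating to getMetricRegistryForRegionServer() just above, a hedged sketch that registers a counter and bumps it from one of the RegionServerObserver hooks that appear further down in this patch; it assumes the MetricRegistry#counter(String) accessor used by the hbase-examples observers.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.metrics.Counter;

/** Illustrative only: counts WAL roll requests via the region server MetricRegistry. */
public class WalRollCountingCoprocessor implements RegionServerCoprocessor, RegionServerObserver {

  private Counter walRolls;

  @Override
  public Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.of(this);
  }

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionServerCoprocessorEnvironment) {
      // Assumes MetricRegistry#counter(String); the metric name is hypothetical.
      walRolls = ((RegionServerCoprocessorEnvironment) env).getMetricRegistryForRegionServer()
        .counter("walRollRequests");
    }
  }

  @Override
  public void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
    throws IOException {
    if (walRolls != null) {
      walRolls.increment();
    }
  }
}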

        - * - *

        Exception Handling

        - * For all functions, exception handling is done as follows: + * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process. Since most implementations + * will be interested in only a subset of hooks, this class uses 'default' functions to avoid having + * to add unnecessary overrides. When the functions are non-empty, it's simply to satisfy the + * compiler by returning value of expected (non-void) type. It is done in a way that these default + * definitions act as no-op. So our suggestion to implementation would be to not call these + * 'default' methods from overrides.
        + *
        + *

        Exception Handling

        For all functions, exception handling is done as follows: *
- * <ul>
- * <li>Exceptions of type {@link IOException} are reported back to client.</li>
- * <li>For any other kind of exception:
- * <ul>
- * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- * the server aborts.</li>
- * <li>Otherwise, coprocessor is removed from the server and
- * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- * </ul>
- * </li>
- * </ul>
+ * <ul>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
+ * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -57,33 +52,34 @@ public interface RegionServerObserver { * Called before stopping region server. * @param ctx the environment to interact with the framework and region server. */ - default void preStopRegionServer( - final ObserverContext ctx) throws IOException {} + default void preStopRegionServer(final ObserverContext ctx) + throws IOException { + } /** * This will be called before executing user request to roll a region server WAL. * @param ctx the environment to interact with the framework and region server. */ default void preRollWALWriterRequest( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after executing user request to roll a region server WAL. * @param ctx the environment to interact with the framework and region server. */ default void postRollWALWriterRequest( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after the replication endpoint is instantiated. - * @param ctx the environment to interact with the framework and region server. + * @param ctx the environment to interact with the framework and region server. * @param endpoint - the base endpoint for replication * @return the endpoint to use during replication. */ default ReplicationEndpoint postCreateReplicationEndPoint( - ObserverContext ctx, ReplicationEndpoint endpoint) { + ObserverContext ctx, ReplicationEndpoint endpoint) { return endpoint; } @@ -91,23 +87,23 @@ public interface RegionServerObserver { /** * This will be called before executing replication request to shipping log entries. * @param ctx the environment to interact with the framework and region server. - * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal - * usage by AccessController. Do not use these hooks in custom co-processors. + * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal usage + * by AccessController. Do not use these hooks in custom co-processors. */ @Deprecated default void preReplicateLogEntries(final ObserverContext ctx) - throws IOException { + throws IOException { } /** * This will be called after executing replication request to shipping log entries. * @param ctx the environment to interact with the framework and region server. - * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal - * usage by AccessController. Do not use these hooks in custom co-processors. + * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal usage + * by AccessController. Do not use these hooks in custom co-processors. */ @Deprecated default void postReplicateLogEntries( - final ObserverContext ctx) throws IOException { + final ObserverContext ctx) throws IOException { } /** @@ -115,28 +111,30 @@ public interface RegionServerObserver { * @param ctx the environment to interact with the framework and region server. */ default void preClearCompactionQueues( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after clearing compaction queues * @param ctx the environment to interact with the framework and region server. 
*/ default void postClearCompactionQueues( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called before executing procedures * @param ctx the environment to interact with the framework and region server. */ default void preExecuteProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * This will be called after executing procedures * @param ctx the environment to interact with the framework and region server. */ default void postExecuteProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java index 719acf76eef..6e5f6fde393 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.Service; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; /** * Coprocessor endpoints registered once per server and providing protobuf services should implement diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java index 1deddf9407b..f2b98b61e6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
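A minimal RegionServerObserver sketch using the preStopRegionServer signature from the hunks above; the logging is illustrative.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative only: logs the server name when a region server is asked to stop. */
public class StopLoggingCoprocessor implements RegionServerCoprocessor, RegionServerObserver {

  private static final Logger LOG = LoggerFactory.getLogger(StopLoggingCoprocessor.class);

  @Override
  public Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.of(this);
  }

  @Override
  public void preStopRegionServer(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
    throws IOException {
    // Per the exception-handling notes above, an IOException thrown here would be reported back
    // to the caller; this sketch only observes the event.
    LOG.info("Region server {} is stopping", ctx.getEnvironment().getServerName());
  }
}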
*/ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - /** * WALCoprocessor don't support loading services using {@link #getServices()}. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java index 71c72a2e7f1..1774481f210 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.wal.WAL; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -34,9 +32,10 @@ public interface WALCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleRegionServerObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

        + *

        + * See ExampleRegionServerObserverWithMetrics class in the hbase-examples modules for examples of + * how metrics can be instantiated and used. + *

        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForRegionServer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java index b2fa7ca4777..bc57dbc735a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; @@ -30,80 +27,73 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * It's provided to have a way for coprocessors to observe, rewrite, - * or skip WALEdits as they are being written to the WAL. - * - * Note that implementers of WALObserver will not see WALEdits that report themselves - * as empty via {@link WALEdit#isEmpty()}. - * - * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} provides - * hooks for adding logic for WALEdits in the region context during reconstruction. - * - * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.wal.WAL}. - * - * Since most implementations will be interested in only a subset of hooks, this class uses - * 'default' functions to avoid having to add unnecessary overrides. When the functions are - * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. - * It is done in a way that these default definitions act as no-op. So our suggestion to - * implementation would be to not call these 'default' methods from overrides. - *

 - * <p>
 - * <h3>Exception Handling</h3><br>
 - * For all functions, exception handling is done as follows:
 + * It's provided to have a way for coprocessors to observe, rewrite, or skip WALEdits as they are
 + * being written to the WAL. Note that implementers of WALObserver will not see WALEdits that report
 + * themselves as empty via {@link WALEdit#isEmpty()}.
 + * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} provides hooks for adding logic for
 + * WALEdits in the region context during reconstruction. Defines coprocessor hooks for interacting
 + * with operations on the {@link org.apache.hadoop.hbase.wal.WAL}. Since most implementations will
 + * be interested in only a subset of hooks, this class uses 'default' functions to avoid having to
 + * add unnecessary overrides. When the functions are non-empty, it's simply to satisfy the compiler
 + * by returning value of expected (non-void) type. It is done in a way that these default
 + * definitions act as no-op. So our suggestion to implementation would be to not call these
 + * 'default' methods from overrides.
 + * <p>
 + * <h3>Exception Handling</h3><br>
 + * For all functions, exception handling is done as follows:
   * <ul>
 - * <li>Exceptions of type {@link IOException} are reported back to client.</li>
 - * <li>For any other kind of exception:
 - * <ul>
 - * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
 - * the server aborts.</li>
 - * <li>Otherwise, coprocessor is removed from the server and
 - * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
 - * </ul>
 - * </li>
 + * <li>Exceptions of type {@link IOException} are reported back to client.</li>
 + * <li>For any other kind of exception:
 + * <ul>
 + * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
 + * server aborts.</li>
 + * <li>Otherwise, coprocessor is removed from the server and
 + * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
 + * </ul>
 + * </li>
   * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface WALObserver { /** - * Called before a {@link WALEdit} - * is writen to WAL. - * Do not amend the WALKey. It is InterfaceAudience.Private. Changing the WALKey will cause - * damage. + * Called before a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is + * InterfaceAudience.Private. Changing the WALKey will cause damage. * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose - * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in hbase-3.0.0. + * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in + * hbase-3.0.0. */ @Deprecated default void preWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called after a {@link WALEdit} - * is writen to WAL. - * Do not amend the WALKey. It is InterfaceAudience.Private. Changing the WALKey will cause - * damage. + * Called after a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is + * InterfaceAudience.Private. Changing the WALKey will cause damage. * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose - * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in hbase-3.0.0. + * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in + * hbase-3.0.0. */ @Deprecated default void postWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** * Called before rolling the current WAL * @param oldPath the path of the current wal that we are replacing * @param newPath the path of the wal we are going to create */ - default void preWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException {} + default void preWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { + } /** * Called after rolling the current WAL * @param oldPath the path of the wal that we replaced * @param newPath the path of the wal we have created and now is the current */ - default void postWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException {} + default void postWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { + } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java index 142827be70c..19fa8adc1e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
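A minimal sketch of a coprocessor wired into the WALObserver hooks shown in the hunk above. The class name LoggingWALObserver is illustrative, and the WALCoprocessor/getWALObserver() registration pattern plus the ObserverContext<? extends WALCoprocessorEnvironment> parameterization are assumed from the HBase 2.x API rather than taken from this patch.

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.WALObserver;

    public class LoggingWALObserver implements WALCoprocessor, WALObserver {

      @Override
      public Optional<WALObserver> getWALObserver() {
        // Hand the framework this observer instance; every hook we do not override stays a no-op.
        return Optional.of(this);
      }

      @Override
      public void postWALRoll(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
          Path oldPath, Path newPath) throws IOException {
        // React after the WAL has been rolled to a new file.
        System.out.println("WAL rolled from " + oldPath + " to " + newPath);
      }
    }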
See the NOTICE file * distributed with this work for additional information @@ -20,39 +20,36 @@ package org.apache.hadoop.hbase.errorhandling; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; - /** * A ForeignException is an exception from another thread or process. *

        - * ForeignExceptions are sent to 'remote' peers to signal an abort in the face of failures. - * When serialized for transmission we encode using Protobufs to ensure version compatibility. + * ForeignExceptions are sent to 'remote' peers to signal an abort in the face of failures. When + * serialized for transmission we encode using Protobufs to ensure version compatibility. *

        - * Foreign exceptions contain a Throwable as its cause. This can be a "regular" exception - * generated locally or a ProxyThrowable that is a representation of the original exception - * created on original 'remote' source. These ProxyThrowables have their their stacks traces and - * messages overridden to reflect the original 'remote' exception. The only way these - * ProxyThrowables are generated are by this class's {@link #deserialize(byte[])} method. + * Foreign exceptions contain a Throwable as its cause. This can be a "regular" exception generated + * locally or a ProxyThrowable that is a representation of the original exception created on + * original 'remote' source. These ProxyThrowables have their their stacks traces and messages + * overridden to reflect the original 'remote' exception. The only way these ProxyThrowables are + * generated are by this class's {@link #deserialize(byte[])} method. */ @InterfaceAudience.Public @SuppressWarnings("serial") public class ForeignException extends IOException { /** - * Name of the throwable's source such as a host or thread name. Must be non-null. + * Name of the throwable's source such as a host or thread name. Must be non-null. */ private final String source; /** - * Create a new ForeignException that can be serialized. It is assumed that this came form a - * local source. - * @param source - * @param cause + * Create a new ForeignException that can be serialized. It is assumed that this came form a local + * source. nn */ public ForeignException(String source, Throwable cause) { super(cause); @@ -62,10 +59,8 @@ public class ForeignException extends IOException { } /** - * Create a new ForeignException that can be serialized. It is assumed that this is locally - * generated. - * @param source - * @param msg + * Create a new ForeignException that can be serialized. It is assumed that this is locally + * generated. nn */ public ForeignException(String source, String msg) { super(new IllegalArgumentException(msg)); @@ -78,11 +73,9 @@ public class ForeignException extends IOException { /** * The cause of a ForeignException can be an exception that was generated on a local in process - * thread, or a thread from a 'remote' separate process. - * - * If the cause is a ProxyThrowable, we know it came from deserialization which usually means - * it came from not only another thread, but also from a remote thread. - * + * thread, or a thread from a 'remote' separate process. If the cause is a ProxyThrowable, we know + * it came from deserialization which usually means it came from not only another thread, but also + * from a remote thread. * @return true if went through deserialization, false if locally generated */ public boolean isRemote() { @@ -91,7 +84,7 @@ public class ForeignException extends IOException { @Override public String toString() { - String className = getCause().getClass().getName() ; + String className = getCause().getClass().getName(); return className + " via " + getSource() + ":" + getLocalizedMessage(); } @@ -100,8 +93,8 @@ public class ForeignException extends IOException { * @param trace the stack trace to convert to protobuf message * @return null if the passed stack is null. 
*/ - private static List toStackTraceElementMessages( - StackTraceElement[] trace) { + private static List + toStackTraceElementMessages(StackTraceElement[] trace) { // if there is no stack trace, ignore it and just return the message if (trace == null) return null; // build the stack trace for the message @@ -130,7 +123,7 @@ public class ForeignException extends IOException { /** * Converts a ForeignException to an array of bytes. * @param source the name of the external exception source - * @param t the "local" external exception (local) + * @param t the "local" external exception (local) * @return protobuf serialized version of ForeignException */ public static byte[] serialize(String source, Throwable t) { @@ -141,7 +134,7 @@ public class ForeignException extends IOException { } // set the stack trace, if there is one List stack = - ForeignException.toStackTraceElementMessages(t.getStackTrace()); + ForeignException.toStackTraceElementMessages(t.getStackTrace()); if (stack != null) { gemBuilder.addAllTrace(stack); } @@ -153,25 +146,22 @@ public class ForeignException extends IOException { } /** - * Takes a series of bytes and tries to generate an ForeignException instance for it. - * @param bytes - * @return the ForeignExcpetion instance + * Takes a series of bytes and tries to generate an ForeignException instance for it. n * @return + * the ForeignExcpetion instance * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown. */ - public static ForeignException deserialize(byte[] bytes) - throws IOException { + public static ForeignException deserialize(byte[] bytes) throws IOException { // figure out the data we need to pass ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes); GenericExceptionMessage gem = eem.getGenericException(); - StackTraceElement [] trace = ForeignException.toStackTrace(gem.getTraceList()); + StackTraceElement[] trace = ForeignException.toStackTrace(gem.getTraceList()); ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace); ForeignException e = new ForeignException(eem.getSource(), dfe); return e; } /** - * Unwind a serialized array of {@link StackTraceElementMessage}s to a - * {@link StackTraceElement}s. + * Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on * the sender). 
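A short sketch, not part of the patch, of the ForeignException round trip described above: serialize a local failure on one side, deserialize it on the other, where the rebuilt cause is a ProxyThrowable and isRemote() reports true. The source name and the cause exception are made up for illustration.

    import org.apache.hadoop.hbase.errorhandling.ForeignException;

    public class ForeignExceptionRoundTrip {
      public static void main(String[] args) throws Exception {
        // Sending side: encode the failure with protobuf for version-compatible transmission.
        byte[] wire = ForeignException.serialize("rs-1.example.com",
          new IllegalStateException("snapshot failed"));

        // Receiving side: rebuild the exception; its cause now carries the remote stack trace.
        ForeignException remote = ForeignException.deserialize(wire);
        System.out.println(remote.isRemote() + " " + remote.getSource() + " " + remote);
      }
    }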
@@ -183,8 +173,8 @@ public class ForeignException extends IOException { StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); - trace[i] = new StackTraceElement( - elem.getDeclaringClass(), elem.getMethodName(), elem.getFileName(), elem.getLineNumber()); + trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), + elem.getFileName(), elem.getLineNumber()); } return trace; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java index b2ed0c267da..22b208bf147 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,24 +19,23 @@ package org.apache.hadoop.hbase.errorhandling; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * The dispatcher acts as the state holding entity for foreign error handling. The first - * exception received by the dispatcher get passed directly to the listeners. Subsequent - * exceptions are dropped. + * The dispatcher acts as the state holding entity for foreign error handling. The first exception + * received by the dispatcher get passed directly to the listeners. Subsequent exceptions are + * dropped. *

        * If there are multiple dispatchers that are all in the same foreign exception monitoring group, * ideally all these monitors are "peers" -- any error on one dispatcher should get propagated to - * all others (via rpc, or some other mechanism). Due to racing error conditions the exact reason - * for failure may be different on different peers, but the fact that they are in error state - * should eventually hold on all. + * all others (via rpc, or some other mechanism). Due to racing error conditions the exact reason + * for failure may be different on different peers, but the fact that they are in error state should + * eventually hold on all. *

        - * This is thread-safe and must be because this is expected to be used to propagate exceptions - * from foreign threads. + * This is thread-safe and must be because this is expected to be used to propagate exceptions from + * foreign threads. */ @InterfaceAudience.Private public class ForeignExceptionDispatcher implements ForeignExceptionListener, ForeignExceptionSnare { @@ -62,7 +61,7 @@ public class ForeignExceptionDispatcher implements ForeignExceptionListener, For // if we already have an exception, then ignore it if (exception != null) return; - LOG.debug(name + " accepting received exception" , e); + LOG.debug(name + " accepting received exception", e); // mark that we got the error if (e != null) { exception = e; @@ -95,19 +94,19 @@ public class ForeignExceptionDispatcher implements ForeignExceptionListener, For /** * Sends an exception to all listeners. - * @param e {@link ForeignException} containing the cause. Can be null. + * @param e {@link ForeignException} containing the cause. Can be null. */ private void dispatch(ForeignException e) { // update all the listeners with the passed error - for (ForeignExceptionListener l: listeners) { + for (ForeignExceptionListener l : listeners) { l.receive(e); } } /** - * Listen for failures to a given process. This method should only be used during - * initialization and not added to after exceptions are accepted. - * @param errorable listener for the errors. may be null. + * Listen for failures to a given process. This method should only be used during initialization + * and not added to after exceptions are accepted. + * @param errorable listener for the errors. may be null. */ public synchronized void addListener(ForeignExceptionListener errorable) { this.listeners.add(errorable); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java index 26de489aa76..d2ff5bcc41a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java index 7bc1ee47713..3718900cc87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,45 +20,39 @@ package org.apache.hadoop.hbase.errorhandling; import org.apache.yetus.audience.InterfaceAudience; /** - * This is an interface for a cooperative exception throwing mechanism. Implementations are - * containers that holds an exception from a separate thread. This can be used to receive - * exceptions from 'foreign' threads or from separate 'foreign' processes. + * This is an interface for a cooperative exception throwing mechanism. 
Implementations are + * containers that holds an exception from a separate thread. This can be used to receive exceptions + * from 'foreign' threads or from separate 'foreign' processes. *

        - * To use, one would pass an implementation of this object to a long running method and - * periodically check by calling {@link #rethrowException()}. If any foreign exceptions have - * been received, the calling thread is then responsible for handling the rethrown exception. + * To use, one would pass an implementation of this object to a long running method and periodically + * check by calling {@link #rethrowException()}. If any foreign exceptions have been received, the + * calling thread is then responsible for handling the rethrown exception. *

        * One could use the boolean {@link #hasException()} to determine if there is an exceptoin as well. *
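A hedged sketch of the dispatcher/snare pattern described in the two class comments above: a long-running task periodically polls the snare via rethrowException(), while any thread that hits a problem reports it through the listener side. The worker class, the loop, and the error are illustrative only, and the String-name constructor of ForeignExceptionDispatcher is assumed.

    import org.apache.hadoop.hbase.errorhandling.ForeignException;
    import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;

    public class SnareWorkerSketch {
      public static void main(String[] args) throws ForeignException {
        ForeignExceptionDispatcher dispatcher = new ForeignExceptionDispatcher("example");

        // Some other thread (or an rpc handler) reports a failure into the same dispatcher;
        // only the first exception is kept, later ones are dropped.
        new Thread(() -> dispatcher.receive(
          new ForeignException("remote-rs", new IllegalStateException("disk full")))).start();

        for (int chunk = 0; chunk < 100; chunk++) {
          dispatcher.rethrowException(); // surfaces the foreign error in this thread, if any
          // ... do one chunk of the long-running work here ...
        }
      }
    }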

        - * NOTE: This is very similar to the InterruptedException/interrupt/interrupted pattern. There, - * the notification state is bound to a Thread. Using this, applications receive Exceptions in - * the snare. The snare is referenced and checked by multiple threads which enables exception - * notification in all the involved threads/processes. + * NOTE: This is very similar to the InterruptedException/interrupt/interrupted pattern. There, the + * notification state is bound to a Thread. Using this, applications receive Exceptions in the + * snare. The snare is referenced and checked by multiple threads which enables exception + * notification in all the involved threads/processes. */ @InterfaceAudience.Private public interface ForeignExceptionSnare { /** - * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is - * no exception this is a no-op - * - * @throws ForeignException - * all exceptions from remote sources are procedure exceptions + * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is no + * exception this is a no-op n * all exceptions from remote sources are procedure exceptions */ void rethrowException() throws ForeignException; /** - * Non-exceptional form of {@link #rethrowException()}. Checks to see if any - * process to which the exception checkers is bound has created an error that - * would cause a failure. - * + * Non-exceptional form of {@link #rethrowException()}. Checks to see if any process to which the + * exception checkers is bound has created an error that would cause a failure. * @return true if there has been an error,false otherwise */ boolean hasException(); /** * Get the value of the captured exception. - * * @return the captured foreign exception or null if no exception captured. */ ForeignException getException(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java index f17dcde6bae..a295dd9759c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,14 +34,14 @@ public class TimeoutException extends Exception { /** * Exception indicating that an operation attempt has timed out - * @param start time the operation started (ms since epoch) - * @param end time the timeout was triggered (ms since epoch) - * @param expected expected amount of time for the operation to complete (ms) - * (ideally, expected <= end-start) + * @param start time the operation started (ms since epoch) + * @param end time the timeout was triggered (ms since epoch) + * @param expected expected amount of time for the operation to complete (ms) (ideally, expected + * <= end-start) */ public TimeoutException(String sourceName, long start, long end, long expected) { - super("Timeout elapsed! Source:" + sourceName + " Start:" + start + ", End:" + end - + ", diff:" + (end - start) + ", max:" + expected + " ms"); + super("Timeout elapsed! 
Source:" + sourceName + " Start:" + start + ", End:" + end + ", diff:" + + (end - start) + ", max:" + expected + " ms"); this.sourceName = sourceName; this.start = start; this.end = end; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java index 36182d677d8..aaf0f67f8d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,10 @@ package org.apache.hadoop.hbase.errorhandling; import java.util.Timer; import java.util.TimerTask; - +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Time a given process/operation and report a failure if the elapsed time exceeds the max allowed @@ -46,8 +45,8 @@ public class TimeoutExceptionInjector { /** * Create a generic timer for a task/process. * @param listener listener to notify if the process times out - * @param maxTime max allowed running time for the process. Timer starts on calls to - * {@link #start()} + * @param maxTime max allowed running time for the process. Timer starts on calls to + * {@link #start()} */ public TimeoutExceptionInjector(final ForeignExceptionListener listener, final long maxTime) { this.maxTime = maxTime; @@ -63,8 +62,8 @@ public class TimeoutExceptionInjector { TimeoutExceptionInjector.this.complete = true; } long end = EnvironmentEdgeManager.currentTime(); - TimeoutException tee = new TimeoutException( - "Timeout caused Foreign Exception", start, end, maxTime); + TimeoutException tee = + new TimeoutException("Timeout caused Foreign Exception", start, end, maxTime); String source = "timer-" + timer; listener.receive(new ForeignException(source, tee)); } @@ -85,8 +84,8 @@ public class TimeoutExceptionInjector { return; } if (LOG.isDebugEnabled()) { - LOG.debug("Marking timer as complete - no error notifications will be received for " + - "this timer."); + LOG.debug("Marking timer as complete - no error notifications will be received for " + + "this timer."); } this.complete = true; } @@ -98,7 +97,7 @@ public class TimeoutExceptionInjector { *

        * Non-blocking. * @throws IllegalStateException if the timer has already been marked done via {@link #complete()} - * or {@link #trigger()} + * or {@link #trigger()} */ public synchronized void start() throws IllegalStateException { if (this.start >= 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index 17054a5c409..fcb60a35c4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -31,23 +29,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Abstract base class for all HBase event handlers. Subclasses should - * implement the {@link #process()} and {@link #prepare()} methods. Subclasses - * should also do all necessary checks up in their prepare() if possible -- check - * table exists, is disabled, etc. -- so they fail fast rather than later when process - * is running. Do it this way because process be invoked directly but event - * handlers are also - * run in an executor context -- i.e. asynchronously -- and in this case, - * exceptions thrown at process time will not be seen by the invoker, not till - * we implement a call-back mechanism so the client can pick them up later. + * Abstract base class for all HBase event handlers. Subclasses should implement the + * {@link #process()} and {@link #prepare()} methods. Subclasses should also do all necessary checks + * up in their prepare() if possible -- check table exists, is disabled, etc. -- so they fail fast + * rather than later when process is running. Do it this way because process be invoked directly but + * event handlers are also run in an executor context -- i.e. asynchronously -- and in this case, + * exceptions thrown at process time will not be seen by the invoker, not till we implement a + * call-back mechanism so the client can pick them up later. *
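Returning to the TimeoutExceptionInjector hunks just above, before the EventHandler discussion continues, a hedged sketch of how the timer, a dispatcher, and a guarded operation fit together; the class and method names and the 30 second budget are illustrative.

    import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
    import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;

    public class TimedOperationSketch {
      public static void run(ForeignExceptionDispatcher monitor) throws Exception {
        // If the work takes longer than 30s, the timer reports a ForeignException wrapping a
        // TimeoutException into the dispatcher, which the loop sees on its next rethrowException().
        TimeoutExceptionInjector timer = new TimeoutExceptionInjector(monitor, 30000);
        timer.start();
        try {
          monitor.rethrowException();   // check the snare while doing the real work
          // ... long-running work ...
        } finally {
          timer.complete();             // mark done so no late timeout is dispatched
        }
      }
    }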

        - * Event handlers have an {@link EventType}. - * {@link EventType} is a list of ALL handler event types. We need to keep - * a full list in one place -- and as enums is a good shorthand for an - * implemenations -- because event handlers can be passed to executors when - * they are to be run asynchronously. The - * hbase executor, see ExecutorService, has a switch for passing - * event type to executor. + * Event handlers have an {@link EventType}. {@link EventType} is a list of ALL handler event types. + * We need to keep a full list in one place -- and as enums is a good shorthand for an + * implemenations -- because event handlers can be passed to executors when they are to be run + * asynchronously. The hbase executor, see ExecutorService, has a switch for passing event type to + * executor. *

        * @see ExecutorService */ @@ -80,17 +74,17 @@ public abstract class EventHandler implements Runnable, Comparable this.eventType = eventType; seqid = seqids.incrementAndGet(); if (server != null) { - this.waitingTimeForEvents = server.getConfiguration(). - getInt("hbase.master.event.waiting.time", 1000); + this.waitingTimeForEvents = + server.getConfiguration().getInt("hbase.master.event.waiting.time", 1000); } } /** - * Event handlers should do all the necessary checks in this method (rather than - * in the constructor, or in process()) so that the caller, which is mostly executed - * in the ipc context can fail fast. Process is executed async from the client ipc, - * so this method gives a quick chance to do some basic checks. - * Should be called after constructing the EventHandler, and before process(). + * Event handlers should do all the necessary checks in this method (rather than in the + * constructor, or in process()) so that the caller, which is mostly executed in the ipc context + * can fail fast. Process is executed async from the client ipc, so this method gives a quick + * chance to do some basic checks. Should be called after constructing the EventHandler, and + * before process(). * @return the instance of this class * @throws Exception when something goes wrong */ @@ -112,9 +106,7 @@ public abstract class EventHandler implements Runnable, Comparable } /** - * This method is the main processing loop to be implemented by the various - * subclasses. - * @throws IOException + * This method is the main processing loop to be implemented by the various subclasses. n */ public abstract void process() throws IOException; @@ -127,10 +119,10 @@ public abstract class EventHandler implements Runnable, Comparable } /** - * Get the priority level for this handler instance. This uses natural - * ordering so lower numbers are higher priority. + * Get the priority level for this handler instance. This uses natural ordering so lower numbers + * are higher priority. *

        - * Lowest priority is Integer.MAX_VALUE. Highest priority is 0. + * Lowest priority is Integer.MAX_VALUE. Highest priority is 0. *

        * Subclasses should override this method to allow prioritizing handlers. *
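A sketch, not from the patch, of an EventHandler subclass following the contract above: fail-fast checks in prepare(), the real work in process(), and getPriority() overridden so lower numbers run first. The constructor signature (Server, EventType) and the chosen event type are assumptions made for illustration.

    import java.io.IOException;
    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.executor.EventHandler;
    import org.apache.hadoop.hbase.executor.EventType;

    public class ExampleTableEventHandler extends EventHandler {

      public ExampleTableEventHandler(Server server) {
        super(server, EventType.C_M_CREATE_TABLE);
      }

      @Override
      public EventHandler prepare() throws Exception {
        // Do the cheap checks here so the ipc caller fails fast instead of hitting an async error.
        return this;
      }

      @Override
      public void process() throws IOException {
        // The actual work; runs asynchronously once submitted to the ExecutorService.
      }

      @Override
      public int getPriority() {
        return 10; // natural ordering: lower number = higher priority
      }
    }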

        @@ -152,15 +144,15 @@ public abstract class EventHandler implements Runnable, Comparable /** * Default prioritized runnable comparator which implements a FIFO ordering. *

        - * Subclasses should not override this. Instead, if they want to implement - * priority beyond FIFO, they should override {@link #getPriority()}. + * Subclasses should not override this. Instead, if they want to implement priority beyond FIFO, + * they should override {@link #getPriority()}. */ @Override public int compareTo(EventHandler o) { if (o == null) { return 1; } - if(getPriority() != o.getPriority()) { + if (getPriority() != o.getPriority()) { return (getPriority() < o.getPriority()) ? -1 : 1; } return (this.seqid < o.seqid) ? -1 : 1; @@ -168,16 +160,13 @@ public abstract class EventHandler implements Runnable, Comparable @Override public String toString() { - return "Event #" + getSeqid() + - " of type " + eventType + - " (" + getInformativeName() + ")"; + return "Event #" + getSeqid() + " of type " + eventType + " (" + getInformativeName() + ")"; } /** - * Event implementations should override thie class to provide an - * informative name about what event they are handling. For example, - * event-specific information such as which region or server is - * being processed should be included if possible. + * Event implementations should override thie class to provide an informative name about what + * event they are handling. For example, event-specific information such as which region or server + * is being processed should be included if possible. */ public String getInformativeName() { return this.getClass().toString(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index 3d1fa39b235..e3e1e397616 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,113 +33,103 @@ import org.apache.yetus.audience.InterfaceAudience; public enum EventType { // Messages originating from RS (NOTE: there is NO direct communication from // RS to Master). These are a result of RS updates into ZK. - // RS_ZK_REGION_CLOSING (1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739) + // RS_ZK_REGION_CLOSING (1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739) /** * RS_ZK_REGION_CLOSED
        - * * RS has finished closing a region. */ - RS_ZK_REGION_CLOSED (2, ExecutorType.MASTER_CLOSE_REGION), + RS_ZK_REGION_CLOSED(2, ExecutorType.MASTER_CLOSE_REGION), /** * RS_ZK_REGION_OPENING
        - * * RS is in process of opening a region. */ - RS_ZK_REGION_OPENING (3, null), + RS_ZK_REGION_OPENING(3, null), /** * RS_ZK_REGION_OPENED
        - * * RS has finished opening a region. */ - RS_ZK_REGION_OPENED (4, ExecutorType.MASTER_OPEN_REGION), + RS_ZK_REGION_OPENED(4, ExecutorType.MASTER_OPEN_REGION), /** * RS_ZK_REGION_SPLITTING
        - * * RS has started a region split after master says it's ok to move on. */ - RS_ZK_REGION_SPLITTING (5, null), + RS_ZK_REGION_SPLITTING(5, null), /** * RS_ZK_REGION_SPLIT
        - * * RS split has completed and is notifying the master. */ - RS_ZK_REGION_SPLIT (6, ExecutorType.MASTER_SERVER_OPERATIONS), + RS_ZK_REGION_SPLIT(6, ExecutorType.MASTER_SERVER_OPERATIONS), /** * RS_ZK_REGION_FAILED_OPEN
        - * * RS failed to open a region. */ - RS_ZK_REGION_FAILED_OPEN (7, ExecutorType.MASTER_CLOSE_REGION), + RS_ZK_REGION_FAILED_OPEN(7, ExecutorType.MASTER_CLOSE_REGION), /** * RS_ZK_REGION_MERGING
        - * * RS has started merging regions after master says it's ok to move on. */ - RS_ZK_REGION_MERGING (8, null), + RS_ZK_REGION_MERGING(8, null), /** * RS_ZK_REGION_MERGE
        - * * RS region merge has completed and is notifying the master. */ - RS_ZK_REGION_MERGED (9, ExecutorType.MASTER_SERVER_OPERATIONS), + RS_ZK_REGION_MERGED(9, ExecutorType.MASTER_SERVER_OPERATIONS), /** * RS_ZK_REQUEST_REGION_SPLIT
        - * - * RS has requested to split a region. This is to notify master - * and check with master if the region is in a state good to split. + * RS has requested to split a region. This is to notify master and check with master if the + * region is in a state good to split. */ - RS_ZK_REQUEST_REGION_SPLIT (10, null), + RS_ZK_REQUEST_REGION_SPLIT(10, null), /** * RS_ZK_REQUEST_REGION_MERGE
        - * - * RS has requested to merge two regions. This is to notify master - * and check with master if two regions is in states good to merge. + * RS has requested to merge two regions. This is to notify master and check with master if two + * regions is in states good to merge. */ - RS_ZK_REQUEST_REGION_MERGE (11, null), + RS_ZK_REQUEST_REGION_MERGE(11, null), /** * Messages originating from Master to RS.
        * M_RS_OPEN_REGION
        * Master asking RS to open a region. */ - M_RS_OPEN_REGION (20, ExecutorType.RS_OPEN_REGION), + M_RS_OPEN_REGION(20, ExecutorType.RS_OPEN_REGION), /** * Messages originating from Master to RS.
        * M_RS_OPEN_ROOT
        * Master asking RS to open root. */ - M_RS_OPEN_ROOT (21, ExecutorType.RS_OPEN_ROOT), + M_RS_OPEN_ROOT(21, ExecutorType.RS_OPEN_ROOT), /** * Messages originating from Master to RS.
        * M_RS_OPEN_META
        * Master asking RS to open meta. */ - M_RS_OPEN_META (22, ExecutorType.RS_OPEN_META), + M_RS_OPEN_META(22, ExecutorType.RS_OPEN_META), /** * Messages originating from Master to RS.
        * M_RS_CLOSE_REGION
        * Master asking RS to close a region. */ - M_RS_CLOSE_REGION (23, ExecutorType.RS_CLOSE_REGION), + M_RS_CLOSE_REGION(23, ExecutorType.RS_CLOSE_REGION), /** * Messages originating from Master to RS.
        * M_RS_CLOSE_ROOT
        * Master asking RS to close root. */ - M_RS_CLOSE_ROOT (24, ExecutorType.RS_CLOSE_ROOT), + M_RS_CLOSE_ROOT(24, ExecutorType.RS_CLOSE_ROOT), /** * Messages originating from Master to RS.
        * M_RS_CLOSE_META
        * Master asking RS to close meta. */ - M_RS_CLOSE_META (25, ExecutorType.RS_CLOSE_META), + M_RS_CLOSE_META(25, ExecutorType.RS_CLOSE_META), /** * Messages originating from Master to RS.
        * M_RS_OPEN_PRIORITY_REGION
        - * Master asking RS to open a priority region. + * Master asking RS to open a priority region. */ - M_RS_OPEN_PRIORITY_REGION (26, ExecutorType.RS_OPEN_PRIORITY_REGION), + M_RS_OPEN_PRIORITY_REGION(26, ExecutorType.RS_OPEN_PRIORITY_REGION), /** * Messages originating from Master to RS.
        * M_RS_SWITCH_RPC_THROTTLE
        @@ -152,146 +142,135 @@ public enum EventType { * C_M_MERGE_REGION
        * Client asking Master to merge regions. */ - C_M_MERGE_REGION (30, ExecutorType.MASTER_MERGE_OPERATIONS), + C_M_MERGE_REGION(30, ExecutorType.MASTER_MERGE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_DELETE_TABLE
        * Client asking Master to delete a table. */ - C_M_DELETE_TABLE (40, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_DELETE_TABLE(40, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_DISABLE_TABLE
        * Client asking Master to disable a table. */ - C_M_DISABLE_TABLE (41, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_DISABLE_TABLE(41, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_ENABLE_TABLE
        * Client asking Master to enable a table. */ - C_M_ENABLE_TABLE (42, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_ENABLE_TABLE(42, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_MODIFY_TABLE
        * Client asking Master to modify a table. */ - C_M_MODIFY_TABLE (43, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_MODIFY_TABLE(43, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_ADD_FAMILY
        * Client asking Master to add family to table. */ - C_M_ADD_FAMILY (44, null), + C_M_ADD_FAMILY(44, null), /** * Messages originating from Client to Master.
        * C_M_DELETE_FAMILY
        * Client asking Master to delete family of table. */ - C_M_DELETE_FAMILY (45, null), + C_M_DELETE_FAMILY(45, null), /** * Messages originating from Client to Master.
        * C_M_MODIFY_FAMILY
        * Client asking Master to modify family of table. */ - C_M_MODIFY_FAMILY (46, null), + C_M_MODIFY_FAMILY(46, null), /** * Messages originating from Client to Master.
        * C_M_CREATE_TABLE
        * Client asking Master to create a table. */ - C_M_CREATE_TABLE (47, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_CREATE_TABLE(47, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_SNAPSHOT_TABLE
        * Client asking Master to snapshot an offline table. */ - C_M_SNAPSHOT_TABLE (48, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), + C_M_SNAPSHOT_TABLE(48, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_RESTORE_SNAPSHOT
        * Client asking Master to restore a snapshot. */ - C_M_RESTORE_SNAPSHOT (49, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), + C_M_RESTORE_SNAPSHOT(49, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), // Updates from master to ZK. This is done by the master and there is // nothing to process by either Master or RS /** - * M_ZK_REGION_OFFLINE - * Master adds this region as offline in ZK + * M_ZK_REGION_OFFLINE Master adds this region as offline in ZK */ - M_ZK_REGION_OFFLINE (50, null), + M_ZK_REGION_OFFLINE(50, null), /** - * M_ZK_REGION_CLOSING - * Master adds this region as closing in ZK + * M_ZK_REGION_CLOSING Master adds this region as closing in ZK */ - M_ZK_REGION_CLOSING (51, null), + M_ZK_REGION_CLOSING(51, null), /** - * Master controlled events to be executed on the master - * M_SERVER_SHUTDOWN - * Master is processing shutdown of a RS + * Master controlled events to be executed on the master M_SERVER_SHUTDOWN Master is processing + * shutdown of a RS */ - M_SERVER_SHUTDOWN (70, ExecutorType.MASTER_SERVER_OPERATIONS), + M_SERVER_SHUTDOWN(70, ExecutorType.MASTER_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
        * M_META_SERVER_SHUTDOWN
        * Master is processing shutdown of RS hosting a meta region (-ROOT- or hbase:meta). */ - M_META_SERVER_SHUTDOWN (72, ExecutorType.MASTER_META_SERVER_OPERATIONS), + M_META_SERVER_SHUTDOWN(72, ExecutorType.MASTER_META_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
        - * * M_MASTER_RECOVERY
        * Master is processing recovery of regions found in ZK RIT */ - M_MASTER_RECOVERY (73, ExecutorType.MASTER_SERVER_OPERATIONS), + M_MASTER_RECOVERY(73, ExecutorType.MASTER_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
        - * * M_LOG_REPLAY
        * Master is processing log replay of failed region server */ - M_LOG_REPLAY (74, ExecutorType.M_LOG_REPLAY_OPS), + M_LOG_REPLAY(74, ExecutorType.M_LOG_REPLAY_OPS), /** * RS controlled events to be executed on the RS.
        - * * RS_PARALLEL_SEEK */ - RS_PARALLEL_SEEK (80, ExecutorType.RS_PARALLEL_SEEK), + RS_PARALLEL_SEEK(80, ExecutorType.RS_PARALLEL_SEEK), /** * RS wal recovery work items (splitting wals) to be executed on the RS.
        - * * RS_LOG_REPLAY */ - RS_LOG_REPLAY (81, ExecutorType.RS_LOG_REPLAY_OPS), + RS_LOG_REPLAY(81, ExecutorType.RS_LOG_REPLAY_OPS), /** * RS flush triggering from secondary region replicas to primary region replica.
        - * * RS_REGION_REPLICA_FLUSH */ - RS_REGION_REPLICA_FLUSH (82, ExecutorType.RS_REGION_REPLICA_FLUSH_OPS), + RS_REGION_REPLICA_FLUSH(82, ExecutorType.RS_REGION_REPLICA_FLUSH_OPS), /** * RS compacted files discharger
        - * * RS_COMPACTED_FILES_DISCHARGER */ - RS_COMPACTED_FILES_DISCHARGER (83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER), + RS_COMPACTED_FILES_DISCHARGER(83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER), /** * RS refresh peer.
        - * * RS_REFRESH_PEER */ RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER), /** * RS claim replication queue.
        - * * RS_CLAIM_REPLICATION_QUEUE */ RS_CLAIM_REPLICATION_QUEUE(86, ExecutorType.RS_CLAIM_REPLICATION_QUEUE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java index a49beef5647..eafbad1ecba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,15 +45,15 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.MoreExecuto import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This is a generic executor service. This component abstracts a - * threadpool, a queue to which {@link EventType}s can be submitted, - * and a Runnable that handles the object that is added to the queue. - * - *

        In order to create a new service, create an instance of this class and - * then do: instance.startExecutorService(executorConfig);. {@link ExecutorConfig} - * wraps the configuration needed by this service. When done call {@link #shutdown()}. - * - *

        In order to use the service created above, call {@link #submit(EventHandler)}. + * This is a generic executor service. This component abstracts a threadpool, a queue to which + * {@link EventType}s can be submitted, and a Runnable that handles the object that is + * added to the queue. + *

        + * In order to create a new service, create an instance of this class and then do: + * instance.startExecutorService(executorConfig);. {@link ExecutorConfig} wraps the + * configuration needed by this service. When done call {@link #shutdown()}. + *

        + * In order to use the service created above, call {@link #submit(EventHandler)}. */ @InterfaceAudience.Private public class ExecutorService { @@ -79,24 +78,24 @@ public class ExecutorService { } /** - * Start an executor service with a given name. If there was a service already - * started with the same name, this throws a RuntimeException. + * Start an executor service with a given name. If there was a service already started with the + * same name, this throws a RuntimeException. * @param config Configuration to use for the executor. */ public void startExecutorService(final ExecutorConfig config) { final String name = config.getName(); if (this.executorMap.get(name) != null) { - throw new RuntimeException("An executor service with the name " + name + - " is already running!"); + throw new RuntimeException( + "An executor service with the name " + name + " is already running!"); } Executor hbes = new Executor(config); if (this.executorMap.putIfAbsent(name, hbes) != null) { - throw new RuntimeException("An executor service with the name " + name + - " is already running (2)!"); + throw new RuntimeException( + "An executor service with the name " + name + " is already running (2)!"); } - LOG.debug("Starting executor service name=" + name + - ", corePoolSize=" + hbes.threadPoolExecutor.getCorePoolSize() + - ", maxPoolSize=" + hbes.threadPoolExecutor.getMaximumPoolSize()); + LOG.debug("Starting executor service name=" + name + ", corePoolSize=" + + hbes.threadPoolExecutor.getCorePoolSize() + ", maxPoolSize=" + + hbes.threadPoolExecutor.getMaximumPoolSize()); } boolean isExecutorServiceRunning(String name) { @@ -105,9 +104,8 @@ public class ExecutorService { public void shutdown() { this.delayedSubmitTimer.shutdownNow(); - for(Entry entry: this.executorMap.entrySet()) { - List wasRunning = - entry.getValue().threadPoolExecutor.shutdownNow(); + for (Entry entry : this.executorMap.entrySet()) { + List wasRunning = entry.getValue().threadPoolExecutor.shutdownNow(); if (!wasRunning.isEmpty()) { LOG.info(entry.getValue() + " had " + wasRunning + " on shutdown"); } @@ -133,8 +131,8 @@ public class ExecutorService { * {@link ExecutorService#startExecutorService(ExecutorConfig)} */ public ThreadPoolExecutor getExecutorLazily(ExecutorConfig config) { - return executorMap.computeIfAbsent(config.getName(), (executorName) -> - new Executor(config)).getThreadPoolExecutor(); + return executorMap.computeIfAbsent(config.getName(), (executorName) -> new Executor(config)) + .getThreadPoolExecutor(); } public void submit(final EventHandler eh) { @@ -143,8 +141,8 @@ public class ExecutorService { // This happens only when events are submitted after shutdown() was // called, so dropping them should be "ok" since it means we're // shutting down. - LOG.error("Cannot submit [" + eh + "] because the executor is missing." + - " Is this process shutting down?"); + LOG.error("Cannot submit [" + eh + "] because the executor is missing." + + " Is this process shutting down?"); } else { executor.submit(eh); } @@ -206,9 +204,9 @@ public class ExecutorService { } /** - * Allows timing out of core threads. Good to set this for non-critical thread pools for - * release of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} - * for additional details. + * Allows timing out of core threads. Good to set this for non-critical thread pools for release + * of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} for + * additional details. 
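A hedged sketch of driving the generic ExecutorService per the class comment above: start a named executor from an ExecutorConfig, submit an EventHandler, and shut down when finished. The ExecutorService(String) constructor and the public visibility of ExecutorConfig are assumptions, and how the config itself is built is left out here.

    import org.apache.hadoop.hbase.executor.EventHandler;
    import org.apache.hadoop.hbase.executor.ExecutorService;

    public class ExecutorServiceUsageSketch {
      public static void run(ExecutorService.ExecutorConfig config, EventHandler handler) {
        ExecutorService executorService = new ExecutorService("example-server");
        executorService.startExecutorService(config); // throws RuntimeException if the name is reused
        executorService.submit(handler);              // queued, then handled by the pool
        executorService.shutdown();                   // stops every registered executor
      }
    }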
*/ public ExecutorConfig setAllowCoreThreadTimeout(boolean allowCoreThreadTimeout) { this.allowCoreThreadTimeout = allowCoreThreadTimeout; @@ -249,9 +247,9 @@ public class ExecutorService { this.name = config.getName(); // create the thread pool executor this.threadPoolExecutor = new TrackingThreadPoolExecutor( - // setting maxPoolSize > corePoolSize has no effect since we use an unbounded task queue. - config.getCorePoolSize(), config.getCorePoolSize(), - config.getKeepAliveTimeMillis(), TimeUnit.MILLISECONDS, q); + // setting maxPoolSize > corePoolSize has no effect since we use an unbounded task queue. + config.getCorePoolSize(), config.getCorePoolSize(), config.getKeepAliveTimeMillis(), + TimeUnit.MILLISECONDS, q); this.threadPoolExecutor.allowCoreThreadTimeOut(config.allowCoreThreadTimeout()); // name the threads for this threadpool ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); @@ -261,8 +259,7 @@ public class ExecutorService { } /** - * Submit the event to the queue for handling. - * @param event + * Submit the event to the queue for handling. n */ void submit(final EventHandler event) { // If there is a listener for this type, make sure we call the before @@ -286,18 +283,17 @@ public class ExecutorService { LOG.warn("Non-EventHandler " + r + " queued in " + name); continue; } - queuedEvents.add((EventHandler)r); + queuedEvents.add((EventHandler) r); } List running = Lists.newArrayList(); - for (Map.Entry e : - threadPoolExecutor.getRunningTasks().entrySet()) { + for (Map.Entry e : threadPoolExecutor.getRunningTasks().entrySet()) { Runnable r = e.getValue(); if (!(r instanceof EventHandler)) { LOG.warn("Non-EventHandler " + r + " running in " + name); continue; } - running.add(new RunningEventStatus(e.getKey(), (EventHandler)r)); + running.add(new RunningEventStatus(e.getKey(), (EventHandler) r)); } return new ExecutorStatus(this, queuedEvents, running); @@ -305,14 +301,14 @@ public class ExecutorService { } /** - * A subclass of ThreadPoolExecutor that keeps track of the Runnables that - * are executing at any given point in time. + * A subclass of ThreadPoolExecutor that keeps track of the Runnables that are executing at any + * given point in time. */ static class TrackingThreadPoolExecutor extends ThreadPoolExecutor { private ConcurrentMap running = Maps.newConcurrentMap(); - public TrackingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, - long keepAliveTime, TimeUnit unit, BlockingQueue workQueue) { + public TrackingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, + TimeUnit unit, BlockingQueue workQueue) { super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue); } @@ -330,10 +326,9 @@ public class ExecutorService { } /** - * @return a map of the threads currently running tasks - * inside this executor. Each key is an active thread, - * and the value is the task that is currently running. - * Note that this is not a stable snapshot of the map. + * @return a map of the threads currently running tasks inside this executor. Each key is an + * active thread, and the value is the task that is currently running. Note that this is + * not a stable snapshot of the map. */ public ConcurrentMap getRunningTasks() { return running; @@ -341,37 +336,31 @@ public class ExecutorService { } /** - * A snapshot of the status of a particular executor. This includes - * the contents of the executor's pending queue, as well as the - * threads and events currently being processed. 
- * - * This is a consistent snapshot that is immutable once constructed. + * A snapshot of the status of a particular executor. This includes the contents of the executor's + * pending queue, as well as the threads and events currently being processed. This is a + * consistent snapshot that is immutable once constructed. */ public static class ExecutorStatus { final Executor executor; final List queuedEvents; final List running; - ExecutorStatus(Executor executor, - List queuedEvents, - List running) { + ExecutorStatus(Executor executor, List queuedEvents, + List running) { this.executor = executor; this.queuedEvents = queuedEvents; this.running = running; } /** - * Dump a textual representation of the executor's status - * to the given writer. - * - * @param out the stream to write to + * Dump a textual representation of the executor's status to the given writer. + * @param out the stream to write to * @param indent a string prefix for each line, used for indentation */ public void dumpTo(Writer out, String indent) throws IOException { out.write(indent + "Status for executor: " + executor + "\n"); out.write(indent + "=======================================\n"); - out.write(indent + queuedEvents.size() + " events queued, " + - running.size() + " running\n"); + out.write(indent + queuedEvents.size() + " events queued, " + running.size() + " running\n"); if (!queuedEvents.isEmpty()) { out.write(indent + "Queued:\n"); for (EventHandler e : queuedEvents) { @@ -382,11 +371,9 @@ public class ExecutorService { if (!running.isEmpty()) { out.write(indent + "Running:\n"); for (RunningEventStatus stat : running) { - out.write(indent + " Running on thread '" + - stat.threadInfo.getThreadName() + - "': " + stat.event + "\n"); - out.write(ThreadMonitoring.formatThreadInfo( - stat.threadInfo, indent + " ")); + out.write(indent + " Running on thread '" + stat.threadInfo.getThreadName() + "': " + + stat.event + "\n"); + out.write(ThreadMonitoring.formatThreadInfo(stat.threadInfo, indent + " ")); out.write("\n"); } } @@ -395,8 +382,7 @@ public class ExecutorService { } /** - * The status of a particular event that is in the middle of being - * handled by an executor. + * The status of a particular event that is in the middle of being handled by an executor. */ public static class RunningEventStatus { final ThreadInfo threadInfo; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index ea831221752..990619572d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,35 +20,35 @@ package org.apache.hadoop.hbase.executor; import org.apache.yetus.audience.InterfaceAudience; /** - * The following is a list of all executor types, both those that run in the - * master and those that run in the regionserver. + * The following is a list of all executor types, both those that run in the master and those that + * run in the regionserver. 
*/ @InterfaceAudience.Private public enum ExecutorType { // Master executor services - MASTER_CLOSE_REGION (1), - MASTER_OPEN_REGION (2), - MASTER_SERVER_OPERATIONS (3), - MASTER_TABLE_OPERATIONS (4), - MASTER_RS_SHUTDOWN (5), - MASTER_META_SERVER_OPERATIONS (6), - M_LOG_REPLAY_OPS (7), - MASTER_SNAPSHOT_OPERATIONS (8), - MASTER_MERGE_OPERATIONS (9), + MASTER_CLOSE_REGION(1), + MASTER_OPEN_REGION(2), + MASTER_SERVER_OPERATIONS(3), + MASTER_TABLE_OPERATIONS(4), + MASTER_RS_SHUTDOWN(5), + MASTER_META_SERVER_OPERATIONS(6), + M_LOG_REPLAY_OPS(7), + MASTER_SNAPSHOT_OPERATIONS(8), + MASTER_MERGE_OPERATIONS(9), // RegionServer executor services - RS_OPEN_REGION (20), - RS_OPEN_ROOT (21), - RS_OPEN_META (22), - RS_CLOSE_REGION (23), - RS_CLOSE_ROOT (24), - RS_CLOSE_META (25), - RS_PARALLEL_SEEK (26), - RS_LOG_REPLAY_OPS (27), - RS_REGION_REPLICA_FLUSH_OPS (28), - RS_COMPACTED_FILES_DISCHARGER (29), - RS_OPEN_PRIORITY_REGION (30), + RS_OPEN_REGION(20), + RS_OPEN_ROOT(21), + RS_OPEN_META(22), + RS_CLOSE_REGION(23), + RS_CLOSE_ROOT(24), + RS_CLOSE_META(25), + RS_PARALLEL_SEEK(26), + RS_LOG_REPLAY_OPS(27), + RS_REGION_REPLICA_FLUSH_OPS(28), + RS_COMPACTED_FILES_DISCHARGER(29), + RS_OPEN_PRIORITY_REGION(30), RS_REFRESH_PEER(31), RS_SWITCH_RPC_THROTTLE(33), RS_IN_MEMORY_COMPACTION(34), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java index 392e5d5d15c..85b815ded5e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.favored; import static org.apache.hadoop.hbase.ServerName.NON_STARTCODE; @@ -61,11 +59,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.FavoredNodes; /** - * Helper class for {@link FavoredNodeLoadBalancer} that has all the intelligence for racks, - * meta scans, etc. Instantiated by the {@link FavoredNodeLoadBalancer} when needed (from - * within calls like {@link FavoredNodeLoadBalancer#randomAssignment(RegionInfo, List)}). - * All updates to favored nodes should only be done from {@link FavoredNodesManager} and not - * through this helper class (except for tests). + * Helper class for {@link FavoredNodeLoadBalancer} that has all the intelligence for racks, meta + * scans, etc. Instantiated by the {@link FavoredNodeLoadBalancer} when needed (from within calls + * like {@link FavoredNodeLoadBalancer#randomAssignment(RegionInfo, List)}). All updates to favored + * nodes should only be done from {@link FavoredNodesManager} and not through this helper class + * (except for tests). */ @InterfaceAudience.Private public class FavoredNodeAssignmentHelper { @@ -77,7 +75,7 @@ public class FavoredNodeAssignmentHelper { // region server entries might not match with that is in servers. 
private Map regionServerToRackMap; private List servers; - public static final byte [] FAVOREDNODES_QUALIFIER = Bytes.toBytes("fn"); + public static final byte[] FAVOREDNODES_QUALIFIER = Bytes.toBytes("fn"); public final static short FAVORED_NODES_NUM = 3; public final static short MAX_ATTEMPTS_FN_GENERATION = 10; @@ -86,7 +84,7 @@ public class FavoredNodeAssignmentHelper { } public FavoredNodeAssignmentHelper(final List servers, - final RackManager rackManager) { + final RackManager rackManager) { this.servers = servers; this.rackManager = rackManager; this.rackToRegionServerMap = new HashMap<>(); @@ -119,7 +117,7 @@ public class FavoredNodeAssignmentHelper { /** * Update meta table with favored nodes info * @param regionToFavoredNodes map of RegionInfo's to their favored nodes - * @param connection connection to be used + * @param connection connection to be used */ public static void updateMetaWithFavoredNodesInfo( Map> regionToFavoredNodes, Connection connection) @@ -185,8 +183,7 @@ public class FavoredNodeAssignmentHelper { } /** - * @param serverAddrList - * @return PB'ed bytes of {@link FavoredNodes} generated by the server list. + * n * @return PB'ed bytes of {@link FavoredNodes} generated by the server list. */ public static byte[] getFavoredNodes(List serverAddrList) { FavoredNodes.Builder f = FavoredNodes.newBuilder(); @@ -210,12 +207,12 @@ public class FavoredNodeAssignmentHelper { // placement could be r2:s5, , r4:s5, r1:s5, r2:s6, ... // The regions should be distributed proportionately to the racksizes public void placePrimaryRSAsRoundRobin(Map> assignmentMap, - Map primaryRSMap, List regions) { + Map primaryRSMap, List regions) { List rackList = new ArrayList<>(rackToRegionServerMap.size()); rackList.addAll(rackToRegionServerMap.keySet()); int rackIndex = ThreadLocalRandom.current().nextInt(rackList.size()); int maxRackSize = 0; - for (Map.Entry> r : rackToRegionServerMap.entrySet()) { + for (Map.Entry> r : rackToRegionServerMap.entrySet()) { if (r.getValue().size() > maxRackSize) { maxRackSize = r.getValue().size(); } @@ -232,7 +229,7 @@ public class FavoredNodeAssignmentHelper { // Get the server list for the current rack currentServerList = rackToRegionServerMap.get(rackName); - if (serverIndex >= currentServerList.size()) { //not enough machines in this rack + if (serverIndex >= currentServerList.size()) { // not enough machines in this rack if (numIterations % rackList.size() == 0) { if (++serverIndex >= maxRackSize) serverIndex = 0; } @@ -266,8 +263,8 @@ public class FavoredNodeAssignmentHelper { } } - public Map placeSecondaryAndTertiaryRS( - Map primaryRSMap) { + public Map + placeSecondaryAndTertiaryRS(Map primaryRSMap) { Map secondaryAndTertiaryMap = new HashMap<>(); for (Map.Entry entry : primaryRSMap.entrySet()) { // Get the target region and its primary region server rack @@ -279,11 +276,11 @@ public class FavoredNodeAssignmentHelper { if (favoredNodes != null) { secondaryAndTertiaryMap.put(regionInfo, favoredNodes); LOG.debug("Place the secondary and tertiary region server for region " - + regionInfo.getRegionNameAsString()); + + regionInfo.getRegionNameAsString()); } } catch (Exception e) { - LOG.warn("Cannot place the favored nodes for region " + - regionInfo.getRegionNameAsString() + " because " + e, e); + LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + + " because " + e, e); continue; } } @@ -291,7 +288,7 @@ public class FavoredNodeAssignmentHelper { } public ServerName[] getSecondaryAndTertiary(RegionInfo 
regionInfo, ServerName primaryRS) - throws IOException { + throws IOException { ServerName[] favoredNodes;// Get the rack for the primary region server String primaryRack = getRackOfServer(primaryRS); @@ -304,8 +301,8 @@ public class FavoredNodeAssignmentHelper { return favoredNodes; } - private Map> mapRSToPrimaries( - Map primaryRSMap) { + private Map> + mapRSToPrimaries(Map primaryRSMap) { Map> primaryServerMap = new HashMap<>(); for (Entry e : primaryRSMap.entrySet()) { Set currentSet = primaryServerMap.get(e.getValue()); @@ -319,16 +316,13 @@ public class FavoredNodeAssignmentHelper { } /** - * For regions that share the primary, avoid placing the secondary and tertiary - * on a same RS. Used for generating new assignments for the - * primary/secondary/tertiary RegionServers - * @param primaryRSMap - * @return the map of regions to the servers the region-files should be hosted on + * For regions that share the primary, avoid placing the secondary and tertiary on a same RS. Used + * for generating new assignments for the primary/secondary/tertiary RegionServers n * @return the + * map of regions to the servers the region-files should be hosted on */ - public Map placeSecondaryAndTertiaryWithRestrictions( - Map primaryRSMap) { - Map> serverToPrimaries = - mapRSToPrimaries(primaryRSMap); + public Map + placeSecondaryAndTertiaryWithRestrictions(Map primaryRSMap) { + Map> serverToPrimaries = mapRSToPrimaries(primaryRSMap); Map secondaryAndTertiaryMap = new HashMap<>(); for (Entry entry : primaryRSMap.entrySet()) { @@ -344,17 +338,17 @@ public class FavoredNodeAssignmentHelper { // from the same rack favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack); } else { - favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, - secondaryAndTertiaryMap, primaryRack, primaryRS, regionInfo); + favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, secondaryAndTertiaryMap, + primaryRack, primaryRS, regionInfo); } if (favoredNodes != null) { secondaryAndTertiaryMap.put(regionInfo, favoredNodes); LOG.debug("Place the secondary and tertiary region server for region " - + regionInfo.getRegionNameAsString()); + + regionInfo.getRegionNameAsString()); } } catch (Exception e) { - LOG.warn("Cannot place the favored nodes for region " - + regionInfo.getRegionNameAsString() + " because " + e, e); + LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + + " because " + e, e); continue; } } @@ -362,9 +356,9 @@ public class FavoredNodeAssignmentHelper { } private ServerName[] multiRackCaseWithRestrictions( - Map> serverToPrimaries, - Map secondaryAndTertiaryMap, - String primaryRack, ServerName primaryRS, RegionInfo regionInfo) throws IOException { + Map> serverToPrimaries, + Map secondaryAndTertiaryMap, String primaryRack, ServerName primaryRS, + RegionInfo regionInfo) throws IOException { // Random to choose the secondary and tertiary region server // from another rack to place the secondary and tertiary // Random to choose one rack except for the current rack @@ -396,8 +390,7 @@ public class FavoredNodeAssignmentHelper { } } } - if (skipServerSet.size() + 2 <= serverSet.size()) - break; + if (skipServerSet.size() + 2 <= serverSet.size()) break; skipServerSet.clear(); rackSkipSet.add(secondaryRack); // we used all racks @@ -419,9 +412,8 @@ public class FavoredNodeAssignmentHelper { ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet); if (secondaryRS == null || tertiaryRS == null) { - LOG.error("Cannot place the secondary and 
tertiary" - + " region server for region " - + regionInfo.getRegionNameAsString()); + LOG.error("Cannot place the secondary and tertiary" + " region server for region " + + regionInfo.getRegionNameAsString()); } // Create the secondary and tertiary pair favoredNodes = new ServerName[2]; @@ -450,9 +442,8 @@ public class FavoredNodeAssignmentHelper { return favoredNodes; } - private ServerName[] singleRackCase(RegionInfo regionInfo, - ServerName primaryRS, - String primaryRack) throws IOException { + private ServerName[] singleRackCase(RegionInfo regionInfo, ServerName primaryRS, + String primaryRack) throws IOException { // Single rack case: have to pick the secondary and tertiary // from the same rack List serverList = getServersFromRack(primaryRack); @@ -463,45 +454,43 @@ public class FavoredNodeAssignmentHelper { } else { // Randomly select two region servers from the server list and make sure // they are not overlap with the primary region server; - Set serverSkipSet = new HashSet<>(); - serverSkipSet.add(primaryRS); + Set serverSkipSet = new HashSet<>(); + serverSkipSet.add(primaryRS); - // Place the secondary RS - ServerName secondaryRS = getOneRandomServer(primaryRack, serverSkipSet); - // Skip the secondary for the tertiary placement - serverSkipSet.add(secondaryRS); - ServerName tertiaryRS = getOneRandomServer(primaryRack, serverSkipSet); + // Place the secondary RS + ServerName secondaryRS = getOneRandomServer(primaryRack, serverSkipSet); + // Skip the secondary for the tertiary placement + serverSkipSet.add(secondaryRS); + ServerName tertiaryRS = getOneRandomServer(primaryRack, serverSkipSet); - if (secondaryRS == null || tertiaryRS == null) { - LOG.error("Cannot place the secondary, tertiary favored node for region " + - regionInfo.getRegionNameAsString()); - } - // Create the secondary and tertiary pair - ServerName[] favoredNodes = new ServerName[2]; - favoredNodes[0] = secondaryRS; - favoredNodes[1] = tertiaryRS; - return favoredNodes; + if (secondaryRS == null || tertiaryRS == null) { + LOG.error("Cannot place the secondary, tertiary favored node for region " + + regionInfo.getRegionNameAsString()); + } + // Create the secondary and tertiary pair + ServerName[] favoredNodes = new ServerName[2]; + favoredNodes[0] = secondaryRS; + favoredNodes[1] = tertiaryRS; + return favoredNodes; } } /** - * Place secondary and tertiary nodes in a multi rack case. - * If there are only two racks, then we try the place the secondary - * and tertiary on different rack than primary. But if the other rack has - * only one region server, then we place primary and tertiary on one rack - * and secondary on another. The aim is two distribute the three favored nodes - * on >= 2 racks. - * TODO: see how we can use generateMissingFavoredNodeMultiRack API here - * @param regionInfo Region for which we are trying to generate FN - * @param primaryRS The primary favored node. + * Place secondary and tertiary nodes in a multi rack case. If there are only two racks, then we + * try the place the secondary and tertiary on different rack than primary. But if the other rack + * has only one region server, then we place primary and tertiary on one rack and secondary on + * another. The aim is two distribute the three favored nodes on >= 2 racks. TODO: see how we can + * use generateMissingFavoredNodeMultiRack API here + * @param regionInfo Region for which we are trying to generate FN + * @param primaryRS The primary favored node. * @param primaryRack The rack of the primary favored node. 
* @return Array containing secondary and tertiary favored nodes. * @throws IOException Signals that an I/O exception has occurred. */ private ServerName[] multiRackCase(RegionInfo regionInfo, ServerName primaryRS, - String primaryRack) throws IOException { + String primaryRack) throws IOException { - ListfavoredNodes = Lists.newArrayList(primaryRS); + List favoredNodes = Lists.newArrayList(primaryRS); // Create the secondary and tertiary pair ServerName secondaryRS = generateMissingFavoredNodeMultiRack(favoredNodes); favoredNodes.add(secondaryRS); @@ -521,7 +510,7 @@ public class FavoredNodeAssignmentHelper { tertiaryRS = generateMissingFavoredNode(Lists.newArrayList(primaryRS, secondaryRS)); } } - return new ServerName[]{ secondaryRS, tertiaryRS }; + return new ServerName[] { secondaryRS, tertiaryRS }; } public boolean canPlaceFavoredNodes() { @@ -538,15 +527,13 @@ public class FavoredNodeAssignmentHelper { /** * Gets a random server from the specified rack and skips anything specified. - - * @param rack rack from a server is needed + * @param rack rack from a server is needed * @param skipServerSet the server shouldn't belong to this set */ protected ServerName getOneRandomServer(String rack, Set skipServerSet) { // Is the rack valid? Do we recognize it? - if (rack == null || getServersFromRack(rack) == null || - getServersFromRack(rack).isEmpty()) { + if (rack == null || getServersFromRack(rack) == null || getServersFromRack(rack).isEmpty()) { return null; } @@ -613,12 +600,12 @@ public class FavoredNodeAssignmentHelper { } /* - * Generates a missing favored node based on the input favored nodes. This helps to generate - * new FN when there is already 2 FN and we need a third one. For eg, while generating new FN - * for split daughters after inheriting 2 FN from the parent. If the cluster has only one rack - * it generates from the same rack. If the cluster has multiple racks, then it ensures the new - * FN respects the rack constraints similar to HDFS. For eg: if there are 3 FN, they will be - * spread across 2 racks. + * Generates a missing favored node based on the input favored nodes. This helps to generate new + * FN when there is already 2 FN and we need a third one. For eg, while generating new FN for + * split daughters after inheriting 2 FN from the parent. If the cluster has only one rack it + * generates from the same rack. If the cluster has multiple racks, then it ensures the new FN + * respects the rack constraints similar to HDFS. For eg: if there are 3 FN, they will be spread + * across 2 racks. */ public ServerName generateMissingFavoredNode(List favoredNodes) throws IOException { if (this.uniqueRackList.size() == 1) { @@ -629,7 +616,7 @@ public class FavoredNodeAssignmentHelper { } public ServerName generateMissingFavoredNode(List favoredNodes, - List excludeNodes) throws IOException { + List excludeNodes) throws IOException { if (this.uniqueRackList.size() == 1) { return generateMissingFavoredNodeSingleRack(favoredNodes, excludeNodes); } else { @@ -642,7 +629,7 @@ public class FavoredNodeAssignmentHelper { * when we would like to find a replacement node. 
*/ private ServerName generateMissingFavoredNodeSingleRack(List favoredNodes, - List excludeNodes) throws IOException { + List excludeNodes) throws IOException { ServerName newServer = null; Set excludeFNSet = Sets.newHashSet(favoredNodes); if (excludeNodes != null && excludeNodes.size() > 0) { @@ -655,20 +642,19 @@ public class FavoredNodeAssignmentHelper { } private ServerName generateMissingFavoredNodeMultiRack(List favoredNodes) - throws IOException { + throws IOException { return generateMissingFavoredNodeMultiRack(favoredNodes, null); } /* - * Generates a missing FN based on the input favoredNodes and also the nodes to be skipped. - * - * Get the current layout of favored nodes arrangement and nodes to be excluded and get a - * random node that goes with HDFS block placement. Eg: If the existing nodes are on one rack, - * generate one from another rack. We exclude as much as possible so the random selection - * has more chance to generate a node within a few iterations, ideally 1. + * Generates a missing FN based on the input favoredNodes and also the nodes to be skipped. Get + * the current layout of favored nodes arrangement and nodes to be excluded and get a random node + * that goes with HDFS block placement. Eg: If the existing nodes are on one rack, generate one + * from another rack. We exclude as much as possible so the random selection has more chance to + * generate a node within a few iterations, ideally 1. */ private ServerName generateMissingFavoredNodeMultiRack(List favoredNodes, - List excludeNodes) throws IOException { + List excludeNodes) throws IOException { Set racks = Sets.newHashSet(); Map> rackToFNMapping = new HashMap<>(); @@ -690,8 +676,8 @@ public class FavoredNodeAssignmentHelper { Set skipRackSet = Sets.newHashSet(); /* - * If both the FN are from the same rack, then we don't want to generate another FN on the - * same rack. If that rack fails, the region would be unavailable. + * If both the FN are from the same rack, then we don't want to generate another FN on the same + * rack. If that rack fails, the region would be unavailable. */ if (racks.size() == 1 && favoredNodes.size() > 1) { skipRackSet.add(racks.iterator().next()); @@ -702,8 +688,10 @@ public class FavoredNodeAssignmentHelper { * reduce the number of iterations for FN selection. */ for (String rack : racks) { - if (getServersFromRack(rack) != null && - rackToFNMapping.get(rack).size() == getServersFromRack(rack).size()) { + if ( + getServersFromRack(rack) != null + && rackToFNMapping.get(rack).size() == getServersFromRack(rack).size() + ) { skipRackSet.add(rack); } } @@ -728,23 +716,22 @@ public class FavoredNodeAssignmentHelper { if (newServer == null) { if (LOG.isTraceEnabled()) { - LOG.trace(String.format("Unable to generate additional favored nodes for %s after " + LOG.trace(String.format( + "Unable to generate additional favored nodes for %s after " + "considering racks %s and skip rack %s with a unique rack list of %s and rack " + "to RS map of %s and RS to rack map of %s", StringUtils.join(favoredNodes, ","), randomRacks, skipRackSet, uniqueRackList, rackToRegionServerMap, regionServerToRackMap)); } - throw new IOException(" Unable to generate additional favored nodes for " - + StringUtils.join(favoredNodes, ",")); + throw new IOException( + " Unable to generate additional favored nodes for " + StringUtils.join(favoredNodes, ",")); } return newServer; } /* - * Generate favored nodes for a region. 
- * - * Choose a random server as primary and then choose secondary and tertiary FN so its spread - * across two racks. + * Generate favored nodes for a region. Choose a random server as primary and then choose + * secondary and tertiary FN so its spread across two racks. */ public List generateFavoredNodes(RegionInfo hri) throws IOException { @@ -755,7 +742,7 @@ public class FavoredNodeAssignmentHelper { Map primaryRSMap = new HashMap<>(1); primaryRSMap.put(hri, primary); Map secondaryAndTertiaryRSMap = - placeSecondaryAndTertiaryRS(primaryRSMap); + placeSecondaryAndTertiaryRS(primaryRSMap); ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(hri); if (secondaryAndTertiaryNodes != null && secondaryAndTertiaryNodes.length == 2) { for (ServerName sn : secondaryAndTertiaryNodes) { @@ -768,8 +755,7 @@ public class FavoredNodeAssignmentHelper { } public Map> generateFavoredNodesRoundRobin( - Map> assignmentMap, List regions) - throws IOException { + Map> assignmentMap, List regions) throws IOException { if (regions.size() > 0) { if (canPlaceFavoredNodes()) { @@ -788,8 +774,8 @@ public class FavoredNodeAssignmentHelper { /* * Generate favored nodes for a set of regions when we know where they are currently hosted. */ - private Map> generateFavoredNodes( - Map primaryRSMap) { + private Map> + generateFavoredNodes(Map primaryRSMap) { Map> generatedFavNodes = new HashMap<>(); Map secondaryAndTertiaryRSMap = @@ -799,16 +785,14 @@ public class FavoredNodeAssignmentHelper { List favoredNodesForRegion = new ArrayList<>(FAVORED_NODES_NUM); RegionInfo region = entry.getKey(); ServerName primarySN = entry.getValue(); - favoredNodesForRegion.add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), - NON_STARTCODE)); + favoredNodesForRegion + .add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), NON_STARTCODE)); ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region); if (secondaryAndTertiaryNodes != null) { - favoredNodesForRegion.add(ServerName.valueOf( - secondaryAndTertiaryNodes[0].getHostname(), secondaryAndTertiaryNodes[0].getPort(), - NON_STARTCODE)); - favoredNodesForRegion.add(ServerName.valueOf( - secondaryAndTertiaryNodes[1].getHostname(), secondaryAndTertiaryNodes[1].getPort(), - NON_STARTCODE)); + favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), + secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE)); + favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), + secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE)); } generatedFavNodes.put(region, favoredNodesForRegion); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index da7d219fd8c..38aa7669c00 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.favored; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.PRIMARY; @@ -49,18 +48,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that - * assigns favored nodes for each region. There is a Primary RegionServer that hosts - * the region, and then there is Secondary and Tertiary RegionServers. Currently, the - * favored nodes information is used in creating HDFS files - the Primary RegionServer - * passes the primary, secondary, tertiary node addresses as hints to the - * DistributedFileSystem API for creating files on the filesystem. These nodes are - * treated as hints by the HDFS to place the blocks of the file. This alleviates the - * problem to do with reading from remote nodes (since we can make the Secondary - * RegionServer as the new Primary RegionServer) after a region is recovered. This - * should help provide consistent read latencies for the regions even when their - * primary region servers die. - * + * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that assigns favored + * nodes for each region. There is a Primary RegionServer that hosts the region, and then there is + * Secondary and Tertiary RegionServers. Currently, the favored nodes information is used in + * creating HDFS files - the Primary RegionServer passes the primary, secondary, tertiary node + * addresses as hints to the DistributedFileSystem API for creating files on the filesystem. These + * nodes are treated as hints by the HDFS to place the blocks of the file. This alleviates the + * problem to do with reading from remote nodes (since we can make the Secondary RegionServer as the + * new Primary RegionServer) after a region is recovered. This should help provide consistent read + * latencies for the regions even when their primary region servers die. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements FavoredNodesPromoter { @@ -76,7 +72,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored @Override protected List balanceTable(TableName tableName, - Map> loadOfOneTable) { + Map> loadOfOneTable) { // TODO. 
Look at is whether Stochastic loadbalancer can be integrated with this List plans = new ArrayList<>(); Map serverNameWithoutCodeToServerName = new HashMap<>(); @@ -105,16 +101,18 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(0)); if (destination == null) { // check whether the region is on secondary/tertiary - if (currentServerWithoutStartCode.equals(favoredNodes.get(1)) - || currentServerWithoutStartCode.equals(favoredNodes.get(2))) { + if ( + currentServerWithoutStartCode.equals(favoredNodes.get(1)) + || currentServerWithoutStartCode.equals(favoredNodes.get(2)) + ) { continue; } // the region is currently on none of the favored nodes // get it on one of them if possible ServerMetrics l1 = super.services.getServerManager() - .getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(1))); + .getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(1))); ServerMetrics l2 = super.services.getServerManager() - .getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(2))); + .getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(2))); if (l1 != null && l2 != null) { if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) { destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2)); @@ -140,43 +138,42 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored @Override @NonNull public Map> roundRobinAssignment(List regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { Map> assignmentMap; try { FavoredNodeAssignmentHelper assignmentHelper = - new FavoredNodeAssignmentHelper(servers, rackManager); + new FavoredNodeAssignmentHelper(servers, rackManager); assignmentHelper.initialize(); if (!assignmentHelper.canPlaceFavoredNodes()) { return super.roundRobinAssignment(regions, servers); } // Segregate the regions into two types: // 1. The regions that have favored node assignment, and where at least - // one of the favored node is still alive. In this case, try to adhere - // to the current favored nodes assignment as much as possible - i.e., - // if the current primary is gone, then make the secondary or tertiary - // as the new host for the region (based on their current load). - // Note that we don't change the favored - // node assignments here (even though one or more favored node is currently - // down). It is up to the balanceCluster to do this hard work. The HDFS - // can handle the fact that some nodes in the favored nodes hint is down - // It'd allocate some other DNs. In combination with stale settings for HDFS, - // we should be just fine. + // one of the favored node is still alive. In this case, try to adhere + // to the current favored nodes assignment as much as possible - i.e., + // if the current primary is gone, then make the secondary or tertiary + // as the new host for the region (based on their current load). + // Note that we don't change the favored + // node assignments here (even though one or more favored node is currently + // down). It is up to the balanceCluster to do this hard work. The HDFS + // can handle the fact that some nodes in the favored nodes hint is down + // It'd allocate some other DNs. In combination with stale settings for HDFS, + // we should be just fine. // 2. The regions that currently don't have favored node assignment. We will - // need to come up with favored nodes assignments for them. 
The corner case - // in (1) above is that all the nodes are unavailable and in that case, we - // will note that this region doesn't have favored nodes. - Pair>, List> segregatedRegions = - segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers); - Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); + // need to come up with favored nodes assignments for them. The corner case + // in (1) above is that all the nodes are unavailable and in that case, we + // will note that this region doesn't have favored nodes. + Pair>, List> segregatedRegions = + segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers); + Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); List regionsWithNoFavoredNodes = segregatedRegions.getSecond(); assignmentMap = new HashMap<>(); - roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes, - servers); + roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes, servers); // merge the assignment maps assignmentMap.putAll(regionsWithFavoredNodesMap); } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes assignment " + ex + - " Falling back to regular assignment"); + LOG.warn("Encountered exception while doing favored-nodes assignment " + ex + + " Falling back to regular assignment"); assignmentMap = super.roundRobinAssignment(regions, servers); } return assignmentMap; @@ -184,14 +181,16 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored @Override public ServerName randomAssignment(RegionInfo regionInfo, List servers) - throws HBaseIOException { + throws HBaseIOException { try { FavoredNodeAssignmentHelper assignmentHelper = - new FavoredNodeAssignmentHelper(servers, rackManager); + new FavoredNodeAssignmentHelper(servers, rackManager); assignmentHelper.initialize(); ServerName primary = super.randomAssignment(regionInfo, servers); - if (!FavoredNodesManager.isFavoredNodeApplicable(regionInfo) - || !assignmentHelper.canPlaceFavoredNodes()) { + if ( + !FavoredNodesManager.isFavoredNodeApplicable(regionInfo) + || !assignmentHelper.canPlaceFavoredNodes() + ) { return primary; } List favoredNodes = fnm.getFavoredNodes(regionInfo); @@ -213,17 +212,18 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap); return primary; } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex + - " Falling back to regular assignment"); + LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex + + " Falling back to regular assignment"); return super.randomAssignment(regionInfo, servers); } } private Pair>, List> - segregateRegionsAndAssignRegionsWithFavoredNodes(List regions, + segregateRegionsAndAssignRegionsWithFavoredNodes(List regions, List availableServers) { - Map> assignmentMapForFavoredNodes = new HashMap<>(regions.size() / 2); - List regionsWithNoFavoredNodes = new ArrayList<>(regions.size()/2); + Map> assignmentMapForFavoredNodes = + new HashMap<>(regions.size() / 2); + List regionsWithNoFavoredNodes = new ArrayList<>(regions.size() / 2); for (RegionInfo region : regions) { List favoredNodes = fnm.getFavoredNodes(region); ServerName primaryHost = null; @@ -234,7 +234,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored ServerName serverWithLegitStartCode = availableServersContains(availableServers, s); 
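The balanceTable() logic earlier in this file encodes a tie-break that is easy to miss in the formatting noise: when the primary favored node is gone, the region is moved to whichever of the secondary or tertiary currently carries fewer regions. A standalone sketch of that rule follows; the class and method names are hypothetical and the only assumption is that ServerMetrics is available for both candidates.

    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;

    final class LessLoadedFavoredNode {
      /** Picks the favored node hosting fewer regions; falls back to whichever has metrics. */
      static ServerName choose(ServerName secondary, ServerMetrics secondaryLoad,
          ServerName tertiary, ServerMetrics tertiaryLoad) {
        if (secondaryLoad == null) {
          return tertiaryLoad == null ? null : tertiary;
        }
        if (tertiaryLoad == null) {
          return secondary;
        }
        // getRegionMetrics() maps region name -> RegionMetrics, so its size is the region count.
        return secondaryLoad.getRegionMetrics().size() <= tertiaryLoad.getRegionMetrics().size()
          ? secondary
          : tertiary;
      }
    }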
if (serverWithLegitStartCode != null) { FavoredNodesPlan.Position position = - FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); + FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); if (Position.PRIMARY.equals(position)) { primaryHost = serverWithLegitStartCode; } else if (Position.SECONDARY.equals(position)) { @@ -244,11 +244,11 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } } } - assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, - primaryHost, secondaryHost, tertiaryHost); + assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, primaryHost, + secondaryHost, tertiaryHost); } if (primaryHost == null && secondaryHost == null && tertiaryHost == null) { - //all favored nodes unavailable + // all favored nodes unavailable regionsWithNoFavoredNodes.add(region); } } @@ -267,9 +267,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored return null; } - private void assignRegionToAvailableFavoredNode(Map> assignmentMapForFavoredNodes, RegionInfo region, ServerName primaryHost, - ServerName secondaryHost, ServerName tertiaryHost) { + private void assignRegionToAvailableFavoredNode( + Map> assignmentMapForFavoredNodes, RegionInfo region, + ServerName primaryHost, ServerName secondaryHost, ServerName tertiaryHost) { if (primaryHost != null) { addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost); } else if (secondaryHost != null && tertiaryHost != null) { @@ -292,7 +292,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } private void addRegionToMap(Map> assignmentMapForFavoredNodes, - RegionInfo region, ServerName host) { + RegionInfo region, ServerName host) { List regionsOnServer = null; if ((regionsOnServer = assignmentMapForFavoredNodes.get(host)) == null) { regionsOnServer = new ArrayList<>(); @@ -306,8 +306,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelper, - Map> assignmentMap, - List regions, List servers) throws IOException { + Map> assignmentMap, List regions, + List servers) throws IOException { Map primaryRSMap = new HashMap<>(); // figure the primary RSs assignmentHelper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); @@ -315,11 +315,11 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } private void assignSecondaryAndTertiaryNodesForRegion( - FavoredNodeAssignmentHelper assignmentHelper, - List regions, Map primaryRSMap) throws IOException { + FavoredNodeAssignmentHelper assignmentHelper, List regions, + Map primaryRSMap) throws IOException { // figure the secondary and tertiary RSs Map secondaryAndTertiaryRSMap = - assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap); + assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap); Map> regionFNMap = Maps.newHashMap(); // now record all the assignments so that we can serve queries later @@ -328,14 +328,14 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored // We don't care about the startcode; but only the hostname really List favoredNodesForRegion = new ArrayList<>(3); ServerName sn = primaryRSMap.get(region); - favoredNodesForRegion.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), - ServerName.NON_STARTCODE)); + favoredNodesForRegion + .add(ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE)); ServerName[] 
secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region); if (secondaryAndTertiaryNodes != null) { favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), - secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE)); + secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE)); favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), - secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE)); + secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE)); } regionFNMap.put(region, favoredNodesForRegion); } @@ -343,18 +343,16 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } /* - * Generate Favored Nodes for daughters during region split. - * - * If the parent does not have FN, regenerates them for the daughters. - * - * If the parent has FN, inherit two FN from parent for each daughter and generate the remaining. - * The primary FN for both the daughters should be the same as parent. Inherit the secondary - * FN from the parent but keep it different for each daughter. Choose the remaining FN - * randomly. This would give us better distribution over a period of time after enough splits. + * Generate Favored Nodes for daughters during region split. If the parent does not have FN, + * regenerates them for the daughters. If the parent has FN, inherit two FN from parent for each + * daughter and generate the remaining. The primary FN for both the daughters should be the same + * as parent. Inherit the secondary FN from the parent but keep it different for each daughter. + * Choose the remaining FN randomly. This would give us better distribution over a period of time + * after enough splits. */ @Override public void generateFavoredNodesForDaughter(List servers, RegionInfo parent, - RegionInfo regionA, RegionInfo regionB) throws IOException { + RegionInfo regionA, RegionInfo regionB) throws IOException { Map> result = new HashMap<>(); FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager); @@ -363,7 +361,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored List parentFavoredNodes = getFavoredNodes(parent); if (parentFavoredNodes == null) { LOG.debug("Unable to find favored nodes for parent, " + parent - + " generating new favored nodes for daughter"); + + " generating new favored nodes for daughter"); result.put(regionA, helper.generateFavoredNodes(regionA)); result.put(regionB, helper.generateFavoredNodes(regionB)); @@ -371,12 +369,12 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored // Lets get the primary and secondary from parent for regionA Set regionAFN = - getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY); + getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY); result.put(regionA, Lists.newArrayList(regionAFN)); // Lets get the primary and tertiary from parent for regionB Set regionBFN = - getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY); + getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY); result.put(regionB, Lists.newArrayList(regionBFN)); } @@ -384,8 +382,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } private Set getInheritedFNForDaughter(FavoredNodeAssignmentHelper helper, - List parentFavoredNodes, Position primary, Position secondary) - throws IOException { + List parentFavoredNodes, Position primary, Position 
secondary) throws IOException { Set daughterFN = Sets.newLinkedHashSet(); if (parentFavoredNodes.size() >= primary.ordinal()) { @@ -404,12 +401,12 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } /* - * Generate favored nodes for a region during merge. Choose the FN from one of the sources to - * keep it simple. + * Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep + * it simple. */ @Override - public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) - throws IOException { + public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) + throws IOException { Map> regionFNMap = Maps.newHashMap(); regionFNMap.put(merged, getFavoredNodes(mergeParents[0])); fnm.updateFavoredNodes(regionFNMap); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java index 16f0934a291..a4c5120c008 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java @@ -49,13 +49,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * FavoredNodesManager is responsible for maintaining favored nodes info in internal cache and - * META table. Its the centralized store for all favored nodes information. All reads and updates - * should be done through this class. There should only be one instance of - * {@link FavoredNodesManager} in Master. {@link FavoredNodesPlan} and favored node information - * from {@link SnapshotOfRegionAssignmentFromMeta} should not be used outside this class (except - * for tools that only read or fortest cases). All other classes including Favored balancers - * and {@link FavoredNodeAssignmentHelper} should use {@link FavoredNodesManager} for any + * FavoredNodesManager is responsible for maintaining favored nodes info in internal cache and META + * table. Its the centralized store for all favored nodes information. All reads and updates should + * be done through this class. There should only be one instance of {@link FavoredNodesManager} in + * Master. {@link FavoredNodesPlan} and favored node information from + * {@link SnapshotOfRegionAssignmentFromMeta} should not be used outside this class (except for + * tools that only read or fortest cases). All other classes including Favored balancers and + * {@link FavoredNodeAssignmentHelper} should use {@link FavoredNodesManager} for any * read/write/deletes to favored nodes. */ @InterfaceAudience.Private @@ -87,13 +87,13 @@ public class FavoredNodesManager { public synchronized void initialize(SnapshotOfRegionAssignmentFromMeta snapshot) { // Add snapshot to structures made on creation. Current structures may have picked // up data between construction and the scan of meta needed before this method - // is called. See HBASE-23737 "[Flakey Tests] TestFavoredNodeTableImport fails 30% of the time" - this.globalFavoredNodesAssignmentPlan. - updateFavoredNodesMap(snapshot.getExistingAssignmentPlan()); + // is called. 
See HBASE-23737 "[Flakey Tests] TestFavoredNodeTableImport fails 30% of the time" + this.globalFavoredNodesAssignmentPlan + .updateFavoredNodesMap(snapshot.getExistingAssignmentPlan()); primaryRSToRegionMap.putAll(snapshot.getPrimaryToRegionInfoMap()); secondaryRSToRegionMap.putAll(snapshot.getSecondaryToRegionInfoMap()); teritiaryRSToRegionMap.putAll(snapshot.getTertiaryToRegionInfoMap()); - datanodeDataTransferPort= getDataNodePort(); + datanodeDataTransferPort = getDataNodePort(); } public int getDataNodePort() { @@ -101,9 +101,8 @@ public class FavoredNodesManager { Configuration dnConf = new HdfsConfiguration(masterServices.getConfiguration()); - int dnPort = NetUtils.createSocketAddr( - dnConf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, - DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort(); + int dnPort = NetUtils.createSocketAddr(dnConf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, + DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort(); LOG.debug("Loaded default datanode port for FN: " + datanodeDataTransferPort); return dnPort; } @@ -113,8 +112,8 @@ public class FavoredNodesManager { } /* - * Favored nodes are not applicable for system tables. We will use this to check before - * we apply any favored nodes logic on a region. + * Favored nodes are not applicable for system tables. We will use this to check before we apply + * any favored nodes logic on a region. */ public static boolean isFavoredNodeApplicable(RegionInfo regionInfo) { return !regionInfo.getTable().isSystemTable(); @@ -129,9 +128,9 @@ public class FavoredNodesManager { } /* - * This should only be used when sending FN information to the region servers. Instead of - * sending the region server port, we use the datanode port. This helps in centralizing the DN - * port logic in Master. The RS uses the port from the favored node list as hints. + * This should only be used when sending FN information to the region servers. Instead of sending + * the region server port, we use the datanode port. This helps in centralizing the DN port logic + * in Master. The RS uses the port from the favored node list as hints. 
*/ public synchronized List getFavoredNodesWithDNPort(RegionInfo regionInfo) { if (getFavoredNodes(regionInfo) == null) { @@ -140,14 +139,14 @@ public class FavoredNodesManager { List fnWithDNPort = Lists.newArrayList(); for (ServerName sn : getFavoredNodes(regionInfo)) { - fnWithDNPort.add(ServerName.valueOf(sn.getHostname(), datanodeDataTransferPort, - NON_STARTCODE)); + fnWithDNPort + .add(ServerName.valueOf(sn.getHostname(), datanodeDataTransferPort, NON_STARTCODE)); } return fnWithDNPort; } public synchronized void updateFavoredNodes(Map> regionFNMap) - throws IOException { + throws IOException { Map> regionToFavoredNodes = new HashMap<>(); for (Map.Entry> entry : regionFNMap.entrySet()) { @@ -164,13 +163,13 @@ public class FavoredNodesManager { if (!isFavoredNodeApplicable(regionInfo)) { throw new IOException("Can't update FN for a un-applicable region: " - + regionInfo.getRegionNameAsString() + " with " + servers); + + regionInfo.getRegionNameAsString() + " with " + servers); } if (servers.size() != FAVORED_NODES_NUM) { - throw new IOException("At least " + FAVORED_NODES_NUM - + " favored nodes should be present for region : " + regionInfo.getEncodedName() - + " current FN servers:" + servers); + throw new IOException( + "At least " + FAVORED_NODES_NUM + " favored nodes should be present for region : " + + regionInfo.getEncodedName() + " current FN servers:" + servers); } List serversWithNoStartCodes = Lists.newArrayList(); @@ -178,8 +177,8 @@ public class FavoredNodesManager { if (sn.getStartcode() == NON_STARTCODE) { serversWithNoStartCodes.add(sn); } else { - serversWithNoStartCodes.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), - NON_STARTCODE)); + serversWithNoStartCodes + .add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE)); } } regionToFavoredNodes.put(regionInfo, serversWithNoStartCodes); @@ -187,7 +186,7 @@ public class FavoredNodesManager { // Lets do a bulk update to meta since that reduces the RPC's FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, - masterServices.getConnection()); + masterServices.getConnection()); deleteFavoredNodesForRegions(regionToFavoredNodes.keySet()); for (Map.Entry> entry : regionToFavoredNodes.entrySet()) { @@ -208,8 +207,7 @@ public class FavoredNodesManager { regionList.add(hri); primaryRSToRegionMap.put(serverToUse, regionList); - serverToUse = ServerName - .valueOf(servers.get(SECONDARY.ordinal()).getAddress(), NON_STARTCODE); + serverToUse = ServerName.valueOf(servers.get(SECONDARY.ordinal()).getAddress(), NON_STARTCODE); regionList = secondaryRSToRegionMap.get(serverToUse); if (regionList == null) { regionList = new ArrayList<>(); @@ -217,8 +215,7 @@ public class FavoredNodesManager { regionList.add(hri); secondaryRSToRegionMap.put(serverToUse, regionList); - serverToUse = ServerName.valueOf(servers.get(TERTIARY.ordinal()).getAddress(), - NON_STARTCODE); + serverToUse = ServerName.valueOf(servers.get(TERTIARY.ordinal()).getAddress(), NON_STARTCODE); regionList = teritiaryRSToRegionMap.get(serverToUse); if (regionList == null) { regionList = new ArrayList<>(); @@ -228,12 +225,11 @@ public class FavoredNodesManager { } /* - * Get the replica count for the servers provided. - * - * For each server, replica count includes three counts for primary, secondary and tertiary. - * If a server is the primary favored node for 10 regions, secondary for 5 and tertiary - * for 1, then the list would be [10, 5, 1]. 
If the server is newly added to the cluster is - * not a favored node for any region, the replica count would be [0, 0, 0]. + * Get the replica count for the servers provided. For each server, replica count includes three + * counts for primary, secondary and tertiary. If a server is the primary favored node for 10 + * regions, secondary for 5 and tertiary for 1, then the list would be [10, 5, 1]. If the server + * is newly added to the cluster is not a favored node for any region, the replica count would be + * [0, 0, 0]. */ public synchronized Map> getReplicaLoad(List servers) { Map> result = Maps.newHashMap(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java index a7667c328a1..da58fc9d358 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,11 +27,9 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; /** - * This class contains the mapping information between each region name and - * its favored region server list. Used by {@link FavoredNodeLoadBalancer} set - * of classes and from unit tests (hence the class is public) - * - * All the access to this class is thread-safe. + * This class contains the mapping information between each region name and its favored region + * server list. Used by {@link FavoredNodeLoadBalancer} set of classes and from unit tests (hence + * the class is public) All the access to this class is thread-safe. */ @InterfaceAudience.Private public class FavoredNodesPlan { @@ -82,18 +79,19 @@ public class FavoredNodesPlan { } /** - * Return the position of the server in the favoredNodes list. Assumes the - * favoredNodes list is of size 3. - * @return position + * Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of + * size 3. n */ - public static Position getFavoredServerPosition( - List favoredNodes, ServerName server) { - if (favoredNodes == null || server == null || - favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + public static Position getFavoredServerPosition(List favoredNodes, + ServerName server) { + if ( + favoredNodes == null || server == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM + ) { return null; } for (Position p : Position.values()) { - if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()),server)) { + if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()), server)) { return p; } } @@ -105,8 +103,8 @@ public class FavoredNodesPlan { */ public Map> getAssignmentMap() { // Make a deep copy so changes don't harm our copy of favoredNodesMap. - return this.favoredNodesMap.entrySet().stream(). - collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList(v.getValue()))); + return this.favoredNodesMap.entrySet().stream() + .collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList(v.getValue()))); } public int size() { @@ -125,7 +123,7 @@ public class FavoredNodesPlan { return false; } // To compare the map from object o is identical to current assignment map. 
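The getFavoredServerPosition() contract reworded in the FavoredNodesPlan javadoc above is easiest to see with a concrete call: it only answers when the list has exactly FAVORED_NODES_NUM entries, and it matches by host and port while ignoring start codes. A small sketch, with made-up host names and a hypothetical wrapper class, purely for illustration:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
    import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;

    class FavoredPositionSketch {
      static void demo() {
        List<ServerName> favored = Arrays.asList(
          ServerName.valueOf("host-a", 16020, ServerName.NON_STARTCODE),
          ServerName.valueOf("host-b", 16020, ServerName.NON_STARTCODE),
          ServerName.valueOf("host-c", 16020, ServerName.NON_STARTCODE));
        // The start code of the queried server is irrelevant; comparison is by address only.
        ServerName current = ServerName.valueOf("host-b", 16020, 1234567890L);
        Position p = FavoredNodesPlan.getFavoredServerPosition(favored, current);
        // p == Position.SECONDARY here; a null return means "not a favored node" or a malformed list.
      }
    }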
- Map> comparedMap = ((FavoredNodesPlan)o).favoredNodesMap; + Map> comparedMap = ((FavoredNodesPlan) o).favoredNodesMap; // compare the size if (comparedMap.size() != this.favoredNodesMap.size()) { @@ -133,8 +131,7 @@ public class FavoredNodesPlan { } // compare each element in the assignment map - for (Map.Entry> entry : - comparedMap.entrySet()) { + for (Map.Entry> entry : comparedMap.entrySet()) { List serverList = this.favoredNodesMap.get(entry.getKey()); if (serverList == null && entry.getValue() != null) { return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java index 322eb1df0d2..1c7665ceed0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java @@ -19,10 +19,9 @@ package org.apache.hadoop.hbase.favored; import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface FavoredNodesPromoter { @@ -30,9 +29,9 @@ public interface FavoredNodesPromoter { /* Try and assign regions even if favored nodes are dead */ String FAVORED_ALWAYS_ASSIGN_REGIONS = "hbase.favored.assignment.always.assign"; - void generateFavoredNodesForDaughter(List servers, - RegionInfo parent, RegionInfo hriA, RegionInfo hriB) throws IOException; + void generateFavoredNodesForDaughter(List servers, RegionInfo parent, RegionInfo hriA, + RegionInfo hriB) throws IOException; - void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) - throws IOException; + void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java index 2a7600079d7..a419fedd8bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java @@ -17,16 +17,17 @@ */ package org.apache.hadoop.hbase.favored; -import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Addressing; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; /** - * This class differs from ServerName in that start code is always ignored. This is because - * start code, ServerName.NON_STARTCODE is used to persist favored nodes and keeping this separate - * from {@link ServerName} is much cleaner. This should only be used by Favored node specific - * classes and should not be used outside favored nodes. + * This class differs from ServerName in that start code is always ignored. This is because start + * code, ServerName.NON_STARTCODE is used to persist favored nodes and keeping this separate from + * {@link ServerName} is much cleaner. 
This should only be used by Favored node specific classes and + * should not be used outside favored nodes. */ @InterfaceAudience.Private class StartcodeAgnosticServerName extends ServerName { @@ -37,17 +38,18 @@ class StartcodeAgnosticServerName extends ServerName { public static StartcodeAgnosticServerName valueOf(final ServerName serverName) { return new StartcodeAgnosticServerName(serverName.getHostname(), serverName.getPort(), - serverName.getStartcode()); + serverName.getStartcode()); } public static StartcodeAgnosticServerName valueOf(final String hostnameAndPort, long startcode) { return new StartcodeAgnosticServerName(Addressing.parseHostname(hostnameAndPort), - Addressing.parsePort(hostnameAndPort), startcode); + Addressing.parsePort(hostnameAndPort), startcode); } - public static StartcodeAgnosticServerName valueOf(final HostAndPort hostnameAndPort, long startcode) { - return new StartcodeAgnosticServerName(hostnameAndPort.getHost(), - hostnameAndPort.getPort(), startcode); + public static StartcodeAgnosticServerName valueOf(final HostAndPort hostnameAndPort, + long startcode) { + return new StartcodeAgnosticServerName(hostnameAndPort.getHost(), hostnameAndPort.getPort(), + startcode); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java index 9bc072a048e..f2807ea023b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,26 +19,25 @@ package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This is a Filter wrapper class which is used in the server side. Some filter - * related hooks can be defined in this wrapper. The only way to create a - * FilterWrapper instance is passing a client side Filter instance through - * {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. - * + * This is a Filter wrapper class which is used in the server side. Some filter related hooks can be + * defined in this wrapper. The only way to create a FilterWrapper instance is passing a client side + * Filter instance through {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. 
*/ @InterfaceAudience.Private final public class FilterWrapper extends Filter { Filter filter = null; - public FilterWrapper( Filter filter ) { + public FilterWrapper(Filter filter) { if (null == filter) { // ensure the filter instance is not null throw new NullPointerException("Cannot create FilterWrapper with null Filter"); @@ -53,8 +50,7 @@ final public class FilterWrapper extends Filter { */ @Override public byte[] toByteArray() throws IOException { - FilterProtos.FilterWrapper.Builder builder = - FilterProtos.FilterWrapper.newBuilder(); + FilterProtos.FilterWrapper.Builder builder = FilterProtos.FilterWrapper.newBuilder(); builder.setFilter(ProtobufUtil.toFilter(this.filter)); return builder.build().toByteArray(); } @@ -65,8 +61,7 @@ final public class FilterWrapper extends Filter { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static FilterWrapper parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static FilterWrapper parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FilterWrapper proto; try { proto = FilterProtos.FilterWrapper.parseFrom(pbBytes); @@ -141,16 +136,17 @@ final public class FilterWrapper extends Filter { public enum FilterRowRetCode { NOT_CALLED, - INCLUDE, // corresponds to filter.filterRow() returning false - EXCLUDE, // corresponds to filter.filterRow() returning true - INCLUDE_THIS_FAMILY // exclude other families + INCLUDE, // corresponds to filter.filterRow() returning false + EXCLUDE, // corresponds to filter.filterRow() returning true + INCLUDE_THIS_FAMILY // exclude other families } + public FilterRowRetCode filterRowCellsWithRet(List kvs) throws IOException { - //To fix HBASE-6429, - //Filter with filterRow() returning true is incompatible with scan with limit - //1. hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. - //2. filterRow() is merged with filterRow(kvs), - //so that to make all those row related filtering stuff in the same function. + // To fix HBASE-6429, + // Filter with filterRow() returning true is incompatible with scan with limit + // 1. hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. + // 2. filterRow() is merged with filterRow(kvs), + // so that to make all those row related filtering stuff in the same function. this.filter.filterRowCells(kvs); if (!kvs.isEmpty()) { if (this.filter.filterRow()) { @@ -169,15 +165,15 @@ final public class FilterWrapper extends Filter { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof FilterWrapper)) return false; - FilterWrapper other = (FilterWrapper)o; + FilterWrapper other = (FilterWrapper) o; return this.filter.areSerializedFieldsEqual(other.filter); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index 17b04ab1a0b..03df1e94229 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.fs; import edu.umd.cs.findbugs.annotations.Nullable; @@ -57,27 +54,24 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * An encapsulation for the FileSystem object that hbase uses to access - * data. This class allows the flexibility of using - * separate filesystem objects for reading and writing hfiles and wals. + * An encapsulation for the FileSystem object that hbase uses to access data. This class allows the + * flexibility of using separate filesystem objects for reading and writing hfiles and wals. */ @InterfaceAudience.Private public class HFileSystem extends FilterFileSystem { public static final Logger LOG = LoggerFactory.getLogger(HFileSystem.class); - private final FileSystem noChecksumFs; // read hfile data from storage + private final FileSystem noChecksumFs; // read hfile data from storage private final boolean useHBaseChecksum; private static volatile byte unspecifiedStoragePolicyId = Byte.MIN_VALUE; /** * Create a FileSystem object for HBase regionservers. - * @param conf The configuration to be used for the filesystem - * @param useHBaseChecksum if true, then use - * checksum verfication in hbase, otherwise - * delegate checksum verification to the FileSystem. + * @param conf The configuration to be used for the filesystem + * @param useHBaseChecksum if true, then use checksum verfication in hbase, otherwise delegate + * checksum verification to the FileSystem. */ - public HFileSystem(Configuration conf, boolean useHBaseChecksum) - throws IOException { + public HFileSystem(Configuration conf, boolean useHBaseChecksum) throws IOException { // Create the default filesystem with checksum verification switched on. // By default, any operation to this FilterFileSystem occurs on @@ -120,9 +114,8 @@ public class HFileSystem extends FilterFileSystem { } /** - * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and - * writefs are both set to be the same specified fs. - * Do not verify hbase-checksums while reading data from filesystem. + * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and writefs are both set to be + * the same specified fs. Do not verify hbase-checksums while reading data from filesystem. * @param fs Set the noChecksumFs and writeFs to this specified filesystem. */ public HFileSystem(FileSystem fs) { @@ -132,11 +125,9 @@ public class HFileSystem extends FilterFileSystem { } /** - * Returns the filesystem that is specially setup for - * doing reads from storage. 
This object avoids doing - * checksum verifications for reads. - * @return The FileSystem object that can be used to read data - * from files. + * Returns the filesystem that is specially setup for doing reads from storage. This object avoids + * doing checksum verifications for reads. + * @return The FileSystem object that can be used to read data from files. */ public FileSystem getNoChecksumFs() { return noChecksumFs; @@ -152,10 +143,10 @@ public class HFileSystem extends FilterFileSystem { /** * Set the source path (directory/file) to the specified storage policy. - * @param path The source path (directory/file). - * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. - * See see hadoop 2.6+ org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g - * 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'. + * @param path The source path (directory/file). + * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. See see hadoop 2.6+ + * org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD', + * 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'. */ public void setStoragePolicy(Path path, String policyName) { CommonFSUtils.setStoragePolicy(this.fs, path, policyName); @@ -171,7 +162,7 @@ public class HFileSystem extends FilterFileSystem { public String getStoragePolicyName(Path path) { try { Object blockStoragePolicySpi = - ReflectionUtils.invokeMethod(this.fs, "getStoragePolicy", path); + ReflectionUtils.invokeMethod(this.fs, "getStoragePolicy", path); return (String) ReflectionUtils.invokeMethod(blockStoragePolicySpi, "getName"); } catch (Exception e) { // Maybe fail because of using old HDFS version, try the old way @@ -221,8 +212,7 @@ public class HFileSystem extends FilterFileSystem { /** * Are we verifying checksums in HBase? - * @return True, if hbase is configured to verify checksums, - * otherwise false. + * @return True, if hbase is configured to verify checksums, otherwise false. */ public boolean useHBaseChecksum() { return useHBaseChecksum; @@ -240,10 +230,8 @@ public class HFileSystem extends FilterFileSystem { } /** - * Returns a brand new instance of the FileSystem. It does not use - * the FileSystem.Cache. In newer versions of HDFS, we can directly - * invoke FileSystem.newInstance(Configuration). - * + * Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer + * versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration). * @param conf Configuration * @return A new instance of the filesystem */ @@ -271,9 +259,9 @@ public class HFileSystem extends FilterFileSystem { } /** - * Returns an instance of Filesystem wrapped into the class specified in - * hbase.fs.wrapper property, if one is set in the configuration, returns - * unmodified FS instance passed in as an argument otherwise. + * Returns an instance of Filesystem wrapped into the class specified in hbase.fs.wrapper + * property, if one is set in the configuration, returns unmodified FS instance passed in as an + * argument otherwise. * @param base Filesystem instance to wrap * @param conf Configuration * @return wrapped instance of FS, or the same instance if no wrapping configured. @@ -296,15 +284,14 @@ public class HFileSystem extends FilterFileSystem { } /** - * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient - * linked to this FileSystem. See HBASE-6435 for the background. 
+ * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient linked to + * this FileSystem. See HBASE-6435 for the background. *
<p/>
        * There should be no reason, except testing, to create a specific ReorderBlocks. - * * @return true if the interceptor was added, false otherwise. */ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) { - if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default + if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default LOG.debug("addLocationsOrderInterceptor configured to false"); return false; } @@ -318,17 +305,16 @@ public class HFileSystem extends FilterFileSystem { } if (!(fs instanceof DistributedFileSystem)) { - LOG.debug("The file system is not a DistributedFileSystem. " + - "Skipping on block location reordering"); + LOG.debug("The file system is not a DistributedFileSystem. " + + "Skipping on block location reordering"); return false; } DistributedFileSystem dfs = (DistributedFileSystem) fs; DFSClient dfsc = dfs.getClient(); if (dfsc == null) { - LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + - "block reordering interceptor. Continuing, but this is unexpected." - ); + LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + + "block reordering interceptor. Continuing, but this is unexpected."); return false; } @@ -341,16 +327,15 @@ public class HFileSystem extends FilterFileSystem { ClientProtocol namenode = (ClientProtocol) nf.get(dfsc); if (namenode == null) { - LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + - " reordering interceptor. Continuing, but this is unexpected." - ); + LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + + " reordering interceptor. Continuing, but this is unexpected."); return false; } ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf); nf.set(dfsc, cp1); - LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" + - " using class " + lrb.getClass().getName()); + LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" + + " using class " + lrb.getClass().getName()); } catch (NoSuchFieldException e) { LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e); return false; @@ -363,44 +348,44 @@ public class HFileSystem extends FilterFileSystem { } private static ClientProtocol createReorderingProxy(final ClientProtocol cp, - final ReorderBlocks lrb, final Configuration conf) { + final ReorderBlocks lrb, final Configuration conf) { return (ClientProtocol) Proxy.newProxyInstance(cp.getClass().getClassLoader(), - new Class[]{ClientProtocol.class, Closeable.class}, new InvocationHandler() { - @Override - public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - try { - if ((args == null || args.length == 0) && "close".equals(method.getName())) { - RPC.stopProxy(cp); - return null; - } else { - Object res = method.invoke(cp, args); - if (res != null && args != null && args.length == 3 - && "getBlockLocations".equals(method.getName()) - && res instanceof LocatedBlocks - && args[0] instanceof String - && args[0] != null) { - lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]); - } - return res; + new Class[] { ClientProtocol.class, Closeable.class }, new InvocationHandler() { + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + try { + if ((args == null || args.length == 0) && 
"close".equals(method.getName())) { + RPC.stopProxy(cp); + return null; + } else { + Object res = method.invoke(cp, args); + if ( + res != null && args != null && args.length == 3 + && "getBlockLocations".equals(method.getName()) && res instanceof LocatedBlocks + && args[0] instanceof String && args[0] != null + ) { + lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]); } - } catch (InvocationTargetException ite) { - // We will have this for all the exception, checked on not, sent - // by any layer, including the functional exception - Throwable cause = ite.getCause(); - if (cause == null){ - throw new RuntimeException("Proxy invocation failed and getCause is null", ite); - } - if (cause instanceof UndeclaredThrowableException) { - Throwable causeCause = cause.getCause(); - if (causeCause == null) { - throw new RuntimeException("UndeclaredThrowableException had null cause!"); - } - cause = cause.getCause(); - } - throw cause; + return res; } + } catch (InvocationTargetException ite) { + // We will have this for all the exception, checked on not, sent + // by any layer, including the functional exception + Throwable cause = ite.getCause(); + if (cause == null) { + throw new RuntimeException("Proxy invocation failed and getCause is null", ite); + } + if (cause instanceof UndeclaredThrowableException) { + Throwable causeCause = cause.getCause(); + if (causeCause == null) { + throw new RuntimeException("UndeclaredThrowableException had null cause!"); + } + cause = cause.getCause(); + } + throw cause; } - }); + } + }); } /** @@ -408,24 +393,23 @@ public class HFileSystem extends FilterFileSystem { */ interface ReorderBlocks { /** - * * @param conf - the conf to use - * @param lbs - the LocatedBlocks to reorder - * @param src - the file name currently read + * @param lbs - the LocatedBlocks to reorder + * @param src - the file name currently read * @throws IOException - if something went wrong */ void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException; } /** - * We're putting at lowest priority the wal files blocks that are on the same datanode - * as the original regionserver which created these files. This because we fear that the - * datanode is actually dead, so if we use it it will timeout. + * We're putting at lowest priority the wal files blocks that are on the same datanode as the + * original regionserver which created these files. This because we fear that the datanode is + * actually dead, so if we use it it will timeout. */ static class ReorderWALBlocks implements ReorderBlocks { @Override public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) - throws IOException { + throws IOException { ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(conf, src); if (sn == null) { @@ -436,8 +420,7 @@ public class HFileSystem extends FilterFileSystem { // Ok, so it's an WAL String hostName = sn.getHostname(); if (LOG.isTraceEnabled()) { - LOG.trace(src + - " is an WAL file, so reordering blocks, last hostname will be:" + hostName); + LOG.trace(src + " is an WAL file, so reordering blocks, last hostname will be:" + hostName); } // Just check for all blocks @@ -460,10 +443,9 @@ public class HFileSystem extends FilterFileSystem { } /** - * Create a new HFileSystem object, similar to FileSystem.get(). - * This returns a filesystem object that avoids checksum - * verification in the filesystem for hfileblock-reads. - * For these blocks, checksum verification is done by HBase. 
+ * Create a new HFileSystem object, similar to FileSystem.get(). This returns a filesystem object + * that avoids checksum verification in the filesystem for hfileblock-reads. For these blocks, + * checksum verification is done by HBase. */ static public FileSystem get(Configuration conf) throws IOException { return new HFileSystem(conf, true); @@ -477,17 +459,13 @@ public class HFileSystem extends FilterFileSystem { } /** - * The org.apache.hadoop.fs.FilterFileSystem does not yet support - * createNonRecursive. This is a hadoop bug and when it is fixed in Hadoop, - * this definition will go away. + * The org.apache.hadoop.fs.FilterFileSystem does not yet support createNonRecursive. This is a + * hadoop bug and when it is fixed in Hadoop, this definition will go away. */ @Override @SuppressWarnings("deprecation") - public FSDataOutputStream createNonRecursive(Path f, - boolean overwrite, - int bufferSize, short replication, long blockSize, - Progressable progress) throws IOException { - return fs.createNonRecursive(f, overwrite, bufferSize, replication, - blockSize, progress); + public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize, + short replication, long blockSize, Progressable progress) throws IOException { + return fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, progress); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index 1473efc1272..819c5651081 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -35,9 +34,9 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** - * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, - * as well as closing streams. Initialization is not thread-safe, but normal operation is; - * see method comments. + * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, as well + * as closing streams. Initialization is not thread-safe, but normal operation is; see method + * comments. */ @InterfaceAudience.Private public class FSDataInputStreamWrapper implements Closeable { @@ -51,25 +50,23 @@ public class FSDataInputStreamWrapper implements Closeable { private final boolean dropBehind; private final long readahead; - /** Two stream handles, one with and one without FS-level checksum. - * HDFS checksum setting is on FS level, not single read level, so you have to keep two - * FS objects and two handles open to interleave different reads freely, which is very sad. - * This is what we do: - * 1) First, we need to read the trailer of HFile to determine checksum parameters. - * We always use FS checksum to do that, so ctor opens {@link #stream}. 
- * 2.1) After that, if HBase checksum is not used, we'd just always use {@link #stream}; - * 2.2) If HBase checksum can be used, we'll open {@link #streamNoFsChecksum}, - * and close {@link #stream}. User MUST call prepareForBlockReader for that to happen; - * if they don't, (2.1) will be the default. - * 3) The users can call {@link #shouldUseHBaseChecksum()}, and pass its result to - * {@link #getStream(boolean)} to get stream (if Java had out/pointer params we could - * return both in one call). This stream is guaranteed to be set. - * 4) The first time HBase checksum fails, one would call {@link #fallbackToFsChecksum(int)}. - * That will take lock, and open {@link #stream}. While this is going on, others will - * continue to use the old stream; if they also want to fall back, they'll also call - * {@link #fallbackToFsChecksum(int)}, and block until {@link #stream} is set. - * 5) After some number of checksumOk() calls, we will go back to using HBase checksum. - * We will have 2 handles; however we presume checksums fail so rarely that we don't care. + /** + * Two stream handles, one with and one without FS-level checksum. HDFS checksum setting is on FS + * level, not single read level, so you have to keep two FS objects and two handles open to + * interleave different reads freely, which is very sad. This is what we do: 1) First, we need to + * read the trailer of HFile to determine checksum parameters. We always use FS checksum to do + * that, so ctor opens {@link #stream}. 2.1) After that, if HBase checksum is not used, we'd just + * always use {@link #stream}; 2.2) If HBase checksum can be used, we'll open + * {@link #streamNoFsChecksum}, and close {@link #stream}. User MUST call prepareForBlockReader + * for that to happen; if they don't, (2.1) will be the default. 3) The users can call + * {@link #shouldUseHBaseChecksum()}, and pass its result to {@link #getStream(boolean)} to get + * stream (if Java had out/pointer params we could return both in one call). This stream is + * guaranteed to be set. 4) The first time HBase checksum fails, one would call + * {@link #fallbackToFsChecksum(int)}. That will take lock, and open {@link #stream}. While this + * is going on, others will continue to use the old stream; if they also want to fall back, + * they'll also call {@link #fallbackToFsChecksum(int)}, and block until {@link #stream} is set. + * 5) After some number of checksumOk() calls, we will go back to using HBase checksum. We will + * have 2 handles; however we presume checksums fail so rarely that we don't care. 
*/ private volatile FSDataInputStream stream = null; private volatile FSDataInputStream streamNoFsChecksum = null; @@ -104,17 +101,18 @@ public class FSDataInputStreamWrapper implements Closeable { this(fs, path, false, -1L); } - public FSDataInputStreamWrapper(FileSystem fs, Path path, boolean dropBehind, long readahead) throws IOException { + public FSDataInputStreamWrapper(FileSystem fs, Path path, boolean dropBehind, long readahead) + throws IOException { this(fs, null, path, dropBehind, readahead); } - public FSDataInputStreamWrapper(FileSystem fs, FileLink link, - boolean dropBehind, long readahead) throws IOException { + public FSDataInputStreamWrapper(FileSystem fs, FileLink link, boolean dropBehind, long readahead) + throws IOException { this(fs, link, null, dropBehind, readahead); } private FSDataInputStreamWrapper(FileSystem fs, FileLink link, Path path, boolean dropBehind, - long readahead) throws IOException { + long readahead) throws IOException { assert (path == null) != (link == null); this.path = path; this.link = link; @@ -148,16 +146,16 @@ public class FSDataInputStreamWrapper implements Closeable { } /** - * Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any - * reads finish and before any other reads start (what happens in reality is we read the - * tail, then call this based on what's in the tail, then read blocks). + * Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any reads + * finish and before any other reads start (what happens in reality is we read the tail, then call + * this based on what's in the tail, then read blocks). * @param forceNoHBaseChecksum Force not using HBase checksum. */ public void prepareForBlockReader(boolean forceNoHBaseChecksum) throws IOException { if (hfs == null) return; assert this.stream != null && !this.useHBaseChecksumConfigured; boolean useHBaseChecksum = - !forceNoHBaseChecksum && hfs.useHBaseChecksum() && (hfs.getNoChecksumFs() != hfs); + !forceNoHBaseChecksum && hfs.useHBaseChecksum() && (hfs.getNoChecksumFs() != hfs); if (useHBaseChecksum) { FileSystem fsNc = hfs.getNoChecksumFs(); @@ -197,8 +195,8 @@ public class FSDataInputStreamWrapper implements Closeable { /** * Get the stream to use. Thread-safe. - * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned - * at some point in the past, otherwise the result is undefined. + * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned at some + * point in the past, otherwise the result is undefined. */ public FSDataInputStream getStream(boolean useHBaseChecksum) { return useHBaseChecksum ? this.streamNoFsChecksum : this.stream; @@ -228,8 +226,10 @@ public class FSDataInputStreamWrapper implements Closeable { /** Report that checksum was ok, so we may ponder going back to HBase checksum. */ public void checksumOk() { - if (this.useHBaseChecksumConfigured && !this.useHBaseChecksum - && (this.hbaseChecksumOffCount.getAndDecrement() < 0)) { + if ( + this.useHBaseChecksumConfigured && !this.useHBaseChecksum + && (this.hbaseChecksumOffCount.getAndDecrement() < 0) + ) { // The stream we need is already open (because we were using HBase checksum in the past). assert this.streamNoFsChecksum != null; this.useHBaseChecksum = true; @@ -240,20 +240,20 @@ public class FSDataInputStreamWrapper implements Closeable { // If the underlying file system is HDFS, update read statistics upon close. 
if (stream instanceof HdfsDataInputStream) { /** - * Because HDFS ReadStatistics is calculated per input stream, it is not - * feasible to update the aggregated number in real time. Instead, the - * metrics are updated when an input stream is closed. + * Because HDFS ReadStatistics is calculated per input stream, it is not feasible to update + * the aggregated number in real time. Instead, the metrics are updated when an input stream + * is closed. */ - HdfsDataInputStream hdfsDataInputStream = (HdfsDataInputStream)stream; + HdfsDataInputStream hdfsDataInputStream = (HdfsDataInputStream) stream; synchronized (readStatistics) { - readStatistics.totalBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalBytesRead(); - readStatistics.totalLocalBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalLocalBytesRead(); - readStatistics.totalShortCircuitBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalShortCircuitBytesRead(); - readStatistics.totalZeroCopyBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalZeroCopyBytesRead(); + readStatistics.totalBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalBytesRead(); + readStatistics.totalLocalBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalLocalBytesRead(); + readStatistics.totalShortCircuitBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalShortCircuitBytesRead(); + readStatistics.totalZeroCopyBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalZeroCopyBytesRead(); } } } @@ -292,7 +292,6 @@ public class FSDataInputStreamWrapper implements Closeable { // we do not care about the close exception as it is for reading, no data loss issue. Closeables.closeQuietly(streamNoFsChecksum); - updateInputStreamStatistics(stream); Closeables.closeQuietly(stream); } @@ -332,10 +331,10 @@ public class FSDataInputStreamWrapper implements Closeable { if (this.instanceOfCanUnbuffer) { try { this.unbuffer.unbuffer(); - } catch (UnsupportedOperationException e){ + } catch (UnsupportedOperationException e) { if (isLogTraceEnabled) { LOG.trace("Failed to invoke 'unbuffer' method in class " + streamClass - + " . So there may be the stream does not support unbuffering.", e); + + " . So there may be the stream does not support unbuffering.", e); } } } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java index ea285ed53fa..2d12fd88c11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import java.io.FileNotFoundException; @@ -43,52 +42,34 @@ import org.slf4j.LoggerFactory; /** * The FileLink is a sort of hardlink, that allows access to a file given a set of locations. - * - *

<p><b>The Problem:</b>
+ * <p>
+ * <b>The Problem:</b>
 * <ul>
- *  <li>
- *    HDFS doesn't have support for hardlinks, and this make impossible to referencing
- *    the same data blocks using different names.
- *  </li>
- *  <li>
- *    HBase store files in one location (e.g. table/region/family/) and when the file is not
- *    needed anymore (e.g. compaction, region deletion, ...) moves it to an archive directory.
- *  </li>
+ * <li>HDFS doesn't have support for hardlinks, and this make impossible to referencing the same
+ * data blocks using different names.</li>
+ * <li>HBase store files in one location (e.g. table/region/family/) and when the file is not needed
+ * anymore (e.g. compaction, region deletion, ...) moves it to an archive directory.</li>
 * </ul>
- * If we want to create a reference to a file, we need to remember that it can be in its
- * original location or in the archive folder.
- * The FileLink class tries to abstract this concept and given a set of locations
- * it is able to switch between them making this operation transparent for the user.
- * {@link HFileLink} is a more concrete implementation of the {@code FileLink}.
- *
- * <p><b>Back-references:</b>
- * To help the {@link org.apache.hadoop.hbase.master.cleaner.CleanerChore} to keep track of
- * the links to a particular file, during the {@code FileLink} creation, a new file is placed
- * inside a back-reference directory. There's one back-reference directory for each file that
- * has links, and in the directory there's one file per link.
- *
- * <p><b>HFileLink Example</b>
+ * If we want to create a reference to a file, we need to remember that it can be in its original
+ * location or in the archive folder. The FileLink class tries to abstract this concept and given a
+ * set of locations it is able to switch between them making this operation transparent for the
+ * user. {@link HFileLink} is a more concrete implementation of the {@code FileLink}.
+ * <p>
+ * <b>Back-references:</b> To help the {@link org.apache.hadoop.hbase.master.cleaner.CleanerChore}
+ * to keep track of the links to a particular file, during the {@code FileLink} creation, a new file
+ * is placed inside a back-reference directory. There's one back-reference directory for each file
+ * that has links, and in the directory there's one file per link.
+ * <p>
+ * <b>HFileLink Example</b>
 * <ul>
- *  <li>
- *    /hbase/table/region-x/cf/file-k
- *    (Original File)
- *  </li>
- *  <li>
- *    /hbase/table-cloned/region-y/cf/file-k.region-x.table
- *    (HFileLink to the original file)
- *  </li>
- *  <li>
- *    /hbase/table-2nd-cloned/region-z/cf/file-k.region-x.table
- *    (HFileLink to the original file)
- *  </li>
- *  <li>
- *    /hbase/.archive/table/region-x/.links-file-k/region-y.table-cloned
- *    (Back-reference to the link in table-cloned)
- *  </li>
- *  <li>
- *    /hbase/.archive/table/region-x/.links-file-k/region-z.table-2nd-cloned
- *    (Back-reference to the link in table-2nd-cloned)
- *  </li>
+ * <li>/hbase/table/region-x/cf/file-k (Original File)</li>
+ * <li>/hbase/table-cloned/region-y/cf/file-k.region-x.table (HFileLink to the original file)</li>
+ * <li>/hbase/table-2nd-cloned/region-z/cf/file-k.region-x.table (HFileLink to the original file)
+ * </li>
+ * <li>/hbase/.archive/table/region-x/.links-file-k/region-y.table-cloned (Back-reference to the
+ * link in table-cloned)</li>
+ * <li>/hbase/.archive/table/region-x/.links-file-k/region-z.table-2nd-cloned (Back-reference to the
+ * link in table-2nd-cloned)</li>
 * </ul>
        */ @InterfaceAudience.Private @@ -99,11 +80,11 @@ public class FileLink { public static final String BACK_REFERENCES_DIRECTORY_PREFIX = ".links-"; /** - * FileLink InputStream that handles the switch between the original path - * and the alternative locations, when the file is moved. + * FileLink InputStream that handles the switch between the original path and the alternative + * locations, when the file is moved. */ private static class FileLinkInputStream extends InputStream - implements Seekable, PositionedReadable, CanSetDropBehind, CanSetReadahead, CanUnbuffer { + implements Seekable, PositionedReadable, CanSetDropBehind, CanSetReadahead, CanUnbuffer { private FSDataInputStream in = null; private Path currentPath = null; private long pos = 0; @@ -112,13 +93,12 @@ public class FileLink { private final int bufferSize; private final FileSystem fs; - public FileLinkInputStream(final FileSystem fs, final FileLink fileLink) - throws IOException { + public FileLinkInputStream(final FileSystem fs, final FileLink fileLink) throws IOException { this(fs, fileLink, CommonFSUtils.getDefaultBufferSize(fs)); } public FileLinkInputStream(final FileSystem fs, final FileLink fileLink, int bufferSize) - throws IOException { + throws IOException { this.bufferSize = bufferSize; this.fileLink = fileLink; this.fs = fs; @@ -148,7 +128,7 @@ public class FileLink { @Override public int read(byte[] b) throws IOException { - return read(b, 0, b.length); + return read(b, 0, b.length); } @Override @@ -164,7 +144,7 @@ public class FileLink { n = tryOpen().read(b, off, len); } if (n > 0) pos += n; - assert(in.getPos() == pos); + assert (in.getPos() == pos); return n; } @@ -296,18 +276,17 @@ public class FileLink { /** * Try to open the file from one of the available locations. - * * @return FSDataInputStream stream of the opened file link * @throws IOException on unexpected error, or file not found. */ private FSDataInputStream tryOpen() throws IOException { IOException exception = null; - for (Path path: fileLink.getLocations()) { + for (Path path : fileLink.getLocations()) { if (path.equals(currentPath)) continue; try { in = fs.open(path, bufferSize); if (pos != 0) in.seek(pos); - assert(in.getPos() == pos) : "Link unable to seek to the right position=" + pos; + assert (in.getPos() == pos) : "Link unable to seek to the right position=" + pos; if (LOG.isTraceEnabled()) { if (currentPath == null) { LOG.debug("link open path=" + path); @@ -316,7 +295,7 @@ public class FileLink { } } currentPath = path; - return(in); + return (in); } catch (FileNotFoundException | AccessControlException | RemoteException e) { exception = FileLink.handleAccessLocationException(fileLink, e, exception); } @@ -342,7 +321,7 @@ public class FileLink { } /** - * @param originPath Original location of the file to link + * @param originPath Original location of the file to link * @param alternativePaths Alternative locations to look for the linked file */ public FileLink(Path originPath, Path... alternativePaths) { @@ -401,7 +380,6 @@ public class FileLink { /** * Get the FileStatus of the referenced file. - * * @param fs {@link FileSystem} on which to get the file status * @return InputStream for the hfile link. * @throws IOException on unexpected error. 
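A minimal usage sketch of the FileLink API reformatted above, assuming only the public FileLink(Path, Path...) constructor and open(FileSystem) that appear in this file; the paths below are hypothetical:

    import java.io.InputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.FileLink;

    public class FileLinkReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical locations: the file may still be in its original place or already archived.
        Path origin = new Path("/hbase/data/default/t1/region-x/cf/file-k");
        Path archived = new Path("/hbase/archive/data/default/t1/region-x/cf/file-k");
        FileLink link = new FileLink(origin, archived);
        // open() wraps the stream in FileLinkInputStream, which retries the alternative
        // locations transparently if the file is moved while it is being read.
        try (InputStream in = link.open(fs)) {
          byte[] buf = new byte[8192];
          int n = in.read(buf);
          System.out.println("Read " + n + " bytes through " + link);
        }
      }
    }

In practice HBase itself usually goes through the HFileLink subclass (next file in this patch) rather than constructing a raw FileLink.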
@@ -420,21 +398,21 @@ public class FileLink { /** * Handle exceptions which are thrown when access locations of file link - * @param fileLink the file link - * @param newException the exception caught by access the current location + * @param fileLink the file link + * @param newException the exception caught by access the current location * @param previousException the previous exception caught by access the other locations * @return return AccessControlException if access one of the locations caught, otherwise return * FileNotFoundException. The AccessControlException is threw if user scan snapshot * feature is enabled, see * {@link org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController}. * @throws IOException if the exception is neither AccessControlException nor - * FileNotFoundException + * FileNotFoundException */ private static IOException handleAccessLocationException(FileLink fileLink, - IOException newException, IOException previousException) throws IOException { + IOException newException, IOException previousException) throws IOException { if (newException instanceof RemoteException) { newException = ((RemoteException) newException) - .unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); + .unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); } if (newException instanceof FileNotFoundException) { // Try another file location @@ -453,9 +431,8 @@ public class FileLink { /** * Open the FileLink for read. *
<p>
        - * It uses a wrapper of FSDataInputStream that is agnostic to the location - * of the file, even if the file switches between locations. - * + * It uses a wrapper of FSDataInputStream that is agnostic to the location of the file, even if + * the file switches between locations. * @param fs {@link FileSystem} on which to open the FileLink * @return InputStream for reading the file link. * @throws IOException on unexpected error. @@ -467,10 +444,9 @@ public class FileLink { /** * Open the FileLink for read. *
<p>
        - * It uses a wrapper of FSDataInputStream that is agnostic to the location - * of the file, even if the file switches between locations. - * - * @param fs {@link FileSystem} on which to open the FileLink + * It uses a wrapper of FSDataInputStream that is agnostic to the location of the file, even if + * the file switches between locations. + * @param fs {@link FileSystem} on which to open the FileLink * @param bufferSize the size of the buffer to be used. * @return InputStream for reading the file link. * @throws IOException on unexpected error. @@ -480,8 +456,8 @@ public class FileLink { } /** - * If the passed FSDataInputStream is backed by a FileLink, returns the underlying - * InputStream for the resolved link target. Otherwise, returns null. + * If the passed FSDataInputStream is backed by a FileLink, returns the underlying InputStream for + * the resolved link target. Otherwise, returns null. */ public static FSDataInputStream getUnderlyingFileLinkInputStream(FSDataInputStream stream) { if (stream.getWrappedStream() instanceof FileLinkInputStream) { @@ -491,13 +467,13 @@ public class FileLink { } /** - * NOTE: This method must be used only in the constructor! - * It creates a List with the specified locations for the link. + * NOTE: This method must be used only in the constructor! It creates a List with the specified + * locations for the link. */ protected void setLocations(Path originPath, Path... alternativePaths) { assert this.locations == null : "Link locations already set"; - List paths = new ArrayList<>(alternativePaths.length +1); + List paths = new ArrayList<>(alternativePaths.length + 1); if (originPath != null) { paths.add(originPath); } @@ -512,10 +488,9 @@ public class FileLink { /** * Get the directory to store the link back references - * - *
<p>
        To simplify the reference count process, during the FileLink creation - * a back-reference is added to the back-reference directory of the specified file. - * + *
<p>
        + * To simplify the reference count process, during the FileLink creation a back-reference is added + * to the back-reference directory of the specified file. * @param storeDir Root directory for the link reference folder * @param fileName File Name with links * @return Path for the link back references. @@ -526,7 +501,6 @@ public class FileLink { /** * Get the referenced file name from the reference link directory path. - * * @param dirPath Link references directory path * @return Name of the file referenced */ @@ -566,4 +540,3 @@ public class FileLink { return Arrays.hashCode(locations); } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index fbed724a207..4cf350004a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import java.io.IOException; @@ -39,44 +38,39 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * HFileLink describes a link to an hfile. - * - * An hfile can be served from a region or from the hfile archive directory (/hbase/.archive) - * HFileLink allows to access the referenced hfile regardless of the location where it is. - * - *

<p>Searches for hfiles in the following order and locations:
+ * HFileLink describes a link to an hfile. An hfile can be served from a region or from the hfile
+ * archive directory (/hbase/.archive) HFileLink allows to access the referenced hfile regardless of
+ * the location where it is.
+ * <p>
+ * Searches for hfiles in the following order and locations:
 * <ul>
- *  <li>/hbase/table/region/cf/hfile</li>
- *  <li>/hbase/.archive/table/region/cf/hfile</li>
+ * <li>/hbase/table/region/cf/hfile</li>
+ * <li>/hbase/.archive/table/region/cf/hfile</li>
 * </ul>
        - * - * The link checks first in the original path if it is not present - * it fallbacks to the archived path. + * The link checks first in the original path if it is not present it fallbacks to the archived + * path. */ @InterfaceAudience.Private -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS", - justification="To be fixed but warning suppressed for now") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_DOESNT_OVERRIDE_EQUALS", + justification = "To be fixed but warning suppressed for now") public class HFileLink extends FileLink { private static final Logger LOG = LoggerFactory.getLogger(HFileLink.class); /** - * A non-capture group, for HFileLink, so that this can be embedded. - * The HFileLink describe a link to an hfile in a different table/region - * and the name is in the form: table=region-hfile. + * A non-capture group, for HFileLink, so that this can be embedded. The HFileLink describe a link + * to an hfile in a different table/region and the name is in the form: table=region-hfile. *
<p>
        * Table name is ([\p{IsAlphabetic}\p{Digit}][\p{IsAlphabetic}\p{Digit}.-]*), so '=' is an invalid - * character for the table name. - * Region name is ([a-f0-9]+), so '-' is an invalid character for the region name. - * HFile is ([0-9a-f]+(?:_SeqId_[0-9]+_)?) covering the plain hfiles (uuid) - * and the bulk loaded (_SeqId_[0-9]+_) hfiles. - * - *
<p>
        Here is an example name: /hbase/test/0123/cf/testtb=4567-abcd where 'testtb' is table name - * and '4567' is region name and 'abcd' is filename. + * character for the table name. Region name is ([a-f0-9]+), so '-' is an invalid character for + * the region name. HFile is ([0-9a-f]+(?:_SeqId_[0-9]+_)?) covering the plain hfiles (uuid) and + * the bulk loaded (_SeqId_[0-9]+_) hfiles. + *
<p>
        + * Here is an example name: /hbase/test/0123/cf/testtb=4567-abcd where 'testtb' is table name and + * '4567' is region name and 'abcd' is filename. */ - public static final String LINK_NAME_REGEX = - String.format("(?:(?:%s=)?)%s=%s-%s", - TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, - RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX); + public static final String LINK_NAME_REGEX = String.format("(?:(?:%s=)?)%s=%s-%s", + TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, + RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX); /** Define the HFile Link name parser in the form of: table=region-hfile */ public static final Pattern LINK_NAME_PATTERN = @@ -85,13 +79,12 @@ public class HFileLink extends FileLink { RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX)); /** - * The pattern should be used for hfile and reference links - * that can be found in /hbase/table/region/family/ + * The pattern should be used for hfile and reference links that can be found in + * /hbase/table/region/family/ */ private static final Pattern REF_OR_HFILE_LINK_PATTERN = - Pattern.compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$", - TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, - RegionInfoBuilder.ENCODED_REGION_NAME_REGEX)); + Pattern.compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$", TableName.VALID_NAMESPACE_REGEX, + TableName.VALID_TABLE_QUALIFIER_REGEX, RegionInfoBuilder.ENCODED_REGION_NAME_REGEX)); private final Path archivePath; private final Path originPath; @@ -102,7 +95,7 @@ public class HFileLink extends FileLink { * Dead simple hfile link constructor */ public HFileLink(final Path originPath, final Path tempPath, final Path mobPath, - final Path archivePath) { + final Path archivePath) { this.tempPath = tempPath; this.originPath = originPath; this.mobPath = mobPath; @@ -110,28 +103,24 @@ public class HFileLink extends FileLink { setLocations(originPath, tempPath, mobPath, archivePath); } - /** - * @param conf {@link Configuration} from which to extract specific archive locations + * @param conf {@link Configuration} from which to extract specific archive locations * @param hFileLinkPattern The path ending with a HFileLink pattern. (table=region-hfile) * @throws IOException on unexpected error. */ public static final HFileLink buildFromHFileLinkPattern(Configuration conf, Path hFileLinkPattern) - throws IOException { + throws IOException { return buildFromHFileLinkPattern(CommonFSUtils.getRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), hFileLinkPattern); + HFileArchiveUtil.getArchivePath(conf), hFileLinkPattern); } - - /** - * @param rootDir Path to the root directory where hbase files are stored - * @param archiveDir Path to the hbase archive directory + * @param rootDir Path to the root directory where hbase files are stored + * @param archiveDir Path to the hbase archive directory * @param hFileLinkPattern The path of the HFile Link. 
*/ - public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, - final Path archiveDir, - final Path hFileLinkPattern) { + public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir, + final Path hFileLinkPattern) { Path hfilePath = getHFileLinkPatternRelativePath(hFileLinkPattern); Path tempPath = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath); Path originPath = new Path(rootDir, hfilePath); @@ -142,14 +131,14 @@ public class HFileLink extends FileLink { /** * Create an HFileLink relative path for the table/region/family/hfile location - * @param table Table name + * @param table Table name * @param region Region Name * @param family Family Name - * @param hfile HFile Name + * @param hfile HFile Name * @return the relative Path to open the specified table/region/family/hfile link */ - public static Path createPath(final TableName table, final String region, - final String family, final String hfile) { + public static Path createPath(final TableName table, final String region, final String family, + final String hfile) { if (HFileLink.isHFileLink(hfile)) { return new Path(family, hfile); } @@ -158,17 +147,16 @@ public class HFileLink extends FileLink { /** * Create an HFileLink instance from table/region/family/hfile location - * @param conf {@link Configuration} from which to extract specific archive locations - * @param table Table name + * @param conf {@link Configuration} from which to extract specific archive locations + * @param table Table name * @param region Region Name * @param family Family Name - * @param hfile HFile Name + * @param hfile HFile Name * @return Link to the file with the specified table/region/family/hfile location * @throws IOException on unexpected error. */ public static HFileLink build(final Configuration conf, final TableName table, - final String region, final String family, final String hfile) - throws IOException { + final String region, final String family, final String hfile) throws IOException { return HFileLink.buildFromHFileLinkPattern(conf, createPath(table, region, family, hfile)); } @@ -193,7 +181,7 @@ public class HFileLink extends FileLink { return this.mobPath; } - /** + /** * @param path Path to check. * @return True if the path is a HFileLink. */ @@ -214,10 +202,8 @@ public class HFileLink extends FileLink { } /** - * Convert a HFileLink path to a table relative path. - * e.g. the link: /hbase/test/0123/cf/testtb=4567-abcd - * becomes: /hbase/testtb/4567/cf/abcd - * + * Convert a HFileLink path to a table relative path. e.g. the link: + * /hbase/test/0123/cf/testtb=4567-abcd becomes: /hbase/testtb/4567/cf/abcd * @param path HFileLink path * @return Relative table path * @throws IOException on unexpected error. 
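A small sketch of the table=region-hfile naming convention documented above; the table, region and hfile names are made-up values that merely match the documented patterns, and only the static helpers visible in this file are used:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.io.HFileLink;

    public class HFileLinkNameSketch {
      public static void main(String[] args) {
        // Hypothetical inputs: a table, an encoded region name and an hfile name.
        String linkName = HFileLink.createHFileLinkName(TableName.valueOf("testtb"),
          "4567abcd4567abcd4567abcd4567abcd", "abcd1234abcd1234");
        System.out.println(linkName); // testtb=4567abcd...-abcd1234...
        System.out.println(HFileLink.isHFileLink(linkName)); // true
        // The individual components can be recovered from the link name alone.
        System.out.println(HFileLink.getReferencedTableName(linkName)); // testtb
        System.out.println(HFileLink.getReferencedRegionName(linkName)); // the encoded region name
        System.out.println(HFileLink.getReferencedHFileName(linkName)); // the hfile name
      }
    }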
@@ -235,13 +221,11 @@ public class HFileLink extends FileLink { String hfileName = m.group(4); String familyName = path.getParent().getName(); Path tableDir = CommonFSUtils.getTableDir(new Path("./"), tableName); - return new Path(tableDir, new Path(regionName, new Path(familyName, - hfileName))); + return new Path(tableDir, new Path(regionName, new Path(familyName, hfileName))); } /** * Get the HFile name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced HFile */ @@ -250,12 +234,11 @@ public class HFileLink extends FileLink { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(4)); + return (m.group(4)); } /** * Get the Region name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced Region */ @@ -264,12 +247,11 @@ public class HFileLink extends FileLink { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(3)); + return (m.group(3)); } /** * Get the Table name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced Table */ @@ -278,76 +260,71 @@ public class HFileLink extends FileLink { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(TableName.valueOf(m.group(1), m.group(2))); + return (TableName.valueOf(m.group(1), m.group(2))); } /** * Create a new HFileLink name - * * @param hfileRegionInfo - Linked HFile Region Info - * @param hfileName - Linked HFile name + * @param hfileName - Linked HFile name * @return file name of the HFile Link */ public static String createHFileLinkName(final RegionInfo hfileRegionInfo, - final String hfileName) { - return createHFileLinkName(hfileRegionInfo.getTable(), - hfileRegionInfo.getEncodedName(), hfileName); + final String hfileName) { + return createHFileLinkName(hfileRegionInfo.getTable(), hfileRegionInfo.getEncodedName(), + hfileName); } /** * Create a new HFileLink name - * - * @param tableName - Linked HFile table name + * @param tableName - Linked HFile table name * @param regionName - Linked HFile region name - * @param hfileName - Linked HFile name + * @param hfileName - Linked HFile name * @return file name of the HFile Link */ - public static String createHFileLinkName(final TableName tableName, - final String regionName, final String hfileName) { + public static String createHFileLinkName(final TableName tableName, final String regionName, + final String hfileName) { String s = String.format("%s=%s-%s", - tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), - regionName, hfileName); + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), regionName, hfileName); return s; } /** * Create a new HFileLink - * - *
<p>
        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * - * @param conf {@link Configuration} to read for the archive directory name - * @param fs {@link FileSystem} on which to write the HFileLink - * @param dstFamilyPath - Destination path (table/region/cf/) + *
<p>
        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink + * @param dstFamilyPath - Destination path (table/region/cf/) * @param hfileRegionInfo - Linked HFile Region Info - * @param hfileName - Linked HFile name + * @param hfileName - Linked HFile name * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final RegionInfo hfileRegionInfo, - final String hfileName) throws IOException { + final Path dstFamilyPath, final RegionInfo hfileRegionInfo, final String hfileName) + throws IOException { return create(conf, fs, dstFamilyPath, hfileRegionInfo, hfileName, true); } /** * Create a new HFileLink - * - *
<p>
        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * - * @param conf {@link Configuration} to read for the archive directory name - * @param fs {@link FileSystem} on which to write the HFileLink - * @param dstFamilyPath - Destination path (table/region/cf/) + *
<p>
        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink + * @param dstFamilyPath - Destination path (table/region/cf/) * @param hfileRegionInfo - Linked HFile Region Info - * @param hfileName - Linked HFile name - * @param createBackRef - Whether back reference should be created. Defaults to true. + * @param hfileName - Linked HFile name + * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final RegionInfo hfileRegionInfo, - final String hfileName, final boolean createBackRef) throws IOException { + final Path dstFamilyPath, final RegionInfo hfileRegionInfo, final String hfileName, + final boolean createBackRef) throws IOException { TableName linkedTable = hfileRegionInfo.getTable(); String linkedRegion = hfileRegionInfo.getEncodedName(); return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName, createBackRef); @@ -355,49 +332,47 @@ public class HFileLink extends FileLink { /** * Create a new HFileLink - * - *
<p>
        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * - * @param conf {@link Configuration} to read for the archive directory name - * @param fs {@link FileSystem} on which to write the HFileLink + *
<p>
        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) - * @param linkedTable - Linked Table Name - * @param linkedRegion - Linked Region Name - * @param hfileName - Linked HFile name + * @param linkedTable - Linked Table Name + * @param linkedRegion - Linked Region Name + * @param hfileName - Linked HFile name * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, - final String hfileName) throws IOException { + final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, + final String hfileName) throws IOException { return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName, true); } /** * Create a new HFileLink. In the event of link creation failure, this method throws an * IOException, so that the calling upper laying can decide on how to proceed with this. - * - *
<p>
        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * - * @param conf {@link Configuration} to read for the archive directory name - * @param fs {@link FileSystem} on which to write the HFileLink + *
<p>
        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) - * @param linkedTable - Linked Table Name - * @param linkedRegion - Linked Region Name - * @param hfileName - Linked HFile name + * @param linkedTable - Linked Table Name + * @param linkedRegion - Linked Region Name + * @param hfileName - Linked HFile name * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, - final String hfileName, final boolean createBackRef) throws IOException { + final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, + final String hfileName, final boolean createBackRef) throws IOException { String familyName = dstFamilyPath.getName(); String regionName = dstFamilyPath.getParent().getName(); - String tableName = CommonFSUtils.getTableName(dstFamilyPath.getParent().getParent()) - .getNameAsString(); + String tableName = + CommonFSUtils.getTableName(dstFamilyPath.getParent().getParent()).getNameAsString(); return create(conf, fs, dstFamilyPath, familyName, tableName, regionName, linkedTable, linkedRegion, hfileName, createBackRef); @@ -405,25 +380,25 @@ public class HFileLink extends FileLink { /** * Create a new HFileLink - * - *
<p>
        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * @param conf {@link Configuration} to read for the archive directory name - * @param fs {@link FileSystem} on which to write the HFileLink + *
<p>
        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) - * @param dstTableName - Destination table name + * @param dstTableName - Destination table name * @param dstRegionName - Destination region name - * @param linkedTable - Linked Table Name - * @param linkedRegion - Linked Region Name - * @param hfileName - Linked HFile name + * @param linkedTable - Linked Table Name + * @param linkedRegion - Linked Region Name + * @param hfileName - Linked HFile name * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final String familyName, final String dstTableName, - final String dstRegionName, final TableName linkedTable, final String linkedRegion, - final String hfileName, final boolean createBackRef) throws IOException { + final Path dstFamilyPath, final String familyName, final String dstTableName, + final String dstRegionName, final TableName linkedTable, final String linkedRegion, + final String hfileName, final boolean createBackRef) throws IOException { String name = createHFileLinkName(linkedTable, linkedRegion, hfileName); String refName = createBackReferenceName(dstTableName, dstRegionName); @@ -431,8 +406,8 @@ public class HFileLink extends FileLink { fs.mkdirs(dstFamilyPath); // Make sure the FileLink reference directory exists - Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, - linkedTable, linkedRegion, familyName); + Path archiveStoreDir = + HFileArchiveUtil.getStoreArchivePath(conf, linkedTable, linkedRegion, familyName); Path backRefPath = null; if (createBackRef) { Path backRefssDir = getBackReferencesDir(archiveStoreDir, hfileName); @@ -455,18 +430,17 @@ public class HFileLink extends FileLink { } throw e; } - throw new IOException("File link=" + name + " already exists under " + - dstFamilyPath + " folder."); + throw new IOException( + "File link=" + name + " already exists under " + dstFamilyPath + " folder."); } /** * Create a new HFileLink starting from a hfileLink name - * - *
<p>
        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * - * @param conf {@link Configuration} to read for the archive directory name - * @param fs {@link FileSystem} on which to write the HFileLink + *
<p>
        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) * @param hfileLinkName - HFileLink name (it contains hfile-region-table) * @param createBackRef - Whether back reference should be created. Defaults to true. @@ -474,30 +448,28 @@ public class HFileLink extends FileLink { * @throws IOException on file or parent directory creation failure. */ public static String createFromHFileLink(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final String hfileLinkName, final boolean createBackRef) - throws IOException { + final Path dstFamilyPath, final String hfileLinkName, final boolean createBackRef) + throws IOException { Matcher m = LINK_NAME_PATTERN.matcher(hfileLinkName); if (!m.matches()) { throw new IllegalArgumentException(hfileLinkName + " is not a valid HFileLink name!"); } - return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), - m.group(3), m.group(4), createBackRef); + return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), m.group(3), + m.group(4), createBackRef); } /** * Create the back reference name */ - //package-private for testing - static String createBackReferenceName(final String tableNameStr, - final String regionName) { + // package-private for testing + static String createBackReferenceName(final String tableNameStr, final String regionName) { return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '='); } /** * Get the full path of the HFile referenced by the back reference - * - * @param rootDir root hbase directory + * @param rootDir root hbase directory * @param linkRefPath Link Back Reference path * @return full path of the referenced hfile */ @@ -511,8 +483,8 @@ public class HFileLink extends FileLink { Path regionPath = familyPath.getParent(); Path tablePath = regionPath.getParent(); - String linkName = createHFileLinkName(CommonFSUtils.getTableName(tablePath), - regionPath.getName(), hfileName); + String linkName = + createHFileLinkName(CommonFSUtils.getTableName(tablePath), regionPath.getName(), hfileName); Path linkTableDir = CommonFSUtils.getTableDir(rootDir, linkTableName); Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName); return new Path(new Path(regionDir, familyPath.getName()), linkName); @@ -521,22 +493,20 @@ public class HFileLink extends FileLink { public static Pair parseBackReferenceName(String name) { int separatorIndex = name.indexOf('.'); String linkRegionName = name.substring(0, separatorIndex); - String tableSubstr = name.substring(separatorIndex + 1) - .replace('=', TableName.NAMESPACE_DELIM); + String tableSubstr = name.substring(separatorIndex + 1).replace('=', TableName.NAMESPACE_DELIM); TableName linkTableName = TableName.valueOf(tableSubstr); return new Pair<>(linkTableName, linkRegionName); } /** * Get the full path of the HFile referenced by the back reference - * - * @param conf {@link Configuration} to read for the archive directory name + * @param conf {@link Configuration} to read for the archive directory name * @param linkRefPath Link Back Reference path * @return full path of the referenced hfile * @throws IOException on unexpected error. 
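Buried in the reformatting above is the back-reference naming scheme that createBackReferenceName and parseBackReferenceName implement. A minimal, self-contained sketch of that same scheme, using plain strings and invented example values rather than the package-private HFileLink helpers:

```java
// Illustrative only: mirrors the back-reference name format described above
// ("<dstRegion>.<dstTable>" with the ':' namespace delimiter replaced by '=').
public final class BackRefNameSketch {
  private static final char NAMESPACE_DELIM = ':'; // same delimiter TableName uses

  static String createBackReferenceName(String tableNameStr, String regionName) {
    // '=' keeps the name a single valid file-system name component
    return regionName + "." + tableNameStr.replace(NAMESPACE_DELIM, '=');
  }

  static String[] parseBackReferenceName(String name) {
    int separatorIndex = name.indexOf('.');
    String regionName = name.substring(0, separatorIndex);
    String tableNameStr = name.substring(separatorIndex + 1).replace('=', NAMESPACE_DELIM);
    return new String[] { tableNameStr, regionName };
  }

  public static void main(String[] args) {
    String ref = createBackReferenceName("ns:clone_table", "1588230740");
    System.out.println(ref);                           // 1588230740.ns=clone_table
    String[] parsed = parseBackReferenceName(ref);
    System.out.println(parsed[0] + " / " + parsed[1]); // ns:clone_table / 1588230740
  }
}
```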
*/ public static Path getHFileFromBackReference(final Configuration conf, final Path linkRefPath) - throws IOException { + throws IOException { return getHFileFromBackReference(CommonFSUtils.getRootDir(conf), linkRefPath); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index ab293e36277..95665391740 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -40,25 +38,24 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A facade for a {@link org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up - * either the top or bottom half of a HFile where 'bottom' is the first half - * of the file containing the keys that sort lowest and 'top' is the second half - * of the file with keys that sort greater than those of the bottom half. - * The top includes the split files midkey, of the key that follows if it does + * A facade for a {@link org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up either the + * top or bottom half of a HFile where 'bottom' is the first half of the file containing the keys + * that sort lowest and 'top' is the second half of the file with keys that sort greater than those + * of the bottom half. The top includes the split files midkey, of the key that follows if it does * not exist in the file. - * - *
<p>
        This type works in tandem with the {@link Reference} type. This class - * is used reading while Reference is used writing. - * - *
<p>
        This file is not splitable. Calls to {@link #midKey()} return null. + *
<p>
        + * This type works in tandem with the {@link Reference} type. This class is used reading while + * Reference is used writing. + *
<p>
        + * This file is not splitable. Calls to {@link #midKey()} return null. */ @InterfaceAudience.Private public class HalfStoreFileReader extends StoreFileReader { private static final Logger LOG = LoggerFactory.getLogger(HalfStoreFileReader.class); final boolean top; - // This is the key we split around. Its the first possible entry on a row: + // This is the key we split around. Its the first possible entry on a row: // i.e. empty column and a timestamp of LATEST_TIMESTAMP. - protected final byte [] splitkey; + protected final byte[] splitkey; private final Cell splitCell; @@ -68,20 +65,20 @@ public class HalfStoreFileReader extends StoreFileReader { /** * Creates a half file reader for a hfile referred to by an hfilelink. - * @param context Reader context info - * @param fileInfo HFile info + * @param context Reader context info + * @param fileInfo HFile info * @param cacheConf CacheConfig - * @param r original reference file (contains top or bottom) - * @param refCount reference count - * @param conf Configuration + * @param r original reference file (contains top or bottom) + * @param refCount reference count + * @param conf Configuration */ public HalfStoreFileReader(final ReaderContext context, final HFileInfo fileInfo, - final CacheConfig cacheConf, final Reference r, - AtomicInteger refCount, final Configuration conf) throws IOException { + final CacheConfig cacheConf, final Reference r, AtomicInteger refCount, + final Configuration conf) throws IOException { super(context, fileInfo, cacheConf, refCount, conf); // This is not actual midkey for this half-file; its just border - // around which we split top and bottom. Have to look in files to find - // actual last and first keys for bottom and top halves. Half-files don't + // around which we split top and bottom. Have to look in files to find + // actual last and first keys for bottom and top halves. Half-files don't // have an actual midkey themselves. No midkey is how we indicate file is // not splittable. 
this.splitkey = r.getSplitKey(); @@ -95,8 +92,8 @@ public class HalfStoreFileReader extends StoreFileReader { } @Override - public HFileScanner getScanner(final boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { + public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, + final boolean isCompaction) { final HFileScanner s = super.getScanner(cacheBlocks, pread, isCompaction); return new HFileScanner() { final HFileScanner delegate = s; @@ -202,8 +199,8 @@ public class HalfStoreFileReader extends StoreFileReader { boolean res = delegate.seekBefore(splitCell); if (!res) { throw new IOException( - "Seeking for a key in bottom of file, but key exists in top of file, " + - "failed on seekBefore(midkey)"); + "Seeking for a key in bottom of file, but key exists in top of file, " + + "failed on seekBefore(midkey)"); } return 1; } @@ -227,7 +224,7 @@ public class HalfStoreFileReader extends StoreFileReader { boolean res = delegate.seekBefore(splitCell); if (!res) { throw new IOException("Seeking for a key in bottom of file, but" - + " key exists in top of file, failed on seekBefore(midkey)"); + + " key exists in top of file, failed on seekBefore(midkey)"); } return 1; } @@ -243,8 +240,10 @@ public class HalfStoreFileReader extends StoreFileReader { public boolean seekBefore(Cell key) throws IOException { if (top) { Optional fk = getFirstKey(); - if (fk.isPresent() && - PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0) { + if ( + fk.isPresent() + && PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0 + ) { return false; } } else { @@ -281,12 +280,12 @@ public class HalfStoreFileReader extends StoreFileReader { } }; } - + @Override public boolean passesKeyRangeFilter(Scan scan) { return true; } - + @Override public Optional getLastKey() { if (top) { @@ -326,7 +325,7 @@ public class HalfStoreFileReader extends StoreFileReader { } catch (IOException e) { LOG.warn("Failed seekTo first KV in the file", e); } finally { - if(scanner != null) { + if (scanner != null) { scanner.close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java index 72da73e1e92..c2197cef945 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
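For readers skimming the HalfStoreFileReader changes above, here is a purely conceptual sketch of the top/bottom-half idea using a sorted set. It is not the HBase reader, just an illustration of how one physical key range is exposed as two halves around the split key, neither of which is splittable again on its own:

```java
// Conceptual sketch only: a "half view" over a sorted set of keys, mirroring how
// HalfStoreFileReader serves either the bottom (< splitkey) or top (>= splitkey)
// half of a single shared HFile to the two daughter regions of a split.
import java.util.NavigableSet;
import java.util.TreeSet;

public final class HalfViewSketch {
  public static void main(String[] args) {
    NavigableSet<String> keys = new TreeSet<>();
    keys.add("a"); keys.add("g"); keys.add("m"); keys.add("t");
    String splitKey = "h";

    NavigableSet<String> bottom = keys.headSet(splitKey, false); // keys sorting below the split
    NavigableSet<String> top = keys.tailSet(splitKey, true);     // split key and everything above

    System.out.println(bottom); // [a, g]
    System.out.println(top);    // [m, t]
    // As with HalfStoreFileReader, a half file reports no midkey (null),
    // which is how HBase marks it as not splittable again.
  }
}
```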
*/ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; @@ -30,7 +29,7 @@ public class MetricsIO { public MetricsIO(MetricsIOWrapper wrapper) { this(CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) - .createIO(wrapper), wrapper); + .createIO(wrapper), wrapper); } MetricsIO(MetricsIOSource source, MetricsIOWrapper wrapper) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java index 1ce762a0ad2..687d5833458 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.io.hfile.HFile; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java index 845005f1bbd..ed3986f5883 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,44 +23,42 @@ import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; - import org.apache.commons.io.IOUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * A reference to the top or bottom half of a store file where 'bottom' is the first half - * of the file containing the keys that sort lowest and 'top' is the second half - * of the file with keys that sort greater than those of the bottom half. The file referenced - * lives under a different region. References are made at region split time. - * - *
<p>
        References work with a special half store file type. References know how - * to write out the reference format in the file system and are what is juggled - * when references are mixed in with direct store files. The half store file - * type is used reading the referred to file. - * - *
<p>
        References to store files located over in some other region look like - * this in the file system - * 1278437856009925445.3323223323: - * i.e. an id followed by hash of the referenced region. - * Note, a region is itself not splittable if it has instances of store file - * references. References are cleaned up by compactions. + * A reference to the top or bottom half of a store file where 'bottom' is the first half of the + * file containing the keys that sort lowest and 'top' is the second half of the file with keys that + * sort greater than those of the bottom half. The file referenced lives under a different region. + * References are made at region split time. + *
<p>
        + * References work with a special half store file type. References know how to write out the + * reference format in the file system and are what is juggled when references are mixed in with + * direct store files. The half store file type is used reading the referred to file. + *
<p>
        + * References to store files located over in some other region look like this in the file system + * 1278437856009925445.3323223323: i.e. an id followed by hash of the referenced + * region. Note, a region is itself not splittable if it has instances of store file references. + * References are cleaned up by compactions. */ @InterfaceAudience.Private public class Reference { - private byte [] splitkey; + private byte[] splitkey; private Range region; /** - * For split HStoreFiles, it specifies if the file covers the lower half or - * the upper half of the key range + * For split HStoreFiles, it specifies if the file covers the lower half or the upper half of the + * key range */ static enum Range { /** HStoreFile contains upper half of key range */ @@ -71,28 +68,25 @@ public class Reference { } /** - * @param splitRow - * @return A {@link Reference} that points at top half of a an hfile + * n * @return A {@link Reference} that points at top half of a an hfile */ - public static Reference createTopReference(final byte [] splitRow) { + public static Reference createTopReference(final byte[] splitRow) { return new Reference(splitRow, Range.top); } /** - * @param splitRow - * @return A {@link Reference} that points at the bottom half of a an hfile + * n * @return A {@link Reference} that points at the bottom half of a an hfile */ - public static Reference createBottomReference(final byte [] splitRow) { + public static Reference createBottomReference(final byte[] splitRow) { return new Reference(splitRow, Range.bottom); } /** * Constructor - * @param splitRow This is row we are splitting around. - * @param fr + * @param splitRow This is row we are splitting around. n */ - Reference(final byte [] splitRow, final Range fr) { - this.splitkey = splitRow == null? null: KeyValueUtil.createFirstOnRow(splitRow).getKey(); + Reference(final byte[] splitRow, final Range fr) { + this.splitkey = splitRow == null ? null : KeyValueUtil.createFirstOnRow(splitRow).getKey(); this.region = fr; } @@ -108,17 +102,16 @@ public class Reference { } /** - * - * @return Range + * n */ public Range getFileRegion() { return this.region; } /** - * @return splitKey + * n */ - public byte [] getSplitKey() { + public byte[] getSplitKey() { return splitkey; } @@ -135,20 +128,19 @@ public class Reference { } /** - * @deprecated Writables are going away. Use the pb serialization methods instead. - * Remove in a release after 0.96 goes out. This is here only to migrate - * old Reference files written with Writables before 0.96. + * @deprecated Writables are going away. Use the pb serialization methods instead. Remove in a + * release after 0.96 goes out. This is here only to migrate old Reference files + * written with Writables before 0.96. */ @Deprecated public void readFields(DataInput in) throws IOException { boolean tmp = in.readBoolean(); // If true, set region to top. - this.region = tmp? Range.top: Range.bottom; + this.region = tmp ? Range.top : Range.bottom; this.splitkey = Bytes.readByteArray(in); } - public Path write(final FileSystem fs, final Path p) - throws IOException { + public Path write(final FileSystem fs, final Path p) throws IOException { FSDataOutputStream out = fs.create(p, false); try { out.write(toByteArray()); @@ -159,26 +151,21 @@ public class Reference { } /** - * Read a Reference from FileSystem. - * @param fs - * @param p - * @return New Reference made from passed p - * @throws IOException + * Read a Reference from FileSystem. 
nn * @return New Reference made from passed p n */ - public static Reference read(final FileSystem fs, final Path p) - throws IOException { + public static Reference read(final FileSystem fs, final Path p) throws IOException { InputStream in = fs.open(p); try { // I need to be able to move back in the stream if this is not a pb serialization so I can // do the Writable decoding instead. - in = in.markSupported()? in: new BufferedInputStream(in); + in = in.markSupported() ? in : new BufferedInputStream(in); int pblen = ProtobufUtil.lengthOfPBMagic(); in.mark(pblen); - byte [] pbuf = new byte[pblen]; - IOUtils.readFully(in, pbuf,0, pblen); + byte[] pbuf = new byte[pblen]; + IOUtils.readFully(in, pbuf, 0, pblen); // WATCHOUT! Return in middle of function!!! if (ProtobufUtil.isPBMagicPrefix(pbuf)) return convert(FSProtos.Reference.parseFrom(in)); - // Else presume Writables. Need to reset the stream since it didn't start w/ pb. + // Else presume Writables. Need to reset the stream since it didn't start w/ pb. // We won't bother rewriting thie Reference as a pb since Reference is transitory. in.reset(); Reference r = new Reference(); @@ -194,8 +181,9 @@ public class Reference { public FSProtos.Reference convert() { FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder(); - builder.setRange(isTopFileRegion(getFileRegion())? - FSProtos.Reference.Range.TOP: FSProtos.Reference.Range.BOTTOM); + builder.setRange(isTopFileRegion(getFileRegion()) + ? FSProtos.Reference.Range.TOP + : FSProtos.Reference.Range.BOTTOM); builder.setSplitkey(UnsafeByteOperations.unsafeWrap(getSplitKey())); return builder.build(); } @@ -203,17 +191,16 @@ public class Reference { public static Reference convert(final FSProtos.Reference r) { Reference result = new Reference(); result.splitkey = r.getSplitkey().toByteArray(); - result.region = r.getRange() == FSProtos.Reference.Range.TOP? Range.top: Range.bottom; + result.region = r.getRange() == FSProtos.Reference.Range.TOP ? Range.top : Range.bottom; return result; } /** - * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom - * (w/o the delimiter, pb reads to EOF which may not be what you want). - * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. - * @throws IOException + * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the + * delimiter, pb reads to EOF which may not be what you want). + * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n */ - byte [] toByteArray() throws IOException { + byte[] toByteArray() throws IOException { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java index c495201a45f..9ad95ff98f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,43 +15,37 @@ * See the License for the specific language governing permissions and * limitations under the License. 
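The Reference hunks above keep the public write/read round trip intact. A small sketch of that round trip under an assumed local path and a made-up split row; Reference is an @InterfaceAudience.Private class, so this is illustration only:

```java
// Sketch: create a top Reference for a split, write it, read it back.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;

public final class ReferenceRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.getLocal(conf);

    Reference top = Reference.createTopReference(Bytes.toBytes("row-5000"));
    // Hypothetical reference file name: "<id>.<hash of referenced region>"
    Path refPath = new Path("/tmp/example-ref/12345.abcdef0123456789");
    top.write(fs, refPath); // serialized as protobuf with the PB magic prefix (see toByteArray())

    Reference copy = Reference.read(fs, refPath); // also understands pre-0.96 Writable format
    System.out.println("split key bytes: " + copy.getSplitKey().length);
  }
}
```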
*/ - package org.apache.hadoop.hbase.io; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * WALLink describes a link to a WAL. - * - * An wal can be in /hbase/.logs/<server>/<wal> - * or it can be in /hbase/.oldlogs/<wal> - * - * The link checks first in the original path, - * if it is not present it fallbacks to the archived path. + * WALLink describes a link to a WAL. An wal can be in /hbase/.logs/<server>/<wal> or it + * can be in /hbase/.oldlogs/<wal> The link checks first in the original path, if it is not + * present it fallbacks to the archived path. */ @InterfaceAudience.Private public class WALLink extends FileLink { /** - * @param conf {@link Configuration} from which to extract specific archive locations + * @param conf {@link Configuration} from which to extract specific archive locations * @param serverName Region Server owner of the log - * @param logName WAL file name + * @param logName WAL file name * @throws IOException on unexpected error. */ - public WALLink(final Configuration conf, - final String serverName, final String logName) throws IOException { + public WALLink(final Configuration conf, final String serverName, final String logName) + throws IOException { this(CommonFSUtils.getWALRootDir(conf), serverName, logName); } /** * @param walRootDir Path to the root directory where hbase files are stored * @param serverName Region Server owner of the log - * @param logName WAL file name + * @param logName WAL file name */ public WALLink(final Path walRootDir, final String serverName, final String logName) { final Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); @@ -60,7 +54,7 @@ public class WALLink extends FileLink { } /** - * @param originPath Path to the wal in the log directory + * @param originPath Path to the wal in the log directory * @param archivePath Path to the wal in the archived log directory */ public WALLink(final Path originPath, final Path archivePath) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java index 017c4d14b6e..92b5e9fa67d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.yetus.audience.InterfaceAudience; @@ -27,11 +25,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface WritableWithSize { /** - * Provide a size hint to the caller. write() should ideally - * not go beyond this if at all possible. - * - * You can return 0 if there is no size hint. - * + * Provide a size hint to the caller. write() should ideally not go beyond this if at all + * possible. You can return 0 if there is no size hint. 
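The WALLink constructor reformatted above resolves a WAL name against the active log directory first and the archived one second. A hedged sketch of using it, with invented server and WAL names; open(fs) is inherited from the FileLink base class and is assumed here rather than shown in this hunk:

```java
// Sketch: resolve a WAL through WALLink; it tries WALs/<server>/<wal>, then oldWALs/<wal>.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.WALLink;

public final class WalLinkSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Server name and WAL file name below are invented examples.
    WALLink link = new WALLink(conf, "rs1.example.com,16020,1650000000000",
        "rs1.example.com%2C16020%2C1650000000000.1650000001234");
    try (FSDataInputStream in = link.open(fs)) { // falls back to the archive path if needed
      System.out.println("first byte: " + in.read());
    }
  }
}
```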
* @return the size of the writable */ long getWritableSize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java index cd8932269c5..187b0536a30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; import org.apache.yetus.audience.InterfaceAudience; /** - * Snapshot of block cache age in cache. - * This object is preferred because we can control how it is serialized out when JSON'ing. + * Snapshot of block cache age in cache. This object is preferred because we can control how it is + * serialized out when JSON'ing. */ @InterfaceAudience.Private public class AgeSnapshot { @@ -32,7 +32,7 @@ public class AgeSnapshot { AgeSnapshot(final FastLongHistogram ageHistogram) { this.ageHistogram = ageHistogram; - this.quantiles = ageHistogram.getQuantiles(new double[]{0.75, 0.95, 0.98, 0.99, 0.999}); + this.quantiles = ageHistogram.getQuantiles(new double[] { 0.75, 0.95, 0.98, 0.99, 0.999 }); } public double get75thPercentile() { @@ -55,7 +55,6 @@ public class AgeSnapshot { return quantiles[4]; } - public double getMean() { return this.ageHistogram.getMean(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index fc58fc4bf73..7cbfad4b0ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,19 +18,18 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - import org.apache.yetus.audience.InterfaceAudience; /** - * Block cache interface. Anything that implements the {@link Cacheable} - * interface can be put in the cache. + * Block cache interface. Anything that implements the {@link Cacheable} interface can be put in the + * cache. */ @InterfaceAudience.Private public interface BlockCache extends Iterable { /** * Add block to cache. * @param cacheKey The block's cache key. - * @param buf The block contents wrapped in a ByteBuffer. + * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory */ void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory); @@ -39,16 +37,16 @@ public interface BlockCache extends Iterable { /** * Add block to cache (defaults to not in-memory). * @param cacheKey The block's cache key. - * @param buf The object to cache. + * @param buf The object to cache. */ void cacheBlock(BlockCacheKey cacheKey, Cacheable buf); /** * Fetch block from cache. - * @param cacheKey Block to fetch. 
- * @param caching Whether this request has caching enabled (used for stats) - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check locking) + * @param cacheKey Block to fetch. + * @param caching Whether this request has caching enabled (used for stats) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid + * double counting cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not * @return Block or null if block is not in 2 cache. */ @@ -64,14 +62,12 @@ public interface BlockCache extends Iterable { /** * Evicts all blocks for the given HFile. - * * @return the number of blocks evicted */ int evictBlocksByHfileName(String hfileName); /** - * Get the statistics for this block cache. - * @return Stats + * Get the statistics for this block cache. n */ CacheStats getStats(); @@ -116,11 +112,11 @@ public interface BlockCache extends Iterable { */ long getBlockCount(); - /** - * Returns the number of data blocks currently cached in the block cache. - * @return number of blocks in the cache - */ - long getDataBlockCount(); + /** + * Returns the number of data blocks currently cached in the block cache. + * @return number of blocks in the cache + */ + long getDataBlockCount(); /** * @return Iterator over the blocks in the cache. @@ -131,7 +127,7 @@ public interface BlockCache extends Iterable { /** * @return The list of sub blockcaches that make up this one; returns null if no sub caches. */ - BlockCache [] getBlockCaches(); + BlockCache[] getBlockCaches(); /** * Check if block type is meta or index block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java index 12c769ec805..38a296aad52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; import java.io.IOException; import java.util.concurrent.ForkJoinPool; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; @@ -50,13 +49,13 @@ public final class BlockCacheFactory { /** * If the chosen ioengine can persist its state across restarts, the path to the file to persist - * to. This file is NOT the data file. It is a file into which we will serialize the map of - * what is in the data file. For example, if you pass the following argument as + * to. This file is NOT the data file. It is a file into which we will serialize the map of what + * is in the data file. For example, if you pass the following argument as * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file * /tmp/bucketcache.data but the metadata on where the data is in the supplied file - * is an in-memory map that needs to be persisted across restarts. Where to store this - * in-memory state is what you supply here: e.g. /tmp/bucketcache.map. 
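The repeat flag documented on getBlock() above exists for double-check locking: the second probe under the lock should not be counted as another miss. A sketch of that pattern against the four-argument getBlock overload; the per-key lock and the loadFromDisk callable are placeholders, not how the HFile readers actually synchronize:

```java
// Illustrative read-through pattern using BlockCache's getBlock(key, caching, repeat, updateMetrics).
import java.util.concurrent.Callable;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.Cacheable;

public final class CacheReadSketch {
  static Cacheable readThroughCache(BlockCache cache, BlockCacheKey key,
      Callable<Cacheable> loadFromDisk) throws Exception {
    // First probe: counts as a miss in the cache metrics if the block is absent.
    Cacheable block = cache.getBlock(key, /* caching */ true, /* repeat */ false, true);
    if (block != null) {
      return block;
    }
    synchronized (key.toString().intern()) { // crude per-key lock, for illustration only
      // Second probe under the lock: repeat=true so the same miss is not counted twice.
      block = cache.getBlock(key, true, /* repeat */ true, true);
      if (block != null) {
        return block;
      }
      block = loadFromDisk.call();  // hypothetical loader reading the block from the HFile
      cache.cacheBlock(key, block); // defaults to not in-memory
      return block;
    }
  }
}
```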
+ * is an in-memory map that needs to be persisted across restarts. Where to store this in-memory + * state is what you supply here: e.g. /tmp/bucketcache.map. */ public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; @@ -103,16 +102,16 @@ public final class BlockCacheFactory { boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); if (useExternal) { BlockCache l2CacheInstance = createExternalBlockcache(conf); - return l2CacheInstance == null ? - l1Cache : - new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance); + return l2CacheInstance == null + ? l1Cache + : new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance); } else { // otherwise use the bucket cache. BucketCache bucketCache = createBucketCache(conf); if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) { // Non combined mode is off from 2.0 LOG.warn( - "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); + "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); } return bucketCache == null ? l1Cache : new CombinedBlockCache(l1Cache, bucketCache); } @@ -125,8 +124,8 @@ public final class BlockCacheFactory { } String policy = c.get(BLOCKCACHE_POLICY_KEY, BLOCKCACHE_POLICY_DEFAULT); int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); - LOG.info("Allocating BlockCache size=" + - StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); + LOG.info("Allocating BlockCache size=" + StringUtils.byteDesc(cacheSize) + ", blockSize=" + + StringUtils.byteDesc(blockSize)); if (policy.equalsIgnoreCase("LRU")) { return new LruBlockCache(cacheSize, blockSize, true, c); } else if (policy.equalsIgnoreCase("IndexOnlyLRU")) { @@ -141,13 +140,14 @@ public final class BlockCacheFactory { } /** - * Enum of all built in external block caches. - * This is used for config. + * Enum of all built in external block caches. This is used for config. */ private static enum ExternalBlockCaches { memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"); + // TODO(eclark): Consider more. Redis, etc. Class clazz; + ExternalBlockCaches(String clazzName) { try { clazz = (Class) Class.forName(clazzName); @@ -155,6 +155,7 @@ public final class BlockCacheFactory { clazz = null; } } + ExternalBlockCaches(Class clazz) { this.clazz = clazz; } @@ -168,12 +169,11 @@ public final class BlockCacheFactory { // Get the class, from the config. s try { - klass = ExternalBlockCaches - .valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; + klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; } catch (IllegalArgumentException exception) { try { - klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName( - "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); + klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, + Class.forName("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); } catch (ClassNotFoundException e) { return null; } @@ -191,7 +191,7 @@ public final class BlockCacheFactory { } private static BucketCache createBucketCache(Configuration c) { - // Check for L2. ioengine name must be non-null. + // Check for L2. ioengine name must be non-null. 
String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) { return null; @@ -200,20 +200,19 @@ public final class BlockCacheFactory { int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c); if (bucketCacheSize <= 0) { - throw new IllegalStateException("bucketCacheSize <= 0; Check " + - BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size"); + throw new IllegalStateException("bucketCacheSize <= 0; Check " + BUCKET_CACHE_SIZE_KEY + + " setting and/or server java heap size"); } if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) { LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer " - + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note"); + + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note"); } - int writerThreads = c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, - DEFAULT_BUCKET_CACHE_WRITER_THREADS); - int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, - DEFAULT_BUCKET_CACHE_WRITER_QUEUE); + int writerThreads = + c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS); + int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, DEFAULT_BUCKET_CACHE_WRITER_QUEUE); String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY); String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY); - int [] bucketSizes = null; + int[] bucketSizes = null; if (configuredBucketSizes != null) { bucketSizes = new int[configuredBucketSizes.length]; for (int i = 0; i < configuredBucketSizes.length; i++) { @@ -225,22 +224,22 @@ public final class BlockCacheFactory { // See BucketEntry where offset to each block is represented using 5 bytes (instead of 8 // bytes long). We would like to save heap overhead as less as possible. throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '" - + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes to be multiples of 256"); + + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes to be multiples of 256"); } bucketSizes[i] = bucketSize; } } BucketCache bucketCache = null; try { - int ioErrorsTolerationDuration = c.getInt( - "hbase.bucketcache.ioengine.errors.tolerated.duration", + int ioErrorsTolerationDuration = + c.getInt("hbase.bucketcache.ioengine.errors.tolerated.duration", BucketCache.DEFAULT_ERROR_TOLERATION_DURATION); // Bucket cache logs its stats on creation internal to the constructor. 
- bucketCache = new BucketCache(bucketCacheIOEngineName, - bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath, - ioErrorsTolerationDuration, c); + bucketCache = new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize, + bucketSizes, writerThreads, writerQueueLen, persistentPath, ioErrorsTolerationDuration, c); } catch (IOException ioex) { - LOG.error("Can't instantiate bucket cache", ioex); throw new RuntimeException(ioex); + LOG.error("Can't instantiate bucket cache", ioex); + throw new RuntimeException(ioex); } return bucketCache; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java index 4683c3520c1..f9c52e4acbc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; /** * Cache Key for use with implementations of {@link BlockCache} @@ -35,14 +35,14 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { /** * Construct a new BlockCacheKey * @param hfileName The name of the HFile this block belongs to. - * @param offset Offset of the block into the file + * @param offset Offset of the block into the file */ public BlockCacheKey(String hfileName, long offset) { this(hfileName, offset, true, BlockType.DATA); } public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica, - BlockType blockType) { + BlockType blockType) { this.isPrimaryReplicaBlock = isPrimaryReplica; this.hfileName = hfileName; this.offset = offset; @@ -59,8 +59,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { if (o instanceof BlockCacheKey) { BlockCacheKey k = (BlockCacheKey) o; return offset == k.offset - && (hfileName == null ? k.hfileName == null : hfileName - .equals(k.hfileName)); + && (hfileName == null ? k.hfileName == null : hfileName.equals(k.hfileName)); } else { return false; } @@ -74,13 +73,12 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { public static final long FIXED_OVERHEAD = ClassSize.estimateBase(BlockCacheKey.class, false); /** - * Strings have two bytes per character due to default Java Unicode encoding - * (hence length times 2). + * Strings have two bytes per character due to default Java Unicode encoding (hence length times + * 2). 
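createBucketCache() above is driven entirely by configuration. A sketch of the keys involved; the literal "hbase.bucketcache.bucket.sizes" and "hbase.bucketcache.size" names and the BlockCacheFactory.createBlockCache(conf) entry point are assumptions inferred from the constants referenced in this hunk, and the sizes and paths are examples only:

```java
// Sketch: enable a file-backed bucket cache via configuration, then build the block cache.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;

public final class BucketCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.bucketcache.ioengine", "file:/tmp/bucketcache.data");   // non-null => L2 enabled
    conf.setInt("hbase.bucketcache.size", 4096);                            // megabytes
    conf.set("hbase.bucketcache.persistent.path", "/tmp/bucketcache.map");  // metadata map, not data
    // Every configured bucket size must be a multiple of 256, or createBucketCache() throws.
    conf.set("hbase.bucketcache.bucket.sizes", "8192,16384,65536");

    BlockCache cache = BlockCacheFactory.createBlockCache(conf);
    System.out.println(cache); // a CombinedBlockCache of the L1 cache and the BucketCache
  }
}
```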
*/ @Override public long heapSize() { - return ClassSize.align(FIXED_OVERHEAD + ClassSize.STRING + - 2 * hfileName.length()); + return ClassSize.align(FIXED_OVERHEAD + ClassSize.STRING + 2 * hfileName.length()); } // can't avoid this unfortunately diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index c2cf82148be..2a2faae3eae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,16 +75,14 @@ public class BlockCacheUtil { }).setPrettyPrinting().create(); /** - * @param cb - * @return The block content as String. + * n * @return The block content as String. */ public static String toString(final CachedBlock cb, final long now) { return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now); } /** - * Little data structure to hold counts for a file. - * Used doing a toJSON. + * Little data structure to hold counts for a file. Used doing a toJSON. */ static class CachedBlockCountsPerFile { private int count = 0; @@ -122,7 +120,7 @@ public class BlockCacheUtil { * @return A JSON String of filename and counts of blocks */ public static String toJSON(String filename, NavigableSet blocks) - throws IOException { + throws IOException { CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename); for (CachedBlock cb : blocks) { counts.count++; @@ -151,63 +149,59 @@ public class BlockCacheUtil { } /** - * @param cb - * @return The block content of bc as a String minus the filename. + * n * @return The block content of bc as a String minus the filename. */ public static String toStringMinusFileName(final CachedBlock cb, final long now) { - return "offset=" + cb.getOffset() + - ", size=" + cb.getSize() + - ", age=" + (now - cb.getCachedTime()) + - ", type=" + cb.getBlockType() + - ", priority=" + cb.getBlockPriority(); + return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age=" + + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority=" + + cb.getBlockPriority(); } /** * Get a {@link CachedBlocksByFile} instance and load it up by iterating content in * {@link BlockCache}. * @param conf Used to read configurations - * @param bc Block Cache to iterate. + * @param bc Block Cache to iterate. 
* @return Laoded up instance of CachedBlocksByFile */ public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf, - final BlockCache bc) { + final BlockCache bc) { CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf); - for (CachedBlock cb: bc) { + for (CachedBlock cb : bc) { if (cbsbf.update(cb)) break; } return cbsbf; } private static int compareCacheBlock(Cacheable left, Cacheable right, - boolean includeNextBlockMetadata) { + boolean includeNextBlockMetadata) { ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength()); left.serialize(l, includeNextBlockMetadata); ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength()); right.serialize(r, includeNextBlockMetadata); - return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), - r.array(), r.arrayOffset(), r.limit()); + return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), r.array(), r.arrayOffset(), + r.limit()); } /** * Validate that the existing and newBlock are the same without including the nextBlockMetadata, - * if not, throw an exception. If they are the same without the nextBlockMetadata, - * return the comparison. - * + * if not, throw an exception. If they are the same without the nextBlockMetadata, return the + * comparison. * @param existing block that is existing in the cache. * @param newBlock block that is trying to be cached. * @param cacheKey the cache key of the blocks. * @return comparison of the existing block to the newBlock. */ public static int validateBlockAddition(Cacheable existing, Cacheable newBlock, - BlockCacheKey cacheKey) { + BlockCacheKey cacheKey) { int comparison = compareCacheBlock(existing, newBlock, false); if (comparison != 0) { - throw new RuntimeException("Cached block contents differ, which should not have happened." - + "cacheKey:" + cacheKey); + throw new RuntimeException( + "Cached block contents differ, which should not have happened." + "cacheKey:" + cacheKey); } if ((existing instanceof HFileBlock) && (newBlock instanceof HFileBlock)) { comparison = ((HFileBlock) existing).getNextBlockOnDiskSize() - - ((HFileBlock) newBlock).getNextBlockOnDiskSize(); + - ((HFileBlock) newBlock).getNextBlockOnDiskSize(); } return comparison; } @@ -221,13 +215,13 @@ public class BlockCacheUtil { * new block to cache has, then we can replace the existing block with the new block for better * performance.(HBASE-20447) * @param blockCache BlockCache to check - * @param cacheKey the block cache key - * @param newBlock the new block which try to put into the block cache. + * @param cacheKey the block cache key + * @param newBlock the new block which try to put into the block cache. * @return true means need to replace existing block with new block for the same block cache key. * false means just keep the existing block. */ public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache, - BlockCacheKey cacheKey, Cacheable newBlock) { + BlockCacheKey cacheKey, Cacheable newBlock) { // NOTICE: The getBlock has retained the existingBlock inside. Cacheable existingBlock = blockCache.getBlock(cacheKey, false, false, false); if (existingBlock == null) { @@ -237,16 +231,15 @@ public class BlockCacheUtil { int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, newBlock, cacheKey); if (comparison < 0) { LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the new block has " - + "nextBlockOnDiskSize set. Caching new block."); + + "nextBlockOnDiskSize set. 
Caching new block."); return true; } else if (comparison > 0) { LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the existing block has " - + "nextBlockOnDiskSize set, Keeping cached block."); + + "nextBlockOnDiskSize set, Keeping cached block."); return false; } else { LOG.debug("Caching an already cached block: {}. This is harmless and can happen in rare " - + "cases (see HBASE-8547)", - cacheKey); + + "cases (see HBASE-8547)", cacheKey); return false; } } finally { @@ -256,9 +249,9 @@ public class BlockCacheUtil { } /** - * Use one of these to keep a running account of cached blocks by file. Throw it away when done. - * This is different than metrics in that it is stats on current state of a cache. - * See getLoadedCachedBlocksByFile + * Use one of these to keep a running account of cached blocks by file. Throw it away when done. + * This is different than metrics in that it is stats on current state of a cache. See + * getLoadedCachedBlocksByFile */ public static class CachedBlocksByFile { private int count; @@ -267,11 +260,9 @@ public class BlockCacheUtil { private long dataSize; private final long now = System.nanoTime(); /** - * How many blocks to look at before we give up. - * There could be many millions of blocks. We don't want the - * ui to freeze while we run through 1B blocks... users will - * think hbase dead. UI displays warning in red when stats - * are incomplete. + * How many blocks to look at before we give up. There could be many millions of blocks. We + * don't want the ui to freeze while we run through 1B blocks... users will think hbase dead. UI + * displays warning in red when stats are incomplete. */ private final int max; public static final int DEFAULT_MAX = 1000000; @@ -281,7 +272,7 @@ public class BlockCacheUtil { } CachedBlocksByFile(final Configuration c) { - this.max = c == null? DEFAULT_MAX: c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX); + this.max = c == null ? DEFAULT_MAX : c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX); } /** @@ -292,8 +283,7 @@ public class BlockCacheUtil { FastLongHistogram hist = new FastLongHistogram(); /** - * @param cb - * @return True if full.... if we won't be adding any more. + * n * @return True if full.... if we won't be adding any more. */ public boolean update(final CachedBlock cb) { if (isFull()) return true; @@ -310,15 +300,15 @@ public class BlockCacheUtil { this.dataBlockCount++; this.dataSize += cb.getSize(); } - long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND; + long age = (this.now - cb.getCachedTime()) / NANOS_PER_SECOND; this.hist.add(age, 1); return false; } /** - * @return True if full; i.e. there are more items in the cache but we only loaded up - * the maximum set in configuration hbase.ui.blockcache.by.file.max - * (Default: DEFAULT_MAX). + * @return True if full; i.e. there are more items in the cache but we only loaded up the + * maximum set in configuration hbase.ui.blockcache.by.file.max (Default: + * DEFAULT_MAX). 
*/ public boolean isFull() { return this.count >= this.max; @@ -360,16 +350,12 @@ public class BlockCacheUtil { @Override public String toString() { AgeSnapshot snapshot = getAgeInCacheSnapshot(); - return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size + - ", dataSize=" + getDataSize() + - ", mean age=" + snapshot.getMean() + - ", min age=" + snapshot.getMin() + - ", max age=" + snapshot.getMax() + - ", 75th percentile age=" + snapshot.get75thPercentile() + - ", 95th percentile age=" + snapshot.get95thPercentile() + - ", 98th percentile age=" + snapshot.get98thPercentile() + - ", 99th percentile age=" + snapshot.get99thPercentile() + - ", 99.9th percentile age=" + snapshot.get99thPercentile(); + return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size + + ", dataSize=" + getDataSize() + ", mean age=" + snapshot.getMean() + ", min age=" + + snapshot.getMin() + ", max age=" + snapshot.getMax() + ", 75th percentile age=" + + snapshot.get75thPercentile() + ", 95th percentile age=" + snapshot.get95thPercentile() + + ", 98th percentile age=" + snapshot.get98thPercentile() + ", 99th percentile age=" + + snapshot.get99thPercentile() + ", 99.9th percentile age=" + snapshot.get99thPercentile(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java index 2d90a85d9fc..43498b85f20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -29,10 +26,10 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private class BlockCachesIterator implements Iterator { int index = 0; - final BlockCache [] bcs; + final BlockCache[] bcs; Iterator current; - BlockCachesIterator(final BlockCache [] blockCaches) { + BlockCachesIterator(final BlockCache[] blockCaches) { this.bcs = blockCaches; this.current = this.bcs[this.index].iterator(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java index b8f83578d2f..c340254e07c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; public class BlockWithScanInfo { private final HFileBlock hFileBlock; /** - * The first key in the next block following this one in the HFile. - * If this key is unknown, this is reference-equal with HConstants.NO_NEXT_INDEXED_KEY + * The first key in the next block following this one in the HFile. 
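getLoadedCachedBlocksByFile() and CachedBlocksByFile above aggregate per-file stats for the region server UI. A short usage sketch; the surrounding class and the way the BlockCache instance is obtained are hypothetical:

```java
// Sketch: summarize current block cache contents per file, as the RS UI page does.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;

public final class CacheSummarySketch {
  static void dump(BlockCache cache) {
    Configuration conf = HBaseConfiguration.create();
    // Walks up to hbase.ui.blockcache.by.file.max blocks and aggregates counts,
    // sizes and an age histogram per file.
    BlockCacheUtil.CachedBlocksByFile summary =
        BlockCacheUtil.getLoadedCachedBlocksByFile(conf, cache);
    System.out.println(summary); // count, dataBlockCount, size and age percentiles
  }
}
```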
If this key is unknown, this + * is reference-equal with HConstants.NO_NEXT_INDEXED_KEY */ private final Cell nextIndexedKey; @@ -42,7 +42,7 @@ public class BlockWithScanInfo { return hFileBlock; } - public Cell getNextIndexedKey() { + public Cell getNextIndexedKey() { return nextIndexedKey; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index f58491de044..9e456afcebc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -46,14 +45,13 @@ public class CacheConfig { public static final String CACHE_DATA_ON_READ_KEY = "hbase.block.data.cacheonread"; /** - * Configuration key to cache data blocks on write. There are separate - * switches for bloom blocks and non-root index blocks. + * Configuration key to cache data blocks on write. There are separate switches for bloom blocks + * and non-root index blocks. */ public static final String CACHE_BLOCKS_ON_WRITE_KEY = "hbase.rs.cacheblocksonwrite"; /** - * Configuration key to cache leaf and intermediate-level index blocks on - * write. + * Configuration key to cache leaf and intermediate-level index blocks on write. */ public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY = "hfile.block.index.cacheonwrite"; @@ -68,14 +66,14 @@ public class CacheConfig { public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY = "hbase.block.data.cachecompressed"; /** - * Configuration key to evict all blocks of a given file from the block cache - * when the file is closed. + * Configuration key to evict all blocks of a given file from the block cache when the file is + * closed. */ public static final String EVICT_BLOCKS_ON_CLOSE_KEY = "hbase.rs.evictblocksonclose"; /** - * Configuration key to prefetch all blocks of a given file into the block cache - * when the file is opened. + * Configuration key to prefetch all blocks of a given file into the block cache when the file is + * opened. 
*/ public static final String PREFETCH_BLOCKS_ON_OPEN_KEY = "hbase.rs.prefetchblocksonopen"; @@ -83,17 +81,17 @@ public class CacheConfig { * Configuration key to cache blocks when a compacted file is written */ public static final String CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY = - "hbase.rs.cachecompactedblocksonwrite"; + "hbase.rs.cachecompactedblocksonwrite"; /** * Configuration key to determine total size in bytes of compacted files beyond which we do not * cache blocks on compaction */ public static final String CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY = - "hbase.rs.cachecompactedblocksonwrite.threshold"; + "hbase.rs.cachecompactedblocksonwrite.threshold"; public static final String DROP_BEHIND_CACHE_COMPACTION_KEY = - "hbase.hfile.drop.behind.compaction"; + "hbase.hfile.drop.behind.compaction"; // Defaults public static final boolean DEFAULT_CACHE_DATA_ON_READ = true; @@ -109,10 +107,9 @@ public class CacheConfig { public static final long DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD = Long.MAX_VALUE; /** - * Whether blocks should be cached on read (default is on if there is a - * cache but this can be turned off on a per-family or per-request basis). - * If off we will STILL cache meta blocks; i.e. INDEX and BLOOM types. - * This cannot be disabled. + * Whether blocks should be cached on read (default is on if there is a cache but this can be + * turned off on a per-family or per-request basis). If off we will STILL cache meta blocks; i.e. + * INDEX and BLOOM types. This cannot be disabled. */ private final boolean cacheDataOnRead; @@ -155,8 +152,8 @@ public class CacheConfig { private final ByteBuffAllocator byteBuffAllocator; /** - * Create a cache configuration using the specified configuration object and - * defaults for family level settings. Only use if no column family context. + * Create a cache configuration using the specified configuration object and defaults for family + * level settings. Only use if no column family context. * @param conf hbase configuration */ public CacheConfig(Configuration conf) { @@ -168,37 +165,35 @@ public class CacheConfig { } /** - * Create a cache configuration using the specified configuration object and - * family descriptor. - * @param conf hbase configuration + * Create a cache configuration using the specified configuration object and family descriptor. + * @param conf hbase configuration * @param family column family configuration */ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache blockCache, - ByteBuffAllocator byteBuffAllocator) { - this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) && - (family == null ? true : family.isBlockCacheEnabled()); + ByteBuffAllocator byteBuffAllocator) { + this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) + && (family == null ? true : family.isBlockCacheEnabled()); this.inMemory = family == null ? DEFAULT_IN_MEMORY : family.isInMemory(); this.cacheDataCompressed = - conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); + conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); this.dropBehindCompaction = - conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); + conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); // For the following flags we enable them regardless of per-schema settings // if they are enabled in the global configuration. 
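For context on the keys touched in this hunk, a minimal sketch of how the cache-on-write, prefetch-on-open and evict-on-close switches can be flipped through the Configuration that CacheConfig(Configuration) reads. The constants and the constructor are the ones visible above; the standalone main() wrapper is only illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class CacheConfigSketch {
  public static void main(String[] args) {
    // Start from the stock HBase configuration.
    Configuration conf = HBaseConfiguration.create();
    // Cache data blocks as they are written, and prefetch blocks when HFiles are opened.
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    // Evict a file's blocks from the block cache when its reader is closed.
    conf.setBoolean(CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY, true);
    // No column family context here, so family-level defaults apply.
    CacheConfig cacheConf = new CacheConfig(conf);
    System.out.println(cacheConf); // toString() reports the effective flags
  }
}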
- this.cacheDataOnWrite = - conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) || - (family == null ? false : family.isCacheDataOnWrite()); + this.cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) + || (family == null ? false : family.isCacheDataOnWrite()); this.cacheIndexesOnWrite = - conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) || - (family == null ? false : family.isCacheIndexesOnWrite()); + conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) + || (family == null ? false : family.isCacheIndexesOnWrite()); this.cacheBloomsOnWrite = - conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) || - (family == null ? false : family.isCacheBloomsOnWrite()); - this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) || - (family == null ? false : family.isEvictBlocksOnClose()); - this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || - (family == null ? false : family.isPrefetchBlocksOnOpen()); - this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, - DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); + conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) + || (family == null ? false : family.isCacheBloomsOnWrite()); + this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) + || (family == null ? false : family.isEvictBlocksOnClose()); + this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) + || (family == null ? false : family.isPrefetchBlocksOnOpen()); + this.cacheCompactedDataOnWrite = + conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); this.blockCache = blockCache; this.byteBuffAllocator = byteBuffAllocator; @@ -239,8 +234,8 @@ public class CacheConfig { } /** - * Returns whether the DATA blocks of this HFile should be cached on read or not (we always - * cache the meta blocks, the INDEX and BLOOM blocks). + * Returns whether the DATA blocks of this HFile should be cached on read or not (we always cache + * the meta blocks, the INDEX and BLOOM blocks). * @return true if blocks should be cached on read, false if not */ public boolean shouldCacheDataOnRead() { @@ -252,13 +247,12 @@ public class CacheConfig { } /** - * Should we cache a block of a particular category? We always cache - * important blocks such as index blocks, as long as the block cache is - * available. + * Should we cache a block of a particular category? We always cache important blocks such as + * index blocks, as long as the block cache is available. 
*/ public boolean shouldCacheBlockOnRead(BlockCategory category) { - return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM || - (prefetchOnOpen && (category != BlockCategory.META && category != BlockCategory.UNKNOWN)); + return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM + || (prefetchOnOpen && (category != BlockCategory.META && category != BlockCategory.UNKNOWN)); } /** @@ -269,26 +263,23 @@ public class CacheConfig { } /** - * @return true if data blocks should be written to the cache when an HFile is - * written, false if not + * @return true if data blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheDataOnWrite() { return this.cacheDataOnWrite; } /** - * @param cacheDataOnWrite whether data blocks should be written to the cache - * when an HFile is written + * @param cacheDataOnWrite whether data blocks should be written to the cache when an HFile is + * written */ public void setCacheDataOnWrite(boolean cacheDataOnWrite) { this.cacheDataOnWrite = cacheDataOnWrite; } /** - * Enable cache on write including: - * cacheDataOnWrite - * cacheIndexesOnWrite - * cacheBloomsOnWrite + * Enable cache on write including: cacheDataOnWrite cacheIndexesOnWrite cacheBloomsOnWrite */ public void enableCacheOnWrite() { this.cacheDataOnWrite = true; @@ -297,24 +288,24 @@ public class CacheConfig { } /** - * @return true if index blocks should be written to the cache when an HFile - * is written, false if not + * @return true if index blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheIndexesOnWrite() { return this.cacheIndexesOnWrite; } /** - * @return true if bloom blocks should be written to the cache when an HFile - * is written, false if not + * @return true if bloom blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheBloomsOnWrite() { return this.cacheBloomsOnWrite; } /** - * @return true if blocks should be evicted from the cache when an HFile - * reader is closed, false if not + * @return true if blocks should be evicted from the cache when an HFile reader is closed, false + * if not */ public boolean shouldEvictOnClose() { return this.evictOnClose; @@ -322,8 +313,8 @@ public class CacheConfig { /** * Only used for testing. - * @param evictOnClose whether blocks should be evicted from the cache when an - * HFile reader is closed + * @param evictOnClose whether blocks should be evicted from the cache when an HFile reader is + * closed */ public void setEvictOnClose(boolean evictOnClose) { this.evictOnClose = evictOnClose; @@ -368,6 +359,7 @@ public class CacheConfig { public long getCacheCompactedBlocksOnWriteThreshold() { return this.cacheCompactedDataOnWriteThreshold; } + /** * Return true if we may find this type of block in block cache. *

        @@ -389,16 +381,18 @@ public class CacheConfig { if (blockType == null) { return true; } - if (blockType.getCategory() == BlockCategory.BLOOM || - blockType.getCategory() == BlockCategory.INDEX) { + if ( + blockType.getCategory() == BlockCategory.BLOOM + || blockType.getCategory() == BlockCategory.INDEX + ) { return true; } return false; } /** - * If we make sure the block could not be cached, we will not acquire the lock - * otherwise we will acquire lock + * If we make sure the block could not be cached, we will not acquire the lock otherwise we will + * acquire lock */ public boolean shouldLockOnCacheMiss(BlockType blockType) { if (blockType == null) { @@ -409,7 +403,6 @@ public class CacheConfig { /** * Returns the block cache. - * * @return the block cache, or null if caching is completely disabled */ public Optional getBlockCache() { @@ -425,8 +418,8 @@ public class CacheConfig { } private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) { - long cacheCompactedBlocksOnWriteThreshold = conf - .getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, + long cacheCompactedBlocksOnWriteThreshold = + conf.getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD); if (cacheCompactedBlocksOnWriteThreshold < 0) { @@ -442,9 +435,9 @@ public class CacheConfig { @Override public String toString() { return "cacheDataOnRead=" + shouldCacheDataOnRead() + ", cacheDataOnWrite=" - + shouldCacheDataOnWrite() + ", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() - + ", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + ", cacheEvictOnClose=" - + shouldEvictOnClose() + ", cacheDataCompressed=" + shouldCacheDataCompressed() - + ", prefetchOnOpen=" + shouldPrefetchOnOpen(); + + shouldCacheDataOnWrite() + ", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + + ", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + ", cacheEvictOnClose=" + + shouldEvictOnClose() + ", cacheDataCompressed=" + shouldCacheDataCompressed() + + ", prefetchOnOpen=" + shouldPrefetchOnOpen(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index 7c5b5636409..c5a247dfce1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,15 +23,15 @@ import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; import org.apache.yetus.audience.InterfaceAudience; - /** * Class that implements cache metrics. */ @InterfaceAudience.Private public class CacheStats { - /** Sliding window statistics. The number of metric periods to include in - * sliding window hit ratio calculations. + /** + * Sliding window statistics. The number of metric periods to include in sliding window hit ratio + * calculations. */ static final int DEFAULT_WINDOW_PERIODS = 5; @@ -43,10 +42,9 @@ public class CacheStats { private final LongAdder primaryHitCount = new LongAdder(); /** - * The number of getBlock requests that were cache hits, but only from - * requests that were set to use the block cache. 
This is because all reads - * attempt to read from the block cache even if they will not put new blocks - * into the block cache. See HBASE-2253 for more information. + * The number of getBlock requests that were cache hits, but only from requests that were set to + * use the block cache. This is because all reads attempt to read from the block cache even if + * they will not put new blocks into the block cache. See HBASE-2253 for more information. */ private final LongAdder hitCachingCount = new LongAdder(); @@ -56,8 +54,8 @@ public class CacheStats { /** The number of getBlock requests for primary replica that were cache misses */ private final LongAdder primaryMissCount = new LongAdder(); /** - * The number of getBlock requests that were cache misses, but only from - * requests that were set to use the block cache. + * The number of getBlock requests that were cache misses, but only from requests that were set to + * use the block cache. */ private final LongAdder missCachingCount = new LongAdder(); @@ -129,25 +127,22 @@ public class CacheStats { public CacheStats(final String name, int numPeriodsInWindow) { this.numPeriodsInWindow = numPeriodsInWindow; this.hitCounts = new long[numPeriodsInWindow]; - this.hitCachingCounts = new long[numPeriodsInWindow]; - this.requestCounts = new long[numPeriodsInWindow]; - this.requestCachingCounts = new long[numPeriodsInWindow]; + this.hitCachingCounts = new long[numPeriodsInWindow]; + this.requestCounts = new long[numPeriodsInWindow]; + this.requestCachingCounts = new long[numPeriodsInWindow]; this.ageAtEviction = new FastLongHistogram(); } @Override public String toString() { AgeSnapshot snapshot = getAgeAtEvictionSnapshot(); - return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() + - ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() + - ", evictionCount=" + getEvictionCount() + - ", evictedBlockCount=" + getEvictedCount() + - ", primaryMissCount=" + getPrimaryMissCount() + - ", primaryHitCount=" + getPrimaryHitCount() + - ", evictedAgeMean=" + snapshot.getMean(); + return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() + + ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() + + ", evictionCount=" + getEvictionCount() + ", evictedBlockCount=" + getEvictedCount() + + ", primaryMissCount=" + getPrimaryMissCount() + ", primaryHitCount=" + getPrimaryHitCount() + + ", evictedAgeMean=" + snapshot.getMean(); } - public void miss(boolean caching, boolean primary, BlockType type) { missCount.increment(); if (primary) primaryMissCount.increment(); @@ -199,7 +194,6 @@ public class CacheStats { if (primary) primaryHitCount.increment(); if (caching) hitCachingCount.increment(); - if (type == null) { return; } @@ -260,7 +254,6 @@ public class CacheStats { return failedInserts.incrementAndGet(); } - // All of the counts of misses and hits. 
public long getDataMissCount() { return dataMissCount.sum(); @@ -443,13 +436,11 @@ public class CacheStats { public void rollMetricsPeriod() { hitCounts[windowIndex] = getHitCount() - lastHitCount; lastHitCount = getHitCount(); - hitCachingCounts[windowIndex] = - getHitCachingCount() - lastHitCachingCount; + hitCachingCounts[windowIndex] = getHitCachingCount() - lastHitCachingCount; lastHitCachingCount = getHitCachingCount(); requestCounts[windowIndex] = getRequestCount() - lastRequestCount; lastRequestCount = getRequestCount(); - requestCachingCounts[windowIndex] = - getRequestCachingCount() - lastRequestCachingCount; + requestCachingCounts[windowIndex] = getRequestCachingCount() - lastRequestCachingCount; lastRequestCachingCount = getRequestCachingCount(); windowIndex = (windowIndex + 1) % numPeriodsInWindow; } @@ -471,14 +462,14 @@ public class CacheStats { } public double getHitRatioPastNPeriods() { - double ratio = ((double)getSumHitCountsPastNPeriods() / - (double)getSumRequestCountsPastNPeriods()); + double ratio = + ((double) getSumHitCountsPastNPeriods() / (double) getSumRequestCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } public double getHitCachingRatioPastNPeriods() { - double ratio = ((double)getSumHitCachingCountsPastNPeriods() / - (double)getSumRequestCachingCountsPastNPeriods()); + double ratio = ((double) getSumHitCachingCountsPastNPeriods() + / (double) getSumRequestCachingCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java index 96c8e827563..0a41b53a804 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,44 +15,37 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.nio.HBaseReferenceCounted; import org.apache.yetus.audience.InterfaceAudience; /** - * Cacheable is an interface that allows for an object to be cached. If using an - * on heap cache, just use heapsize. If using an off heap cache, Cacheable - * provides methods for serialization of the object. - * - * Some objects cannot be moved off heap, those objects will return a - * getSerializedLength() of 0. - * + * Cacheable is an interface that allows for an object to be cached. If using an on heap cache, just + * use heapsize. If using an off heap cache, Cacheable provides methods for serialization of the + * object. Some objects cannot be moved off heap, those objects will return a getSerializedLength() + * of 0. */ @InterfaceAudience.Private public interface Cacheable extends HeapSize, HBaseReferenceCounted { /** - * Returns the length of the ByteBuffer required to serialized the object. If the - * object cannot be serialized, it should return 0. - * + * Returns the length of the ByteBuffer required to serialized the object. If the object cannot be + * serialized, it should return 0. * @return int length in bytes of the serialized form or 0 if the object cannot be cached. 
*/ int getSerializedLength(); /** * Serializes its data into destination. - * @param destination Where to serialize to + * @param destination Where to serialize to * @param includeNextBlockMetadata Whether to include nextBlockMetadata in the Cache block. */ void serialize(ByteBuffer destination, boolean includeNextBlockMetadata); /** * Returns CacheableDeserializer instance which reconstructs original object from ByteBuffer. - * * @return CacheableDeserialzer instance. */ CacheableDeserializer getDeserializer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java index e12173daba9..2fe50381b77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; @@ -30,10 +29,9 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface CacheableDeserializer { /** - * @param b ByteBuff to deserialize the Cacheable. + * @param b ByteBuff to deserialize the Cacheable. * @param allocator to manage NIO ByteBuffers for future allocation or de-allocation. - * @return T the deserialized object. - * @throws IOException + * @return T the deserialized object. n */ T deserialize(ByteBuff b, ByteBuffAllocator allocator) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java index 0a3a9f17096..fbe1260b3b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java @@ -1,45 +1,43 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.yetus.audience.InterfaceAudience; /** - * This class is used to manage the identifiers for {@link CacheableDeserializer}. - * All deserializers are registered with this Manager via the - * {@link #registerDeserializer(CacheableDeserializer)}}. On registration, we return an - * int *identifier* for this deserializer. The int identifier is passed to - * {@link #getDeserializer(int)}} to obtain the registered deserializer instance. + * This class is used to manage the identifiers for {@link CacheableDeserializer}. All deserializers + * are registered with this Manager via the {@link #registerDeserializer(CacheableDeserializer)}}. + * On registration, we return an int *identifier* for this deserializer. The int identifier is + * passed to {@link #getDeserializer(int)}} to obtain the registered deserializer instance. */ @InterfaceAudience.Private public class CacheableDeserializerIdManager { - private static final Map> registeredDeserializers = new HashMap<>(); + private static final Map> registeredDeserializers = + new HashMap<>(); private static final AtomicInteger identifier = new AtomicInteger(0); /** - * Register the given {@link Cacheable} -- usually an hfileblock instance, these implement - * the Cacheable Interface -- deserializer and generate a unique identifier id for it and return - * this as our result. + * Register the given {@link Cacheable} -- usually an hfileblock instance, these implement the + * Cacheable Interface -- deserializer and generate a unique identifier id for it and return this + * as our result. * @return the identifier of given cacheable deserializer * @see #getDeserializer(int) */ @@ -60,14 +58,14 @@ public class CacheableDeserializerIdManager { } /** - * Snapshot a map of the current identifiers to class names for reconstruction on reading out - * of a file. + * Snapshot a map of the current identifiers to class names for reconstruction on reading out of a + * file. 
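The id manager above is a small registry: each deserializer registers once, receives an int identifier that is persisted next to the cached data, and the id-to-class mapping can be snapshotted for later reconstruction. A stripped-down sketch of that pattern, with illustrative names only:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

/** Simplified id registry; illustrative of the pattern only. */
public class IdRegistry<T> {
  private final Map<Integer, T> registered = new HashMap<>();
  private final AtomicInteger nextId = new AtomicInteger(0);

  /** Register an instance and return the identifier callers persist alongside their data. */
  public synchronized int register(T instance) {
    int id = nextId.incrementAndGet();
    registered.put(id, instance);
    return id;
  }

  public synchronized T get(int id) {
    return registered.get(id);
  }

  /** Snapshot id -> class name, e.g. for writing into a file header. */
  public synchronized Map<Integer, String> save() {
    Map<Integer, String> snapshot = new HashMap<>();
    registered.forEach((id, instance) -> snapshot.put(id, instance.getClass().getName()));
    return snapshot;
  }
}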
*/ - public static Map save() { + public static Map save() { Map snapshot = new HashMap<>(); synchronized (registeredDeserializers) { - for (Map.Entry> entry : - registeredDeserializers.entrySet()) { + for (Map.Entry> entry : registeredDeserializers + .entrySet()) { snapshot.put(entry.getKey(), entry.getValue().getClass().getName()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java index 0fcef862d42..81823dd15db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,9 +22,14 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface CachedBlock extends Comparable { BlockPriority getBlockPriority(); + BlockType getBlockType(); + long getOffset(); + long getSize(); + long getCachedTime(); + String getFilename(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java index f2f9d58796a..8447b3bdbd2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java @@ -19,15 +19,14 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; +import org.apache.hadoop.hbase.util.ChecksumType; +import org.apache.hadoop.util.DataChecksum; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.ChecksumType; -import org.apache.hadoop.util.DataChecksum; /** * Utility methods to compute and validate checksums. @@ -39,42 +38,38 @@ public class ChecksumUtil { public static final int CHECKSUM_BUF_SIZE = 256; /** - * This is used by unit tests to make checksum failures throw an - * exception instead of returning null. Returning a null value from - * checksum validation will cause the higher layer to retry that - * read with hdfs-level checksums. Instead, we would like checksum - * failures to cause the entire unit test to fail. + * This is used by unit tests to make checksum failures throw an exception instead of returning + * null. Returning a null value from checksum validation will cause the higher layer to retry that + * read with hdfs-level checksums. Instead, we would like checksum failures to cause the entire + * unit test to fail. */ private static boolean generateExceptions = false; /** - * Generates a checksum for all the data in indata. The checksum is - * written to outdata. 
- * @param indata input data stream - * @param startOffset starting offset in the indata stream from where to - * compute checkums from - * @param endOffset ending offset in the indata stream upto - * which checksums needs to be computed - * @param outdata the output buffer where checksum values are written - * @param outOffset the starting offset in the outdata where the - * checksum values are written - * @param checksumType type of checksum + * Generates a checksum for all the data in indata. The checksum is written to outdata. + * @param indata input data stream + * @param startOffset starting offset in the indata stream from where to compute checkums + * from + * @param endOffset ending offset in the indata stream upto which checksums needs to be + * computed + * @param outdata the output buffer where checksum values are written + * @param outOffset the starting offset in the outdata where the checksum values are + * written + * @param checksumType type of checksum * @param bytesPerChecksum number of bytes per checksum value */ - static void generateChecksums(byte[] indata, int startOffset, int endOffset, - byte[] outdata, int outOffset, ChecksumType checksumType, - int bytesPerChecksum) throws IOException { + static void generateChecksums(byte[] indata, int startOffset, int endOffset, byte[] outdata, + int outOffset, ChecksumType checksumType, int bytesPerChecksum) throws IOException { if (checksumType == ChecksumType.NULL) { return; // No checksum for this block. } - DataChecksum checksum = DataChecksum.newDataChecksum( - checksumType.getDataChecksumType(), bytesPerChecksum); + DataChecksum checksum = + DataChecksum.newDataChecksum(checksumType.getDataChecksumType(), bytesPerChecksum); - checksum.calculateChunkedSums( - ByteBuffer.wrap(indata, startOffset, endOffset - startOffset), - ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset)); + checksum.calculateChunkedSums(ByteBuffer.wrap(indata, startOffset, endOffset - startOffset), + ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset)); } /** @@ -82,24 +77,24 @@ public class ChecksumUtil { * this method will also verify checksum of each chunk in data. the difference is: this method can * accept {@link ByteBuff} as arguments, we can not add it in hadoop-common so defined here. * @param dataChecksum to calculate the checksum. - * @param data as the input - * @param checksums to compare - * @param pathName indicate that the data is read from which file. + * @param data as the input + * @param checksums to compare + * @param pathName indicate that the data is read from which file. * @return a flag indicate the checksum match or mismatch. * @see org.apache.hadoop.util.DataChecksum#verifyChunkedSums(ByteBuffer, ByteBuffer, String, * long) */ private static boolean verifyChunkedSums(DataChecksum dataChecksum, ByteBuff data, - ByteBuff checksums, String pathName) { + ByteBuff checksums, String pathName) { // Almost all of the HFile Block are about 64KB, and it would be a SingleByteBuff, use the // Hadoop's verify checksum directly, because it'll use the native checksum, which has no extra // byte[] allocation or copying. (HBASE-21917) if (data instanceof SingleByteBuff && checksums instanceof SingleByteBuff) { // the checksums ByteBuff must also be an SingleByteBuff because it's duplicated from data. 
ByteBuffer dataBB = (ByteBuffer) (data.nioByteBuffers()[0]).duplicate() - .position(data.position()).limit(data.limit()); + .position(data.position()).limit(data.limit()); ByteBuffer checksumBB = (ByteBuffer) (checksums.nioByteBuffers()[0]).duplicate() - .position(checksums.position()).limit(checksums.limit()); + .position(checksums.position()).limit(checksums.limit()); try { dataChecksum.verifyChunkedSums(dataBB, checksumBB, pathName, 0); return true; @@ -142,7 +137,7 @@ public class ChecksumUtil { } int calculated = (int) dataChecksum.getValue(); int stored = (sum[0] << 24 & 0xff000000) | (sum[1] << 16 & 0xff0000) - | (sum[2] << 8 & 0xff00) | (sum[3] & 0xff); + | (sum[2] << 8 & 0xff00) | (sum[3] & 0xff); if (calculated != stored) { if (LOG.isTraceEnabled()) { long errPos = data.position() - startDataPos - n; @@ -162,10 +157,10 @@ public class ChecksumUtil { /** * Validates that the data in the specified HFileBlock matches the checksum. Generates the * checksums for the data and then validate that it matches those stored in the end of the data. - * @param buf Contains the data in following order: HFileBlock header, data, checksums. + * @param buf Contains the data in following order: HFileBlock header, data, checksums. * @param pathName Path of the HFile to which the {@code data} belongs. Only used for logging. - * @param offset offset of the data being validated. Only used for logging. - * @param hdrSize Size of the block header in {@code data}. Only used for logging. + * @param offset offset of the data being validated. Only used for logging. + * @param hdrSize Size of the block header in {@code data}. Only used for logging. * @return True if checksum matches, else false. */ static boolean validateChecksum(ByteBuff buf, String pathName, long offset, int hdrSize) { @@ -177,22 +172,23 @@ public class ChecksumUtil { // read in the stored value of the checksum size from the header. 
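The sizing helpers further down in this file compute checksum space with a ceiling division: one checksum value is stored per bytesPerChecksum bytes of data. As a worked example, assuming the common 16 KiB bytes-per-checksum and 4-byte CRC values, a 64 KiB block needs 4 chunks (16 checksum bytes) and a 65 KiB block needs 5 chunks (20 bytes). A small sketch of that arithmetic:

/** Illustrates the per-chunk checksum sizing (ceiling division) used above. */
public class ChecksumSizing {
  static long numChunks(long datasize, int bytesPerChecksum) {
    long chunks = datasize / bytesPerChecksum;
    if (datasize % bytesPerChecksum != 0) {
      chunks++;
    }
    return chunks;
  }

  public static void main(String[] args) {
    int bytesPerChecksum = 16384; // assumed common default
    int checksumSize = 4;         // CRC32/CRC32C values are 4 bytes each
    long dataSize = 64 * 1024;
    long chunks = numChunks(dataSize, bytesPerChecksum);
    System.out.println(chunks + " chunks, " + (chunks * checksumSize) + " checksum bytes");
    // prints: 4 chunks, 16 checksum bytes
  }
}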
int bytesPerChecksum = buf.getInt(HFileBlock.Header.BYTES_PER_CHECKSUM_INDEX); DataChecksum dataChecksum = - DataChecksum.newDataChecksum(ctype.getDataChecksumType(), bytesPerChecksum); + DataChecksum.newDataChecksum(ctype.getDataChecksumType(), bytesPerChecksum); assert dataChecksum != null; int onDiskDataSizeWithHeader = buf.getInt(HFileBlock.Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX); - LOG.trace("dataLength={}, sizeWithHeader={}, checksumType={}, file={}, " - + "offset={}, headerSize={}, bytesPerChecksum={}", buf.capacity(), onDiskDataSizeWithHeader, - ctype.getName(), pathName, offset, hdrSize, bytesPerChecksum); + LOG.trace( + "dataLength={}, sizeWithHeader={}, checksumType={}, file={}, " + + "offset={}, headerSize={}, bytesPerChecksum={}", + buf.capacity(), onDiskDataSizeWithHeader, ctype.getName(), pathName, offset, hdrSize, + bytesPerChecksum); ByteBuff data = buf.duplicate().position(0).limit(onDiskDataSizeWithHeader); ByteBuff checksums = buf.duplicate().position(onDiskDataSizeWithHeader).limit(buf.limit()); return verifyChunkedSums(dataChecksum, data, checksums, pathName); } /** - * Returns the number of bytes needed to store the checksums for - * a specified data size - * @param datasize number of bytes of data + * Returns the number of bytes needed to store the checksums for a specified data size + * @param datasize number of bytes of data * @param bytesPerChecksum number of bytes in a checksum chunk * @return The number of bytes needed to store the checksum values */ @@ -201,14 +197,13 @@ public class ChecksumUtil { } /** - * Returns the number of checksum chunks needed to store the checksums for - * a specified data size - * @param datasize number of bytes of data + * Returns the number of checksum chunks needed to store the checksums for a specified data size + * @param datasize number of bytes of data * @param bytesPerChecksum number of bytes in a checksum chunk * @return The number of checksum chunks */ static long numChunks(long datasize, int bytesPerChecksum) { - long numChunks = datasize/bytesPerChecksum; + long numChunks = datasize / bytesPerChecksum; if (datasize % bytesPerChecksum != 0) { numChunks++; } @@ -216,13 +211,12 @@ public class ChecksumUtil { } /** - * Mechanism to throw an exception in case of hbase checksum - * failure. This is used by unit tests only. - * @param value Setting this to true will cause hbase checksum - * verification failures to generate exceptions. + * Mechanism to throw an exception in case of hbase checksum failure. This is used by unit tests + * only. + * @param value Setting this to true will cause hbase checksum verification failures to generate + * exceptions. */ public static void generateExceptionForChecksumFailureForTest(boolean value) { generateExceptions = value; } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 4a494c887ad..572f1ff7289 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -1,37 +1,34 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.yetus.audience.InterfaceAudience; /** - * CombinedBlockCache is an abstraction layer that combines - * {@link FirstLevelBlockCache} and {@link BucketCache}. The smaller lruCache is used - * to cache bloom blocks and index blocks. The larger Cache is used to - * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads - * first from the smaller l1Cache before looking for the block in the l2Cache. Blocks evicted - * from l1Cache are put into the bucket cache. - * Metrics are the combined size and hits and misses of both caches. + * CombinedBlockCache is an abstraction layer that combines {@link FirstLevelBlockCache} and + * {@link BucketCache}. The smaller lruCache is used to cache bloom blocks and index blocks. The + * larger Cache is used to cache data blocks. + * {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads first from the smaller l1Cache + * before looking for the block in the l2Cache. Blocks evicted from l1Cache are put into the bucket + * cache. Metrics are the combined size and hits and misses of both caches. 
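The class comment above describes the two-tier arrangement: a small first-level cache for index and bloom blocks, a large second-level cache for data blocks, L1 consulted before L2 on reads, and metrics summed across both. A condensed, generic sketch of that lookup and placement order, with plain maps standing in for the real caches:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Predicate;

/** Simplified two-tier lookup; illustrative of the L1-then-L2 order only. */
public class TwoTierCacheSketch<K, V> {
  private final Map<K, V> l1 = new ConcurrentHashMap<>(); // small: index and bloom blocks
  private final Map<K, V> l2 = new ConcurrentHashMap<>(); // large: data blocks

  public V getBlock(K key) {
    V block = l1.get(key); // check the first-level cache first
    return block != null ? block : l2.get(key);
  }

  public void cacheBlock(K key, V block, boolean metaBlock) {
    (metaBlock ? l1 : l2).put(key, block); // meta blocks to L1, data blocks to L2
  }

  public int evictBlocksBy(Predicate<K> matches) {
    // Combined result, mirroring how evictBlocksByHfileName() sums both tiers.
    return evict(l1, matches) + evict(l2, matches);
  }

  private int evict(Map<K, V> tier, Predicate<K> matches) {
    int before = tier.size();
    tier.keySet().removeIf(matches);
    return before - tier.size();
  }
}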
*/ @InterfaceAudience.Private public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @@ -42,8 +39,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) { this.l1Cache = l1Cache; this.l2Cache = l2Cache; - this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(), - l2Cache.getStats()); + this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(), l2Cache.getStats()); } @Override @@ -71,8 +67,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, - boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { // We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting // passed always. boolean existInL1 = l1Cache.containsBlock(cacheKey); @@ -81,9 +77,9 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { l1Cache.getStats().miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); } - return existInL1 ? - l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): - l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + return existInL1 + ? l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics) + : l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override @@ -93,8 +89,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public int evictBlocksByHfileName(String hfileName) { - return l1Cache.evictBlocksByHfileName(hfileName) - + l2Cache.evictBlocksByHfileName(hfileName); + return l1Cache.evictBlocksByHfileName(hfileName) + l2Cache.evictBlocksByHfileName(hfileName); } @Override @@ -188,8 +183,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public long getIntermediateIndexMissCount() { - return lruCacheStats.getIntermediateIndexMissCount() + - bucketCacheStats.getIntermediateIndexMissCount(); + return lruCacheStats.getIntermediateIndexMissCount() + + bucketCacheStats.getIntermediateIndexMissCount(); } @Override @@ -199,14 +194,14 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public long getGeneralBloomMetaMissCount() { - return lruCacheStats.getGeneralBloomMetaMissCount() + - bucketCacheStats.getGeneralBloomMetaMissCount(); + return lruCacheStats.getGeneralBloomMetaMissCount() + + bucketCacheStats.getGeneralBloomMetaMissCount(); } @Override public long getDeleteFamilyBloomMissCount() { - return lruCacheStats.getDeleteFamilyBloomMissCount() + - bucketCacheStats.getDeleteFamilyBloomMissCount(); + return lruCacheStats.getDeleteFamilyBloomMissCount() + + bucketCacheStats.getDeleteFamilyBloomMissCount(); } @Override @@ -241,8 +236,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public long getIntermediateIndexHitCount() { - return lruCacheStats.getIntermediateIndexHitCount() + - bucketCacheStats.getIntermediateIndexHitCount(); + return lruCacheStats.getIntermediateIndexHitCount() + + bucketCacheStats.getIntermediateIndexHitCount(); } @Override @@ -252,14 +247,14 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public long getGeneralBloomMetaHitCount() { - return lruCacheStats.getGeneralBloomMetaHitCount() + - 
bucketCacheStats.getGeneralBloomMetaHitCount(); + return lruCacheStats.getGeneralBloomMetaHitCount() + + bucketCacheStats.getGeneralBloomMetaHitCount(); } @Override public long getDeleteFamilyBloomHitCount() { - return lruCacheStats.getDeleteFamilyBloomHitCount() + - bucketCacheStats.getDeleteFamilyBloomHitCount(); + return lruCacheStats.getDeleteFamilyBloomHitCount() + + bucketCacheStats.getDeleteFamilyBloomHitCount(); } @Override @@ -269,14 +264,12 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public long getRequestCount() { - return lruCacheStats.getRequestCount() - + bucketCacheStats.getRequestCount(); + return lruCacheStats.getRequestCount() + bucketCacheStats.getRequestCount(); } @Override public long getRequestCachingCount() { - return lruCacheStats.getRequestCachingCount() - + bucketCacheStats.getRequestCachingCount(); + return lruCacheStats.getRequestCachingCount() + bucketCacheStats.getRequestCachingCount(); } @Override @@ -291,8 +284,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public long getMissCachingCount() { - return lruCacheStats.getMissCachingCount() - + bucketCacheStats.getMissCachingCount(); + return lruCacheStats.getMissCachingCount() + bucketCacheStats.getMissCachingCount(); } @Override @@ -304,28 +296,25 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { public long getPrimaryHitCount() { return lruCacheStats.getPrimaryHitCount() + bucketCacheStats.getPrimaryHitCount(); } + @Override public long getHitCachingCount() { - return lruCacheStats.getHitCachingCount() - + bucketCacheStats.getHitCachingCount(); + return lruCacheStats.getHitCachingCount() + bucketCacheStats.getHitCachingCount(); } @Override public long getEvictionCount() { - return lruCacheStats.getEvictionCount() - + bucketCacheStats.getEvictionCount(); + return lruCacheStats.getEvictionCount() + bucketCacheStats.getEvictionCount(); } @Override public long getEvictedCount() { - return lruCacheStats.getEvictedCount() - + bucketCacheStats.getEvictedCount(); + return lruCacheStats.getEvictedCount() + bucketCacheStats.getEvictedCount(); } @Override public long getPrimaryEvictedCount() { - return lruCacheStats.getPrimaryEvictedCount() - + bucketCacheStats.getPrimaryEvictedCount(); + return lruCacheStats.getPrimaryEvictedCount() + bucketCacheStats.getPrimaryEvictedCount(); } @Override @@ -342,25 +331,25 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public long getSumHitCountsPastNPeriods() { return lruCacheStats.getSumHitCountsPastNPeriods() - + bucketCacheStats.getSumHitCountsPastNPeriods(); + + bucketCacheStats.getSumHitCountsPastNPeriods(); } @Override public long getSumRequestCountsPastNPeriods() { return lruCacheStats.getSumRequestCountsPastNPeriods() - + bucketCacheStats.getSumRequestCountsPastNPeriods(); + + bucketCacheStats.getSumRequestCountsPastNPeriods(); } @Override public long getSumHitCachingCountsPastNPeriods() { return lruCacheStats.getSumHitCachingCountsPastNPeriods() - + bucketCacheStats.getSumHitCachingCountsPastNPeriods(); + + bucketCacheStats.getSumHitCachingCountsPastNPeriods(); } @Override public long getSumRequestCachingCountsPastNPeriods() { return lruCacheStats.getSumRequestCachingCountsPastNPeriods() - + bucketCacheStats.getSumRequestCachingCountsPastNPeriods(); + + bucketCacheStats.getSumRequestCachingCountsPastNPeriods(); } } @@ -371,7 +360,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override 
public BlockCache[] getBlockCaches() { - return new BlockCache [] {this.l1Cache, this.l2Cache}; + return new BlockCache[] { this.l1Cache, this.l2Cache }; } @Override @@ -381,8 +370,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { public int getRpcRefCount(BlockCacheKey cacheKey) { return (this.l2Cache instanceof BucketCache) - ? ((BucketCache) this.l2Cache).getRpcRefCount(cacheKey) - : 0; + ? ((BucketCache) this.l2Cache).getRpcRefCount(cacheKey) + : 0; } public FirstLevelBlockCache getFirstLevelCache() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java index 29f29e15a8c..0d39d09c969 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,32 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.io.DataInput; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Hash; +import org.apache.yetus.audience.InterfaceAudience; /** - * A Bloom filter implementation built on top of - * {@link org.apache.hadoop.hbase.util.BloomFilterChunk}, encapsulating - * a set of fixed-size Bloom filters written out at the time of - * {@link org.apache.hadoop.hbase.io.hfile.HFile} generation into the data - * block stream, and loaded on demand at query time. This class only provides - * reading capabilities. + * A Bloom filter implementation built on top of + * {@link org.apache.hadoop.hbase.util.BloomFilterChunk}, encapsulating a set of fixed-size Bloom + * filters written out at the time of {@link org.apache.hadoop.hbase.io.hfile.HFile} generation into + * the data block stream, and loaded on demand at query time. This class only provides reading + * capabilities. */ @InterfaceAudience.Private -public class CompoundBloomFilter extends CompoundBloomFilterBase - implements BloomFilter { +public class CompoundBloomFilter extends CompoundBloomFilterBase implements BloomFilter { /** Used to load chunks on demand */ private HFile.Reader reader; @@ -55,14 +50,11 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase private long[] numPositivesPerChunk; /** - * De-serialization for compound Bloom filter metadata. Must be consistent - * with what {@link CompoundBloomFilterWriter} does. - * - * @param meta serialized Bloom filter metadata without any magic blocks - * @throws IOException + * De-serialization for compound Bloom filter metadata. Must be consistent with what + * {@link CompoundBloomFilterWriter} does. 
+ * @param meta serialized Bloom filter metadata without any magic blocks n */ - public CompoundBloomFilter(DataInput meta, HFile.Reader reader) - throws IOException { + public CompoundBloomFilter(DataInput meta, HFile.Reader reader) throws IOException { this.reader = reader; totalByteSize = meta.readLong(); @@ -72,8 +64,8 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase totalMaxKeys = meta.readLong(); numChunks = meta.readInt(); byte[] comparatorClassName = Bytes.readByteArray(meta); - // The writer would have return 0 as the vint length for the case of - // Bytes.BYTES_RAWCOMPARATOR. In such cases do not initialize comparator, it can be + // The writer would have return 0 as the vint length for the case of + // Bytes.BYTES_RAWCOMPARATOR. In such cases do not initialize comparator, it can be // null if (comparatorClassName.length != 0) { comparator = FixedFileTrailer.createComparator(Bytes.toString(comparatorClassName)); @@ -84,7 +76,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase throw new IllegalArgumentException("Invalid hash type: " + hashType); } // We will pass null for ROW block - if(comparator == null) { + if (comparator == null) { index = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); } else { index = new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator, 1); @@ -103,7 +95,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase try { ByteBuff bloomBuf = bloomBlock.getBufferReadOnly(); result = BloomFilterUtil.contains(key, keyOffset, keyLength, bloomBuf, - bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount); + bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount); } finally { // After the use, should release the block to deallocate byte buffers. bloomBlock.release(); @@ -120,7 +112,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase try { // We cache the block and use a positional read. bloomBlock = reader.readBlock(index.getRootBlockOffset(block), - index.getRootBlockDataSize(block), true, true, false, true, BlockType.BLOOM_CHUNK, null); + index.getRootBlockDataSize(block), true, true, false, true, BlockType.BLOOM_CHUNK, null); } catch (IOException ex) { // The Bloom filter is broken, turn it off. throw new IllegalArgumentException("Failed to load Bloom block", ex); @@ -198,12 +190,10 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase public String toString() { StringBuilder sb = new StringBuilder(); sb.append(BloomFilterUtil.formatStats(this)); - sb.append(BloomFilterUtil.STATS_RECORD_SEP + - "Number of chunks: " + numChunks); - sb.append(BloomFilterUtil.STATS_RECORD_SEP + - ((comparator != null) ? "Comparator: " - + comparator.getClass().getSimpleName() : "Comparator: " - + Bytes.BYTES_RAWCOMPARATOR.getClass().getSimpleName())); + sb.append(BloomFilterUtil.STATS_RECORD_SEP + "Number of chunks: " + numChunks); + sb.append(BloomFilterUtil.STATS_RECORD_SEP + ((comparator != null) + ? 
"Comparator: " + comparator.getClass().getSimpleName() + : "Comparator: " + Bytes.BYTES_RAWCOMPARATOR.getClass().getSimpleName())); return sb.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java index efc21c64140..19922388272 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,27 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.util.BloomFilterBase; - import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.util.BloomFilterBase; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class CompoundBloomFilterBase implements BloomFilterBase { /** - * At read time, the total number of chunks. At write time, the number of - * chunks created so far. The first chunk has an ID of 0, and the current - * chunk has the ID of numChunks - 1. + * At read time, the total number of chunks. At write time, the number of chunks created so far. + * The first chunk has an ID of 0, and the current chunk has the ID of numChunks - 1. */ protected int numChunks; /** - * The Bloom filter version. There used to be a DynamicByteBloomFilter which - * had version 2. + * The Bloom filter version. There used to be a DynamicByteBloomFilter which had version 2. */ public static final int VERSION = 3; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java index fd3ac0a194b..84d120a5568 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java @@ -22,33 +22,30 @@ import java.io.DataOutput; import java.io.IOException; import java.util.LinkedList; import java.util.Queue; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilterChunk; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Adds methods required for writing a compound Bloom filter to the data - * section of an {@link org.apache.hadoop.hbase.io.hfile.HFile} to the - * {@link CompoundBloomFilter} class. 
+ * Adds methods required for writing a compound Bloom filter to the data section of an + * {@link org.apache.hadoop.hbase.io.hfile.HFile} to the {@link CompoundBloomFilter} class. */ @InterfaceAudience.Private public class CompoundBloomFilterWriter extends CompoundBloomFilterBase - implements BloomFilterWriter, InlineBlockWriter { + implements BloomFilterWriter, InlineBlockWriter { - private static final Logger LOG = - LoggerFactory.getLogger(CompoundBloomFilterWriter.class); + private static final Logger LOG = LoggerFactory.getLogger(CompoundBloomFilterWriter.class); /** The current chunk being written to */ private BloomFilterChunk chunk; @@ -61,7 +58,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase /** The size of individual Bloom filter chunks to create */ private int chunkByteSize; - /** The prev Cell that was processed */ + /** The prev Cell that was processed */ private Cell prevCell; /** A Bloom filter chunk enqueued for writing */ @@ -77,7 +74,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase private byte[] firstKeyInChunk = null; private HFileBlockIndex.BlockIndexWriter bloomBlockIndexWriter = - new HFileBlockIndex.BlockIndexWriter(); + new HFileBlockIndex.BlockIndexWriter(); /** Whether to cache-on-write compound Bloom filter chunks */ private boolean cacheOnWrite; @@ -85,23 +82,13 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase private BloomType bloomType; /** - * @param chunkByteSizeHint - * each chunk's size in bytes. The real chunk size might be different - * as required by the fold factor. - * @param errorRate - * target false positive rate - * @param hashType - * hash function type to use - * @param maxFold - * maximum degree of folding allowed - * @param bloomType - * the bloom type + * n * each chunk's size in bytes. The real chunk size might be different as required by the fold + * factor. n * target false positive rate n * hash function type to use n * maximum degree of + * folding allowed n * the bloom type */ - public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, - int hashType, int maxFold, boolean cacheOnWrite, - CellComparator comparator, BloomType bloomType) { - chunkByteSize = BloomFilterUtil.computeFoldableByteSize( - chunkByteSizeHint * 8L, maxFold); + public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType, + int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) { + chunkByteSize = BloomFilterUtil.computeFoldableByteSize(chunkByteSizeHint * 8L, maxFold); this.errorRate = errorRate; this.hashType = hashType; @@ -119,20 +106,17 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase /** * Enqueue the current chunk if it is ready to be written out. 
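Both classes above rely on the same chunked layout: keys arrive in sorted order, each fixed-size chunk records the first key it covers (which feeds the block index), a full chunk is sealed and a fresh one allocated, and a query locates the single chunk that can cover the key and probes only it. A deliberately simplified, self-contained sketch of that flow, with BitSet buckets standing in for real Bloom chunks:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.List;

/** Simplified chunked membership filter; illustrative of the compound layout only. */
public class ChunkedFilterSketch {
  private final List<byte[]> firstKeyPerChunk = new ArrayList<>(); // plays the role of the root index
  private final List<BitSet> chunks = new ArrayList<>();
  private final int maxKeysPerChunk;
  private final int bitsPerChunk;
  private int keysInCurrentChunk = 0;

  public ChunkedFilterSketch(int maxKeysPerChunk, int bitsPerChunk) {
    this.maxKeysPerChunk = maxKeysPerChunk;
    this.bitsPerChunk = bitsPerChunk;
  }

  /** Keys must arrive in sorted order, as cells do when an HFile is written. */
  public void add(byte[] key) {
    if (chunks.isEmpty() || keysInCurrentChunk >= maxKeysPerChunk) {
      firstKeyPerChunk.add(key);            // first key of the new chunk feeds the index
      chunks.add(new BitSet(bitsPerChunk)); // seal the old chunk, allocate a fresh one
      keysInCurrentChunk = 0;
    }
    chunks.get(chunks.size() - 1).set(bucket(key));
    keysInCurrentChunk++;
  }

  public boolean mightContain(byte[] key) {
    int chunk = findChunk(key);             // index lookup: which chunk could cover this key
    return chunk >= 0 && chunks.get(chunk).get(bucket(key)); // probe only that chunk
  }

  private int findChunk(byte[] key) {
    // Last chunk whose first key is <= key (linear scan here for clarity).
    int idx = -1;
    for (int i = 0; i < firstKeyPerChunk.size(); i++) {
      if (Arrays.compare(firstKeyPerChunk.get(i), key) <= 0) {
        idx = i;
      }
    }
    return idx;
  }

  private int bucket(byte[] key) {
    return Math.floorMod(Arrays.hashCode(key), bitsPerChunk);
  }
}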
- * - * @param closing true if we are closing the file, so we do not expect new - * keys to show up + * @param closing true if we are closing the file, so we do not expect new keys to show up */ private void enqueueReadyChunk(boolean closing) { - if (chunk == null || - (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) { + if (chunk == null || (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) { return; } if (firstKeyInChunk == null) { - throw new NullPointerException("Trying to enqueue a chunk, " + - "but first key is null: closing=" + closing + ", keyCount=" + - chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); + throw new NullPointerException( + "Trying to enqueue a chunk, " + "but first key is null: closing=" + closing + ", keyCount=" + + chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); } ReadyChunk readyChunk = new ReadyChunk(); @@ -147,10 +131,9 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase chunk.compactBloom(); if (LOG.isTraceEnabled() && prevByteSize != chunk.getByteSize()) { - LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from [" - + prevMaxKeys + " max keys, " + prevByteSize + " bytes] to [" - + chunk.getMaxKeys() + " max keys, " + chunk.getByteSize() - + " bytes]"); + LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from [" + prevMaxKeys + + " max keys, " + prevByteSize + " bytes] to [" + chunk.getMaxKeys() + " max keys, " + + chunk.getByteSize() + " bytes]"); } totalMaxKeys += chunk.getMaxKeys(); @@ -163,21 +146,19 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase @Override public void append(Cell cell) throws IOException { - if (cell == null) - throw new NullPointerException(); + if (cell == null) throw new NullPointerException(); enqueueReadyChunk(false); if (chunk == null) { if (firstKeyInChunk != null) { - throw new IllegalStateException("First key in chunk already set: " - + Bytes.toStringBinary(firstKeyInChunk)); + throw new IllegalStateException( + "First key in chunk already set: " + Bytes.toStringBinary(firstKeyInChunk)); } // This will be done only once per chunk if (bloomType == BloomType.ROWCOL) { - firstKeyInChunk = - PrivateCellUtil - .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); + firstKeyInChunk = PrivateCellUtil + .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); } else { firstKeyInChunk = CellUtil.copyRow(cell); } @@ -204,8 +185,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase private void allocateNewChunk() { if (prevChunk == null) { // First chunk - chunk = BloomFilterUtil.createBySize(chunkByteSize, errorRate, - hashType, maxFold, bloomType); + chunk = BloomFilterUtil.createBySize(chunkByteSize, errorRate, hashType, maxFold, bloomType); } else { // Use the same parameters as the last chunk, but a new array and // a zero key count. 
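// Minimal sketch (not HBase source, names here are hypothetical simplifications) of the chunking
// pattern the CompoundBloomFilterWriter hunks above describe: keys go into the current chunk, and
// once the chunk reaches its key budget it is queued for writing and a fresh chunk is started; on
// close the partially filled chunk is flushed too. The real writer additionally remembers the
// first key of each chunk so an index over the chunks can be built.
import java.util.ArrayDeque;
import java.util.Queue;

final class ChunkedWriterSketch {
  static final class Chunk {
    final byte[] firstKey;  // first key added to this chunk
    final int maxKeys;      // budget before the chunk is considered full
    int keyCount;           // keys added so far
    Chunk(byte[] firstKey, int maxKeys) { this.firstKey = firstKey; this.maxKeys = maxKeys; }
  }

  private final Queue<Chunk> readyChunks = new ArrayDeque<>();
  private final int maxKeysPerChunk;
  private Chunk current;

  ChunkedWriterSketch(int maxKeysPerChunk) { this.maxKeysPerChunk = maxKeysPerChunk; }

  void append(byte[] key) {
    // Mirrors enqueueReadyChunk(false): hand off the current chunk once it is full.
    if (current != null && current.keyCount >= current.maxKeys) {
      readyChunks.add(current);
      current = null;
    }
    if (current == null) {
      current = new Chunk(key, maxKeysPerChunk); // first key of the new chunk is remembered
    }
    current.keyCount++;
  }

  void close() {
    // Mirrors enqueueReadyChunk(true): a partially filled chunk is still written out on close.
    if (current != null && current.keyCount > 0) {
      readyChunks.add(current);
      current = null;
    }
  }

  Queue<Chunk> readyChunks() { return readyChunks; }
}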
@@ -213,13 +193,13 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase } if (chunk.getKeyCount() != 0) { - throw new IllegalStateException("keyCount=" + chunk.getKeyCount() - + " > 0"); + throw new IllegalStateException("keyCount=" + chunk.getKeyCount() + " > 0"); } chunk.allocBloom(); ++numChunks; } + @Override public void writeInlineBlock(DataOutput out) throws IOException { // We don't remove the chunk from the queue here, because we might need it @@ -242,7 +222,8 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase } private class MetaWriter implements Writable { - protected MetaWriter() {} + protected MetaWriter() { + } @Override public void readFields(DataInput in) throws IOException { @@ -250,11 +231,11 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase } /** - * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, - * although the two metadata formats do not have to be consistent. This - * does have to be consistent with how {@link - * CompoundBloomFilter#CompoundBloomFilter(DataInput, - * org.apache.hadoop.hbase.io.hfile.HFile.Reader)} reads fields. + * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, although + * the two metadata formats do not have to be consistent. This does have to be consistent with + * how + * {@link CompoundBloomFilter#CompoundBloomFilter(DataInput, org.apache.hadoop.hbase.io.hfile.HFile.Reader)} + * reads fields. */ @Override public void write(DataOutput out) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java index 28516c6bab4..6a40ab1b15b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.yetus.audience.InterfaceAudience; /** * This exception is thrown when attempts to read an HFile fail due to corruption or truncation diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java index 7f7cc3e41b2..2e4a55bc7d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,11 +37,11 @@ import org.apache.yetus.audience.InterfaceAudience; public class ExclusiveMemHFileBlock extends HFileBlock { ExclusiveMemHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, - long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, - HFileContext fileContext, ByteBuffAllocator alloc) { + int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, + long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, + ByteBuffAllocator alloc) { super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, buf, - fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); + fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java index a0c34c9fe3e..34d6c8d926b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize { /** * Whether the cache contains the block with specified cacheKey - * * @param cacheKey cache key for the block * @return true if it contains the block */ @@ -37,7 +36,6 @@ public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize { /** * Specifies the secondary cache. An entry that is evicted from this cache due to a size * constraint will be inserted into the victim cache. - * * @param victimCache the second level cache * @throws IllegalArgumentException if the victim cache had already been set */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 6a2dcf926a4..b358ad60689 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.io.hfile; - import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInput; @@ -36,20 +34,20 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; /** - * The {@link HFile} has a fixed trailer which contains offsets to other - * variable parts of the file. Also includes basic metadata on this file. 
The - * trailer size is fixed within a given {@link HFile} format version only, but - * we always store the version number as the last four-byte integer of the file. - * The version number itself is split into two portions, a major - * version and a minor version. The last three bytes of a file are the major - * version and a single preceding byte is the minor number. The major version - * determines which readers/writers to use to read/write a hfile while a minor - * version determines smaller changes in hfile format that do not need a new - * reader/writer type. + * The {@link HFile} has a fixed trailer which contains offsets to other variable parts of the file. + * Also includes basic metadata on this file. The trailer size is fixed within a given {@link HFile} + * format version only, but we always store the version number as the last four-byte integer of the + * file. The version number itself is split into two portions, a major version and a minor version. + * The last three bytes of a file are the major version and a single preceding byte is the minor + * number. The major version determines which readers/writers to use to read/write a hfile while a + * minor version determines smaller changes in hfile format that do not need a new reader/writer + * type. */ @InterfaceAudience.Private public class FixedFileTrailer { @@ -61,17 +59,16 @@ public class FixedFileTrailer { private static final int MAX_COMPARATOR_NAME_LENGTH = 128; /** - * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but - * only potentially useful for pretty-printing in v2. + * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but only potentially + * useful for pretty-printing in v2. */ private long fileInfoOffset; /** - * In version 1, the offset to the data block index. Starting from version 2, - * the meaning of this field is the offset to the section of the file that - * should be loaded at the time the file is being opened: i.e. on open we load - * the root index, file info, etc. See http://hbase.apache.org/book.html#_hfile_format_2 - * in the reference guide. + * In version 1, the offset to the data block index. Starting from version 2, the meaning of this + * field is the offset to the section of the file that should be loaded at the time the file is + * being opened: i.e. on open we load the root index, file info, etc. See + * http://hbase.apache.org/book.html#_hfile_format_2 in the reference guide. */ private long loadOnOpenDataOffset; @@ -96,8 +93,7 @@ public class FixedFileTrailer { private long totalUncompressedBytes; /** - * The number of key/value pairs in the file. This field was int in version 1, - * but is now long. + * The number of key/value pairs in the file. This field was int in version 1, but is now long. */ private long entryCount; @@ -107,8 +103,7 @@ public class FixedFileTrailer { private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE; /** - * The number of levels in the potentially multi-level data index. Used from - * version 2 onwards. + * The number of levels in the potentially multi-level data index. Used from version 2 onwards. */ private int numDataIndexLevels; @@ -118,8 +113,7 @@ public class FixedFileTrailer { private long firstDataBlockOffset; /** - * It is guaranteed that no key/value data blocks start after this offset in - * the file. + * It is guaranteed that no key/value data blocks start after this offset in the file. 
*/ private long lastDataBlockOffset; @@ -185,9 +179,8 @@ public class FixedFileTrailer { } /** - * Write the trailer to a data stream. We support writing version 1 for - * testing and for determining version 1 trailer size. It is also easy to see - * what fields changed in version 2. + * Write the trailer to a data stream. We support writing version 1 for testing and for + * determining version 1 trailer size. It is also easy to see what fields changed in version 2. */ void serialize(DataOutputStream outputStream) throws IOException { HFile.checkFormatVersion(majorVersion); @@ -206,15 +199,11 @@ public class FixedFileTrailer { HFileProtos.FileTrailerProto toProtobuf() { HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder() - .setFileInfoOffset(fileInfoOffset) - .setLoadOnOpenDataOffset(loadOnOpenDataOffset) + .setFileInfoOffset(fileInfoOffset).setLoadOnOpenDataOffset(loadOnOpenDataOffset) .setUncompressedDataIndexSize(uncompressedDataIndexSize) - .setTotalUncompressedBytes(totalUncompressedBytes) - .setDataIndexCount(dataIndexCount) - .setMetaIndexCount(metaIndexCount) - .setEntryCount(entryCount) - .setNumDataIndexLevels(numDataIndexLevels) - .setFirstDataBlockOffset(firstDataBlockOffset) + .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount) + .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount) + .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset) .setLastDataBlockOffset(lastDataBlockOffset) .setComparatorClassName(getHBase1CompatibleName(comparatorClassName)) .setCompressionCodec(compressionCodec.ordinal()); @@ -225,9 +214,8 @@ public class FixedFileTrailer { } /** - * Write trailer data as protobuf. - * NOTE: we run a translation on the comparator name and will serialize the old hbase-1.x where - * it makes sense. See {@link #getHBase1CompatibleName(String)}. + * Write trailer data as protobuf. NOTE: we run a translation on the comparator name and will + * serialize the old hbase-1.x where it makes sense. See {@link #getHBase1CompatibleName(String)}. */ void serializeAsPB(DataOutputStream output) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -249,17 +237,18 @@ public class FixedFileTrailer { } /** - * Deserialize the fixed file trailer from the given stream. The version needs - * to already be specified. Make sure this is consistent with - * {@link #serialize(DataOutputStream)}. + * Deserialize the fixed file trailer from the given stream. The version needs to already be + * specified. Make sure this is consistent with {@link #serialize(DataOutputStream)}. */ void deserialize(DataInputStream inputStream) throws IOException { HFile.checkFormatVersion(majorVersion); BlockType.TRAILER.readAndCheck(inputStream); - if (majorVersion > 2 - || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) { + if ( + majorVersion > 2 + || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION) + ) { deserializeFromPB(inputStream); } else { deserializeFromWritable(inputStream); @@ -342,10 +331,10 @@ public class FixedFileTrailer { numDataIndexLevels = input.readInt(); firstDataBlockOffset = input.readLong(); lastDataBlockOffset = input.readLong(); - // TODO this is a classname encoded into an HFile's trailer. We are going to need to have + // TODO this is a classname encoded into an HFile's trailer. We are going to need to have // some compat code here. 
- setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input, - MAX_COMPARATOR_NAME_LENGTH))); + setComparatorClass( + getComparatorClass(Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH))); } private void append(StringBuilder sb, String s) { @@ -381,19 +370,16 @@ public class FixedFileTrailer { /** * Reads a file trailer from the given file. - * - * @param istream the input stream with the ability to seek. Does not have to - * be buffered, as only one read operation is made. + * @param istream the input stream with the ability to seek. Does not have to be buffered, as + * only one read operation is made. * @param fileSize the file size. Can be obtained using - * {@link org.apache.hadoop.fs.FileSystem#getFileStatus( - *org.apache.hadoop.fs.Path)}. + * {@link org.apache.hadoop.fs.FileSystem#getFileStatus( org.apache.hadoop.fs.Path)}. * @return the fixed file trailer read - * @throws IOException if failed to read from the underlying stream, or the - * trailer is corrupted, or the version of the trailer is - * unsupported + * @throws IOException if failed to read from the underlying stream, or the trailer is corrupted, + * or the version of the trailer is unsupported */ - public static FixedFileTrailer readFromStream(FSDataInputStream istream, - long fileSize) throws IOException { + public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize) + throws IOException { int bufferSize = MAX_TRAILER_SIZE; long seekPoint = fileSize - bufferSize; if (seekPoint < 0) { @@ -405,8 +391,7 @@ public class FixedFileTrailer { HFileUtil.seekOnMultipleSources(istream, seekPoint); ByteBuffer buf = ByteBuffer.allocate(bufferSize); - istream.readFully(buf.array(), buf.arrayOffset(), - buf.arrayOffset() + buf.limit()); + istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit()); // Read the version from the last int of the file. 
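// Minimal sketch (not HBase source) of the version handling readFromStream relies on: the last
// four bytes of an HFile hold a packed version int whose low three bytes are the major version
// and whose high byte is the minor version, matching materializeVersion / extractMajorVersion /
// extractMinorVersion in this class. The round trip below only demonstrates the bit layout.
final class TrailerVersionSketch {
  static int materialize(int major, int minor) {
    return (major & 0x00ffffff) | (minor << 24);
  }

  static int major(int serialized) {
    return serialized & 0x00ffffff; // low 3 bytes
  }

  static int minor(int serialized) {
    return serialized >>> 24;       // high byte
  }

  public static void main(String[] args) {
    int packed = materialize(3, 3); // e.g. major version 3 with some minor version
    System.out.println("major=" + major(packed) + " minor=" + minor(packed));
  }
}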
buf.position(buf.limit() - Bytes.SIZEOF_INT); @@ -428,23 +413,21 @@ public class FixedFileTrailer { public void expectMajorVersion(int expected) { if (majorVersion != expected) { - throw new IllegalArgumentException("Invalid HFile major version: " - + majorVersion - + " (expected: " + expected + ")"); + throw new IllegalArgumentException( + "Invalid HFile major version: " + majorVersion + " (expected: " + expected + ")"); } } public void expectMinorVersion(int expected) { if (minorVersion != expected) { - throw new IllegalArgumentException("Invalid HFile minor version: " - + minorVersion + " (expected: " + expected + ")"); + throw new IllegalArgumentException( + "Invalid HFile minor version: " + minorVersion + " (expected: " + expected + ")"); } } public void expectAtLeastMajorVersion(int lowerBound) { if (majorVersion < lowerBound) { - throw new IllegalArgumentException("Invalid HFile major version: " - + majorVersion + throw new IllegalArgumentException("Invalid HFile major version: " + majorVersion + " (expected: " + lowerBound + " or higher)."); } } @@ -569,21 +552,20 @@ public class FixedFileTrailer { } /** - * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather - * than the new name; writing the new name will make it so newly-written hfiles are not parseable - * by hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters + * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather than + * the new name; writing the new name will make it so newly-written hfiles are not parseable by + * hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters * reading hbase-2.x produce. *

        - * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare - * KeyValues. In hbase-2.x they were renamed making use of the more generic 'Cell' - * nomenclature to indicate that we intend to move away from KeyValues post hbase-2. A naming - * change is not reason enough to make it so hbase-1.x cannot read hbase-2.x files given the - * structure goes unchanged (hfile v3). So, lets write the old names for Comparators into the - * hfile tails in hbase-2. Here is where we do the translation. - * {@link #getComparatorClass(String)} does translation going the other way. - * - *

        The translation is done on the serialized Protobuf only.

        - * + * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare KeyValues. In + * hbase-2.x they were renamed making use of the more generic 'Cell' nomenclature to indicate that + * we intend to move away from KeyValues post hbase-2. A naming change is not reason enough to + * make it so hbase-1.x cannot read hbase-2.x files given the structure goes unchanged (hfile v3). + * So, lets write the old names for Comparators into the hfile tails in hbase-2. Here is where we + * do the translation. {@link #getComparatorClass(String)} does translation going the other way. + *

        + * The translation is done on the serialized Protobuf only. + *

        * @param comparator String class name of the Comparator used in this hfile. * @return What to store in the trailer as our comparator name. * @see #getComparatorClass(String) @@ -606,18 +588,25 @@ public class FixedFileTrailer { throws IOException { Class comparatorKlass; // for BC - if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) { + if ( + comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName()) + || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator")) + ) { comparatorKlass = CellComparatorImpl.class; - } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) - || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) { + } else if ( + comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) + || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) + || (comparatorClassName + .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) + || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator")) + ) { comparatorKlass = MetaCellComparator.class; - } else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") - || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) { + } else if ( + comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") + || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator") + ) { // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from here // Bytes.BYTES_RAWCOMPARATOR is not a CellComparator comparatorKlass = null; @@ -635,10 +624,10 @@ public class FixedFileTrailer { static CellComparator createComparator(String comparatorClassName) throws IOException { if (comparatorClassName.equals(CellComparatorImpl.COMPARATOR.getClass().getName())) { return CellComparatorImpl.COMPARATOR; - } else if (comparatorClassName.equals( - MetaCellComparator.META_COMPARATOR.getClass().getName())) { - return MetaCellComparator.META_COMPARATOR; - } + } else + if (comparatorClassName.equals(MetaCellComparator.META_COMPARATOR.getClass().getName())) { + return MetaCellComparator.META_COMPARATOR; + } try { Class comparatorClass = getComparatorClass(comparatorClassName); if (comparatorClass != null) { @@ -660,8 +649,7 @@ public class FixedFileTrailer { return uncompressedDataIndexSize; } - public void setUncompressedDataIndexSize( - long uncompressedDataIndexSize) { + public void setUncompressedDataIndexSize(long uncompressedDataIndexSize) { expectAtLeastMajorVersion(2); this.uncompressedDataIndexSize = uncompressedDataIndexSize; } @@ -678,24 +666,23 @@ public class FixedFileTrailer { } /** - * Extracts the major version for a 4-byte serialized version data. 
- * The major version is the 3 least significant bytes + * Extracts the major version for a 4-byte serialized version data. The major version is the 3 + * least significant bytes */ private static int extractMajorVersion(int serializedVersion) { return (serializedVersion & 0x00ffffff); } /** - * Extracts the minor version for a 4-byte serialized version data. - * The major version are the 3 the most significant bytes + * Extracts the minor version for a 4-byte serialized version data. The major version are the 3 + * the most significant bytes */ private static int extractMinorVersion(int serializedVersion) { return (serializedVersion >>> 24); } /** - * Create a 4 byte serialized version number by combining the - * minor and major version numbers. + * Create a 4 byte serialized version number by combining the minor and major version numbers. */ static int materializeVersion(int majorVersion, int minorVersion) { return ((majorVersion & 0x00ffffff) | (minorVersion << 24)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 0cb100eefb5..ba7ff84996f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,68 +54,66 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * File format for hbase. - * A file of sorted key/value pairs. Both keys and values are byte arrays. + * File format for hbase. A file of sorted key/value pairs. Both keys and values are byte arrays. *

        - * The memory footprint of a HFile includes the following (below is taken from the - * TFile documentation - * but applies also to HFile): + * The memory footprint of a HFile includes the following (below is taken from the TFile documentation but applies also + * to HFile): *

          *
        • Some constant overhead of reading or writing a compressed block. *
            - *
          • Each compressed block requires one compression/decompression codec for - * I/O. + *
          • Each compressed block requires one compression/decompression codec for I/O. *
          • Temporary space to buffer the key. *
          • Temporary space to buffer the value. *
          - *
        • HFile index, which is proportional to the total number of Data Blocks. - * The total amount of memory needed to hold the index can be estimated as - * (56+AvgKeySize)*NumBlocks. + *
        • HFile index, which is proportional to the total number of Data Blocks. The total amount of + * memory needed to hold the index can be estimated as (56+AvgKeySize)*NumBlocks. *
        * Suggestions on performance optimization. *
          - *
        • Minimum block size. We recommend a setting of minimum block size between - * 8KB to 1MB for general usage. Larger block size is preferred if files are - * primarily for sequential access. However, it would lead to inefficient random - * access (because there are more data to decompress). Smaller blocks are good - * for random access, but require more memory to hold the block index, and may - * be slower to create (because we must flush the compressor stream at the - * conclusion of each data block, which leads to an FS I/O flush). Further, due - * to the internal caching in Compression codec, the smallest possible block - * size would be around 20KB-30KB. - *
        • The current implementation does not offer true multi-threading for - * reading. The implementation uses FSDataInputStream seek()+read(), which is - * shown to be much faster than positioned-read call in single thread mode. - * However, it also means that if multiple threads attempt to access the same - * HFile (using multiple scanners) simultaneously, the actual I/O is carried out - * sequentially even if they access different DFS blocks (Reexamine! pread seems - * to be 10% faster than seek+read in my testing -- stack). - *
        • Compression codec. Use "none" if the data is not very compressable (by - * compressable, I mean a compression ratio at least 2:1). Generally, use "lzo" - * as the starting point for experimenting. "gz" overs slightly better - * compression ratio over "lzo" but requires 4x CPU to compress and 2x CPU to - * decompress, comparing to "lzo". + *
        • Minimum block size. We recommend a setting of minimum block size between 8KB to 1MB for + * general usage. Larger block size is preferred if files are primarily for sequential access. + * However, it would lead to inefficient random access (because there are more data to decompress). + * Smaller blocks are good for random access, but require more memory to hold the block index, and + * may be slower to create (because we must flush the compressor stream at the conclusion of each + * data block, which leads to an FS I/O flush). Further, due to the internal caching in Compression + * codec, the smallest possible block size would be around 20KB-30KB. + *
        • The current implementation does not offer true multi-threading for reading. The + * implementation uses FSDataInputStream seek()+read(), which is shown to be much faster than + * positioned-read call in single thread mode. However, it also means that if multiple threads + * attempt to access the same HFile (using multiple scanners) simultaneously, the actual I/O is + * carried out sequentially even if they access different DFS blocks (Reexamine! pread seems to be + * 10% faster than seek+read in my testing -- stack). + *
        • Compression codec. Use "none" if the data is not very compressable (by compressable, I mean a + * compression ratio at least 2:1). Generally, use "lzo" as the starting point for experimenting. + * "gz" overs slightly better compression ratio over "lzo" but requires 4x CPU to compress and 2x + * CPU to decompress, comparing to "lzo". *
        - * * For more on the background behind HFile, see HBASE-61. *

        - * File is made of data blocks followed by meta data blocks (if any), a fileinfo - * block, data block index, meta data block index, and a fixed size trailer - * which records the offsets at which file changes content type. - *

        <data blocks><meta blocks><fileinfo><
        - * data index><meta index><trailer>
        - * Each block has a bit of magic at its start. Block are comprised of - * key/values. In data blocks, they are both byte arrays. Metadata blocks are - * a String key and a byte array value. An empty file looks like this: - *
        <fileinfo><trailer>
        . That is, there are not data nor meta - * blocks present. + * File is made of data blocks followed by meta data blocks (if any), a fileinfo block, data block + * index, meta data block index, and a fixed size trailer which records the offsets at which file + * changes content type. + * + *
        + * <data blocks><meta blocks><fileinfo><
        + * data index><meta index><trailer>
        + * 
        + * + * Each block has a bit of magic at its start. Block are comprised of key/values. In data blocks, + * they are both byte arrays. Metadata blocks are a String key and a byte array value. An empty file + * looks like this: + * + *
        + * <fileinfo><trailer>
        + * 
        + * + * . That is, there are not data nor meta blocks present. *

        - * TODO: Do scanners need to be able to take a start and end row? - * TODO: Should BlockIndex know the name of its file? Should it have a Path - * that points at its file say for the case where an index lives apart from - * an HFile instance? + * TODO: Do scanners need to be able to take a start and end row? TODO: Should BlockIndex know the + * name of its file? Should it have a Path that points at its file say for the case where an index + * lives apart from an HFile instance? */ @InterfaceAudience.Private public final class HFile { @@ -137,7 +134,8 @@ public final class HFile { /** Minimum supported HFile format version */ public static final int MIN_FORMAT_VERSION = 2; - /** Maximum supported HFile format version + /** + * Maximum supported HFile format version */ public static final int MAX_FORMAT_VERSION = 3; @@ -147,17 +145,15 @@ public final class HFile { public static final int MIN_FORMAT_VERSION_WITH_TAGS = 3; /** Default compression name: none. */ - public final static String DEFAULT_COMPRESSION = - DEFAULT_COMPRESSION_ALGORITHM.getName(); + public final static String DEFAULT_COMPRESSION = DEFAULT_COMPRESSION_ALGORITHM.getName(); /** Meta data block name for bloom filter bits. */ public static final String BLOOM_FILTER_DATA_KEY = "BLOOM_FILTER_DATA"; /** - * We assume that HFile path ends with - * ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at least this - * many levels of nesting. This is needed for identifying table and CF name - * from an HFile path. + * We assume that HFile path ends with ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at + * least this many levels of nesting. This is needed for identifying table and CF name from an + * HFile path. */ public final static int MIN_NUM_HFILE_PATH_LEVELS = 5; @@ -178,19 +174,18 @@ public final class HFile { /** * Shutdown constructor. */ - private HFile() {} + private HFile() { + } /** - * Number of checksum verification failures. It also - * clears the counter. + * Number of checksum verification failures. It also clears the counter. */ public static final long getAndResetChecksumFailuresCount() { return CHECKSUM_FAILURES.sumThenReset(); } /** - * Number of checksum verification failures. It also - * clears the counter. + * Number of checksum verification failures. It also clears the counter. */ public static final long getChecksumFailuresCount() { return CHECKSUM_FAILURES.sum(); @@ -211,7 +206,7 @@ public final class HFile { /** API required to write an {@link HFile} */ public interface Writer extends Closeable, CellSink, ShipperListener { /** Max memstore (mvcc) timestamp in FileInfo */ - public static final byte [] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); + public static final byte[] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); /** Add an element to the file info map. */ void appendFileInfo(byte[] key, byte[] value) throws IOException; @@ -220,29 +215,27 @@ public final class HFile { Path getPath(); /** - * Adds an inline block writer such as a multi-level block index writer or - * a compound Bloom filter writer. + * Adds an inline block writer such as a multi-level block index writer or a compound Bloom + * filter writer. */ void addInlineBlockWriter(InlineBlockWriter bloomWriter); - // The below three methods take Writables. We'd like to undo Writables but undoing the below - // would be pretty painful. Could take a byte [] or a Message but we want to be backward + // The below three methods take Writables. 
We'd like to undo Writables but undoing the below + // would be pretty painful. Could take a byte [] or a Message but we want to be backward // compatible around hfiles so would need to map between Message and Writable or byte [] and - // current Writable serialization. This would be a bit of work to little gain. Thats my - // thinking at moment. St.Ack 20121129 + // current Writable serialization. This would be a bit of work to little gain. Thats my + // thinking at moment. St.Ack 20121129 void appendMetaBlock(String bloomFilterMetaKey, Writable metaWriter); /** - * Store general Bloom filter in the file. This does not deal with Bloom filter - * internals but is necessary, since Bloom filters are stored differently - * in HFile version 1 and version 2. + * Store general Bloom filter in the file. This does not deal with Bloom filter internals but is + * necessary, since Bloom filters are stored differently in HFile version 1 and version 2. */ void addGeneralBloomFilter(BloomFilterWriter bfw); /** - * Store delete family Bloom filter in the file, which is only supported in - * HFile V2. + * Store delete family Bloom filter in the file, which is only supported in HFile V2. */ void addDeleteFamilyBloomFilter(BloomFilterWriter bfw) throws IOException; @@ -253,8 +246,8 @@ public final class HFile { } /** - * This variety of ways to construct writers is used throughout the code, and - * we want to be able to swap writer implementations. + * This variety of ways to construct writers is used throughout the code, and we want to be able + * to swap writer implementations. */ public static class WriterFactory { protected final Configuration conf; @@ -301,11 +294,9 @@ public final class HFile { return this; } - public Writer create() throws IOException { if ((path != null ? 1 : 0) + (ostream != null ? 1 : 0) != 1) { - throw new AssertionError("Please specify exactly one of " + - "filesystem/path or path"); + throw new AssertionError("Please specify exactly one of " + "filesystem/path or path"); } if (path != null) { ostream = HFileWriterImpl.createOutputStream(conf, fs, path, favoredNodes); @@ -330,68 +321,65 @@ public final class HFile { } /** - * Returns the factory to be used to create {@link HFile} writers. - * Disables block cache access for all writers created through the - * returned factory. + * Returns the factory to be used to create {@link HFile} writers. Disables block cache access for + * all writers created through the returned factory. */ - public static final WriterFactory getWriterFactoryNoCache(Configuration - conf) { + public static final WriterFactory getWriterFactoryNoCache(Configuration conf) { return HFile.getWriterFactory(conf, CacheConfig.DISABLED); } /** * Returns the factory to be used to create {@link HFile} writers */ - public static final WriterFactory getWriterFactory(Configuration conf, - CacheConfig cacheConf) { + public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) { int version = getFormatVersion(conf); switch (version) { case 2: - throw new IllegalArgumentException("This should never happen. " + - "Did you change hfile.format.version to read v2? This version of the software writes v3" + - " hfiles only (but it can read v2 files without having to update hfile.format.version " + - "in hbase-site.xml)"); + throw new IllegalArgumentException("This should never happen. " + + "Did you change hfile.format.version to read v2? 
This version of the software writes v3" + + " hfiles only (but it can read v2 files without having to update hfile.format.version " + + "in hbase-site.xml)"); case 3: return new HFile.WriterFactory(conf, cacheConf); default: - throw new IllegalArgumentException("Cannot create writer for HFile " + - "format version " + version); + throw new IllegalArgumentException( + "Cannot create writer for HFile " + "format version " + version); } } /** - * An abstraction used by the block index. - * Implementations will check cache for any asked-for block and return cached block if found. - * Otherwise, after reading from fs, will try and put block into cache before returning. + * An abstraction used by the block index. Implementations will check cache for any asked-for + * block and return cached block if found. Otherwise, after reading from fs, will try and put + * block into cache before returning. */ public interface CachingBlockReader { /** * Read in a file block. - * @param offset offset to read. - * @param onDiskBlockSize size of the block - * @param isCompaction is this block being read as part of a compaction - * @param expectedBlockType the block type we are expecting to read with this read operation, - * or null to read whatever block type is available and avoid checking (that might reduce - * caching efficiency of encoded data blocks) + * @param offset offset to read. + * @param onDiskBlockSize size of the block + * @param isCompaction is this block being read as part of a compaction + * @param expectedBlockType the block type we are expecting to read with this read + * operation, or null to read whatever block type is available + * and avoid checking (that might reduce caching efficiency of + * encoded data blocks) * @param expectedDataBlockEncoding the data block encoding the caller is expecting data blocks - * to be in, or null to not perform this check and return the block irrespective of the - * encoding. This check only applies to data blocks and can be set to null when the caller is - * expecting to read a non-data block and has set expectedBlockType accordingly. + * to be in, or null to not perform this check and return the + * block irrespective of the encoding. This check only applies + * to data blocks and can be set to null when the caller is + * expecting to read a non-data block and has set + * expectedBlockType accordingly. * @return Block wrapped in a ByteBuffer. */ - HFileBlock readBlock(long offset, long onDiskBlockSize, - boolean cacheBlock, final boolean pread, final boolean isCompaction, - final boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) - throws IOException; + HFileBlock readBlock(long offset, long onDiskBlockSize, boolean cacheBlock, final boolean pread, + final boolean isCompaction, final boolean updateCacheMetrics, BlockType expectedBlockType, + DataBlockEncoding expectedDataBlockEncoding) throws IOException; } /** An interface used by clients to open and iterate an {@link HFile}. */ public interface Reader extends Closeable, CachingBlockReader { /** - * Returns this reader's "name". Usually the last component of the path. - * Needs to be constant as the file is being moved to support caching on - * write. + * Returns this reader's "name". Usually the last component of the path. Needs to be constant as + * the file is being moved to support caching on write. 
*/ String getName(); @@ -421,23 +409,23 @@ public final class HFile { FixedFileTrailer getTrailer(); void setDataBlockIndexReader(HFileBlockIndex.CellBasedKeyBlockIndexReader reader); + HFileBlockIndex.CellBasedKeyBlockIndexReader getDataBlockIndexReader(); void setMetaBlockIndexReader(HFileBlockIndex.ByteArrayKeyBlockIndexReader reader); + HFileBlockIndex.ByteArrayKeyBlockIndexReader getMetaBlockIndexReader(); HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread); /** - * Retrieves general Bloom filter metadata as appropriate for each - * {@link HFile} version. - * Knows nothing about how that metadata is structured. + * Retrieves general Bloom filter metadata as appropriate for each {@link HFile} version. Knows + * nothing about how that metadata is structured. */ DataInput getGeneralBloomFilterMetadata() throws IOException; /** - * Retrieves delete family Bloom filter metadata as appropriate for each - * {@link HFile} version. + * Retrieves delete family Bloom filter metadata as appropriate for each {@link HFile} version. * Knows nothing about how that metadata is structured. */ DataInput getDeleteBloomFilterMetadata() throws IOException; @@ -471,25 +459,26 @@ public final class HFile { void unbufferStream(); ReaderContext getContext(); + HFileInfo getHFileInfo(); + void setDataBlockEncoder(HFileDataBlockEncoder dataBlockEncoder); } /** - * Method returns the reader given the specified arguments. - * TODO This is a bad abstraction. See HBASE-6635. - * - * @param context Reader context info - * @param fileInfo HFile info + * Method returns the reader given the specified arguments. TODO This is a bad abstraction. See + * HBASE-6635. + * @param context Reader context info + * @param fileInfo HFile info * @param cacheConf Cache configuation values, cannot be null. - * @param conf Configuration + * @param conf Configuration * @return an appropriate instance of HFileReader * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", + justification = "Intentional") public static Reader createReader(ReaderContext context, HFileInfo fileInfo, - CacheConfig cacheConf, Configuration conf) throws IOException { + CacheConfig cacheConf, Configuration conf) throws IOException { try { if (context.getReaderType() == ReaderType.STREAM) { // stream reader will share trailer with pread reader, see HFileStreamReader#copyFields @@ -508,8 +497,8 @@ public final class HFile { } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e)); - throw new CorruptHFileException("Problem reading HFile Trailer from file " - + context.getFilePath(), t); + throw new CorruptHFileException( + "Problem reading HFile Trailer from file " + context.getFilePath(), t); } finally { context.getInputStreamWrapper().unbuffer(); } @@ -517,43 +506,39 @@ public final class HFile { /** * Creates reader with cache configuration disabled - * @param fs filesystem + * @param fs filesystem * @param path Path to file to read * @param conf Configuration * @return an active Reader instance - * @throws IOException Will throw a CorruptHFileException - * (DoNotRetryIOException subtype) if hfile is corrupt/invalid. 
+ * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile + * is corrupt/invalid. */ public static Reader createReader(FileSystem fs, Path path, Configuration conf) - throws IOException { + throws IOException { // The primaryReplicaReader is mainly used for constructing block cache key, so if we do not use // block cache then it is OK to set it as any value. We use true here. return createReader(fs, path, CacheConfig.DISABLED, true, conf); } /** - * @param fs filesystem - * @param path Path to file to read - * @param cacheConf This must not be null. + * @param fs filesystem + * @param path Path to file to read + * @param cacheConf This must not be null. * @param primaryReplicaReader true if this is a reader for primary replica - * @param conf Configuration + * @param conf Configuration * @return an active Reader instance * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile - * is corrupt/invalid. + * is corrupt/invalid. * @see CacheConfig#CacheConfig(Configuration) */ public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf, - boolean primaryReplicaReader, Configuration conf) throws IOException { + boolean primaryReplicaReader, Configuration conf) throws IOException { Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf"); FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path); - ReaderContext context = new ReaderContextBuilder() - .withFilePath(path) - .withInputStreamWrapper(stream) - .withFileSize(fs.getFileStatus(path).getLen()) - .withFileSystem(stream.getHfs()) - .withPrimaryReplicaReader(primaryReplicaReader) - .withReaderType(ReaderType.PREAD) - .build(); + ReaderContext context = + new ReaderContextBuilder().withFilePath(path).withInputStreamWrapper(stream) + .withFileSize(fs.getFileStatus(path).getLen()).withFileSystem(stream.getHfs()) + .withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD).build(); HFileInfo fileInfo = new HFileInfo(context, conf); Reader reader = createReader(context, fileInfo, cacheConf, conf); fileInfo.initMetaAndIndex(reader); @@ -562,7 +547,7 @@ public final class HFile { /** * Returns true if the specified file has a valid HFile Trailer. - * @param fs filesystem + * @param fs filesystem * @param path Path to file to verify * @return true if the file has a valid HFile Trailer, otherwise false * @throws IOException if failed to read from the underlying stream @@ -573,13 +558,13 @@ public final class HFile { /** * Returns true if the specified file has a valid HFile Trailer. - * @param fs filesystem + * @param fs filesystem * @param fileStatus the file to verify * @return true if the file has a valid HFile Trailer, otherwise false * @throws IOException if failed to read from the underlying stream */ public static boolean isHFileFormat(final FileSystem fs, final FileStatus fileStatus) - throws IOException { + throws IOException { final Path path = fileStatus.getPath(); final long size = fileStatus.getLen(); try (FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path)) { @@ -593,12 +578,9 @@ public final class HFile { } /** - * Get names of supported compression algorithms. The names are acceptable by - * HFile.Writer. - * - * @return Array of strings, each represents a supported compression - * algorithm. Currently, the following compression algorithms are - * supported. + * Get names of supported compression algorithms. The names are acceptable by HFile.Writer. 
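// Usage sketch for the reader factory shown in this hunk. createReader(fs, path, conf) and
// getScanner(conf, cacheBlocks, pread) appear in this patch; the HFileScanner calls (seekTo,
// next, getCell) and HBaseConfiguration.create() are assumed from the surrounding HBase API,
// and the path below is hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public final class HFileDumpSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/hbase/data/default/t1/r1/cf/somehfile"); // hypothetical hfile path
    // This overload disables the block cache internally, which is fine for a one-off scan.
    try (HFile.Reader reader = HFile.createReader(fs, path, conf)) {
      HFileScanner scanner = reader.getScanner(conf, false, false); // no caching, no pread
      if (scanner.seekTo()) {
        do {
          Cell cell = scanner.getCell();
          System.out.println(cell);
        } while (scanner.next());
      }
    }
  }
}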
+ * @return Array of strings, each represents a supported compression algorithm. Currently, the + * following compression algorithms are supported. *

          *
        • "none" - No compression. *
        • "gz" - GZIP compression. @@ -616,29 +598,28 @@ public final class HFile { static int longToInt(final long l) { // Expecting the size() of a block not exceeding 4GB. Assuming the // size() will wrap to negative integer if it exceeds 2GB (From tfile). - return (int)(l & 0x00000000ffffffffL); + return (int) (l & 0x00000000ffffffffL); } /** - * Returns all HFiles belonging to the given region directory. Could return an - * empty list. - * - * @param fs The file system reference. - * @param regionDir The region directory to scan. + * Returns all HFiles belonging to the given region directory. Could return an empty list. + * @param fs The file system reference. + * @param regionDir The region directory to scan. * @return The list of files found. * @throws IOException When scanning the files fails. */ - public static List getStoreFiles(FileSystem fs, Path regionDir) - throws IOException { + public static List getStoreFiles(FileSystem fs, Path regionDir) throws IOException { List regionHFiles = new ArrayList<>(); PathFilter dirFilter = new FSUtils.DirFilter(fs); FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter); - for(FileStatus dir : familyDirs) { + for (FileStatus dir : familyDirs) { FileStatus[] files = fs.listStatus(dir.getPath()); for (FileStatus file : files) { - if (!file.isDirectory() && - (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) && - (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) { + if ( + !file.isDirectory() + && (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) + && (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR)) + ) { regionHFiles.add(file.getPath()); } } @@ -647,32 +628,28 @@ public final class HFile { } /** - * Checks the given {@link HFile} format version, and throws an exception if - * invalid. Note that if the version number comes from an input file and has - * not been verified, the caller needs to re-throw an {@link IOException} to - * indicate that this is not a software error, but corrupted input. - * + * Checks the given {@link HFile} format version, and throws an exception if invalid. Note that if + * the version number comes from an input file and has not been verified, the caller needs to + * re-throw an {@link IOException} to indicate that this is not a software error, but corrupted + * input. 
* @param version an HFile version * @throws IllegalArgumentException if the version is invalid */ - public static void checkFormatVersion(int version) - throws IllegalArgumentException { + public static void checkFormatVersion(int version) throws IllegalArgumentException { if (version < MIN_FORMAT_VERSION || version > MAX_FORMAT_VERSION) { - throw new IllegalArgumentException("Invalid HFile version: " + version - + " (expected to be " + "between " + MIN_FORMAT_VERSION + " and " - + MAX_FORMAT_VERSION + ")"); + throw new IllegalArgumentException("Invalid HFile version: " + version + " (expected to be " + + "between " + MIN_FORMAT_VERSION + " and " + MAX_FORMAT_VERSION + ")"); } } - public static void checkHFileVersion(final Configuration c) { int version = c.getInt(FORMAT_VERSION_KEY, MAX_FORMAT_VERSION); if (version < MAX_FORMAT_VERSION || version > MAX_FORMAT_VERSION) { - throw new IllegalArgumentException("The setting for " + FORMAT_VERSION_KEY + - " (in your hbase-*.xml files) is " + version + " which does not match " + - MAX_FORMAT_VERSION + - "; are you running with a configuration from an older or newer hbase install (an " + - "incompatible hbase-default.xml or hbase-site.xml on your CLASSPATH)?"); + throw new IllegalArgumentException( + "The setting for " + FORMAT_VERSION_KEY + " (in your hbase-*.xml files) is " + version + + " which does not match " + MAX_FORMAT_VERSION + + "; are you running with a configuration from an older or newer hbase install (an " + + "incompatible hbase-default.xml or hbase-site.xml on your CLASSPATH)?"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index c311f6a09e1..747fa16221f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile; import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP; + import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; @@ -28,7 +29,6 @@ import java.util.List; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -62,56 +62,51 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Cacheable Blocks of an {@link HFile} version 2 file. - * Version 2 was introduced in hbase-0.92.0. - * - *

          Version 1 was the original file block. Version 2 was introduced when we changed the hbase file - * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support - * for Version 1 was removed in hbase-1.3.0. - * - *

          HFileBlock: Version 2

          - * In version 2, a block is structured as follows: + * Cacheable Blocks of an {@link HFile} version 2 file. Version 2 was introduced in hbase-0.92.0. + *

          + * Version 1 was the original file block. Version 2 was introduced when we changed the hbase file + * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support for + * Version 1 was removed in hbase-1.3.0. + *

          HFileBlock: Version 2

          In version 2, a block is structured as follows: *
            *
          • Header: See Writer#putHeader() for where header is written; header total size is * HFILEBLOCK_HEADER_SIZE *
              - *
            • 0. blockType: Magic record identifying the {@link BlockType} (8 bytes): - * e.g. DATABLK* + *
            • 0. blockType: Magic record identifying the {@link BlockType} (8 bytes): e.g. + * DATABLK* *
            • 1. onDiskSizeWithoutHeader: Compressed -- a.k.a 'on disk' -- block size, excluding header, * but including tailing checksum bytes (4 bytes) *
            • 2. uncompressedSizeWithoutHeader: Uncompressed block size, excluding header, and excluding * checksum bytes (4 bytes) - *
            • 3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is - * used to navigate to the previous block without having to go to the block index + *
            • 3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is used + * to navigate to the previous block without having to go to the block index *
            • 4: For minorVersions >=1, the ordinal describing checksum type (1 byte) *
            • 5: For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes) *
            • 6: onDiskDataSizeWithHeader: For minorVersions >=1, the size of data 'on disk', including * header, excluding checksums (4 bytes) *
            *
          • - *
          • Raw/Compressed/Encrypted/Encoded data: The compression - * algorithm is the same for all the blocks in an {@link HFile}. If compression is NONE, this is - * just raw, serialized Cells. - *
          • Tail: For minorVersions >=1, a series of 4 byte checksums, one each for - * the number of bytes specified by bytesPerChecksum. + *
          • Raw/Compressed/Encrypted/Encoded data: The compression algorithm is the same for all + * the blocks in an {@link HFile}. If compression is NONE, this is just raw, serialized Cells. + *
          • Tail: For minorVersions >=1, a series of 4 byte checksums, one each for the number + * of bytes specified by bytesPerChecksum. *
          - * - *
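(A reading aid, outside the patch itself: the bullet list above fully specifies the fixed header fields of a version 2 block, so a standalone decoder can be sketched directly from it. The class below is illustrative only; its name and structure are not HBase API, only the field order and widths come from the Javadoc, and the last three fields are present only for minor versions >= 1.)

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

/** Illustrative decoder for the fixed v2 header fields enumerated above. */
final class V2BlockHeaderSketch {
  final String blockTypeMagic;             // 8-byte magic record, e.g. "DATABLK*"
  final int onDiskSizeWithoutHeader;       // on-disk size excl. header, incl. trailing checksums
  final int uncompressedSizeWithoutHeader; // uncompressed size excl. header and checksums
  final long prevBlockOffset;              // offset of the previous block of the same type
  final byte checksumTypeOrdinal;          // minor version >= 1 only
  final int bytesPerChecksum;              // minor version >= 1 only
  final int onDiskDataSizeWithHeader;      // minor version >= 1 only

  V2BlockHeaderSketch(ByteBuffer header) {
    byte[] magic = new byte[8];
    header.get(magic);
    this.blockTypeMagic = new String(magic, StandardCharsets.US_ASCII);
    this.onDiskSizeWithoutHeader = header.getInt();
    this.uncompressedSizeWithoutHeader = header.getInt();
    this.prevBlockOffset = header.getLong();
    this.checksumTypeOrdinal = header.get();
    this.bytesPerChecksum = header.getInt();
    this.onDiskDataSizeWithHeader = header.getInt();
  }
}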
          Caching
          - * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the - * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase' - * checksums and then the offset into the file which is needed when we re-make a cache key - * when we return the block to the cache as 'done'. - * See {@link Cacheable#serialize(ByteBuffer, boolean)} and {@link Cacheable#getDeserializer()}. - * - *
          TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where - * we make a block to cache-on-write, there is an attempt at turning off checksums. This is not the - * only place we get blocks to cache. We also will cache the raw return from an hdfs read. In this - * case, the checksums may be present. If the cache is backed by something that doesn't do ECC, - * say an SSD, we might want to preserve checksums. For now this is open question. - *
          TODO: Over in BucketCache, we save a block allocation by doing a custom serialization. - * Be sure to change it if serialization changes in here. Could we add a method here that takes an - * IOEngine and that then serializes to it rather than expose our internals over in BucketCache? - * IOEngine is in the bucket subpackage. Pull it up? Then this class knows about bucketcache. Ugh. + *
          Caching
          Caches cache whole blocks with trailing checksums if any. We then tag on some + * metadata, the content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase' + * checksums and then the offset into the file which is needed when we re-make a cache key when we + * return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer, boolean)} and + * {@link Cacheable#getDeserializer()}. + *
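(Outside the patch, a minimal sketch of the cache metadata described above. BLOCK_METADATA_SPACE, defined further down in this hunk, reserves one byte for the hbase-checksum flag, eight for the block's file offset and four for the next block's on-disk size; the helper class and the exact field order shown here are assumptions for illustration, not HBase's serialization code.)

import java.nio.ByteBuffer;

/** Illustrative layout of the 13 bytes of metadata tagged onto a cached block. */
final class CachedBlockMetadataSketch {
  static final int BLOCK_METADATA_SPACE = Byte.BYTES + Long.BYTES + Integer.BYTES; // 13 bytes

  static ByteBuffer encode(boolean usesHBaseChecksum, long fileOffset, int nextBlockOnDiskSize) {
    ByteBuffer meta = ByteBuffer.allocate(BLOCK_METADATA_SPACE);
    meta.put((byte) (usesHBaseChecksum ? 1 : 0)); // flag: were 'hbase' checksums used?
    meta.putLong(fileOffset);                     // needed to re-make the cache key on return
    meta.putInt(nextBlockOnDiskSize);             // saves a seek on the following read, if known
    meta.flip();
    return meta;
  }
}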
          + * TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where we make + * a block to cache-on-write, there is an attempt at turning off checksums. This is not the only + * place we get blocks to cache. We also will cache the raw return from an hdfs read. In this case, + * the checksums may be present. If the cache is backed by something that doesn't do ECC, say an + * SSD, we might want to preserve checksums. For now this is open question. + *
          + * TODO: Over in BucketCache, we save a block allocation by doing a custom serialization. Be sure to + * change it if serialization changes in here. Could we add a method here that takes an IOEngine and + * that then serializes to it rather than expose our internals over in BucketCache? IOEngine is in + * the bucket subpackage. Pull it up? Then this class knows about bucketcache. Ugh. */ @InterfaceAudience.Private public class HFileBlock implements Cacheable { @@ -162,48 +157,47 @@ public class HFileBlock implements Cacheable { private long prevBlockOffset; /** - * Size on disk of header + data. Excludes checksum. Header field 6, - * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum. + * Size on disk of header + data. Excludes checksum. Header field 6, OR calculated from + * {@link #onDiskSizeWithoutHeader} when using HDFS checksum. * @see Writer#putHeader(byte[], int, int, int, int) */ private int onDiskDataSizeWithHeader; // End of Block Header fields. /** - * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by - * a single ByteBuffer or by many. Make no assumptions. - * - *
          Be careful reading from this buf. Duplicate and work on the duplicate or if - * not, be sure to reset position and limit else trouble down the road. - * - *
          TODO: Make this read-only once made. - * - *
          We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have - * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. - * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be - * good if could be confined to cache-use only but hard-to-do. + * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by a + * single ByteBuffer or by many. Make no assumptions. + *
          + * Be careful reading from this buf. Duplicate and work on the duplicate or if not, + * be sure to reset position and limit else trouble down the road. + *
          + * TODO: Make this read-only once made. + *
          + * We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have a + * ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. So, + * we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be good if + * could be confined to cache-use only but hard-to-do. */ private ByteBuff buf; - /** Meta data that holds meta information on the hfileblock. + /** + * Meta data that holds meta information on the hfileblock. */ private HFileContext fileContext; /** - * The offset of this block in the file. Populated by the reader for - * convenience of access. This offset is not part of the block header. + * The offset of this block in the file. Populated by the reader for convenience of access. This + * offset is not part of the block header. */ private long offset = UNSET; /** - * The on-disk size of the next block, including the header and checksums if present. - * UNSET if unknown. - * - * Blocks try to carry the size of the next block to read in this data member. Usually - * we get block sizes from the hfile index but sometimes the index is not available: - * e.g. when we read the indexes themselves (indexes are stored in blocks, we do not - * have an index for the indexes). Saves seeks especially around file open when - * there is a flurry of reading in hfile metadata. + * The on-disk size of the next block, including the header and checksums if present. UNSET if + * unknown. Blocks try to carry the size of the next block to read in this data member. Usually we + * get block sizes from the hfile index but sometimes the index is not available: e.g. when we + * read the indexes themselves (indexes are stored in blocks, we do not have an index for the + * indexes). Saves seeks especially around file open when there is a flurry of reading in hfile + * metadata. */ private int nextBlockOnDiskSize = UNSET; @@ -221,19 +215,18 @@ public class HFileBlock implements Cacheable { // How to get the estimate correctly? if it is a singleBB? public static final int MULTI_BYTE_BUFFER_HEAP_SIZE = - (int)ClassSize.estimateBase(MultiByteBuff.class, false); + (int) ClassSize.estimateBase(MultiByteBuff.class, false); /** - * Space for metadata on a block that gets stored along with the block when we cache it. - * There are a few bytes stuck on the end of the HFileBlock that we pull in from HDFS. - * 8 bytes are for the offset of this block (long) in the file. Offset is important because is is - * used when we remake the CacheKey when we return block to the cache when done. There is also - * a flag on whether checksumming is being done by hbase or not. See class comment for note on - * uncertain state of checksumming of blocks that come out of cache (should we or should we not?). - * Finally there are 4 bytes to hold the length of the next block which can save a seek on - * occasion if available. - * (This EXTRA info came in with original commit of the bucketcache, HBASE-7404. It was - * formerly known as EXTRA_SERIALIZATION_SPACE). + * Space for metadata on a block that gets stored along with the block when we cache it. There are + * a few bytes stuck on the end of the HFileBlock that we pull in from HDFS. 8 bytes are for the + * offset of this block (long) in the file. Offset is important because is is used when we remake + * the CacheKey when we return block to the cache when done. There is also a flag on whether + * checksumming is being done by hbase or not. 
See class comment for note on uncertain state of + * checksumming of blocks that come out of cache (should we or should we not?). Finally there are + * 4 bytes to hold the length of the next block which can save a seek on occasion if available. + * (This EXTRA info came in with original commit of the bucketcache, HBASE-7404. It was formerly + * known as EXTRA_SERIALIZATION_SPACE). */ public static final int BLOCK_METADATA_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; @@ -244,12 +237,10 @@ public class HFileBlock implements Cacheable { static final int CHECKSUM_SIZE = Bytes.SIZEOF_INT; static final byte[] DUMMY_HEADER_NO_CHECKSUM = - new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM]; + new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM]; /** - * Used deserializing blocks from Cache. - * - * + * Used deserializing blocks from Cache. * ++++++++++++++ * + HFileBlock + * ++++++++++++++ @@ -267,8 +258,7 @@ public class HFileBlock implements Cacheable { } @Override - public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc) - throws IOException { + public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc) throws IOException { // The buf has the file block followed by block metadata. // Set limit to just before the BLOCK_METADATA_SPACE then rewind. buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind(); @@ -292,34 +282,36 @@ public class HFileBlock implements Cacheable { private static final int DESERIALIZER_IDENTIFIER; static { DESERIALIZER_IDENTIFIER = - CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER); + CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER); } /** - * Creates a new {@link HFile} block from the given fields. This constructor - * is used only while writing blocks and caching, - * and is sitting in a byte buffer and we want to stuff the block into cache. - * See {@link Writer#getBlockForCaching(CacheConfig)}. - * - *
          TODO: The caller presumes no checksumming - *
          TODO: HFile block writer can also off-heap ?
          - * required of this block instance since going into cache; checksum already verified on - * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD? - * - * @param blockType the type of this block, see {@link BlockType} - * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader} + * Creates a new {@link HFile} block from the given fields. This constructor is used only while + * writing blocks and caching, and is sitting in a byte buffer and we want to stuff the block into + * cache. See {@link Writer#getBlockForCaching(CacheConfig)}. + *
          + * TODO: The caller presumes no checksumming + *
          + * TODO: HFile block writer can also off-heap ? + *
          + * required of this block instance since going into cache; checksum already verified on underlying + * block data pulled in from filesystem. Is that correct? What if cache is SSD? + * @param blockType the type of this block, see {@link BlockType} + * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader} * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader} - * @param prevBlockOffset see {@link #prevBlockOffset} - * @param buf block buffer with header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) - * @param fillHeader when true, write the first 4 header fields into passed buffer. - * @param offset the file offset the block was read from - * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader} - * @param fileContext HFile meta data + * @param prevBlockOffset see {@link #prevBlockOffset} + * @param buf block buffer with header + * ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) + * @param fillHeader when true, write the first 4 header fields into passed + * buffer. + * @param offset the file offset the block was read from + * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader} + * @param fileContext HFile meta data */ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, - long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, - ByteBuffAllocator allocator) { + int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, + long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, + ByteBuffAllocator allocator) { this.blockType = blockType; this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader; this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader; @@ -337,25 +329,24 @@ public class HFileBlock implements Cacheable { } /** - * Creates a block from an existing buffer starting with a header. Rewinds - * and takes ownership of the buffer. By definition of rewind, ignores the - * buffer position, but if you slice the buffer beforehand, it will rewind - * to that point. + * Creates a block from an existing buffer starting with a header. Rewinds and takes ownership of + * the buffer. By definition of rewind, ignores the buffer position, but if you slice the buffer + * beforehand, it will rewind to that point. * @param buf Has header, content, and trailing checksums if present. */ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final long offset, - final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator) - throws IOException { + final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator) + throws IOException { buf.rewind(); final BlockType blockType = BlockType.read(buf); final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX); final int uncompressedSizeWithoutHeader = - buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX); + buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX); final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX); // This constructor is called when we deserialize a block from cache and when we read a block in // from the fs. fileCache is null when deserialized from cache so need to make up one. - HFileContextBuilder fileContextBuilder = fileContext != null ? 
- new HFileContextBuilder(fileContext) : new HFileContextBuilder(); + HFileContextBuilder fileContextBuilder = + fileContext != null ? new HFileContextBuilder(fileContext) : new HFileContextBuilder(); fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum); int onDiskDataSizeWithHeader; if (usesHBaseChecksum) { @@ -373,36 +364,30 @@ public class HFileBlock implements Cacheable { } fileContext = fileContextBuilder.build(); assert usesHBaseChecksum == fileContext.isUseHBaseChecksum(); - return new HFileBlockBuilder() - .withBlockType(blockType) - .withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader) - .withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader) - .withPrevBlockOffset(prevBlockOffset) - .withOffset(offset) - .withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader) - .withNextBlockOnDiskSize(nextBlockOnDiskSize) - .withHFileContext(fileContext) - .withByteBuffAllocator(allocator) - .withByteBuff(buf.rewind()) - .withShared(!buf.hasArray()) - .build(); + return new HFileBlockBuilder().withBlockType(blockType) + .withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader) + .withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader) + .withPrevBlockOffset(prevBlockOffset).withOffset(offset) + .withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader) + .withNextBlockOnDiskSize(nextBlockOnDiskSize).withHFileContext(fileContext) + .withByteBuffAllocator(allocator).withByteBuff(buf.rewind()).withShared(!buf.hasArray()) + .build(); } /** * Parse total on disk size including header and checksum. - * @param headerBuf Header ByteBuffer. Presumed exact size of header. + * @param headerBuf Header ByteBuffer. Presumed exact size of header. * @param verifyChecksum true if checksum verification is in use. * @return Size of the block with header included. */ - private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, - boolean verifyChecksum) { + private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean verifyChecksum) { return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(verifyChecksum); } /** * @return the on-disk size of the next block (including the header size and any checksums if - * present) read by peeking into the next block's header; use as a hint when doing - * a read of the next block when scanning or running over a file. + * present) read by peeking into the next block's header; use as a hint when doing a read + * of the next block when scanning or running over a file. */ int getNextBlockOnDiskSize() { return nextBlockOnDiskSize; @@ -436,8 +421,8 @@ public class HFileBlock implements Cacheable { /** @return get data block encoding id that was used to encode this block */ short getDataBlockEncodingId() { if (blockType != BlockType.ENCODED_DATA) { - throw new IllegalArgumentException("Querying encoder ID of a block " + - "of type other than " + BlockType.ENCODED_DATA + ": " + blockType); + throw new IllegalArgumentException("Querying encoder ID of a block " + "of type other than " + + BlockType.ENCODED_DATA + ": " + blockType); } return buf.getShort(headerSize()); } @@ -464,16 +449,15 @@ public class HFileBlock implements Cacheable { } /** - * @return the offset of the previous block of the same type in the file, or - * -1 if unknown + * @return the offset of the previous block of the same type in the file, or -1 if unknown */ long getPrevBlockOffset() { return prevBlockOffset; } /** - * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position - * is modified as side-effect. 
+ * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position is modified as + * side-effect. */ private void overwriteHeader() { buf.rewind(); @@ -511,10 +495,9 @@ public class HFileBlock implements Cacheable { * Returns a read-only duplicate of the buffer this block stores internally ready to be read. * Clients must not modify the buffer object though they may set position and limit on the * returned buffer since we pass back a duplicate. This method has to be public because it is used - * in {@link CompoundBloomFilter} to avoid object creation on every Bloom - * filter lookup, but has to be used with caution. Buffer holds header, block content, - * and any follow-on checksums if present. - * + * in {@link CompoundBloomFilter} to avoid object creation on every Bloom filter lookup, but has + * to be used with caution. Buffer holds header, block content, and any follow-on checksums if + * present. * @return the buffer of this block for read-only operations */ public ByteBuff getBufferReadOnly() { @@ -528,29 +511,28 @@ public class HFileBlock implements Cacheable { return this.allocator; } - private void sanityCheckAssertion(long valueFromBuf, long valueFromField, - String fieldName) throws IOException { + private void sanityCheckAssertion(long valueFromBuf, long valueFromField, String fieldName) + throws IOException { if (valueFromBuf != valueFromField) { throw new AssertionError(fieldName + " in the buffer (" + valueFromBuf - + ") is different from that in the field (" + valueFromField + ")"); + + ") is different from that in the field (" + valueFromField + ")"); } } private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromField) - throws IOException { + throws IOException { if (valueFromBuf != valueFromField) { - throw new IOException("Block type stored in the buffer: " + - valueFromBuf + ", block type field: " + valueFromField); + throw new IOException("Block type stored in the buffer: " + valueFromBuf + + ", block type field: " + valueFromField); } } /** * Checks if the block is internally consistent, i.e. the first - * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a - * valid header consistent with the fields. Assumes a packed block structure. - * This function is primary for testing and debugging, and is not - * thread-safe, because it alters the internal buffer pointer. - * Used by tests only. + * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a valid header consistent + * with the fields. Assumes a packed block structure. This function is primary for testing and + * debugging, and is not thread-safe, because it alters the internal buffer pointer. Used by tests + * only. 
*/ void sanityCheck() throws IOException { // Duplicate so no side-effects @@ -560,13 +542,13 @@ public class HFileBlock implements Cacheable { sanityCheckAssertion(dup.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader"); sanityCheckAssertion(dup.getInt(), uncompressedSizeWithoutHeader, - "uncompressedSizeWithoutHeader"); + "uncompressedSizeWithoutHeader"); sanityCheckAssertion(dup.getLong(), prevBlockOffset, "prevBlockOffset"); if (this.fileContext.isUseHBaseChecksum()) { sanityCheckAssertion(dup.get(), this.fileContext.getChecksumType().getCode(), "checksumType"); sanityCheckAssertion(dup.getInt(), this.fileContext.getBytesPerChecksum(), - "bytesPerChecksum"); + "bytesPerChecksum"); sanityCheckAssertion(dup.getInt(), onDiskDataSizeWithHeader, "onDiskDataSizeWithHeader"); } @@ -581,50 +563,44 @@ public class HFileBlock implements Cacheable { int hdrSize = headerSize(); dup.rewind(); if (dup.remaining() != expectedBufLimit && dup.remaining() != expectedBufLimit + hdrSize) { - throw new AssertionError("Invalid buffer capacity: " + dup.remaining() + - ", expected " + expectedBufLimit + " or " + (expectedBufLimit + hdrSize)); + throw new AssertionError("Invalid buffer capacity: " + dup.remaining() + ", expected " + + expectedBufLimit + " or " + (expectedBufLimit + hdrSize)); } } @Override public String toString() { - StringBuilder sb = new StringBuilder() - .append("[") - .append("blockType=").append(blockType) - .append(", fileOffset=").append(offset) - .append(", headerSize=").append(headerSize()) + StringBuilder sb = new StringBuilder().append("[").append("blockType=").append(blockType) + .append(", fileOffset=").append(offset).append(", headerSize=").append(headerSize()) .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader) .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader) - .append(", prevBlockOffset=").append(prevBlockOffset) - .append(", isUseHBaseChecksum=").append(fileContext.isUseHBaseChecksum()); + .append(", prevBlockOffset=").append(prevBlockOffset).append(", isUseHBaseChecksum=") + .append(fileContext.isUseHBaseChecksum()); if (fileContext.isUseHBaseChecksum()) { sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24))) .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1)) .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader); } else { - sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader) - .append("(").append(onDiskSizeWithoutHeader) - .append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); + sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader).append("(") + .append(onDiskSizeWithoutHeader).append("+") + .append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); } String dataBegin; if (buf.hasArray()) { dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(), - Math.min(32, buf.limit() - buf.arrayOffset() - headerSize())); + Math.min(32, buf.limit() - buf.arrayOffset() - headerSize())); } else { ByteBuff bufWithoutHeader = getBufferWithoutHeader(); - byte[] dataBeginBytes = new byte[Math.min(32, - bufWithoutHeader.limit() - bufWithoutHeader.position())]; + byte[] dataBeginBytes = + new byte[Math.min(32, bufWithoutHeader.limit() - bufWithoutHeader.position())]; bufWithoutHeader.get(dataBeginBytes); dataBegin = Bytes.toStringBinary(dataBeginBytes); } sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader()) - .append(", 
totalChecksumBytes=").append(totalChecksumBytes()) - .append(", isUnpacked=").append(isUnpacked()) - .append(", buf=[").append(buf).append("]") - .append(", dataBeginsWith=").append(dataBegin) - .append(", fileContext=").append(fileContext) - .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize) - .append("]"); + .append(", totalChecksumBytes=").append(totalChecksumBytes()).append(", isUnpacked=") + .append(isUnpacked()).append(", buf=[").append(buf).append("]").append(", dataBeginsWith=") + .append(dataBegin).append(", fileContext=").append(fileContext) + .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize).append("]"); return sb.toString(); } @@ -645,7 +621,8 @@ public class HFileBlock implements Cacheable { boolean succ = false; try { HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA - ? reader.getBlockDecodingContext() : reader.getDefaultBlockDecodingContext(); + ? reader.getBlockDecodingContext() + : reader.getDefaultBlockDecodingContext(); // Create a duplicated buffer without the header part. ByteBuff dup = this.buf.duplicate(); dup.position(this.headerSize()); @@ -663,9 +640,8 @@ public class HFileBlock implements Cacheable { } /** - * Always allocates a new buffer of the correct size. Copies header bytes - * from the existing buffer. Does not change header fields. - * Reserve room to keep checksum bytes too. + * Always allocates a new buffer of the correct size. Copies header bytes from the existing + * buffer. Does not change header fields. Reserve room to keep checksum bytes too. */ private void allocateBuffer() { int cksumBytes = totalChecksumBytes(); @@ -741,14 +717,13 @@ public class HFileBlock implements Cacheable { } /** - * Unified version 2 {@link HFile} block writer. The intended usage pattern - * is as follows: + * Unified version 2 {@link HFile} block writer. The intended usage pattern is as follows: *
            *
          1. Construct an {@link HFileBlock.Writer}, providing a compression algorithm. *
          2. Call {@link Writer#startWriting} and get a data stream to write to. *
          3. Write your data into the stream. - *
          4. Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to - * store the serialized block into an external stream. + *
          5. Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to store the + * serialized block into an external stream. *
          6. Repeat to write more blocks. *
          *
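(Outside the patch, a sketch of the call sequence enumerated above, using the Writer signatures visible elsewhere in this file. The wrapper class and method are hypothetical; because Writer's members are package-private, such a sketch could only compile inside org.apache.hadoop.hbase.io.hfile, and conf, encoder, fileContext, out and payload are assumed to be supplied by the caller.)

package org.apache.hadoop.hbase.io.hfile;

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;

final class HFileBlockWriterUsageSketch {
  static void writeOneBlock(Configuration conf, HFileDataBlockEncoder encoder,
      HFileContext fileContext, FSDataOutputStream out, byte[] payload) throws IOException {
    HFileBlock.Writer writer = new HFileBlock.Writer(conf, encoder, fileContext); // 1. construct
    DataOutputStream dos = writer.startWriting(BlockType.DATA);                   // 2. get the stream
    dos.write(payload);                                                           // 3. write the data
    writer.writeHeaderAndData(out);                                               // 4. flush header + data (+ checksums)
    // Repeat startWriting()/write/writeHeaderAndData() for further blocks.
  }
}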
          @@ -768,42 +743,39 @@ public class HFileBlock implements Cacheable { private HFileBlockEncodingContext dataBlockEncodingCtx; - /** block encoding context for non-data blocks*/ + /** block encoding context for non-data blocks */ private HFileBlockDefaultEncodingContext defaultBlockEncodingCtx; /** - * The stream we use to accumulate data into a block in an uncompressed format. - * We reset this stream at the end of each block and reuse it. The - * header is written as the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes into this - * stream. + * The stream we use to accumulate data into a block in an uncompressed format. We reset this + * stream at the end of each block and reuse it. The header is written as the first + * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes into this stream. */ private ByteArrayOutputStream baosInMemory; /** - * Current block type. Set in {@link #startWriting(BlockType)}. Could be - * changed in {@link #finishBlock()} from {@link BlockType#DATA} - * to {@link BlockType#ENCODED_DATA}. + * Current block type. Set in {@link #startWriting(BlockType)}. Could be changed in + * {@link #finishBlock()} from {@link BlockType#DATA} to {@link BlockType#ENCODED_DATA}. */ private BlockType blockType; /** - * A stream that we write uncompressed bytes to, which compresses them and - * writes them to {@link #baosInMemory}. + * A stream that we write uncompressed bytes to, which compresses them and writes them to + * {@link #baosInMemory}. */ private DataOutputStream userDataStream; /** - * Bytes to be written to the file system, including the header. Compressed - * if compression is turned on. It also includes the checksum data that - * immediately follows the block data. (header + data + checksums) + * Bytes to be written to the file system, including the header. Compressed if compression is + * turned on. It also includes the checksum data that immediately follows the block data. + * (header + data + checksums) */ private ByteArrayOutputStream onDiskBlockBytesWithHeader; /** - * The size of the checksum data on disk. It is used only if data is - * not compressed. If data is compressed, then the checksums are already - * part of onDiskBytesWithHeader. If data is uncompressed, then this - * variable stores the checksum data for this block. + * The size of the checksum data on disk. It is used only if data is not compressed. If data is + * compressed, then the checksums are already part of onDiskBytesWithHeader. If data is + * uncompressed, then this variable stores the checksum data for this block. */ private byte[] onDiskChecksum = HConstants.EMPTY_BYTE_ARRAY; @@ -814,14 +786,13 @@ public class HFileBlock implements Cacheable { private long startOffset; /** - * Offset of previous block by block type. Updated when the next block is - * started. + * Offset of previous block by block type. Updated when the next block is started. 
*/ private long[] prevOffsetByType; /** The offset of the previous block of the same type */ private long prevOffset; - /** Meta data that holds information about the hfileblock**/ + /** Meta data that holds information about the hfileblock **/ private HFileContext fileContext; private final ByteBuffAllocator allocator; @@ -841,20 +812,20 @@ public class HFileBlock implements Cacheable { * @param dataBlockEncoder data block encoding algorithm to use */ public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, - HFileContext fileContext) { + HFileContext fileContext) { this(conf, dataBlockEncoder, fileContext, ByteBuffAllocator.HEAP); } public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, - HFileContext fileContext, ByteBuffAllocator allocator) { + HFileContext fileContext, ByteBuffAllocator allocator) { if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) { - throw new RuntimeException("Unsupported value of bytesPerChecksum. " + - " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + - fileContext.getBytesPerChecksum()); + throw new RuntimeException("Unsupported value of bytesPerChecksum. " + " Minimum is " + + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + + fileContext.getBytesPerChecksum()); } this.allocator = allocator; - this.dataBlockEncoder = dataBlockEncoder != null? - dataBlockEncoder: NoOpDataBlockEncoder.INSTANCE; + this.dataBlockEncoder = + dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE; this.dataBlockEncodingCtx = this.dataBlockEncoder.newDataBlockEncodingContext(conf, HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); // TODO: This should be lazily instantiated @@ -873,11 +844,9 @@ public class HFileBlock implements Cacheable { /** * Starts writing into the block. The previous block's data is discarded. - * * @return the stream the user can write their data into */ - DataOutputStream startWriting(BlockType newBlockType) - throws IOException { + DataOutputStream startWriting(BlockType newBlockType) throws IOException { if (state == State.BLOCK_READY && startOffset != -1) { // We had a previous block that was written to a stream at a specific // offset. Save that offset as the last offset of a block of that type. @@ -903,18 +872,17 @@ public class HFileBlock implements Cacheable { /** * Writes the Cell to this block */ - void write(Cell cell) throws IOException{ + void write(Cell cell) throws IOException { expectState(State.WRITING); this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream); } /** - * Transitions the block writer from the "writing" state to the "block - * ready" state. Does nothing if a block is already finished. + * Transitions the block writer from the "writing" state to the "block ready" state. Does + * nothing if a block is already finished. */ void ensureBlockReady() throws IOException { - Preconditions.checkState(state != State.INIT, - "Unexpected state: " + state); + Preconditions.checkState(state != State.INIT, "Unexpected state: " + state); if (state == State.BLOCK_READY) { return; @@ -925,15 +893,14 @@ public class HFileBlock implements Cacheable { } /** - * Finish up writing of the block. - * Flushes the compressing stream (if using compression), fills out the header, - * does any compression/encryption of bytes to flush out to disk, and manages + * Finish up writing of the block. 
Flushes the compressing stream (if using compression), fills + * out the header, does any compression/encryption of bytes to flush out to disk, and manages * the cache on write content, if applicable. Sets block write state to "block ready". */ private void finishBlock() throws IOException { if (blockType == BlockType.DATA) { this.dataBlockEncoder.endBlockEncoding(dataBlockEncodingCtx, userDataStream, - baosInMemory.getBuffer(), blockType); + baosInMemory.getBuffer(), blockType); blockType = dataBlockEncodingCtx.getBlockType(); } userDataStream.flush(); @@ -944,11 +911,11 @@ public class HFileBlock implements Cacheable { state = State.BLOCK_READY; Bytes compressAndEncryptDat; if (blockType == BlockType.DATA || blockType == BlockType.ENCODED_DATA) { - compressAndEncryptDat = dataBlockEncodingCtx. - compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); + compressAndEncryptDat = + dataBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); } else { - compressAndEncryptDat = defaultBlockEncodingCtx. - compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); + compressAndEncryptDat = defaultBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), + 0, baosInMemory.size()); } if (compressAndEncryptDat == null) { compressAndEncryptDat = new Bytes(baosInMemory.getBuffer(), 0, baosInMemory.size()); @@ -958,34 +925,32 @@ public class HFileBlock implements Cacheable { } onDiskBlockBytesWithHeader.reset(); onDiskBlockBytesWithHeader.write(compressAndEncryptDat.get(), - compressAndEncryptDat.getOffset(), compressAndEncryptDat.getLength()); + compressAndEncryptDat.getOffset(), compressAndEncryptDat.getLength()); // Calculate how many bytes we need for checksum on the tail of the block. - int numBytes = (int) ChecksumUtil.numBytes( - onDiskBlockBytesWithHeader.size(), - fileContext.getBytesPerChecksum()); + int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), + fileContext.getBytesPerChecksum()); // Put the header for the on disk bytes; header currently is unfilled-out - putHeader(onDiskBlockBytesWithHeader, - onDiskBlockBytesWithHeader.size() + numBytes, - baosInMemory.size(), onDiskBlockBytesWithHeader.size()); + putHeader(onDiskBlockBytesWithHeader, onDiskBlockBytesWithHeader.size() + numBytes, + baosInMemory.size(), onDiskBlockBytesWithHeader.size()); if (onDiskChecksum.length != numBytes) { onDiskChecksum = new byte[numBytes]; } - ChecksumUtil.generateChecksums( - onDiskBlockBytesWithHeader.getBuffer(), 0,onDiskBlockBytesWithHeader.size(), - onDiskChecksum, 0, fileContext.getChecksumType(), fileContext.getBytesPerChecksum()); + ChecksumUtil.generateChecksums(onDiskBlockBytesWithHeader.getBuffer(), 0, + onDiskBlockBytesWithHeader.size(), onDiskChecksum, 0, fileContext.getChecksumType(), + fileContext.getBytesPerChecksum()); } /** * Put the header into the given byte array at the given offset. 
- * @param onDiskSize size of the block on disk header + data + checksum - * @param uncompressedSize size of the block after decompression (but - * before optional data block decoding) including header - * @param onDiskDataSize size of the block on disk with header - * and data but not including the checksums + * @param onDiskSize size of the block on disk header + data + checksum + * @param uncompressedSize size of the block after decompression (but before optional data block + * decoding) including header + * @param onDiskDataSize size of the block on disk with header and data but not including the + * checksums */ - private void putHeader(byte[] dest, int offset, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { + private void putHeader(byte[] dest, int offset, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { offset = blockType.put(dest, offset); offset = Bytes.putInt(dest, offset, onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE); offset = Bytes.putInt(dest, offset, uncompressedSize - HConstants.HFILEBLOCK_HEADER_SIZE); @@ -995,8 +960,8 @@ public class HFileBlock implements Cacheable { Bytes.putInt(dest, offset, onDiskDataSize); } - private void putHeader(ByteBuff buff, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { + private void putHeader(ByteBuff buff, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { buff.rewind(); blockType.write(buff); buff.putInt(onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE); @@ -1007,36 +972,33 @@ public class HFileBlock implements Cacheable { buff.putInt(onDiskDataSize); } - private void putHeader(ByteArrayOutputStream dest, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { - putHeader(dest.getBuffer(),0, onDiskSize, uncompressedSize, onDiskDataSize); + private void putHeader(ByteArrayOutputStream dest, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { + putHeader(dest.getBuffer(), 0, onDiskSize, uncompressedSize, onDiskDataSize); } /** - * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records - * the offset of this block so that it can be referenced in the next block - * of the same type. + * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records the offset of this + * block so that it can be referenced in the next block of the same type. */ void writeHeaderAndData(FSDataOutputStream out) throws IOException { long offset = out.getPos(); if (startOffset != UNSET && offset != startOffset) { throw new IOException("A " + blockType + " block written to a " - + "stream twice, first at offset " + startOffset + ", then at " - + offset); + + "stream twice, first at offset " + startOffset + ", then at " + offset); } startOffset = offset; finishBlockAndWriteHeaderAndData(out); } /** - * Writes the header and the compressed data of this block (or uncompressed - * data when not using compression) into the given stream. Can be called in - * the "writing" state or in the "block ready" state. If called in the - * "writing" state, transitions the writer to the "block ready" state. + * Writes the header and the compressed data of this block (or uncompressed data when not using + * compression) into the given stream. Can be called in the "writing" state or in the "block + * ready" state. If called in the "writing" state, transitions the writer to the "block ready" + * state. 
* @param out the output stream to write the */ - protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) - throws IOException { + protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) throws IOException { ensureBlockReady(); long startTime = EnvironmentEdgeManager.currentTime(); out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size()); @@ -1045,25 +1007,21 @@ public class HFileBlock implements Cacheable { } /** - * Returns the header or the compressed data (or uncompressed data when not - * using compression) as a byte array. Can be called in the "writing" state - * or in the "block ready" state. If called in the "writing" state, - * transitions the writer to the "block ready" state. This returns - * the header + data + checksums stored on disk. - * + * Returns the header or the compressed data (or uncompressed data when not using compression) + * as a byte array. Can be called in the "writing" state or in the "block ready" state. If + * called in the "writing" state, transitions the writer to the "block ready" state. This + * returns the header + data + checksums stored on disk. * @return header and data as they would be stored on disk in a byte array */ byte[] getHeaderAndDataForTest() throws IOException { ensureBlockReady(); // This is not very optimal, because we are doing an extra copy. // But this method is used only by unit tests. - byte[] output = - new byte[onDiskBlockBytesWithHeader.size() - + onDiskChecksum.length]; + byte[] output = new byte[onDiskBlockBytesWithHeader.size() + onDiskChecksum.length]; System.arraycopy(onDiskBlockBytesWithHeader.getBuffer(), 0, output, 0, - onDiskBlockBytesWithHeader.size()); - System.arraycopy(onDiskChecksum, 0, output, - onDiskBlockBytesWithHeader.size(), onDiskChecksum.length); + onDiskBlockBytesWithHeader.size()); + System.arraycopy(onDiskChecksum, 0, output, onDiskBlockBytesWithHeader.size(), + onDiskChecksum.length); return output; } @@ -1082,25 +1040,21 @@ public class HFileBlock implements Cacheable { } /** - * Returns the on-disk size of the data portion of the block. This is the - * compressed size if compression is enabled. Can only be called in the - * "block ready" state. Header is not compressed, and its size is not - * included in the return value. - * + * Returns the on-disk size of the data portion of the block. This is the compressed size if + * compression is enabled. Can only be called in the "block ready" state. Header is not + * compressed, and its size is not included in the return value. * @return the on-disk size of the block, not including the header. */ int getOnDiskSizeWithoutHeader() { expectState(State.BLOCK_READY); - return onDiskBlockBytesWithHeader.size() + - onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE; + return onDiskBlockBytesWithHeader.size() + onDiskChecksum.length + - HConstants.HFILEBLOCK_HEADER_SIZE; } /** - * Returns the on-disk size of the block. Can only be called in the - * "block ready" state. - * - * @return the on-disk size of the block ready to be written, including the - * header size, the data and the checksum data. + * Returns the on-disk size of the block. Can only be called in the "block ready" state. + * @return the on-disk size of the block ready to be written, including the header size, the + * data and the checksum data. 
*/ int getOnDiskSizeWithHeader() { expectState(State.BLOCK_READY); @@ -1123,16 +1077,14 @@ public class HFileBlock implements Cacheable { return baosInMemory.size(); } - /** @return true if a block is being written */ + /** @return true if a block is being written */ boolean isWriting() { return state == State.WRITING; } /** - * Returns the number of bytes written into the current block so far, or - * zero if not writing the block at the moment. Note that this will return - * zero in the "block ready" state as well. - * + * Returns the number of bytes written into the current block so far, or zero if not writing the + * block at the moment. Note that this will return zero in the "block ready" state as well. * @return the number of bytes written */ public int encodedBlockSizeWritten() { @@ -1140,10 +1092,8 @@ public class HFileBlock implements Cacheable { } /** - * Returns the number of bytes written into the current block so far, or - * zero if not writing the block at the moment. Note that this will return - * zero in the "block ready" state as well. - * + * Returns the number of bytes written into the current block so far, or zero if not writing the + * block at the moment. Note that this will return zero in the "block ready" state as well. * @return the number of bytes written */ int blockSizeWritten() { @@ -1151,22 +1101,20 @@ public class HFileBlock implements Cacheable { } /** - * Clones the header followed by the uncompressed data, even if using - * compression. This is needed for storing uncompressed blocks in the block - * cache. Can be called in the "writing" state or the "block ready" state. - * Returns only the header and data, does not include checksum data. - * + * Clones the header followed by the uncompressed data, even if using compression. This is + * needed for storing uncompressed blocks in the block cache. Can be called in the "writing" + * state or the "block ready" state. Returns only the header and data, does not include checksum + * data. * @return Returns an uncompressed block ByteBuff for caching on write */ ByteBuff cloneUncompressedBufferWithHeader() { expectState(State.BLOCK_READY); ByteBuff bytebuff = allocator.allocate(baosInMemory.size()); baosInMemory.toByteBuff(bytebuff); - int numBytes = (int) ChecksumUtil.numBytes( - onDiskBlockBytesWithHeader.size(), - fileContext.getBytesPerChecksum()); - putHeader(bytebuff, onDiskBlockBytesWithHeader.size() + numBytes, - baosInMemory.size(), onDiskBlockBytesWithHeader.size()); + int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), + fileContext.getBytesPerChecksum()); + putHeader(bytebuff, onDiskBlockBytesWithHeader.size() + numBytes, baosInMemory.size(), + onDiskBlockBytesWithHeader.size()); bytebuff.rewind(); return bytebuff; } @@ -1187,53 +1135,45 @@ public class HFileBlock implements Cacheable { private void expectState(State expectedState) { if (state != expectedState) { - throw new IllegalStateException("Expected state: " + expectedState + - ", actual state: " + state); + throw new IllegalStateException( + "Expected state: " + expectedState + ", actual state: " + state); } } /** - * Takes the given {@link BlockWritable} instance, creates a new block of - * its appropriate type, writes the writable into this block, and flushes - * the block into the output stream. The writer is instructed not to buffer - * uncompressed bytes for cache-on-write. 
- * - * @param bw the block-writable object to write as a block + * Takes the given {@link BlockWritable} instance, creates a new block of its appropriate type, + * writes the writable into this block, and flushes the block into the output stream. The writer + * is instructed not to buffer uncompressed bytes for cache-on-write. + * @param bw the block-writable object to write as a block * @param out the file system output stream */ - void writeBlock(BlockWritable bw, FSDataOutputStream out) - throws IOException { + void writeBlock(BlockWritable bw, FSDataOutputStream out) throws IOException { bw.writeToBlock(startWriting(bw.getBlockType())); writeHeaderAndData(out); } /** - * Creates a new HFileBlock. Checksums have already been validated, so - * the byte buffer passed into the constructor of this newly created - * block does not have checksum data even though the header minor - * version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a - * 0 value in bytesPerChecksum. This method copies the on-disk or - * uncompressed data to build the HFileBlock which is used only - * while writing blocks and caching. - * - *
          TODO: Should there be an option where a cache can ask that hbase preserve block - * checksums for checking after a block comes out of the cache? Otehrwise, cache is responsible - * for blocks being wholesome (ECC memory or if file-backed, it does checksumming). + * Creates a new HFileBlock. Checksums have already been validated, so the byte buffer passed + * into the constructor of this newly created block does not have checksum data even though the + * header minor version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a 0 value + * in bytesPerChecksum. This method copies the on-disk or uncompressed data to build the + * HFileBlock which is used only while writing blocks and caching. + *
          + * TODO: Should there be an option where a cache can ask that hbase preserve block checksums for + * checking after a block comes out of the cache? Otehrwise, cache is responsible for blocks + * being wholesome (ECC memory or if file-backed, it does checksumming). */ HFileBlock getBlockForCaching(CacheConfig cacheConf) { - HFileContext newContext = new HFileContextBuilder() - .withBlockSize(fileContext.getBlocksize()) - .withBytesPerCheckSum(0) - .withChecksumType(ChecksumType.NULL) // no checksums in cached data - .withCompression(fileContext.getCompression()) - .withDataBlockEncoding(fileContext.getDataBlockEncoding()) - .withHBaseCheckSum(fileContext.isUseHBaseChecksum()) - .withCompressTags(fileContext.isCompressTags()) - .withIncludesMvcc(fileContext.isIncludesMvcc()) - .withIncludesTags(fileContext.isIncludesTags()) - .withColumnFamily(fileContext.getColumnFamily()) - .withTableName(fileContext.getTableName()) - .build(); + HFileContext newContext = new HFileContextBuilder().withBlockSize(fileContext.getBlocksize()) + .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached data + .withCompression(fileContext.getCompression()) + .withDataBlockEncoding(fileContext.getDataBlockEncoding()) + .withHBaseCheckSum(fileContext.isUseHBaseChecksum()) + .withCompressTags(fileContext.isCompressTags()) + .withIncludesMvcc(fileContext.isIncludesMvcc()) + .withIncludesTags(fileContext.isIncludesTags()) + .withColumnFamily(fileContext.getColumnFamily()).withTableName(fileContext.getTableName()) + .build(); // Build the HFileBlock. HFileBlockBuilder builder = new HFileBlockBuilder(); ByteBuff buff; @@ -1243,18 +1183,13 @@ public class HFileBlock implements Cacheable { buff = cloneUncompressedBufferWithHeader(); } return builder.withBlockType(blockType) - .withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader()) - .withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader()) - .withPrevBlockOffset(prevOffset) - .withByteBuff(buff) - .withFillHeader(FILL_HEADER) - .withOffset(startOffset) - .withNextBlockOnDiskSize(UNSET) - .withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length) - .withHFileContext(newContext) - .withByteBuffAllocator(cacheConf.getByteBuffAllocator()) - .withShared(!buff.hasArray()) - .build(); + .withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader()) + .withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader()) + .withPrevBlockOffset(prevOffset).withByteBuff(buff).withFillHeader(FILL_HEADER) + .withOffset(startOffset).withNextBlockOnDiskSize(UNSET) + .withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length) + .withHFileContext(newContext).withByteBuffAllocator(cacheConf.getByteBuffAllocator()) + .withShared(!buff.hasArray()).build(); } } @@ -1264,9 +1199,7 @@ public class HFileBlock implements Cacheable { BlockType getBlockType(); /** - * Writes the block to the provided stream. Must not write any magic - * records. - * + * Writes the block to the provided stream. Must not write any magic records. * @param out a stream to write uncompressed data into */ void writeToBlock(DataOutput out) throws IOException; @@ -1304,37 +1237,37 @@ public class HFileBlock implements Cacheable { /** * Reads the block at the given offset in the file with the given on-disk size and uncompressed * size. 
- * @param offset of the file to read - * @param onDiskSize the on-disk size of the entire block, including all applicable headers, or - * -1 if unknown - * @param pread true to use pread, otherwise use the stream read. + * @param offset of the file to read + * @param onDiskSize the on-disk size of the entire block, including all applicable headers, + * or -1 if unknown + * @param pread true to use pread, otherwise use the stream read. * @param updateMetrics update the metrics or not. - * @param intoHeap allocate the block's ByteBuff by {@link ByteBuffAllocator} or JVM heap. For - * LRUBlockCache, we must ensure that the block to cache is an heap one, because the - * memory occupation is based on heap now, also for {@link CombinedBlockCache}, we use - * the heap LRUBlockCache as L1 cache to cache small blocks such as IndexBlock or - * MetaBlock for faster access. So introduce an flag here to decide whether allocate - * from JVM heap or not so that we can avoid an extra off-heap to heap memory copy when - * using LRUBlockCache. For most cases, we known what's the expected block type we'll - * read, while for some special case (Example: HFileReaderImpl#readNextDataBlock()), we - * cannot pre-decide what's the expected block type, then we can only allocate block's - * ByteBuff from {@link ByteBuffAllocator} firstly, and then when caching it in - * {@link LruBlockCache} we'll check whether the ByteBuff is from heap or not, if not - * then we'll clone it to an heap one and cache it. + * @param intoHeap allocate the block's ByteBuff by {@link ByteBuffAllocator} or JVM heap. + * For LRUBlockCache, we must ensure that the block to cache is an heap + * one, because the memory occupation is based on heap now, also for + * {@link CombinedBlockCache}, we use the heap LRUBlockCache as L1 cache to + * cache small blocks such as IndexBlock or MetaBlock for faster access. So + * introduce an flag here to decide whether allocate from JVM heap or not + * so that we can avoid an extra off-heap to heap memory copy when using + * LRUBlockCache. For most cases, we known what's the expected block type + * we'll read, while for some special case (Example: + * HFileReaderImpl#readNextDataBlock()), we cannot pre-decide what's the + * expected block type, then we can only allocate block's ByteBuff from + * {@link ByteBuffAllocator} firstly, and then when caching it in + * {@link LruBlockCache} we'll check whether the ByteBuff is from heap or + * not, if not then we'll clone it to an heap one and cache it. * @return the newly read block */ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean updateMetrics, - boolean intoHeap) throws IOException; + boolean intoHeap) throws IOException; /** - * Creates a block iterator over the given portion of the {@link HFile}. - * The iterator returns blocks starting with offset such that offset <= - * startOffset < endOffset. Returned blocks are always unpacked. - * Used when no hfile index available; e.g. reading in the hfile index - * blocks themselves on file open. - * + * Creates a block iterator over the given portion of the {@link HFile}. The iterator returns + * blocks starting with offset such that offset <= startOffset < endOffset. Returned + * blocks are always unpacked. Used when no hfile index available; e.g. reading in the hfile + * index blocks themselves on file open. 
* @param startOffset the offset of the block to start iteration with - * @param endOffset the offset to end iteration at (exclusive) + * @param endOffset the offset to end iteration at (exclusive) * @return an iterator of blocks between the two given offsets */ BlockIterator blockRange(long startOffset, long endOffset); @@ -1349,6 +1282,7 @@ public class HFileBlock implements Cacheable { HFileBlockDecodingContext getDefaultBlockDecodingContext(); void setIncludesMemStoreTS(boolean includesMemstoreTS); + void setDataBlockEncoder(HFileDataBlockEncoder encoder, Configuration conf); /** @@ -1359,12 +1293,10 @@ public class HFileBlock implements Cacheable { } /** - * Data-structure to use caching the header of the NEXT block. Only works if next read - * that comes in here is next in sequence in this block. - * - * When we read, we read current block and the next blocks' header. We do this so we have - * the length of the next block to read if the hfile index is not available (rare, at - * hfile open only). + * Data-structure to use caching the header of the NEXT block. Only works if next read that comes + * in here is next in sequence in this block. When we read, we read current block and the next + * blocks' header. We do this so we have the length of the next block to read if the hfile index + * is not available (rare, at hfile open only). */ private static class PrefetchedHeader { long offset = -1; @@ -1381,8 +1313,10 @@ public class HFileBlock implements Cacheable { * Reads version 2 HFile blocks from the filesystem. */ static class FSReaderImpl implements FSReader { - /** The file system stream of the underlying {@link HFile} that - * does or doesn't do checksum validations in the filesystem */ + /** + * The file system stream of the underlying {@link HFile} that does or doesn't do checksum + * validations in the filesystem + */ private FSDataInputStreamWrapper streamWrapper; private HFileBlockDecodingContext encodedBlockDecodingCtx; @@ -1391,10 +1325,9 @@ public class HFileBlock implements Cacheable { private final HFileBlockDefaultDecodingContext defaultDecodingCtx; /** - * Cache of the NEXT header after this. Check it is indeed next blocks header - * before using it. TODO: Review. This overread into next block to fetch - * next blocks header seems unnecessary given we usually get the block size - * from the hfile index. Review! + * Cache of the NEXT header after this. Check it is indeed next blocks header before using it. + * TODO: Review. This overread into next block to fetch next blocks header seems unnecessary + * given we usually get the block size from the hfile index. Review! 
*/ private AtomicReference prefetchedHeader = new AtomicReference<>(new PrefetchedHeader()); @@ -1416,8 +1349,8 @@ public class HFileBlock implements Cacheable { private final Lock streamLock = new ReentrantLock(); - FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, - ByteBuffAllocator allocator, Configuration conf) throws IOException { + FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, ByteBuffAllocator allocator, + Configuration conf) throws IOException { this.fileSize = readerContext.getFileSize(); this.hfs = readerContext.getFileSystem(); if (readerContext.getFilePath() != null) { @@ -1466,7 +1399,7 @@ public class HFileBlock implements Cacheable { HFileBlock blk = nextBlock(); if (blk.getBlockType() != blockType) { throw new IOException( - "Expected block of type " + blockType + " but found " + blk.getBlockType()); + "Expected block of type " + blockType + " but found " + blk.getBlockType()); } return blk; } @@ -1487,25 +1420,25 @@ public class HFileBlock implements Cacheable { * Does a positional read or a seek and read into the given byte buffer. We need take care that * we will call the {@link ByteBuff#release()} for every exit to deallocate the ByteBuffers, * otherwise the memory leak may happen. - * @param dest destination buffer - * @param size size of read + * @param dest destination buffer + * @param size size of read * @param peekIntoNextBlock whether to read the next block's on-disk size - * @param fileOffset position in the stream to read at - * @param pread whether we should do a positional read - * @param istream The input source of data + * @param fileOffset position in the stream to read at + * @param pread whether we should do a positional read + * @param istream The input source of data * @return true to indicate the destination buffer include the next block header, otherwise only * include the current block data without the next block header. * @throws IOException if any IO error happen. */ protected boolean readAtOffset(FSDataInputStream istream, ByteBuff dest, int size, - boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException { + boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException { if (!pread) { // Seek + read. Better for scanning. HFileUtil.seekOnMultipleSources(istream, fileOffset); long realOffset = istream.getPos(); if (realOffset != fileOffset) { throw new IOException("Tried to seek to " + fileOffset + " to read " + size - + " bytes, but pos=" + realOffset + " after seek"); + + " bytes, but pos=" + realOffset + " after seek"); } if (!peekIntoNextBlock) { BlockIOUtils.readFully(dest, istream, size); @@ -1532,18 +1465,19 @@ public class HFileBlock implements Cacheable { /** * Reads a version 2 block (version 1 blocks not supported and not expected). Tries to do as * little memory allocation as possible, using the provided on-disk size. - * @param offset the offset in the stream to read at + * @param offset the offset in the stream to read at * @param onDiskSizeWithHeaderL the on-disk size of the block, including the header, or -1 if - * unknown; i.e. when iterating over blocks reading in the file metadata info. - * @param pread whether to use a positional read - * @param updateMetrics whether to update the metrics - * @param intoHeap allocate ByteBuff of block from heap or off-heap. + * unknown; i.e. when iterating over blocks reading in the file + * metadata info. 
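readAtOffset, documented above, chooses between a positional read (friendly to concurrent random gets) and seek plus sequential read (better for scans), optionally over-reading the next block's header. Below is a simplified stand-in built on java.nio.channels.FileChannel rather than FSDataInputStream; treating EOF as "no peek" is a simplification of the real error handling, and HEADER_SIZE stands in for hdrSize.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Hedged sketch of the two read paths described above.
public class ReadAtOffsetSketch {
  static final int HEADER_SIZE = 33;

  /**
   * Reads size bytes at fileOffset, optionally over-reading the next block's header.
   * Returns true if the destination also contains the next block's header.
   */
  static boolean readAtOffset(FileChannel ch, ByteBuffer dest, int size,
      boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException {
    int toRead = size + (peekIntoNextBlock ? HEADER_SIZE : 0);
    dest.limit(dest.position() + toRead);
    if (pread) {
      // Positional read: does not disturb the channel position, good for concurrent gets.
      long pos = fileOffset;
      while (dest.hasRemaining()) {
        int n = ch.read(dest, pos);
        if (n < 0) return false; // simplification: treat EOF as "no peek available"
        pos += n;
      }
    } else {
      // Seek + read: cheaper for sequential scans that keep moving forward.
      ch.position(fileOffset);
      while (dest.hasRemaining()) {
        if (ch.read(dest) < 0) return false;
      }
    }
    return peekIntoNextBlock;
  }

  public static void main(String[] args) throws IOException {
    Path p = java.nio.file.Files.createTempFile("blocks", ".bin");
    java.nio.file.Files.write(p, new byte[4096]);
    try (FileChannel ch = FileChannel.open(p, StandardOpenOption.READ)) {
      ByteBuffer dest = ByteBuffer.allocate(1024 + HEADER_SIZE);
      boolean gotNextHeader = readAtOffset(ch, dest, 1024, true, 0, true);
      System.out.println("next header captured: " + gotNextHeader);
    }
  }
}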
+ * @param pread whether to use a positional read + * @param updateMetrics whether to update the metrics + * @param intoHeap allocate ByteBuff of block from heap or off-heap. * @see FSReader#readBlockData(long, long, boolean, boolean, boolean) for more details about the * useHeap. */ @Override public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean pread, - boolean updateMetrics, boolean intoHeap) throws IOException { + boolean updateMetrics, boolean intoHeap) throws IOException { // Get a copy of the current state of whether to validate // hbase checksums or not for this read call. This is not // thread-safe but the one constaint is that if we decide @@ -1555,17 +1489,13 @@ public class HFileBlock implements Cacheable { HFileBlock blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread, doVerificationThruHBaseChecksum, updateMetrics, intoHeap); if (blk == null) { - HFile.LOG.warn("HBase checksum verification failed for file " + - pathName + " at offset " + - offset + " filesize " + fileSize + - ". Retrying read with HDFS checksums turned on..."); + HFile.LOG.warn("HBase checksum verification failed for file " + pathName + " at offset " + + offset + " filesize " + fileSize + ". Retrying read with HDFS checksums turned on..."); if (!doVerificationThruHBaseChecksum) { - String msg = "HBase checksum verification failed for file " + - pathName + " at offset " + - offset + " filesize " + fileSize + - " but this cannot happen because doVerify is " + - doVerificationThruHBaseChecksum; + String msg = "HBase checksum verification failed for file " + pathName + " at offset " + + offset + " filesize " + fileSize + " but this cannot happen because doVerify is " + + doVerificationThruHBaseChecksum; HFile.LOG.warn(msg); throw new IOException(msg); // cannot happen case here } @@ -1582,15 +1512,14 @@ public class HFileBlock implements Cacheable { blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread, doVerificationThruHBaseChecksum, updateMetrics, intoHeap); if (blk != null) { - HFile.LOG.warn("HDFS checksum verification succeeded for file " + - pathName + " at offset " + - offset + " filesize " + fileSize); + HFile.LOG.warn("HDFS checksum verification succeeded for file " + pathName + " at offset " + + offset + " filesize " + fileSize); } } if (blk == null && !doVerificationThruHBaseChecksum) { - String msg = "readBlockData failed, possibly due to " + - "checksum verification failed for file " + pathName + - " at offset " + offset + " filesize " + fileSize; + String msg = + "readBlockData failed, possibly due to " + "checksum verification failed for file " + + pathName + " at offset " + offset + " filesize " + fileSize; HFile.LOG.warn(msg); throw new IOException(msg); } @@ -1610,37 +1539,38 @@ public class HFileBlock implements Cacheable { * @return Check onDiskSizeWithHeaderL size is healthy and then return it as an int */ private static int checkAndGetSizeAsInt(final long onDiskSizeWithHeaderL, final int hdrSize) - throws IOException { - if ((onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1) - || onDiskSizeWithHeaderL >= Integer.MAX_VALUE) { - throw new IOException("Invalid onDisksize=" + onDiskSizeWithHeaderL - + ": expected to be at least " + hdrSize + throws IOException { + if ( + (onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1) + || onDiskSizeWithHeaderL >= Integer.MAX_VALUE + ) { + throw new IOException( + "Invalid onDisksize=" + onDiskSizeWithHeaderL + ": expected to be at least " + hdrSize + " and at most " + 
Integer.MAX_VALUE + ", or -1"); } - return (int)onDiskSizeWithHeaderL; + return (int) onDiskSizeWithHeaderL; } /** - * Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something - * is not right. + * Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something is + * not right. */ private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuff headerBuf, - final long offset, boolean verifyChecksum) - throws IOException { + final long offset, boolean verifyChecksum) throws IOException { // Assert size provided aligns with what is in the header int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum); if (passedIn != fromHeader) { - throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader + - ", offset=" + offset + ", fileContext=" + this.fileContext); + throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader + + ", offset=" + offset + ", fileContext=" + this.fileContext); } } /** - * Check atomic reference cache for this block's header. Cache only good if next - * read coming through is next in sequence in the block. We read next block's - * header on the tail of reading the previous block to save a seek. Otherwise, - * we have to do a seek to read the header before we can pull in the block OR - * we have to backup the stream because we over-read (the next block's header). + * Check atomic reference cache for this block's header. Cache only good if next read coming + * through is next in sequence in the block. We read next block's header on the tail of reading + * the previous block to save a seek. Otherwise, we have to do a seek to read the header before + * we can pull in the block OR we have to backup the stream because we over-read (the next + * block's header). * @see PrefetchedHeader * @return The cached block header or null if not found. * @see #cacheNextBlockHeader(long, ByteBuff, int, int) @@ -1655,8 +1585,8 @@ public class HFileBlock implements Cacheable { * @see #getCachedHeader(long) * @see PrefetchedHeader */ - private void cacheNextBlockHeader(final long offset, - ByteBuff onDiskBlock, int onDiskSizeWithHeader, int headerLength) { + private void cacheNextBlockHeader(final long offset, ByteBuff onDiskBlock, + int onDiskSizeWithHeader, int headerLength) { PrefetchedHeader ph = new PrefetchedHeader(); ph.offset = offset; onDiskBlock.get(onDiskSizeWithHeader, ph.header, 0, headerLength); @@ -1664,12 +1594,11 @@ public class HFileBlock implements Cacheable { } private int getNextBlockOnDiskSize(boolean readNextHeader, ByteBuff onDiskBlock, - int onDiskSizeWithHeader) { + int onDiskSizeWithHeader) { int nextBlockOnDiskSize = -1; if (readNextHeader) { nextBlockOnDiskSize = - onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) - + hdrSize; + onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) + hdrSize; } return nextBlockOnDiskSize; } @@ -1680,34 +1609,37 @@ public class HFileBlock implements Cacheable { /** * Reads a version 2 block. - * @param offset the offset in the stream to read at. + * @param offset the offset in the stream to read at. * @param onDiskSizeWithHeaderL the on-disk size of the block, including the header and - * checksums if present or -1 if unknown (as a long). Can be -1 if we are doing raw - * iteration of blocks as when loading up file metadata; i.e. the first read of a new - * file. Usually non-null gotten from the file index. 
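The retry path above reads the block with HBase checksum verification first and, if that fails, re-reads it relying on HDFS checksums before giving up. The control flow reduces to the sketch below; BlockReader is a hypothetical stand-in for readBlockDataInternal, and the stream switching plus the later re-enabling of HBase checksums is omitted.

import java.io.IOException;

// Hedged sketch of the two-phase checksum fallback described above.
public class ChecksumFallbackSketch {

  interface BlockReader {
    /** Returns the block bytes, or null when HBase-level checksum verification fails. */
    byte[] read(long offset, boolean verifyHBaseChecksum) throws IOException;
  }

  static byte[] readWithFallback(BlockReader reader, long offset, String path)
      throws IOException {
    byte[] blk = reader.read(offset, true); // first attempt: HBase checksums
    if (blk == null) {
      System.err.println("HBase checksum verification failed for " + path + " at offset "
        + offset + ". Retrying read with HDFS checksums turned on...");
      // Second attempt: let the filesystem (HDFS) verify checksums instead.
      blk = reader.read(offset, false);
      if (blk == null) {
        throw new IOException("readBlockData failed, possibly due to checksum verification "
          + "failed for file " + path + " at offset " + offset);
      }
    }
    return blk;
  }

  public static void main(String[] args) throws IOException {
    // Simulated reader: HBase-level verification "fails", HDFS-level read succeeds.
    BlockReader flaky = (offset, verifyHBase) -> verifyHBase ? null : new byte[16];
    System.out.println("recovered block of " + readWithFallback(flaky, 0, "demo.hfile").length
      + " bytes via HDFS checksums");
  }
}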
- * @param pread whether to use a positional read - * @param verifyChecksum Whether to use HBase checksums. If HBase checksum is switched off, then - * use HDFS checksum. Can also flip on/off reading same file if we hit a troublesome - * patch in an hfile. - * @param updateMetrics whether need to update the metrics. - * @param intoHeap allocate the ByteBuff of block from heap or off-heap. + * checksums if present or -1 if unknown (as a long). Can be -1 if + * we are doing raw iteration of blocks as when loading up file + * metadata; i.e. the first read of a new file. Usually non-null + * gotten from the file index. + * @param pread whether to use a positional read + * @param verifyChecksum Whether to use HBase checksums. If HBase checksum is switched + * off, then use HDFS checksum. Can also flip on/off reading same + * file if we hit a troublesome patch in an hfile. + * @param updateMetrics whether need to update the metrics. + * @param intoHeap allocate the ByteBuff of block from heap or off-heap. * @return the HFileBlock or null if there is a HBase checksum mismatch */ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, - long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, - boolean intoHeap) throws IOException { + long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, + boolean intoHeap) throws IOException { if (offset < 0) { - throw new IOException("Invalid offset=" + offset + " trying to read " - + "block (onDiskSize=" + onDiskSizeWithHeaderL + ")"); + throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize=" + + onDiskSizeWithHeaderL + ")"); } int onDiskSizeWithHeader = checkAndGetSizeAsInt(onDiskSizeWithHeaderL, hdrSize); // Try and get cached header. Will serve us in rare case where onDiskSizeWithHeaderL is -1 // and will save us having to seek the stream backwards to reread the header we // read the last time through here. ByteBuff headerBuf = getCachedHeader(offset); - LOG.trace("Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " + - "onDiskSizeWithHeader={}", this.fileContext.getHFileName(), offset, pread, - verifyChecksum, headerBuf, onDiskSizeWithHeader); + LOG.trace( + "Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " + + "onDiskSizeWithHeader={}", + this.fileContext.getHFileName(), offset, pread, verifyChecksum, headerBuf, + onDiskSizeWithHeader); // This is NOT same as verifyChecksum. This latter is whether to do hbase // checksums. Can change with circumstances. The below flag is whether the // file has support for checksums (version 2+). @@ -1730,7 +1662,7 @@ public class HFileBlock implements Cacheable { } onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport); } - int preReadHeaderSize = headerBuf == null? 0 : hdrSize; + int preReadHeaderSize = headerBuf == null ? 0 : hdrSize; // Allocate enough space to fit the next block's header too; saves a seek next time through. // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header; // onDiskSizeWithHeader is header, body, and any checksums if present. 
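The allocation comment above sizes the read buffer as onDiskSizeWithHeader plus one extra header so that the next block's header is captured in the same I/O, and getNextBlockOnDiskSize then picks the stored size out of that tail. The offsets can be shown with a plain ByteBuffer; the 8-byte magic and 33-byte header are the usual v2/v3 values, hard-coded here only for illustration.

import java.nio.ByteBuffer;

// Hedged sketch of the "over-read one extra header" buffer layout described above.
public class NextHeaderPeekSketch {
  static final int MAGIC_LENGTH = 8; // block type magic at the start of every header
  static final int HEADER_SIZE = 33; // full header, checksums enabled

  public static void main(String[] args) {
    int onDiskSizeWithHeader = 128;        // current block: header + data + checksums
    int nextBlockSizeWithoutHeader = 500;  // value stored in the next block's header

    // Allocate the current block plus one extra header so the next header is captured too.
    ByteBuffer onDiskBlock = ByteBuffer.allocate(onDiskSizeWithHeader + HEADER_SIZE);

    // Pretend the read filled the buffer; write the field we care about where the next
    // header would land: right after its magic, i.e. at onDiskSizeWithHeader + MAGIC_LENGTH.
    onDiskBlock.putInt(onDiskSizeWithHeader + MAGIC_LENGTH, nextBlockSizeWithoutHeader);

    // Mirror of getNextBlockOnDiskSize: the stored size excludes the header, so add it back.
    int nextBlockOnDiskSize =
      onDiskBlock.getInt(onDiskSizeWithHeader + MAGIC_LENGTH) + HEADER_SIZE;

    System.out.println("next block on-disk size (with header) = " + nextBlockOnDiskSize); // 533
  }
}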
preReadHeaderSize @@ -1747,7 +1679,7 @@ public class HFileBlock implements Cacheable { onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread); onDiskBlock.rewind(); // in case of moving position when copying a cached header int nextBlockOnDiskSize = - getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader); + getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader); if (headerBuf == null) { headerBuf = onDiskBlock.duplicate().position(0).limit(hdrSize); } @@ -1789,8 +1721,8 @@ public class HFileBlock implements Cacheable { @Override public void setIncludesMemStoreTS(boolean includesMemstoreTS) { - this.fileContext = new HFileContextBuilder(this.fileContext) - .withIncludesMvcc(includesMemstoreTS).build(); + this.fileContext = + new HFileContextBuilder(this.fileContext).withIncludesMvcc(includesMemstoreTS).build(); } @Override @@ -1809,8 +1741,8 @@ public class HFileBlock implements Cacheable { } /** - * Generates the checksum for the header as well as the data and then validates it. - * If the block doesn't uses checksum, returns false. + * Generates the checksum for the header as well as the data and then validates it. If the block + * doesn't uses checksum, returns false. * @return True if checksum matches, else false. */ private boolean validateChecksum(long offset, ByteBuff data, int hdrSize) { @@ -1851,12 +1783,10 @@ public class HFileBlock implements Cacheable { /** An additional sanity-check in case no compression or encryption is being used. */ void sanityCheckUncompressed() throws IOException { - if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + - totalChecksumBytes()) { - throw new IOException("Using no compression but " - + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", " - + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader - + ", numChecksumbytes=" + totalChecksumBytes()); + if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) { + throw new IOException("Using no compression but " + "onDiskSizeWithoutHeader=" + + onDiskSizeWithoutHeader + ", " + "uncompressedSizeWithoutHeader=" + + uncompressedSizeWithoutHeader + ", numChecksumbytes=" + totalChecksumBytes()); } } @@ -1956,8 +1886,10 @@ public class HFileBlock implements Cacheable { if (castedComparison.uncompressedSizeWithoutHeader != this.uncompressedSizeWithoutHeader) { return false; } - if (ByteBuff.compareTo(this.buf, 0, this.buf.limit(), castedComparison.buf, 0, - castedComparison.buf.limit()) != 0) { + if ( + ByteBuff.compareTo(this.buf, 0, this.buf.limit(), castedComparison.buf, 0, + castedComparison.buf.limit()) != 0 + ) { return false; } return true; @@ -1984,8 +1916,8 @@ public class HFileBlock implements Cacheable { } /** - * Calculate the number of bytes required to store all the checksums - * for this block. Each checksum value is a 4 byte integer. + * Calculate the number of bytes required to store all the checksums for this block. Each checksum + * value is a 4 byte integer. */ int totalChecksumBytes() { // If the hfile block has minorVersion 0, then there are no checksum @@ -1996,7 +1928,7 @@ public class HFileBlock implements Cacheable { return 0; } return (int) ChecksumUtil.numBytes(onDiskDataSizeWithHeader, - this.fileContext.getBytesPerChecksum()); + this.fileContext.getBytesPerChecksum()); } /** @@ -2010,8 +1942,9 @@ public class HFileBlock implements Cacheable { * Maps a minor version to the size of the header. 
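totalChecksumBytes above stores one 4-byte checksum value per bytesPerChecksum chunk of the on-disk block, and none at all for minor version 0 blocks or a disabled checksum type, so the size is a ceiling division. A hedged arithmetic sketch under that assumption:

// Each bytesPerChecksum chunk of the on-disk block (header included) gets one 4-byte checksum.
public class ChecksumBytesSketch {
  static final int CHECKSUM_SIZE = 4; // "Each checksum value is a 4 byte integer."

  static int totalChecksumBytes(int onDiskDataSizeWithHeader, int bytesPerChecksum,
      boolean checksumsEnabled) {
    if (!checksumsEnabled) {
      return 0; // minor version 0 blocks or checksum type NULL carry no checksum data
    }
    long chunks = (onDiskDataSizeWithHeader + bytesPerChecksum - 1L) / bytesPerChecksum;
    return (int) (chunks * CHECKSUM_SIZE);
  }

  public static void main(String[] args) {
    // 65,569 bytes of header+data with a 16 KB chunk size -> 5 chunks -> 20 checksum bytes.
    System.out.println(totalChecksumBytes(65_569, 16_384, true));  // 20
    System.out.println(totalChecksumBytes(65_569, 16_384, false)); // 0
  }
}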
*/ public static int headerSize(boolean usesHBaseChecksum) { - return usesHBaseChecksum? - HConstants.HFILEBLOCK_HEADER_SIZE: HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; + return usesHBaseChecksum + ? HConstants.HFILEBLOCK_HEADER_SIZE + : HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; } /** @@ -2026,21 +1959,20 @@ public class HFileBlock implements Cacheable { * Return the appropriate DUMMY_HEADER for the minor version */ static private byte[] getDummyHeaderForVersion(boolean usesHBaseChecksum) { - return usesHBaseChecksum? HConstants.HFILEBLOCK_DUMMY_HEADER: DUMMY_HEADER_NO_CHECKSUM; + return usesHBaseChecksum ? HConstants.HFILEBLOCK_DUMMY_HEADER : DUMMY_HEADER_NO_CHECKSUM; } /** - * @return This HFileBlocks fileContext which will a derivative of the - * fileContext for the file from which this block's data was originally read. + * @return This HFileBlocks fileContext which will a derivative of the fileContext for the file + * from which this block's data was originally read. */ public HFileContext getHFileContext() { return this.fileContext; } /** - * Convert the contents of the block header into a human readable string. - * This is mostly helpful for debugging. This assumes that the block - * has minor version > 0. + * Convert the contents of the block header into a human readable string. This is mostly helpful + * for debugging. This assumes that the block has minor version > 0. */ static String toStringHeader(ByteBuff buf) throws IOException { byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)]; @@ -2052,31 +1984,23 @@ public class HFileBlock implements Cacheable { byte cksumtype = buf.get(); long bytesPerChecksum = buf.getInt(); long onDiskDataSizeWithHeader = buf.getInt(); - return " Header dump: magic: " + Bytes.toString(magicBuf) + - " blockType " + bt + - " compressedBlockSizeNoHeader " + - compressedBlockSizeNoHeader + - " uncompressedBlockSizeNoHeader " + - uncompressedBlockSizeNoHeader + - " prevBlockOffset " + prevBlockOffset + - " checksumType " + ChecksumType.codeToType(cksumtype) + - " bytesPerChecksum " + bytesPerChecksum + - " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader; + return " Header dump: magic: " + Bytes.toString(magicBuf) + " blockType " + bt + + " compressedBlockSizeNoHeader " + compressedBlockSizeNoHeader + + " uncompressedBlockSizeNoHeader " + uncompressedBlockSizeNoHeader + " prevBlockOffset " + + prevBlockOffset + " checksumType " + ChecksumType.codeToType(cksumtype) + + " bytesPerChecksum " + bytesPerChecksum + " onDiskDataSizeWithHeader " + + onDiskDataSizeWithHeader; } - private static HFileBlockBuilder createBuilder(HFileBlock blk){ - return new HFileBlockBuilder() - .withBlockType(blk.blockType) - .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader) - .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader) - .withPrevBlockOffset(blk.prevBlockOffset) - .withByteBuff(blk.buf.duplicate()) // Duplicate the buffer. 
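toStringHeader above walks the header fields in their on-disk order, and the 33-byte size that headerSize(boolean) refers to is simply 8-byte magic, two ints, a long, a checksum-type byte, and two more ints. The same layout can be decoded with a plain ByteBuffer; this is an illustrative parser, not the HBase one.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Hedged sketch decoding a v2/v3 block header in the field order used by toStringHeader above.
public class HeaderDumpSketch {
  static final int MAGIC_LENGTH = 8;

  static String dump(ByteBuffer buf) {
    byte[] magic = new byte[MAGIC_LENGTH];
    buf.get(magic);
    int onDiskSizeWithoutHeader = buf.getInt();
    int uncompressedSizeWithoutHeader = buf.getInt();
    long prevBlockOffset = buf.getLong();
    byte checksumType = buf.get();
    int bytesPerChecksum = buf.getInt();
    int onDiskDataSizeWithHeader = buf.getInt();
    return "magic=" + new String(magic, StandardCharsets.US_ASCII)
      + " onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader
      + " uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader
      + " prevBlockOffset=" + prevBlockOffset
      + " checksumType=" + checksumType
      + " bytesPerChecksum=" + bytesPerChecksum
      + " onDiskDataSizeWithHeader=" + onDiskDataSizeWithHeader;
  }

  public static void main(String[] args) {
    ByteBuffer header = ByteBuffer.allocate(33); // 8 + 4 + 4 + 8 + 1 + 4 + 4
    header.put("DATABLK*".getBytes(StandardCharsets.US_ASCII)); // DATA block magic
    header.putInt(65_536).putInt(131_072).putLong(-1L)
          .put((byte) 1).putInt(16_384).putInt(65_569);
    header.flip();
    System.out.println(dump(header));
  }
}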
- .withOffset(blk.offset) - .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader) - .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize) - .withHFileContext(blk.fileContext) - .withByteBuffAllocator(blk.allocator) - .withShared(blk.isSharedMem()); + private static HFileBlockBuilder createBuilder(HFileBlock blk) { + return new HFileBlockBuilder().withBlockType(blk.blockType) + .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader) + .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader) + .withPrevBlockOffset(blk.prevBlockOffset).withByteBuff(blk.buf.duplicate()) // Duplicate the + // buffer. + .withOffset(blk.offset).withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader) + .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize).withHFileContext(blk.fileContext) + .withByteBuffAllocator(blk.allocator).withShared(blk.isSharedMem()); } static HFileBlock shallowClone(HFileBlock blk) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java index dc37a920f2f..91e62b491c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -103,12 +103,12 @@ public class HFileBlockBuilder { public HFileBlock build() { if (isShared) { return new SharedMemHFileBlock(blockType, onDiskSizeWithoutHeader, - uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, - nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); + uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, + nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); } else { return new ExclusiveMemHFileBlock(blockType, onDiskSizeWithoutHeader, - uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, - nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); + uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, + nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index b38964ebfd7..782383d697b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
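build() above returns one of two concrete block classes depending on whether the backing memory is shared (pooled, reference-counted) or exclusive to the block. A stripped-down sketch of that builder decision with hypothetical class names, not HBase types:

// Hedged sketch of a builder that materializes one of two implementations based on an
// isShared flag, mirroring the SharedMemHFileBlock / ExclusiveMemHFileBlock split above.
public class BlockBuilderSketch {

  interface Block { String describe(); }

  static final class SharedMemBlock implements Block {
    public String describe() { return "shared (ref-counted, must be released to the pool)"; }
  }

  static final class ExclusiveMemBlock implements Block {
    public String describe() { return "exclusive (plain heap memory, GC reclaims it)"; }
  }

  static final class Builder {
    private boolean shared;
    Builder withShared(boolean shared) { this.shared = shared; return this; }
    Block build() { return shared ? new SharedMemBlock() : new ExclusiveMemBlock(); }
  }

  public static void main(String[] args) {
    System.out.println(new Builder().withShared(true).build().describe());
    System.out.println(new Builder().withShared(false).build().describe());
  }
}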
See the NOTICE file * distributed with this work for additional information @@ -29,20 +28,15 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -//import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader; @@ -53,17 +47,16 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.ObjectIntPair; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Provides functionality to write ({@link BlockIndexWriter}) and read - * BlockIndexReader - * single-level and multi-level block indexes. - * - * Examples of how to use the block index writer can be found in - * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and - * {@link HFileWriterImpl}. Examples of how to use the reader can be - * found in {@link HFileReaderImpl} and - * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex. + * Provides functionality to write ({@link BlockIndexWriter}) and read BlockIndexReader single-level + * and multi-level block indexes. Examples of how to use the block index writer can be found in + * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and {@link HFileWriterImpl}. + * Examples of how to use the reader can be found in {@link HFileReaderImpl} and + * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex. */ @InterfaceAudience.Private public class HFileBlockIndex { @@ -73,8 +66,8 @@ public class HFileBlockIndex { static final int DEFAULT_MAX_CHUNK_SIZE = 128 * 1024; /** - * The maximum size guideline for index blocks (both leaf, intermediate, and - * root). If not specified, DEFAULT_MAX_CHUNK_SIZE is used. + * The maximum size guideline for index blocks (both leaf, intermediate, and root). If not + * specified, DEFAULT_MAX_CHUNK_SIZE is used. */ public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size"; @@ -89,35 +82,32 @@ public class HFileBlockIndex { static final int DEFAULT_MIN_INDEX_NUM_ENTRIES = 16; /** - * The number of bytes stored in each "secondary index" entry in addition to - * key bytes in the non-root index block format. The first long is the file - * offset of the deeper-level block the entry points to, and the int that - * follows is that block's on-disk size without including header. + * The number of bytes stored in each "secondary index" entry in addition to key bytes in the + * non-root index block format. The first long is the file offset of the deeper-level block the + * entry points to, and the int that follows is that block's on-disk size without including + * header. 
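The "secondary index" entry description above fixes the non-root block layout: an entry count, numEntries + 1 relative offsets, then packed (offset: long, onDiskSize: int, key bytes) tuples, which is where the 12-byte per-entry overhead (an int plus a long) comes from. The sketch below writes and reads such a block with a plain ByteBuffer, following the same offset arithmetic as the getNonRootIndexedKey code quoted further down; it is an illustration, not the HBase serializer.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Layout: [numEntries:int][numEntries+1 relative offsets:int][(offset:long, onDiskSize:int, key)*]
// Entry overhead = 4 (int) + 8 (long) = 12 bytes, matching SECONDARY_INDEX_ENTRY_OVERHEAD.
public class NonRootIndexLayoutSketch {
  static final int ENTRY_OVERHEAD = Integer.BYTES + Long.BYTES; // 12

  static ByteBuffer write(long[] offsets, int[] sizes, byte[][] keys) {
    int n = keys.length;
    int[] rel = new int[n + 1];
    for (int i = 0; i < n; i++) {
      rel[i + 1] = rel[i] + ENTRY_OVERHEAD + keys[i].length;
    }
    ByteBuffer b = ByteBuffer.allocate(Integer.BYTES * (n + 2) + rel[n]);
    b.putInt(n);
    for (int r : rel) {
      b.putInt(r);
    }
    for (int i = 0; i < n; i++) {
      b.putLong(offsets[i]).putInt(sizes[i]).put(keys[i]);
    }
    return b;
  }

  /** Mirrors the getNonRootIndexedKey arithmetic quoted further down in the patch. */
  static byte[] key(ByteBuffer block, int i) {
    int numEntries = block.getInt(0);
    int entriesOffset = Integer.BYTES * (numEntries + 2); // skip count + secondary index
    int relOffset = block.getInt(Integer.BYTES * (i + 1));
    int keyLen = block.getInt(Integer.BYTES * (i + 2)) - relOffset - ENTRY_OVERHEAD;
    byte[] key = new byte[keyLen];
    for (int k = 0; k < keyLen; k++) {
      key[k] = block.get(entriesOffset + relOffset + ENTRY_OVERHEAD + k);
    }
    return key;
  }

  public static void main(String[] args) {
    byte[][] keys = { "aaa".getBytes(StandardCharsets.UTF_8),
                      "mmm".getBytes(StandardCharsets.UTF_8) };
    ByteBuffer block = write(new long[] { 0L, 4096L }, new int[] { 4096, 4096 }, keys);
    System.out.println(new String(key(block, 1), StandardCharsets.UTF_8)); // mmm
  }
}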
*/ - static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT - + Bytes.SIZEOF_LONG; + static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG; /** * Error message when trying to use inline block API in single-level mode. */ private static final String INLINE_BLOCKS_NOT_ALLOWED = - "Inline blocks are not allowed in the single-level-only mode"; + "Inline blocks are not allowed in the single-level-only mode"; /** - * The size of a meta-data record used for finding the mid-key in a - * multi-level index. Consists of the middle leaf-level index block offset - * (long), its on-disk size without header included (int), and the mid-key - * entry's zero-based index in that leaf index block. + * The size of a meta-data record used for finding the mid-key in a multi-level index. Consists of + * the middle leaf-level index block offset (long), its on-disk size without header included + * (int), and the mid-key entry's zero-based index in that leaf index block. */ - private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + - 2 * Bytes.SIZEOF_INT; + private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + 2 * Bytes.SIZEOF_INT; /** - * An implementation of the BlockIndexReader that deals with block keys which are plain - * byte[] like MetaBlock or the Bloom Block for ROW bloom. - * Does not need a comparator. It can work on Bytes.BYTES_RAWCOMPARATOR + * An implementation of the BlockIndexReader that deals with block keys which are plain byte[] + * like MetaBlock or the Bloom Block for ROW bloom. Does not need a comparator. It can work on + * Bytes.BYTES_RAWCOMPARATOR */ - static class ByteArrayKeyBlockIndexReader extends BlockIndexReader { + static class ByteArrayKeyBlockIndexReader extends BlockIndexReader { private byte[][] blockKeys; @@ -148,8 +138,7 @@ public class HFileBlockIndex { } /** - * @param i - * from 0 to {@link #getRootBlockCount() - 1} + * n * from 0 to {@link #getRootBlockCount() - 1} */ public byte[] getRootBlockKey(int i) { return blockKeys[i]; @@ -157,9 +146,9 @@ public class HFileBlockIndex { @Override public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { // this would not be needed return null; } @@ -209,8 +198,8 @@ public class HFileBlockIndex { public int rootBlockContainingKey(Cell key) { // Should not be called on this because here it deals only with byte[] throw new UnsupportedOperationException( - "Cannot search for a key that is of Cell type. Only plain byte array keys " + - "can be searched for"); + "Cannot search for a key that is of Cell type. 
Only plain byte array keys " + + "can be searched for"); } @Override @@ -218,18 +207,17 @@ public class HFileBlockIndex { StringBuilder sb = new StringBuilder(); sb.append("size=" + rootCount).append("\n"); for (int i = 0; i < rootCount; i++) { - sb.append("key=").append(KeyValue.keyToString(blockKeys[i])) - .append("\n offset=").append(blockOffsets[i]) - .append(", dataSize=" + blockDataSizes[i]).append("\n"); + sb.append("key=").append(KeyValue.keyToString(blockKeys[i])).append("\n offset=") + .append(blockOffsets[i]).append(", dataSize=" + blockDataSizes[i]).append("\n"); } return sb.toString(); } } /** - * An implementation of the BlockIndexReader that deals with block keys which are the key - * part of a cell like the Data block index or the ROW_COL bloom blocks - * This needs a comparator to work with the Cells + * An implementation of the BlockIndexReader that deals with block keys which are the key part of + * a cell like the Data block index or the ROW_COL bloom blocks This needs a comparator to work + * with the Cells */ static class CellBasedKeyBlockIndexReader extends BlockIndexReader { @@ -268,8 +256,7 @@ public class HFileBlockIndex { } /** - * @param i - * from 0 to {@link #getRootBlockCount() - 1} + * n * from 0 to {@link #getRootBlockCount() - 1} */ public Cell getRootBlockKey(int i) { return blockKeys[i]; @@ -277,9 +264,9 @@ public class HFileBlockIndex { @Override public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { int rootLevelIndex = rootBlockContainingKey(key); if (rootLevelIndex < 0 || rootLevelIndex >= blockOffsets.length) { return null; @@ -334,7 +321,7 @@ public class HFileBlockIndex { if (block == null) { throw new IOException("Failed to read block at offset " + currentOffset - + ", onDiskSize=" + currentOnDiskSize); + + ", onDiskSize=" + currentOnDiskSize); } // Found a data block, break the loop and check our level in the tree. @@ -346,7 +333,7 @@ public class HFileBlockIndex { // index block. We don't allow going deeper than searchTreeLevel. if (++lookupLevel > searchTreeLevel) { throw new IOException("Search Tree Level overflow: lookupLevel=" + lookupLevel - + ", searchTreeLevel=" + searchTreeLevel); + + ", searchTreeLevel=" + searchTreeLevel); } // Locate the entry corresponding to the given key in the non-root @@ -356,9 +343,8 @@ public class HFileBlockIndex { if (index == -1) { // This has to be changed // For now change this to key value - throw new IOException("The key " - + CellUtil.getCellKeyAsString(key) - + " is before the" + " first key of the non-root index block " + block); + throw new IOException("The key " + CellUtil.getCellKeyAsString(key) + " is before the" + + " first key of the non-root index block " + block); } currentOffset = buffer.getLong(); @@ -387,7 +373,7 @@ public class HFileBlockIndex { block.release(); } throw new IOException("Reached a data block at level " + lookupLevel - + " but the number of levels is " + searchTreeLevel); + + " but the number of levels is " + searchTreeLevel); } // set the next indexed key for the current block. 
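loadDataBlockWithScanInfo above descends from a root-level entry through intermediate and leaf index blocks until it reaches a data block, throwing if it would descend past searchTreeLevel or if the key precedes the first key of a non-root block. The sketch below reproduces that control flow against an in-memory tree; Node and seek are hypothetical stand-ins, not HBase types.

import java.io.IOException;
import java.util.Arrays;

// Hedged sketch of the top-down index traversal described above: descend one level per
// iteration, binary-searching each index node, and fail if we go deeper than searchTreeLevel.
public class IndexTraversalSketch {

  static final class Node {
    final boolean isData;
    final String[] firstKeys;   // first key of each child, sorted (index nodes only)
    final Node[] children;
    final String payload;       // data node payload, for demonstration only
    Node(String payload) {
      this.isData = true; this.firstKeys = null; this.children = null; this.payload = payload;
    }
    Node(String[] firstKeys, Node[] children) {
      this.isData = false; this.firstKeys = firstKeys; this.children = children; this.payload = null;
    }
  }

  static String seek(Node root, String key, int searchTreeLevel) throws IOException {
    Node block = root;
    int lookupLevel = 1; // the root counts as level one
    while (!block.isData) {
      if (++lookupLevel > searchTreeLevel) {
        throw new IOException("Search Tree Level overflow: lookupLevel=" + lookupLevel
          + ", searchTreeLevel=" + searchTreeLevel);
      }
      int i = Arrays.binarySearch(block.firstKeys, key);
      int child = i >= 0 ? i : -i - 2;     // entry whose first key is <= key
      if (child < 0) {
        throw new IOException("The key " + key + " is before the first key of " + block);
      }
      block = block.children[child];
    }
    return block.payload;
  }

  public static void main(String[] args) throws IOException {
    Node leaf1 = new Node(new String[] { "a", "f" },
      new Node[] { new Node("data[a..f)"), new Node("data[f..m)") });
    Node leaf2 = new Node(new String[] { "m", "t" },
      new Node[] { new Node("data[m..t)"), new Node("data[t..)") });
    Node root = new Node(new String[] { "a", "m" }, new Node[] { leaf1, leaf2 });
    System.out.println(seek(root, "g", 3)); // data[f..m)
    System.out.println(seek(root, "x", 3)); // data[t..)
  }
}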
@@ -396,8 +382,7 @@ public class HFileBlockIndex { @Override public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { - if (rootCount == 0) - throw new IOException("HFile empty"); + if (rootCount == 0) throw new IOException("HFile empty"); Cell targetMidKey = this.midKey.get(); if (targetMidKey != null) { @@ -406,23 +391,21 @@ public class HFileBlockIndex { if (midLeafBlockOffset >= 0) { if (cachingBlockReader == null) { - throw new IOException("Have to read the middle leaf block but " + - "no block reader available"); + throw new IOException( + "Have to read the middle leaf block but " + "no block reader available"); } // Caching, using pread, assuming this is not a compaction. - HFileBlock midLeafBlock = cachingBlockReader.readBlock( - midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true, - BlockType.LEAF_INDEX, null); + HFileBlock midLeafBlock = cachingBlockReader.readBlock(midLeafBlockOffset, + midLeafBlockOnDiskSize, true, true, false, true, BlockType.LEAF_INDEX, null); try { ByteBuff b = midLeafBlock.getBufferWithoutHeader(); int numDataBlocks = b.getIntAfterPosition(0); int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 1)); int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset - - SECONDARY_INDEX_ENTRY_OVERHEAD; + - SECONDARY_INDEX_ENTRY_OVERHEAD; int keyOffset = - Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset - + SECONDARY_INDEX_ENTRY_OVERHEAD; + Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset + SECONDARY_INDEX_ENTRY_OVERHEAD; byte[] bytes = b.toBytes(keyOffset, keyLen); targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); } finally { @@ -444,9 +427,8 @@ public class HFileBlockIndex { /** * Adds a new entry in the root block index. Only used when reading. - * - * @param key Last key in the block - * @param offset file offset where the block is stored + * @param key Last key in the block + * @param offset file offset where the block is stored * @param dataSize the uncompressed data size */ @Override @@ -460,10 +442,10 @@ public class HFileBlockIndex { @Override public int rootBlockContainingKey(final byte[] key, int offset, int length, - CellComparator comp) { + CellComparator comp) { // This should always be called with Cell not with a byte[] key - throw new UnsupportedOperationException("Cannot find for a key containing plain byte " + - "array. Only cell based keys can be searched for"); + throw new UnsupportedOperationException("Cannot find for a key containing plain byte " + + "array. Only cell based keys can be searched for"); } @Override @@ -494,23 +476,20 @@ public class HFileBlockIndex { StringBuilder sb = new StringBuilder(); sb.append("size=" + rootCount).append("\n"); for (int i = 0; i < rootCount; i++) { - sb.append("key=").append((blockKeys[i])) - .append("\n offset=").append(blockOffsets[i]) - .append(", dataSize=" + blockDataSizes[i]).append("\n"); + sb.append("key=").append((blockKeys[i])).append("\n offset=").append(blockOffsets[i]) + .append(", dataSize=" + blockDataSizes[i]).append("\n"); } return sb.toString(); } } /** - * The reader will always hold the root level index in the memory. Index - * blocks at all other levels will be cached in the LRU cache in practice, - * although this API does not enforce that. - * - *
<p>
          All non-root (leaf and intermediate) index blocks contain what we call a - * "secondary index": an array of offsets to the entries within the block. - * This allows us to do binary search for the entry corresponding to the - * given key without having to deserialize the block. + * The reader will always hold the root level index in the memory. Index blocks at all other + * levels will be cached in the LRU cache in practice, although this API does not enforce that. + *
<p>
          + * All non-root (leaf and intermediate) index blocks contain what we call a "secondary index": an + * array of offsets to the entries within the block. This allows us to do binary search for the + * entry corresponding to the given key without having to deserialize the block. */ static abstract class BlockIndexReader implements HeapSize { @@ -524,8 +503,8 @@ public class HFileBlockIndex { protected int midKeyEntry = -1; /** - * The number of levels in the block index tree. One if there is only root - * level, two for root and leaf levels, etc. + * The number of levels in the block index tree. One if there is only root level, two for root + * and leaf levels, etc. */ protected int searchTreeLevel; @@ -535,8 +514,8 @@ public class HFileBlockIndex { public abstract boolean isEmpty(); /** - * Verifies that the block index is non-empty and throws an - * {@link IllegalStateException} otherwise. + * Verifies that the block index is non-empty and throws an {@link IllegalStateException} + * otherwise. */ public void ensureNonEmpty() { if (isEmpty()) { @@ -545,23 +524,18 @@ public class HFileBlockIndex { } /** - * Return the data block which contains this key. This function will only - * be called when the HFile version is larger than 1. - * - * @param key the key we are looking for - * @param currentBlock the current block, to avoid re-reading the same block - * @param cacheBlocks - * @param pread - * @param isCompaction - * @param expectedDataBlockEncoding the data block encoding the caller is - * expecting the data block to be in, or null to not perform this - * check and return the block irrespective of the encoding - * @return reader a basic way to load blocks - * @throws IOException + * Return the data block which contains this key. This function will only be called when the + * HFile version is larger than 1. + * @param key the key we are looking for + * @param currentBlock the current block, to avoid re-reading the same block nnn * @param + * expectedDataBlockEncoding the data block encoding the caller is expecting + * the data block to be in, or null to not perform this check and return the + * block irrespective of the encoding + * @return reader a basic way to load blocks n */ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks, - boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, + CachingBlockReader cachingBlockReader) throws IOException { BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock, cacheBlocks, pread, isCompaction, expectedDataBlockEncoding, cachingBlockReader); if (blockWithScanInfo == null) { @@ -572,29 +546,25 @@ public class HFileBlockIndex { } /** - * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with - * other scan info such as the key that starts the next HFileBlock. This function will only - * be called when the HFile version is larger than 1. - * - * @param key the key we are looking for - * @param currentBlock the current block, to avoid re-reading the same block - * @param expectedDataBlockEncoding the data block encoding the caller is - * expecting the data block to be in, or null to not perform this - * check and return the block irrespective of the encoding. - * @return the BlockWithScanInfo which contains the DataBlock with other - * scan info such as nextIndexedKey. 
- * @throws IOException + * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with other + * scan info such as the key that starts the next HFileBlock. This function will only be called + * when the HFile version is larger than 1. + * @param key the key we are looking for + * @param currentBlock the current block, to avoid re-reading the same block + * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data + * block to be in, or null to not perform this check and return + * the block irrespective of the encoding. + * @return the BlockWithScanInfo which contains the DataBlock with other scan info such as + * nextIndexedKey. n */ public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException; + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException; /** - * An approximation to the {@link HFile}'s mid-key. Operates on block - * boundaries, and does not go inside blocks. In other words, returns the - * first key of the middle block of the file. - * + * An approximation to the {@link HFile}'s mid-key. Operates on block boundaries, and does not + * go inside blocks. In other words, returns the first key of the middle block of the file. * @return the first key of the middle block */ public abstract Cell midkey(CachingBlockReader cachingBlockReader) throws IOException; @@ -608,8 +578,8 @@ public class HFileBlockIndex { /** * @param i zero-based index of a root-level block - * @return the on-disk size of the root-level block for version 2, or the - * uncompressed size for version 1 + * @return the on-disk size of the root-level block for version 2, or the uncompressed size for + * version 1 */ public int getRootBlockDataSize(int i) { return blockDataSizes[i]; @@ -623,30 +593,21 @@ public class HFileBlockIndex { } /** - * Finds the root-level index block containing the given key. - * - * @param key - * Key to find - * @param comp - * the comparator to be used - * @return Offset of block containing key (between 0 and the - * number of blocks - 1) or -1 if this file does not contain the - * request. + * Finds the root-level index block containing the given key. n * Key to find n * the comparator + * to be used + * @return Offset of block containing key (between 0 and the number of blocks - 1) + * or -1 if this file does not contain the request. */ // When we want to find the meta index block or bloom block for ROW bloom // type Bytes.BYTES_RAWCOMPARATOR would be enough. For the ROW_COL bloom case we need the // CellComparator. public abstract int rootBlockContainingKey(final byte[] key, int offset, int length, - CellComparator comp); + CellComparator comp); /** - * Finds the root-level index block containing the given key. - * - * @param key - * Key to find - * @return Offset of block containing key (between 0 and the - * number of blocks - 1) or -1 if this file does not contain the - * request. + * Finds the root-level index block containing the given key. n * Key to find + * @return Offset of block containing key (between 0 and the number of blocks - 1) + * or -1 if this file does not contain the request. 
*/ // When we want to find the meta index block or bloom block for ROW bloom // type @@ -657,17 +618,13 @@ public class HFileBlockIndex { } /** - * Finds the root-level index block containing the given key. - * - * @param key - * Key to find + * Finds the root-level index block containing the given key. n * Key to find */ public abstract int rootBlockContainingKey(final Cell key); /** - * The indexed key at the ith position in the nonRootIndex. The position starts at 0. - * @param nonRootIndex - * @param i the ith position + * The indexed key at the ith position in the nonRootIndex. The position starts at 0. n * @param + * i the ith position * @return The indexed key at the ith position in the nonRootIndex. */ protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) { @@ -680,42 +637,33 @@ public class HFileBlockIndex { // The secondary index takes numEntries + 1 ints. int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2); // Targetkey's offset relative to the end of secondary index - int targetKeyRelOffset = nonRootIndex.getInt( - Bytes.SIZEOF_INT * (i + 1)); + int targetKeyRelOffset = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 1)); // The offset of the target key in the blockIndex buffer - int targetKeyOffset = entriesOffset // Skip secondary index - + targetKeyRelOffset // Skip all entries until mid - + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size + int targetKeyOffset = entriesOffset // Skip secondary index + + targetKeyRelOffset // Skip all entries until mid + + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size // We subtract the two consecutive secondary index elements, which // gives us the size of the whole (offset, onDiskSize, key) tuple. We // then need to subtract the overhead of offset and onDiskSize. - int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - - targetKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; + int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - targetKeyRelOffset + - SECONDARY_INDEX_ENTRY_OVERHEAD; // TODO check whether we can make BB backed Cell here? So can avoid bytes copy. return nonRootIndex.toBytes(targetKeyOffset, targetKeyLength); } /** - * Performs a binary search over a non-root level index block. Utilizes the - * secondary index, which records the offsets of (offset, onDiskSize, - * firstKey) tuples of all entries. - * - * @param key - * the key we are searching for offsets to individual entries in - * the blockIndex buffer - * @param nonRootIndex - * the non-root index block buffer, starting with the secondary - * index. The position is ignored. - * @return the index i in [0, numEntries - 1] such that keys[i] <= key < - * keys[i + 1], if keys is the array of all keys being searched, or - * -1 otherwise - * @throws IOException + * Performs a binary search over a non-root level index block. Utilizes the secondary index, + * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries. n * the + * key we are searching for offsets to individual entries in the blockIndex buffer n * the + * non-root index block buffer, starting with the secondary index. The position is ignored. 
+ * @return the index i in [0, numEntries - 1] such that keys[i] <= key < keys[i + 1], if keys is + * the array of all keys being searched, or -1 otherwise n */ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex, - CellComparator comparator) { + CellComparator comparator) { int numEntries = nonRootIndex.getIntAfterPosition(0); int low = 0; @@ -738,15 +686,15 @@ public class HFileBlockIndex { int midKeyRelOffset = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 1)); // The offset of the middle key in the blockIndex buffer - int midKeyOffset = entriesOffset // Skip secondary index - + midKeyRelOffset // Skip all entries until mid - + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size + int midKeyOffset = entriesOffset // Skip secondary index + + midKeyRelOffset // Skip all entries until mid + + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size // We subtract the two consecutive secondary index elements, which // gives us the size of the whole (offset, onDiskSize, key) tuple. We // then need to subtract the overhead of offset and onDiskSize. - int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2)) - - midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; + int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2)) + - midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; // we have to compare in this order, because the comparator order // has special logic when the 'left side' is a special key. @@ -758,13 +706,10 @@ public class HFileBlockIndex { int cmp = PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, nonRootIndexkeyOnlyKV); // key lives above the midpoint - if (cmp > 0) - low = mid + 1; // Maintain the invariant that keys[low - 1] < key + if (cmp > 0) low = mid + 1; // Maintain the invariant that keys[low - 1] < key // key lives below the midpoint - else if (cmp < 0) - high = mid - 1; // Maintain the invariant that key < keys[high + 1] - else - return mid; // exact match + else if (cmp < 0) high = mid - 1; // Maintain the invariant that key < keys[high + 1] + else return mid; // exact match } // As per our invariant, keys[low - 1] < key < keys[high + 1], meaning @@ -772,8 +717,8 @@ public class HFileBlockIndex { // condition, low >= high + 1. Therefore, low = high + 1. if (low != high + 1) { - throw new IllegalStateException("Binary search broken: low=" + low - + " " + "instead of " + (high + 1)); + throw new IllegalStateException( + "Binary search broken: low=" + low + " " + "instead of " + (high + 1)); } // OK, our invariant says that keys[low - 1] < key < keys[low]. We need to @@ -782,30 +727,22 @@ public class HFileBlockIndex { // Some extra validation on the result. if (i < -1 || i >= numEntries) { - throw new IllegalStateException("Binary search broken: result is " + - i + " but expected to be between -1 and (numEntries - 1) = " + - (numEntries - 1)); + throw new IllegalStateException("Binary search broken: result is " + i + + " but expected to be between -1 and (numEntries - 1) = " + (numEntries - 1)); } return i; } /** - * Search for one key using the secondary index in a non-root block. In case - * of success, positions the provided buffer at the entry of interest, where - * the file offset and the on-disk-size can be read. - * - * @param nonRootBlock - * a non-root block without header. Initial position does not - * matter. 
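binarySearchNonRootIndex above maintains the invariant keys[low - 1] < key < keys[high + 1] and returns the index i with keys[i] <= key < keys[i + 1], or -1 when the key sorts before the first entry. The same contract over a sorted String[] of entry first-keys, standing in for the secondary-index walk:

// Hedged sketch of the non-root binary search contract above.
public class NonRootBinarySearchSketch {

  static int binarySearchNonRoot(String[] firstKeys, String key) {
    int low = 0;
    int high = firstKeys.length - 1;
    while (low <= high) {
      int mid = low + ((high - low) >> 1);
      int cmp = key.compareTo(firstKeys[mid]);
      if (cmp > 0) {
        low = mid + 1;        // invariant: firstKeys[low - 1] < key
      } else if (cmp < 0) {
        high = mid - 1;       // invariant: key < firstKeys[high + 1]
      } else {
        return mid;           // exact match on an entry's first key
      }
    }
    // Loop exit means low == high + 1 and firstKeys[low - 1] < key < firstKeys[low]:
    // the wanted entry is low - 1, which is -1 when the key precedes every entry.
    return low - 1;
  }

  public static void main(String[] args) {
    String[] firstKeys = { "b", "f", "m", "t" };
    System.out.println(binarySearchNonRoot(firstKeys, "f")); // 1  (exact match)
    System.out.println(binarySearchNonRoot(firstKeys, "g")); // 1  (f <= g < m)
    System.out.println(binarySearchNonRoot(firstKeys, "a")); // -1 (before the first entry)
    System.out.println(binarySearchNonRoot(firstKeys, "z")); // 3  (after the last entry)
  }
}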
- * @param key - * the byte array containing the key - * @return the index position where the given key was found, otherwise - * return -1 in the case the given key is before the first key. - * + * Search for one key using the secondary index in a non-root block. In case of success, + * positions the provided buffer at the entry of interest, where the file offset and the + * on-disk-size can be read. n * a non-root block without header. Initial position does not + * matter. n * the byte array containing the key + * @return the index position where the given key was found, otherwise return -1 in the case the + * given key is before the first key. */ - static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, - CellComparator comparator) { + static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, CellComparator comparator) { int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator); if (entryIndex != -1) { @@ -816,8 +753,7 @@ public class HFileBlockIndex { // The offset of the entry we are interested in relative to the end of // the secondary index. - int entryRelOffset = nonRootBlock - .getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex)); + int entryRelOffset = nonRootBlock.getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex)); nonRootBlock.position(entriesOffset + entryRelOffset); } @@ -826,14 +762,11 @@ public class HFileBlockIndex { } /** - * Read in the root-level index from the given input stream. Must match - * what was written into the root level by - * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the - * offset that function returned. - * - * @param in the buffered input stream or wrapped byte input stream - * @param numEntries the number of root-level index entries - * @throws IOException + * Read in the root-level index from the given input stream. Must match what was written into + * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset + * that function returned. + * @param in the buffered input stream or wrapped byte input stream + * @param numEntries the number of root-level index entries n */ public void readRootIndex(DataInput in, final int numEntries) throws IOException { blockOffsets = new long[numEntries]; @@ -856,15 +789,12 @@ public class HFileBlockIndex { protected abstract void add(final byte[] key, final long offset, final int dataSize); /** - * Read in the root-level index from the given input stream. Must match - * what was written into the root level by - * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the - * offset that function returned. - * - * @param blk the HFile block + * Read in the root-level index from the given input stream. Must match what was written into + * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset + * that function returned. + * @param blk the HFile block * @param numEntries the number of root-level index entries - * @return the buffered input stream or wrapped byte input stream - * @throws IOException + * @return the buffered input stream or wrapped byte input stream n */ public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException { DataInputStream in = blk.getByteStream(); @@ -874,15 +804,12 @@ public class HFileBlockIndex { /** * Read the root-level metadata of a multi-level block index. Based on - * {@link #readRootIndex(DataInput, int)}, but also reads metadata - * necessary to compute the mid-key in a multi-level index. 
- * - * @param blk the HFile block - * @param numEntries the number of root-level index entries - * @throws IOException + * {@link #readRootIndex(DataInput, int)}, but also reads metadata necessary to compute the + * mid-key in a multi-level index. + * @param blk the HFile block + * @param numEntries the number of root-level index entries n */ - public void readMultiLevelIndexRoot(HFileBlock blk, - final int numEntries) throws IOException { + public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException { DataInputStream in = readRootIndex(blk, numEntries); // after reading the root index the checksum bytes have to // be subtracted to know if the mid key exists. @@ -899,8 +826,8 @@ public class HFileBlockIndex { @Override public long heapSize() { // The BlockIndexReader does not have the blockKey, comparator and the midkey atomic reference - long heapSize = ClassSize.align(3 * ClassSize.REFERENCE + - 2 * Bytes.SIZEOF_INT + ClassSize.OBJECT); + long heapSize = + ClassSize.align(3 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + ClassSize.OBJECT); // Mid-key metadata. heapSize += MID_KEY_METADATA_SIZE; @@ -908,13 +835,11 @@ public class HFileBlockIndex { heapSize = calculateHeapSizeForBlockKeys(heapSize); if (blockOffsets != null) { - heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length - * Bytes.SIZEOF_LONG); + heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length * Bytes.SIZEOF_LONG); } if (blockDataSizes != null) { - heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length - * Bytes.SIZEOF_INT); + heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length * Bytes.SIZEOF_INT); } return ClassSize.align(heapSize); @@ -924,46 +849,38 @@ public class HFileBlockIndex { } /** - * Writes the block index into the output stream. Generate the tree from - * bottom up. The leaf level is written to disk as a sequence of inline - * blocks, if it is larger than a certain number of bytes. If the leaf level - * is not large enough, we write all entries to the root level instead. - * - * After all leaf blocks have been written, we end up with an index - * referencing the resulting leaf index blocks. If that index is larger than - * the allowed root index size, the writer will break it up into - * reasonable-size intermediate-level index block chunks write those chunks - * out, and create another index referencing those chunks. This will be - * repeated until the remaining index is small enough to become the root - * index. However, in most practical cases we will only have leaf-level - * blocks and the root index, or just the root index. + * Writes the block index into the output stream. Generate the tree from bottom up. The leaf level + * is written to disk as a sequence of inline blocks, if it is larger than a certain number of + * bytes. If the leaf level is not large enough, we write all entries to the root level instead. + * After all leaf blocks have been written, we end up with an index referencing the resulting leaf + * index blocks. If that index is larger than the allowed root index size, the writer will break + * it up into reasonable-size intermediate-level index block chunks write those chunks out, and + * create another index referencing those chunks. This will be repeated until the remaining index + * is small enough to become the root index. However, in most practical cases we will only have + * leaf-level blocks and the root index, or just the root index. 
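The BlockIndexWriter javadoc above describes the bottom-up build: keep the current level's entries and, while the candidate root is still too large, write it out as intermediate chunks and index those chunks one level up. A schematic version that splits by entry count rather than by serialized chunk size, purely for illustration:

import java.util.ArrayList;
import java.util.List;

// Hedged sketch of the bottom-up index build described above. Real HBase splits by
// serialized chunk size in bytes; this sketch splits by entry count to keep it short.
public class BottomUpIndexSketch {

  /** Builds levels until the top level fits, returning them bottom (leaf) to top (root). */
  static List<List<String>> buildLevels(List<String> leafEntries, int maxEntriesPerChunk) {
    List<List<String>> levels = new ArrayList<>();
    List<String> current = leafEntries;
    levels.add(current);
    // While the would-be root is too large, write it out as fixed-size chunks and build a
    // parent level that holds one entry (the chunk's first key) per chunk.
    while (current.size() > maxEntriesPerChunk) {
      List<String> parent = new ArrayList<>();
      for (int start = 0; start < current.size(); start += maxEntriesPerChunk) {
        parent.add(current.get(start)); // first key of the chunk represents it one level up
      }
      levels.add(parent);
      current = parent;
    }
    return levels;
  }

  public static void main(String[] args) {
    List<String> dataBlockKeys = new ArrayList<>();
    for (int i = 0; i < 1000; i++) {
      dataBlockKeys.add(String.format("row-%04d", i));
    }
    // 1000 leaf entries, 16 per chunk -> 63 intermediate entries -> 4 root entries.
    List<List<String>> levels = buildLevels(dataBlockKeys, 16);
    for (int lvl = 0; lvl < levels.size(); lvl++) {
      System.out.println("level " + lvl + ": " + levels.get(lvl).size() + " entries");
    }
    System.out.println("numLevels = " + levels.size());
  }
}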
*/ public static class BlockIndexWriter implements InlineBlockWriter { /** - * While the index is being written, this represents the current block - * index referencing all leaf blocks, with one exception. If the file is - * being closed and there are not enough blocks to complete even a single - * leaf block, no leaf blocks get written and this contains the entire + * While the index is being written, this represents the current block index referencing all + * leaf blocks, with one exception. If the file is being closed and there are not enough blocks + * to complete even a single leaf block, no leaf blocks get written and this contains the entire * block index. After all levels of the index were written by - * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final - * root-level index. + * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final root-level index. */ private BlockIndexChunk rootChunk = new BlockIndexChunk(); /** - * Current leaf-level chunk. New entries referencing data blocks get added - * to this chunk until it grows large enough to be written to disk. + * Current leaf-level chunk. New entries referencing data blocks get added to this chunk until + * it grows large enough to be written to disk. */ private BlockIndexChunk curInlineChunk = new BlockIndexChunk(); /** - * The number of block index levels. This is one if there is only root - * level (even empty), two if there a leaf level and root level, and is - * higher if there are intermediate levels. This is only final after - * {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The - * initial value accounts for the root level, and will be increased to two - * as soon as we find out there is a leaf-level in - * {@link #blockWritten(long, int, int)}. + * The number of block index levels. This is one if there is only root level (even empty), two + * if there a leaf level and root level, and is higher if there are intermediate levels. This is + * only final after {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The initial + * value accounts for the root level, and will be increased to two as soon as we find out there + * is a leaf-level in {@link #blockWritten(long, int, int)}. */ private int numLevels = 1; @@ -971,9 +888,8 @@ public class HFileBlockIndex { private byte[] firstKey = null; /** - * The total number of leaf-level entries, i.e. entries referenced by - * leaf-level blocks. For the data block index this is equal to the number - * of data blocks. + * The total number of leaf-level entries, i.e. entries referenced by leaf-level blocks. For the + * data block index this is equal to the number of data blocks. */ private long totalNumEntries; @@ -1006,15 +922,14 @@ public class HFileBlockIndex { /** * Creates a multi-level block index writer. - * * @param blockWriter the block writer to use to write index blocks - * @param cacheConf used to determine when and how a block should be cached-on-write. + * @param cacheConf used to determine when and how a block should be cached-on-write. 
*/ - public BlockIndexWriter(HFileBlock.Writer blockWriter, - CacheConfig cacheConf, String nameForCaching) { + public BlockIndexWriter(HFileBlock.Writer blockWriter, CacheConfig cacheConf, + String nameForCaching) { if ((cacheConf == null) != (nameForCaching == null)) { - throw new IllegalArgumentException("Block cache and file name for " + - "caching must be both specified or both null"); + throw new IllegalArgumentException( + "Block cache and file name for " + "caching must be both specified or both null"); } this.blockWriter = blockWriter; @@ -1039,41 +954,35 @@ public class HFileBlockIndex { } /** - * Writes the root level and intermediate levels of the block index into - * the output stream, generating the tree from bottom up. Assumes that the - * leaf level has been inline-written to the disk if there is enough data - * for more than one leaf block. We iterate by breaking the current level - * of the block index, starting with the index of all leaf-level blocks, - * into chunks small enough to be written to disk, and generate its parent - * level, until we end up with a level small enough to become the root - * level. - * - * If the leaf level is not large enough, there is no inline block index - * anymore, so we only write that level of block index to disk as the root - * level. - * + * Writes the root level and intermediate levels of the block index into the output stream, + * generating the tree from bottom up. Assumes that the leaf level has been inline-written to + * the disk if there is enough data for more than one leaf block. We iterate by breaking the + * current level of the block index, starting with the index of all leaf-level blocks, into + * chunks small enough to be written to disk, and generate its parent level, until we end up + * with a level small enough to become the root level. If the leaf level is not large enough, + * there is no inline block index anymore, so we only write that level of block index to disk as + * the root level. * @param out FSDataOutputStream - * @return position at which we entered the root-level index. - * @throws IOException + * @return position at which we entered the root-level index. n */ public long writeIndexBlocks(FSDataOutputStream out) throws IOException { if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) { - throw new IOException("Trying to write a multi-level block index, " + - "but are " + curInlineChunk.getNumEntries() + " entries in the " + - "last inline chunk."); + throw new IOException("Trying to write a multi-level block index, " + "but are " + + curInlineChunk.getNumEntries() + " entries in the " + "last inline chunk."); } // We need to get mid-key metadata before we create intermediate // indexes and overwrite the root chunk. - byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() - : null; + byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() : null; if (curInlineChunk != null) { - while (rootChunk.getRootSize() > maxChunkSize + while ( + rootChunk.getRootSize() > maxChunkSize // HBASE-16288: if firstKey is larger than maxChunkSize we will loop indefinitely && rootChunk.getNumEntries() > minIndexNumEntries // Sanity check. 
We will not hit this (minIndexNumEntries ^ 16) blocks can be addressed - && numLevels < 16) { + && numLevels < 16 + ) { rootChunk = writeIntermediateLevel(out, rootChunk); numLevels += 1; } @@ -1083,84 +992,67 @@ public class HFileBlockIndex { long rootLevelIndexPos = out.getPos(); { - DataOutput blockStream = - blockWriter.startWriting(BlockType.ROOT_INDEX); + DataOutput blockStream = blockWriter.startWriting(BlockType.ROOT_INDEX); rootChunk.writeRoot(blockStream); - if (midKeyMetadata != null) - blockStream.write(midKeyMetadata); + if (midKeyMetadata != null) blockStream.write(midKeyMetadata); blockWriter.writeHeaderAndData(out); if (cacheConf != null) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cache.cacheBlock(new BlockCacheKey(nameForCaching, rootLevelIndexPos, true, - blockForCaching.getBlockType()), blockForCaching); + blockForCaching.getBlockType()), blockForCaching); }); } } // Add root index block size totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader(); - totalBlockUncompressedSize += - blockWriter.getUncompressedSizeWithoutHeader(); + totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); if (LOG.isTraceEnabled()) { LOG.trace("Wrote a " + numLevels + "-level index with root level at pos " - + rootLevelIndexPos + ", " + rootChunk.getNumEntries() - + " root-level entries, " + totalNumEntries + " total entries, " - + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + - " on-disk size, " - + StringUtils.humanReadableInt(totalBlockUncompressedSize) + - " total uncompressed size."); + + rootLevelIndexPos + ", " + rootChunk.getNumEntries() + " root-level entries, " + + totalNumEntries + " total entries, " + + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + " on-disk size, " + + StringUtils.humanReadableInt(totalBlockUncompressedSize) + " total uncompressed size."); } return rootLevelIndexPos; } /** - * Writes the block index data as a single level only. Does not do any - * block framing. - * - * @param out the buffered output stream to write the index to. Typically a - * stream writing into an {@link HFile} block. - * @param description a short description of the index being written. Used - * in a log message. - * @throws IOException + * Writes the block index data as a single level only. Does not do any block framing. + * @param out the buffered output stream to write the index to. Typically a stream + * writing into an {@link HFile} block. + * @param description a short description of the index being written. Used in a log message. 
n */ - public void writeSingleLevelIndex(DataOutput out, String description) - throws IOException { + public void writeSingleLevelIndex(DataOutput out, String description) throws IOException { expectNumLevels(1); - if (!singleLevelOnly) - throw new IOException("Single-level mode is turned off"); + if (!singleLevelOnly) throw new IOException("Single-level mode is turned off"); if (rootChunk.getNumEntries() > 0) - throw new IOException("Root-level entries already added in " + - "single-level mode"); + throw new IOException("Root-level entries already added in " + "single-level mode"); rootChunk = curInlineChunk; curInlineChunk = new BlockIndexChunk(); if (LOG.isTraceEnabled()) { - LOG.trace("Wrote a single-level " + description + " index with " - + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize() - + " bytes"); + LOG.trace("Wrote a single-level " + description + " index with " + rootChunk.getNumEntries() + + " entries, " + rootChunk.getRootSize() + " bytes"); } rootChunk.writeRoot(out); } /** - * Split the current level of the block index into intermediate index - * blocks of permitted size and write those blocks to disk. Return the next - * level of the block index referencing those intermediate-level blocks. - * - * @param out - * @param currentLevel the current level of the block index, such as the a - * chunk referencing all leaf-level index blocks - * @return the parent level block index, which becomes the root index after - * a few (usually zero) iterations - * @throws IOException + * Split the current level of the block index into intermediate index blocks of permitted size + * and write those blocks to disk. Return the next level of the block index referencing those + * intermediate-level blocks. n * @param currentLevel the current level of the block index, such + * as the a chunk referencing all leaf-level index blocks + * @return the parent level block index, which becomes the root index after a few (usually zero) + * iterations n */ private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out, - BlockIndexChunk currentLevel) throws IOException { + BlockIndexChunk currentLevel) throws IOException { // Entries referencing intermediate-level blocks we are about to create. BlockIndexChunk parent = new BlockIndexChunk(); @@ -1168,8 +1060,8 @@ public class HFileBlockIndex { BlockIndexChunk curChunk = new BlockIndexChunk(); for (int i = 0; i < currentLevel.getNumEntries(); ++i) { - curChunk.add(currentLevel.getBlockKey(i), - currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i)); + curChunk.add(currentLevel.getBlockKey(i), currentLevel.getBlockOffset(i), + currentLevel.getOnDiskDataSize(i)); // HBASE-16288: We have to have at least minIndexNumEntries(16) items in the index so that // we won't end up with too-many levels for a index with very large rowKeys. 
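The writeIntermediateLevel hunk above walks the entries of the current level and closes a chunk once it grows past maxChunkSize, with the HBASE-16288 floor of minIndexNumEntries entries so that a handful of very large keys cannot inflate the number of levels. A sketch of that split with illustrative types (the key-length vint is omitted from the size accounting for brevity):

    import java.util.ArrayList;
    import java.util.List;

    final class LevelSplitSketch {
      static final class Entry {
        final byte[] firstKey; final long blockOffset; final int onDiskDataSize;
        Entry(byte[] firstKey, long blockOffset, int onDiskDataSize) {
          this.firstKey = firstKey;
          this.blockOffset = blockOffset;
          this.onDiskDataSize = onDiskDataSize;
        }
      }

      static List<List<Entry>> splitIntoChunks(List<Entry> level, int maxChunkSize,
          int minEntries) {
        List<List<Entry>> closedChunks = new ArrayList<>();
        List<Entry> current = new ArrayList<>();
        int currentRootSize = 0;
        for (Entry e : level) {
          current.add(e);
          // root-format cost per entry: 8-byte offset + 4-byte size + key bytes
          currentRootSize += Long.BYTES + Integer.BYTES + e.firstKey.length;
          if (currentRootSize >= maxChunkSize && current.size() >= minEntries) {
            closedChunks.add(current); // the real writer emits this as an INTERMEDIATE_INDEX block
            current = new ArrayList<>();
            currentRootSize = 0;
          }
        }
        if (!current.isEmpty()) {
          closedChunks.add(current); // trailing partial chunk
        }
        return closedChunks;
      }
    }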
Also, if the @@ -1186,11 +1078,10 @@ public class HFileBlockIndex { return parent; } - private void writeIntermediateBlock(FSDataOutputStream out, - BlockIndexChunk parent, BlockIndexChunk curChunk) throws IOException { + private void writeIntermediateBlock(FSDataOutputStream out, BlockIndexChunk parent, + BlockIndexChunk curChunk) throws IOException { long beginOffset = out.getPos(); - DataOutputStream dos = blockWriter.startWriting( - BlockType.INTERMEDIATE_INDEX); + DataOutputStream dos = blockWriter.startWriting(BlockType.INTERMEDIATE_INDEX); curChunk.writeNonRoot(dos); byte[] curFirstKey = curChunk.getBlockKey(0); blockWriter.writeHeaderAndData(out); @@ -1199,23 +1090,21 @@ public class HFileBlockIndex { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cache.cacheBlock( - new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()), - blockForCaching); + new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()), + blockForCaching); }); } // Add intermediate index block size totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader(); - totalBlockUncompressedSize += - blockWriter.getUncompressedSizeWithoutHeader(); + totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); // OFFSET is the beginning offset the chunk of block index entries. // SIZE is the total byte size of the chunk of block index entries // + the secondary index size // FIRST_KEY is the first key in the chunk of block index // entries. - parent.add(curFirstKey, beginOffset, - blockWriter.getOnDiskSizeWithHeader()); + parent.add(curFirstKey, beginOffset, blockWriter.getOnDiskSizeWithHeader()); // clear current block index chunk curChunk.clear(); @@ -1238,15 +1127,15 @@ public class HFileBlockIndex { private void expectNumLevels(int expectedNumLevels) { if (numLevels != expectedNumLevels) { - throw new IllegalStateException("Number of block index levels is " - + numLevels + "but is expected to be " + expectedNumLevels); + throw new IllegalStateException("Number of block index levels is " + numLevels + + "but is expected to be " + expectedNumLevels); } } /** - * Whether there is an inline block ready to be written. In general, we - * write an leaf-level index block as an inline block as soon as its size - * as serialized in the non-root format reaches a certain threshold. + * Whether there is an inline block ready to be written. In general, we write an leaf-level + * index block as an inline block as soon as its size as serialized in the non-root format + * reaches a certain threshold. */ @Override public boolean shouldWriteBlock(boolean closing) { @@ -1255,8 +1144,8 @@ public class HFileBlockIndex { } if (curInlineChunk == null) { - throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " + - "called with closing=true and then called again?"); + throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " + + "called with closing=true and then called again?"); } if (curInlineChunk.getNumEntries() == 0) { @@ -1271,7 +1160,7 @@ public class HFileBlockIndex { expectNumLevels(1); rootChunk = curInlineChunk; - curInlineChunk = null; // Disallow adding any more index entries. + curInlineChunk = null; // Disallow adding any more index entries. return false; } @@ -1282,15 +1171,12 @@ public class HFileBlockIndex { } /** - * Write out the current inline index block. 
Inline blocks are non-root - * blocks, so the non-root index format is used. - * - * @param out + * Write out the current inline index block. Inline blocks are non-root blocks, so the non-root + * index format is used. n */ @Override public void writeInlineBlock(DataOutput out) throws IOException { - if (singleLevelOnly) - throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); + if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); // Write the inline block index to the output stream in the non-root // index block format. @@ -1305,8 +1191,8 @@ public class HFileBlockIndex { } /** - * Called after an inline block has been written so that we can add an - * entry referring to that block to the parent-level index. + * Called after an inline block has been written so that we can add an entry referring to that + * block to the parent-level index. */ @Override public void blockWritten(long offset, int onDiskSize, int uncompressedSize) { @@ -1314,13 +1200,12 @@ public class HFileBlockIndex { totalBlockOnDiskSize += onDiskSize; totalBlockUncompressedSize += uncompressedSize; - if (singleLevelOnly) - throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); + if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); if (firstKey == null) { - throw new IllegalStateException("Trying to add second-level index " + - "entry with offset=" + offset + " and onDiskSize=" + onDiskSize + - "but the first key was not set in writeInlineBlock"); + throw new IllegalStateException( + "Trying to add second-level index " + "entry with offset=" + offset + " and onDiskSize=" + + onDiskSize + "but the first key was not set in writeInlineBlock"); } if (rootChunk.getNumEntries() == 0) { @@ -1341,14 +1226,13 @@ public class HFileBlockIndex { } /** - * Add one index entry to the current leaf-level block. When the leaf-level - * block gets large enough, it will be flushed to disk as an inline block. - * - * @param firstKey the first key of the data block - * @param blockOffset the offset of the data block - * @param blockDataSize the on-disk size of the data block ({@link HFile} - * format version 2), or the uncompressed size of the data block ( - * {@link HFile} format version 1). + * Add one index entry to the current leaf-level block. When the leaf-level block gets large + * enough, it will be flushed to disk as an inline block. + * @param firstKey the first key of the data block + * @param blockOffset the offset of the data block + * @param blockDataSize the on-disk size of the data block ({@link HFile} format version 2), or + * the uncompressed size of the data block ( {@link HFile} format version + * 1). */ public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) { curInlineChunk.add(firstKey, blockOffset, blockDataSize); @@ -1360,16 +1244,15 @@ public class HFileBlockIndex { */ public void ensureSingleLevel() throws IOException { if (numLevels > 1) { - throw new IOException ("Wrote a " + numLevels + "-level index with " + - rootChunk.getNumEntries() + " root-level entries, but " + - "this is expected to be a single-level block index."); + throw new IOException( + "Wrote a " + numLevels + "-level index with " + rootChunk.getNumEntries() + + " root-level entries, but " + "this is expected to be a single-level block index."); } } /** - * @return true if we are using cache-on-write. This is configured by the - * caller of the constructor by either passing a valid block cache - * or null. 
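The three overrides above (shouldWriteBlock, writeInlineBlock, blockWritten) form the inline-block handshake: the HFile writer asks whether a leaf chunk is ready, lets it serialize in the non-root format, then reports where the block landed so an entry can be added to the parent chunk. A schematic driver, using a local interface that mirrors those three calls rather than HBase's InlineBlockWriter, and with the measured block sizes stubbed:

    import java.io.DataOutput;
    import java.io.IOException;

    // Local mirror of the three calls used here; names and constants are illustrative.
    interface LeafIndexBlockWriter {
      boolean shouldWriteBlock(boolean closing);
      void writeInlineBlock(DataOutput out) throws IOException;
      void blockWritten(long offset, int onDiskSize, int uncompressedSize);
    }

    final class InlineHandshakeSketch {
      /** Returns the new write position after optionally flushing one inline index block. */
      static long flushIfReady(LeafIndexBlockWriter w, DataOutput out, long pos, boolean closing)
          throws IOException {
        if (!w.shouldWriteBlock(closing)) {
          return pos;                                  // pending chunk not big enough yet
        }
        w.writeInlineBlock(out);                       // serialize the pending leaf chunk
        int onDiskSize = 4096;                         // stub: the real writer measures the block
        int uncompressedSize = 4096;                   // stub as well
        w.blockWritten(pos, onDiskSize, uncompressedSize); // lets the index add a parent entry
        return pos + onDiskSize;
      }
    }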
+ * @return true if we are using cache-on-write. This is configured by the caller of the + * constructor by either passing a valid block cache or null. */ @Override public boolean getCacheOnWrite() { @@ -1377,9 +1260,8 @@ public class HFileBlockIndex { } /** - * The total uncompressed size of the root index block, intermediate-level - * index blocks, and leaf-level index blocks. - * + * The total uncompressed size of the root index block, intermediate-level index blocks, and + * leaf-level index blocks. * @return the total uncompressed size of all index blocks */ public long getTotalUncompressedSize() { @@ -1389,9 +1271,8 @@ public class HFileBlockIndex { } /** - * A single chunk of the block index in the process of writing. The data in - * this chunk can become a leaf-level, intermediate-level, or root index - * block. + * A single chunk of the block index in the process of writing. The data in this chunk can become + * a leaf-level, intermediate-level, or root index block. */ static class BlockIndexChunk { @@ -1405,16 +1286,16 @@ public class HFileBlockIndex { private final List onDiskDataSizes = new ArrayList<>(); /** - * The cumulative number of sub-entries, i.e. entries on deeper-level block - * index entries. numSubEntriesAt[i] is the number of sub-entries in the - * blocks corresponding to this chunk's entries #0 through #i inclusively. + * The cumulative number of sub-entries, i.e. entries on deeper-level block index entries. + * numSubEntriesAt[i] is the number of sub-entries in the blocks corresponding to this chunk's + * entries #0 through #i inclusively. */ private final List numSubEntriesAt = new ArrayList<>(); /** - * The offset of the next entry to be added, relative to the end of the - * "secondary index" in the "non-root" format representation of this index - * chunk. This is the next value to be added to the secondary index. + * The offset of the next entry to be added, relative to the end of the "secondary index" in the + * "non-root" format representation of this index chunk. This is the next value to be added to + * the secondary index. */ private int curTotalNonRootEntrySize = 0; @@ -1424,34 +1305,29 @@ public class HFileBlockIndex { private int curTotalRootSize = 0; /** - * The "secondary index" used for binary search over variable-length - * records in a "non-root" format block. These offsets are relative to the - * end of this secondary index. + * The "secondary index" used for binary search over variable-length records in a "non-root" + * format block. These offsets are relative to the end of this secondary index. */ private final List secondaryIndexOffsetMarks = new ArrayList<>(); /** * Adds a new entry to this block index chunk. - * - * @param firstKey the first key in the block pointed to by this entry - * @param blockOffset the offset of the next-level block pointed to by this - * entry - * @param onDiskDataSize the on-disk data of the block pointed to by this - * entry, including header size - * @param curTotalNumSubEntries if this chunk is the root index chunk under - * construction, this specifies the current total number of - * sub-entries in all leaf-level chunks, including the one - * corresponding to the second-level entry being added. 
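BlockIndexChunk.add, reformatted in the hunks that follow, is pure bookkeeping: it records the entry and maintains two running sizes, one for the non-root on-disk format and one for the root format, plus the relative-offset marks that later become the secondary index. A simplified restatement with plain collections (the vint helper approximates WritableUtils.getVIntSize for non-negative lengths):

    import java.util.ArrayList;
    import java.util.List;

    final class IndexChunkSketch {
      // 8-byte block offset plus 4-byte on-disk size stored before each key in non-root format
      static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Long.BYTES + Integer.BYTES;

      private final List<byte[]> blockKeys = new ArrayList<>();
      private final List<Long> blockOffsets = new ArrayList<>();
      private final List<Integer> onDiskDataSizes = new ArrayList<>();
      private final List<Integer> secondaryIndexOffsetMarks = new ArrayList<>();
      private int curTotalNonRootEntrySize = 0; // bytes the entries occupy in non-root format
      private int curTotalRootSize = 0;         // bytes the entries occupy in root format

      void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
        // The secondary index stores, per entry, its offset relative to the end of that index,
        // which is exactly the running total of entry bytes added so far.
        secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize);
        curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD + firstKey.length;
        curTotalRootSize +=
          Long.BYTES + Integer.BYTES + vIntSize(firstKey.length) + firstKey.length;
        blockKeys.add(firstKey);
        blockOffsets.add(blockOffset);
        onDiskDataSizes.add(onDiskDataSize);
      }

      int getNonRootSize() {
        return Integer.BYTES                            // entry count
            + Integer.BYTES * (blockKeys.size() + 1)    // secondary index
            + curTotalNonRootEntrySize;                 // the entries themselves
      }

      private static int vIntSize(int len) {
        return len <= 127 ? 1 : len <= 0xFF ? 2 : len <= 0xFFFF ? 3 : len <= 0xFFFFFF ? 4 : 5;
      }
    }

getNonRootSize is then just the entry count, the (n + 1)-slot secondary index, and the accumulated entry bytes, which is what the writer compares against its chunk-size limit when deciding to flush an inline block.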
+ * @param firstKey the first key in the block pointed to by this entry + * @param blockOffset the offset of the next-level block pointed to by this entry + * @param onDiskDataSize the on-disk data of the block pointed to by this entry, + * including header size + * @param curTotalNumSubEntries if this chunk is the root index chunk under construction, this + * specifies the current total number of sub-entries in all + * leaf-level chunks, including the one corresponding to the + * second-level entry being added. */ - void add(byte[] firstKey, long blockOffset, int onDiskDataSize, - long curTotalNumSubEntries) { + void add(byte[] firstKey, long blockOffset, int onDiskDataSize, long curTotalNumSubEntries) { // Record the offset for the secondary index secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize); - curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD - + firstKey.length; + curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD + firstKey.length; curTotalRootSize += Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT - + WritableUtils.getVIntSize(firstKey.length) + firstKey.length; + + WritableUtils.getVIntSize(firstKey.length) + firstKey.length; blockKeys.add(firstKey); blockOffsets.add(blockOffset); @@ -1462,17 +1338,15 @@ public class HFileBlockIndex { // Make sure the parallel arrays are in sync. if (numSubEntriesAt.size() != blockKeys.size()) { - throw new IllegalStateException("Only have key/value count " + - "stats for " + numSubEntriesAt.size() + " block index " + - "entries out of " + blockKeys.size()); + throw new IllegalStateException("Only have key/value count " + "stats for " + + numSubEntriesAt.size() + " block index " + "entries out of " + blockKeys.size()); } } } /** - * The same as {@link #add(byte[], long, int, long)} but does not take the - * key/value into account. Used for single-level indexes. - * + * The same as {@link #add(byte[], long, int, long)} but does not take the key/value into + * account. Used for single-level indexes. * @see #add(byte[], long, int, long) */ public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) { @@ -1490,21 +1364,15 @@ public class HFileBlockIndex { } /** - * Finds the entry corresponding to the deeper-level index block containing - * the given deeper-level entry (a "sub-entry"), assuming a global 0-based - * ordering of sub-entries. - * + * Finds the entry corresponding to the deeper-level index block containing the given + * deeper-level entry (a "sub-entry"), assuming a global 0-based ordering of sub-entries. *

          - * Implementation note. We are looking for i such that - * numSubEntriesAt[i - 1] <= k < numSubEntriesAt[i], because a deeper-level - * block #i (0-based) contains sub-entries # numSubEntriesAt[i - 1]'th - * through numSubEntriesAt[i] - 1, assuming a global 0-based ordering of - * sub-entries. i is by definition the insertion point of k in - * numSubEntriesAt. - * + * Implementation note. We are looking for i such that numSubEntriesAt[i - 1] <= k < + * numSubEntriesAt[i], because a deeper-level block #i (0-based) contains sub-entries # + * numSubEntriesAt[i - 1]'th through numSubEntriesAt[i] - 1, assuming a global 0-based ordering + * of sub-entries. i is by definition the insertion point of k in numSubEntriesAt. * @param k sub-entry index, from 0 to the total number sub-entries - 1 - * @return the 0-based index of the entry corresponding to the given - * sub-entry + * @return the 0-based index of the entry corresponding to the given sub-entry */ public int getEntryBySubEntry(long k) { // We define mid-key as the key corresponding to k'th sub-entry @@ -1515,24 +1383,20 @@ public class HFileBlockIndex { // Exact match: cumulativeWeight[i] = k. This means chunks #0 through // #i contain exactly k sub-entries, and the sub-entry #k (0-based) // is in the (i + 1)'th chunk. - if (i >= 0) - return i + 1; + if (i >= 0) return i + 1; // Inexact match. Return the insertion point. return -i - 1; } /** - * Used when writing the root block index of a multi-level block index. - * Serializes additional information allowing to efficiently identify the - * mid-key. - * + * Used when writing the root block index of a multi-level block index. Serializes additional + * information allowing to efficiently identify the mid-key. * @return a few serialized fields for finding the mid-key * @throws IOException if could not create metadata for computing mid-key */ public byte[] getMidKeyMetadata() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream( - MID_KEY_METADATA_SIZE); + ByteArrayOutputStream baos = new ByteArrayOutputStream(MID_KEY_METADATA_SIZE); DataOutputStream baosDos = new DataOutputStream(baos); long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1); if (totalNumSubEntries == 0) { @@ -1544,23 +1408,20 @@ public class HFileBlockIndex { baosDos.writeLong(blockOffsets.get(midKeyEntry)); baosDos.writeInt(onDiskDataSizes.get(midKeyEntry)); - long numSubEntriesBefore = midKeyEntry > 0 - ? numSubEntriesAt.get(midKeyEntry - 1) : 0; + long numSubEntriesBefore = midKeyEntry > 0 ? 
numSubEntriesAt.get(midKeyEntry - 1) : 0; long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore; - if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) - { + if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) { throw new IOException("Could not identify mid-key index within the " - + "leaf-level block containing mid-key: out of range (" - + subEntryWithinEntry + ", numSubEntriesBefore=" - + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry - + ")"); + + "leaf-level block containing mid-key: out of range (" + subEntryWithinEntry + + ", numSubEntriesBefore=" + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry + + ")"); } baosDos.writeInt((int) subEntryWithinEntry); if (baosDos.size() != MID_KEY_METADATA_SIZE) { - throw new IOException("Could not write mid-key metadata: size=" + - baosDos.size() + ", correct size: " + MID_KEY_METADATA_SIZE); + throw new IOException("Could not write mid-key metadata: size=" + baosDos.size() + + ", correct size: " + MID_KEY_METADATA_SIZE); } // Close just to be good citizens, although this has no effect. @@ -1570,22 +1431,17 @@ public class HFileBlockIndex { } /** - * Writes the block index chunk in the non-root index block format. This - * format contains the number of entries, an index of integer offsets - * for quick binary search on variable-length records, and tuples of - * block offset, on-disk block size, and the first key for each entry. - * - * @param out - * @throws IOException + * Writes the block index chunk in the non-root index block format. This format contains the + * number of entries, an index of integer offsets for quick binary search on variable-length + * records, and tuples of block offset, on-disk block size, and the first key for each entry. nn */ void writeNonRoot(DataOutput out) throws IOException { // The number of entries in the block. out.writeInt(blockKeys.size()); if (secondaryIndexOffsetMarks.size() != blockKeys.size()) { - throw new IOException("Corrupted block index chunk writer: " + - blockKeys.size() + " entries but " + - secondaryIndexOffsetMarks.size() + " secondary index items"); + throw new IOException("Corrupted block index chunk writer: " + blockKeys.size() + + " entries but " + secondaryIndexOffsetMarks.size() + " secondary index items"); } // For each entry, write a "secondary index" of relative offsets to the @@ -1607,24 +1463,20 @@ public class HFileBlockIndex { } /** - * @return the size of this chunk if stored in the non-root index block - * format + * @return the size of this chunk if stored in the non-root index block format */ int getNonRootSize() { - return Bytes.SIZEOF_INT // Number of entries - + Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index - + curTotalNonRootEntrySize; // All entries + return Bytes.SIZEOF_INT // Number of entries + + Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index + + curTotalNonRootEntrySize; // All entries } /** - * Writes this chunk into the given output stream in the root block index - * format. This format is similar to the {@link HFile} version 1 block - * index format, except that we store on-disk size of the block instead of - * its uncompressed size. - * - * @param out the data output stream to write the block index to. Typically - * a stream writing into an {@link HFile} block. - * @throws IOException + * Writes this chunk into the given output stream in the root block index format. 
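getEntryBySubEntry and getMidKeyMetadata, rewrapped above, cooperate to locate the mid-key without materializing it: a binary search over the cumulative sub-entry counts picks the leaf block, and 16 bytes of metadata (block offset, on-disk size, position within the leaf) are appended after the root index. A standalone sketch with illustrative names and list-based state:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    final class MidKeySketch {

      /** 0-based root-entry index whose leaf block contains sub-entry k. */
      static int entryBySubEntry(List<Long> numSubEntriesAt, long k) {
        int i = Collections.binarySearch(numSubEntriesAt, k);
        // exact match: chunks #0..#i hold exactly k sub-entries, so #k lives in chunk i + 1;
        // otherwise the insertion point is the answer
        return i >= 0 ? i + 1 : -i - 1;
      }

      static byte[] midKeyMetadata(List<Long> numSubEntriesAt, List<Long> blockOffsets,
          List<Integer> onDiskDataSizes) throws IOException {
        long totalSubEntries = numSubEntriesAt.get(numSubEntriesAt.size() - 1);
        if (totalSubEntries == 0) {
          throw new IllegalStateException("mid-key metadata is only defined for the root chunk");
        }
        long midKeySubEntry = (totalSubEntries - 1) / 2;      // middle sub-entry overall
        int midKeyEntry = entryBySubEntry(numSubEntriesAt, midKeySubEntry);
        long before = midKeyEntry > 0 ? numSubEntriesAt.get(midKeyEntry - 1) : 0;

        ByteArrayOutputStream baos = new ByteArrayOutputStream(16);
        DataOutputStream dos = new DataOutputStream(baos);
        dos.writeLong(blockOffsets.get(midKeyEntry));         // where the leaf block starts
        dos.writeInt(onDiskDataSizes.get(midKeyEntry));       // how big it is on disk
        dos.writeInt((int) (midKeySubEntry - before));        // mid-key's slot inside the leaf
        return baos.toByteArray();                            // 16 bytes, MID_KEY_METADATA_SIZE
      }
    }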
This format is + * similar to the {@link HFile} version 1 block index format, except that we store on-disk size + * of the block instead of its uncompressed size. + * @param out the data output stream to write the block index to. Typically a stream writing + * into an {@link HFile} block. n */ void writeRoot(DataOutput out) throws IOException { for (int i = 0; i < blockKeys.size(); ++i) { @@ -1661,8 +1513,7 @@ public class HFileBlockIndex { } public long getCumulativeNumKV(int i) { - if (i < 0) - return 0; + if (i < 0) return 0; return numSubEntriesAt.get(i); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 6a1611de8dc..cf253e20bd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; @@ -27,9 +28,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Controls what kind of data block encoding is used. If data block encoding is - * not set or the given block is not a data block (encoded or not), methods - * should just return the unmodified block. + * Controls what kind of data block encoding is used. If data block encoding is not set or the given + * block is not a data block (encoded or not), methods should just return the unmodified block. */ @InterfaceAudience.Private public interface HFileDataBlockEncoder { @@ -38,36 +38,24 @@ public interface HFileDataBlockEncoder { /** * Starts encoding for a block of KeyValues. Call - * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} - * to finish encoding of a block. 
- * @param encodingCtx - * @param out - * @throws IOException + * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} to + * finish encoding of a block. nnn */ void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException; + throws IOException; /** - * Encodes a KeyValue. - * @param cell - * @param encodingCtx - * @param out - * @throws IOException + * Encodes a KeyValue. nnnn */ void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException; + throws IOException; /** * Ends encoding for a block of KeyValues. Gives a chance for the encoder to do the finishing - * stuff for the encoded block. It must be called at the end of block encoding. - * @param encodingCtx - * @param out - * @param uncompressedBytesWithHeader - * @param blockType - * @throws IOException + * stuff for the encoded block. It must be called at the end of block encoding. nnnnn */ void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, - byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException; + byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException; /** * Decides whether we should use a scanner over encoded blocks. @@ -80,40 +68,35 @@ public interface HFileDataBlockEncoder { * @param writer writer for a given HFile * @exception IOException on disk problems */ - void saveMetadata(HFile.Writer writer) - throws IOException; + void saveMetadata(HFile.Writer writer) throws IOException; /** @return the data block encoding */ DataBlockEncoding getDataBlockEncoding(); /** - * @return the effective in-cache data block encoding, taking into account - * whether we are doing a compaction. + * @return the effective in-cache data block encoding, taking into account whether we are doing a + * compaction. */ public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction); /** - * Create an encoder specific encoding context object for writing. And the - * encoding context should also perform compression if compressionAlgorithm is - * valid. - * - * @param conf store configuration + * Create an encoder specific encoding context object for writing. And the encoding context should + * also perform compression if compressionAlgorithm is valid. + * @param conf store configuration * @param headerBytes header bytes * @param fileContext HFile meta data * @return a new {@link HFileBlockEncodingContext} object */ HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, byte[] headerBytes, - HFileContext fileContext); + HFileContext fileContext); /** - * create a encoder specific decoding context for reading. And the - * decoding context should also do decompression if compressionAlgorithm - * is valid. - * - * @param conf store configuration + * create a encoder specific decoding context for reading. And the decoding context should also do + * decompression if compressionAlgorithm is valid. 
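The interface above defines a three-phase contract: startBlockEncoding once per block, encode once per cell, endBlockEncoding to let the encoder finish the block. A hypothetical caller wiring those calls together; the class, method, and argument names here are illustrative, and the real call sites live in the HFile write path:

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
    import org.apache.hadoop.hbase.io.hfile.BlockType;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;

    final class DataBlockEncodingUsageSketch {
      static void encodeBlock(HFileDataBlockEncoder encoder, HFileBlockEncodingContext ctx,
          DataOutputStream out, List<Cell> cells, byte[] uncompressedBytesWithHeader)
          throws IOException {
        encoder.startBlockEncoding(ctx, out);        // open the encoded block
        for (Cell cell : cells) {
          encoder.encode(cell, ctx, out);            // append one KeyValue at a time
        }
        // give the encoder its chance to do the finishing work before the block is written out
        encoder.endBlockEncoding(ctx, out, uncompressedBytesWithHeader, BlockType.DATA);
      }
    }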
+ * @param conf store configuration * @param fileContext - HFile meta data * @return a new {@link HFileBlockDecodingContext} object */ HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, - HFileContext fileContext); + HFileContext fileContext); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index d2ce77245c9..6505e3d33fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -1,24 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; @@ -31,8 +31,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Do different kinds of data block encoding according to column family - * options. + * Do different kinds of data block encoding according to column family options. */ @InterfaceAudience.Private public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @@ -46,8 +45,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { this.encoding = encoding != null ? 
encoding : DataBlockEncoding.NONE; } - public static HFileDataBlockEncoder createFromFileInfo( - HFileInfo fileInfo) throws IOException { + public static HFileDataBlockEncoder createFromFileInfo(HFileInfo fileInfo) throws IOException { DataBlockEncoding encoding = DataBlockEncoding.NONE; byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING); if (dataBlockEncodingType != null) { @@ -55,8 +53,8 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { try { encoding = DataBlockEncoding.valueOf(dataBlockEncodingStr); } catch (IllegalArgumentException ex) { - throw new IOException("Invalid data block encoding type in file info: " - + dataBlockEncodingStr, ex); + throw new IOException( + "Invalid data block encoding type in file info: " + dataBlockEncodingStr, ex); } } @@ -93,7 +91,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @Override public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException { + throws IOException { this.encoding.getEncoder().encode(cell, encodingCtx, out); } @@ -102,7 +100,6 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { return encoding != DataBlockEncoding.NONE; } - @Override public String toString() { return getClass().getSimpleName() + "(encoding=" + encoding + ")"; @@ -110,7 +107,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @Override public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, - byte[] dummyHeader, HFileContext fileContext) { + byte[] dummyHeader, HFileContext fileContext) { DataBlockEncoder encoder = encoding.getEncoder(); if (encoder != null) { return encoder.newDataBlockEncodingContext(conf, encoding, dummyHeader, fileContext); @@ -120,7 +117,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @Override public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, - HFileContext fileContext) { + HFileContext fileContext) { DataBlockEncoder encoder = encoding.getEncoder(); if (encoder != null) { return encoder.newDataBlockDecodingContext(conf, fileContext); @@ -130,7 +127,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @Override public void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException { + throws IOException { if (this.encoding != null && this.encoding != DataBlockEncoding.NONE) { this.encoding.getEncoder().startBlockEncoding(encodingCtx, out); } @@ -138,7 +135,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @Override public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, - byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException { + byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException { this.encoding.getEncoder().endBlockEncoding(encodingCtx, out, uncompressedBytesWithHeader); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 3552d6eae9b..c56eb323493 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,8 +58,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; * key seen, comparator used writing the file, etc. Clients can add their own attributes via * {@link #append(byte[], byte[], boolean)} and they'll be persisted and available at read time. * Reader creates the HFileInfo on open by reading the tail of the HFile. The parse of the HFile - * trailer also creates a {@link HFileContext}, a read-only data structure that includes bulk of - * the HFileInfo and extras that is safe to pass around when working on HFiles. + * trailer also creates a {@link HFileContext}, a read-only data structure that includes bulk of the + * HFileInfo and extras that is safe to pass around when working on HFiles. * @see HFileContext */ @InterfaceAudience.Private @@ -70,13 +69,13 @@ public class HFileInfo implements SortedMap { static final String RESERVED_PREFIX = "hfile."; static final byte[] RESERVED_PREFIX_BYTES = Bytes.toBytes(RESERVED_PREFIX); - static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); - static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN"); - static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN"); - static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS"); - static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); - public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); - private final SortedMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + static final byte[] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); + static final byte[] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN"); + static final byte[] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN"); + static final byte[] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS"); + static final byte[] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); + public static final byte[] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); + private final SortedMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * We can read files whose major version is v2 IFF their minor version is at least 3. @@ -98,15 +97,15 @@ public class HFileInfo implements SortedMap { private boolean decodeMemstoreTS = false; /** - * Blocks read from the load-on-open section, excluding data root index, meta - * index, and file info. + * Blocks read from the load-on-open section, excluding data root index, meta index, and file + * info. */ private List loadOnOpenBlocks = new ArrayList<>(); /** * The iterator will track all blocks in load-on-open section, since we use the - * {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} to manage the ByteBuffers in block now, - * so we must ensure that deallocate all ByteBuffers in the end. + * {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} to manage the ByteBuffers in block now, so + * we must ensure that deallocate all ByteBuffers in the end. */ private HFileBlock.BlockIterator blockIter; @@ -125,24 +124,21 @@ public class HFileInfo implements SortedMap { } /** - * Append the given key/value pair to the file info, optionally checking the - * key prefix. 
- * - * @param k key to add - * @param v value to add - * @param checkPrefix whether to check that the provided key does not start - * with the reserved prefix + * Append the given key/value pair to the file info, optionally checking the key prefix. + * @param k key to add + * @param v value to add + * @param checkPrefix whether to check that the provided key does not start with the reserved + * prefix * @return this file info object * @throws IOException if the key or value is invalid */ - public HFileInfo append(final byte[] k, final byte[] v, - final boolean checkPrefix) throws IOException { + public HFileInfo append(final byte[] k, final byte[] v, final boolean checkPrefix) + throws IOException { if (k == null || v == null) { throw new NullPointerException("Key nor value may be null"); } if (checkPrefix && isReservedFileInfoKey(k)) { - throw new IOException("Keys with a " + HFileInfo.RESERVED_PREFIX - + " are reserved"); + throw new IOException("Keys with a " + HFileInfo.RESERVED_PREFIX + " are reserved"); } put(k, v); return this; @@ -254,13 +250,12 @@ public class HFileInfo implements SortedMap { } /** - * Write out this instance on the passed in out stream. - * We write it as a protobuf. + * Write out this instance on the passed in out stream. We write it as a protobuf. * @see #read(DataInputStream) */ void write(final DataOutputStream out) throws IOException { HFileProtos.FileInfoProto.Builder builder = HFileProtos.FileInfoProto.newBuilder(); - for (Map.Entry e: this.map.entrySet()) { + for (Map.Entry e : this.map.entrySet()) { HBaseProtos.BytesBytesPair.Builder bbpBuilder = HBaseProtos.BytesBytesPair.newBuilder(); bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(e.getKey())); bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue())); @@ -271,14 +266,14 @@ public class HFileInfo implements SortedMap { } /** - * Populate this instance with what we find on the passed in in stream. - * Can deserialize protobuf of old Writables format. + * Populate this instance with what we find on the passed in in stream. Can + * deserialize protobuf of old Writables format. * @see #write(DataOutputStream) */ void read(final DataInputStream in) throws IOException { // This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code. int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; + byte[] pbuf = new byte[pblen]; if (in.markSupported()) { in.mark(pblen); } @@ -296,7 +291,7 @@ public class HFileInfo implements SortedMap { // We cannot use BufferedInputStream, it consumes more than we read from the underlying IS ByteArrayInputStream bais = new ByteArrayInputStream(pbuf); SequenceInputStream sis = new SequenceInputStream(bais, in); // Concatenate input streams - // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling + // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling // close on the wrapped streams but they should be let go after we leave this context? // I see that we keep a reference to the passed in inputstream but since we no longer // have a reference to this after we leave, we should be ok. @@ -306,10 +301,9 @@ public class HFileInfo implements SortedMap { } /** - * Now parse the old Writable format. It was a list of Map entries. Each map entry was a - * key and a value of a byte []. The old map format had a byte before each entry that held - * a code which was short for the key or value type. We know it was a byte [] so in below - * we just read and dump it. 
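HFileInfo.append, shown above, guards the "hfile." namespace: application-supplied metadata must not collide with reserved keys such as hfile.LASTKEY, while reserved entries themselves are written with checkPrefix set to false. A hypothetical usage sketch (the helper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.io.hfile.HFileInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    final class FileInfoAppendSketch {
      static void addAppMetadata(HFileInfo info) throws IOException {
        info.append(Bytes.toBytes("MY_APP_KEY"), Bytes.toBytes("some value"), true);  // accepted
        try {
          info.append(Bytes.toBytes("hfile.LASTKEY"), Bytes.toBytes("x"), true);      // rejected
        } catch (IOException expected) {
          // "Keys with a hfile. are reserved"
        }
      }
    }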
+ * Now parse the old Writable format. It was a list of Map entries. Each map entry was a key and a + * value of a byte []. The old map format had a byte before each entry that held a code which was + * short for the key or value type. We know it was a byte [] so in below we just read and dump it. */ void parseWritable(final DataInputStream in) throws IOException { // First clear the map. @@ -319,11 +313,11 @@ public class HFileInfo implements SortedMap { int entries = in.readInt(); // Then read each key/value pair for (int i = 0; i < entries; i++) { - byte [] key = Bytes.readByteArray(in); + byte[] key = Bytes.readByteArray(in); // We used to read a byte that encoded the class type. // Read and ignore it because it is always byte [] in hfile in.readByte(); - byte [] value = Bytes.readByteArray(in); + byte[] value = Bytes.readByteArray(in); this.map.put(key, value); } } @@ -334,7 +328,7 @@ public class HFileInfo implements SortedMap { */ void parsePB(final HFileProtos.FileInfoProto fip) { this.map.clear(); - for (BytesBytesPair pair: fip.getMapEntryList()) { + for (BytesBytesPair pair : fip.getMapEntryList()) { this.map.put(pair.getFirst().toByteArray(), pair.getSecond().toByteArray()); } } @@ -342,8 +336,8 @@ public class HFileInfo implements SortedMap { public void initTrailerAndContext(ReaderContext context, Configuration conf) throws IOException { try { boolean isHBaseChecksum = context.getInputStreamWrapper().shouldUseHBaseChecksum(); - trailer = FixedFileTrailer.readFromStream(context.getInputStreamWrapper() - .getStream(isHBaseChecksum), context.getFileSize()); + trailer = FixedFileTrailer.readFromStream( + context.getInputStreamWrapper().getStream(isHBaseChecksum), context.getFileSize()); Path path = context.getFilePath(); checkFileVersion(path); this.hfileContext = createHFileContext(path, trailer, conf); @@ -351,8 +345,8 @@ public class HFileInfo implements SortedMap { } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e)); - throw new CorruptHFileException("Problem reading HFile Trailer from file " - + context.getFilePath(), t); + throw new CorruptHFileException( + "Problem reading HFile Trailer from file " + context.getFilePath(), t); } } @@ -365,13 +359,13 @@ public class HFileInfo implements SortedMap { HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); // Initialize an block iterator, and parse load-on-open blocks in the following. blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), - context.getFileSize() - trailer.getTrailerSize()); + context.getFileSize() - trailer.getTrailerSize()); // Data index. We also read statistics about the block index written after // the root level. - this.dataIndexReader = - new HFileBlockIndex.CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); - dataIndexReader - .readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + this.dataIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReader( + trailer.createComparator(), trailer.getNumDataIndexLevels()); + dataIndexReader.readMultiLevelIndexRoot( + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); reader.setDataBlockIndexReader(dataIndexReader); // Meta index. 
this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); @@ -395,12 +389,10 @@ public class HFileInfo implements SortedMap { } } - private HFileContext createHFileContext(Path path, - FixedFileTrailer trailer, Configuration conf) throws IOException { - HFileContextBuilder builder = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withHFileName(path.getName()) - .withCompression(trailer.getCompressionCodec()) + private HFileContext createHFileContext(Path path, FixedFileTrailer trailer, Configuration conf) + throws IOException { + HFileContextBuilder builder = new HFileContextBuilder().withHBaseCheckSum(true) + .withHFileName(path.getName()).withCompression(trailer.getCompressionCodec()) .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName())); // Check for any key material available byte[] keyBytes = trailer.getEncryptionKey(); @@ -410,8 +402,8 @@ public class HFileInfo implements SortedMap { // Use the algorithm the key wants Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm()); if (cipher == null) { - throw new IOException("Cipher '" + key.getAlgorithm() + "' is not available" - + ", path=" + path); + throw new IOException( + "Cipher '" + key.getAlgorithm() + "' is not available" + ", path=" + path); } cryptoContext.setCipher(cipher); cryptoContext.setKey(key); @@ -422,11 +414,10 @@ public class HFileInfo implements SortedMap { } private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfileContext) - throws IOException { + throws IOException { read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); byte[] creationTimeBytes = get(HFileInfo.CREATE_TIME_TS); - hfileContext.setFileCreateTime(creationTimeBytes == null ? - 0 : Bytes.toLong(creationTimeBytes)); + hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes)); byte[] tmp = get(HFileInfo.MAX_TAGS_LEN); // max tag length is not present in the HFile means tags were not at all written to file. if (tmp != null) { @@ -442,9 +433,9 @@ public class HFileInfo implements SortedMap { } avgKeyLen = Bytes.toInt(get(HFileInfo.AVG_KEY_LEN)); avgValueLen = Bytes.toInt(get(HFileInfo.AVG_VALUE_LEN)); - byte [] keyValueFormatVersion = get(HFileWriterImpl.KEY_VALUE_VERSION); - includesMemstoreTS = keyValueFormatVersion != null && - Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE; + byte[] keyValueFormatVersion = get(HFileWriterImpl.KEY_VALUE_VERSION); + includesMemstoreTS = keyValueFormatVersion != null + && Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE; hfileContext.setIncludesMvcc(includesMemstoreTS); if (includesMemstoreTS) { decodeMemstoreTS = Bytes.toLong(get(HFileWriterImpl.MAX_MEMSTORE_TS_KEY)) > 0; @@ -465,9 +456,9 @@ public class HFileInfo implements SortedMap { return; } // We can read v3 or v2 versions of hfile. 
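The version gate referenced above (checkFileVersion, reached from initTrailerAndContext) accepts current v3 files, and v2 files only when their minor version is at least 3, as the class comment earlier states. A standalone restatement of that check; the constant name is illustrative:

    final class HFileVersionCheckSketch {
      static final int MIN_V2_MINOR_VERSION = 3; // assumed, per "minor version is at least 3"

      static void checkReadable(int major, int minor) {
        if (major == 3) {
          return;                                        // current format
        }
        if (major == 2 && minor >= MIN_V2_MINOR_VERSION) {
          return;                                        // old format, new enough to read
        }
        throw new IllegalArgumentException("Invalid HFile version: major=" + major + ", minor="
            + minor + ": expected at least major=2 and minor=" + MIN_V2_MINOR_VERSION);
      }
    }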
- throw new IllegalArgumentException("Invalid HFile version: major=" + - trailer.getMajorVersion() + ", minor=" + trailer.getMinorVersion() + ": expected at least " + - "major=2 and minor=" + MAX_MINOR_VERSION + ", path=" + path); + throw new IllegalArgumentException("Invalid HFile version: major=" + trailer.getMajorVersion() + + ", minor=" + trailer.getMinorVersion() + ": expected at least " + "major=2 and minor=" + + MAX_MINOR_VERSION + ", path=" + path); } public void close() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 98fe885de51..25627c34f51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ import org.slf4j.LoggerFactory; public class HFilePreadReader extends HFileReaderImpl { private static final Logger LOG = LoggerFactory.getLogger(HFileReaderImpl.class); - public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, - CacheConfig cacheConf, Configuration conf) throws IOException { + public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, + Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); // Prefetch file blocks upon open if requested if (cacheConf.shouldPrefetchOnOpen()) { @@ -74,8 +74,9 @@ public class HFilePreadReader extends HFileReaderImpl { LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e); } } catch (NullPointerException e) { - LOG.warn("Stream moved/closed or prefetch cancelled?" + - getPathOffsetEndStr(path, offset, end), e); + LOG.warn( + "Stream moved/closed or prefetch cancelled?" + getPathOffsetEndStr(path, offset, end), + e); } catch (Exception e) { // Other exceptions are interesting LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 01e8863ada1..842ca8389c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -138,15 +137,14 @@ public class HFilePrettyPrinter extends Configured implements Tool { } private void init() { - options.addOption("v", "verbose", false, - "Verbose output; emits file and meta data delimiters"); + options.addOption("v", "verbose", false, "Verbose output; emits file and meta data delimiters"); options.addOption("p", "printkv", false, "Print key/value pairs"); options.addOption("e", "printkey", false, "Print keys"); options.addOption("m", "printmeta", false, "Print meta data of file"); options.addOption("b", "printblocks", false, "Print block index meta data"); options.addOption("h", "printblockheaders", false, "Print block headers for each block."); options.addOption("k", "checkrow", false, - "Enable row order check; looks for out-of-order keys"); + "Enable row order check; looks for out-of-order keys"); options.addOption("a", "checkfamily", false, "Enable family check"); options.addOption("w", "seekToRow", true, "Seek to this row and print all the kvs for this row only"); @@ -157,8 +155,8 @@ public class HFilePrettyPrinter extends Configured implements Tool { OptionGroup files = new OptionGroup(); files.addOption(new Option("f", "file", true, "File to scan. Pass full-path; e.g. hdfs://a:9000/hbase/hbase:meta/12/34")); - files.addOption(new Option("r", "region", true, - "Region to scan. Pass region name; e.g. 'hbase:meta,,1'")); + files.addOption( + new Option("r", "region", true, "Region to scan. Pass region name; e.g. 'hbase:meta,,1'")); options.addOptionGroup(files); } @@ -167,8 +165,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { this.err = err; } - public boolean parseOptions(String args[]) throws ParseException, - IOException { + public boolean parseOptions(String args[]) throws ParseException, IOException { if (args.length == 0) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("hfile", options, true); @@ -211,24 +208,19 @@ public class HFilePrettyPrinter extends Configured implements Tool { Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.valueOf(hri[0])); String enc = HRegionInfo.encodeRegionName(rn); Path regionDir = new Path(tableDir, enc); - if (verbose) - out.println("region dir -> " + regionDir); - List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()), - regionDir); - if (verbose) - out.println("Number of region files found -> " - + regionFiles.size()); + if (verbose) out.println("region dir -> " + regionDir); + List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()), regionDir); + if (verbose) out.println("Number of region files found -> " + regionFiles.size()); if (verbose) { int i = 1; for (Path p : regionFiles) { - if (verbose) - out.println("Found file[" + i++ + "] -> " + p); + if (verbose) out.println("Found file[" + i++ + "] -> " + p); } } files.addAll(regionFiles); } - if(checkMobIntegrity) { + if (checkMobIntegrity) { if (verbose) { System.out.println("checkMobIntegrity is enabled"); } @@ -241,8 +233,8 @@ public class HFilePrettyPrinter extends Configured implements Tool { } /** - * Runs the command-line pretty-printer, and returns the desired command - * exit code (zero for success, non-zero for failure). + * Runs the command-line pretty-printer, and returns the desired command exit code (zero for + * success, non-zero for failure). 
*/ @Override public int run(String[] args) { @@ -351,10 +343,8 @@ public class HFilePrettyPrinter extends Configured implements Tool { */ FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, file); long fileSize = fs.getFileStatus(file).getLen(); - FixedFileTrailer trailer = - FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); - long offset = trailer.getFirstDataBlockOffset(), - max = trailer.getLastDataBlockOffset(); + FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); + long offset = trailer.getFirstDataBlockOffset(), max = trailer.getLastDataBlockOffset(); HFileBlock block; while (offset <= max) { block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false, @@ -373,8 +363,8 @@ public class HFilePrettyPrinter extends Configured implements Tool { return 0; } - private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, - HFileScanner scanner, byte[] row) throws IOException { + private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, HFileScanner scanner, + byte[] row) throws IOException { Cell pCell = null; FileSystem fs = FileSystem.get(getConf()); Set foundMobFiles = new LinkedHashSet<>(FOUND_MOB_FILES_CACHE_CAPACITY); @@ -397,9 +387,8 @@ public class HFilePrettyPrinter extends Configured implements Tool { if (printKey) { out.print("K: " + cell); if (printValue) { - out.print(" V: " - + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + out.print(" V: " + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength())); int i = 0; List tags = PrivateCellUtil.getTags(cell); for (Tag tag : tags) { @@ -411,37 +400,35 @@ public class HFilePrettyPrinter extends Configured implements Tool { // check if rows are in order if (checkRow && pCell != null) { if (CellComparator.getInstance().compareRows(pCell, cell) > 0) { - err.println("WARNING, previous row is greater then" - + " current row\n\tfilename -> " + file + "\n\tprevious -> " - + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " - + CellUtil.getCellKeyAsString(cell)); + err.println("WARNING, previous row is greater then" + " current row\n\tfilename -> " + + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " + + CellUtil.getCellKeyAsString(cell)); } } // check if families are consistent if (checkFamily) { - String fam = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()); + String fam = + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); if (!file.toString().contains(fam)) { - err.println("WARNING, filename does not match kv family," - + "\n\tfilename -> " + file + "\n\tkeyvalue -> " - + CellUtil.getCellKeyAsString(cell)); + err.println("WARNING, filename does not match kv family," + "\n\tfilename -> " + file + + "\n\tkeyvalue -> " + CellUtil.getCellKeyAsString(cell)); } if (pCell != null && CellComparator.getInstance().compareFamilies(pCell, cell) != 0) { - err.println("WARNING, previous kv has different family" - + " compared to current key\n\tfilename -> " + file - + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) - + "\n\tcurrent -> " + CellUtil.getCellKeyAsString(cell)); + err.println( + "WARNING, previous kv has different family" + " compared to current key\n\tfilename -> " + + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " + + CellUtil.getCellKeyAsString(cell)); } } // check if mob files are 
missing. if (checkMobIntegrity && MobUtils.isMobReferenceCell(cell)) { Tag tnTag = MobUtils.getTableNameTag(cell); if (tnTag == null) { - System.err.println("ERROR, wrong tag format in mob reference cell " - + CellUtil.getCellKeyAsString(cell)); + System.err.println( + "ERROR, wrong tag format in mob reference cell " + CellUtil.getCellKeyAsString(cell)); } else if (!MobUtils.hasValidMobRefCellValue(cell)) { - System.err.println("ERROR, wrong value format in mob reference cell " - + CellUtil.getCellKeyAsString(cell)); + System.err.println( + "ERROR, wrong value format in mob reference cell " + CellUtil.getCellKeyAsString(cell)); } else { TableName tn = TableName.valueOf(Tag.cloneValue(tnTag)); String mobFileName = MobUtils.getMobFileName(cell); @@ -515,42 +502,44 @@ public class HFilePrettyPrinter extends Configured implements Tool { } /** - * Format a string of the form "k1=v1, k2=v2, ..." into separate lines - * with a four-space indentation. + * Format a string of the form "k1=v1, k2=v2, ..." into separate lines with a four-space + * indentation. */ private static String asSeparateLines(String keyValueStr) { - return keyValueStr.replaceAll(", ([a-zA-Z]+=)", - ",\n" + FOUR_SPACES + "$1"); + return keyValueStr.replaceAll(", ([a-zA-Z]+=)", ",\n" + FOUR_SPACES + "$1"); } - private void printMeta(HFile.Reader reader, Map fileInfo) - throws IOException { - out.println("Block index size as per heapsize: " - + reader.indexSize()); + private void printMeta(HFile.Reader reader, Map fileInfo) throws IOException { + out.println("Block index size as per heapsize: " + reader.indexSize()); out.println(asSeparateLines(reader.toString())); - out.println("Trailer:\n " - + asSeparateLines(reader.getTrailer().toString())); + out.println("Trailer:\n " + asSeparateLines(reader.getTrailer().toString())); out.println("Fileinfo:"); for (Map.Entry e : fileInfo.entrySet()) { out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = "); - if (Bytes.equals(e.getKey(), HStoreFile.MAX_SEQ_ID_KEY) + if ( + Bytes.equals(e.getKey(), HStoreFile.MAX_SEQ_ID_KEY) || Bytes.equals(e.getKey(), HStoreFile.DELETE_FAMILY_COUNT) || Bytes.equals(e.getKey(), HStoreFile.EARLIEST_PUT_TS) || Bytes.equals(e.getKey(), HFileWriterImpl.MAX_MEMSTORE_TS_KEY) || Bytes.equals(e.getKey(), HFileInfo.CREATE_TIME_TS) - || Bytes.equals(e.getKey(), HStoreFile.BULKLOAD_TIME_KEY)) { + || Bytes.equals(e.getKey(), HStoreFile.BULKLOAD_TIME_KEY) + ) { out.println(Bytes.toLong(e.getValue())); } else if (Bytes.equals(e.getKey(), HStoreFile.TIMERANGE_KEY)) { TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(e.getValue()); out.println(timeRangeTracker.getMin() + "...." 
+ timeRangeTracker.getMax()); - } else if (Bytes.equals(e.getKey(), HFileInfo.AVG_KEY_LEN) + } else if ( + Bytes.equals(e.getKey(), HFileInfo.AVG_KEY_LEN) || Bytes.equals(e.getKey(), HFileInfo.AVG_VALUE_LEN) || Bytes.equals(e.getKey(), HFileWriterImpl.KEY_VALUE_VERSION) - || Bytes.equals(e.getKey(), HFileInfo.MAX_TAGS_LEN)) { + || Bytes.equals(e.getKey(), HFileInfo.MAX_TAGS_LEN) + ) { out.println(Bytes.toInt(e.getValue())); - } else if (Bytes.equals(e.getKey(), HStoreFile.MAJOR_COMPACTION_KEY) + } else if ( + Bytes.equals(e.getKey(), HStoreFile.MAJOR_COMPACTION_KEY) || Bytes.equals(e.getKey(), HFileInfo.TAGS_COMPRESSED) - || Bytes.equals(e.getKey(), HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY)) { + || Bytes.equals(e.getKey(), HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY) + ) { out.println(Bytes.toBoolean(e.getValue())); } else if (Bytes.equals(e.getKey(), HFileInfo.LASTKEY)) { out.println(new KeyValue.KeyOnlyKeyValue(e.getValue()).toString()); @@ -562,19 +551,18 @@ public class HFilePrettyPrinter extends Configured implements Tool { try { out.println("Mid-key: " + reader.midKey().map(CellUtil::getCellKeyAsString)); } catch (Exception e) { - out.println ("Unable to retrieve the midkey"); + out.println("Unable to retrieve the midkey"); } // Printing general bloom information DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); BloomFilter bloomFilter = null; - if (bloomMeta != null) - bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + if (bloomMeta != null) bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); out.println("Bloom filter:"); if (bloomFilter != null) { - out.println(FOUR_SPACES + bloomFilter.toString().replaceAll( - BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES)); + out.println(FOUR_SPACES + + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES)); } else { out.println(FOUR_SPACES + "Not present"); } @@ -582,14 +570,12 @@ public class HFilePrettyPrinter extends Configured implements Tool { // Printing delete bloom information bloomMeta = reader.getDeleteBloomFilterMetadata(); bloomFilter = null; - if (bloomMeta != null) - bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + if (bloomMeta != null) bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); out.println("Delete Family Bloom filter:"); if (bloomFilter != null) { out.println(FOUR_SPACES - + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, - "\n" + FOUR_SPACES)); + + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES)); } else { out.println(FOUR_SPACES + "Not present"); } @@ -598,15 +584,15 @@ public class HFilePrettyPrinter extends Configured implements Tool { private static class KeyValueStatsCollector { private final MetricRegistry metricsRegistry = new MetricRegistry(); private final ByteArrayOutputStream metricsOutput = new ByteArrayOutputStream(); - private final SimpleReporter simpleReporter = SimpleReporter.forRegistry(metricsRegistry). 
- outputTo(new PrintStream(metricsOutput)).filter(MetricFilter.ALL).build(); + private final SimpleReporter simpleReporter = SimpleReporter.forRegistry(metricsRegistry) + .outputTo(new PrintStream(metricsOutput)).filter(MetricFilter.ALL).build(); Histogram keyLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Key length")); Histogram valLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Val length")); - Histogram rowSizeBytes = metricsRegistry.histogram( - name(HFilePrettyPrinter.class, "Row size (bytes)")); - Histogram rowSizeCols = metricsRegistry.histogram( - name(HFilePrettyPrinter.class, "Row size (columns)")); + Histogram rowSizeBytes = + metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Row size (bytes)")); + Histogram rowSizeCols = + metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Row size (columns)")); long curRowBytes = 0; long curRowCols = 0; @@ -619,8 +605,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { public void collect(Cell cell) { valLen.update(cell.getValueLength()); - if (prevCell != null && - CellComparator.getInstance().compareRows(prevCell, cell) != 0) { + if (prevCell != null && CellComparator.getInstance().compareRows(prevCell, cell) != 0) { // new row collectRow(); } @@ -652,27 +637,23 @@ public class HFilePrettyPrinter extends Configured implements Tool { @Override public String toString() { - if (prevCell == null) - return "no data available for statistics"; + if (prevCell == null) return "no data available for statistics"; // Dump the metrics to the output stream simpleReporter.stop(); simpleReporter.report(); - return - metricsOutput.toString() + - "Key of biggest row: " + Bytes.toStringBinary(biggestRow); + return metricsOutput.toString() + "Key of biggest row: " + Bytes.toStringBinary(biggestRow); } } /** - * Almost identical to ConsoleReporter, but extending ScheduledReporter, - * as extending ConsoleReporter in this version of dropwizard is now too much trouble. + * Almost identical to ConsoleReporter, but extending ScheduledReporter, as extending + * ConsoleReporter in this version of dropwizard is now too much trouble. */ private static class SimpleReporter extends ScheduledReporter { /** * Returns a new {@link Builder} for {@link ConsoleReporter}. - * * @param registry the registry to report * @return a {@link Builder} instance for a {@link ConsoleReporter} */ @@ -681,9 +662,9 @@ public class HFilePrettyPrinter extends Configured implements Tool { } /** - * A builder for {@link SimpleReporter} instances. Defaults to using the default locale and - * time zone, writing to {@code System.out}, converting rates to events/second, converting - * durations to milliseconds, and not filtering metrics. + * A builder for {@link SimpleReporter} instances. Defaults to using the default locale and time + * zone, writing to {@code System.out}, converting rates to events/second, converting durations + * to milliseconds, and not filtering metrics. */ public static class Builder { private final MetricRegistry registry; @@ -706,7 +687,6 @@ public class HFilePrettyPrinter extends Configured implements Tool { /** * Write to the given {@link PrintStream}. - * * @param output a {@link PrintStream} instance. * @return {@code this} */ @@ -717,7 +697,6 @@ public class HFilePrettyPrinter extends Configured implements Tool { /** * Only report metrics which match the given filter. 
- * * @param filter a {@link MetricFilter} * @return {@code this} */ @@ -728,17 +707,11 @@ public class HFilePrettyPrinter extends Configured implements Tool { /** * Builds a {@link ConsoleReporter} with the given properties. - * * @return a {@link ConsoleReporter} */ public SimpleReporter build() { - return new SimpleReporter(registry, - output, - locale, - timeZone, - rateUnit, - durationUnit, - filter); + return new SimpleReporter(registry, output, locale, timeZone, rateUnit, durationUnit, + filter); } } @@ -746,29 +719,20 @@ public class HFilePrettyPrinter extends Configured implements Tool { private final Locale locale; private final DateFormat dateFormat; - private SimpleReporter(MetricRegistry registry, - PrintStream output, - Locale locale, - TimeZone timeZone, - TimeUnit rateUnit, - TimeUnit durationUnit, - MetricFilter filter) { + private SimpleReporter(MetricRegistry registry, PrintStream output, Locale locale, + TimeZone timeZone, TimeUnit rateUnit, TimeUnit durationUnit, MetricFilter filter) { super(registry, "simple-reporter", filter, rateUnit, durationUnit); this.output = output; this.locale = locale; - this.dateFormat = DateFormat.getDateTimeInstance(DateFormat.SHORT, - DateFormat.MEDIUM, - locale); + this.dateFormat = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.MEDIUM, locale); dateFormat.setTimeZone(timeZone); } @Override - public void report(SortedMap gauges, - SortedMap counters, - SortedMap histograms, - SortedMap meters, - SortedMap timers) { + public void report(SortedMap gauges, SortedMap counters, + SortedMap histograms, SortedMap meters, + SortedMap timers) { // we know we only have histograms if (!histograms.isEmpty()) { for (Map.Entry entry : histograms.entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 80049c999c4..c74bbb1bdb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -58,10 +58,10 @@ import org.slf4j.LoggerFactory; * Implementation that can handle all hfile versions of {@link HFile.Reader}. */ @InterfaceAudience.Private -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // This class is HFileReaderV3 + HFileReaderV2 + AbstractHFileReader all squashed together into - // one file. Ditto for all the HFileReader.ScannerV? implementations. I was running up against + // one file. Ditto for all the HFileReader.ScannerV? implementations. I was running up against // the MaxInlineLevel limit because too many tiers involved reading from an hfile. Was also hard // to navigate the source code when so many classes participating in read. private static final Logger LOG = LoggerFactory.getLogger(HFileReaderImpl.class); @@ -77,8 +77,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { private final boolean primaryReplicaReader; /** - * What kind of data block encoding should be used while reading, writing, - * and handling cache. + * What kind of data block encoding should be used while reading, writing, and handling cache. 
*/ protected HFileDataBlockEncoder dataBlockEncoder = NoOpDataBlockEncoder.INSTANCE; @@ -103,10 +102,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { protected HFileBlock.FSReader fsBlockReader; /** - * A "sparse lock" implementation allowing to lock on a particular block - * identified by offset. The purpose of this is to avoid two clients loading - * the same block, and have all but one client wait to get the block from the - * cache. + * A "sparse lock" implementation allowing to lock on a particular block identified by offset. The + * purpose of this is to avoid two clients loading the same block, and have all but one client + * wait to get the block from the cache. */ private IdLock offsetLock = new IdLock(); @@ -123,14 +121,14 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { /** * Opens a HFile. - * @param context Reader context info - * @param fileInfo HFile info + * @param context Reader context info + * @param fileInfo HFile info * @param cacheConf Cache configuration. - * @param conf Configuration + * @param conf Configuration */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public HFileReaderImpl(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, - Configuration conf) throws IOException { + Configuration conf) throws IOException { this.cacheConf = cacheConf; this.context = context; this.path = context.getFilePath(); @@ -140,8 +138,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { this.fileInfo = fileInfo; this.trailer = fileInfo.getTrailer(); this.hfileContext = fileInfo.getHFileContext(); - this.fsBlockReader = new HFileBlock.FSReaderImpl(context, hfileContext, - cacheConf.getByteBuffAllocator(), conf); + this.fsBlockReader = + new HFileBlock.FSReaderImpl(context, hfileContext, cacheConf.getByteBuffAllocator(), conf); this.dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo); fsBlockReader.setDataBlockEncoder(dataBlockEncoder, conf); dataBlockIndexReader = fileInfo.getDataBlockIndexReader(); @@ -166,16 +164,13 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { @Override public String toString() { - return "reader=" + path.toString() + - (!isFileInfoLoaded()? "": - ", compression=" + trailer.getCompressionCodec().getName() + - ", cacheConf=" + cacheConf + - ", firstKey=" + toStringFirstKey() + - ", lastKey=" + toStringLastKey()) + - ", avgKeyLen=" + fileInfo.getAvgKeyLen() + - ", avgValueLen=" + fileInfo.getAvgValueLen() + - ", entries=" + trailer.getEntryCount() + - ", length=" + context.getFileSize(); + return "reader=" + path.toString() + + (!isFileInfoLoaded() + ? "" + : ", compression=" + trailer.getCompressionCodec().getName() + ", cacheConf=" + cacheConf + + ", firstKey=" + toStringFirstKey() + ", lastKey=" + toStringLastKey()) + + ", avgKeyLen=" + fileInfo.getAvgKeyLen() + ", avgValueLen=" + fileInfo.getAvgValueLen() + + ", entries=" + trailer.getEntryCount() + ", length=" + context.getFileSize(); } @Override @@ -184,23 +179,22 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * @return the first key in the file. May be null if file has no entries. Note - * that this is not the first row key, but rather the byte form of the - * first KeyValue. + * @return the first key in the file. May be null if file has no entries. 
Note that this is not + * the first row key, but rather the byte form of the first KeyValue. */ @Override public Optional getFirstKey() { if (dataBlockIndexReader == null) { throw new BlockIndexNotLoadedException(path); } - return dataBlockIndexReader.isEmpty() ? Optional.empty() - : Optional.of(dataBlockIndexReader.getRootBlockKey(0)); + return dataBlockIndexReader.isEmpty() + ? Optional.empty() + : Optional.of(dataBlockIndexReader.getRootBlockKey(0)); } /** - * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's - * patch goes in to eliminate {@link KeyValue} here. - * + * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to + * eliminate {@link KeyValue} here. * @return the first row key, or null if the file is empty. */ @Override @@ -210,9 +204,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * TODO left from {@link HFile} version 1: move this to StoreFile after - * Ryan's patch goes in to eliminate {@link KeyValue} here. - * + * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to + * eliminate {@link KeyValue} here. * @return the last row key, or null if the file is empty. */ @Override @@ -238,14 +231,13 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * @return the total heap size of data and meta block indexes in bytes. Does - * not take into account non-root blocks of a multilevel data index. + * @return the total heap size of data and meta block indexes in bytes. Does not take into account + * non-root blocks of a multilevel data index. */ @Override public long indexSize() { return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0) - + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() - : 0); + + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() : 0); } @Override @@ -300,8 +292,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * An exception thrown when an operation requiring a scanner to be seeked - * is invoked on a scanner that is not seeked. + * An exception thrown when an operation requiring a scanner to be seeked is invoked on a scanner + * that is not seeked. */ @SuppressWarnings("serial") public static class NotSeekedException extends IllegalStateException { @@ -328,11 +320,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { final ObjectIntPair pair = new ObjectIntPair<>(); /** - * The next indexed key is to keep track of the indexed key of the next data block. - * If the nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the - * current data block is the last data block. - * - * If the nextIndexedKey is null, it means the nextIndexedKey has not been loaded yet. + * The next indexed key is to keep track of the indexed key of the next data block. If the + * nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the current data block is the + * last data block. If the nextIndexedKey is null, it means the nextIndexedKey has not been + * loaded yet. */ protected Cell nextIndexedKey; // Current block being used. 
NOTICE: DON't release curBlock separately except in shipped() or @@ -345,7 +336,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { protected final ArrayList prevBlocks = new ArrayList<>(); public HFileScannerImpl(final HFile.Reader reader, final boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { + final boolean pread, final boolean isCompaction) { this.reader = reader; this.cacheBlocks = cacheBlocks; this.pread = pread; @@ -380,7 +371,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } @Override - public boolean isSeeked(){ + public boolean isSeeked() { return blockBuffer != null; } @@ -423,8 +414,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // Returns the #bytes in HFile for the current cell. Used to skip these many bytes in current // HFile block's buffer so as to position to the next cell. private int getCurCellSerializedSize() { - int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen - + currMemstoreTSLen; + int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen + currMemstoreTSLen; if (this.reader.getFileContext().isIncludesTags()) { curCellSize += Bytes.SIZEOF_SHORT + currTagsLen; } @@ -443,8 +433,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // But ensure that you read long instead of two ints long ll = blockBuffer.getLongAfterPosition(0); // Read top half as an int of key length and bottom int as value length - this.currKeyLen = (int)(ll >> Integer.SIZE); - this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + this.currKeyLen = (int) (ll >> Integer.SIZE); + this.currValueLen = (int) (Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); checkKeyValueLen(); this.rowLen = blockBuffer.getShortAfterPosition(Bytes.SIZEOF_LONG); // Move position past the key and value lengths and then beyond the key and value @@ -460,11 +450,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { private final void checkTagsLen() { if (checkLen(this.currTagsLen)) { - throw new IllegalStateException("Invalid currTagsLen " + this.currTagsLen + - ". Block offset: " + curBlock.getOffset() + ", block length: " + - this.blockBuffer.limit() + - ", position: " + this.blockBuffer.position() + " (without header)." + - " path=" + reader.getPath()); + throw new IllegalStateException( + "Invalid currTagsLen " + this.currTagsLen + ". Block offset: " + curBlock.getOffset() + + ", block length: " + this.blockBuffer.limit() + ", position: " + + this.blockBuffer.position() + " (without header)." + " path=" + reader.getPath()); } } @@ -496,7 +485,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { if (len == 1) { this.currMemstoreTS = firstByte; } else { - int remaining = len -1; + int remaining = len - 1; long i = 0; offsetFromPos++; if (remaining >= Bytes.SIZEOF_INT) { @@ -523,19 +512,14 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * Within a loaded block, seek looking for the last key that is smaller than - * (or equal to?) the key we are interested in. - * A note on the seekBefore: if you have seekBefore = true, AND the first - * key in the block = key, then you'll get thrown exceptions. The caller has - * to check for that case and load the previous block as appropriate. - * @param key - * the key to find - * @param seekBefore - * find the key before the given key in case of exact match. 
- * @return 0 in case of an exact key match, 1 in case of an inexact match, - * -2 in case of an inexact match and furthermore, the input key - * less than the first key of current block(e.g. using a faked index - * key) + * Within a loaded block, seek looking for the last key that is smaller than (or equal to?) the + * key we are interested in. A note on the seekBefore: if you have seekBefore = true, AND the + * first key in the block = key, then you'll get thrown exceptions. The caller has to check for + * that case and load the previous block as appropriate. + * @param key the key to find + * @param seekBefore find the key before the given key in case of exact match. + * @return 0 in case of an exact key match, 1 in case of an inexact match, -2 in case of an + * inexact match and furthermore, the input key less than the first key of current + * block(e.g. using a faked index key) */ protected int blockSeek(Cell key, boolean seekBefore) { int klen, vlen, tlen = 0; @@ -545,31 +529,29 @@ offsetFromPos = 0; // Better to ensure that we use the BB Utils here long ll = blockBuffer.getLongAfterPosition(offsetFromPos); - klen = (int)(ll >> Integer.SIZE); - vlen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + klen = (int) (ll >> Integer.SIZE); + vlen = (int) (Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); if (checkKeyLen(klen) || checkLen(vlen)) { - throw new IllegalStateException("Invalid klen " + klen + " or vlen " - + vlen + ". Block offset: " - + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)." - + " path=" + reader.getPath()); + throw new IllegalStateException( + "Invalid klen " + klen + " or vlen " + vlen + ". Block offset: " + curBlock.getOffset() + + ", block length: " + blockBuffer.limit() + ", position: " + blockBuffer.position() + + " (without header)." + " path=" + reader.getPath()); } offsetFromPos += Bytes.SIZEOF_LONG; this.rowLen = blockBuffer.getShortAfterPosition(offsetFromPos); blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair); bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen, rowLen); int comp = - PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv); + PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv); offsetFromPos += klen + vlen; if (this.reader.getFileContext().isIncludesTags()) { // Read short as unsigned, high byte first tlen = ((blockBuffer.getByteAfterPosition(offsetFromPos) & 0xff) << 8) - ^ (blockBuffer.getByteAfterPosition(offsetFromPos + 1) & 0xff); + ^ (blockBuffer.getByteAfterPosition(offsetFromPos + 1) & 0xff); if (checkLen(tlen)) { throw new IllegalStateException("Invalid tlen " + tlen + ". Block offset: " - + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)." - + " path=" + reader.getPath()); + + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " + + blockBuffer.position() + " (without header)." + " path=" + reader.getPath()); } // add the two bytes read for the tags.
offsetFromPos += tlen + (Bytes.SIZEOF_SHORT); @@ -582,10 +564,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { if (seekBefore) { if (lastKeyValueSize < 0) { throw new IllegalStateException("blockSeek with seekBefore " - + "at the first key of the block: key=" + CellUtil.getCellKeyAsString(key) - + ", blockOffset=" + curBlock.getOffset() + ", onDiskSize=" - + curBlock.getOnDiskSizeWithHeader() - + ", path=" + reader.getPath()); + + "at the first key of the block: key=" + CellUtil.getCellKeyAsString(key) + + ", blockOffset=" + curBlock.getOffset() + ", onDiskSize=" + + curBlock.getOnDiskSizeWithHeader() + ", path=" + reader.getPath()); } blockBuffer.moveBack(lastKeyValueSize); readKeyValueLen(); @@ -643,17 +624,19 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { return compared; } else { // The comparison with no_next_index_key has to be checked - if (this.nextIndexedKey != null && - (this.nextIndexedKey == KeyValueScanner.NO_NEXT_INDEXED_KEY || PrivateCellUtil - .compareKeyIgnoresMvcc(reader.getComparator(), key, nextIndexedKey) < 0)) { + if ( + this.nextIndexedKey != null && (this.nextIndexedKey + == KeyValueScanner.NO_NEXT_INDEXED_KEY + || PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, nextIndexedKey) + < 0) + ) { // The reader shall continue to scan the current data block instead // of querying the // block index as long as it knows the target key is strictly // smaller than // the next indexed key or the current data block is the last data // block. - return loadBlockAndSeekToKey(this.curBlock, nextIndexedKey, false, key, - false); + return loadBlockAndSeekToKey(this.curBlock, nextIndexedKey, false, key, false); } } } @@ -663,22 +646,19 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * An internal API function. Seek to the given key, optionally rewinding to - * the first key of the block before doing the seek. - * - * @param key - a cell representing the key that we need to fetch - * @param rewind whether to rewind to the first key of the block before - * doing the seek. If this is false, we are assuming we never go - * back, otherwise the result is undefined. - * @return -1 if the key is earlier than the first key of the file, - * 0 if we are at the given key, 1 if we are past the given key - * -2 if the key is earlier than the first key of the file while - * using a faked index key + * An internal API function. Seek to the given key, optionally rewinding to the first key of the + * block before doing the seek. + * @param key - a cell representing the key that we need to fetch + * @param rewind whether to rewind to the first key of the block before doing the seek. If this + * is false, we are assuming we never go back, otherwise the result is undefined. 
+ * @return -1 if the key is earlier than the first key of the file, 0 if we are at the given + * key, 1 if we are past the given key -2 if the key is earlier than the first key of + * the file while using a faked index key */ public int seekTo(Cell key, boolean rewind) throws IOException { HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); BlockWithScanInfo blockWithScanInfo = indexReader.loadDataBlockWithScanInfo(key, curBlock, - cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); + cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); if (blockWithScanInfo == null || blockWithScanInfo.getHFileBlock() == null) { // This happens if the key e.g. falls before the beginning of the file. return -1; @@ -690,8 +670,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { @Override public boolean seekBefore(Cell key) throws IOException { HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, curBlock, - cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), - reader); + cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), reader); if (seekToBlock == null) { return false; } @@ -735,13 +714,12 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * Scans blocks in the "scanned" section of the {@link HFile} until the next - * data block is found. - * + * Scans blocks in the "scanned" section of the {@link HFile} until the next data block is + * found. * @return the next block, or null if there are no more data blocks */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="Yeah, unnecessary null check; could do w/ clean up") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "Yeah, unnecessary null check; could do w/ clean up") protected HFileBlock readNextDataBlock() throws IOException { long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset(); if (curBlock == null) { @@ -792,28 +770,28 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // we can handle the 'no tags' case. if (currTagsLen > 0) { ret = new SizeCachedKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, - rowLen); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } else { ret = new SizeCachedNoTagsKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, - rowLen); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } } else { ByteBuffer buf = blockBuffer.asSubByteBuffer(cellBufSize); if (buf.isDirect()) { ret = currTagsLen > 0 - ? new SizeCachedByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, - currKeyLen, rowLen) - : new SizeCachedNoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, - currKeyLen, rowLen); + ? 
new SizeCachedByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, currKeyLen, + rowLen) + : new SizeCachedNoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen); } else { if (currTagsLen > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId, currKeyLen, rowLen); + cellBufSize, seqId, currKeyLen, rowLen); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId, currKeyLen, rowLen); + cellBufSize, seqId, currKeyLen, rowLen); } } } @@ -828,8 +806,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, keyPair); ByteBuffer keyBuf = keyPair.getFirst(); if (keyBuf.hasArray()) { - return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), keyBuf.arrayOffset() - + keyPair.getSecond(), currKeyLen); + return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), + keyBuf.arrayOffset() + keyPair.getSecond(), currKeyLen); } else { // Better to do a copy here instead of holding on to this BB so that // we could release the blocks referring to this key. This key is specifically used @@ -871,11 +849,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { try { blockBuffer.skip(getCurCellSerializedSize()); } catch (IllegalArgumentException e) { - LOG.error("Current pos = " + blockBuffer.position() - + "; currKeyLen = " + currKeyLen + "; currValLen = " - + currValueLen + "; block limit = " + blockBuffer.limit() - + "; currBlock currBlockOffset = " + this.curBlock.getOffset() - + "; path=" + reader.getPath()); + LOG.error("Current pos = " + blockBuffer.position() + "; currKeyLen = " + currKeyLen + + "; currValLen = " + currValueLen + "; block limit = " + blockBuffer.limit() + + "; currBlock currBlockOffset = " + this.curBlock.getOffset() + "; path=" + + reader.getPath()); throw e; } } @@ -894,7 +871,6 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { return isNextBlock(); } - private boolean isNextBlock() throws IOException { // Methods are small so they get inlined because they are 'hot'. HFileBlock nextBlock = readNextDataBlock(); @@ -918,10 +894,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * Go to the next key/value in the block section. Loads the next block if - * necessary. If successful, {@link #getKey()} and {@link #getValue()} can - * be called. - * + * Go to the next key/value in the block section. Loads the next block if necessary. If + * successful, {@link #getKey()} and {@link #getValue()} can be called. * @return true if successfully navigated to the next key/value */ @Override @@ -935,9 +909,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { /** * Positions this scanner at the start of the file. - * - * @return false if empty file; i.e. a call to next would return false and - * the current key and value are undefined. + * @return false if empty file; i.e. a call to next would return false and the current key and + * value are undefined. 
*/ @Override public boolean seekTo() throws IOException { @@ -959,7 +932,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { return true; } - protected boolean processFirstDataBlock() throws IOException{ + protected boolean processFirstDataBlock() throws IOException { blockBuffer.rewind(); readKeyValueLen(); return true; @@ -970,14 +943,14 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); if (newBlock.getOffset() < 0) { releaseIfNotCurBlock(newBlock); - throw new IOException("Invalid offset=" + newBlock.getOffset() + - ", path=" + reader.getPath()); + throw new IOException( + "Invalid offset=" + newBlock.getOffset() + ", path=" + reader.getPath()); } updateCurrentBlock(newBlock); } protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, - Cell key, boolean seekBefore) throws IOException { + Cell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1008,10 +981,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { protected final void checkKeyValueLen() { if (checkKeyLen(this.currKeyLen) || checkLen(this.currValueLen)) { throw new IllegalStateException("Invalid currKeyLen " + this.currKeyLen - + " or currValueLen " + this.currValueLen + ". Block offset: " - + this.curBlock.getOffset() + ", block length: " - + this.blockBuffer.limit() + ", position: " + this.blockBuffer.position() - + " (without header)." + ", path=" + reader.getPath()); + + " or currValueLen " + this.currValueLen + ". Block offset: " + this.curBlock.getOffset() + + ", block length: " + this.blockBuffer.limit() + ", position: " + + this.blockBuffer.position() + " (without header)." + ", path=" + reader.getPath()); } } @@ -1019,16 +991,16 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { * Updates the current block to be the given {@link HFileBlock}. Seeks to the the first * key/value pair. * @param newBlock the block read by {@link HFileReaderImpl#readBlock}, it's a totally new block - * with new allocated {@link ByteBuff}, so if no further reference to this block, we - * should release it carefully. + * with new allocated {@link ByteBuff}, so if no further reference to this + * block, we should release it carefully. 
*/ protected void updateCurrentBlock(HFileBlock newBlock) throws IOException { try { if (newBlock.getBlockType() != BlockType.DATA) { throw new IllegalStateException( - "ScannerV2 works only on data blocks, got " + newBlock.getBlockType() + "; " - + "HFileName=" + reader.getPath() + ", " + "dataBlockEncoder=" - + reader.getDataBlockEncoding() + ", " + "isCompaction=" + isCompaction); + "ScannerV2 works only on data blocks, got " + newBlock.getBlockType() + "; " + + "HFileName=" + reader.getPath() + ", " + "dataBlockEncoder=" + + reader.getDataBlockEncoding() + ", " + "isCompaction=" + isCompaction); } updateCurrBlockRef(newBlock); blockBuffer = newBlock.getBufferWithoutHeader(); @@ -1048,8 +1020,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { buffer.skip(Bytes.SIZEOF_INT);// Skip value len part ByteBuffer keyBuff = buffer.asSubByteBuffer(klen); if (keyBuff.hasArray()) { - return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), keyBuff.arrayOffset() - + keyBuff.position(), klen); + return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), + keyBuff.arrayOffset() + keyBuff.position(), klen); } else { return new ByteBufferKeyOnlyKeyValue(keyBuff, keyBuff.position(), klen); } @@ -1106,8 +1078,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { public static final int PBUF_TRAILER_MINOR_VERSION = 2; /** - * The size of a (key length, value length) tuple that prefixes each entry in - * a data block. + * The size of a (key length, value length) tuple that prefixes each entry in a data block. */ public final static int KEY_VALUE_LEN_SIZE = 2 * Bytes.SIZEOF_INT; @@ -1116,13 +1087,13 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { * and its encoding vs. {@code expectedDataBlockEncoding}. Unpacks the block as necessary. */ private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock, - boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) throws IOException { + boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, + DataBlockEncoding expectedDataBlockEncoding) throws IOException { // Check cache for block. If found return. BlockCache cache = cacheConf.getBlockCache().orElse(null); if (cache != null) { HFileBlock cachedBlock = - (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, updateCacheMetrics); + (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, updateCacheMetrics); if (cachedBlock != null) { if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) { HFileBlock compressedBlock = cachedBlock; @@ -1146,8 +1117,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // Block types other than data blocks always have // DataBlockEncoding.NONE. To avoid false negative cache misses, only // perform this check if cached block is a data block. - if (cachedBlock.getBlockType().isData() && - !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) { + if ( + cachedBlock.getBlockType().isData() + && !actualDataBlockEncoding.equals(expectedDataBlockEncoding) + ) { // This mismatch may happen if a Scanner, which is used for say a // compaction, tries to read an encoded block from the block cache. // The reverse might happen when an EncodedScanner tries to read @@ -1158,17 +1131,20 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // forced here. 
This will potentially cause a significant number of // cache misses, so update so we should keep track of this as it might // justify the work on a CompoundScanner. - if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) && - !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)) { + if ( + !expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) + && !actualDataBlockEncoding.equals(DataBlockEncoding.NONE) + ) { // If the block is encoded but the encoding does not match the // expected encoding it is likely the encoding was changed but the // block was not yet evicted. Evictions on file close happen async // so blocks with the old encoding still linger in cache for some // period of time. This event should be rare as it only happens on // schema definition change. - LOG.info("Evicting cached block with key {} because data block encoding mismatch; " + - "expected {}, actual {}, path={}", cacheKey, actualDataBlockEncoding, - expectedDataBlockEncoding, path); + LOG.info( + "Evicting cached block with key {} because data block encoding mismatch; " + + "expected {}, actual {}, path={}", + cacheKey, actualDataBlockEncoding, expectedDataBlockEncoding, path); // This is an error scenario. so here we need to release the block. returnAndEvictBlock(cache, cacheKey, cachedBlock); } @@ -1190,8 +1166,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { * @return block wrapped in a ByteBuffer, with header skipped */ @Override - public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) - throws IOException { + public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException { if (trailer.getMetaIndexCount() == 0) { return null; // there are no meta blocks } @@ -1200,8 +1175,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } byte[] mbname = Bytes.toBytes(metaBlockName); - int block = metaBlockIndexReader.rootBlockContainingKey(mbname, - 0, mbname.length); + int block = metaBlockIndexReader.rootBlockContainingKey(mbname, 0, mbname.length); if (block == -1) { return null; } @@ -1214,11 +1188,11 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // Check cache for block. If found return. long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block); BlockCacheKey cacheKey = - new BlockCacheKey(name, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META); + new BlockCacheKey(name, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META); cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory()); HFileBlock cachedBlock = - getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null); + getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null); if (cachedBlock != null) { assert cachedBlock.isUnpacked() : "Packed block leak."; // Return a distinct 'shallow copy' of the block, @@ -1228,7 +1202,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // Cache Miss, please load. 
HFileBlock compressedBlock = - fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false, true); + fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false, true); HFileBlock uncompressedBlock = compressedBlock.unpack(hfileContext, fsBlockReader); if (compressedBlock != uncompressedBlock) { compressedBlock.release(); @@ -1262,20 +1236,17 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } @Override - public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, - final boolean cacheBlock, boolean pread, final boolean isCompaction, - boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) - throws IOException { + public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final boolean cacheBlock, + boolean pread, final boolean isCompaction, boolean updateCacheMetrics, + BlockType expectedBlockType, DataBlockEncoding expectedDataBlockEncoding) throws IOException { if (dataBlockIndexReader == null) { throw new IOException(path + " block index not loaded"); } long trailerOffset = trailer.getLoadOnOpenDataOffset(); if (dataBlockOffset < 0 || dataBlockOffset >= trailerOffset) { - throw new IOException("Requested block is out of range: " + dataBlockOffset + - ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() + - ", trailer.getLoadOnOpenDataOffset: " + trailerOffset + - ", path=" + path); + throw new IOException("Requested block is out of range: " + dataBlockOffset + + ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() + + ", trailer.getLoadOnOpenDataOffset: " + trailerOffset + ", path=" + path); } // For any given block from any given file, synchronize reads for said // block. @@ -1283,8 +1254,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // the other choice is to duplicate work (which the cache would prevent you // from doing). - BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset, - this.isPrimaryReplicaReader(), expectedBlockType); + BlockCacheKey cacheKey = + new BlockCacheKey(name, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType); boolean useLock = false; IdLock.Entry lockEntry = null; @@ -1318,8 +1289,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { returnAndEvictBlock(cache, cacheKey, cachedBlock); }); throw new IOException("Cached block under key " + cacheKey + " " - + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: " - + dataBlockEncoder.getDataBlockEncoding() + "), path=" + path); + + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: " + + dataBlockEncoder.getDataBlockEncoding() + "), path=" + path); } } // Cache-hit. Return! @@ -1374,16 +1345,13 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * Compares the actual type of a block retrieved from cache or disk with its - * expected type and throws an exception in case of a mismatch. Expected - * block type of {@link BlockType#DATA} is considered to match the actual - * block type [@link {@link BlockType#ENCODED_DATA} as well. - * @param block a block retrieved from cache or disk - * @param expectedBlockType the expected block type, or null to skip the - * check + * Compares the actual type of a block retrieved from cache or disk with its expected type and + * throws an exception in case of a mismatch. 
Expected block type of {@link BlockType#DATA} is + * considered to match the actual block type [@link {@link BlockType#ENCODED_DATA} as well. + * @param block a block retrieved from cache or disk + * @param expectedBlockType the expected block type, or null to skip the check */ - private void validateBlockType(HFileBlock block, - BlockType expectedBlockType) throws IOException { + private void validateBlockType(HFileBlock block, BlockType expectedBlockType) throws IOException { if (expectedBlockType == null) { return; } @@ -1394,25 +1362,25 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { return; } if (actualBlockType != expectedBlockType) { - throw new IOException("Expected block type " + expectedBlockType + ", " + - "but got " + actualBlockType + ": " + block + ", path=" + path); + throw new IOException("Expected block type " + expectedBlockType + ", " + "but got " + + actualBlockType + ": " + block + ", path=" + path); } } /** - * @return Last key as cell in the file. May be null if file has no entries. Note that - * this is not the last row key, but it is the Cell representation of the last - * key + * @return Last key as cell in the file. May be null if file has no entries. Note that this is not + * the last row key, but it is the Cell representation of the last key */ @Override public Optional getLastKey() { - return dataBlockIndexReader.isEmpty() ? Optional.empty() : - Optional.of(fileInfo.getLastKeyCell()); + return dataBlockIndexReader.isEmpty() + ? Optional.empty() + : Optional.of(fileInfo.getLastKeyCell()); } /** - * @return Midkey for this file. We work with block boundaries only so - * returned midkey is an approximation only. + * @return Midkey for this file. We work with block boundaries only so returned midkey is an + * approximation only. */ @Override public Optional midKey() throws IOException { @@ -1443,8 +1411,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { private final DataBlockEncoder.EncodedSeeker seeker; private final DataBlockEncoder dataBlockEncoder; - public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, - boolean pread, boolean isCompaction, HFileContext meta, Configuration conf) { + public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, boolean pread, + boolean isCompaction, HFileContext meta, Configuration conf) { super(reader, cacheBlocks, pread, isCompaction); DataBlockEncoding encoding = reader.getDataBlockEncoding(); dataBlockEncoder = encoding.getEncoder(); @@ -1453,7 +1421,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } @Override - public boolean isSeeked(){ + public boolean isSeeked() { return curBlock != null; } @@ -1466,8 +1434,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { * Updates the current block to be the given {@link HFileBlock}. Seeks to the the first * key/value pair. * @param newBlock the block to make current, and read by {@link HFileReaderImpl#readBlock}, - * it's a totally new block with new allocated {@link ByteBuff}, so if no further - * reference to this block, we should release it carefully. + * it's a totally new block with new allocated {@link ByteBuff}, so if no + * further reference to this block, we should release it carefully. 
*/ @Override protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException { @@ -1479,9 +1447,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { short dataBlockEncoderId = newBlock.getDataBlockEncodingId(); if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) { String encoderCls = dataBlockEncoder.getClass().getName(); - throw new CorruptHFileException("Encoder " + encoderCls + - " doesn't support data block encoding " + - DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath()); + throw new CorruptHFileException( + "Encoder " + encoderCls + " doesn't support data block encoding " + + DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath()); } updateCurrBlockRef(newBlock); ByteBuff encodedBuffer = getEncodedBuffer(newBlock); @@ -1498,7 +1466,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { int pos = newBlock.headerSize() + DataBlockEncoding.ID_SIZE; origBlock.position(pos); origBlock - .limit(pos + newBlock.getUncompressedSizeWithoutHeader() - DataBlockEncoding.ID_SIZE); + .limit(pos + newBlock.getUncompressedSizeWithoutHeader() - DataBlockEncoding.ID_SIZE); return origBlock.slice(); } @@ -1566,8 +1534,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } @Override - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, - boolean rewind, Cell key, boolean seekBefore) throws IOException { + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, + Cell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1584,8 +1552,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * Returns a buffer with the Bloom filter metadata. The caller takes - * ownership of the buffer. + * Returns a buffer with the Bloom filter metadata. The caller takes ownership of the buffer. */ @Override public DataInput getGeneralBloomFilterMetadata() throws IOException { @@ -1597,12 +1564,12 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { return this.getBloomFilterMetadata(BlockType.DELETE_FAMILY_BLOOM_META); } - private DataInput getBloomFilterMetadata(BlockType blockType) - throws IOException { - if (blockType != BlockType.GENERAL_BLOOM_META && - blockType != BlockType.DELETE_FAMILY_BLOOM_META) { - throw new RuntimeException("Block Type: " + blockType.toString() + - " is not supported, path=" + path) ; + private DataInput getBloomFilterMetadata(BlockType blockType) throws IOException { + if ( + blockType != BlockType.GENERAL_BLOOM_META && blockType != BlockType.DELETE_FAMILY_BLOOM_META + ) { + throw new RuntimeException( + "Block Type: " + blockType.toString() + " is not supported, path=" + path); } for (HFileBlock b : fileInfo.getLoadOnOpenBlocks()) { @@ -1623,8 +1590,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { } /** - * Returns false if block prefetching was requested for this file and has - * not completed, true otherwise + * Returns false if block prefetching was requested for this file and has not completed, true + * otherwise */ @Override public boolean prefetchComplete() { @@ -1633,15 +1600,14 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { /** * Create a Scanner on this file. 
No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. NOTE: Do not use this overload of getScanner for - * compactions. See {@link #getScanner(Configuration, boolean, boolean, boolean)} - * - * @param conf Store configuration. + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up + * in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not use this + * overload of getScanner for compactions. See + * {@link #getScanner(Configuration, boolean, boolean, boolean)} + * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. - * @param pread Use positional read rather than seek+read if true (pread is - * better for random reads, seek+read is better scanning). + * @param pread Use positional read rather than seek+read if true (pread is better for + * random reads, seek+read is better scanning). * @return Scanner on this file. */ @Override @@ -1651,23 +1617,16 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { /** * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. - * @param conf - * Store configuration. - * @param cacheBlocks - * True if we should cache blocks read in by this scanner. - * @param pread - * Use positional read rather than seek+read if true (pread is better - * for random reads, seek+read is better scanning). - * @param isCompaction - * is scanner being used for a compaction? + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up + * in a Scanner. Letting go of your references to the scanner is sufficient. n * Store + * configuration. n * True if we should cache blocks read in by this scanner. n * Use positional + * read rather than seek+read if true (pread is better for random reads, seek+read is better + * scanning). n * is scanner being used for a compaction? * @return Scanner on this file. */ @Override public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread, - final boolean isCompaction) { + final boolean isCompaction) { if (dataBlockEncoder.useEncodedScanner()) { return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext, conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index a2a35fef37a..3e5ada1442f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,105 +20,84 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.Shipper; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.regionserver.Shipper; +import org.apache.yetus.audience.InterfaceAudience; /** - * A scanner allows you to position yourself within a HFile and - * scan through it. It allows you to reposition yourself as well. - * - *

          A scanner doesn't always have a key/value that it is pointing to - * when it is first created and before - * {@link #seekTo()}/{@link #seekTo(Cell)} are called. - * In this case, {@link #getKey()}/{@link #getValue()} returns null. At most - * other times, a key and value will be available. The general pattern is that - * you position the Scanner using the seekTo variants and then getKey and - * getValue. + * A scanner allows you to position yourself within a HFile and scan through it. It allows you to + * reposition yourself as well. + *

          + * A scanner doesn't always have a key/value that it is pointing to when it is first created and + * before {@link #seekTo()}/{@link #seekTo(Cell)} are called. In this case, + * {@link #getKey()}/{@link #getValue()} returns null. At most other times, a key and value will be + * available. The general pattern is that you position the Scanner using the seekTo variants and + * then getKey and getValue. */ @InterfaceAudience.Private public interface HFileScanner extends Shipper, Closeable { /** - * SeekTo or just before the passed cell. Examine the return - * code to figure whether we found the cell or not. - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cells in the file. - * @param cell - * @return -1, if cell < c[0], no position; - * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. - * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. - * If there is no cell c[i+1] greater than or equal to the input cell, then the - * scanner will position itself at the end of the file and next() will return - * false when it is called. - * @throws IOException + * SeekTo or just before the passed cell. Examine the return code to figure whether + * we found the cell or not. Consider the cell stream of all the cells in the file, + * c[0] .. c[n], where there are n cells in the file. n * @return -1, if cell < + * c[0], no position; 0, such that c[i] = cell and scanner is left in position i; and 1, such that + * c[i] < cell, and scanner is left in position i. The scanner will position itself between + * c[i] and c[i+1] where c[i] < cell <= c[i+1]. If there is no cell c[i+1] greater than or + * equal to the input cell, then the scanner will position itself at the end of the file and + * next() will return false when it is called. n */ int seekTo(Cell cell) throws IOException; /** - * Reseek to or just before the passed cell. Similar to seekTo - * except that this can be called even if the scanner is not at the beginning - * of a file. - * This can be used to seek only to cells which come after the current position - * of the scanner. - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cellc in the file after - * current position of HFileScanner. - * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. - * If there is no cell c[i+1] greater than or equal to the input cell, then the - * scanner will position itself at the end of the file and next() will return + * Reseek to or just before the passed cell. Similar to seekTo except that this can + * be called even if the scanner is not at the beginning of a file. This can be used to seek only + * to cells which come after the current position of the scanner. Consider the cell stream of all + * the cells in the file, c[0] .. c[n], where there are n cellc in the file after + * current position of HFileScanner. The scanner will position itself between c[i] and c[i+1] + * where c[i] < cell <= c[i+1]. If there is no cell c[i+1] greater than or equal to the + * input cell, then the scanner will position itself at the end of the file and next() will return * false when it is called. * @param cell Cell to find (should be non-null) - * @return -1, if cell < c[0], no position; - * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. 
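A short usage sketch of the seekTo contract spelled out above; the reader, Configuration and search cell are assumed to exist, and the snippet only illustrates the return-code handling:

  try (HFileScanner scanner = reader.getScanner(conf, /* cacheBlocks */ true, /* pread */ true)) {
    int pos = scanner.seekTo(searchCell);
    // -1: searchCell sorts before the first cell, so fall back to the start of the file;
    // 0: exact match; 1: scanner sits on the last cell that is <= searchCell
    boolean positioned = pos >= 0 || scanner.seekTo();
    while (positioned) {
      Cell cell = scanner.getCell();
      // ... process cell ...
      positioned = scanner.next();
    }
  }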
- * @throws IOException + * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in + * position i; and 1, such that c[i] < cell, and scanner is left in position i. n */ int reseekTo(Cell cell) throws IOException; /** - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cells in the file. + * Consider the cell stream of all the cells in the file, c[0] .. c[n], where there + * are n cells in the file. * @param cell Cell to find - * @return false if cell <= c[0] or true with scanner in position 'i' such - * that: c[i] < cell. Furthermore: there may be a c[i+1], such that - * c[i] < cell <= c[i+1] but there may also NOT be a c[i+1], and next() will - * return false (EOF). - * @throws IOException + * @return false if cell <= c[0] or true with scanner in position 'i' such that: c[i] < + * cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but + * there may also NOT be a c[i+1], and next() will return false (EOF). n */ boolean seekBefore(Cell cell) throws IOException; /** * Positions this scanner at the start of the file. - * @return False if empty file; i.e. a call to next would return false and - * the current key and value are undefined. - * @throws IOException + * @return False if empty file; i.e. a call to next would return false and the current key and + * value are undefined. n */ boolean seekTo() throws IOException; /** * Scans to the next entry in the file. - * @return Returns false if you are at the end otherwise true if more in file. - * @throws IOException + * @return Returns false if you are at the end otherwise true if more in file. n */ boolean next() throws IOException; /** - * Gets the current key in the form of a cell. You must call - * {@link #seekTo(Cell)} before this method. + * Gets the current key in the form of a cell. You must call {@link #seekTo(Cell)} before this + * method. * @return gets the current key as a Cell. */ Cell getKey(); /** - * Gets a buffer view to the current value. You must call - * {@link #seekTo(Cell)} before this method. - * - * @return byte buffer for the value. The limit is set to the value size, and - * the position is 0, the start of the buffer view. + * Gets a buffer view to the current value. You must call {@link #seekTo(Cell)} before this + * method. + * @return byte buffer for the value. The limit is set to the value size, and the position is 0, + * the start of the buffer view. */ ByteBuffer getValue(); @@ -129,8 +107,8 @@ public interface HFileScanner extends Shipper, Closeable { Cell getCell(); /** - * Convenience method to get a copy of the key as a string - interpreting the - * bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. + * Convenience method to get a copy of the key as a string - interpreting the bytes as UTF8. You + * must call {@link #seekTo(Cell)} before this method. * @return key as a string * @deprecated Since hbase-2.0.0 */ @@ -138,8 +116,8 @@ public interface HFileScanner extends Shipper, Closeable { String getKeyString(); /** - * Convenience method to get a copy of the value as a string - interpreting - * the bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. + * Convenience method to get a copy of the value as a string - interpreting the bytes as UTF8. You + * must call {@link #seekTo(Cell)} before this method. 
* @return value as a string * @deprecated Since hbase-2.0.0 */ @@ -152,9 +130,8 @@ public interface HFileScanner extends Shipper, Closeable { HFile.Reader getReader(); /** - * @return True is scanner has had one of the seek calls invoked; i.e. - * {@link #seekBefore(Cell)} or {@link #seekTo()} or {@link #seekTo(Cell)}. - * Otherwise returns false. + * @return True is scanner has had one of the seek calls invoked; i.e. {@link #seekBefore(Cell)} + * or {@link #seekTo()} or {@link #seekTo(Cell)}. Otherwise returns false. */ boolean isSeeked(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java index 3f72b4adab3..5f31e148dbe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,13 +22,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of {@link HFile.Reader} to deal with stream read - * do not perform any prefetch operations (HFilePreadReader will do this). + * Implementation of {@link HFile.Reader} to deal with stream read do not perform any prefetch + * operations (HFilePreadReader will do this). */ @InterfaceAudience.Private public class HFileStreamReader extends HFileReaderImpl { public HFileStreamReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, - Configuration conf) throws IOException { + Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java index ec73f89631d..612f127e11e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,21 +18,18 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private class HFileUtil { - /** guards against NullPointer - * utility which tries to seek on the DFSIS and will try an alternative source - * if the FSDataInputStream throws an NPE HBASE-17501 - * @param istream - * @param offset - * @throws IOException + /** + * guards against NullPointer utility which tries to seek on the DFSIS and will try an alternative + * source if the FSDataInputStream throws an NPE HBASE-17501 nnn */ - static public void seekOnMultipleSources(FSDataInputStream istream, long offset) throws IOException { + static public void seekOnMultipleSources(FSDataInputStream istream, long offset) + throws IOException { try { // attempt to seek inside of current blockReader istream.seek(offset); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 3e03b887b66..026e4c01b7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutput; @@ -33,7 +32,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.MetaCellComparator; @@ -64,14 +62,14 @@ public class HFileWriterImpl implements HFile.Writer { private static final long UNSET = -1; - /** if this feature is enabled, preCalculate encoded data size before real encoding happens*/ + /** if this feature is enabled, preCalculate encoded data size before real encoding happens */ public static final String UNIFIED_ENCODED_BLOCKSIZE_RATIO = "hbase.writer.unified.encoded.blocksize.ratio"; - /** Block size limit after encoding, used to unify encoded block Cache entry size*/ + /** Block size limit after encoding, used to unify encoded block Cache entry size */ private final int encodedBlockSizeLimit; - /** The Cell previously appended. Becomes the last cell in the file.*/ + /** The Cell previously appended. Becomes the last cell in the file. */ protected Cell lastCell = null; /** FileSystem stream to write into. */ @@ -102,12 +100,10 @@ public class HFileWriterImpl implements HFile.Writer { protected List metaData = new ArrayList<>(); /** - * First cell in a block. - * This reference should be short-lived since we write hfiles in a burst. + * First cell in a block. This reference should be short-lived since we write hfiles in a burst. */ protected Cell firstCellInBlock = null; - /** May be null if we were passed a stream. */ protected final Path path; @@ -115,14 +111,14 @@ public class HFileWriterImpl implements HFile.Writer { protected final CacheConfig cacheConf; /** - * Name for this object used when logging or in toString. Is either - * the result of a toString on stream or else name of passed file Path. 
+ * Name for this object used when logging or in toString. Is either the result of a toString on + * stream or else name of passed file Path. */ protected final String name; /** - * The data block encoding which will be used. - * {@link NoOpDataBlockEncoder#INSTANCE} if there is no encoding. + * The data block encoding which will be used. {@link NoOpDataBlockEncoder#INSTANCE} if there is + * no encoding. */ protected final HFileDataBlockEncoder blockEncoder; @@ -131,7 +127,7 @@ public class HFileWriterImpl implements HFile.Writer { private int maxTagsLength = 0; /** KeyValue version in FileInfo */ - public static final byte [] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); + public static final byte[] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); /** Version for KeyValue which includes memstore timestamp */ public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1; @@ -152,8 +148,8 @@ public class HFileWriterImpl implements HFile.Writer { protected long lastDataBlockOffset = UNSET; /** - * The last(stop) Cell of the previous data block. - * This reference should be short-lived since we write hfiles in a burst. + * The last(stop) Cell of the previous data block. This reference should be short-lived since we + * write hfiles in a burst. */ private Cell lastCellOfPreviousBlock = null; @@ -163,7 +159,7 @@ public class HFileWriterImpl implements HFile.Writer { protected long maxMemstoreTS = 0; public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path, - FSDataOutputStream outputStream, HFileContext fileContext) { + FSDataOutputStream outputStream, HFileContext fileContext) { this.outputStream = outputStream; this.path = path; this.name = path != null ? path.getName() : outputStream.toString(); @@ -177,40 +173,35 @@ public class HFileWriterImpl implements HFile.Writer { closeOutputStream = path != null; this.cacheConf = cacheConf; float encodeBlockSizeRatio = conf.getFloat(UNIFIED_ENCODED_BLOCKSIZE_RATIO, 1f); - this.encodedBlockSizeLimit = (int)(hFileContext.getBlocksize() * encodeBlockSizeRatio); + this.encodedBlockSizeLimit = (int) (hFileContext.getBlocksize() * encodeBlockSizeRatio); finishInit(conf); if (LOG.isTraceEnabled()) { - LOG.trace("Writer" + (path != null ? " for " + path : "") + - " initialized with cacheConf: " + cacheConf + - " fileContext: " + fileContext); + LOG.trace("Writer" + (path != null ? " for " + path : "") + " initialized with cacheConf: " + + cacheConf + " fileContext: " + fileContext); } } /** * Add to the file info. All added key/value pairs can be obtained using * {@link HFile.Reader#getHFileInfo()}. - * * @param k Key * @param v Value * @throws IOException in case the key or the value are invalid */ @Override - public void appendFileInfo(final byte[] k, final byte[] v) - throws IOException { + public void appendFileInfo(final byte[] k, final byte[] v) throws IOException { fileInfo.append(k, v, true); } /** - * Sets the file info offset in the trailer, finishes up populating fields in - * the file info, and writes the file info into the given data output. The - * reason the data output is not always {@link #outputStream} is that we store - * file info as a block in version 2. - * + * Sets the file info offset in the trailer, finishes up populating fields in the file info, and + * writes the file info into the given data output. The reason the data output is not always + * {@link #outputStream} is that we store file info as a block in version 2. 
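The constructor above derives encodedBlockSizeLimit from hbase.writer.unified.encoded.blocksize.ratio (default 1f). A short worked example with a hypothetical 64 KB block size and a ratio of 0.5:

  float encodeBlockSizeRatio = conf.getFloat("hbase.writer.unified.encoded.blocksize.ratio", 1f); // 0.5f here
  int encodedBlockSizeLimit = (int) (hFileContext.getBlocksize() * encodeBlockSizeRatio);
  // 65536 * 0.5 = 32768: checkBlockBoundary() finishes the current block once the encoded size
  // reaches 32 KB, even though the unencoded data may still be below the 64 KB block size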
* @param trailer fixed file trailer - * @param out the data output to write the file info to + * @param out the data output to write the file info to */ protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out) - throws IOException { + throws IOException { trailer.setFileInfoOffset(outputStream.getPos()); finishFileInfo(); long startTime = EnvironmentEdgeManager.currentTime(); @@ -220,7 +211,6 @@ public class HFileWriterImpl implements HFile.Writer { /** * Checks that the given Cell's key does not violate the key order. - * * @param cell Cell whose key to check. * @return true if the key is duplicate * @throws IOException if the key or the key order is wrong @@ -250,15 +240,15 @@ public class HFileWriterImpl implements HFile.Writer { sb.append(cell); sb.append(", lastCell = "); sb.append(lastCell); - //file context includes HFile path and optionally table and CF of file being written + // file context includes HFile path and optionally table and CF of file being written sb.append("fileContext="); sb.append(hFileContext); return sb.toString(); } /** Checks the given value for validity. */ - protected void checkValue(final byte[] value, final int offset, - final int length) throws IOException { + protected void checkValue(final byte[] value, final int offset, final int length) + throws IOException { if (value == null) { throw new IOException("Value cannot be null"); } @@ -274,8 +264,8 @@ public class HFileWriterImpl implements HFile.Writer { @Override public String toString() { - return "writer=" + (path != null ? path.toString() : null) + ", name=" - + name + ", compression=" + hFileContext.getCompression().getName(); + return "writer=" + (path != null ? path.toString() : null) + ", name=" + name + ", compression=" + + hFileContext.getCompression().getName(); } public static Compression.Algorithm compressionByName(String algoName) { @@ -286,10 +276,9 @@ public class HFileWriterImpl implements HFile.Writer { } /** A helper method to create HFile output streams in constructors */ - protected static FSDataOutputStream createOutputStream(Configuration conf, - FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { - FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, - HConstants.DATA_FILE_UMASK_KEY); + protected static FSDataOutputStream createOutputStream(Configuration conf, FileSystem fs, + Path path, InetSocketAddress[] favoredNodes) throws IOException { + FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); return FSUtils.create(conf, fs, path, perms, favoredNodes); } @@ -298,17 +287,14 @@ public class HFileWriterImpl implements HFile.Writer { if (blockWriter != null) { throw new IllegalStateException("finishInit called twice"); } - blockWriter = new HFileBlock.Writer(conf, blockEncoder, hFileContext, - cacheConf.getByteBuffAllocator()); + blockWriter = + new HFileBlock.Writer(conf, blockEncoder, hFileContext, cacheConf.getByteBuffAllocator()); // Data block index writer boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(blockWriter, - cacheIndexesOnWrite ? cacheConf : null, - cacheIndexesOnWrite ? name : null); - dataBlockIndexWriter.setMaxChunkSize( - HFileBlockIndex.getMaxChunkSize(conf)); - dataBlockIndexWriter.setMinIndexNumEntries( - HFileBlockIndex.getMinIndexNumEntries(conf)); + cacheIndexesOnWrite ? cacheConf : null, cacheIndexesOnWrite ? 
name : null); + dataBlockIndexWriter.setMaxChunkSize(HFileBlockIndex.getMaxChunkSize(conf)); + dataBlockIndexWriter.setMinIndexNumEntries(HFileBlockIndex.getMinIndexNumEntries(conf)); inlineBlockWriters.add(dataBlockIndexWriter); // Meta data block index writer @@ -322,15 +308,17 @@ public class HFileWriterImpl implements HFile.Writer { protected void checkBlockBoundary() throws IOException { // For encoder like prefixTree, encoded size is not available, so we have to compare both // encoded size and unencoded size to blocksize limit. - if (blockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit - || blockWriter.blockSizeWritten() >= hFileContext.getBlocksize()) { + if ( + blockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit + || blockWriter.blockSizeWritten() >= hFileContext.getBlocksize() + ) { finishBlock(); writeInlineBlocks(false); newBlock(); } } - /** Clean up the data block that is currently being written.*/ + /** Clean up the data block that is currently being written. */ private void finishBlock() throws IOException { if (!blockWriter.isWriting() || blockWriter.blockSizeWritten() == 0) { return; @@ -355,14 +343,14 @@ public class HFileWriterImpl implements HFile.Writer { } /** - * Try to return a Cell that falls between left and - * right but that is shorter; i.e. takes up less space. This - * trick is used building HFile block index. Its an optimization. It does not - * always work. In this case we'll just return the right cell. + * Try to return a Cell that falls between left and right but that is + * shorter; i.e. takes up less space. This trick is used building HFile block index. Its an + * optimization. It does not always work. In this case we'll just return the right + * cell. * @return A cell that sorts between left and right. */ public static Cell getMidpoint(final CellComparator comparator, final Cell left, - final Cell right) { + final Cell right) { if (right == null) { throw new IllegalArgumentException("right cell can not be null"); } @@ -376,8 +364,8 @@ public class HFileWriterImpl implements HFile.Writer { return right; } byte[] midRow; - boolean bufferBacked = left instanceof ByteBufferExtendedCell - && right instanceof ByteBufferExtendedCell; + boolean bufferBacked = + left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell; if (bufferBacked) { midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getRowByteBuffer(), ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(), @@ -390,7 +378,7 @@ public class HFileWriterImpl implements HFile.Writer { if (midRow != null) { return PrivateCellUtil.createFirstOnRow(midRow); } - //Rows are same. Compare on families. + // Rows are same. Compare on families. if (bufferBacked) { midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), @@ -425,43 +413,42 @@ public class HFileWriterImpl implements HFile.Writer { /** * Try to get a byte array that falls between left and right as short as possible with * lexicographical order; - * - * @return Return a new array that is between left and right and minimally - * sized else just return null if left == right. + * @return Return a new array that is between left and right and minimally sized else just return + * null if left == right. 
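Both getMinimumMidpointArray variants reformatted around this point implement the same idea, so a simplified sketch may help (plain byte arrays, no offsets, assuming left sorts strictly before right); this illustrates the technique rather than reproducing the exact HBase code:

  /** Returns the shortest byte[] m with left < m <= right, or null if the arrays are equal. */
  static byte[] shortestSeparator(byte[] left, byte[] right) {
    int minLength = Math.min(left.length, right.length);
    int diffIdx = 0;
    while (diffIdx < minLength && left[diffIdx] == right[diffIdx]) {
      diffIdx++;
    }
    if (diffIdx == minLength) {
      if (left.length > right.length) {
        throw new IllegalArgumentException("left must sort before right");
      }
      if (left.length == right.length) {
        return null; // identical arrays: no shorter separator exists
      }
      // left is a proper prefix of right: append the smallest possible byte to the shared prefix
      byte[] mid = new byte[minLength + 1];
      System.arraycopy(right, 0, mid, 0, minLength + 1);
      mid[minLength] = 0x00;
      return mid;
    }
    if ((left[diffIdx] & 0xff) > (right[diffIdx] & 0xff)) {
      throw new IllegalArgumentException("left must sort before right");
    }
    // keep the shared prefix plus one byte of left, bumped by one; since
    // left[diffIdx] < right[diffIdx] (unsigned), the bump cannot overflow past right
    byte[] mid = new byte[diffIdx + 1];
    System.arraycopy(left, 0, mid, 0, diffIdx + 1);
    mid[diffIdx] = (byte) (mid[diffIdx] + 1);
    return mid;
  }

For example, the shortest separator between row keys "billing-2021" and "catalog-0001" is just "c", which is what the block index gets to store instead of a full row key.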
*/ private static byte[] getMinimumMidpointArray(final byte[] leftArray, final int leftOffset, - final int leftLength, final byte[] rightArray, final int rightOffset, final int rightLength) { + final int leftLength, final byte[] rightArray, final int rightOffset, final int rightLength) { int minLength = leftLength < rightLength ? leftLength : rightLength; int diffIdx = 0; for (; diffIdx < minLength; diffIdx++) { byte leftByte = leftArray[leftOffset + diffIdx]; byte rightByte = rightArray[rightOffset + diffIdx]; if ((leftByte & 0xff) > (rightByte & 0xff)) { - throw new IllegalArgumentException("Left byte array sorts after right row; left=" + Bytes - .toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + Bytes - .toStringBinary(rightArray, rightOffset, rightLength)); + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + Bytes.toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + + Bytes.toStringBinary(rightArray, rightOffset, rightLength)); } else if (leftByte != rightByte) { break; } } if (diffIdx == minLength) { if (leftLength > rightLength) { - //right is prefix of left - throw new IllegalArgumentException("Left byte array sorts after right row; left=" + Bytes - .toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + Bytes - .toStringBinary(rightArray, rightOffset, rightLength)); + // right is prefix of left + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + Bytes.toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + + Bytes.toStringBinary(rightArray, rightOffset, rightLength)); } else if (leftLength < rightLength) { - //left is prefix of right. + // left is prefix of right. byte[] minimumMidpointArray = new byte[minLength + 1]; System.arraycopy(rightArray, rightOffset, minimumMidpointArray, 0, minLength + 1); minimumMidpointArray[minLength] = 0x00; return minimumMidpointArray; } else { - //left == right + // left == right return null; } } - //Note that left[diffIdx] can never be equal to 0xff since left < right + // Note that left[diffIdx] can never be equal to 0xff since left < right byte[] minimumMidpointArray = new byte[diffIdx + 1]; System.arraycopy(leftArray, leftOffset, minimumMidpointArray, 0, diffIdx + 1); minimumMidpointArray[diffIdx] = (byte) (minimumMidpointArray[diffIdx] + 1); @@ -471,46 +458,43 @@ public class HFileWriterImpl implements HFile.Writer { /** * Try to create a new byte array that falls between left and right as short as possible with * lexicographical order. - * - * @return Return a new array that is between left and right and minimally - * sized else just return null if left == right. + * @return Return a new array that is between left and right and minimally sized else just return + * null if left == right. */ private static byte[] getMinimumMidpointArray(ByteBuffer left, int leftOffset, int leftLength, - ByteBuffer right, int rightOffset, int rightLength) { + ByteBuffer right, int rightOffset, int rightLength) { int minLength = leftLength < rightLength ? 
leftLength : rightLength; int diffIdx = 0; for (; diffIdx < minLength; diffIdx++) { int leftByte = ByteBufferUtils.toByte(left, leftOffset + diffIdx); int rightByte = ByteBufferUtils.toByte(right, rightOffset + diffIdx); if ((leftByte & 0xff) > (rightByte & 0xff)) { - throw new IllegalArgumentException( - "Left byte array sorts after right row; left=" + ByteBufferUtils - .toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils - .toStringBinary(right, rightOffset, rightLength)); + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftByte != rightByte) { break; } } if (diffIdx == minLength) { if (leftLength > rightLength) { - //right is prefix of left - throw new IllegalArgumentException( - "Left byte array sorts after right row; left=" + ByteBufferUtils - .toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils - .toStringBinary(right, rightOffset, rightLength)); + // right is prefix of left + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftLength < rightLength) { - //left is prefix of right. + // left is prefix of right. byte[] minimumMidpointArray = new byte[minLength + 1]; - ByteBufferUtils - .copyFromBufferToArray(minimumMidpointArray, right, rightOffset, 0, minLength + 1); + ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, right, rightOffset, 0, + minLength + 1); minimumMidpointArray[minLength] = 0x00; return minimumMidpointArray; } else { - //left == right + // left == right return null; } } - //Note that left[diffIdx] can never be equal to 0xff since left < right + // Note that left[diffIdx] can never be equal to 0xff since left < right byte[] minimumMidpointArray = new byte[diffIdx + 1]; ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, left, leftOffset, 0, diffIdx + 1); minimumMidpointArray[diffIdx] = (byte) (minimumMidpointArray[diffIdx] + 1); @@ -523,11 +507,10 @@ public class HFileWriterImpl implements HFile.Writer { while (ibw.shouldWriteBlock(closing)) { long offset = outputStream.getPos(); boolean cacheThisBlock = ibw.getCacheOnWrite(); - ibw.writeInlineBlock(blockWriter.startWriting( - ibw.getInlineBlockType())); + ibw.writeInlineBlock(blockWriter.startWriting(ibw.getInlineBlockType())); blockWriter.writeHeaderAndData(outputStream); ibw.blockWritten(offset, blockWriter.getOnDiskSizeWithHeader(), - blockWriter.getUncompressedSizeWithoutHeader()); + blockWriter.getUncompressedSizeWithoutHeader()); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); if (cacheThisBlock) { @@ -539,15 +522,14 @@ public class HFileWriterImpl implements HFile.Writer { /** * Caches the last written HFile block. - * @param offset the offset of the block we want to cache. Used to determine - * the cache key. + * @param offset the offset of the block we want to cache. Used to determine the cache key. 
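Relating this write-time caching to the read path shown earlier in HFileReaderImpl.readBlock: both sides build the block cache key from the hfile name plus the block's starting offset, which is why a block cached on write is found again on read. A hedged sketch of that symmetry (the key-equality detail is an assumption, it is not visible in this hunk):

  // write side (doCacheOnWrite): key built from this file's name and the block's offset
  BlockCacheKey writeKey = new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType());
  // read side (readBlock): key built the same way for the same offset
  BlockCacheKey readKey =
    new BlockCacheKey(name, dataBlockOffset, reader.isPrimaryReplicaReader(), expectedBlockType);
  // assumption: BlockCacheKey equality is driven by (hfile name, offset), so when
  // offset == dataBlockOffset the read-side lookup hits the entry cached at write time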
*/ private void doCacheOnWrite(long offset) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf); try { cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()), - cacheFormatBlock); + cacheFormatBlock); } finally { // refCnt will auto increase when block add to Cache, see RAMCache#putIfAbsent cacheFormatBlock.release(); @@ -568,15 +550,11 @@ public class HFileWriterImpl implements HFile.Writer { } /** - * Add a meta block to the end of the file. Call before close(). Metadata - * blocks are expensive. Fill one with a bunch of serialized data rather than - * do a metadata block per metadata instance. If metadata is small, consider - * adding to file info using {@link #appendFileInfo(byte[], byte[])} - * - * @param metaBlockName - * name of the block - * @param content - * will call readFields to get data later (DO NOT REUSE) + * Add a meta block to the end of the file. Call before close(). Metadata blocks are expensive. + * Fill one with a bunch of serialized data rather than do a metadata block per metadata instance. + * If metadata is small, consider adding to file info using + * {@link #appendFileInfo(byte[], byte[])} n * name of the block n * will call readFields to get + * data later (DO NOT REUSE) */ @Override public void appendMetaBlock(String metaBlockName, Writable content) { @@ -585,8 +563,7 @@ public class HFileWriterImpl implements HFile.Writer { for (i = 0; i < metaNames.size(); ++i) { // stop when the current key is greater than our own byte[] cur = metaNames.get(i); - if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, - key.length) > 0) { + if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, key.length) > 0) { break; } } @@ -623,7 +600,7 @@ public class HFileWriterImpl implements HFile.Writer { // Add the new meta block to the meta index. metaBlockIndexWriter.addEntry(metaNames.get(i), offset, - blockWriter.getOnDiskSizeWithHeader()); + blockWriter.getOnDiskSizeWithHeader()); } } @@ -640,8 +617,8 @@ public class HFileWriterImpl implements HFile.Writer { trailer.setLoadOnOpenOffset(rootIndexOffset); // Meta block index. - metaBlockIndexWriter.writeSingleLevelIndex(blockWriter.startWriting( - BlockType.ROOT_INDEX), "meta"); + metaBlockIndexWriter.writeSingleLevelIndex(blockWriter.startWriting(BlockType.ROOT_INDEX), + "meta"); blockWriter.writeHeaderAndData(outputStream); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); @@ -656,21 +633,19 @@ public class HFileWriterImpl implements HFile.Writer { totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); // Load-on-open data supplied by higher levels, e.g. Bloom filters. - for (BlockWritable w : additionalLoadOnOpenData){ + for (BlockWritable w : additionalLoadOnOpenData) { blockWriter.writeBlock(w, outputStream); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); } // Now finish off the trailer. 
trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels()); - trailer.setUncompressedDataIndexSize( - dataBlockIndexWriter.getTotalUncompressedSize()); + trailer.setUncompressedDataIndexSize(dataBlockIndexWriter.getTotalUncompressedSize()); trailer.setFirstDataBlockOffset(firstDataBlockOffset); trailer.setLastDataBlockOffset(lastDataBlockOffset); trailer.setComparatorClass(this.hFileContext.getCellComparator().getClass()); trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries()); - finishClose(trailer); blockWriter.release(); @@ -691,16 +666,15 @@ public class HFileWriterImpl implements HFile.Writer { this.addBloomFilter(bfw, BlockType.DELETE_FAMILY_BLOOM_META); } - private void addBloomFilter(final BloomFilterWriter bfw, - final BlockType blockType) { + private void addBloomFilter(final BloomFilterWriter bfw, final BlockType blockType) { if (bfw.getKeyCount() <= 0) { return; } - if (blockType != BlockType.GENERAL_BLOOM_META && - blockType != BlockType.DELETE_FAMILY_BLOOM_META) { - throw new RuntimeException("Block Type: " + blockType.toString() + - "is not supported"); + if ( + blockType != BlockType.GENERAL_BLOOM_META && blockType != BlockType.DELETE_FAMILY_BLOOM_META + ) { + throw new RuntimeException("Block Type: " + blockType.toString() + "is not supported"); } additionalLoadOnOpenData.add(new BlockWritable() { @Override @@ -725,11 +699,8 @@ public class HFileWriterImpl implements HFile.Writer { } /** - * Add key/value to file. Keys must be added in an order that agrees with the - * Comparator passed on construction. - * - * @param cell - * Cell to add. Cannot be empty nor null. + * Add key/value to file. Keys must be added in an order that agrees with the Comparator passed on + * construction. n * Cell to add. Cannot be empty nor null. */ @Override public void append(final Cell cell) throws IOException { @@ -788,20 +759,18 @@ public class HFileWriterImpl implements HFile.Writer { if (lastCell != null) { // Make a copy. The copy is stuffed into our fileinfo map. Needs a clean // byte buffer. Won't take a tuple. - byte [] lastKey = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); + byte[] lastKey = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); fileInfo.append(HFileInfo.LASTKEY, lastKey, false); } // Average key length. - int avgKeyLen = - entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); + int avgKeyLen = entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); fileInfo.append(HFileInfo.AVG_KEY_LEN, Bytes.toBytes(avgKeyLen), false); fileInfo.append(HFileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), false); // Average value length. - int avgValueLen = - entryCount == 0 ? 0 : (int) (totalValueLength / entryCount); + int avgValueLen = entryCount == 0 ? 
0 : (int) (totalValueLength / entryCount); fileInfo.append(HFileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false); if (hFileContext.isIncludesTags()) { // When tags are not being written in this file, MAX_TAGS_LEN is excluded @@ -827,14 +796,14 @@ public class HFileWriterImpl implements HFile.Writer { if (cryptoContext != Encryption.Context.NONE) { // Wrap the context's key and write it as the encryption metadata, the wrapper includes // all information needed for decryption - trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(), - cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()), + trailer.setEncryptionKey(EncryptionUtil.wrapKey( + cryptoContext.getConf(), cryptoContext.getConf() + .get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), cryptoContext.getKey())); } // Now we can finish the close trailer.setMetaIndexCount(metaNames.size()); - trailer.setTotalUncompressedBytes(totalUncompressedBytes+ trailer.getTrailerSize()); + trailer.setTotalUncompressedBytes(totalUncompressedBytes + trailer.getTrailerSize()); trailer.setEntryCount(entryCount); trailer.setCompressionCodec(hFileContext.getCompression()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java index 8b85c68f9a5..bcc5466dcc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java @@ -1,22 +1,20 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io.hfile; import org.apache.yetus.audience.InterfaceAudience; @@ -24,13 +22,13 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class InclusiveCombinedBlockCache extends CombinedBlockCache { public InclusiveCombinedBlockCache(FirstLevelBlockCache l1, BlockCache l2) { - super(l1,l2); + super(l1, l2); l1.setVictimCache(l2); } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, - boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { // On all external cache set ups the lru should have the l2 cache set as the victimHandler // Because of that all requests that miss inside of the lru block cache will be // tried in the l2 block cache. @@ -38,9 +36,8 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache { } /** - * * @param cacheKey The block's cache key. - * @param buf The block contents wrapped in a ByteBuffer. + * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory. This parameter is only useful for * the L1 lru cache. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java index 50b195dd8e9..2841d9af16a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java @@ -21,10 +21,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; /** - * An on heap block cache implementation extended LruBlockCache and only cache index block. - * This block cache should be only used by - * {@link org.apache.hadoop.hbase.client.ClientSideRegionScanner} that normally considers to be - * used by client resides out of the region server, e.g. a container of a map reduce job. + * An on heap block cache implementation extended LruBlockCache and only cache index block. This + * block cache should be only used by {@link org.apache.hadoop.hbase.client.ClientSideRegionScanner} + * that normally considers to be used by client resides out of the region server, e.g. a container + * of a map reduce job. **/ @InterfaceAudience.Private public class IndexOnlyLruBlockCache extends LruBlockCache { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java index 12ae6a50a3c..040ca6b9164 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,43 +19,36 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutput; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * A way to write "inline" blocks into an {@link HFile}. Inline blocks are - * interspersed with data blocks. For example, Bloom filter chunks and - * leaf-level blocks of a multi-level block index are stored as inline blocks. 
+ * A way to write "inline" blocks into an {@link HFile}. Inline blocks are interspersed with data + * blocks. For example, Bloom filter chunks and leaf-level blocks of a multi-level block index are + * stored as inline blocks. */ @InterfaceAudience.Private public interface InlineBlockWriter { /** - * Determines whether there is a new block to be written out. - * - * @param closing - * whether the file is being closed, in which case we need to write - * out all available data and not wait to accumulate another block + * Determines whether there is a new block to be written out. n * whether the file is being + * closed, in which case we need to write out all available data and not wait to accumulate + * another block */ boolean shouldWriteBlock(boolean closing); /** - * Writes the block to the provided stream. Must not write any magic records. - * Called only if {@link #shouldWriteBlock(boolean)} returned true. - * - * @param out - * a stream (usually a compressing stream) to write the block to + * Writes the block to the provided stream. Must not write any magic records. Called only if + * {@link #shouldWriteBlock(boolean)} returned true. n * a stream (usually a compressing stream) + * to write the block to */ void writeInlineBlock(DataOutput out) throws IOException; /** - * Called after a block has been written, and its offset, raw size, and - * compressed size have been determined. Can be used to add an entry to a - * block index. If this type of inline blocks needs a block index, the inline - * block writer is responsible for maintaining it. - * - * @param offset the offset of the block in the stream - * @param onDiskSize the on-disk size of the block + * Called after a block has been written, and its offset, raw size, and compressed size have been + * determined. Can be used to add an entry to a block index. If this type of inline blocks needs a + * block index, the inline block writer is responsible for maintaining it. + * @param offset the offset of the block in the stream + * @param onDiskSize the on-disk size of the block * @param uncompressedSize the uncompressed size of the block */ void blockWritten(long offset, int onDiskSize, int uncompressedSize); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java index d0526656a3b..eed3a53acfe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java index 494a588aadb..87932074bff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,8 +25,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.PriorityQueue; -import java.util.SortedSet; -import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -49,14 +47,13 @@ import org.apache.hbase.thirdparty.com.google.common.base.Objects; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This realisation improve performance of classical LRU - * cache up to 3 times via reduce GC job. + * This realisation improve performance of classical LRU cache up to 3 times via reduce GC + * job. *

          * The classical block cache implementation that is memory-aware using {@link HeapSize}, - * memory-bound using an - * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a - * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} - * operations. + * memory-bound using an LRU eviction algorithm, and concurrent: backed by a + * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving constant-time + * {@link #cacheBlock} and {@link #getBlock} operations. *

          * Contains three levels of block priority to allow for scan-resistance and in-memory families * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An @@ -91,57 +88,50 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto *

          * Adaptive LRU cache lets speed up performance while we are reading much more data than can fit * into BlockCache and it is the cause of a high rate of evictions. This in turn leads to heavy - * Garbage Collector works. So a lot of blocks put into BlockCache but never read, but spending - * a lot of CPU resources for cleaning. We could avoid this situation via parameters: + * Garbage Collector works. So a lot of blocks put into BlockCache but never read, but spending a + * lot of CPU resources for cleaning. We could avoid this situation via parameters: *

          - * hbase.lru.cache.heavy.eviction.count.limit - set how many times we have to run the - * eviction process that starts to avoid putting data to BlockCache. By default it is 0 and it - * meats the feature will start at the beginning. But if we have some times short reading the same - * data and some times long-term reading - we can divide it by this parameter. For example we know - * that our short reading used to be about 1 minutes, then we have to set the parameter about 10 - * and it will enable the feature only for long time massive reading (after ~100 seconds). So when - * we use short-reading and want all of them in the cache we will have it (except for eviction of - * course). When we use long-term heavy reading the feature will be enabled after some time and - * bring better performance. + * hbase.lru.cache.heavy.eviction.count.limit - set how many times we have to run the + * eviction process that starts to avoid putting data to BlockCache. By default it is 0 and it meats + * the feature will start at the beginning. But if we have some times short reading the same data + * and some times long-term reading - we can divide it by this parameter. For example we know that + * our short reading used to be about 1 minutes, then we have to set the parameter about 10 and it + * will enable the feature only for long time massive reading (after ~100 seconds). So when we use + * short-reading and want all of them in the cache we will have it (except for eviction of course). + * When we use long-term heavy reading the feature will be enabled after some time and bring better + * performance. *
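A hedged configuration sketch for the knob described above (the value 10 is just the example used in this paragraph, roughly 100 seconds of eviction runs):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  Configuration conf = HBaseConfiguration.create();
  // skip the first ~10 heavy-eviction checks (about 10 s each), so short read bursts stay
  // fully cached and only sustained scans trigger the adaptive behaviour
  conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 10);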

          * hbase.lru.cache.heavy.eviction.mb.size.limit - set how many bytes in 10 seconds desirable * putting into BlockCache (and evicted from it). The feature will try to reach this value and - * maintain it. Don't try to set it too small because it leads to premature exit from this mode. - * For powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB. Average system - * (~10 cores) 200-300 MB. Some weak systems (2-5 cores) may be good with 50-100 MB. - * How it works: we set the limit and after each ~10 second calculate how many bytes were freed. - * Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100; - * For example we set the limit = 500 and were evicted 2000 MB. Overhead is: - * 2000 * 100 / 500 - 100 = 300% - * The feature is going to reduce a percent caching data blocks and fit evicted bytes closer to - * 100% (500 MB). Some kind of an auto-scaling. - * If freed bytes less then the limit we have got negative overhead. - * For example if were freed 200 MB: - * 200 * 100 / 500 - 100 = -60% - * The feature will increase the percent of caching blocks. - * That leads to fit evicted bytes closer to 100% (500 MB). - * The current situation we can find out in the log of RegionServer: - * BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current caching - * DataBlock (%): 100 - means no eviction, 100% blocks is caching - * BlockCache evicted (MB): 2000, overhead (%): 300, heavy eviction counter: 1, current caching - * DataBlock (%): 97 - means eviction begin, reduce of caching blocks by 3%. - * It help to tune your system and find out what value is better set. Don't try to reach 0% - * overhead, it is impossible. Quite good 50-100% overhead, - * it prevents premature exit from this mode. + * maintain it. Don't try to set it too small because it leads to premature exit from this mode. For + * powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB. An average system (~10 + * cores) 200-300 MB. Some weak systems (2-5 cores) may be fine with 50-100 MB. How it works: we set + * the limit and after each ~10 seconds calculate how many bytes were freed. Overhead = Freed Bytes + * Sum (MB) * 100 / Limit (MB) - 100; For example, we set the limit = 500 and 2000 MB were evicted. + * Overhead is: 2000 * 100 / 500 - 100 = 300% The feature is going to reduce the percent of cached + * data blocks and fit evicted bytes closer to 100% (500 MB). Some kind of auto-scaling. If fewer + * bytes were freed than the limit, we get a negative overhead. For example, if 200 MB were freed: 200 * + * 100 / 500 - 100 = -60% The feature will increase the percent of caching blocks. That brings + * evicted bytes closer to 100% (500 MB). The current situation can be found in the RegionServer + * log: BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current + * caching DataBlock (%): 100 - means no eviction, 100% of blocks are cached BlockCache evicted (MB): + * 2000, overhead (%): 300, heavy eviction counter: 1, current caching DataBlock (%): 97 - means + * eviction began, caching blocks reduced by 3%. This helps to tune your system and find out which + * value is better to set. Don't try to reach 0% overhead, it is impossible. An overhead of 50-100% + * is quite good and prevents premature exit from this mode. *
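To make the arithmetic above concrete, a small self-contained sketch (illustrative only, not part of the patch) that reproduces the two worked examples with the formula Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100:

public class EvictionOverheadExample {
  // Overhead (%) as described in the javadoc above.
  static long overheadPercent(long freedMb, long limitMb) {
    return freedMb * 100 / limitMb - 100;
  }

  public static void main(String[] args) {
    // Limit = 500 MB, 2000 MB evicted in the period -> 300%: cache fewer data blocks.
    System.out.println(overheadPercent(2000, 500));
    // Limit = 500 MB, only 200 MB evicted -> -60%: cache more data blocks again.
    System.out.println(overheadPercent(200, 500));
  }
}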

          * hbase.lru.cache.heavy.eviction.overhead.coefficient - set how fast we want to get the * result. If we know that our reading is heavy for a long time, we don't want to wait and can * increase the coefficient and get good performance sooner. But if we aren't sure we can do it - * slowly and it could prevent premature exit from this mode. So, when the coefficient is higher - * we can get better performance when heavy reading is stable. But when reading is changing we - * can adjust to it and set the coefficient to lower value. - * For example, we set the coefficient = 0.01. It means the overhead (see above) will be - * multiplied by 0.01 and the result is the value of reducing percent caching blocks. For example, - * if the overhead = 300% and the coefficient = 0.01, - * then percent of caching blocks will reduce by 3%. - * Similar logic when overhead has got negative value (overshooting). Maybe it is just short-term - * fluctuation and we will try to stay in this mode. It helps avoid premature exit during - * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. + * slowly and it could prevent premature exit from this mode. So, when the coefficient is higher we + * can get better performance when heavy reading is stable. But when reading patterns change we can + * adjust to it and set the coefficient to a lower value. For example, we set the coefficient = 0.01. + * It means the overhead (see above) will be multiplied by 0.01 and the result is the value by which + * to reduce the percent of cached blocks. For example, if the overhead = 300% and the coefficient = 0.01, + * then the percent of cached blocks will be reduced by 3%. Similar logic applies when the overhead is + * negative (overshooting). Maybe it is just a short-term fluctuation and we will try to stay in this + * mode. It helps avoid premature exit during short-term fluctuation. Backpressure has simple logic: + * more overshooting - more caching blocks. *
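Purely as illustration (assumed names, not the class's actual fields): how the coefficient turns an overhead reading into a change of the cached-data-block percentage, matching the 300% * 0.01 = 3% example above.

public class OverheadCoefficientExample {
  public static void main(String[] args) {
    float coefficient = 0.01f;       // hbase.lru.cache.heavy.eviction.overhead.coefficient
    int cacheDataBlockPercent = 100;
    int overheadPercent = 300;       // e.g. 2000 MB freed against a 500 MB limit, see above
    // Positive overhead: reduce the share of data blocks admitted to the cache.
    cacheDataBlockPercent -= (int) (overheadPercent * coefficient); // 100 - 3 = 97
    System.out.println("current caching DataBlock (%): " + cacheDataBlockPercent);
  }
}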

          * Find more information about improvement: https://issues.apache.org/jira/browse/HBASE-23887 */ @@ -175,14 +165,14 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { "hbase.lru.blockcache.memory.percentage"; /** - * Configuration key to force data-block always (except in-memory are too much) - * cached in memory for in-memory hfile, unlike inMemory, which is a column-family - * configuration, inMemoryForceMode is a cluster-wide configuration + * Configuration key to force data-block always (except in-memory are too much) cached in memory + * for in-memory hfile, unlike inMemory, which is a column-family configuration, inMemoryForceMode + * is a cluster-wide configuration */ private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = "hbase.lru.rs.inmemoryforcemode"; - /* Default Configuration Parameters*/ + /* Default Configuration Parameters */ /* Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; @@ -206,29 +196,28 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT - = "hbase.lru.cache.heavy.eviction.count.limit"; + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = + "hbase.lru.cache.heavy.eviction.count.limit"; // Default value actually equal to disable feature of increasing performance. // Because 2147483647 is about ~680 years (after that it will start to work) // We can set it to 0-10 and get the profit right now. // (see details https://issues.apache.org/jira/browse/HBASE-23887). private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; - private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = + "hbase.lru.cache.heavy.eviction.mb.size.limit"; private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; - private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT - = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = + "hbase.lru.cache.heavy.eviction.overhead.coefficient"; private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in - * {@link LruAdaptiveBlockCache#getBlock}, we need to guarantee the atomicity - * of map#computeIfPresent (key, func). Besides, the func method must execute exactly once only - * when the key is present and under the lock context, otherwise the reference count will be - * messed up. Notice that the - * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + * {@link LruAdaptiveBlockCache#getBlock}, we need to guarantee the atomicity of + * map#computeIfPresent (key, func). Besides, the func method must execute exactly once only when + * the key is present and under the lock context, otherwise the reference count will be messed up. + * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. */ private transient final ConcurrentHashMap map; @@ -298,8 +287,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { /** * Where to send victims (blocks evicted/missing from the cache). 
This is used only when we use an - * external cache as L2. - * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + * external cache as L2. Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache */ private transient BlockCache victimHandler = null; @@ -316,12 +304,10 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { private final float heavyEvictionOverheadCoefficient; /** - * Default constructor. Specify maximum size and expected average block - * size (approximation is fine). - * - *

          All other factors will be calculated based on defaults specified in - * this class. - * + * Default constructor. Specify maximum size and expected average block size (approximation is + * fine). + *

          + * All other factors will be calculated based on defaults specified in this class. * @param maxSize maximum size of cache, in bytes * @param blockSize approximate size of each block, in bytes */ @@ -330,37 +316,27 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * Constructor used for testing. Allows disabling of the eviction thread. + * Constructor used for testing. Allows disabling of the eviction thread. */ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, - DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, - DEFAULT_SINGLE_FACTOR, - DEFAULT_MULTI_FACTOR, - DEFAULT_MEMORY_FACTOR, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, - false, - DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); } - public LruAdaptiveBlockCache(long maxSize, long blockSize, - boolean evictionThread, Configuration conf) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL, + public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread, + Configuration conf) { + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), - conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, @@ -376,38 +352,38 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * Configurable constructor. Use this constructor if not using defaults. 
- * - * @param maxSize maximum size of this cache, in bytes - * @param blockSize expected average size of blocks, in bytes - * @param evictionThread whether to run evictions in a bg thread or not - * @param mapInitialSize initial size of backing ConcurrentHashMap - * @param mapLoadFactor initial load factor of backing ConcurrentHashMap - * @param mapConcurrencyLevel initial concurrency factor for backing CHM - * @param minFactor percentage of total size that eviction will evict until - * @param acceptableFactor percentage of total size that triggers eviction - * @param singleFactor percentage of total size for single-access blocks - * @param multiFactor percentage of total size for multiple-access blocks - * @param memoryFactor percentage of total size for in-memory blocks - * @param hardLimitFactor hard capacity limit - * @param forceInMemory in-memory hfile's data block has higher priority when evicting - * @param maxBlockSize maximum block size for caching - * @param heavyEvictionCountLimit when starts AdaptiveLRU algoritm work - * @param heavyEvictionMbSizeLimit how many bytes desirable putting into BlockCache - * @param heavyEvictionOverheadCoefficient how aggressive AdaptiveLRU will reduce GC + * Configurable constructor. Use this constructor if not using defaults. + * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not + * @param mapInitialSize initial size of backing ConcurrentHashMap + * @param mapLoadFactor initial load factor of backing ConcurrentHashMap + * @param mapConcurrencyLevel initial concurrency factor for backing CHM + * @param minFactor percentage of total size that eviction will evict until + * @param acceptableFactor percentage of total size that triggers eviction + * @param singleFactor percentage of total size for single-access blocks + * @param multiFactor percentage of total size for multiple-access blocks + * @param memoryFactor percentage of total size for in-memory blocks + * @param hardLimitFactor hard capacity limit + * @param forceInMemory in-memory hfile's data block has higher priority when + * evicting + * @param maxBlockSize maximum block size for caching + * @param heavyEvictionCountLimit when starts AdaptiveLRU algoritm work + * @param heavyEvictionMbSizeLimit how many bytes desirable putting into BlockCache + * @param heavyEvictionOverheadCoefficient how aggressive AdaptiveLRU will reduce GC */ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread, - int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, - float minFactor, float acceptableFactor, float singleFactor, - float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, - int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, - float heavyEvictionOverheadCoefficient) { + int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, + float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, + float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, + long heavyEvictionMbSizeLimit, float heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; - if(singleFactor + multiFactor + memoryFactor != 1 || - singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { - throw new IllegalArgumentException("Single, multi, and memory factors " + - " should be non-negative and total 1.0"); + 
if ( + singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 + || memoryFactor < 0 + ) { + throw new IllegalArgumentException( + "Single, multi, and memory factors " + " should be non-negative and total 1.0"); } if (minFactor >= acceptableFactor) { throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); @@ -447,7 +423,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { heavyEvictionOverheadCoefficient = Math.max(heavyEvictionOverheadCoefficient, 0.001f); this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, STAT_THREAD_PERIOD, TimeUnit.SECONDS); @@ -474,13 +450,13 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * The block cached in LruAdaptiveBlockCache will always be an heap block: on the one side, - * the heap access will be more faster then off-heap, the small index block or meta block - * cached in CombinedBlockCache will benefit a lot. on other side, the LruAdaptiveBlockCache - * size is always calculated based on the total heap size, if caching an off-heap block in - * LruAdaptiveBlockCache, the heap size will be messed up. Here we will clone the block into an - * heap block if it's an off-heap block, otherwise just use the original block. The key point is - * maintain the refCnt of the block (HBASE-22127):
          + * The block cached in LruAdaptiveBlockCache will always be a heap block: on the one hand, + * heap access is faster than off-heap, so the small index or meta blocks cached in + * CombinedBlockCache will benefit a lot. On the other hand, the LruAdaptiveBlockCache size is always + * calculated based on the total heap size, so caching an off-heap block in LruAdaptiveBlockCache + * would mess up the heap size accounting. Here we will clone the block into a heap block if it's an + * off-heap block, otherwise just use the original block. The key point is to maintain the refCnt of + * the block (HBASE-22127):
          * 1. if cache the cloned heap block, its refCnt is an totally new one, it's easy to handle;
          * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's * reservoir, if both RPC and LruAdaptiveBlockCache release the block, then it can be garbage @@ -507,7 +483,6 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { *

          * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) * this can happen, for which we compare the buffer contents. - * * @param cacheKey block's cache key * @param buf block buffer * @param inMemory if block is in-memory @@ -532,18 +507,15 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { // big this can make the logs way too noisy. // So we log 2% if (stats.failInsert() % 50 == 0) { - LOG.warn("Trying to cache too large a block " - + cacheKey.getHfileName() + " @ " - + cacheKey.getOffset() - + " is " + buf.heapSize() - + " which is larger than " + maxBlockSize); + LOG.warn("Trying to cache too large a block " + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + " is " + buf.heapSize() + " which is larger than " + + maxBlockSize); } return; } LruCachedBlock cb = map.get(cacheKey); - if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, - cacheKey, buf)) { + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, buf)) { return; } long currentSize = size.get(); @@ -581,20 +553,20 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * Sanity-checking for parity between actual block cache content and metrics. - * Intended only for use with TRACE level logging and -ea JVM. + * Sanity-checking for parity between actual block cache content and metrics. Intended only for + * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { - LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { - LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); } } } @@ -607,7 +579,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { * switch whether make the LRU on-heap or not, if so we may need copy the memory to on-heap, * otherwise the caching size is based on off-heap. * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @@ -615,9 +587,8 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * Helper function that updates the local size counter and also updates any - * per-cf or per-blocktype metrics it can discern from given - * {@link LruCachedBlock} + * Helper function that updates the local size counter and also updates any per-cf or + * per-blocktype metrics it can discern from given {@link LruCachedBlock} */ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); @@ -633,14 +604,11 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { /** * Get the buffer of the block with the specified name. 
- * * @param cacheKey block's cache key * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check - * locking) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid + * double counting cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not - * * @return buffer of specified cache key, or null if not in cache */ @Override @@ -683,7 +651,6 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { /** * Whether the cache contains block with specified cacheKey - * * @return true if contains the block */ @Override @@ -698,13 +665,11 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * Evicts all blocks for a specific HFile. This is an - * expensive operation implemented as a linear-time search through all blocks - * in the cache. Ideally this should be a search in a log-access-time map. - * + * Evicts all blocks for a specific HFile. This is an expensive operation implemented as a + * linear-time search through all blocks in the cache. Ideally this should be a search in a + * log-access-time map. *

          * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override @@ -718,11 +683,9 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * Evict the block, and it will be cached by the victim handler if exists && - * block may be read again later - * - * @param evictedByEvictionProcess true if the given block is evicted by - * EvictionThread + * Evict the block, and it will be cached by the victim handler if exists && block may be + * read again later + * @param evictedByEvictionProcess true if the given block is evicted by EvictionThread * @return the heap size of evicted block */ protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { @@ -774,11 +737,8 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /** - * Eviction method. - * - * Evict items in order of use, allowing delete items - * which haven't been used for the longest amount of time. - * + * Eviction method. Evict items in order of use, allowing delete items which haven't been used for + * the longest amount of time. * @return how many bytes were freed */ long evict() { @@ -796,9 +756,8 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { - LOG.trace("Block cache LRU eviction started; Attempting to free " + - StringUtils.byteDesc(bytesToFree) + " of total=" + - StringUtils.byteDesc(currentSize)); + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize)); } if (bytesToFree <= 0) { @@ -806,12 +765,9 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } // Instantiate priority buckets - BlockBucket bucketSingle - = new BlockBucket("single", bytesToFree, blockSize, singleSize()); - BlockBucket bucketMulti - = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); - BlockBucket bucketMemory - = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); + BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); + BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); + BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); // Scan entire map putting into appropriate buckets for (LruCachedBlock cachedBlock : map.values()) { @@ -841,13 +797,13 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { bytesFreed = bucketSingle.free(s); bytesFreed += bucketMulti.free(m); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " from single and multi buckets"); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets"); } bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " total from all three buckets "); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets "); } } else { // this means no need to evict block in memory bucket, @@ -894,12 +850,11 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { long single = bucketSingle.totalSize(); long multi = bucketMulti.totalSize(); long memory = bucketMemory.totalSize(); - LOG.trace("Block cache LRU eviction completed; " + - "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + - "total=" + 
StringUtils.byteDesc(this.size.get()) + ", " + - "single=" + StringUtils.byteDesc(single) + ", " + - "multi=" + StringUtils.byteDesc(multi) + ", " + - "memory=" + StringUtils.byteDesc(memory)); + LOG.trace( + "Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single=" + + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); } } finally { stats.evict(); @@ -911,26 +866,21 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) .add("currentSize", StringUtils.byteDesc(getCurrentSize())) .add("freeSize", StringUtils.byteDesc(getFreeSize())) .add("maxSize", StringUtils.byteDesc(getMaxSize())) .add("heapSize", StringUtils.byteDesc(heapSize())) - .add("minSize", StringUtils.byteDesc(minSize())) - .add("minFactor", minFactor) - .add("multiSize", StringUtils.byteDesc(multiSize())) - .add("multiFactor", multiFactor) - .add("singleSize", StringUtils.byteDesc(singleSize())) - .add("singleFactor", singleFactor) + .add("minSize", StringUtils.byteDesc(minSize())).add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())).add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())).add("singleFactor", singleFactor) .toString(); } /** - * Used to group blocks into priority buckets. There will be a BlockBucket - * for each priority (single, multi, memory). Once bucketed, the eviction - * algorithm takes the appropriate number of elements out of each according - * to configuration parameters and their relatives sizes. + * Used to group blocks into priority buckets. There will be a BlockBucket for each priority + * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of + * elements out of each according to configuration parameters and their relatives sizes. */ private class BlockBucket implements Comparable { @@ -987,7 +937,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { if (!(that instanceof BlockBucket)) { return false; } - return compareTo((BlockBucket)that) == 0; + return compareTo((BlockBucket) that) == 0; } @Override @@ -997,17 +947,14 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name) + return MoreObjects.toStringHelper(this).add("name", name) .add("totalSize", StringUtils.byteDesc(totalSize)) - .add("bucketSize", StringUtils.byteDesc(bucketSize)) - .toString(); + .add("bucketSize", StringUtils.byteDesc(bucketSize)).toString(); } } /** * Get the maximum size of this cache. - * * @return max size in bytes */ @@ -1051,10 +998,9 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { } /* - * Eviction thread. Sits in waiting state until an eviction is triggered - * when the cache size grows above the acceptable level.

          - * - * Thread is triggered into action by {@link LruAdaptiveBlockCache#runEviction()} + * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows + * above the acceptable level.

          Thread is triggered into action by {@link + * LruAdaptiveBlockCache#runEviction()} */ static class EvictionThread extends Thread { @@ -1079,7 +1025,7 @@ while (this.go) { synchronized (this) { try { - this.wait(1000 * 10/*Don't wait for ever*/); + this.wait(1000 * 10/* Don't wait for ever */); } catch (InterruptedException e) { LOG.warn("Interrupted eviction thread ", e); Thread.currentThread().interrupt(); @@ -1089,18 +1035,15 @@ if (cache == null) { break; } - freedSumMb += cache.evict()/1024/1024; + freedSumMb += cache.evict() / 1024 / 1024; /* - * Sometimes we are reading more data than can fit into BlockCache - * and it is the cause a high rate of evictions. - * This in turn leads to heavy Garbage Collector works. - * So a lot of blocks put into BlockCache but never read, - * but spending a lot of CPU resources. - * Here we will analyze how many bytes were freed and decide - * decide whether the time has come to reduce amount of caching blocks. - * It help avoid put too many blocks into BlockCache - * when evict() works very active and save CPU for other jobs. - * More delails: https://issues.apache.org/jira/browse/HBASE-23887 + * Sometimes we are reading more data than can fit into BlockCache and it is the cause of a + * high rate of evictions. This in turn leads to heavy Garbage Collector work. So a lot of + * blocks are put into BlockCache but never read, while spending a lot of CPU resources. Here + * we will analyze how many bytes were freed and decide whether the time has come to reduce + * the amount of cached blocks. It helps avoid putting too many blocks into BlockCache when + * evict() is very active and saves CPU for other jobs. More details: + * https://issues.apache.org/jira/browse/HBASE-23887 */ // First of all we have to control how much time @@ -1165,11 +1108,10 @@ cache.cacheDataBlockPercent = 100; } } - LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + - "heavy eviction counter: {}, " + - "current caching DataBlock (%): {}", - freedSumMb, freedDataOverheadPercent, - heavyEvictionCount, cache.cacheDataBlockPercent); + LOG.info( + "BlockCache evicted (MB): {}, overhead (%): {}, " + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + freedSumMb, freedDataOverheadPercent, heavyEvictionCount, cache.cacheDataBlockPercent); freedSumMb = 0; startTime = stopTime; @@ -1177,8 +1119,8 @@ } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="This is what we want") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "This is what we want") public void evict() { synchronized (this) { this.notifyAll(); @@ -1199,7 +1141,7 @@ } /* - * Statistics thread. Periodically prints the cache statistics to the log. + * Statistics thread. Periodically prints the cache statistics to the log. 
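A condensed, illustrative paraphrase of the per-period bookkeeping described in the comment above (field and method names are simplified assumptions; the real EvictionThread keeps this state on the cache instance and applies additional clamping):

class AdaptiveCachingSketch {
  private int cacheDataBlockPercent = 100; // share of data blocks admitted to the cache
  private int heavyEvictionCount = 0;      // consecutive ~10-second periods over the limit

  // Called once per ~10-second period with the MB freed by evict() during that period.
  void onPeriodEnd(long freedSumMb, long mbSizeLimit, int countLimit, float coefficient) {
    long overheadPercent = freedSumMb * 100 / mbSizeLimit - 100;
    if (overheadPercent > 0) {
      // Eviction freed more than the limit: once this has happened more than countLimit times,
      // admit fewer data blocks, in proportion to the overhead.
      heavyEvictionCount++;
      if (heavyEvictionCount > countLimit) {
        cacheDataBlockPercent =
          Math.max(1, cacheDataBlockPercent - (int) (overheadPercent * coefficient));
      }
    } else {
      // Overshooting: possibly a short-term fluctuation, so stay in this mode but admit more
      // data blocks again ("more overshooting - more caching blocks").
      cacheDataBlockPercent =
        Math.min(100, cacheDataBlockPercent + (int) Math.ceil(-overheadPercent * coefficient));
    }
  }
}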
*/ static class StatisticsThread extends Thread { @@ -1221,28 +1163,27 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { // Log size long totalSize = heapSize(); long freeSize = maxSize - totalSize; - LruAdaptiveBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "max=" + StringUtils.byteDesc(this.maxSize) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount() + ", " + - "evictedPerRun=" + stats.evictedPerEviction()); + LruAdaptiveBlockCache.LOG + .info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + + StringUtils.byteDesc(freeSize) + ", " + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 + ? "0" + : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) + + ", " + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount() + + ", " + "evictedPerRun=" + stats.evictedPerEviction()); } /** * Get counter statistics for this cache. - * - *

          Includes: total accesses, hits, misses, evicted blocks, and runs - * of the eviction processes. + *

          + * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes. */ @Override public CacheStats getStats() { @@ -1339,7 +1280,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1358,17 +1299,21 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { // Simple calculators of sizes given factors and maxSize long acceptableSize() { - return (long)Math.floor(this.maxSize * this.acceptableFactor); + return (long) Math.floor(this.maxSize * this.acceptableFactor); } + private long minSize() { - return (long)Math.floor(this.maxSize * this.minFactor); + return (long) Math.floor(this.maxSize * this.minFactor); } + private long singleSize() { - return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor); } + private long multiSize() { - return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor); } + private long memorySize() { return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index d7f700a4e26..74514aa4a55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,46 +48,41 @@ import org.apache.hbase.thirdparty.com.google.common.base.Objects; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A block cache implementation that is memory-aware using {@link HeapSize}, - * memory-bound using an LRU eviction algorithm, and concurrent: backed by a - * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving - * constant-time {@link #cacheBlock} and {@link #getBlock} operations.

          - * + * A block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an + * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a + * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} + * operations. + *

          * Contains three levels of block priority to allow for scan-resistance and in-memory families * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column - * family is a column family that should be served from memory if possible): - * single-access, multiple-accesses, and in-memory priority. - * A block is added with an in-memory priority flag if + * family is a column family that should be served from memory if possible): single-access, + * multiple-accesses, and in-memory priority. A block is added with an in-memory priority flag if * {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()}, otherwise a block becomes a - * single access priority the first time it is read into this block cache. If a block is - * accessed again while in cache, it is marked as a multiple access priority block. This - * delineation of blocks is used to prevent scans from thrashing the cache adding a - * least-frequently-used element to the eviction algorithm.

          - * - * Each priority is given its own chunk of the total cache to ensure - * fairness during eviction. Each priority will retain close to its maximum - * size, however, if any priority is not using its entire chunk the others - * are able to grow beyond their chunk size.

          - * - * Instantiated at a minimum with the total size and average block size. - * All sizes are in bytes. The block size is not especially important as this - * cache is fully dynamic in its sizing of blocks. It is only used for - * pre-allocating data structures and in initial heap estimation of the map.

          - * - * The detailed constructor defines the sizes for the three priorities (they - * should total to the maximum size defined). It also sets the levels that - * trigger and control the eviction thread.

          - * - * The acceptable size is the cache size level which triggers the eviction - * process to start. It evicts enough blocks to get the size below the - * minimum size specified.

          - * - * Eviction happens in a separate thread and involves a single full-scan - * of the map. It determines how many bytes must be freed to reach the minimum - * size, and then while scanning determines the fewest least-recently-used - * blocks necessary from each of the three priorities (would be 3 times bytes - * to free). It then uses the priority chunk sizes to evict fairly according - * to the relative sizes and usage. + * single access priority the first time it is read into this block cache. If a block is accessed + * again while in cache, it is marked as a multiple access priority block. This delineation of + * blocks is used to prevent scans from thrashing the cache adding a least-frequently-used element + * to the eviction algorithm. + *

          + * Each priority is given its own chunk of the total cache to ensure fairness during eviction. Each + * priority will retain close to its maximum size, however, if any priority is not using its entire + * chunk the others are able to grow beyond their chunk size. + *

          + * Instantiated at a minimum with the total size and average block size. All sizes are in bytes. The + * block size is not especially important as this cache is fully dynamic in its sizing of blocks. It + * is only used for pre-allocating data structures and in initial heap estimation of the map. + *

          + * The detailed constructor defines the sizes for the three priorities (they should total to the + * maximum size defined). It also sets the levels that trigger and control the eviction + * thread. + *

          + * The acceptable size is the cache size level which triggers the eviction process to + * start. It evicts enough blocks to get the size below the minimum size specified. + *

          + * Eviction happens in a separate thread and involves a single full-scan of the map. It determines + * how many bytes must be freed to reach the minimum size, and then while scanning determines the + * fewest least-recently-used blocks necessary from each of the three priorities (would be 3 times + * bytes to free). It then uses the priority chunk sizes to evict fairly according to the relative + * sizes and usage. */ @InterfaceAudience.Private public class LruBlockCache implements FirstLevelBlockCache { @@ -104,29 +99,29 @@ public class LruBlockCache implements FirstLevelBlockCache { * Acceptable size of cache (no evictions if size < acceptable) */ private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = - "hbase.lru.blockcache.acceptable.factor"; + "hbase.lru.blockcache.acceptable.factor"; /** * Hard capacity limit of cache, will reject any put if size > this * acceptable */ static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = - "hbase.lru.blockcache.hard.capacity.limit.factor"; + "hbase.lru.blockcache.hard.capacity.limit.factor"; private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.single.percentage"; + "hbase.lru.blockcache.single.percentage"; private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.multi.percentage"; + "hbase.lru.blockcache.multi.percentage"; private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.memory.percentage"; + "hbase.lru.blockcache.memory.percentage"; /** - * Configuration key to force data-block always (except in-memory are too much) - * cached in memory for in-memory hfile, unlike inMemory, which is a column-family - * configuration, inMemoryForceMode is a cluster-wide configuration + * Configuration key to force data-block always (except in-memory are too much) cached in memory + * for in-memory hfile, unlike inMemory, which is a column-family configuration, inMemoryForceMode + * is a cluster-wide configuration */ private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = - "hbase.lru.rs.inmemoryforcemode"; + "hbase.lru.rs.inmemoryforcemode"; - /* Default Configuration Parameters*/ + /* Default Configuration Parameters */ /* Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; @@ -225,18 +220,15 @@ public class LruBlockCache implements FirstLevelBlockCache { /** * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an - * external cache as L2. - * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + * external cache as L2. Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache */ private transient BlockCache victimHandler = null; /** - * Default constructor. Specify maximum size and expected average block - * size (approximation is fine). - * - *

          All other factors will be calculated based on defaults specified in - * this class. - * + * Default constructor. Specify maximum size and expected average block size (approximation is + * fine). + *

          + * All other factors will be calculated based on defaults specified in this class. * @param maxSize maximum size of cache, in bytes * @param blockSize approximate size of each block, in bytes */ @@ -245,35 +237,26 @@ public class LruBlockCache implements FirstLevelBlockCache { } /** - * Constructor used for testing. Allows disabling of the eviction thread. + * Constructor used for testing. Allows disabling of the eviction thread. */ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, - DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, - DEFAULT_SINGLE_FACTOR, - DEFAULT_MULTI_FACTOR, - DEFAULT_MEMORY_FACTOR, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, - false, - DEFAULT_MAX_BLOCK_SIZE); + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL, - conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), - conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), - conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), - conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), - conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), - conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), - conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), - conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, + conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), + conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), + conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), + conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), + conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -281,8 +264,7 @@ public class LruBlockCache implements FirstLevelBlockCache { } /** - * Configurable constructor. Use this constructor if not using defaults. - * + * Configurable constructor. Use this constructor if not using defaults. 
* @param maxSize maximum size of this cache, in bytes * @param blockSize expected average size of blocks, in bytes * @param evictionThread whether to run evictions in a bg thread or not @@ -295,16 +277,17 @@ public class LruBlockCache implements FirstLevelBlockCache { * @param multiFactor percentage of total size for multiple-access blocks * @param memoryFactor percentage of total size for in-memory blocks */ - public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, - int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, - float minFactor, float acceptableFactor, float singleFactor, - float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize) { + public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, + float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, + float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, + boolean forceInMemory, long maxBlockSize) { this.maxBlockSize = maxBlockSize; - if(singleFactor + multiFactor + memoryFactor != 1 || - singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { - throw new IllegalArgumentException("Single, multi, and memory factors " + - " should be non-negative and total 1.0"); + if ( + singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 + || memoryFactor < 0 + ) { + throw new IllegalArgumentException( + "Single, multi, and memory factors " + " should be non-negative and total 1.0"); } if (minFactor >= acceptableFactor) { throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); @@ -335,10 +318,10 @@ public class LruBlockCache implements FirstLevelBlockCache { } else { this.evictionThread = null; } - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, - STAT_THREAD_PERIOD, TimeUnit.SECONDS); + STAT_THREAD_PERIOD, TimeUnit.SECONDS); } @Override @@ -390,7 +373,6 @@ public class LruBlockCache implements FirstLevelBlockCache { *

          * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) * this can happen, for which we compare the buffer contents. - * * @param cacheKey block's cache key * @param buf block buffer * @param inMemory if block is in-memory @@ -402,11 +384,9 @@ public class LruBlockCache implements FirstLevelBlockCache { // big this can make the logs way too noisy. // So we log 2% if (stats.failInsert() % 50 == 0) { - LOG.warn("Trying to cache too large a block " - + cacheKey.getHfileName() + " @ " - + cacheKey.getOffset() - + " is " + buf.heapSize() - + " which is larger than " + maxBlockSize); + LOG.warn("Trying to cache too large a block " + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + " is " + buf.heapSize() + " which is larger than " + + maxBlockSize); } return; } @@ -450,20 +430,20 @@ public class LruBlockCache implements FirstLevelBlockCache { } /** - * Sanity-checking for parity between actual block cache content and metrics. - * Intended only for use with TRACE level logging and -ea JVM. + * Sanity-checking for parity between actual block cache content and metrics. Intended only for + * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { - LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { - LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); } } } @@ -476,7 +456,7 @@ public class LruBlockCache implements FirstLevelBlockCache { * switch whether make the LRU on-heap or not, if so we may need copy the memory to on-heap, * otherwise the caching size is based on off-heap. * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @@ -484,9 +464,8 @@ public class LruBlockCache implements FirstLevelBlockCache { } /** - * Helper function that updates the local size counter and also updates any - * per-cf or per-blocktype metrics it can discern from given - * {@link LruCachedBlock} + * Helper function that updates the local size counter and also updates any per-cf or + * per-blocktype metrics it can discern from given {@link LruCachedBlock} */ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); @@ -502,19 +481,16 @@ public class LruBlockCache implements FirstLevelBlockCache { /** * Get the buffer of the block with the specified name. 
- * * @param cacheKey block's cache key * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check - * locking) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid + * double counting cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not - * * @return buffer of specified cache key, or null if not in cache */ @Override public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, - boolean updateCacheMetrics) { + boolean updateCacheMetrics) { LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> { // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside // this block. because if retain outside the map#computeIfPresent, the evictBlock may remove @@ -552,7 +528,6 @@ public class LruBlockCache implements FirstLevelBlockCache { /** * Whether the cache contains block with specified cacheKey - * * @return true if contains the block */ @Override @@ -567,13 +542,11 @@ public class LruBlockCache implements FirstLevelBlockCache { } /** - * Evicts all blocks for a specific HFile. This is an - * expensive operation implemented as a linear-time search through all blocks - * in the cache. Ideally this should be a search in a log-access-time map. - * + * Evicts all blocks for a specific HFile. This is an expensive operation implemented as a + * linear-time search through all blocks in the cache. Ideally this should be a search in a + * log-access-time map. *

          * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override @@ -593,11 +566,9 @@ public class LruBlockCache implements FirstLevelBlockCache { } /** - * Evict the block, and it will be cached by the victim handler if exists && - * block may be read again later - * - * @param evictedByEvictionProcess true if the given block is evicted by - * EvictionThread + * Evict the block, and it will be cached by the victim handler if exists && block may be + * read again later + * @param evictedByEvictionProcess true if the given block is evicted by EvictionThread * @return the heap size of evicted block */ protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { @@ -664,9 +635,8 @@ public class LruBlockCache implements FirstLevelBlockCache { long bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { - LOG.trace("Block cache LRU eviction started; Attempting to free " + - StringUtils.byteDesc(bytesToFree) + " of total=" + - StringUtils.byteDesc(currentSize)); + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize)); } if (bytesToFree <= 0) { @@ -706,13 +676,13 @@ public class LruBlockCache implements FirstLevelBlockCache { bytesFreed = bucketSingle.free(s); bytesFreed += bucketMulti.free(m); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " from single and multi buckets"); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets"); } bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " total from all three buckets "); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets "); } } else { // this means no need to evict block in memory bucket, @@ -749,7 +719,7 @@ public class LruBlockCache implements FirstLevelBlockCache { long overflow = bucket.overflow(); if (overflow > 0) { long bucketBytesToFree = - Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); + Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); bytesFreed += bucket.free(bucketBytesToFree); } remainingBuckets--; @@ -759,12 +729,11 @@ public class LruBlockCache implements FirstLevelBlockCache { long single = bucketSingle.totalSize(); long multi = bucketMulti.totalSize(); long memory = bucketMemory.totalSize(); - LOG.trace("Block cache LRU eviction completed; " + - "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + - "total=" + StringUtils.byteDesc(this.size.get()) + ", " + - "single=" + StringUtils.byteDesc(single) + ", " + - "multi=" + StringUtils.byteDesc(multi) + ", " + - "memory=" + StringUtils.byteDesc(memory)); + LOG.trace( + "Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single=" + + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); } } finally { stats.evict(); @@ -775,26 +744,21 @@ public class LruBlockCache implements FirstLevelBlockCache { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) .add("currentSize", StringUtils.byteDesc(getCurrentSize())) 
.add("freeSize", StringUtils.byteDesc(getFreeSize())) .add("maxSize", StringUtils.byteDesc(getMaxSize())) .add("heapSize", StringUtils.byteDesc(heapSize())) - .add("minSize", StringUtils.byteDesc(minSize())) - .add("minFactor", minFactor) - .add("multiSize", StringUtils.byteDesc(multiSize())) - .add("multiFactor", multiFactor) - .add("singleSize", StringUtils.byteDesc(singleSize())) - .add("singleFactor", singleFactor) + .add("minSize", StringUtils.byteDesc(minSize())).add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())).add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())).add("singleFactor", singleFactor) .toString(); } /** - * Used to group blocks into priority buckets. There will be a BlockBucket - * for each priority (single, multi, memory). Once bucketed, the eviction - * algorithm takes the appropriate number of elements out of each according - * to configuration parameters and their relatives sizes. + * Used to group blocks into priority buckets. There will be a BlockBucket for each priority + * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of + * elements out of each according to configuration parameters and their relatives sizes. */ private class BlockBucket implements Comparable { @@ -851,7 +815,7 @@ public class LruBlockCache implements FirstLevelBlockCache { if (that == null || !(that instanceof BlockBucket)) { return false; } - return compareTo((BlockBucket)that) == 0; + return compareTo((BlockBucket) that) == 0; } @Override @@ -861,17 +825,14 @@ public class LruBlockCache implements FirstLevelBlockCache { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name) + return MoreObjects.toStringHelper(this).add("name", name) .add("totalSize", StringUtils.byteDesc(totalSize)) - .add("bucketSize", StringUtils.byteDesc(bucketSize)) - .toString(); + .add("bucketSize", StringUtils.byteDesc(bucketSize)).toString(); } } /** * Get the maximum size of this cache. - * * @return max size in bytes */ @@ -915,10 +876,9 @@ public class LruBlockCache implements FirstLevelBlockCache { } /* - * Eviction thread. Sits in waiting state until an eviction is triggered - * when the cache size grows above the acceptable level.
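To illustrate the eviction idea the hunks above are reformatting: LruBlockCache frees blocks from three priority buckets (single-access, multi-access, in-memory) until the cache is back under a minimum size derived from configurable factors. The following is a minimal, standalone Java sketch of that proportional free-up step, not the actual HBase implementation; the class name, factor values, and helper names are invented for the example, and the factor array is assumed parallel to the bucket-size array.

    import java.util.Arrays;

    public class EvictionMath {
      // Illustrative factors, loosely modeled on the single/multi/memory split.
      static final double MIN_FACTOR = 0.95;                       // evict down to this fraction of maxSize
      static final double[] BUCKET_FACTORS = {0.25, 0.50, 0.25};   // single, multi, memory shares

      /** Returns how many bytes each bucket should free, given current per-bucket sizes. */
      static long[] bytesToFreePerBucket(long maxSize, long[] bucketSizes) {
        long currentSize = Arrays.stream(bucketSizes).sum();
        long minSize = (long) Math.floor(maxSize * MIN_FACTOR);
        long bytesToFree = currentSize - minSize;
        long[] plan = new long[bucketSizes.length];
        if (bytesToFree <= 0) {
          return plan; // nothing to do
        }
        // Each bucket is only asked to give up its overflow above its own target share.
        long freed = 0;
        for (int i = 0; i < bucketSizes.length && freed < bytesToFree; i++) {
          long target = (long) Math.floor(maxSize * BUCKET_FACTORS[i] * MIN_FACTOR);
          long overflow = Math.max(0, bucketSizes[i] - target);
          plan[i] = Math.min(overflow, bytesToFree - freed);
          freed += plan[i];
        }
        return plan;
      }

      public static void main(String[] args) {
        long[] plan = bytesToFreePerBucket(100_000_000L,
          new long[] {40_000_000L, 45_000_000L, 20_000_000L});
        System.out.println(Arrays.toString(plan));
      }
    }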

          - * - * Thread is triggered into action by {@link LruBlockCache#runEviction()} + * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows + * above the acceptable level.

          Thread is triggered into action by {@link + * LruBlockCache#runEviction()} */ static class EvictionThread extends Thread { @@ -939,7 +899,7 @@ public class LruBlockCache implements FirstLevelBlockCache { while (this.go) { synchronized (this) { try { - this.wait(1000 * 10/*Don't wait for ever*/); + this.wait(1000 * 10/* Don't wait for ever */); } catch (InterruptedException e) { LOG.warn("Interrupted eviction thread ", e); Thread.currentThread().interrupt(); @@ -954,8 +914,8 @@ public class LruBlockCache implements FirstLevelBlockCache { } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="This is what we want") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "This is what we want") public void evict() { synchronized (this) { this.notifyAll(); @@ -980,7 +940,7 @@ public class LruBlockCache implements FirstLevelBlockCache { } /* - * Statistics thread. Periodically prints the cache statistics to the log. + * Statistics thread. Periodically prints the cache statistics to the log. */ static class StatisticsThread extends Thread { @@ -1002,28 +962,26 @@ public class LruBlockCache implements FirstLevelBlockCache { // Log size long totalSize = heapSize(); long freeSize = maxSize - totalSize; - LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "max=" + StringUtils.byteDesc(this.maxSize) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount() + ", " + - "evictedPerRun=" + stats.evictedPerEviction()); + LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + + StringUtils.byteDesc(freeSize) + ", " + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 + ? "0" + : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) + + ", " + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount() + ", " + + "evictedPerRun=" + stats.evictedPerEviction()); } /** * Get counter statistics for this cache. - * - *
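The EvictionThread reformatted above is the classic wait/notify pattern: a daemon thread parks in wait() with a bounded timeout and is nudged with notifyAll() whenever the cache crosses its acceptable size. A self-contained JDK-only version of that pattern is sketched below; the class name, timeout, and placeholder eviction method are illustrative, not HBase's.

    public class EvictionTrigger extends Thread {
      private volatile boolean go = true;

      public EvictionTrigger() {
        super("eviction-trigger");
        setDaemon(true);
      }

      @Override
      public void run() {
        while (go) {
          synchronized (this) {
            try {
              // Wake periodically even if nobody calls evict(), like the 10s bound above.
              wait(10_000);
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              return;
            }
          }
          runEvictionPass();
        }
      }

      /** Called by the cache when it grows past the acceptable size. */
      public void evict() {
        synchronized (this) {
          notifyAll();
        }
      }

      public void shutdown() {
        go = false;
        evict();
      }

      private void runEvictionPass() {
        // Placeholder for the actual eviction work.
        System.out.println("eviction pass");
      }
    }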

          Includes: total accesses, hits, misses, evicted blocks, and runs - * of the eviction processes. + *

          + * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes. */ @Override public CacheStats getStats() { @@ -1031,7 +989,7 @@ public class LruBlockCache implements FirstLevelBlockCache { } public final static long CACHE_FIXED_OVERHEAD = - ClassSize.estimateBase(LruBlockCache.class, false); + ClassSize.estimateBase(LruBlockCache.class, false); @Override public long heapSize() { @@ -1041,8 +999,8 @@ public class LruBlockCache implements FirstLevelBlockCache { private static long calculateOverhead(long maxSize, long blockSize, int concurrency) { // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP - + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) - + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); + + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); } @Override @@ -1120,7 +1078,7 @@ public class LruBlockCache implements FirstLevelBlockCache { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1139,17 +1097,21 @@ public class LruBlockCache implements FirstLevelBlockCache { // Simple calculators of sizes given factors and maxSize long acceptableSize() { - return (long)Math.floor(this.maxSize * this.acceptableFactor); + return (long) Math.floor(this.maxSize * this.acceptableFactor); } + private long minSize() { - return (long)Math.floor(this.maxSize * this.minFactor); + return (long) Math.floor(this.maxSize * this.minFactor); } + private long singleSize() { - return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor); } + private long multiSize() { - return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor); } + private long memorySize() { return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor); } @@ -1187,7 +1149,6 @@ public class LruBlockCache implements FirstLevelBlockCache { /** * Used in testing. May be very inefficient. - * * @return the set of cached file names */ SortedSet getCachedFileNamesForTest() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java index 32a277d4626..f60e4300e4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,25 +17,24 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; /** * Represents an entry in the {@link LruBlockCache}. - * - *

          Makes the block memory-aware with {@link HeapSize} and Comparable - * to sort by access time for the LRU. It also takes care of priority by - * either instantiating as in-memory or handling the transition from single - * to multiple access. + *

          + * Makes the block memory-aware with {@link HeapSize} and Comparable to sort by access time for the + * LRU. It also takes care of priority by either instantiating as in-memory or handling the + * transition from single to multiple access. */ @InterfaceAudience.Private public class LruCachedBlock implements HeapSize, Comparable { - public final static long PER_BLOCK_OVERHEAD = ClassSize.align( - ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) + - ClassSize.STRING + ClassSize.BYTE_BUFFER); + public final static long PER_BLOCK_OVERHEAD = + ClassSize.align(ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) + + ClassSize.STRING + ClassSize.BYTE_BUFFER); private final BlockCacheKey cacheKey; private final Cacheable buf; @@ -44,7 +42,7 @@ public class LruCachedBlock implements HeapSize, Comparable { private long size; private BlockPriority priority; /** - * Time this block was cached. Presumes we are created just before we are added to the cache. + * Time this block was cached. Presumes we are created just before we are added to the cache. */ private final long cachedTime = System.nanoTime(); @@ -52,8 +50,7 @@ public class LruCachedBlock implements HeapSize, Comparable { this(cacheKey, buf, accessTime, false); } - public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, - boolean inMemory) { + public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, boolean inMemory) { this.cacheKey = cacheKey; this.buf = buf; this.accessTime = accessTime; @@ -62,9 +59,9 @@ public class LruCachedBlock implements HeapSize, Comparable { // the base classes. We also include the base class // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with // their buffer lengths. This variable is used elsewhere in unit tests. - this.size = ClassSize.align(cacheKey.heapSize()) - + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; - if(inMemory) { + this.size = + ClassSize.align(cacheKey.heapSize()) + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; + if (inMemory) { this.priority = BlockPriority.MEMORY; } else { this.priority = BlockPriority.SINGLE; @@ -74,11 +71,11 @@ public class LruCachedBlock implements HeapSize, Comparable { /** * Block has been accessed. * @param accessTime Last access; this is actually a incremented sequence number rather than an - * actual time. + * actual time. */ public void access(long accessTime) { this.accessTime = accessTime; - if(this.priority == BlockPriority.SINGLE) { + if (this.priority == BlockPriority.SINGLE) { this.priority = BlockPriority.MULTI; } } @@ -104,7 +101,7 @@ public class LruCachedBlock implements HeapSize, Comparable { @Override public int hashCode() { - return (int)(accessTime ^ (accessTime >>> 32)); + return (int) (accessTime ^ (accessTime >>> 32)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java index e68939191d0..8e45cb772d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
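LruCachedBlock, reformatted above, keeps a per-entry priority that starts as SINGLE (or MEMORY for in-memory column families) and is promoted to MULTI on a later access, which moves the block into the higher-priority eviction bucket. A stripped-down sketch of that promotion follows; the types and names are illustrative, not the HBase classes.

    public class CacheEntry {
      enum Priority { SINGLE, MULTI, MEMORY }

      private final String key;
      private long accessSeq;   // an incremented sequence number, not wall-clock time
      private Priority priority;

      CacheEntry(String key, long accessSeq, boolean inMemory) {
        this.key = key;
        this.accessSeq = accessSeq;
        this.priority = inMemory ? Priority.MEMORY : Priority.SINGLE;
      }

      /** Record an access; a SINGLE entry becomes MULTI, MEMORY stays MEMORY. */
      void access(long accessSeq) {
        this.accessSeq = accessSeq;
        if (priority == Priority.SINGLE) {
          priority = Priority.MULTI;
        }
      }

      Priority priority() {
        return priority;
      }

      public static void main(String[] args) {
        CacheEntry e = new CacheEntry("block-1", 1, false);
        e.access(2);
        System.out.println(e.priority()); // MULTI
      }
    }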
See the NOTICE file * distributed with this work for additional information @@ -25,17 +24,15 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue; /** - * A memory-bound queue that will grow until an element brings - * total size >= maxSize. From then on, only entries that are sorted larger - * than the smallest current entry will be inserted/replaced. - * - *

          Use this when you want to find the largest elements (according to their - * ordering, not their heap size) that consume as close to the specified - * maxSize as possible. Default behavior is to grow just above rather than - * just below specified max. - * - *

          Object used in this queue must implement {@link HeapSize} as well as - * {@link Comparable}. + * A memory-bound queue that will grow until an element brings total size >= maxSize. From then + * on, only entries that are sorted larger than the smallest current entry will be + * inserted/replaced. + *

          + * Use this when you want to find the largest elements (according to their ordering, not their heap + * size) that consume as close to the specified maxSize as possible. Default behavior is to grow + * just above rather than just below specified max. + *

          + * Object used in this queue must implement {@link HeapSize} as well as {@link Comparable}. */ @InterfaceAudience.Private public class LruCachedBlockQueue implements HeapSize { @@ -46,7 +43,7 @@ public class LruCachedBlockQueue implements HeapSize { private long maxSize; /** - * @param maxSize the target size of elements in the queue + * @param maxSize the target size of elements in the queue * @param blockSize expected average size of blocks */ public LruCachedBlockQueue(long maxSize, long blockSize) { @@ -63,16 +60,16 @@ public class LruCachedBlockQueue implements HeapSize { /** * Attempt to add the specified cached block to this queue. - * - *

          If the queue is smaller than the max size, or if the specified element - * is ordered before the smallest element in the queue, the element will be - * added to the queue. Otherwise, there is no side effect of this call. + *

          + * If the queue is smaller than the max size, or if the specified element is ordered before the + * smallest element in the queue, the element will be added to the queue. Otherwise, there is no + * side effect of this call. * @param cb block to try to add to the queue */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", - justification = "head can not be null as heapSize is greater than maxSize," - + " which means we have something in the queue") + value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", + justification = "head can not be null as heapSize is greater than maxSize," + + " which means we have something in the queue") public void add(LruCachedBlock cb) { if (heapSize < maxSize) { queue.add(cb); @@ -93,16 +90,14 @@ public class LruCachedBlockQueue implements HeapSize { } /** - * @return The next element in this queue, or {@code null} if the queue is - * empty. + * @return The next element in this queue, or {@code null} if the queue is empty. */ public LruCachedBlock poll() { return queue.poll(); } /** - * @return The last element in this queue, or {@code null} if the queue is - * empty. + * @return The last element in this queue, or {@code null} if the queue is empty. */ public LruCachedBlock pollLast() { return queue.pollLast(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java index c519d9fd809..d64f0e4ce53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java @@ -1,27 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
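LruCachedBlockQueue, shown above, is bounded by total heap size rather than element count: it grows until the budget is reached, after which a new element only displaces the current smallest one if it sorts larger. The HBase class uses Guava's MinMaxPriorityQueue; the rough JDK-only sketch below uses PriorityQueue and invented types to show the same behaviour under those assumptions.

    import java.util.PriorityQueue;

    public class SizeBoundedQueue {
      /** Element with an ordering weight and a heap cost. */
      public record Item(long weight, long heapSize) {}

      private final PriorityQueue<Item> queue =
        new PriorityQueue<>((a, b) -> Long.compare(a.weight(), b.weight())); // smallest on top
      private final long maxHeapSize;
      private long heapSize;

      public SizeBoundedQueue(long maxHeapSize) {
        this.maxHeapSize = maxHeapSize;
      }

      /** Add while under budget; otherwise only keep the item if it outranks the smallest one. */
      public void add(Item item) {
        if (heapSize < maxHeapSize) {
          queue.add(item);
          heapSize += item.heapSize();
          return;
        }
        Item smallest = queue.peek();
        if (smallest != null && item.weight() > smallest.weight()) {
          queue.poll();
          heapSize -= smallest.heapSize();
          queue.add(item);
          heapSize += item.heapSize();
        }
      }

      public Item poll() {
        return queue.poll();
      }
    }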
*/ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.EncodingState; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; @@ -29,6 +28,7 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; import org.apache.hadoop.hbase.io.encoding.NoneEncoder; +import org.apache.yetus.audience.InterfaceAudience; /** * Does not perform any kind of encoding/decoding. @@ -36,8 +36,7 @@ import org.apache.hadoop.hbase.io.encoding.NoneEncoder; @InterfaceAudience.Private public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { - public static final NoOpDataBlockEncoder INSTANCE = - new NoOpDataBlockEncoder(); + public static final NoOpDataBlockEncoder INSTANCE = new NoOpDataBlockEncoder(); private static class NoneEncodingState extends EncodingState { NoneEncoder encoder = null; @@ -48,10 +47,9 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, - DataOutputStream out) throws IOException { - NoneEncodingState state = (NoneEncodingState) encodingCtx - .getEncodingState(); + public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + throws IOException { + NoneEncodingState state = (NoneEncodingState) encodingCtx.getEncodingState(); NoneEncoder encoder = state.encoder; int size = encoder.write(cell); state.postCellEncode(size, size); @@ -75,7 +73,7 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) { return DataBlockEncoding.NONE; } - + @Override public String toString() { return getClass().getSimpleName(); @@ -83,23 +81,22 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { @Override public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, - byte[] dummyHeader, HFileContext meta) { + byte[] dummyHeader, HFileContext meta) { return new HFileBlockDefaultEncodingContext(conf, null, dummyHeader, meta); } @Override public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, - HFileContext meta) { + HFileContext meta) { return new HFileBlockDefaultDecodingContext(conf, meta); } @Override - public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, - DataOutputStream out) throws IOException { + public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out) + throws IOException { if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) { throw new IOException(this.getClass().getName() + " only accepts " - + HFileBlockDefaultEncodingContext.class.getName() + " as the " - + "encoding context."); + + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context."); } HFileBlockDefaultEncodingContext encodingCtx = @@ -114,7 +111,7 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { @Override public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, - byte[] uncompressedBytesWithHeader, BlockType blockType) throws 
IOException { + byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException { encodingCtx.postEncoding(BlockType.DATA); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java index 80de44915f2..8561fc1c893 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +42,7 @@ public final class PrefetchExecutor { private static final Logger LOG = LoggerFactory.getLogger(PrefetchExecutor.class); /** Futures for tracking block prefetch activity */ - private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); + private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); /** Executor pool shared among all HFiles for block prefetch */ private static final ScheduledExecutorService prefetchExecutorPool; /** Delay before beginning prefetch */ @@ -59,15 +58,14 @@ public final class PrefetchExecutor { prefetchDelayMillis = conf.getInt("hbase.hfile.prefetch.delay", 1000); prefetchDelayVariation = conf.getFloat("hbase.hfile.prefetch.delay.variation", 0.2f); int prefetchThreads = conf.getInt("hbase.hfile.thread.prefetch", 4); - prefetchExecutorPool = new ScheduledThreadPoolExecutor(prefetchThreads, - new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime(); - Thread t = new Thread(r, name); - t.setDaemon(true); - return t; - } + prefetchExecutorPool = new ScheduledThreadPoolExecutor(prefetchThreads, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime(); + Thread t = new Thread(r, name); + t.setDaemon(true); + return t; + } }); } @@ -75,24 +73,17 @@ public final class PrefetchExecutor { // prefetching of file blocks but the Store level is where path convention // knowledge should be contained private static final Pattern prefetchPathExclude = - Pattern.compile( - "(" + - Path.SEPARATOR_CHAR + - HConstants.HBASE_TEMP_DIRECTORY.replace(".", "\\.") + - Path.SEPARATOR_CHAR + - ")|(" + - Path.SEPARATOR_CHAR + - HConstants.HREGION_COMPACTIONDIR_NAME.replace(".", "\\.") + - Path.SEPARATOR_CHAR + - ")"); + Pattern.compile("(" + Path.SEPARATOR_CHAR + HConstants.HBASE_TEMP_DIRECTORY.replace(".", "\\.") + + Path.SEPARATOR_CHAR + ")|(" + Path.SEPARATOR_CHAR + + HConstants.HREGION_COMPACTIONDIR_NAME.replace(".", "\\.") + Path.SEPARATOR_CHAR + ")"); public static void request(Path path, Runnable runnable) { if (!prefetchPathExclude.matcher(path.toString()).find()) { long delay; if (prefetchDelayMillis > 0) { - delay = (long)((prefetchDelayMillis * (1.0f - (prefetchDelayVariation/2))) + - (prefetchDelayMillis * (prefetchDelayVariation/2) * - ThreadLocalRandom.current().nextFloat())); + delay = (long) ((prefetchDelayMillis * (1.0f - (prefetchDelayVariation / 2))) + + (prefetchDelayMillis * (prefetchDelayVariation / 2) + * ThreadLocalRandom.current().nextFloat())); } else { delay = 0; } @@ -100,8 +91,8 @@ public final class PrefetchExecutor { if (LOG.isDebugEnabled()) { LOG.debug("Prefetch requested for " + path + ", delay=" + delay + " ms"); } 
- prefetchFutures.put(path, prefetchExecutorPool.schedule(runnable, delay, - TimeUnit.MILLISECONDS)); + prefetchFutures.put(path, + prefetchExecutorPool.schedule(runnable, delay, TimeUnit.MILLISECONDS)); } catch (RejectedExecutionException e) { prefetchFutures.remove(path); LOG.warn("Prefetch request rejected for " + path); @@ -136,5 +127,6 @@ public final class PrefetchExecutor { return true; } - private PrefetchExecutor() {} + private PrefetchExecutor() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java index bd3d63dab0c..c652c4a18b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,6 +32,7 @@ public class ReaderContext { PREAD, STREAM } + private final Path filePath; private final FSDataInputStreamWrapper fsdis; private final long fileSize; @@ -41,7 +41,7 @@ public class ReaderContext { private final ReaderType type; public ReaderContext(Path filePath, FSDataInputStreamWrapper fsdis, long fileSize, - HFileSystem hfs, boolean primaryReplicaReader, ReaderType type) { + HFileSystem hfs, boolean primaryReplicaReader, ReaderType type) { this.filePath = filePath; this.fsdis = fsdis; this.fileSize = fileSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java index 1f903cfbea6..0ec3de58fff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
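PrefetchExecutor, touched above, schedules block-prefetch runnables on a small daemon thread pool after a randomized delay so that many files opened at once do not prefetch in lock-step, and it keeps the returned futures so a prefetch can be cancelled. Below is a condensed JDK-only sketch of that pattern; the class name and configuration values are invented for the example, not the real PrefetchExecutor fields.

    import java.util.Map;
    import java.util.concurrent.*;

    public class PrefetchScheduler {
      private final ScheduledThreadPoolExecutor pool;
      private final Map<String, Future<?>> futures = new ConcurrentHashMap<>();
      private final long baseDelayMillis = 1000;
      private final float delayVariation = 0.2f;

      public PrefetchScheduler(int threads) {
        pool = new ScheduledThreadPoolExecutor(threads, r -> {
          Thread t = new Thread(r, "prefetch-" + System.currentTimeMillis());
          t.setDaemon(true);
          return t;
        });
      }

      /** Schedule a prefetch with a delay randomized between 90% and 100% of the base delay. */
      public void request(String path, Runnable task) {
        long delay = (long) (baseDelayMillis * (1.0f - delayVariation / 2)
          + baseDelayMillis * (delayVariation / 2) * ThreadLocalRandom.current().nextFloat());
        try {
          futures.put(path, pool.schedule(task, delay, TimeUnit.MILLISECONDS));
        } catch (RejectedExecutionException e) {
          futures.remove(path);
        }
      }

      /** Cancel a pending or running prefetch for the given path. */
      public void cancel(String path) {
        Future<?> f = futures.remove(path);
        if (f != null) {
          f.cancel(true);
        }
      }
    }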
See the NOTICE file * distributed with this work for additional information @@ -20,6 +19,7 @@ package org.apache.hadoop.hbase.io.hfile; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull; + import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -40,7 +40,8 @@ public class ReaderContextBuilder { private boolean primaryReplicaReader = true; private ReaderType type = ReaderType.PREAD; - public ReaderContextBuilder() {} + public ReaderContextBuilder() { + } public ReaderContextBuilder withFilePath(Path filePath) { this.filePath = filePath; @@ -82,11 +83,9 @@ public class ReaderContextBuilder { } public ReaderContextBuilder withFileSystemAndPath(FileSystem fs, Path filePath) - throws IOException { - this.withFileSystem(fs) - .withFilePath(filePath) - .withFileSize(fs.getFileStatus(filePath).getLen()) - .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, filePath)); + throws IOException { + this.withFileSystem(fs).withFilePath(filePath).withFileSize(fs.getFileStatus(filePath).getLen()) + .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, filePath)); return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java index 76158b01069..f093073319e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java index 0d2217e1579..d0d8fa7cfe0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
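ReaderContextBuilder, reformatted above, is a plain fluent builder: each with* method sets one field and returns this, and a convenience method derives several fields from a FileSystem and Path in a single call. A generic sketch of the same shape, with invented fields rather than the HBase types:

    public class ReadRequest {
      private final String path;
      private final long fileSize;
      private final boolean primaryReplica;

      private ReadRequest(String path, long fileSize, boolean primaryReplica) {
        this.path = path;
        this.fileSize = fileSize;
        this.primaryReplica = primaryReplica;
      }

      public static class Builder {
        private String path;
        private long fileSize;
        private boolean primaryReplica = true;

        public Builder withPath(String path) {
          this.path = path;
          return this;
        }

        public Builder withFileSize(long fileSize) {
          this.fileSize = fileSize;
          return this;
        }

        public ReadRequest build() {
          if (path == null) {
            throw new IllegalArgumentException("path must be set");
          }
          return new ReadRequest(path, fileSize, primaryReplica);
        }
      }

      public static void main(String[] args) {
        ReadRequest r = new ReadRequest.Builder().withPath("/data/f1").withFileSize(1024).build();
        System.out.println(r.path + " " + r.fileSize + " " + r.primaryReplica);
      }
    }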
See the NOTICE file * distributed with this work for additional information @@ -32,11 +32,11 @@ import org.apache.yetus.audience.InterfaceAudience; public class SharedMemHFileBlock extends HFileBlock { SharedMemHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, - long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, - ByteBuffAllocator alloc) { + int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, + long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, + ByteBuffAllocator alloc) { super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, buf, - fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); + fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java index e5e2e8fb632..7852f19bd63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,38 +19,36 @@ package org.apache.hadoop.hbase.io.hfile; import static java.util.Objects.requireNonNull; +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.Policy.Eviction; +import com.github.benmanes.caffeine.cache.RemovalCause; +import com.github.benmanes.caffeine.cache.RemovalListener; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; - -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.Policy.Eviction; -import com.github.benmanes.caffeine.cache.RemovalCause; -import com.github.benmanes.caffeine.cache.RemovalListener; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * A block cache that is memory-aware using {@link HeapSize}, memory bounded using the W-TinyLFU * eviction algorithm, and concurrent. This implementation delegates to a Caffeine cache to provide * O(1) read and write operations. *

 * <ul>
- *   <li>W-TinyLFU: http://arxiv.org/pdf/1512.00727.pdf</li>
- *   <li>Caffeine: https://github.com/ben-manes/caffeine</li>
- *   <li>Cache design: http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html</li>
+ * <li>W-TinyLFU: http://arxiv.org/pdf/1512.00727.pdf</li>
+ * <li>Caffeine: https://github.com/ben-manes/caffeine</li>
+ * <li>Cache design: http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html</li>
 * </ul>
          */ @InterfaceAudience.Private @@ -72,44 +70,39 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { /** * Creates a block cache. - * * @param maximumSizeInBytes maximum size of this cache, in bytes - * @param avgBlockSize expected average size of blocks, in bytes - * @param executor the cache's executor - * @param conf additional configuration + * @param avgBlockSize expected average size of blocks, in bytes + * @param executor the cache's executor + * @param conf additional configuration */ - public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, - Executor executor, Configuration conf) { - this(maximumSizeInBytes, avgBlockSize, - conf.getLong(MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), executor); + public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, Executor executor, + Configuration conf) { + this(maximumSizeInBytes, avgBlockSize, conf.getLong(MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + executor); } /** * Creates a block cache. - * * @param maximumSizeInBytes maximum size of this cache, in bytes - * @param avgBlockSize expected average size of blocks, in bytes - * @param maxBlockSize maximum size of a block, in bytes - * @param executor the cache's executor + * @param avgBlockSize expected average size of blocks, in bytes + * @param maxBlockSize maximum size of a block, in bytes + * @param executor the cache's executor */ - public TinyLfuBlockCache(long maximumSizeInBytes, - long avgBlockSize, long maxBlockSize, Executor executor) { - this.cache = Caffeine.newBuilder() - .executor(executor) - .maximumWeight(maximumSizeInBytes) - .removalListener(new EvictionListener()) - .weigher((BlockCacheKey key, Cacheable value) -> - (int) Math.min(value.heapSize(), Integer.MAX_VALUE)) - .initialCapacity((int) Math.ceil((1.2 * maximumSizeInBytes) / avgBlockSize)) - .build(); + public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, long maxBlockSize, + Executor executor) { + this.cache = Caffeine.newBuilder().executor(executor).maximumWeight(maximumSizeInBytes) + .removalListener(new EvictionListener()) + .weigher( + (BlockCacheKey key, Cacheable value) -> (int) Math.min(value.heapSize(), Integer.MAX_VALUE)) + .initialCapacity((int) Math.ceil((1.2 * maximumSizeInBytes) / avgBlockSize)).build(); this.maxBlockSize = maxBlockSize; this.policy = cache.policy().eviction().get(); this.stats = new CacheStats(getClass().getSimpleName()); statsThreadPool = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder() - .setNameFormat("TinyLfuBlockCacheStatsExecutor").setDaemon(true).build()); - statsThreadPool.scheduleAtFixedRate(this::logStats, - STAT_THREAD_PERIOD_SECONDS, STAT_THREAD_PERIOD_SECONDS, TimeUnit.SECONDS); + .setNameFormat("TinyLfuBlockCacheStatsExecutor").setDaemon(true).build()); + statsThreadPool.scheduleAtFixedRate(this::logStats, STAT_THREAD_PERIOD_SECONDS, + STAT_THREAD_PERIOD_SECONDS, TimeUnit.SECONDS); } @Override @@ -156,8 +149,8 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, - boolean caching, boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { Cacheable value = cache.asMap().computeIfPresent(cacheKey, (blockCacheKey, cacheable) -> { // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside // this block. 
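The TinyLfuBlockCache hunks above delegate to a Caffeine cache configured with a maximum weight, a heap-size weigher, and a removal listener, which is how the W-TinyLFU policy and the victim-cache hook are wired up. A minimal standalone example of that Caffeine builder configuration follows; the key/value types and sizes are invented, but the builder calls mirror those used in the class above.

    import com.github.benmanes.caffeine.cache.Cache;
    import com.github.benmanes.caffeine.cache.Caffeine;
    import com.github.benmanes.caffeine.cache.RemovalCause;

    public class WeightedCacheExample {
      public static void main(String[] args) {
        Cache<String, byte[]> cache = Caffeine.newBuilder()
          .maximumWeight(64L * 1024 * 1024)                     // bound by total bytes, not entry count
          .weigher((String key, byte[] value) -> value.length)  // each entry weighs its payload size
          .removalListener((String key, byte[] value, RemovalCause cause) -> {
            if (cause == RemovalCause.SIZE) {
              // This is where a victim cache could be offered the evicted block.
              System.out.println("evicted " + key);
            }
          })
          .build();

        cache.put("block-1", new byte[8 * 1024]);
        byte[] hit = cache.getIfPresent("block-1");
        System.out.println(hit != null ? "hit" : "miss");
      }
    }

Because the weight is the block's heap size, the cache evicts by memory pressure while the TinyLFU admission filter decides which blocks are worth keeping.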
because if retain outside the map#computeIfPresent, the evictBlock may remove @@ -214,9 +207,8 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { * the block (HBASE-22127):
          * 1. if cache the cloned heap block, its refCnt is a totally new one, it's easy to handle;
          * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's - * reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be - * garbage collected by JVM, so need a retain here. - * + * reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be garbage + * collected by JVM, so need a retain here. * @param buf the original block * @return an block with an heap memory backend. */ @@ -276,38 +268,29 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { public Iterator iterator() { long now = System.nanoTime(); return cache.asMap().entrySet().stream() - .map(entry -> (CachedBlock) new CachedBlockView(entry.getKey(), entry.getValue(), now)) - .iterator(); + .map(entry -> (CachedBlock) new CachedBlockView(entry.getKey(), entry.getValue(), now)) + .iterator(); } private void logStats() { - LOG.info( - "totalSize=" + StringUtils.byteDesc(heapSize()) + ", " + - "freeSize=" + StringUtils.byteDesc(getFreeSize()) + ", " + - "max=" + StringUtils.byteDesc(size()) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0," : StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ") + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount()); + LOG.info("totalSize=" + StringUtils.byteDesc(heapSize()) + ", " + "freeSize=" + + StringUtils.byteDesc(getFreeSize()) + ", " + "max=" + StringUtils.byteDesc(size()) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 ? "0," : StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ") + + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount()); } @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) - .add("currentSize", getCurrentSize()) - .add("freeSize", getFreeSize()) - .add("maxSize", size()) - .add("heapSize", heapSize()) - .add("victimCache", (victimCache != null)) - .toString(); + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) + .add("currentSize", getCurrentSize()).add("freeSize", getFreeSize()).add("maxSize", size()) + .add("heapSize", heapSize()).add("victimCache", (victimCache != null)).toString(); } /** A removal listener to asynchronously record evictions and populate the victim cache. */ @@ -335,10 +318,10 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { } /** - * Records an eviction. The number of eviction operations and evicted blocks are identical, as - * an eviction is triggered immediately when the capacity has been exceeded. An eviction is - * performed asynchronously. See the library's documentation for details on write buffers, - * batching, and maintenance behavior. + * Records an eviction. 
The number of eviction operations and evicted blocks are identical, as an + * eviction is triggered immediately when the capacity has been exceeded. An eviction is performed + * asynchronously. See the library's documentation for details on write buffers, batching, and + * maintenance behavior. */ private void recordEviction() { // FIXME: Currently does not capture the insertion time @@ -347,9 +330,8 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { } private static final class CachedBlockView implements CachedBlock { - private static final Comparator COMPARATOR = Comparator - .comparing(CachedBlock::getFilename) - .thenComparing(CachedBlock::getOffset) + private static final Comparator COMPARATOR = + Comparator.comparing(CachedBlock::getFilename).thenComparing(CachedBlock::getOffset) .thenComparing(CachedBlock::getCachedTime); private final BlockCacheKey key; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index bbbce76cf8e..5d89f0cbdd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile.bucket; import java.util.Arrays; @@ -114,8 +113,8 @@ public final class BucketAllocator { } /** - * Allocate a block in this bucket, return the offset representing the - * position in physical space + * Allocate a block in this bucket, return the offset representing the position in physical + * space * @return the offset in the IOEngine */ public long allocate() { @@ -130,18 +129,16 @@ public final class BucketAllocator { public void addAllocation(long offset) throws BucketAllocatorException { offset -= baseOffset; if (offset < 0 || offset % itemAllocationSize != 0) - throw new BucketAllocatorException( - "Attempt to add allocation for bad offset: " + offset + " base=" - + baseOffset + ", bucket size=" + itemAllocationSize); + throw new BucketAllocatorException("Attempt to add allocation for bad offset: " + offset + + " base=" + baseOffset + ", bucket size=" + itemAllocationSize); int idx = (int) (offset / itemAllocationSize); boolean matchFound = false; for (int i = 0; i < freeCount; ++i) { if (matchFound) freeList[i - 1] = freeList[i]; else if (freeList[i] == idx) matchFound = true; } - if (!matchFound) - throw new BucketAllocatorException("Couldn't find match for index " - + idx + " in free list"); + if (!matchFound) throw new BucketAllocatorException( + "Couldn't find match for index " + idx + " in free list"); ++usedCount; --freeCount; } @@ -260,10 +257,8 @@ public final class BucketAllocator { @Override public String toString() { - return MoreObjects.toStringHelper(this.getClass()) - .add("sizeIndex", sizeIndex) - .add("bucketSize", bucketSizes[sizeIndex]) - .toString(); + return MoreObjects.toStringHelper(this.getClass()).add("sizeIndex", sizeIndex) + .add("bucketSize", bucketSizes[sizeIndex]).toString(); } } @@ -272,20 +267,17 @@ public final class BucketAllocator { // The real block size in hfile maybe a little larger than the 
size we configured , // so we need add extra 1024 bytes for fit. // TODO Support the view of block size distribution statistics - private static final int DEFAULT_BUCKET_SIZES[] = { 4 * 1024 + 1024, 8 * 1024 + 1024, - 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, 48 * 1024 + 1024, - 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024, - 192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, - 512 * 1024 + 1024 }; + private static final int DEFAULT_BUCKET_SIZES[] = + { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, + 48 * 1024 + 1024, 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024, + 192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, 512 * 1024 + 1024 }; /** - * Round up the given block size to bucket size, and get the corresponding - * BucketSizeInfo + * Round up the given block size to bucket size, and get the corresponding BucketSizeInfo */ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { for (int i = 0; i < bucketSizes.length; ++i) - if (blockSize <= bucketSizes[i]) - return bucketSizeInfos[i]; + if (blockSize <= bucketSizes[i]) return bucketSizeInfos[i]; return null; } @@ -303,16 +295,15 @@ public final class BucketAllocator { private final long totalSize; private transient long usedSize = 0; - BucketAllocator(long availableSpace, int[] bucketSizes) - throws BucketAllocatorException { + BucketAllocator(long availableSpace, int[] bucketSizes) throws BucketAllocatorException { this.bucketSizes = bucketSizes == null ? DEFAULT_BUCKET_SIZES : bucketSizes; Arrays.sort(this.bucketSizes); this.bigItemSize = Ints.max(this.bucketSizes); this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * (long) bigItemSize; buckets = new Bucket[(int) (availableSpace / bucketCapacity)]; if (buckets.length < this.bucketSizes.length) - throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length + - "); must have room for at least " + this.bucketSizes.length + " buckets"); + throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length + + "); must have room for at least " + this.bucketSizes.length + " buckets"); bucketSizeInfos = new BucketSizeInfo[this.bucketSizes.length]; for (int i = 0; i < this.bucketSizes.length; ++i) { bucketSizeInfos[i] = new BucketSizeInfo(i); @@ -320,27 +311,26 @@ public final class BucketAllocator { for (int i = 0; i < buckets.length; ++i) { buckets[i] = new Bucket(bucketCapacity * i); bucketSizeInfos[i < this.bucketSizes.length ? i : this.bucketSizes.length - 1] - .instantiateBucket(buckets[i]); + .instantiateBucket(buckets[i]); } this.totalSize = ((long) buckets.length) * bucketCapacity; if (LOG.isInfoEnabled()) { - LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length + - ", bucket capacity=" + this.bucketCapacity + - "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + this.bigItemSize + ")=" + - "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))"); + LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length + + ", bucket capacity=" + this.bucketCapacity + "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + + this.bigItemSize + ")=" + + "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))"); } } /** * Rebuild the allocator's data structures from a persisted map. 
* @param availableSpace capacity of cache - * @param map A map stores the block key and BucketEntry(block's meta data - * like offset, length) - * @param realCacheSize cached data size statistics for bucket cache - * @throws BucketAllocatorException + * @param map A map stores the block key and BucketEntry(block's meta data like offset, + * length) + * @param realCacheSize cached data size statistics for bucket cache n */ BucketAllocator(long availableSpace, int[] bucketSizes, Map map, - LongAdder realCacheSize) throws BucketAllocatorException { + LongAdder realCacheSize) throws BucketAllocatorException { this(availableSpace, bucketSizes); // each bucket has an offset, sizeindex. probably the buckets are too big @@ -381,7 +371,7 @@ public final class BucketAllocator { } else { if (!b.isCompletelyFree()) { throw new BucketAllocatorException( - "Reconfiguring bucket " + bucketNo + " but it's already allocated; corrupt data"); + "Reconfiguring bucket " + bucketNo + " but it's already allocated; corrupt data"); } // Need to remove the bucket from whichever list it's currently in at // the moment... @@ -398,8 +388,8 @@ public final class BucketAllocator { } if (sizeNotMatchedCount > 0) { - LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because " + - "there is no matching bucket size for these blocks"); + LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because " + + "there is no matching bucket size for these blocks"); } if (insufficientCapacityCount > 0) { LOG.warn("There are " + insufficientCapacityCount + " blocks which can't be rebuilt - " @@ -433,25 +423,21 @@ public final class BucketAllocator { /** * Allocate a block with specified size. Return the offset - * @param blockSize size of block - * @throws BucketAllocatorException - * @throws CacheFullException - * @return the offset in the IOEngine + * @param blockSize size of block nn * @return the offset in the IOEngine */ - public synchronized long allocateBlock(int blockSize) throws CacheFullException, - BucketAllocatorException { + public synchronized long allocateBlock(int blockSize) + throws CacheFullException, BucketAllocatorException { assert blockSize > 0; BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize); if (bsi == null) { - throw new BucketAllocatorException("Allocation too big size=" + blockSize + - "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY + - " to accomodate if size seems reasonable and you want it cached."); + throw new BucketAllocatorException("Allocation too big size=" + blockSize + + "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY + + " to accomodate if size seems reasonable and you want it cached."); } long offset = bsi.allocateBlock(); // Ask caller to free up space and try again! 
- if (offset < 0) - throw new CacheFullException(blockSize, bsi.sizeIndex()); + if (offset < 0) throw new CacheFullException(blockSize, bsi.sizeIndex()); usedSize += bucketSizes[bsi.sizeIndex()]; return offset; } @@ -539,7 +525,7 @@ public final class BucketAllocator { } } - public Bucket [] getBuckets() { + public Bucket[] getBuckets() { return this.buckets; } @@ -547,11 +533,11 @@ public final class BucketAllocator { IndexStatistics total = new IndexStatistics(); IndexStatistics[] stats = getIndexStatistics(total); LOG.info("Bucket allocator statistics follow:\n"); - LOG.info(" Free bytes=" + total.freeBytes() + "+; used bytes=" - + total.usedBytes() + "; total bytes=" + total.totalBytes()); + LOG.info(" Free bytes=" + total.freeBytes() + "+; used bytes=" + total.usedBytes() + + "; total bytes=" + total.totalBytes()); for (IndexStatistics s : stats) { - LOG.info(" Object size " + s.itemSize() + " used=" + s.usedCount() - + "; free=" + s.freeCount() + "; total=" + s.totalCount()); + LOG.info(" Object size " + s.itemSize() + " used=" + s.usedCount() + "; free=" + + s.freeCount() + "; total=" + s.totalCount()); } } @@ -585,33 +571,28 @@ public final class BucketAllocator { } /** - * Returns a set of indices of the buckets that are least filled - * excluding the offsets, we also the fully free buckets for the - * BucketSizes where everything is empty and they only have one + * Returns a set of indices of the buckets that are least filled excluding the offsets, we also + * the fully free buckets for the BucketSizes where everything is empty and they only have one * completely free bucket as a reserved - * - * @param excludedBuckets the buckets that need to be excluded due to - * currently being in used + * @param excludedBuckets the buckets that need to be excluded due to currently being in used * @param bucketCount max Number of buckets to return * @return set of bucket indices which could be used for eviction */ - public Set getLeastFilledBuckets(Set excludedBuckets, - int bucketCount) { - Queue queue = MinMaxPriorityQueue.orderedBy( - new Comparator() { - @Override - public int compare(Integer left, Integer right) { - // We will always get instantiated buckets - return Float.compare( - ((float) buckets[left].usedCount) / buckets[left].itemCount, - ((float) buckets[right].usedCount) / buckets[right].itemCount); - } - }).maximumSize(bucketCount).create(); + public Set getLeastFilledBuckets(Set excludedBuckets, int bucketCount) { + Queue queue = MinMaxPriorityQueue. 
orderedBy(new Comparator() { + @Override + public int compare(Integer left, Integer right) { + // We will always get instantiated buckets + return Float.compare(((float) buckets[left].usedCount) / buckets[left].itemCount, + ((float) buckets[right].usedCount) / buckets[right].itemCount); + } + }).maximumSize(bucketCount).create(); - for (int i = 0; i < buckets.length; i ++ ) { + for (int i = 0; i < buckets.length; i++) { if (!excludedBuckets.contains(i) && !buckets[i].isUninstantiated() && - // Avoid the buckets that are the only buckets for a sizeIndex - bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1) { + // Avoid the buckets that are the only buckets for a sizeIndex + bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1 + ) { queue.add(i); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java index 55172cf7fb9..c141edb947d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java @@ -1,25 +1,23 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index f6a4ad128ad..44a45d0fda2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
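BucketAllocator, reformatted above, carves the cache space into fixed buckets and serves each allocation from the smallest configured size class that fits the block (the defaults start at 4 KB + 1 KB, 8 KB + 1 KB, and so on). The small sketch below shows only that round-up step; the class name and the shortened size table are illustrative, not the allocator's actual fields.

    import java.util.Arrays;

    public class SizeClassPicker {
      // A few size classes in ascending order; real deployments configure the full table.
      private final int[] sizeClasses = {4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
        32 * 1024 + 1024, 64 * 1024 + 1024};

      public SizeClassPicker() {
        Arrays.sort(sizeClasses); // the allocator sorts its configured sizes up front
      }

      /** Returns the smallest size class that can hold blockSize, or -1 if it is too big to cache. */
      public int roundUp(int blockSize) {
        for (int sizeClass : sizeClasses) {
          if (blockSize <= sizeClass) {
            return sizeClass;
          }
        }
        return -1;
      }

      public static void main(String[] args) {
        SizeClassPicker picker = new SizeClassPicker();
        System.out.println(picker.roundUp(6 * 1024)); // 9216 (the 8 KB + 1 KB class)
        System.out.println(picker.roundUp(1 << 20));  // -1   (larger than any class)
      }
    }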
See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -86,22 +83,21 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos; /** - * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses - * BucketCache#ramCache and BucketCache#backingMap in order to - * determine if a given element is in the cache. The bucket cache can use - * off-heap memory {@link ByteBufferIOEngine} or mmap {@link ExclusiveMemoryMmapIOEngine} - * or pmem {@link SharedMemoryMmapIOEngine} or local files {@link FileIOEngine} to - * store/read the block data. - * - *

          Eviction is via a similar algorithm as used in + * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses BucketCache#ramCache + * and BucketCache#backingMap in order to determine if a given element is in the cache. The bucket + * cache can use off-heap memory {@link ByteBufferIOEngine} or mmap + * {@link ExclusiveMemoryMmapIOEngine} or pmem {@link SharedMemoryMmapIOEngine} or local files + * {@link FileIOEngine} to store/read the block data. + *

          + * Eviction is via a similar algorithm as used in * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} - * - *

          BucketCache can be used as mainly a block cache (see - * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with - * a BlockCache to decrease CMS GC and heap fragmentation. - * - *

          It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store - * blocks) to enlarge cache space via a victim cache. + *

          + * BucketCache can be used as mainly a block cache (see + * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with a BlockCache to + * decrease CMS GC and heap fragmentation. + *

          + * It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store blocks) to + * enlarge cache space via a victim cache. */ @InterfaceAudience.Private public class BucketCache implements BlockCache, HeapSize { @@ -142,18 +138,17 @@ public class BucketCache implements BlockCache, HeapSize { transient ConcurrentHashMap backingMap; /** - * Flag if the cache is enabled or not... We shut it off if there are IO - * errors for some time, so that Bucket IO exceptions/errors don't bring down - * the HBase server. + * Flag if the cache is enabled or not... We shut it off if there are IO errors for some time, so + * that Bucket IO exceptions/errors don't bring down the HBase server. */ private volatile boolean cacheEnabled; /** - * A list of writer queues. We have a queue per {@link WriterThread} we have running. - * In other words, the work adding blocks to the BucketCache is divided up amongst the - * running WriterThreads. Its done by taking hash of the cache key modulo queue count. - * WriterThread when it runs takes whatever has been recently added and 'drains' the entries - * to the BucketCache. It then updates the ramCache and backingMap accordingly. + * A list of writer queues. We have a queue per {@link WriterThread} we have running. In other + * words, the work adding blocks to the BucketCache is divided up amongst the running + * WriterThreads. Its done by taking hash of the cache key modulo queue count. WriterThread when + * it runs takes whatever has been recently added and 'drains' the entries to the BucketCache. It + * then updates the ramCache and backingMap accordingly. */ transient final ArrayList> writerQueues = new ArrayList<>(); transient final WriterThread[] writerThreads; @@ -173,9 +168,9 @@ public class BucketCache implements BlockCache, HeapSize { private static final int DEFAULT_CACHE_WAIT_TIME = 50; /** - * Used in tests. If this flag is false and the cache speed is very fast, - * bucket cache will skip some blocks when caching. If the flag is true, we - * will wait until blocks are flushed to IOEngine. + * Used in tests. If this flag is false and the cache speed is very fast, bucket cache will skip + * some blocks when caching. If the flag is true, we will wait until blocks are flushed to + * IOEngine. */ boolean wait_when_cache = false; @@ -196,8 +191,8 @@ public class BucketCache implements BlockCache, HeapSize { private volatile long ioErrorStartTime = -1; /** - * A ReentrantReadWriteLock to lock on a particular block identified by offset. - * The purpose of this is to avoid freeing the block which is being read. + * A ReentrantReadWriteLock to lock on a particular block identified by offset. The purpose of + * this is to avoid freeing the block which is being read. *

          * Key set of offsets in BucketCache is limited so soft reference is the best choice here. */ @@ -225,7 +220,10 @@ public class BucketCache implements BlockCache, HeapSize { /** Minimum threshold of cache (when evicting, evict until size < min) */ private float minFactor; - /** Free this floating point factor of extra blocks when evicting. For example free the number of blocks requested * (1 + extraFreeFactor) */ + /** + * Free this floating point factor of extra blocks when evicting. For example free the number of + * blocks requested * (1 + extraFreeFactor) + */ private float extraFreeFactor; /** Single access bucket size */ @@ -242,9 +240,9 @@ public class BucketCache implements BlockCache, HeapSize { private static final String DEFAULT_FILE_VERIFY_ALGORITHM = "MD5"; /** - * Use {@link java.security.MessageDigest} class's encryption algorithms to check - * persistent file integrity, default algorithm is MD5 - * */ + * Use {@link java.security.MessageDigest} class's encryption algorithms to check persistent file + * integrity, default algorithm is MD5 + */ private String algorithm; /* Tracing failed Bucket Cache allocations. */ @@ -252,14 +250,14 @@ public class BucketCache implements BlockCache, HeapSize { private static final int ALLOCATION_FAIL_LOG_TIME_PERIOD = 60000; // Default 1 minute. public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, - int writerThreadNum, int writerQLen, String persistencePath) throws IOException { + int writerThreadNum, int writerQLen, String persistencePath) throws IOException { this(ioEngineName, capacity, blockSize, bucketSizes, writerThreadNum, writerQLen, - persistencePath, DEFAULT_ERROR_TOLERATION_DURATION, HBaseConfiguration.create()); + persistencePath, DEFAULT_ERROR_TOLERATION_DURATION, HBaseConfiguration.create()); } public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, - int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration, - Configuration conf) throws IOException { + int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration, + Configuration conf) throws IOException { this.algorithm = conf.get(FILE_VERIFY_ALGORITHM, DEFAULT_FILE_VERIFY_ALGORITHM); this.ioEngine = getIOEngineFromName(ioEngineName, capacity, persistencePath); this.writerThreads = new WriterThread[writerThreadNum]; @@ -278,9 +276,9 @@ public class BucketCache implements BlockCache, HeapSize { sanityCheckConfigs(); - LOG.info("Instantiating BucketCache with acceptableFactor: " + acceptableFactor + ", minFactor: " + minFactor + - ", extraFreeFactor: " + extraFreeFactor + ", singleFactor: " + singleFactor + ", multiFactor: " + multiFactor + - ", memoryFactor: " + memoryFactor); + LOG.info("Instantiating BucketCache with acceptableFactor: " + acceptableFactor + + ", minFactor: " + minFactor + ", extraFreeFactor: " + extraFreeFactor + ", singleFactor: " + + singleFactor + ", multiFactor: " + multiFactor + ", memoryFactor: " + memoryFactor); this.cacheCapacity = capacity; this.persistencePath = persistencePath; @@ -316,27 +314,34 @@ public class BucketCache implements BlockCache, HeapSize { startWriterThreads(); // Run the statistics thread periodically to print the cache statistics log - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. 
- this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), - statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS); - LOG.info("Started bucket cache; ioengine=" + ioEngineName + - ", capacity=" + StringUtils.byteDesc(capacity) + - ", blockSize=" + StringUtils.byteDesc(blockSize) + ", writerThreadNum=" + - writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" + - persistencePath + ", bucketAllocator=" + this.bucketAllocator.getClass().getName()); + this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod, + statThreadPeriod, TimeUnit.SECONDS); + LOG.info("Started bucket cache; ioengine=" + ioEngineName + ", capacity=" + + StringUtils.byteDesc(capacity) + ", blockSize=" + StringUtils.byteDesc(blockSize) + + ", writerThreadNum=" + writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" + + persistencePath + ", bucketAllocator=" + this.bucketAllocator.getClass().getName()); } private void sanityCheckConfigs() { - Preconditions.checkArgument(acceptableFactor <= 1 && acceptableFactor >= 0, ACCEPT_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(minFactor <= 1 && minFactor >= 0, MIN_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(minFactor <= acceptableFactor, MIN_FACTOR_CONFIG_NAME + " must be <= " + ACCEPT_FACTOR_CONFIG_NAME); - Preconditions.checkArgument(extraFreeFactor >= 0, EXTRA_FREE_FACTOR_CONFIG_NAME + " must be greater than 0.0"); - Preconditions.checkArgument(singleFactor <= 1 && singleFactor >= 0, SINGLE_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(multiFactor <= 1 && multiFactor >= 0, MULTI_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(memoryFactor <= 1 && memoryFactor >= 0, MEMORY_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument((singleFactor + multiFactor + memoryFactor) == 1, SINGLE_FACTOR_CONFIG_NAME + ", " + - MULTI_FACTOR_CONFIG_NAME + ", and " + MEMORY_FACTOR_CONFIG_NAME + " segments must add up to 1.0"); + Preconditions.checkArgument(acceptableFactor <= 1 && acceptableFactor >= 0, + ACCEPT_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(minFactor <= 1 && minFactor >= 0, + MIN_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(minFactor <= acceptableFactor, + MIN_FACTOR_CONFIG_NAME + " must be <= " + ACCEPT_FACTOR_CONFIG_NAME); + Preconditions.checkArgument(extraFreeFactor >= 0, + EXTRA_FREE_FACTOR_CONFIG_NAME + " must be greater than 0.0"); + Preconditions.checkArgument(singleFactor <= 1 && singleFactor >= 0, + SINGLE_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(multiFactor <= 1 && multiFactor >= 0, + MULTI_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(memoryFactor <= 1 && memoryFactor >= 0, + MEMORY_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument((singleFactor + multiFactor + memoryFactor) == 1, + SINGLE_FACTOR_CONFIG_NAME + ", " + MULTI_FACTOR_CONFIG_NAME + ", and " + + MEMORY_FACTOR_CONFIG_NAME + " segments must add up to 1.0"); } /** @@ -363,21 +368,16 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * Get the IOEngine from the IO engine name - * @param ioEngineName - * @param capacity - * @param persistencePath - * @return the IOEngine - * @throws IOException + * Get the IOEngine from the IO engine name nnn * @return 
the IOEngine n */ private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String persistencePath) - throws IOException { + throws IOException { if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) { // In order to make the usage simple, we only need the prefix 'files:' in // document whether one or multiple file(s), but also support 'file:' for // the compatibility - String[] filePaths = ioEngineName.substring(ioEngineName.indexOf(":") + 1) - .split(FileIOEngine.FILE_DELIMITER); + String[] filePaths = + ioEngineName.substring(ioEngineName.indexOf(":") + 1).split(FileIOEngine.FILE_DELIMITER); return new FileIOEngine(capacity, persistencePath != null, filePaths); } else if (ioEngineName.startsWith("offheap")) { return new ByteBufferIOEngine(capacity); @@ -393,14 +393,14 @@ public class BucketCache implements BlockCache, HeapSize { return new SharedMemoryMmapIOEngine(ioEngineName.substring(5), capacity); } else { throw new IllegalArgumentException( - "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap"); + "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap"); } } /** * Cache the block with the specified name and buffer. * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @@ -409,9 +409,9 @@ public class BucketCache implements BlockCache, HeapSize { /** * Cache the block with the specified name and buffer. - * @param cacheKey block's cache key + * @param cacheKey block's cache key * @param cachedItem block buffer - * @param inMemory if block is in-memory + * @param inMemory if block is in-memory */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory) { @@ -420,13 +420,13 @@ public class BucketCache implements BlockCache, HeapSize { /** * Cache the block to ramCache - * @param cacheKey block's cache key + * @param cacheKey block's cache key * @param cachedItem block buffer - * @param inMemory if block is in-memory - * @param wait if true, blocking wait when queue is full + * @param inMemory if block is in-memory + * @param wait if true, blocking wait when queue is full */ public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, - boolean wait) { + boolean wait) { if (cacheEnabled) { if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) { if (shouldReplaceExistingCacheBlock(cacheKey, cachedItem)) { @@ -448,14 +448,14 @@ public class BucketCache implements BlockCache, HeapSize { } protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cachedItem, - boolean inMemory, boolean wait) { + boolean inMemory, boolean wait) { if (!cacheEnabled) { return; } LOG.trace("Caching key={}, item={}", cacheKey, cachedItem); // Stuff the entry into the RAM cache so it can get drained to the persistent store RAMQueueEntry re = - new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory); + new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory); /** * Don't use ramCache.put(cacheKey, re) here. because there may be a existing entry with same * key in ramCache, the heap size of bucket cache need to update if replacing entry from @@ -489,15 +489,15 @@ public class BucketCache implements BlockCache, HeapSize { /** * Get the buffer of the block with the specified key. 
- * @param key block's cache key - * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block + * @param key block's cache key + * @param caching true if the caller caches blocks on cache misses + * @param repeat Whether this is a repeat lookup for the same block * @param updateCacheMetrics Whether we should update cache metrics or not * @return buffer of specified cache key, or null if not in cache */ @Override public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, - boolean updateCacheMetrics) { + boolean updateCacheMetrics) { if (!cacheEnabled) { return null; } @@ -599,7 +599,7 @@ public class BucketCache implements BlockCache, HeapSize { * {@link BucketCache#ramCache}.
          * NOTE:When Evict from {@link BucketCache#backingMap},only the matched {@link BlockCacheKey} and * {@link BucketEntry} could be removed. - * @param cacheKey {@link BlockCacheKey} to evict. + * @param cacheKey {@link BlockCacheKey} to evict. * @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict. * @return true to indicate whether we've evicted successfully or not. */ @@ -666,7 +666,7 @@ public class BucketCache implements BlockCache, HeapSize { * NOTE:When evict from {@link BucketCache#backingMap},only the matched {@link BlockCacheKey} and * {@link BucketEntry} could be removed. * @param blockCacheKey {@link BlockCacheKey} to evict. - * @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict. + * @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict. * @return true to indicate whether we've evicted successfully or not. */ boolean evictBucketEntryIfNoRpcReferenced(BlockCacheKey blockCacheKey, BucketEntry bucketEntry) { @@ -686,7 +686,7 @@ public class BucketCache implements BlockCache, HeapSize { } /* - * Statistics thread. Periodically output cache statistics to the log. + * Statistics thread. Periodically output cache statistics to the log. */ private static class StatisticsThread extends Thread { private final BucketCache bucketCache; @@ -708,25 +708,24 @@ public class BucketCache implements BlockCache, HeapSize { long usedSize = bucketAllocator.getUsedSize(); long freeSize = totalSize - usedSize; long cacheSize = getRealCacheSize(); - LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", " + - "totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "usedSize=" + StringUtils.byteDesc(usedSize) +", " + - "cacheSize=" + StringUtils.byteDesc(cacheSize) +", " + - "accesses=" + cacheStats.getRequestCount() + ", " + - "hits=" + cacheStats.getHitCount() + ", " + - "IOhitsPerSecond=" + cacheStats.getIOHitsPerSecond() + ", " + - "IOTimePerHit=" + String.format("%.2f", cacheStats.getIOTimePerHit())+ ", " + - "hitRatio=" + (cacheStats.getHitCount() == 0 ? "0," : - (StringUtils.formatPercent(cacheStats.getHitRatio(), 2)+ ", ")) + - "cachingAccesses=" + cacheStats.getRequestCachingCount() + ", " + - "cachingHits=" + cacheStats.getHitCachingCount() + ", " + - "cachingHitsRatio=" +(cacheStats.getHitCachingCount() == 0 ? "0," : - (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2)+ ", ")) + - "evictions=" + cacheStats.getEvictionCount() + ", " + - "evicted=" + cacheStats.getEvictedCount() + ", " + - "evictedPerRun=" + cacheStats.evictedPerEviction() + ", " + - "allocationFailCount=" + cacheStats.getAllocationFailCount()); + LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", " + "totalSize=" + + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + + "usedSize=" + StringUtils.byteDesc(usedSize) + ", " + "cacheSize=" + + StringUtils.byteDesc(cacheSize) + ", " + "accesses=" + cacheStats.getRequestCount() + ", " + + "hits=" + cacheStats.getHitCount() + ", " + "IOhitsPerSecond=" + + cacheStats.getIOHitsPerSecond() + ", " + "IOTimePerHit=" + + String.format("%.2f", cacheStats.getIOTimePerHit()) + ", " + "hitRatio=" + + (cacheStats.getHitCount() == 0 + ? 
"0," + : (StringUtils.formatPercent(cacheStats.getHitRatio(), 2) + ", ")) + + "cachingAccesses=" + cacheStats.getRequestCachingCount() + ", " + "cachingHits=" + + cacheStats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (cacheStats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + cacheStats.getEvictionCount() + ", " + "evicted=" + + cacheStats.getEvictedCount() + ", " + "evictedPerRun=" + cacheStats.evictedPerEviction() + + ", " + "allocationFailCount=" + cacheStats.getAllocationFailCount()); cacheStats.reset(); } @@ -759,12 +758,10 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * This method will find the buckets that are minimally occupied - * and are not reference counted and will free them completely - * without any constraint on the access times of the elements, - * and as a process will completely free at most the number of buckets - * passed, sometimes it might not due to changing refCounts - * + * This method will find the buckets that are minimally occupied and are not reference counted and + * will free them completely without any constraint on the access times of the elements, and as a + * process will completely free at most the number of buckets passed, sometimes it might not due + * to changing refCounts * @param completelyFreeBucketsNeeded number of buckets to free **/ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { @@ -778,7 +775,7 @@ public class BucketCache implements BlockCache, HeapSize { } }); Set candidateBuckets = - bucketAllocator.getLeastFilledBuckets(inUseBuckets, completelyFreeBucketsNeeded); + bucketAllocator.getLeastFilledBuckets(inUseBuckets, completelyFreeBucketsNeeded); for (Map.Entry entry : backingMap.entrySet()) { if (candidateBuckets.contains(bucketAllocator.getBucketIndex(entry.getValue().offset()))) { evictBucketEntryIfNoRpcReferenced(entry.getKey(), entry.getValue()); @@ -788,9 +785,9 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * Free the space if the used size reaches acceptableSize() or one size block - * couldn't be allocated. When freeing the space, we use the LRU algorithm and - * ensure there must be some blocks evicted + * Free the space if the used size reaches acceptableSize() or one size block couldn't be + * allocated. When freeing the space, we use the LRU algorithm and ensure there must be some + * blocks evicted * @param why Why we are being called */ private void freeSpace(final String why) { @@ -802,7 +799,7 @@ public class BucketCache implements BlockCache, HeapSize { freeInProgress = true; long bytesToFreeWithoutExtra = 0; // Calculate free byte for each bucketSizeinfo - StringBuilder msgBuffer = LOG.isDebugEnabled()? new StringBuilder(): null; + StringBuilder msgBuffer = LOG.isDebugEnabled() ? 
new StringBuilder() : null; BucketAllocator.IndexStatistics[] stats = bucketAllocator.getIndexStatistics(); long[] bytesToFreeForBucket = new long[stats.length]; for (int i = 0; i < stats.length; i++) { @@ -828,21 +825,22 @@ public class BucketCache implements BlockCache, HeapSize { long currentSize = bucketAllocator.getUsedSize(); long totalSize = bucketAllocator.getTotalSize(); if (LOG.isDebugEnabled() && msgBuffer != null) { - LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() + - " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" + - StringUtils.byteDesc(realCacheSize.sum()) + ", total=" + StringUtils.byteDesc(totalSize)); + LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() + + " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" + + StringUtils.byteDesc(realCacheSize.sum()) + ", total=" + + StringUtils.byteDesc(totalSize)); } - long bytesToFreeWithExtra = (long) Math.floor(bytesToFreeWithoutExtra - * (1 + extraFreeFactor)); + long bytesToFreeWithExtra = + (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor)); // Instantiate priority buckets - BucketEntryGroup bucketSingle = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(singleFactor)); - BucketEntryGroup bucketMulti = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(multiFactor)); - BucketEntryGroup bucketMemory = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(memoryFactor)); + BucketEntryGroup bucketSingle = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(singleFactor)); + BucketEntryGroup bucketMulti = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(multiFactor)); + BucketEntryGroup bucketMemory = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(memoryFactor)); // Scan entire map putting bucket entry into appropriate bucket entry // group @@ -863,8 +861,8 @@ public class BucketCache implements BlockCache, HeapSize { } } - PriorityQueue bucketQueue = new PriorityQueue<>(3, - Comparator.comparingLong(BucketEntryGroup::overflow)); + PriorityQueue bucketQueue = + new PriorityQueue<>(3, Comparator.comparingLong(BucketEntryGroup::overflow)); bucketQueue.add(bucketSingle); bucketQueue.add(bucketMulti); @@ -877,8 +875,8 @@ public class BucketCache implements BlockCache, HeapSize { while ((bucketGroup = bucketQueue.poll()) != null) { long overflow = bucketGroup.overflow(); if (overflow > 0) { - long bucketBytesToFree = Math.min(overflow, - (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets); + long bucketBytesToFree = + Math.min(overflow, (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets); bytesFreed += bucketGroup.free(bucketBytesToFree); } remainingBuckets--; @@ -905,8 +903,7 @@ public class BucketCache implements BlockCache, HeapSize { // there might be some buckets where the occupancy is very sparse and thus are not // yielding the free for the other bucket sizes, the fix for this to evict some // of the buckets, we do this by evicting the buckets that are least fulled - freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * - bucketSizesAboveThresholdCount(1.0f)); + freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * bucketSizesAboveThresholdCount(1.0f)); if (LOG.isDebugEnabled()) { long single = bucketSingle.totalSize(); @@ -914,11 +911,9 @@ public class BucketCache implements BlockCache, HeapSize { long memory = bucketMemory.totalSize(); if 
(LOG.isDebugEnabled()) { LOG.debug("Bucket cache free space completed; " + "freed=" - + StringUtils.byteDesc(bytesFreed) + ", " + "total=" - + StringUtils.byteDesc(totalSize) + ", " + "single=" - + StringUtils.byteDesc(single) + ", " + "multi=" - + StringUtils.byteDesc(multi) + ", " + "memory=" - + StringUtils.byteDesc(memory)); + + StringUtils.byteDesc(bytesFreed) + ", " + "total=" + StringUtils.byteDesc(totalSize) + + ", " + "single=" + StringUtils.byteDesc(single) + ", " + "multi=" + + StringUtils.byteDesc(multi) + ", " + "memory=" + StringUtils.byteDesc(memory)); } } @@ -982,7 +977,7 @@ public class BucketCache implements BlockCache, HeapSize { * bucketAllocator do not free its memory. * @see BlockCacheUtil#shouldReplaceExistingCacheBlock(BlockCache blockCache,BlockCacheKey * cacheKey, Cacheable newBlock) - * @param key Block cache key + * @param key Block cache key * @param bucketEntry Bucket entry to put into backingMap. */ protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) { @@ -998,11 +993,11 @@ public class BucketCache implements BlockCache, HeapSize { /** * Prepare and return a warning message for Bucket Allocator Exception * @param fle The exception - * @param re The RAMQueueEntry for which the exception was thrown. + * @param re The RAMQueueEntry for which the exception was thrown. * @return A warning message created from the input RAMQueueEntry object. */ private static String getAllocationFailWarningMessage(final BucketAllocatorException fle, - final RAMQueueEntry re) { + final RAMQueueEntry re) { final StringBuilder sb = new StringBuilder(); sb.append("Most recent failed allocation after "); sb.append(ALLOCATION_FAIL_LOG_TIME_PERIOD); @@ -1035,7 +1030,7 @@ public class BucketCache implements BlockCache, HeapSize { * are passed in even if failure being sure to remove from ramCache else we'll never undo the * references and we'll OOME. * @param entries Presumes list passed in here will be processed by this invocation only. No - * interference expected. + * interference expected. */ void doDrain(final List entries, ByteBuffer metaBuff) throws InterruptedException { if (entries.isEmpty()) { @@ -1071,8 +1066,8 @@ public class BucketCache implements BlockCache, HeapSize { // transferred with our current IOEngines. Should take care, when we have new kinds of // IOEngine in the future. metaBuff.clear(); - BucketEntry bucketEntry = re.writeToCache(ioEngine, bucketAllocator, realCacheSize, - this::createRecycler, metaBuff); + BucketEntry bucketEntry = + re.writeToCache(ioEngine, bucketAllocator, realCacheSize, this::createRecycler, metaBuff); // Successfully added. Up index and add bucketEntry. Clear io exceptions. bucketEntries[index] = bucketEntry; if (ioErrorStartTime > 0) { @@ -1082,7 +1077,9 @@ public class BucketCache implements BlockCache, HeapSize { } catch (BucketAllocatorException fle) { long currTs = EnvironmentEdgeManager.currentTime(); cacheStats.allocationFailed(); // Record the warning. - if (allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD) { + if ( + allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD + ) { LOG.warn(getAllocationFailWarningMessage(fle, re)); allocFailLogPrevTs = currTs; } @@ -1155,12 +1152,12 @@ public class BucketCache implements BlockCache, HeapSize { * Blocks until elements available in {@code q} then tries to grab as many as possible before * returning. * @param receptacle Where to stash the elements taken from queue. 
We clear before we use it just - * in case. - * @param q The queue to take from. + * in case. + * @param q The queue to take from. * @return {@code receptacle} laden with elements taken from the queue or empty if none found. */ static List getRAMQueueEntries(BlockingQueue q, - List receptacle) throws InterruptedException { + List receptacle) throws InterruptedException { // Clear sets all entries to null and sets size to 0. We retain allocations. Presume it // ok even if list grew to accommodate thousands. receptacle.clear(); @@ -1172,7 +1169,7 @@ public class BucketCache implements BlockCache, HeapSize { /** * @see #retrieveFromFile(int[]) */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="OBL_UNSATISFIED_OBLIGATION", + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", justification = "false positive, try-with-resources ensures close is called.") private void persistToFile() throws IOException { assert !cacheEnabled; @@ -1199,11 +1196,11 @@ public class BucketCache implements BlockCache, HeapSize { int pblen = ProtobufMagic.lengthOfPBMagic(); byte[] pbuf = new byte[pblen]; IOUtils.readFully(in, pbuf, 0, pblen); - if (! ProtobufMagic.isPBMagicPrefix(pbuf)) { + if (!ProtobufMagic.isPBMagicPrefix(pbuf)) { // In 3.0 we have enough flexibility to dump the old cache data. // TODO: In 2.x line, this might need to be filled in to support reading the old format - throw new IOException("Persistence file does not start with protobuf magic number. " + - persistencePath); + throw new IOException( + "Persistence file does not start with protobuf magic number. " + persistencePath); } parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in)); bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize); @@ -1214,6 +1211,7 @@ public class BucketCache implements BlockCache, HeapSize { /** * Create an input stream that deletes the file after reading it. Use in try-with-resources to * avoid this pattern where an exception thrown from a finally block may mask earlier exceptions: + * *

              *   File f = ...
              *   try (FileInputStream fis = new FileInputStream(f)) {
          @@ -1222,6 +1220,7 @@ public class BucketCache implements BlockCache, HeapSize {
              *     if (!f.delete()) throw new IOException("failed to delete");
              *   }
              * </pre>
          + * * @param file the file to read and delete * @return a FileInputStream for the given file * @throws IOException if there is a problem creating the stream @@ -1229,10 +1228,12 @@ public class BucketCache implements BlockCache, HeapSize { private FileInputStream deleteFileOnClose(final File file) throws IOException { return new FileInputStream(file) { private File myFile; + private FileInputStream init(File file) { myFile = file; return this; } + @Override public void close() throws IOException { // close() will be called during try-with-resources and it will be @@ -1252,19 +1253,18 @@ public class BucketCache implements BlockCache, HeapSize { } private void verifyCapacityAndClasses(long capacitySize, String ioclass, String mapclass) - throws IOException { + throws IOException { if (capacitySize != cacheCapacity) { - throw new IOException("Mismatched cache capacity:" - + StringUtils.byteDesc(capacitySize) + ", expected: " - + StringUtils.byteDesc(cacheCapacity)); + throw new IOException("Mismatched cache capacity:" + StringUtils.byteDesc(capacitySize) + + ", expected: " + StringUtils.byteDesc(cacheCapacity)); } if (!ioEngine.getClass().getName().equals(ioclass)) { - throw new IOException("Class name for IO engine mismatch: " + ioclass - + ", expected:" + ioEngine.getClass().getName()); + throw new IOException("Class name for IO engine mismatch: " + ioclass + ", expected:" + + ioEngine.getClass().getName()); } if (!backingMap.getClass().getName().equals(mapclass)) { - throw new IOException("Class name for cache map mismatch: " + mapclass - + ", expected:" + backingMap.getClass().getName()); + throw new IOException("Class name for cache map mismatch: " + mapclass + ", expected:" + + backingMap.getClass().getName()); } } @@ -1282,9 +1282,8 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * Check whether we tolerate IO error this time. If the duration of IOEngine - * throwing errors exceeds ioErrorsDurationTimeTolerated, we will disable the - * cache + * Check whether we tolerate IO error this time. If the duration of IOEngine throwing errors + * exceeds ioErrorsDurationTimeTolerated, we will disable the cache */ private void checkIOErrorIsTolerated() { long now = EnvironmentEdgeManager.currentTime(); @@ -1292,8 +1291,8 @@ public class BucketCache implements BlockCache, HeapSize { long ioErrorStartTimeTmp = this.ioErrorStartTime; if (ioErrorStartTimeTmp > 0) { if (cacheEnabled && (now - ioErrorStartTimeTmp) > this.ioErrorsTolerationDuration) { - LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + - "ms, disabling cache, please check your IOEngine"); + LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + + "ms, disabling cache, please check your IOEngine"); disableCache(); } } else { @@ -1309,7 +1308,8 @@ public class BucketCache implements BlockCache, HeapSize { cacheEnabled = false; ioEngine.shutdown(); this.scheduleThreadPool.shutdown(); - for (int i = 0; i < writerThreads.length; ++i) writerThreads[i].interrupt(); + for (int i = 0; i < writerThreads.length; ++i) + writerThreads[i].interrupt(); this.ramCache.clear(); if (!ioEngine.isPersistent() || persistencePath == null) { // If persistent ioengine and a path, we will serialize out the backingMap. 
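The hunk above reformats the javadoc for deleteFileOnClose, which returns a FileInputStream that deletes its backing file inside close(), so that try-with-resources reports a failed delete instead of a finally block masking an earlier read error. A minimal, self-contained sketch of that pattern follows; the class name DeleteOnCloseSketch and the main() driver are illustrative only, and it captures the File directly rather than going through the init(File) helper used in the real BucketCache code.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

public final class DeleteOnCloseSketch {

  /** Returns a stream over {@code file} that also deletes the file when closed. */
  static FileInputStream deleteFileOnClose(final File file) throws IOException {
    return new FileInputStream(file) {
      @Override
      public void close() throws IOException {
        super.close();
        // Deleting here means a failed delete surfaces from close(), where try-with-resources
        // either throws it or attaches it as a suppressed exception, instead of a finally
        // block hiding an exception thrown while reading.
        if (file.exists() && !file.delete()) {
          throw new IOException("Failed to delete " + file.getAbsolutePath());
        }
      }
    };
  }

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("persistence", ".bin");
    Files.write(f.toPath(), "retrieved state".getBytes(StandardCharsets.UTF_8));
    try (FileInputStream in = deleteFileOnClose(f)) {
      System.out.println("first byte: " + in.read());
    }
    System.out.println("file still present: " + f.exists()); // false if the delete succeeded
  }
}
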
@@ -1325,8 +1325,8 @@ public class BucketCache implements BlockCache, HeapSize { @Override public void shutdown() { disableCache(); - LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() - + "; path to write=" + persistencePath); + LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() + "; path to write=" + + persistencePath); if (ioEngine.isPersistent() && persistencePath != null) { try { join(); @@ -1391,19 +1391,17 @@ public class BucketCache implements BlockCache, HeapSize { * Evicts all blocks for a specific HFile. *

          * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override public int evictBlocksByHfileName(String hfileName) { - Set keySet = blocksByHFile.subSet( - new BlockCacheKey(hfileName, Long.MIN_VALUE), true, - new BlockCacheKey(hfileName, Long.MAX_VALUE), true); + Set keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), + true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true); int numEvicted = 0; for (BlockCacheKey key : keySet) { if (evictBlock(key)) { - ++numEvicted; + ++numEvicted; } } @@ -1411,10 +1409,9 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * Used to group bucket entries into priority buckets. There will be a - * BucketEntryGroup for each priority (single, multi, memory). Once bucketed, - * the eviction algorithm takes the appropriate number of elements out of each - * according to configuration parameters and their relative sizes. + * Used to group bucket entries into priority buckets. There will be a BucketEntryGroup for each + * priority (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate + * number of elements out of each according to configuration parameters and their relative sizes. */ private class BucketEntryGroup { @@ -1496,8 +1493,8 @@ public class BucketCache implements BlockCache, HeapSize { } public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator alloc, - final LongAdder realCacheSize, Function createRecycler, - ByteBuffer metaBuff) throws IOException { + final LongAdder realCacheSize, Function createRecycler, + ByteBuffer metaBuff) throws IOException { int len = data.getSerializedLength(); // This cacheable thing can't be serialized if (len == 0) { @@ -1508,7 +1505,7 @@ public class BucketCache implements BlockCache, HeapSize { BucketEntry bucketEntry = null; try { bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory, createRecycler, - getByteBuffAllocator()); + getByteBuffAllocator()); bucketEntry.setDeserializerReference(data.getDeserializer()); if (data instanceof HFileBlock) { // If an instance of HFileBlock, save on some allocations. @@ -1535,8 +1532,7 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * Only used in test - * @throws InterruptedException + * Only used in test n */ void stopWriterThreads() throws InterruptedException { for (WriterThread writerThread : writerThreads) { @@ -1549,8 +1545,7 @@ public class BucketCache implements BlockCache, HeapSize { @Override public Iterator iterator() { // Don't bother with ramcache since stuff is in here only a little while. - final Iterator> i = - this.backingMap.entrySet().iterator(); + final Iterator> i = this.backingMap.entrySet().iterator(); return new Iterator() { private final long now = System.nanoTime(); @@ -1575,7 +1570,7 @@ public class BucketCache implements BlockCache, HeapSize { @Override public BlockType getBlockType() { - // Not held by BucketEntry. Could add it if wanted on BucketEntry creation. + // Not held by BucketEntry. Could add it if wanted on BucketEntry creation. 
return null; } @@ -1607,8 +1602,8 @@ public class BucketCache implements BlockCache, HeapSize { diff = Long.compare(this.getOffset(), other.getOffset()); if (diff != 0) return diff; if (other.getCachedTime() < 0 || this.getCachedTime() < 0) { - throw new IllegalStateException("" + this.getCachedTime() + ", " + - other.getCachedTime()); + throw new IllegalStateException( + "" + this.getCachedTime() + ", " + other.getCachedTime()); } return Long.compare(other.getCachedTime(), this.getCachedTime()); } @@ -1621,7 +1616,7 @@ public class BucketCache implements BlockCache, HeapSize { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1681,11 +1676,11 @@ public class BucketCache implements BlockCache, HeapSize { /** * Defined the map as {@link ConcurrentHashMap} explicitly here, because in * {@link RAMCache#get(BlockCacheKey)} and - * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to - * guarantee the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). - * Besides, the func method can execute exactly once only when the key is present(or absent) - * and under the lock context. Otherwise, the reference count of block will be messed up. - * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to guarantee + * the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). Besides, the + * func method can execute exactly once only when the key is present(or absent) and under the + * lock context. Otherwise, the reference count of block will be messed up. Notice that the + * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. */ final ConcurrentHashMap delegate = new ConcurrentHashMap<>(); @@ -1719,7 +1714,8 @@ public class BucketCache implements BlockCache, HeapSize { } public boolean remove(BlockCacheKey key) { - return remove(key, re->{}); + return remove(key, re -> { + }); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java index 4a2b0a13590..73ca011004a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java @@ -1,29 +1,27 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** * Class that implements cache metrics for bucket cache. @@ -46,9 +44,8 @@ public class BucketCacheStats extends CacheStats { @Override public String toString() { - return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() + - ", ioTimePerHit=" + getIOTimePerHit() + ", allocationFailCount=" + - getAllocationFailCount(); + return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() + ", ioTimePerHit=" + + getIOTimePerHit() + ", allocationFailCount=" + getAllocationFailCount(); } public void ioHit(long time) { @@ -79,7 +76,7 @@ public class BucketCacheStats extends CacheStats { return allocationFailCount.sum(); } - public void allocationFailed () { + public void allocationFailed() { allocationFailCount.increment(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java index 222cd804112..a04a32bfe64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -26,7 +23,6 @@ import java.util.Comparator; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; import org.apache.hadoop.hbase.io.hfile.BlockPriority; @@ -50,7 +46,7 @@ import org.apache.yetus.audience.InterfaceAudience; class BucketEntry implements HBaseReferenceCounted { // access counter comparator, descending order static final Comparator COMPARATOR = - Comparator.comparingLong(BucketEntry::getAccessCounter).reversed(); + Comparator.comparingLong(BucketEntry::getAccessCounter).reversed(); private int offsetBase; private int length; @@ -99,11 +95,11 @@ class BucketEntry implements HBaseReferenceCounted { /** * @param createRecycler used to free this {@link BucketEntry} when {@link BucketEntry#refCnt} - * becoming 0. NOTICE that {@link ByteBuffAllocator#NONE} could only be used for test. + * becoming 0. NOTICE that {@link ByteBuffAllocator#NONE} could only be used + * for test. */ BucketEntry(long offset, int length, long accessCounter, boolean inMemory, - Function createRecycler, - ByteBuffAllocator allocator) { + Function createRecycler, ByteBuffAllocator allocator) { if (createRecycler == null) { throw new IllegalArgumentException("createRecycler could not be null!"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index b2a00f1795e..ff4e90b8865 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -24,7 +21,6 @@ import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; @@ -32,9 +28,10 @@ import org.apache.hadoop.hbase.io.hfile.BlockPriority; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager; import org.apache.hadoop.hbase.io.hfile.HFileBlock; -import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos; @InterfaceAudience.Private @@ -44,39 +41,33 @@ final class BucketProtoUtils { } static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) { - return BucketCacheProtos.BucketCacheEntry.newBuilder() - .setCacheCapacity(cache.getMaxSize()) + return BucketCacheProtos.BucketCacheEntry.newBuilder().setCacheCapacity(cache.getMaxSize()) .setIoClass(cache.ioEngine.getClass().getName()) .setMapClass(cache.backingMap.getClass().getName()) .putAllDeserializers(CacheableDeserializerIdManager.save()) .setBackingMap(BucketProtoUtils.toPB(cache.backingMap)) - .setChecksum(ByteString.copyFrom(((PersistentIOEngine) cache.ioEngine). - calculateChecksum(cache.getAlgorithm()))).build(); + .setChecksum(ByteString + .copyFrom(((PersistentIOEngine) cache.ioEngine).calculateChecksum(cache.getAlgorithm()))) + .build(); } - private static BucketCacheProtos.BackingMap toPB( - Map backingMap) { + private static BucketCacheProtos.BackingMap toPB(Map backingMap) { BucketCacheProtos.BackingMap.Builder builder = BucketCacheProtos.BackingMap.newBuilder(); for (Map.Entry entry : backingMap.entrySet()) { - builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder() - .setKey(toPB(entry.getKey())) - .setValue(toPB(entry.getValue())) - .build()); + builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder().setKey(toPB(entry.getKey())) + .setValue(toPB(entry.getValue())).build()); } return builder.build(); } private static BucketCacheProtos.BlockCacheKey toPB(BlockCacheKey key) { - return BucketCacheProtos.BlockCacheKey.newBuilder() - .setHfilename(key.getHfileName()) - .setOffset(key.getOffset()) - .setPrimaryReplicaBlock(key.isPrimary()) - .setBlockType(toPB(key.getBlockType())) - .build(); + return BucketCacheProtos.BlockCacheKey.newBuilder().setHfilename(key.getHfileName()) + .setOffset(key.getOffset()).setPrimaryReplicaBlock(key.isPrimary()) + .setBlockType(toPB(key.getBlockType())).build(); } private static BucketCacheProtos.BlockType toPB(BlockType blockType) { - switch(blockType) { + switch (blockType) { case DATA: return BucketCacheProtos.BlockType.data; case META: @@ -107,13 +98,9 @@ final class BucketProtoUtils { } private static BucketCacheProtos.BucketEntry toPB(BucketEntry entry) { - return BucketCacheProtos.BucketEntry.newBuilder() - .setOffset(entry.offset()) - .setLength(entry.getLength()) - .setDeserialiserIndex(entry.deserializerIndex) - .setAccessCounter(entry.getAccessCounter()) - .setPriority(toPB(entry.getPriority())) - .build(); + return BucketCacheProtos.BucketEntry.newBuilder().setOffset(entry.offset()) + .setLength(entry.getLength()).setDeserialiserIndex(entry.deserializerIndex) + 
.setAccessCounter(entry.getAccessCounter()).setPriority(toPB(entry.getPriority())).build(); } private static BucketCacheProtos.BlockPriority toPB(BlockPriority p) { @@ -129,24 +116,21 @@ final class BucketProtoUtils { } } - static ConcurrentHashMap fromPB( - Map deserializers, BucketCacheProtos.BackingMap backingMap, - Function createRecycler) - throws IOException { + static ConcurrentHashMap fromPB(Map deserializers, + BucketCacheProtos.BackingMap backingMap, Function createRecycler) + throws IOException { ConcurrentHashMap result = new ConcurrentHashMap<>(); for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) { BucketCacheProtos.BlockCacheKey protoKey = entry.getKey(); BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), protoKey.getOffset(), - protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType())); + protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType())); BucketCacheProtos.BucketEntry protoValue = entry.getValue(); // TODO:We use ByteBuffAllocator.HEAP here, because we could not get the ByteBuffAllocator // which created by RpcServer elegantly. - BucketEntry value = new BucketEntry( - protoValue.getOffset(), - protoValue.getLength(), - protoValue.getAccessCounter(), - protoValue.getPriority() == BucketCacheProtos.BlockPriority.memory, createRecycler, - ByteBuffAllocator.HEAP); + BucketEntry value = new BucketEntry(protoValue.getOffset(), protoValue.getLength(), + protoValue.getAccessCounter(), + protoValue.getPriority() == BucketCacheProtos.BlockPriority.memory, createRecycler, + ByteBuffAllocator.HEAP); // This is the deserializer that we stored int oldIndex = protoValue.getDeserialiserIndex(); String deserializerClass = deserializers.get(oldIndex); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java index b0415e3e50b..78166e88ffd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java @@ -1,31 +1,29 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferAllocator; import org.apache.hadoop.hbase.util.ByteBufferArray; +import org.apache.yetus.audience.InterfaceAudience; /** * IO engine that stores data in memory using an array of ByteBuffers {@link ByteBufferArray}. @@ -66,9 +64,8 @@ public class ByteBufferIOEngine implements IOEngine { private final long capacity; /** - * Construct the ByteBufferIOEngine with the given capacity - * @param capacity - * @throws IOException ideally here no exception to be thrown from the allocator + * Construct the ByteBufferIOEngine with the given capacity n * @throws IOException ideally here + * no exception to be thrown from the allocator */ public ByteBufferIOEngine(long capacity) throws IOException { this.capacity = capacity; @@ -78,14 +75,12 @@ public class ByteBufferIOEngine implements IOEngine { @Override public String toString() { - return "ioengine=" + this.getClass().getSimpleName() + ", capacity=" + - String.format("%,d", this.capacity); + return "ioengine=" + this.getClass().getSimpleName() + ", capacity=" + + String.format("%,d", this.capacity); } /** - * Memory IO engine is always unable to support persistent storage for the - * cache - * @return false + * Memory IO engine is always unable to support persistent storage for the cache n */ @Override public boolean isPersistent() { @@ -111,7 +106,7 @@ public class ByteBufferIOEngine implements IOEngine { /** * Transfers data from the given {@link ByteBuffer} to the buffer array. Position of source will * be advanced by the {@link ByteBuffer#remaining()}. - * @param src the given byte buffer from which bytes are to be read. + * @param src the given byte buffer from which bytes are to be read. * @param offset The offset in the ByteBufferArray of the first byte to be written * @throws IOException throws IOException if writing to the array throws exception */ @@ -123,7 +118,7 @@ public class ByteBufferIOEngine implements IOEngine { /** * Transfers data from the given {@link ByteBuff} to the buffer array. Position of source will be * advanced by the {@link ByteBuffer#remaining()}. - * @param src the given byte buffer from which bytes are to be read. + * @param src the given byte buffer from which bytes are to be read. * @param offset The offset in the ByteBufferArray of the first byte to be written * @throws IOException throws IOException if writing to the array throws exception */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java index d2cbdb7b16c..15c7ee3236c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java @@ -1,30 +1,27 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown by {@link BucketAllocator#allocateBlock(int)} when cache is full for - * the requested size + * Thrown by {@link BucketAllocator#allocateBlock(int)} when cache is full for the requested size */ @InterfaceAudience.Private public class CacheFullException extends IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java index b4e77bd2348..daa25cee1de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java @@ -1,43 +1,38 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; - import java.util.Comparator; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue; /** - * A memory-bound queue that will grow until an element brings total size larger - * than maxSize. From then on, only entries that are sorted larger than the - * smallest current entry will be inserted/replaced. - * + * A memory-bound queue that will grow until an element brings total size larger than maxSize. From + * then on, only entries that are sorted larger than the smallest current entry will be + * inserted/replaced. *
<p>
          - * Use this when you want to find the largest elements (according to their - * ordering, not their heap size) that consume as close to the specified maxSize - * as possible. Default behavior is to grow just above rather than just below - * specified max. + * Use this when you want to find the largest elements (according to their ordering, not their heap + * size) that consume as close to the specified maxSize as possible. Default behavior is to grow + * just above rather than just below specified max. */ @InterfaceAudience.Private public class CachedEntryQueue { @@ -51,7 +46,7 @@ public class CachedEntryQueue { private long maxSize; /** - * @param maxSize the target size of elements in the queue + * @param maxSize the target size of elements in the queue * @param blockSize expected average size of blocks */ public CachedEntryQueue(long maxSize, long blockSize) { @@ -69,15 +64,15 @@ public class CachedEntryQueue { /** * Attempt to add the specified entry to this queue. *
<p>
          - * If the queue is smaller than the max size, or if the specified element is - * ordered after the smallest element in the queue, the element will be added - * to the queue. Otherwise, there is no side effect of this call. + * If the queue is smaller than the max size, or if the specified element is ordered after the + * smallest element in the queue, the element will be added to the queue. Otherwise, there is no + * side effect of this call. * @param entry a bucket entry with key to try to add to the queue */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", - justification = "head can not be null as cacheSize is greater than maxSize," - + " which means we have something in the queue") + value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", + justification = "head can not be null as cacheSize is greater than maxSize," + + " which means we have something in the queue") public void add(Map.Entry entry) { if (cacheSize < maxSize) { queue.add(entry); @@ -98,16 +93,14 @@ public class CachedEntryQueue { } /** - * @return The next element in this queue, or {@code null} if the queue is - * empty. + * @return The next element in this queue, or {@code null} if the queue is empty. */ public Map.Entry poll() { return queue.poll(); } /** - * @return The last element in this queue, or {@code null} if the queue is - * empty. + * @return The last element in this queue, or {@code null} if the queue is empty. */ public Map.Entry pollLast() { return queue.pollLast(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java index 3169a66539a..da5f49596c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java @@ -1,23 +1,23 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
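// Hedged, self-contained sketch of the CachedEntryQueue policy described in the javadoc above:
// grow until the tracked size reaches maxSize, then only admit an element that sorts after the
// current smallest one, evicting that smallest element. It uses plain Guava rather than the
// shaded hbase-thirdparty classes, and the element doubles as its own "size" purely for
// illustration; none of this is HBase API.
import com.google.common.collect.MinMaxPriorityQueue;

public class BoundedLargestEntriesSketch {
  private final MinMaxPriorityQueue<Long> queue = MinMaxPriorityQueue.create();
  private final long maxSize;
  private long trackedSize = 0;

  BoundedLargestEntriesSketch(long maxSize) {
    this.maxSize = maxSize;
  }

  void add(long entry) {
    if (trackedSize < maxSize) {
      queue.add(entry);                          // still below the target size: always admit
      trackedSize += entry;
    } else if (entry > queue.peekFirst()) {
      trackedSize += entry - queue.pollFirst();  // evict the smallest, keep total near maxSize
      queue.add(entry);
    }
  }

  Long largest() {
    return queue.peekLast();                     // largest element currently retained
  }
}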
*/ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index e4a2c0b1aea..511d8afff46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -1,20 +1,19 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; @@ -55,7 +54,7 @@ public class FileIOEngine extends PersistentIOEngine { private FileWriteAccessor writeAccessor = new FileWriteAccessor(); public FileIOEngine(long capacity, boolean maintainPersistence, String... 
filePaths) - throws IOException { + throws IOException { super(filePaths); this.sizePerFile = capacity / filePaths.length; this.capacity = this.sizePerFile * filePaths.length; @@ -82,9 +81,8 @@ public class FileIOEngine extends PersistentIOEngine { if (totalSpace < sizePerFile) { // The next setting length will throw exception,logging this message // is just used for the detail reason of exception, - String msg = "Only " + StringUtils.byteDesc(totalSpace) - + " total space under " + filePath + ", not enough for requested " - + StringUtils.byteDesc(sizePerFile); + String msg = "Only " + StringUtils.byteDesc(totalSpace) + " total space under " + filePath + + ", not enough for requested " + StringUtils.byteDesc(sizePerFile); LOG.warn(msg); } File file = new File(filePath); @@ -95,8 +93,8 @@ public class FileIOEngine extends PersistentIOEngine { } fileChannels[i] = rafs[i].getChannel(); channelLocks[i] = new ReentrantLock(); - LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile) - + ", on the path:" + filePath); + LOG.info( + "Allocating cache " + StringUtils.byteDesc(sizePerFile) + ", on the path:" + filePath); } catch (IOException fex) { LOG.error("Failed allocating cache on " + filePath, fex); shutdown(); @@ -107,13 +105,12 @@ public class FileIOEngine extends PersistentIOEngine { @Override public String toString() { - return "ioengine=" + this.getClass().getSimpleName() + ", paths=" - + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity); + return "ioengine=" + this.getClass().getSimpleName() + ", paths=" + Arrays.asList(filePaths) + + ", capacity=" + String.format("%,d", this.capacity); } /** - * File IO engine is always able to support persistent storage for the cache - * @return true + * File IO engine is always able to support persistent storage for the cache n */ @Override public boolean isPersistent() { @@ -141,7 +138,7 @@ public class FileIOEngine extends PersistentIOEngine { // ensure that the results are not corrupted before consuming them. 
if (dstBuff.limit() != length) { throw new IllegalArgumentIOException( - "Only " + dstBuff.limit() + " bytes read, " + length + " expected"); + "Only " + dstBuff.limit() + " bytes read, " + length + " expected"); } } catch (IOException ioe) { dstBuff.release(); @@ -153,7 +150,7 @@ public class FileIOEngine extends PersistentIOEngine { } void closeFileChannels() { - for (FileChannel fileChannel: fileChannels) { + for (FileChannel fileChannel : fileChannels) { try { fileChannel.close(); } catch (IOException e) { @@ -165,8 +162,7 @@ public class FileIOEngine extends PersistentIOEngine { /** * Transfers data from the given byte buffer to file * @param srcBuffer the given byte buffer from which bytes are to be read - * @param offset The offset in the file where the first byte to be written - * @throws IOException + * @param offset The offset in the file where the first byte to be written n */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { @@ -174,8 +170,7 @@ public class FileIOEngine extends PersistentIOEngine { } /** - * Sync the data to file after writing - * @throws IOException + * Sync the data to file after writing n */ @Override public void sync() throws IOException { @@ -218,8 +213,8 @@ public class FileIOEngine extends PersistentIOEngine { accessFile(writeAccessor, srcBuff, offset); } - private void accessFile(FileAccessor accessor, ByteBuff buff, - long globalOffset) throws IOException { + private void accessFile(FileAccessor accessor, ByteBuff buff, long globalOffset) + throws IOException { int startFileNum = getFileNum(globalOffset); int remainingAccessDataLen = buff.remaining(); int endFileNum = getFileNum(globalOffset + remainingAccessDataLen - 1); @@ -252,17 +247,15 @@ public class FileIOEngine extends PersistentIOEngine { } if (accessFileNum >= fileChannels.length) { throw new IOException("Required data len " + StringUtils.byteDesc(buff.remaining()) - + " exceed the engine's capacity " + StringUtils.byteDesc(capacity) + " where offset=" - + globalOffset); + + " exceed the engine's capacity " + StringUtils.byteDesc(capacity) + " where offset=" + + globalOffset); } } } /** - * Get the absolute offset in given file with the relative global offset. - * @param fileNum - * @param globalOffset - * @return the absolute offset + * Get the absolute offset in given file with the relative global offset. 
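// Hedged sketch of the multi-file offset arithmetic that the FileIOEngine hunk above reformats:
// a global bucket-cache offset is striped across N equally sized files, so getFileNum() divides
// by the per-file size and getAbsoluteOffsetInFile() subtracts the stripes that precede it. The
// constants and class name below are illustrative assumptions, not HBase API.
public class FileStripingSketch {
  static final long SIZE_PER_FILE = 32L * 1024 * 1024; // assumed per-file capacity

  static int fileNum(long globalOffset) {
    return (int) (globalOffset / SIZE_PER_FILE);
  }

  static long offsetInFile(int fileNum, long globalOffset) {
    return globalOffset - (long) fileNum * SIZE_PER_FILE;
  }

  public static void main(String[] args) {
    long globalOffset = 70L * 1024 * 1024;            // 70 MB into the cache
    int file = fileNum(globalOffset);                 // -> file 2
    long local = offsetInFile(file, globalOffset);    // -> 6 MB into that file
    System.out.println("file=" + file + ", localOffset=" + local);
  }
}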
nn * @return the + * absolute offset */ private long getAbsoluteOffsetInFile(int fileNum, long globalOffset) { return globalOffset - fileNum * sizePerFile; @@ -274,8 +267,7 @@ public class FileIOEngine extends PersistentIOEngine { } int fileNum = (int) (offset / sizePerFile); if (fileNum >= fileChannels.length) { - throw new RuntimeException("Not expected offset " + offset - + " where capacity=" + capacity); + throw new RuntimeException("Not expected offset " + offset + " where capacity=" + capacity); } return fileNum; } @@ -298,31 +290,30 @@ public class FileIOEngine extends PersistentIOEngine { fileChannel.close(); } LOG.warn("Caught ClosedChannelException accessing BucketCache, reopening file: " - + filePaths[accessFileNum], ioe); + + filePaths[accessFileNum], ioe); rafs[accessFileNum] = new RandomAccessFile(filePaths[accessFileNum], "rw"); fileChannels[accessFileNum] = rafs[accessFileNum].getChannel(); - } finally{ + } finally { channelLock.unlock(); } } private interface FileAccessor { - int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) - throws IOException; + int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) throws IOException; } private static class FileReadAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuff buff, - long accessOffset) throws IOException { + public int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) + throws IOException { return buff.read(fileChannel, accessOffset); } } private static class FileWriteAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuff buff, - long accessOffset) throws IOException { + public int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) + throws IOException { return buff.write(fileChannel, accessOffset); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java index c0cb22d0b07..b09e0963ca2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferAllocator; @@ -34,8 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * IO engine that stores data to a file on the specified file system using memory mapping - * mechanism + * IO engine that stores data to a file on the specified file system using memory mapping mechanism */ @InterfaceAudience.Private public abstract class FileMmapIOEngine extends PersistentIOEngine { @@ -93,12 +91,11 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine { @Override public String toString() { return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path + ", size=" - + String.format("%,d", this.size); + + String.format("%,d", this.size); } /** - * File IO engine is always able to support persistent storage for the cache - * @return true + * File IO engine is always able to support persistent storage for the cache n */ @Override public boolean isPersistent() { @@ -112,8 +109,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine { /** * Transfers data from the given byte buffer to file * @param srcBuffer the given byte buffer from which bytes are to be read - * @param offset The offset in the file where the first byte to be written - * @throws IOException + * @param offset The offset in the file where the first byte to be written n */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { @@ -126,8 +122,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine { } /** - * Sync the data to file after writing - * @throws IOException + * Sync the data to file after writing n */ @Override public void sync() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java index 3ffb57ebcf0..62ff19878fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java @@ -1,33 +1,30 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.yetus.audience.InterfaceAudience; /** - * A class implementing IOEngine interface supports data services for - * {@link BucketCache}. + * A class implementing IOEngine interface supports data services for {@link BucketCache}. */ @InterfaceAudience.Private public interface IOEngine { @@ -49,7 +46,7 @@ public interface IOEngine { * Transfers data from IOEngine to a Cacheable object. * @param be maintains an (offset,len,refCnt) inside. * @return Cacheable which will wrap the NIO ByteBuffers from IOEngine. - * @throws IOException when any IO error happen + * @throws IOException when any IO error happen * @throws IllegalArgumentException when the length of the ByteBuff read is less than 'len' */ Cacheable read(BucketEntry be) throws IOException; @@ -57,23 +54,19 @@ public interface IOEngine { /** * Transfers data from the given byte buffer to IOEngine * @param srcBuffer the given byte buffer from which bytes are to be read - * @param offset The offset in the IO engine where the first byte to be - * written - * @throws IOException + * @param offset The offset in the IO engine where the first byte to be written n */ void write(ByteBuffer srcBuffer, long offset) throws IOException; /** * Transfers the data from the given MultiByteBuffer to IOEngine * @param srcBuffer the given MultiBytebufffers from which bytes are to be read - * @param offset the offset in the IO engine where the first byte to be written - * @throws IOException + * @param offset the offset in the IO engine where the first byte to be written n */ void write(ByteBuff srcBuffer, long offset) throws IOException; /** - * Sync the data to IOEngine after writing - * @throws IOException + * Sync the data to IOEngine after writing n */ void sync() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java index 4ee7d0ed1be..495814fdc5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
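// Hedged sketch of the integrity check outlined by the PersistentIOEngine hunk above: hash each
// cache file's path, size and mtime and compare against the checksum persisted with the cache.
// File.length() stands in for the du-based size lookup in the real class, and MD5 is only the
// documented default; everything else here is illustrative, not HBase API.
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

public class FileIntegritySketch {
  static byte[] checksum(String[] filePaths, String algorithm) throws NoSuchAlgorithmException {
    StringBuilder sb = new StringBuilder();
    for (String path : filePaths) {
      File f = new File(path);
      sb.append(path).append(f.length()).append(f.lastModified()); // identity of the cache files
    }
    return MessageDigest.getInstance(algorithm)
      .digest(sb.toString().getBytes(StandardCharsets.UTF_8));
  }

  static void verify(byte[] persistedChecksum, String[] filePaths) throws NoSuchAlgorithmException {
    if (!Arrays.equals(persistedChecksum, checksum(filePaths, "MD5"))) {
      throw new IllegalStateException("Persistent bucket cache checksum mismatch");
    }
  }
}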
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.File; import java.io.IOException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.Shell; import org.apache.yetus.audience.InterfaceAudience; @@ -29,13 +28,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A class implementing PersistentIOEngine interface supports file integrity verification - * for {@link BucketCache} which use persistent IOEngine + * A class implementing PersistentIOEngine interface supports file integrity verification for + * {@link BucketCache} which use persistent IOEngine */ @InterfaceAudience.Private public abstract class PersistentIOEngine implements IOEngine { private static final Logger LOG = LoggerFactory.getLogger(PersistentIOEngine.class); - private static final DuFileCommand DU = new DuFileCommand(new String[] {"du", ""}); + private static final DuFileCommand DU = new DuFileCommand(new String[] { "du", "" }); protected final String[] filePaths; public PersistentIOEngine(String... filePaths) { @@ -50,22 +49,22 @@ public abstract class PersistentIOEngine implements IOEngine { throws IOException { byte[] calculateChecksum = calculateChecksum(algorithm); if (!Bytes.equals(persistentChecksum, calculateChecksum)) { - throw new IOException("Mismatch of checksum! The persistent checksum is " + - Bytes.toString(persistentChecksum) + ", but the calculate checksum is " + - Bytes.toString(calculateChecksum)); + throw new IOException( + "Mismatch of checksum! The persistent checksum is " + Bytes.toString(persistentChecksum) + + ", but the calculate checksum is " + Bytes.toString(calculateChecksum)); } } /** * Using an encryption algorithm to calculate a checksum, the default encryption algorithm is MD5 * @return the checksum which is convert to HexString - * @throws IOException something happened like file not exists + * @throws IOException something happened like file not exists * @throws NoSuchAlgorithmException no such algorithm */ protected byte[] calculateChecksum(String algorithm) { try { StringBuilder sb = new StringBuilder(); - for (String filePath : filePaths){ + for (String filePath : filePaths) { File file = new File(filePath); sb.append(filePath); sb.append(getFileSize(filePath)); @@ -113,4 +112,3 @@ public abstract class PersistentIOEngine implements IOEngine { } } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java index 53690602093..77c881888fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java index 4a0191fc0be..f5f9d760a79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,13 @@ package org.apache.hadoop.hbase.io.util; import java.lang.management.ManagementFactory; import java.lang.management.MemoryType; import java.lang.management.MemoryUsage; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.MemStoreLAB; -import org.apache.hadoop.hbase.util.Pair; /** * Util class to calculate memory size for memstore(on heap, off heap), block cache(L1, L2) of RS. @@ -37,15 +36,15 @@ public class MemorySizeUtil { public static final String MEMSTORE_SIZE_KEY = "hbase.regionserver.global.memstore.size"; public static final String MEMSTORE_SIZE_OLD_KEY = - "hbase.regionserver.global.memstore.upperLimit"; + "hbase.regionserver.global.memstore.upperLimit"; public static final String MEMSTORE_SIZE_LOWER_LIMIT_KEY = - "hbase.regionserver.global.memstore.size.lower.limit"; + "hbase.regionserver.global.memstore.size.lower.limit"; public static final String MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY = - "hbase.regionserver.global.memstore.lowerLimit"; + "hbase.regionserver.global.memstore.lowerLimit"; // Max global off heap memory that can be used for all memstores // This should be an absolute value in MBs and not percent. public static final String OFFHEAP_MEMSTORE_SIZE_KEY = - "hbase.regionserver.offheap.global.memstore.size"; + "hbase.regionserver.offheap.global.memstore.size"; public static final float DEFAULT_MEMSTORE_SIZE = 0.4f; // Default lower water mark limit is 95% size of memstore size. @@ -55,15 +54,15 @@ public class MemorySizeUtil { // a constant to convert a fraction to a percentage private static final int CONVERT_TO_PERCENTAGE = 100; - private static final String JVM_HEAP_EXCEPTION = "Got an exception while attempting to read " + - "information about the JVM heap. Please submit this log information in a bug report and " + - "include your JVM settings, specifically the GC in use and any -XX options. Consider " + - "restarting the service."; + private static final String JVM_HEAP_EXCEPTION = "Got an exception while attempting to read " + + "information about the JVM heap. Please submit this log information in a bug report and " + + "include your JVM settings, specifically the GC in use and any -XX options. Consider " + + "restarting the service."; /** * Return JVM memory statistics while properly handling runtime exceptions from the JVM. 
- * @return a memory usage object, null if there was a runtime exception. (n.b. you - * could also get -1 values back from the JVM) + * @return a memory usage object, null if there was a runtime exception. (n.b. you could also get + * -1 values back from the JVM) * @see MemoryUsage */ public static MemoryUsage safeGetHeapMemoryUsage() { @@ -78,43 +77,41 @@ public class MemorySizeUtil { /** * Checks whether we have enough heap memory left out after portion for Memstore and Block cache. - * We need atleast 20% of heap left out for other RS functions. - * @param conf + * We need atleast 20% of heap left out for other RS functions. n */ public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) { if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) { LOG.warn(MEMSTORE_SIZE_OLD_KEY + " is deprecated by " + MEMSTORE_SIZE_KEY); } float globalMemstoreSize = getGlobalMemStoreHeapPercent(conf, false); - int gml = (int)(globalMemstoreSize * CONVERT_TO_PERCENTAGE); + int gml = (int) (globalMemstoreSize * CONVERT_TO_PERCENTAGE); float blockCacheUpperLimit = getBlockCacheHeapPercent(conf); - int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); - if (CONVERT_TO_PERCENTAGE - (gml + bcul) - < (int)(CONVERT_TO_PERCENTAGE * - HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) { + int bcul = (int) (blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); + if ( + CONVERT_TO_PERCENTAGE - (gml + bcul) + < (int) (CONVERT_TO_PERCENTAGE * HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD) + ) { throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds " - + "the threshold required for successful cluster operation. " - + "The combined value cannot exceed 0.8. Please check " - + "the settings for hbase.regionserver.global.memstore.size and " - + "hfile.block.cache.size in your configuration. " - + "hbase.regionserver.global.memstore.size is " + globalMemstoreSize - + " hfile.block.cache.size is " + blockCacheUpperLimit); + + "the threshold required for successful cluster operation. " + + "The combined value cannot exceed 0.8. Please check " + + "the settings for hbase.regionserver.global.memstore.size and " + + "hfile.block.cache.size in your configuration. " + + "hbase.regionserver.global.memstore.size is " + globalMemstoreSize + + " hfile.block.cache.size is " + blockCacheUpperLimit); } } /** - * Retrieve global memstore configured size as percentage of total heap. - * @param c - * @param logInvalid + * Retrieve global memstore configured size as percentage of total heap. nn */ public static float getGlobalMemStoreHeapPercent(final Configuration c, - final boolean logInvalid) { - float limit = c.getFloat(MEMSTORE_SIZE_KEY, - c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE)); + final boolean logInvalid) { + float limit = + c.getFloat(MEMSTORE_SIZE_KEY, c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE)); if (limit > 0.8f || limit <= 0.0f) { if (logInvalid) { LOG.warn("Setting global memstore limit to default of " + DEFAULT_MEMSTORE_SIZE - + " because supplied value outside allowed range of (0 -> 0.8]"); + + " because supplied value outside allowed range of (0 -> 0.8]"); } limit = DEFAULT_MEMSTORE_SIZE; } @@ -126,13 +123,13 @@ public class MemorySizeUtil { * size. 
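// Illustrative walk-through (made-up values) of the constraint enforced by
// checkForClusterFreeHeapMemoryLimit above: the memstore fraction plus the block cache fraction
// must leave at least 20% of the heap for other regionserver work, i.e. their sum cannot
// exceed 0.8. This is a standalone arithmetic sketch, not the HBase method.
public class HeapBudgetSketch {
  public static void main(String[] args) {
    float memstoreFraction = 0.4f;   // hbase.regionserver.global.memstore.size
    float blockCacheFraction = 0.4f; // hfile.block.cache.size
    int usedPercent = (int) (memstoreFraction * 100) + (int) (blockCacheFraction * 100);
    if (100 - usedPercent < 20) {
      throw new RuntimeException("Combined memstore + block cache fraction exceeds 0.8");
    }
    System.out.println("Heap left for other use: " + (100 - usedPercent) + "%"); // 20%
  }
}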
*/ public static float getGlobalMemStoreHeapLowerMark(final Configuration conf, - boolean honorOldConfig) { + boolean honorOldConfig) { String lowMarkPercentStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_KEY); if (lowMarkPercentStr != null) { float lowMarkPercent = Float.parseFloat(lowMarkPercentStr); if (lowMarkPercent > 1.0f) { LOG.error("Bad configuration value for " + MEMSTORE_SIZE_LOWER_LIMIT_KEY + ": " - + lowMarkPercent + ". Using 1.0f instead."); + + lowMarkPercent + ". Using 1.0f instead."); lowMarkPercent = 1.0f; } return lowMarkPercent; @@ -141,15 +138,15 @@ public class MemorySizeUtil { String lowerWaterMarkOldValStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY); if (lowerWaterMarkOldValStr != null) { LOG.warn(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " is deprecated. Instead use " - + MEMSTORE_SIZE_LOWER_LIMIT_KEY); + + MEMSTORE_SIZE_LOWER_LIMIT_KEY); float lowerWaterMarkOldVal = Float.parseFloat(lowerWaterMarkOldValStr); float upperMarkPercent = getGlobalMemStoreHeapPercent(conf, false); if (lowerWaterMarkOldVal > upperMarkPercent) { lowerWaterMarkOldVal = upperMarkPercent; LOG.error("Value of " + MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " (" + lowerWaterMarkOldVal - + ") is greater than global memstore limit (" + upperMarkPercent + ") set by " - + MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit " - + "to " + upperMarkPercent); + + ") is greater than global memstore limit (" + upperMarkPercent + ") set by " + + MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit " + + "to " + upperMarkPercent); } return lowerWaterMarkOldVal / upperMarkPercent; } @@ -174,8 +171,8 @@ public class MemorySizeUtil { // Off heap max memstore size is configured with turning off MSLAB. It makes no sense. Do a // warn log and go with on heap memstore percentage. By default it will be 40% of Xmx LOG.warn("There is no relevance of configuring '" + OFFHEAP_MEMSTORE_SIZE_KEY + "' when '" - + MemStoreLAB.USEMSLAB_KEY + "' is turned off." - + " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')"); + + MemStoreLAB.USEMSLAB_KEY + "' is turned off." + + " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')"); } } return new Pair<>(getOnheapGlobalMemStoreSize(conf), MemoryType.HEAP); @@ -183,9 +180,7 @@ public class MemorySizeUtil { /** * Returns the onheap global memstore limit based on the config - * 'hbase.regionserver.global.memstore.size'. - * @param conf - * @return the onheap global memstore limt + * 'hbase.regionserver.global.memstore.size'. n * @return the onheap global memstore limt */ public static long getOnheapGlobalMemStoreSize(Configuration conf) { long max = -1L; @@ -198,13 +193,12 @@ public class MemorySizeUtil { } /** - * Retrieve configured size for on heap block cache as percentage of total heap. - * @param conf + * Retrieve configured size for on heap block cache as percentage of total heap. 
n */ public static float getBlockCacheHeapPercent(final Configuration conf) { // L1 block cache is always on heap float l1CachePercent = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, - HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); return l1CachePercent; } @@ -220,25 +214,25 @@ public class MemorySizeUtil { return -1; } if (cachePercentage > 1.0) { - throw new IllegalArgumentException(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + - " must be between 0.0 and 1.0, and not > 1.0"); + throw new IllegalArgumentException( + HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " must be between 0.0 and 1.0, and not > 1.0"); } long max = -1L; final MemoryUsage usage = safeGetHeapMemoryUsage(); if (usage != null) { max = usage.getMax(); } - float onHeapCacheFixedSize = (float) conf - .getLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, + float onHeapCacheFixedSize = + (float) conf.getLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT) / max; // Calculate the amount of heap to give the heap. - return (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage) ? - (long) (max * onHeapCacheFixedSize) : - (long) (max * cachePercentage); + return (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage) + ? (long) (max * onHeapCacheFixedSize) + : (long) (max * cachePercentage); } /** - * @param conf used to read config for bucket cache size. + * @param conf used to read config for bucket cache size. * @return the number of bytes to use for bucket cache, negative if disabled. */ public static long getBucketCacheSize(final Configuration conf) { @@ -246,7 +240,7 @@ public class MemorySizeUtil { float bucketCacheSize = conf.getFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F); if (bucketCacheSize < 1) { throw new IllegalArgumentException("Bucket Cache should be minimum 1 MB in size." - + "Configure 'hbase.bucketcache.size' with > 1 value"); + + "Configure 'hbase.bucketcache.size' with > 1 value"); } return (long) (bucketCacheSize * 1024 * 1024); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java index b5b79670d93..dee517c2e0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,22 +25,17 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** - * Adaptive LIFO blocking queue utilizing CoDel algorithm to prevent queue overloading. - * - * Implementing {@link BlockingQueue} interface to be compatible with {@link RpcExecutor}. - * - * Currently uses milliseconds internally, need to look into whether we should use - * nanoseconds for timeInterval and minDelay. - * + * Adaptive LIFO blocking queue utilizing CoDel algorithm to prevent queue overloading. 
Implementing + * {@link BlockingQueue} interface to be compatible with {@link RpcExecutor}. Currently uses + * milliseconds internally, need to look into whether we should use nanoseconds for timeInterval and + * minDelay. * @see Fail at Scale paper - * - * @see - * CoDel version for generic job queues in Wangle library + * @see CoDel + * version for generic job queues in Wangle library */ @InterfaceAudience.Private public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { @@ -76,7 +71,7 @@ public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { private AtomicBoolean isOverloaded = new AtomicBoolean(false); public AdaptiveLifoCoDelCallQueue(int capacity, int targetDelay, int interval, - double lifoThreshold, LongAdder numGeneralCallsDropped, LongAdder numLifoModeSwitches) { + double lifoThreshold, LongAdder numGeneralCallsDropped, LongAdder numLifoModeSwitches) { this.maxCapacity = capacity; this.queue = new LinkedBlockingDeque<>(capacity); this.codelTargetDelay = targetDelay; @@ -88,29 +83,27 @@ public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { /** * Update tunables. - * * @param newCodelTargetDelay new CoDel target delay - * @param newCodelInterval new CoDel interval - * @param newLifoThreshold new Adaptive Lifo threshold + * @param newCodelInterval new CoDel interval + * @param newLifoThreshold new Adaptive Lifo threshold */ public void updateTunables(int newCodelTargetDelay, int newCodelInterval, - double newLifoThreshold) { + double newLifoThreshold) { this.codelTargetDelay = newCodelTargetDelay; this.codelInterval = newCodelInterval; this.lifoThreshold = newLifoThreshold; } /** - * Behaves as {@link LinkedBlockingQueue#take()}, except it will silently - * skip all calls which it thinks should be dropped. - * + * Behaves as {@link LinkedBlockingQueue#take()}, except it will silently skip all calls which it + * thinks should be dropped. * @return the head of this queue * @throws InterruptedException if interrupted while waiting */ @Override public CallRunner take() throws InterruptedException { CallRunner cr; - while(true) { + while (true) { if (((double) queue.size() / this.maxCapacity) > lifoThreshold) { numLifoModeSwitches.increment(); cr = queue.takeLast(); @@ -130,7 +123,7 @@ public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { public CallRunner poll() { CallRunner cr; boolean switched = false; - while(true) { + while (true) { if (((double) queue.size() / this.maxCapacity) > lifoThreshold) { // Only count once per switch. if (!switched) { @@ -156,8 +149,8 @@ public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { /** * @param callRunner to validate - * @return true if this call needs to be skipped based on call timestamp - * and internal queue state (deemed overloaded). + * @return true if this call needs to be skipped based on call timestamp and internal queue state + * (deemed overloaded). 
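// Hedged sketch of the two decisions the AdaptiveLifoCoDelCallQueue code above combines:
// (1) adaptive LIFO serves the newest call once the queue is more than lifoThreshold full, and
// (2) CoDel drops a call whose queue time exceeds the target delay while the queue is flagged
// overloaded. The real needToDrop() also tracks the minimum delay per interval; the numbers and
// names below are illustrative only.
public class CoDelDecisionSketch {
  static boolean useLifo(int queueSize, int maxCapacity, double lifoThreshold) {
    return (double) queueSize / maxCapacity > lifoThreshold;
  }

  static boolean shouldDrop(long nowMs, long callArrivalMs, int codelTargetDelayMs,
      boolean overloaded) {
    long queuedMs = nowMs - callArrivalMs;
    return overloaded && queuedMs > codelTargetDelayMs;
  }

  public static void main(String[] args) {
    System.out.println(useLifo(850, 1000, 0.8));                     // true: 85% full, go LIFO
    System.out.println(shouldDrop(1_000_100L, 1_000_000L, 5, true)); // true: 100 ms queued > 5 ms
  }
}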
*/ private boolean needToDrop(CallRunner callRunner) { long now = EnvironmentEdgeManager.currentTime(); @@ -167,9 +160,7 @@ public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { // Try and determine if we should reset // the delay time and determine overload - if (now > intervalTime && - !resetDelay.get() && - !resetDelay.getAndSet(true)) { + if (now > intervalTime && !resetDelay.get() && !resetDelay.getAndSet(true)) { intervalTime = now + codelInterval; isOverloaded.set(localMinDelay > codelTargetDelay); @@ -209,129 +200,128 @@ public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { @Override public CallRunner poll(long timeout, TimeUnit unit) throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } - @Override public CallRunner peek() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean remove(Object o) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean contains(Object o) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public Object[] toArray() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public T[] toArray(T[] a) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public void clear() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int drainTo(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int drainTo(Collection c, int maxElements) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public Iterator iterator() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override 
public boolean add(CallRunner callRunner) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public CallRunner remove() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public CallRunner element() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean addAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean isEmpty() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean containsAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean removeAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean retainAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int remainingCapacity() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public void put(CallRunner callRunner) throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean offer(CallRunner callRunner, long timeout, TimeUnit unit) - throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throws InterruptedException { + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java index 8e5467478ca..24bda5a6e12 100644 
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java @@ -37,15 +37,15 @@ public class BalancedQueueRpcExecutor extends RpcExecutor { private final QueueBalancer balancer; public BalancedQueueRpcExecutor(final String name, final int handlerCount, - final int maxQueueLength, final PriorityFunction priority, final Configuration conf, - final Abortable abortable) { + final int maxQueueLength, final PriorityFunction priority, final Configuration conf, + final Abortable abortable) { this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, CALL_QUEUE_TYPE_CONF_DEFAULT), - maxQueueLength, priority, conf, abortable); + maxQueueLength, priority, conf, abortable); } public BalancedQueueRpcExecutor(final String name, final int handlerCount, - final String callQueueType, final int maxQueueLength, final PriorityFunction priority, - final Configuration conf, final Abortable abortable) { + final String callQueueType, final int maxQueueLength, final PriorityFunction priority, + final Configuration conf, final Abortable abortable) { super(name, handlerCount, callQueueType, maxQueueLength, priority, conf, abortable); initializeQueues(this.numCallQueues); this.balancer = getBalancer(name, conf, getQueues()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java index 915b82df426..534a467eda1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +20,10 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; - import org.apache.yetus.audience.InterfaceAudience; /** - * Chain of ByteBuffers. - * Used writing out an array of byte buffers. Writes in chunks. + * Chain of ByteBuffers. Used writing out an array of byte buffers. Writes in chunks. */ @InterfaceAudience.Private class BufferChain { @@ -43,15 +41,15 @@ class BufferChain { } /** - * Expensive. Makes a new buffer to hold a copy of what is in contained ByteBuffers. This - * call drains this instance; it cannot be used subsequent to the call. + * Expensive. Makes a new buffer to hold a copy of what is in contained ByteBuffers. This call + * drains this instance; it cannot be used subsequent to the call. * @return A new byte buffer with the content of all contained ByteBuffers. */ - byte [] getBytes() { + byte[] getBytes() { if (!hasRemaining()) throw new IllegalAccessError(); - byte [] bytes = new byte [this.remaining]; + byte[] bytes = new byte[this.remaining]; int offset = 0; - for (ByteBuffer bb: this.buffers) { + for (ByteBuffer bb : this.buffers) { int length = bb.remaining(); bb.get(bytes, offset, length); offset += length; @@ -65,10 +63,9 @@ class BufferChain { /** * Write out our chain of buffers in chunks - * @param channel Where to write + * @param channel Where to write * @param chunkSize Size of chunks to write. - * @return Amount written. 
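// Minimal sketch (not the HBase BufferChain) of the chunked gathering write described above:
// write at most chunkSize bytes per call by capping each buffer's limit, so a large response is
// pushed to the socket in bounded slices. Names are illustrative.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;

public class ChunkedWriteSketch {
  static long writeChunk(GatheringByteChannel channel, ByteBuffer[] buffers, int chunkSize)
      throws IOException {
    long written = 0;
    for (ByteBuffer bb : buffers) {
      if (!bb.hasRemaining()) {
        continue;                            // already drained by an earlier call
      }
      int allowed = (int) Math.min(bb.remaining(), chunkSize - written);
      if (allowed <= 0) {
        break;                               // chunk budget for this call is spent
      }
      int oldLimit = bb.limit();
      bb.limit(bb.position() + allowed);     // cap this buffer's contribution to the chunk
      written += channel.write(bb);
      bb.limit(oldLimit);                    // restore so the remainder goes out next time
    }
    return written;
  }
}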
- * @throws IOException + * @return Amount written. n */ long write(GatheringByteChannel channel, int chunkSize) throws IOException { int chunkRemaining = chunkSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java index 19a75eae110..f0bd9bb2562 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,10 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.HashMap; import java.util.Map; import java.util.Set; - +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class CallQueueInfo { @@ -45,7 +43,8 @@ public class CallQueueInfo { public long getCallMethodCount(String callQueueName, String methodName) { long methodCount; - Map methodCountMap = callQueueMethodCountsSummary.getOrDefault(callQueueName, null); + Map methodCountMap = + callQueueMethodCountsSummary.getOrDefault(callQueueName, null); if (null != methodCountMap) { methodCount = methodCountMap.getOrDefault(methodName, 0L); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 1f85346908f..0134e11d891 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -35,19 +35,18 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** * The request processing logic, which is usually executed in thread pools provided by an - * {@link RpcScheduler}. Call {@link #run()} to actually execute the contained - * RpcServer.Call + * {@link RpcScheduler}. Call {@link #run()} to actually execute the contained RpcServer.Call */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class CallRunner { - private static final CallDroppedException CALL_DROPPED_EXCEPTION - = new CallDroppedException(); + private static final CallDroppedException CALL_DROPPED_EXCEPTION = new CallDroppedException(); private RpcCall call; private RpcServerInterface rpcServer; @@ -57,7 +56,7 @@ public class CallRunner { /** * On construction, adds the size of this call to the running count of outstanding call sizes. - * Presumption is that we are put on a queue while we wait on an executor to run us. During this + * Presumption is that we are put on a queue while we wait on an executor to run us. During this * time we occupy heap. */ // The constructor is shutdown so only RpcServer in this class can make one of these. 
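// Hedged sketch of the bookkeeping the CallRunner comment above alludes to: while a call waits
// on a queue it occupies heap, so its size is added to a running total on enqueue and removed
// when the call finishes. The limit, names and reject-on-overflow behaviour below are
// assumptions for illustration, not the HBase implementation.
import java.util.concurrent.atomic.AtomicLong;

public class CallSizeAccountingSketch {
  private final AtomicLong outstandingCallBytes = new AtomicLong();
  private final long maxQueuedBytes = 64L * 1024 * 1024; // assumed budget for queued calls

  boolean tryEnqueue(long callSize) {
    if (outstandingCallBytes.addAndGet(callSize) > maxQueuedBytes) {
      outstandingCallBytes.addAndGet(-callSize); // roll back and reject the call
      return false;
    }
    return true;
  }

  void onCallFinished(long callSize) {
    outstandingCallBytes.addAndGet(-callSize);
  }
}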
@@ -118,8 +117,8 @@ public class CallRunner { try (Scope ignored1 = ipcServerSpan.makeCurrent()) { if (!this.rpcServer.isStarted()) { InetSocketAddress address = rpcServer.getListenerAddress(); - throw new ServerNotRunningYetException("Server " + - (address != null ? address : "(channel closed)") + " is not running yet"); + throw new ServerNotRunningYetException( + "Server " + (address != null ? address : "(channel closed)") + " is not running yet"); } // make the call resultPair = this.rpcServer.call(call, this.status); @@ -141,7 +140,7 @@ public class CallRunner { errorThrowable = e; error = StringUtils.stringifyException(e); if (e instanceof Error) { - throw (Error)e; + throw (Error) e; } } finally { RpcServer.CurCall.set(null); @@ -163,8 +162,9 @@ public class CallRunner { // don't touch `span` here because its status and `end()` are managed in `call#setResponse()` } catch (OutOfMemoryError e) { TraceUtil.setError(span, e); - if (this.rpcServer.getErrorHandler() != null - && this.rpcServer.getErrorHandler().checkOOME(e)) { + if ( + this.rpcServer.getErrorHandler() != null && this.rpcServer.getErrorHandler().checkOOME(e) + ) { RpcServer.LOG.info("{}: exiting on OutOfMemoryError", Thread.currentThread().getName()); // exception intentionally swallowed } else { @@ -173,9 +173,10 @@ public class CallRunner { } } catch (ClosedChannelException cce) { InetSocketAddress address = rpcServer.getListenerAddress(); - RpcServer.LOG.warn("{}: caught a ClosedChannelException, " + - "this means that the server " + (address != null ? address : "(channel closed)") + - " was processing a request but the client went away. The error message was: {}", + RpcServer.LOG.warn( + "{}: caught a ClosedChannelException, " + "this means that the server " + + (address != null ? address : "(channel closed)") + + " was processing a request but the client went away. The error message was: {}", Thread.currentThread().getName(), cce.getMessage()); TraceUtil.setError(span, cce); } catch (Exception e) { @@ -217,9 +218,10 @@ public class CallRunner { this.rpcServer.getMetrics().exception(CALL_DROPPED_EXCEPTION); } catch (ClosedChannelException cce) { InetSocketAddress address = rpcServer.getListenerAddress(); - RpcServer.LOG.warn("{}: caught a ClosedChannelException, " + - "this means that the server " + (address != null ? address : "(channel closed)") + - " was processing a request but the client went away. The error message was: {}", + RpcServer.LOG.warn( + "{}: caught a ClosedChannelException, " + "this means that the server " + + (address != null ? address : "(channel closed)") + + " was processing a request but the client went away. The error message was: {}", Thread.currentThread().getName(), cce.getMessage()); TraceUtil.setError(span, cce); } catch (Exception e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java index 9ca292751d4..798c3bed959 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,4 +21,5 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @SuppressWarnings("serial") -public class EmptyServiceNameException extends FatalConnectionException {} +public class EmptyServiceNameException extends FatalConnectionException { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java index 82208d4b5ae..7dcf5c1361a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java @@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.yetus.audience.InterfaceAudience; /** - * Balanced queue executor with a fastpath. Because this is FIFO, it has no respect for - * ordering so a fast path skipping the queuing of Calls if an Handler is available, is possible. - * Just pass the Call direct to waiting Handler thread. Try to keep the hot Handlers bubbling - * rather than let them go cold and lose context. Idea taken from Apace Kudu (incubating). See + * Balanced queue executor with a fastpath. Because this is FIFO, it has no respect for ordering so + * a fast path skipping the queuing of Calls if an Handler is available, is possible. Just pass the + * Call direct to waiting Handler thread. Try to keep the hot Handlers bubbling rather than let them + * go cold and lose context. Idea taken from Apace Kudu (incubating). See * https://gerrit.cloudera.org/#/c/2938/7/src/kudu/rpc/service_queue.h */ @InterfaceAudience.Private @@ -42,14 +42,14 @@ public class FastPathBalancedQueueRpcExecutor extends BalancedQueueRpcExecutor { private final Deque fastPathHandlerStack = new ConcurrentLinkedDeque<>(); public FastPathBalancedQueueRpcExecutor(final String name, final int handlerCount, - final int maxQueueLength, final PriorityFunction priority, final Configuration conf, - final Abortable abortable) { + final int maxQueueLength, final PriorityFunction priority, final Configuration conf, + final Abortable abortable) { super(name, handlerCount, maxQueueLength, priority, conf, abortable); } public FastPathBalancedQueueRpcExecutor(final String name, final int handlerCount, - final String callQueueType, final int maxQueueLength, final PriorityFunction priority, - final Configuration conf, final Abortable abortable) { + final String callQueueType, final int maxQueueLength, final PriorityFunction priority, + final Configuration conf, final Abortable abortable) { super(name, handlerCount, callQueueType, maxQueueLength, priority, conf, abortable); } @@ -64,13 +64,13 @@ public class FastPathBalancedQueueRpcExecutor extends BalancedQueueRpcExecutor { @Override public boolean dispatch(CallRunner callTask) { - //FastPathHandlers don't check queue limits, so if we're completely shut down - //we have to prevent ourselves from using the handler in the first place - if (currentQueueLimit == 0){ + // FastPathHandlers don't check queue limits, so if we're completely shut down + // we have to prevent ourselves from using the handler in the first place + if (currentQueueLimit == 0) { return false; } FastPathRpcHandler handler = popReadyHandler(); - return handler != null? handler.loadCallRunner(callTask): super.dispatch(callTask); + return handler != null ? 
handler.loadCallRunner(callTask) : super.dispatch(callTask); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java index b23508a9425..63436e1dd4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java @@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceStability; * RPC Executor that extends {@link RWQueueRpcExecutor} with fast-path feature, used in * {@link FastPathBalancedQueueRpcExecutor}. */ -@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class FastPathRWQueueRpcExecutor extends RWQueueRpcExecutor { @@ -49,8 +49,9 @@ public class FastPathRWQueueRpcExecutor extends RWQueueRpcExecutor { final int handlerCount, final BlockingQueue q, final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, final Abortable abortable) { - Deque handlerStack = name.contains("read") ? readHandlerStack : - name.contains("write") ? writeHandlerStack : scanHandlerStack; + Deque handlerStack = name.contains("read") ? readHandlerStack + : name.contains("write") ? writeHandlerStack + : scanHandlerStack; return new FastPathRpcHandler(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, abortable, handlerStack); } @@ -60,9 +61,11 @@ public class FastPathRWQueueRpcExecutor extends RWQueueRpcExecutor { RpcCall call = callTask.getRpcCall(); boolean shouldDispatchToWriteQueue = isWriteRequest(call.getHeader(), call.getParam()); boolean shouldDispatchToScanQueue = shouldDispatchToScanQueue(callTask); - FastPathRpcHandler handler = shouldDispatchToWriteQueue ? writeHandlerStack.poll() : - shouldDispatchToScanQueue ? scanHandlerStack.poll() : readHandlerStack.poll(); - return handler != null ? handler.loadCallRunner(callTask) : - dispatchTo(shouldDispatchToWriteQueue, shouldDispatchToScanQueue, callTask); + FastPathRpcHandler handler = shouldDispatchToWriteQueue ? writeHandlerStack.poll() + : shouldDispatchToScanQueue ? scanHandlerStack.poll() + : readHandlerStack.poll(); + return handler != null + ? handler.loadCallRunner(callTask) + : dispatchTo(shouldDispatchToWriteQueue, shouldDispatchToScanQueue, callTask); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java index 6d20aeb0157..991a8019d0f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,9 +36,8 @@ public class FastPathRpcHandler extends RpcHandler { private CallRunner loadedCallRunner; FastPathRpcHandler(String name, double handlerFailureThreshhold, int handlerCount, - BlockingQueue q, AtomicInteger activeHandlerCount, - AtomicInteger failedHandlerCount, final Abortable abortable, - final Deque fastPathHandlerStack) { + BlockingQueue q, AtomicInteger activeHandlerCount, AtomicInteger failedHandlerCount, + final Abortable abortable, final Deque fastPathHandlerStack) { super(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, abortable); this.fastPathHandlerStack = fastPathHandlerStack; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java index cfd085ebc77..b1b2193d5b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java @@ -28,13 +28,13 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil; /** - * A very simple {@code }RpcScheduler} that serves incoming requests in order. - * - * This can be used for HMaster, where no prioritization is needed. + * A very simple {@code }RpcScheduler} that serves incoming requests in order. This can be used for + * HMaster, where no prioritization is needed. */ @InterfaceAudience.Private public class FifoRpcScheduler extends RpcScheduler { @@ -47,7 +47,7 @@ public class FifoRpcScheduler extends RpcScheduler { public FifoRpcScheduler(Configuration conf, int handlerCount) { this.handlerCount = handlerCount; this.maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, - handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); } @Override @@ -95,7 +95,7 @@ public class FifoRpcScheduler extends RpcScheduler { } protected boolean executeRpcCall(final ThreadPoolExecutor executor, final AtomicInteger queueSize, - final CallRunner task) { + final CallRunner task) { // Executors provide no offer, so make our own. 
int queued = queueSize.getAndIncrement(); if (maxQueueLength > 0 && queued >= maxQueueLength) { @@ -103,7 +103,7 @@ public class FifoRpcScheduler extends RpcScheduler { return false; } - executor.execute(new FifoCallRunner(task){ + executor.execute(new FifoCallRunner(task) { @Override public void run() { task.setStatus(RpcServer.getStatus()); @@ -217,7 +217,7 @@ public class FifoRpcScheduler extends RpcScheduler { } protected void updateMethodCountAndSizeByQueue(BlockingQueue queue, - HashMap methodCount, HashMap methodSize) { + HashMap methodCount, HashMap methodSize) { for (Runnable r : queue) { FifoCallRunner mcr = (FifoCallRunner) r; RpcCall rpcCall = mcr.getCallRunner().getRpcCall(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java index f8ba186fb3d..3d1a7aa8ade 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; @@ -31,5 +29,5 @@ public interface HBaseRPCErrorHandler { * @param e the throwable * @return if the server should be shut down */ - boolean checkOOME(final Throwable e) ; + boolean checkOOME(final Throwable e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java index 6a3899f3290..f139ab3e563 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java @@ -28,7 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -49,7 +48,7 @@ public class MasterFifoRpcScheduler extends FifoRpcScheduler { * is "hbase.regionserver.handler.count" value minus RSReport handlers count, but at least 1 too. 
*/ public static final String MASTER_SERVER_REPORT_HANDLER_COUNT = - "hbase.master.server.report.handler.count"; + "hbase.master.server.report.handler.count"; private static final String REGION_SERVER_REPORT = "RegionServerReport"; private final int rsReportHandlerCount; private final int rsRsreportMaxQueueLength; @@ -57,7 +56,7 @@ public class MasterFifoRpcScheduler extends FifoRpcScheduler { private ThreadPoolExecutor rsReportExecutor; public MasterFifoRpcScheduler(Configuration conf, int callHandlerCount, - int rsReportHandlerCount) { + int rsReportHandlerCount) { super(conf, callHandlerCount); this.rsReportHandlerCount = rsReportHandlerCount; this.rsRsreportMaxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, @@ -68,7 +67,7 @@ public class MasterFifoRpcScheduler extends FifoRpcScheduler { public void start() { LOG.info( "Using {} as call queue; handlerCount={}; maxQueueLength={}; rsReportHandlerCount={}; " - + "rsReportMaxQueueLength={}", + + "rsReportMaxQueueLength={}", this.getClass().getSimpleName(), handlerCount, maxQueueLength, rsReportHandlerCount, rsRsreportMaxQueueLength); this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java index c9e4270d918..a86e6554b1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.conf.Configuration; @@ -30,13 +29,13 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceStability.Evolving public class MetaRWQueueRpcExecutor extends RWQueueRpcExecutor { public static final String META_CALL_QUEUE_READ_SHARE_CONF_KEY = - "hbase.ipc.server.metacallqueue.read.ratio"; + "hbase.ipc.server.metacallqueue.read.ratio"; public static final String META_CALL_QUEUE_SCAN_SHARE_CONF_KEY = - "hbase.ipc.server.metacallqueue.scan.ratio"; + "hbase.ipc.server.metacallqueue.scan.ratio"; public static final float DEFAULT_META_CALL_QUEUE_READ_SHARE = 0.9f; public MetaRWQueueRpcExecutor(final String name, final int handlerCount, final int maxQueueLength, - final PriorityFunction priority, final Configuration conf, final Abortable abortable) { + final PriorityFunction priority, final Configuration conf, final Abortable abortable) { super(name, handlerCount, maxQueueLength, priority, conf, abortable); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index 2e78ef37441..53471e684d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CallQueueTooBigException; +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.exceptions.RequestTooBigException; -import org.apache.hadoop.hbase.quotas.QuotaExceededException; -import org.apache.hadoop.hbase.quotas.RpcThrottlingException; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.exceptions.RequestTooBigException; import org.apache.hadoop.hbase.exceptions.ScannerResetException; +import org.apache.hadoop.hbase.quotas.QuotaExceededException; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +45,7 @@ public class MetricsHBaseServer { public MetricsHBaseServer(String serverName, MetricsHBaseServerWrapper wrapper) { serverWrapper = wrapper; source = CompatibilitySingletonFactory.getInstance(MetricsHBaseServerSourceFactory.class) - .create(serverName, wrapper); + .create(serverName, wrapper); } void authorizationSuccess() { @@ -78,9 +76,13 @@ public class MetricsHBaseServer { source.receivedBytes(count); } - void sentResponse(long count) { source.sentResponse(count); } + void sentResponse(long count) { + source.sentResponse(count); + } - void receivedRequest(long count) { source.receivedRequest(count); } + void receivedRequest(long count) { + source.receivedRequest(count); + } void dequeuedCall(int qTime) { source.dequeuedCall(qTime); @@ -98,12 +100,9 @@ public class MetricsHBaseServer { source.exception(); /** - * Keep some metrics for commonly seen exceptions - * - * Try and put the most common types first. - * Place child types before the parent type that they extend. - * - * If this gets much larger we might have to go to a hashmap + * Keep some metrics for commonly seen exceptions Try and put the most common types first. Place + * child types before the parent type that they extend. If this gets much larger we might have + * to go to a hashmap */ if (throwable != null) { if (throwable instanceof OutOfOrderScannerNextException) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java index 7df63586ab8..0b00bba04fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.util.DirectMemoryUtils; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java index 01cf9b59d06..0a5dd0ecf50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
          + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -33,10 +33,8 @@ import org.apache.hbase.thirdparty.io.netty.handler.codec.CorruptedFrameExceptio import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; - /** * Decoder for extracting frame - * * @since 2.0.0 */ @InterfaceAudience.Private @@ -59,8 +57,7 @@ public class NettyRpcFrameDecoder extends ByteToMessageDecoder { } @Override - protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) - throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { if (requestTooBig) { handleTooBigRequest(in); return; @@ -78,11 +75,10 @@ public class NettyRpcFrameDecoder extends ByteToMessageDecoder { if (frameLength > maxFrameLength) { requestTooBig = true; - requestTooBigMessage = - "RPC data length of " + frameLength + " received from " + connection.getHostAddress() - + " is greater than max allowed " + connection.rpcServer.maxRequestSize + ". Set \"" - + SimpleRpcServer.MAX_REQUEST_SIZE - + "\" on server to override this limit (not recommended)"; + requestTooBigMessage = "RPC data length of " + frameLength + " received from " + + connection.getHostAddress() + " is greater than max allowed " + + connection.rpcServer.maxRequestSize + ". Set \"" + SimpleRpcServer.MAX_REQUEST_SIZE + + "\" on server to override this limit (not recommended)"; NettyRpcServer.LOG.warn(requestTooBigMessage); @@ -135,8 +131,10 @@ public class NettyRpcFrameDecoder extends ByteToMessageDecoder { // Make sure the client recognizes the underlying exception // Otherwise, throw a DoNotRetryIOException. - if (VersionInfoUtil.hasMinimumVersion(connection.connectionHeader.getVersionInfo(), - RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)) { + if ( + VersionInfoUtil.hasMinimumVersion(connection.connectionHeader.getVersionInfo(), + RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION) + ) { reqTooBig.setResponse(null, null, reqTooBigEx, requestTooBigMessage); } else { reqTooBig.setResponse(null, null, new DoNotRetryIOException(requestTooBigMessage), @@ -174,10 +172,8 @@ public class NettyRpcFrameDecoder extends ByteToMessageDecoder { } /** - * Reads variable length 32bit int from buffer - * This method is from ProtobufVarint32FrameDecoder in Netty and modified a little bit - * to pass the cyeckstyle rule. - * + * Reads variable length 32bit int from buffer This method is from ProtobufVarint32FrameDecoder in + * Netty and modified a little bit to pass the cyeckstyle rule. * @return decoded int if buffers readerIndex has been forwarded else nonsense value */ private static int readRawVarint32(ByteBuf buffer) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index a518f9f560c..e5ca624002a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -59,13 +59,13 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.GlobalEventExecutor; * An RPC server with Netty4 implementation. 
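The readRawVarint32 helper mentioned in the NettyRpcFrameDecoder hunk above reads the frame length as a protobuf-style varint. A rough sketch of that decoding over a plain java.nio.ByteBuffer is shown below; the real helper operates on a Netty ByteBuf and is adapted from Netty's ProtobufVarint32FrameDecoder, and the class name here is invented.

import java.nio.ByteBuffer;

// Illustrative varint32 decoding: seven payload bits per byte, high bit set means
// "more bytes follow". If the buffer does not yet hold a complete varint, the
// position is restored and -1 is returned (frame lengths are never negative).
final class Varint32 {
  static int tryRead(ByteBuffer buf) {
    buf.mark();
    int result = 0;
    for (int shift = 0; shift < 32; shift += 7) {
      if (!buf.hasRemaining()) {
        buf.reset(); // incomplete varint: wait for more bytes
        return -1;
      }
      byte b = buf.get();
      result |= (b & 0x7f) << shift;
      if ((b & 0x80) == 0) {
        return result; // high bit clear: last byte of the varint
      }
    }
    throw new IllegalArgumentException("malformed varint32");
  }
}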
* @since 2.0.0 */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.CONFIG }) public class NettyRpcServer extends RpcServer { public static final Logger LOG = LoggerFactory.getLogger(NettyRpcServer.class); /** - * Name of property to change netty rpc server eventloop thread count. Default is 0. - * Tests may set this down from unlimited. + * Name of property to change netty rpc server eventloop thread count. Default is 0. Tests may set + * this down from unlimited. */ public static final String HBASE_NETTY_EVENTLOOP_RPCSERVER_THREADCOUNT_KEY = "hbase.netty.eventloop.rpcserver.thread.count"; @@ -79,8 +79,8 @@ public class NettyRpcServer extends RpcServer { new DefaultChannelGroup(GlobalEventExecutor.INSTANCE, true); public NettyRpcServer(Server server, String name, List services, - InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler, - boolean reservoirEnabled) throws IOException { + InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler, + boolean reservoirEnabled) throws IOException { super(server, name, services, bindAddress, conf, scheduler, reservoirEnabled); this.bindAddress = bindAddress; EventLoopGroup eventLoopGroup; @@ -90,31 +90,32 @@ public class NettyRpcServer extends RpcServer { eventLoopGroup = config.group(); channelClass = config.serverChannelClass(); } else { - int threadCount = server == null? EVENTLOOP_THREADCOUNT_DEFAULT: - server.getConfiguration().getInt(HBASE_NETTY_EVENTLOOP_RPCSERVER_THREADCOUNT_KEY, + int threadCount = server == null + ? EVENTLOOP_THREADCOUNT_DEFAULT + : server.getConfiguration().getInt(HBASE_NETTY_EVENTLOOP_RPCSERVER_THREADCOUNT_KEY, EVENTLOOP_THREADCOUNT_DEFAULT); eventLoopGroup = new NioEventLoopGroup(threadCount, new DefaultThreadFactory("NettyRpcServer", true, Thread.MAX_PRIORITY)); channelClass = NioServerSocketChannel.class; } ServerBootstrap bootstrap = new ServerBootstrap().group(eventLoopGroup).channel(channelClass) - .childOption(ChannelOption.TCP_NODELAY, tcpNoDelay) - .childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive) - .childOption(ChannelOption.SO_REUSEADDR, true) - .childHandler(new ChannelInitializer() { + .childOption(ChannelOption.TCP_NODELAY, tcpNoDelay) + .childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive) + .childOption(ChannelOption.SO_REUSEADDR, true) + .childHandler(new ChannelInitializer() { - @Override - protected void initChannel(Channel ch) throws Exception { - ChannelPipeline pipeline = ch.pipeline(); - FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); - preambleDecoder.setSingleDecode(true); - pipeline.addLast("preambleDecoder", preambleDecoder); - pipeline.addLast("preambleHandler", createNettyRpcServerPreambleHandler()); - pipeline.addLast("frameDecoder", new NettyRpcFrameDecoder(maxRequestSize)); - pipeline.addLast("decoder", new NettyRpcServerRequestDecoder(allChannels, metrics)); - pipeline.addLast("encoder", new NettyRpcServerResponseEncoder(metrics)); - } - }); + @Override + protected void initChannel(Channel ch) throws Exception { + ChannelPipeline pipeline = ch.pipeline(); + FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); + preambleDecoder.setSingleDecode(true); + pipeline.addLast("preambleDecoder", preambleDecoder); + pipeline.addLast("preambleHandler", createNettyRpcServerPreambleHandler()); + pipeline.addLast("frameDecoder", new NettyRpcFrameDecoder(maxRequestSize)); + pipeline.addLast("decoder", new 
NettyRpcServerRequestDecoder(allChannels, metrics)); + pipeline.addLast("encoder", new NettyRpcServerResponseEncoder(metrics)); + } + }); try { serverChannel = bootstrap.bind(this.bindAddress).sync().channel(); LOG.info("Bind to {}", serverChannel.localAddress()); @@ -189,19 +190,19 @@ public class NettyRpcServer extends RpcServer { } @Override - public Pair call(BlockingService service, - MethodDescriptor md, Message param, CellScanner cellScanner, - long receiveTime, MonitoredRPCHandler status) throws IOException { + public Pair call(BlockingService service, MethodDescriptor md, + Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) + throws IOException { return call(service, md, param, cellScanner, receiveTime, status, EnvironmentEdgeManager.currentTime(), 0); } @Override public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, - long startTime, int timeout) throws IOException { + Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, + long startTime, int timeout) throws IOException { NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null, - -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null); + -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null); return call(fakeCall, status); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java index 855cf2fda4d..cf2551e1c08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.ipc; +import java.nio.ByteBuffer; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; - /** * Handle connection preamble. * @since 2.0.0` diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java index 40f59ad1259..cc8b07702b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter; import org.apache.hbase.thirdparty.io.netty.channel.group.ChannelGroup; -import org.apache.yetus.audience.InterfaceAudience; - /** * Decoder for rpc request. * @since 2.0.0 @@ -50,7 +50,7 @@ class NettyRpcServerRequestDecoder extends ChannelInboundHandlerAdapter { public void channelActive(ChannelHandlerContext ctx) throws Exception { allChannels.add(ctx.channel()); NettyRpcServer.LOG.trace("Connection {}; # active connections={}", - ctx.channel().remoteAddress(), (allChannels.size() - 1)); + ctx.channel().remoteAddress(), (allChannels.size() - 1)); super.channelActive(ctx); } @@ -66,7 +66,7 @@ class NettyRpcServerRequestDecoder extends ChannelInboundHandlerAdapter { public void channelInactive(ChannelHandlerContext ctx) throws Exception { allChannels.remove(ctx.channel()); NettyRpcServer.LOG.trace("Disconnection {}; # active connections={}", - ctx.channel().remoteAddress(), (allChannels.size() - 1)); + ctx.channel().remoteAddress(), (allChannels.size() - 1)); super.channelInactive(ctx); } @@ -74,7 +74,7 @@ class NettyRpcServerRequestDecoder extends ChannelInboundHandlerAdapter { public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) { allChannels.remove(ctx.channel()); NettyRpcServer.LOG.trace("Connection {}; caught unexpected downstream exception.", - ctx.channel().remoteAddress(), e); + ctx.channel().remoteAddress(), e); ctx.channel().close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java index 09589da1635..30f8dba236a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; -import org.apache.yetus.audience.InterfaceAudience; - /** * Encoder for {@link RpcResponse}. 
* @since 2.0.0 @@ -39,7 +39,7 @@ class NettyRpcServerResponseEncoder extends ChannelOutboundHandlerAdapter { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof RpcResponse) { RpcResponse resp = (RpcResponse) msg; BufferChain buf = resp.getResponse(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java index 8dc08c97bd9..fd0c6d75d88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,15 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; import java.net.InetAddress; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -38,11 +39,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader class NettyServerCall extends ServerCall { NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, - Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size, - InetAddress remoteAddress, long receiveTime, int timeout, ByteBuffAllocator bbAllocator, - CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { + Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size, + InetAddress remoteAddress, long receiveTime, int timeout, ByteBuffAllocator bbAllocator, + CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { super(id, service, md, header, param, cellScanner, connection, size, remoteAddress, receiveTime, - timeout, bbAllocator, cellBlockBuilder, reqCleanup); + timeout, bbAllocator, cellBlockBuilder, reqCleanup); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java index deed9875670..91468fdd039 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,23 +17,23 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.Channel; - import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.CellScanner; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.Channel; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -113,12 +113,11 @@ class NettyServerRpcConnection extends ServerRpcConnection { @Override public NettyServerCall createCall(int id, final BlockingService service, - final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, - long size, final InetAddress remoteAddress, int timeout, - CallCleanup reqCleanup) { + final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, + long size, final InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, - remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, - this.rpcServer.cellBlockBuilder, reqCleanup); + remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, + this.rpcServer.cellBlockBuilder, reqCleanup); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java index 0b88b6ccaa7..8f9f3ed7236 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,21 +23,15 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Abstract class template for defining a pluggable blocking queue implementation to be used - * by the 'pluggable' call queue type in the RpcExecutor. - * - * The intention is that the constructor shape helps re-inforce the expected parameters needed - * to match up to how the RpcExecutor will instantiate instances of the queue. - * - * If the implementation class implements the - * {@link org.apache.hadoop.hbase.conf.ConfigurationObserver} interface, it will also be wired - * into configuration changes. - * - * Instantiation requires a constructor with {@code + * Abstract class template for defining a pluggable blocking queue implementation to be used by the + * 'pluggable' call queue type in the RpcExecutor. 
The intention is that the constructor shape helps + * re-inforce the expected parameters needed to match up to how the RpcExecutor will instantiate + * instances of the queue. If the implementation class implements the + * {@link org.apache.hadoop.hbase.conf.ConfigurationObserver} interface, it will also be wired into + * configuration changes. Instantiation requires a constructor with {@code * final int maxQueueLength, * final PriorityFunction priority, - * final Configuration conf)} - * as the arguments. + * final Configuration conf)} as the arguments. */ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -46,8 +40,8 @@ public abstract class PluggableBlockingQueue implements BlockingQueue} */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java index b294db3aa45..ef19dea2dfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,35 +17,30 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.security.User; /** * Function to figure priority of incoming request. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public interface PriorityFunction { /** - * Returns the 'priority type' of the specified request. - * The returned value is mainly used to select the dispatch queue. - * @param header - * @param param - * @param user - * @return Priority of this request. + * Returns the 'priority type' of the specified request. The returned value is mainly used to + * select the dispatch queue. nnn * @return Priority of this request. */ int getPriority(RequestHeader header, Message param, User user); /** - * Returns the deadline of the specified request. - * The returned value is used to sort the dispatch queue. - * @param header - * @param param - * @return Deadline of this request. 0 now, otherwise msec of 'delay' + * Returns the deadline of the specified request. The returned value is used to sort the dispatch + * queue. nn * @return Deadline of this request. 
0 now, otherwise msec of 'delay' */ long getDeadline(RequestHeader header, Message param); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java index ca1546cd83a..dc496de6b73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java index d1141d093ed..a13f5d85823 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java index a5ed6fe0eae..2005cab83fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java @@ -23,6 +23,7 @@ import java.util.Collections; import java.util.Map; import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos; /** @@ -30,21 +31,18 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos; */ @InterfaceAudience.Private final class RPCTInfoGetter implements TextMapGetter { - RPCTInfoGetter() { } + RPCTInfoGetter() { + } @Override public Iterable keys(TracingProtos.RPCTInfo carrier) { - return Optional.ofNullable(carrier) - .map(TracingProtos.RPCTInfo::getHeadersMap) - .map(Map::keySet) + return Optional.ofNullable(carrier).map(TracingProtos.RPCTInfo::getHeadersMap).map(Map::keySet) .orElse(Collections.emptySet()); } @Override public String get(TracingProtos.RPCTInfo carrier, String key) { - return Optional.ofNullable(carrier) - .map(TracingProtos.RPCTInfo::getHeadersMap) - .map(map -> map.get(key)) - .orElse(null); + return Optional.ofNullable(carrier).map(TracingProtos.RPCTInfo::getHeadersMap) + .map(map -> map.get(key)).orElse(null); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java index 563230932cf..1130fb640cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import java.util.Queue; @@ -29,7 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; @@ -39,19 +39,18 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; /** - * RPC Executor that uses different queues for reads and writes. - * With the options to use different queues/executors for gets and scans. - * Each handler has its own queue and there is no stealing. + * RPC Executor that uses different queues for reads and writes. With the options to use different + * queues/executors for gets and scans. Each handler has its own queue and there is no stealing. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class RWQueueRpcExecutor extends RpcExecutor { private static final Logger LOG = LoggerFactory.getLogger(RWQueueRpcExecutor.class); public static final String CALL_QUEUE_READ_SHARE_CONF_KEY = - "hbase.ipc.server.callqueue.read.ratio"; + "hbase.ipc.server.callqueue.read.ratio"; public static final String CALL_QUEUE_SCAN_SHARE_CONF_KEY = - "hbase.ipc.server.callqueue.scan.ratio"; + "hbase.ipc.server.callqueue.scan.ratio"; private final QueueBalancer writeBalancer; private final QueueBalancer readBalancer; @@ -68,7 +67,7 @@ public class RWQueueRpcExecutor extends RpcExecutor { private final AtomicInteger activeScanHandlerCount = new AtomicInteger(0); public RWQueueRpcExecutor(final String name, final int handlerCount, final int maxQueueLength, - final PriorityFunction priority, final Configuration conf, final Abortable abortable) { + final PriorityFunction priority, final Configuration conf, final Abortable abortable) { super(name, handlerCount, maxQueueLength, priority, conf, abortable); float callqReadShare = getReadShare(conf); @@ -80,8 +79,8 @@ public class RWQueueRpcExecutor extends RpcExecutor { int readQueues = calcNumReaders(this.numCallQueues, callqReadShare); int readHandlers = Math.max(readQueues, calcNumReaders(handlerCount, callqReadShare)); - int scanQueues = Math.max(0, (int)Math.floor(readQueues * callqScanShare)); - int scanHandlers = Math.max(0, (int)Math.floor(readHandlers * callqScanShare)); + int scanQueues = Math.max(0, (int) Math.floor(readQueues * callqScanShare)); + int scanHandlers = Math.max(0, (int) Math.floor(readHandlers * callqScanShare)); if ((readQueues - scanQueues) > 0) { readQueues -= scanQueues; @@ -101,11 +100,13 @@ public class RWQueueRpcExecutor extends RpcExecutor { initializeQueues(numScanQueues); this.writeBalancer = getBalancer(name, conf, queues.subList(0, numWriteQueues)); - this.readBalancer = getBalancer(name, conf, queues.subList(numWriteQueues, numWriteQueues + numReadQueues)); - this.scanBalancer = numScanQueues > 0 ? 
- getBalancer(name, conf, queues.subList(numWriteQueues + numReadQueues, - numWriteQueues + numReadQueues + numScanQueues)) : - null; + this.readBalancer = + getBalancer(name, conf, queues.subList(numWriteQueues, numWriteQueues + numReadQueues)); + this.scanBalancer = numScanQueues > 0 + ? getBalancer(name, conf, + queues.subList(numWriteQueues + numReadQueues, + numWriteQueues + numReadQueues + numScanQueues)) + : null; LOG.info(getName() + " writeQueues=" + numWriteQueues + " writeHandlers=" + writeHandlersCount + " readQueues=" + numReadQueues + " readHandlers=" + readHandlersCount + " scanQueues=" @@ -138,7 +139,7 @@ public class RWQueueRpcExecutor extends RpcExecutor { } protected boolean dispatchTo(boolean toWriteQueue, boolean toScanQueue, - final CallRunner callTask) { + final CallRunner callTask) { int queueIndex; if (toWriteQueue) { queueIndex = writeBalancer.getNextQueue(callTask); @@ -176,8 +177,8 @@ public class RWQueueRpcExecutor extends RpcExecutor { @Override public int getScanQueueLength() { int length = 0; - for (int i = numWriteQueues + numReadQueues; - i < (numWriteQueues + numReadQueues + numScanQueues); i++) { + for (int i = numWriteQueues + numReadQueues; i + < (numWriteQueues + numReadQueues + numScanQueues); i++) { length += queues.get(i).size(); } return length; @@ -186,7 +187,7 @@ public class RWQueueRpcExecutor extends RpcExecutor { @Override public int getActiveHandlerCount() { return activeWriteHandlerCount.get() + activeReadHandlerCount.get() - + activeScanHandlerCount.get(); + + activeScanHandlerCount.get(); } @Override @@ -207,9 +208,9 @@ public class RWQueueRpcExecutor extends RpcExecutor { protected boolean isWriteRequest(final RequestHeader header, final Message param) { // TODO: Is there a better way to do this? if (param instanceof MultiRequest) { - MultiRequest multi = (MultiRequest)param; + MultiRequest multi = (MultiRequest) param; for (RegionAction regionAction : multi.getRegionActionList()) { - for (Action action: regionAction.getActionList()) { + for (Action action : regionAction.getActionList()) { if (action.hasMutation()) { return true; } @@ -267,16 +268,16 @@ public class RWQueueRpcExecutor extends RpcExecutor { } /* - * Calculate the number of writers based on the "total count" and the read share. - * You'll get at least one writer. + * Calculate the number of writers based on the "total count" and the read share. You'll get at + * least one writer. */ private static int calcNumWriters(final int count, final float readShare) { - return Math.max(1, count - Math.max(1, (int)Math.round(count * readShare))); + return Math.max(1, count - Math.max(1, (int) Math.round(count * readShare))); } /* - * Calculate the number of readers based on the "total count" and the read share. - * You'll get at least one reader. + * Calculate the number of readers based on the "total count" and the read share. You'll get at + * least one reader. 
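To make the read/write/scan split computed in the RWQueueRpcExecutor constructor above concrete, here is a small worked example using the same two helper formulas; the 0.5 read share and 0.2 scan share are made-up configuration values chosen for illustration, not defaults, and the class is not part of the patch.

public class QueueSplitExample {
  // Mirrors the formulas in the hunk above.
  static int calcNumWriters(int count, float readShare) {
    return Math.max(1, count - Math.max(1, (int) Math.round(count * readShare)));
  }

  static int calcNumReaders(int count, float readShare) {
    return count - calcNumWriters(count, readShare);
  }

  public static void main(String[] args) {
    int numCallQueues = 10;
    float readShare = 0.5f, scanShare = 0.2f;
    int writeQueues = calcNumWriters(numCallQueues, readShare); // 5
    int readQueues = calcNumReaders(numCallQueues, readShare);  // 5
    int scanQueues = Math.max(0, (int) Math.floor(readQueues * scanShare)); // 1
    // The real constructor only carves scan queues out of the read queues when
    // at least one read queue would remain; here 5 - 1 > 0, so it does.
    readQueues -= scanQueues; // 4
    System.out.println(writeQueues + " write / " + readQueues + " read / " + scanQueues + " scan");
  }
}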
*/ private static int calcNumReaders(final int count, final float readShare) { return count - calcNumWriters(count, readShare); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java index 528affc4804..22699cc03ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.util.List; @@ -35,7 +33,8 @@ public class RandomQueueBalancer implements QueueBalancer { private final int queueSize; private final List> queues; - public RandomQueueBalancer(Configuration conf, String executorName, List> queues) { + public RandomQueueBalancer(Configuration conf, String executorName, + List> queues) { this.queueSize = queues.size(); this.queues = queues; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 7571ac1539c..e12bcf6964f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,25 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; +import java.io.IOException; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.io.IOException; - -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** * Interface of all necessary to carry out a RPC method invocation on the server. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public interface RpcCall extends RpcCallContext { @@ -84,15 +82,15 @@ public interface RpcCall extends RpcCallContext { int getPriority(); /** - * Return the deadline of this call. 
If we can not complete this call in time, - * we can throw a TimeoutIOException and RPCServer will drop it. + * Return the deadline of this call. If we can not complete this call in time, we can throw a + * TimeoutIOException and RPCServer will drop it. * @return The system timestamp of deadline. */ long getDeadline(); /** - * Used to calculate the request call queue size. - * If the total request call size exceeds a limit, the call will be rejected. + * Used to calculate the request call queue size. If the total request call size exceeds a limit, + * the call will be rejected. * @return The raw size of this call. */ long getSize(); @@ -109,17 +107,16 @@ public interface RpcCall extends RpcCallContext { /** * Set the response resulting from this RPC call. - * @param param The result message as response. - * @param cells The CellScanner that possibly carries the payload. + * @param param The result message as response. + * @param cells The CellScanner that possibly carries the payload. * @param errorThrowable The error Throwable resulting from the call. - * @param error Extra error message. + * @param error Extra error message. */ void setResponse(Message param, CellScanner cells, Throwable errorThrowable, String error); /** - * Send the response of this RPC call. - * Implementation provides the underlying facility (connection, etc) to send. - * @throws IOException + * Send the response of this RPC call. Implementation provides the underlying facility + * (connection, etc) to send. n */ void sendResponseIfReady() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 6a4d3a29a52..6f045731ecb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,10 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetAddress; import java.util.Optional; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; /** * Interface of all necessary to carry out a RPC service invocation on the server. This interface @@ -31,19 +31,19 @@ import org.apache.hadoop.hbase.security.User; @InterfaceAudience.Private public interface RpcCallContext { /** - * Check if the caller who made this IPC call has disconnected. - * If called from outside the context of IPC, this does nothing. - * @return < 0 if the caller is still connected. The time in ms - * since the disconnection otherwise + * Check if the caller who made this IPC call has disconnected. If called from outside the context + * of IPC, this does nothing. + * @return < 0 if the caller is still connected. The time in ms since the disconnection + * otherwise */ long disconnectSince(); /** * If the client connected and specified a codec to use, then we will use this codec making - * cellblocks to return. 
If the client did not specify a codec, we assume it does not support - * cellblocks and will return all content protobuf'd (though it makes our serving slower). - * We need to ask this question per call because a server could be hosting both clients that - * support cellblocks while fielding requests from clients that do not. + * cellblocks to return. If the client did not specify a codec, we assume it does not support + * cellblocks and will return all content protobuf'd (though it makes our serving slower). We need + * to ask this question per call because a server could be hosting both clients that support + * cellblocks while fielding requests from clients that do not. * @return True if the client supports cellblocks, else return all content in pb */ boolean isClientCellBlockSupported(); @@ -74,27 +74,22 @@ public interface RpcCallContext { /** * Sets a callback which has to be executed at the end of this RPC call. Such a callback is an - * optional one for any Rpc call. - * - * @param callback + * optional one for any Rpc call. n */ void setCallBack(RpcCallback callback); boolean isRetryImmediatelySupported(); /** - * The size of response cells that have been accumulated so far. - * This along with the corresponding increment call is used to ensure that multi's or - * scans dont get too excessively large + * The size of response cells that have been accumulated so far. This along with the corresponding + * increment call is used to ensure that multi's or scans dont get too excessively large */ long getResponseCellSize(); /** - * Add on the given amount to the retained cell size. - * - * This is not thread safe and not synchronized at all. If this is used by more than one thread - * then everything will break. Since this is called for every row synchronization would be too - * onerous. + * Add on the given amount to the retained cell size. This is not thread safe and not synchronized + * at all. If this is used by more than one thread then everything will break. Since this is + * called for every row synchronization would be too onerous. */ void incrementResponseCellSize(long cellSize); @@ -103,5 +98,6 @@ public interface RpcCallContext { void incrementResponseBlockSize(long blockSize); long getResponseExceptionSize(); + void incrementResponseExceptionSize(long exceptionSize); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java index f0074b54437..a8bf2d76225 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,10 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** * Denotes a callback action that has to be executed at the end of an Rpc Call. 
- * * @see RpcCallContext#setCallBack(RpcCallback) */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java index 3c87b84b43d..ef163640e51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.util.ArrayList; @@ -43,6 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; @@ -80,7 +80,6 @@ public abstract class RpcExecutor { "hbase.ipc.server.callqueue.balancer.class"; public static final Class CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT = RandomQueueBalancer.class; - // These 3 are only used by Codel executor public static final String CALL_QUEUE_CODEL_TARGET_DELAY = "hbase.ipc.server.callqueue.codel.target.delay"; @@ -119,23 +118,25 @@ public abstract class RpcExecutor { private final Abortable abortable; public RpcExecutor(final String name, final int handlerCount, final int maxQueueLength, - final PriorityFunction priority, final Configuration conf, final Abortable abortable) { - this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, - CALL_QUEUE_TYPE_CONF_DEFAULT), maxQueueLength, priority, conf, abortable); + final PriorityFunction priority, final Configuration conf, final Abortable abortable) { + this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, CALL_QUEUE_TYPE_CONF_DEFAULT), + maxQueueLength, priority, conf, abortable); } public RpcExecutor(final String name, final int handlerCount, final String callQueueType, - final int maxQueueLength, final PriorityFunction priority, final Configuration conf, - final Abortable abortable) { + final int maxQueueLength, final PriorityFunction priority, final Configuration conf, + final Abortable abortable) { this.name = Strings.nullToEmpty(name); this.conf = conf; this.abortable = abortable; float callQueuesHandlersFactor = this.conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f); - if (Float.compare(callQueuesHandlersFactor, 1.0f) > 0 || - Float.compare(0.0f, callQueuesHandlersFactor) > 0) { - LOG.warn(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + - " is *ILLEGAL*, it should be in range [0.0, 1.0]"); + if ( + Float.compare(callQueuesHandlersFactor, 1.0f) > 0 + || Float.compare(0.0f, callQueuesHandlersFactor) > 0 + ) { + LOG.warn( + CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + " is *ILLEGAL*, it should be in range [0.0, 1.0]"); // For callQueuesHandlersFactor > 1.0, we just set it 1.0f. 
if (Float.compare(callQueuesHandlersFactor, 1.0f) > 0) { LOG.warn("Set " + CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + " 1.0f"); @@ -154,16 +155,16 @@ public abstract class RpcExecutor { if (isDeadlineQueueType(callQueueType)) { this.name += ".Deadline"; - this.queueInitArgs = new Object[] { maxQueueLength, - new CallPriorityComparator(conf, priority) }; + this.queueInitArgs = + new Object[] { maxQueueLength, new CallPriorityComparator(conf, priority) }; this.queueClass = BoundedPriorityBlockingQueue.class; } else if (isCodelQueueType(callQueueType)) { this.name += ".Codel"; - int codelTargetDelay = conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, - CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); + int codelTargetDelay = + conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); int codelInterval = conf.getInt(CALL_QUEUE_CODEL_INTERVAL, CALL_QUEUE_CODEL_DEFAULT_INTERVAL); - double codelLifoThreshold = conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, - CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); + double codelLifoThreshold = + conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); this.queueInitArgs = new Object[] { maxQueueLength, codelTargetDelay, codelInterval, codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches }; this.queueClass = AdaptiveLifoCoDelCallQueue.class; @@ -172,8 +173,8 @@ public abstract class RpcExecutor { getPluggableQueueClass(); if (!pluggableQueueClass.isPresent()) { - throw new PluggableRpcQueueNotFound("Pluggable call queue failed to load and selected call" - + " queue type required"); + throw new PluggableRpcQueueNotFound( + "Pluggable call queue failed to load and selected call" + " queue type required"); } else { this.queueInitArgs = new Object[] { maxQueueLength, priority, conf }; this.queueClass = pluggableQueueClass.get(); @@ -184,9 +185,10 @@ public abstract class RpcExecutor { this.queueClass = LinkedBlockingQueue.class; } - LOG.info("Instantiated {} with queueClass={}; " + - "numCallQueues={}, maxQueueLength={}, handlerCount={}", - this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount); + LOG.info( + "Instantiated {} with queueClass={}; " + + "numCallQueues={}, maxQueueLength={}, handlerCount={}", + this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount); } protected int computeNumCallQueues(final int handlerCount, final float callQueuesHandlersFactor) { @@ -197,33 +199,25 @@ public abstract class RpcExecutor { * Return the {@link Descriptors.MethodDescriptor#getName()} from {@code callRunner} or "Unknown". */ private static String getMethodName(final CallRunner callRunner) { - return Optional.ofNullable(callRunner) - .map(CallRunner::getRpcCall) - .map(RpcCall::getMethod) - .map(Descriptors.MethodDescriptor::getName) - .orElse("Unknown"); + return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getMethod) + .map(Descriptors.MethodDescriptor::getName).orElse("Unknown"); } /** * Return the {@link RpcCall#getSize()} from {@code callRunner} or 0L. 
*/ private static long getRpcCallSize(final CallRunner callRunner) { - return Optional.ofNullable(callRunner) - .map(CallRunner::getRpcCall) - .map(RpcCall::getSize) + return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getSize) .orElse(0L); } public Map getCallQueueCountsSummary() { - return queues.stream() - .flatMap(Collection::stream) - .map(RpcExecutor::getMethodName) + return queues.stream().flatMap(Collection::stream).map(RpcExecutor::getMethodName) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); } public Map getCallQueueSizeSummary() { - return queues.stream() - .flatMap(Collection::stream) + return queues.stream().flatMap(Collection::stream) .map(callRunner -> new Pair<>(getMethodName(callRunner), getRpcCallSize(callRunner))) .collect(Collectors.groupingBy(Pair::getFirst, Collectors.summingLong(Pair::getSecond))); } @@ -277,23 +271,24 @@ public abstract class RpcExecutor { * Start up our handlers. */ protected void startHandlers(final String nameSuffix, final int numHandlers, - final List> callQueues, final int qindex, final int qsize, - final int port, final AtomicInteger activeHandlerCount) { + final List> callQueues, final int qindex, final int qsize, + final int port, final AtomicInteger activeHandlerCount) { final String threadPrefix = name + Strings.nullToEmpty(nameSuffix); - double handlerFailureThreshhold = conf == null ? 1.0 : conf.getDouble( - HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); + double handlerFailureThreshhold = conf == null + ? 1.0 + : conf.getDouble(HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, + HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); for (int i = 0; i < numHandlers; i++) { final int index = qindex + (i % qsize); String name = "RpcServer." 
+ threadPrefix + ".handler=" + handlers.size() + ",queue=" + index - + ",port=" + port; + + ",port=" + port; RpcHandler handler = getHandler(name, handlerFailureThreshhold, handlerCount, callQueues.get(index), activeHandlerCount, failedHandlerCount, abortable); handler.start(); handlers.add(handler); } LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, port={}", - handlers.size(), threadPrefix, qsize, port); + handlers.size(), threadPrefix, qsize, port); } /** @@ -301,17 +296,14 @@ public abstract class RpcExecutor { */ private static final QueueBalancer ONE_QUEUE = val -> 0; - public static QueueBalancer getBalancer( - final String executorName, - final Configuration conf, - final List> queues - ) { + public static QueueBalancer getBalancer(final String executorName, final Configuration conf, + final List> queues) { Preconditions.checkArgument(queues.size() > 0, "Queue size is <= 0, must be at least 1"); if (queues.size() == 1) { return ONE_QUEUE; } else { - Class balancerClass = conf.getClass( - CALL_QUEUE_QUEUE_BALANCER_CLASS, CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT); + Class balancerClass = + conf.getClass(CALL_QUEUE_QUEUE_BALANCER_CLASS, CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT); return (QueueBalancer) ReflectionUtils.newInstance(balancerClass, conf, executorName, queues); } } @@ -362,16 +354,16 @@ public abstract class RpcExecutor { } public static boolean isPluggableQueueWithFastPath(String callQueueType, Configuration conf) { - return isPluggableQueueType(callQueueType) && - conf.getBoolean(PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED, false); + return isPluggableQueueType(callQueueType) + && conf.getBoolean(PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED, false); } private Optional>> getPluggableQueueClass() { String queueClassName = conf.get(PLUGGABLE_CALL_QUEUE_CLASS_NAME); if (queueClassName == null) { - LOG.error("Pluggable queue class config at " + PLUGGABLE_CALL_QUEUE_CLASS_NAME + - " was not found"); + LOG.error( + "Pluggable queue class config at " + PLUGGABLE_CALL_QUEUE_CLASS_NAME + " was not found"); return Optional.empty(); } @@ -381,8 +373,8 @@ public abstract class RpcExecutor { if (BlockingQueue.class.isAssignableFrom(clazz)) { return Optional.of((Class>) clazz); } else { - LOG.error("Pluggable Queue class " + queueClassName + - " does not extend BlockingQueue"); + LOG.error( + "Pluggable Queue class " + queueClassName + " does not extend BlockingQueue"); return Optional.empty(); } } catch (ClassNotFoundException exception) { @@ -418,7 +410,7 @@ public abstract class RpcExecutor { /** Returns the length of the pending queue */ public int getQueueLength() { int length = 0; - for (final BlockingQueue queue: queues) { + for (final BlockingQueue queue : queues) { length += queue.size(); } return length; @@ -455,18 +447,18 @@ public abstract class RpcExecutor { public void onConfigurationChange(Configuration conf) { // update CoDel Scheduler tunables - int codelTargetDelay = conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, - CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); + int codelTargetDelay = + conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); int codelInterval = conf.getInt(CALL_QUEUE_CODEL_INTERVAL, CALL_QUEUE_CODEL_DEFAULT_INTERVAL); - double codelLifoThreshold = conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, - CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); + double codelLifoThreshold = + conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); for (BlockingQueue queue : queues) { if (queue 
instanceof AdaptiveLifoCoDelCallQueue) { ((AdaptiveLifoCoDelCallQueue) queue).updateTunables(codelTargetDelay, codelInterval, codelLifoThreshold); } else if (queue instanceof ConfigurationObserver) { - ((ConfigurationObserver)queue).onConfigurationChange(conf); + ((ConfigurationObserver) queue).onConfigurationChange(conf); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java index ce103e7ec09..8c762b6873a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Thread to handle rpc call. - * Should only be used in {@link RpcExecutor} and its sub-classes. + * Thread to handle rpc call. Should only be used in {@link RpcExecutor} and its sub-classes. */ @InterfaceAudience.Private public class RpcHandler extends Thread { @@ -65,8 +64,7 @@ public class RpcHandler extends Thread { } /** - * @return A {@link CallRunner} - * @throws InterruptedException + * @return A {@link CallRunner} n */ protected CallRunner getCallRunner() throws InterruptedException { return this.q.take(); @@ -107,8 +105,10 @@ public class RpcHandler extends Thread { } catch (Throwable e) { if (e instanceof Error) { int failedCount = failedHandlerCount.incrementAndGet(); - if (this.handlerFailureThreshhold >= 0 - && failedCount > handlerCount * this.handlerFailureThreshhold) { + if ( + this.handlerFailureThreshhold >= 0 + && failedCount > handlerCount * this.handlerFailureThreshhold + ) { String message = "Number of failed RpcServer handler runs exceeded threshhold " + this.handlerFailureThreshhold + "; reason: " + StringUtils.stringifyException(e); if (abortable != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java index 7174a409c93..7840228621c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java index 0bdaa4ad8a7..d81b3224901 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java @@ -25,14 +25,14 @@ import org.apache.yetus.audience.InterfaceStability; /** * An interface for RPC request scheduling algorithm. 
*/ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public abstract class RpcScheduler { public static final String IPC_SERVER_MAX_CALLQUEUE_LENGTH = - "hbase.ipc.server.max.callqueue.length"; + "hbase.ipc.server.max.callqueue.length"; public static final String IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH = - "hbase.ipc.server.priority.max.callqueue.length"; + "hbase.ipc.server.priority.max.callqueue.length"; /** Exposes runtime information of a {@code RpcServer} that a {@code RpcScheduler} may need. */ public static abstract class Context { @@ -40,9 +40,8 @@ public abstract class RpcScheduler { } /** - * Does some quick initialization. Heavy tasks (e.g. starting threads) should be - * done in {@link #start()}. This method is called before {@code start}. - * + * Does some quick initialization. Heavy tasks (e.g. starting threads) should be done in + * {@link #start()}. This method is called before {@code start}. * @param context provides methods to retrieve runtime information from */ public abstract void init(Context context); @@ -58,7 +57,6 @@ public abstract class RpcScheduler { /** * Dispatches an RPC request asynchronously. An implementation is free to choose to process the * request immediately or delay it for later processing. - * * @param task the request to be dispatched */ public abstract boolean dispatch(CallRunner task); @@ -94,15 +92,15 @@ public abstract class RpcScheduler { public abstract int getActiveReplicationRpcHandlerCount(); /** - * If CoDel-based RPC executors are used, retrieves the number of Calls that were dropped - * from general queue because RPC executor is under high load; returns 0 otherwise. + * If CoDel-based RPC executors are used, retrieves the number of Calls that were dropped from + * general queue because RPC executor is under high load; returns 0 otherwise. */ public abstract long getNumGeneralCallsDropped(); /** - * If CoDel-based RPC executors are used, retrieves the number of Calls that were - * picked from the tail of the queue (indicating adaptive LIFO mode, when - * in the period of overloade we serve last requests first); returns 0 otherwise. + * If CoDel-based RPC executors are used, retrieves the number of Calls that were picked from the + * tail of the queue (indicating adaptive LIFO mode, when in the period of overloade we serve last + * requests first); returns 0 otherwise. */ public abstract long getNumLifoModeSwitches(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java index 12da141290f..bab3e80d322 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetSocketAddress; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -26,7 +25,7 @@ class RpcSchedulerContext extends RpcScheduler.Context { private final RpcServer rpcServer; /** - * @param rpcServer + * n */ RpcSchedulerContext(final RpcServer rpcServer) { this.rpcServer = rpcServer; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 0c89248fca3..fa3bc2cc63a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; @@ -80,15 +79,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHea /** * An RPC server that hosts protobuf described Services. - * */ @InterfaceAudience.Private -public abstract class RpcServer implements RpcServerInterface, - ConfigurationObserver { +public abstract class RpcServer implements RpcServerInterface, ConfigurationObserver { // LOG is being used in CallRunner and the log level is being changed in tests public static final Logger LOG = LoggerFactory.getLogger(RpcServer.class); - protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION - = new CallQueueTooBigException(); + protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION = + new CallQueueTooBigException(); private static final String MULTI_GETS = "multi.gets"; private static final String MULTI_MUTATIONS = "multi.mutations"; @@ -104,7 +101,7 @@ public abstract class RpcServer implements RpcServerInterface, * Whether we allow a fallback to SIMPLE auth for insecure clients when security is enabled. */ public static final String FALLBACK_TO_INSECURE_CLIENT_AUTH = - "hbase.ipc.server.fallback-to-simple-auth-allowed"; + "hbase.ipc.server.fallback-to-simple-auth-allowed"; /** * How many calls/handler are allowed in the queue. @@ -115,15 +112,15 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String AUTH_FAILED_FOR = "Auth failed for "; protected static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; - protected static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger." - + Server.class.getName()); + protected static final Logger AUDITLOG = + LoggerFactory.getLogger("SecurityLogger." + Server.class.getName()); protected SecretManager secretManager; protected final Map saslProps; protected ServiceAuthorizationManager authManager; - /** This is set to Call object before Handler invokes an RPC and ybdie - * after the call returns. + /** + * This is set to Call object before Handler invokes an RPC and ybdie after the call returns. */ protected static final ThreadLocal CurCall = new ThreadLocal<>(); @@ -159,9 +156,9 @@ public abstract class RpcServer implements RpcServerInterface, protected final boolean tcpKeepAlive; // if T then use keepalives /** - * This flag is used to indicate to sub threads when they should go down. When we call - * {@link #start()}, all threads started will consult this flag on whether they should - * keep going. 
It is set to false when {@link #stop()} is called. + * This flag is used to indicate to sub threads when they should go down. When we call + * {@link #start()}, all threads started will consult this flag on whether they should keep going. + * It is set to false when {@link #stop()} is called. */ volatile boolean running = true; @@ -181,8 +178,8 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String WARN_RESPONSE_SIZE = "hbase.ipc.warn.response.size"; /** - * Minimum allowable timeout (in milliseconds) in rpc request's header. This - * configuration exists to prevent the rpc service regarding this request as timeout immediately. + * Minimum allowable timeout (in milliseconds) in rpc request's header. This configuration exists + * to prevent the rpc service regarding this request as timeout immediately. */ protected static final String MIN_CLIENT_REQUEST_TIMEOUT = "hbase.ipc.min.client.request.timeout"; protected static final int DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT = 20; @@ -221,7 +218,6 @@ public abstract class RpcServer implements RpcServerInterface, */ private RSRpcServices rsRpcServices; - /** * Use to add online slowlog responses */ @@ -233,22 +229,25 @@ public abstract class RpcServer implements RpcServerInterface, } /** - * Datastructure for passing a {@link BlockingService} and its associated class of - * protobuf service interface. For example, a server that fielded what is defined - * in the client protobuf service would pass in an implementation of the client blocking service - * and then its ClientService.BlockingInterface.class. Used checking connection setup. + * Datastructure for passing a {@link BlockingService} and its associated class of protobuf + * service interface. For example, a server that fielded what is defined in the client protobuf + * service would pass in an implementation of the client blocking service and then its + * ClientService.BlockingInterface.class. Used checking connection setup. */ public static class BlockingServiceAndInterface { private final BlockingService service; private final Class serviceInterface; + public BlockingServiceAndInterface(final BlockingService service, - final Class serviceInterface) { + final Class serviceInterface) { this.service = service; this.serviceInterface = serviceInterface; } + public Class getServiceInterface() { return this.serviceInterface; } + public BlockingService getBlockingService() { return this.service; } @@ -256,19 +255,15 @@ public abstract class RpcServer implements RpcServerInterface, /** * Constructs a server listening on the named port and address. - * @param server hosting instance of {@link Server}. We will do authentications if an - * instance else pass null for no authentication check. - * @param name Used keying this rpc servers' metrics and for naming the Listener thread. - * @param services A list of services. - * @param bindAddress Where to listen - * @param conf - * @param scheduler - * @param reservoirEnabled Enable ByteBufferPool or not. + * @param server hosting instance of {@link Server}. We will do authentications if an + * instance else pass null for no authentication check. + * @param name Used keying this rpc servers' metrics and for naming the Listener thread. + * @param services A list of services. + * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not. 
*/ public RpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { this.bbAllocator = ByteBuffAllocator.create(conf, reservoirEnabled); this.server = server; this.services = services; @@ -280,8 +275,8 @@ public abstract class RpcServer implements RpcServerInterface, this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME); this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE); - this.minClientRequestTimeout = conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT, - DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT); + this.minClientRequestTimeout = + conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT, DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT); this.maxRequestSize = conf.getInt(MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE); this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this)); @@ -354,12 +349,10 @@ public abstract class RpcServer implements RpcServerInterface, if (!isSecurityEnabled) return null; if (server == null) return null; Configuration conf = server.getConfiguration(); - long keyUpdateInterval = - conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000); - long maxAge = - conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000); + long keyUpdateInterval = conf.getLong("hbase.auth.key.update.interval", 24 * 60 * 60 * 1000); + long maxAge = conf.getLong("hbase.auth.token.max.lifetime", 7 * 24 * 60 * 60 * 1000); return new AuthenticationTokenSecretManager(conf, server.getZooKeeper(), - server.getServerName().toString(), keyUpdateInterval, maxAge); + server.getServerName().toString(), keyUpdateInterval, maxAge); } public SecretManager getSecretManager() { @@ -372,22 +365,21 @@ public abstract class RpcServer implements RpcServerInterface, } /** - * This is a server side method, which is invoked over RPC. On success - * the return response has protobuf response payload. On failure, the - * exception name and the stack trace are returned in the protobuf response. + * This is a server side method, which is invoked over RPC. On success the return response has + * protobuf response payload. On failure, the exception name and the stack trace are returned in + * the protobuf response. */ @Override - public Pair call(RpcCall call, - MonitoredRPCHandler status) throws IOException { + public Pair call(RpcCall call, MonitoredRPCHandler status) + throws IOException { try { MethodDescriptor md = call.getMethod(); Message param = call.getParam(); - status.setRPC(md.getName(), new Object[]{param}, - call.getReceiveTime()); + status.setRPC(md.getName(), new Object[] { param }, call.getReceiveTime()); // TODO: Review after we add in encoded data blocks. 
status.setRPCPacket(param); status.resume("Servicing call"); - //get an instance of the method arg type + // get an instance of the method arg type HBaseRpcController controller = new HBaseRpcControllerImpl(call.getCellScanner()); controller.setCallTimeout(call.getTimeout()); Message result = call.getService().callBlockingMethod(md, controller, param); @@ -398,11 +390,9 @@ public abstract class RpcServer implements RpcServerInterface, int qTime = (int) (startTime - receiveTime); int totalTime = (int) (endTime - receiveTime); if (LOG.isTraceEnabled()) { - LOG.trace(CurCall.get().toString() + - ", response " + TextFormat.shortDebugString(result) + - " queueTime: " + qTime + - " processingTime: " + processingTime + - " totalTime: " + totalTime); + LOG.trace(CurCall.get().toString() + ", response " + TextFormat.shortDebugString(result) + + " queueTime: " + qTime + " processingTime: " + processingTime + " totalTime: " + + totalTime); } // Use the raw request call size for now. long requestSize = call.getSize(); @@ -425,24 +415,21 @@ public abstract class RpcServer implements RpcServerInterface, final String userName = call.getRequestUserName().orElse(StringUtils.EMPTY); // when tagging, we let TooLarge trump TooSmall to keep output simple // note that large responses will often also be slow. - logResponse(param, - md.getName(), md.getName() + "(" + param.getClass().getName() + ")", - tooLarge, tooSlow, - status.getClient(), startTime, processingTime, qTime, - responseSize, userName); + logResponse(param, md.getName(), md.getName() + "(" + param.getClass().getName() + ")", + tooLarge, tooSlow, status.getClient(), startTime, processingTime, qTime, responseSize, + userName); if (this.namedQueueRecorder != null && this.isOnlineLogProviderEnabled) { // send logs to ring buffer owned by slowLogRecorder final String className = server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); - this.namedQueueRecorder.addRecord( - new RpcLogDetails(call, param, status.getClient(), responseSize, className, tooSlow, - tooLarge)); + this.namedQueueRecorder.addRecord(new RpcLogDetails(call, param, status.getClient(), + responseSize, className, tooSlow, tooLarge)); } } return new Pair<>(result, controller.cellScanner()); } catch (Throwable e) { - // The above callBlockingMethod will always return a SE. Strip the SE wrapper before - // putting it on the wire. Its needed to adhere to the pb Service Interface but we don't + // The above callBlockingMethod will always return a SE. Strip the SE wrapper before + // putting it on the wire. Its needed to adhere to the pb Service Interface but we don't // need to pass it over the wire. if (e instanceof ServiceException) { if (e.getCause() == null) { @@ -456,33 +443,31 @@ public abstract class RpcServer implements RpcServerInterface, metrics.exception(e); if (e instanceof LinkageError) throw new DoNotRetryIOException(e); - if (e instanceof IOException) throw (IOException)e; + if (e instanceof IOException) throw (IOException) e; LOG.error("Unexpected throwable object ", e); throw new IOException(e.getMessage(), e); } } /** - * Logs an RPC response to the LOG file, producing valid JSON objects for - * client Operations. - * @param param The parameters received in the call. - * @param methodName The name of the method invoked - * @param call The string representation of the call - * @param tooLarge To indicate if the event is tooLarge - * @param tooSlow To indicate if the event is tooSlow - * @param clientAddress The address of the client who made this call. 
- * @param startTime The time that the call was initiated, in ms. + * Logs an RPC response to the LOG file, producing valid JSON objects for client Operations. + * @param param The parameters received in the call. + * @param methodName The name of the method invoked + * @param call The string representation of the call + * @param tooLarge To indicate if the event is tooLarge + * @param tooSlow To indicate if the event is tooSlow + * @param clientAddress The address of the client who made this call. + * @param startTime The time that the call was initiated, in ms. * @param processingTime The duration that the call took to run, in ms. - * @param qTime The duration that the call spent on the queue - * prior to being initiated, in ms. - * @param responseSize The size in bytes of the response buffer. - * @param userName UserName of the current RPC Call + * @param qTime The duration that the call spent on the queue prior to being initiated, + * in ms. + * @param responseSize The size in bytes of the response buffer. + * @param userName UserName of the current RPC Call */ - void logResponse(Message param, String methodName, String call, boolean tooLarge, - boolean tooSlow, String clientAddress, long startTime, int processingTime, int qTime, - long responseSize, String userName) { - final String className = server == null ? StringUtils.EMPTY : - server.getClass().getSimpleName(); + void logResponse(Message param, String methodName, String call, boolean tooLarge, boolean tooSlow, + String clientAddress, long startTime, int processingTime, int qTime, long responseSize, + String userName) { + final String className = server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); // base information that is reported regardless of type of call Map responseInfo = new HashMap<>(); responseInfo.put("starttimems", startTime); @@ -517,9 +502,9 @@ public abstract class RpcServer implements RpcServerInterface, int numGets = 0; int numMutations = 0; int numServiceCalls = 0; - ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest)param; + ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest) param; for (ClientProtos.RegionAction regionAction : multi.getRegionActionList()) { - for (ClientProtos.Action action: regionAction.getActionList()) { + for (ClientProtos.Action action : regionAction.getActionList()) { if (action.hasMutation()) { numMutations++; } @@ -535,15 +520,14 @@ public abstract class RpcServer implements RpcServerInterface, responseInfo.put(MULTI_MUTATIONS, numMutations); responseInfo.put(MULTI_SERVICE_CALLS, numServiceCalls); } - final String tag = (tooLarge && tooSlow) ? "TooLarge & TooSlow" - : (tooSlow ? "TooSlow" : "TooLarge"); + final String tag = + (tooLarge && tooSlow) ? "TooLarge & TooSlow" : (tooSlow ? "TooSlow" : "TooLarge"); LOG.warn("(response" + tag + "): " + GSON.toJson(responseInfo)); } - /** - * Truncate to number of chars decided by conf hbase.ipc.trace.log.max.length - * if TRACE is on else to 150 chars Refer to Jira HBASE-20826 and HBASE-20942 + * Truncate to number of chars decided by conf hbase.ipc.trace.log.max.length if TRACE is on else + * to 150 chars Refer to Jira HBASE-20826 and HBASE-20942 * @param strParam stringifiedParam to be truncated * @return truncated trace log string */ @@ -551,7 +535,7 @@ public abstract class RpcServer implements RpcServerInterface, if (LOG.isTraceEnabled()) { int traceLogMaxLength = getConf().getInt(TRACE_LOG_MAX_LENGTH, DEFAULT_TRACE_LOG_MAX_LENGTH); int truncatedLength = - strParam.length() < traceLogMaxLength ? 
strParam.length() : traceLogMaxLength; + strParam.length() < traceLogMaxLength ? strParam.length() : traceLogMaxLength; String truncatedFlag = truncatedLength == strParam.length() ? "" : KEY_WORD_TRUNCATED; return strParam.subSequence(0, truncatedLength) + truncatedFlag; } @@ -587,13 +571,13 @@ public abstract class RpcServer implements RpcServerInterface, /** * Authorize the incoming client connection. - * @param user client user + * @param user client user * @param connection incoming connection - * @param addr InetAddress of incoming connection + * @param addr InetAddress of incoming connection * @throws AuthorizationException when the client isn't authorized to talk the protocol */ public synchronized void authorize(UserGroupInformation user, ConnectionHeader connection, - InetAddress addr) throws AuthorizationException { + InetAddress addr) throws AuthorizationException { if (authorize) { Class c = getServiceInterface(services, connection.getServiceName()); authManager.authorize(user, c, getConf(), addr); @@ -601,29 +585,28 @@ public abstract class RpcServer implements RpcServerInterface, } /** - * When the read or write buffer size is larger than this limit, i/o will be - * done in chunks of this size. Most RPC requests and responses would be - * be smaller. + * When the read or write buffer size is larger than this limit, i/o will be done in chunks of + * this size. Most RPC requests and responses would be be smaller. */ - protected static final int NIO_BUFFER_LIMIT = 64 * 1024; //should not be more than 64KB. + protected static final int NIO_BUFFER_LIMIT = 64 * 1024; // should not be more than 64KB. /** - * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of - * ByteBuffer increases. There should not be any performance degredation. - * + * This is a wrapper around + * {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. If the amount of data + * is large, it writes to channel in smaller chunks. This is to avoid jdk from creating many + * direct buffers as the size of ByteBuffer increases. There should not be any performance + * degredation. * @param channel writable byte channel to write on - * @param buffer buffer to write + * @param buffer buffer to write * @return number of bytes written * @throws java.io.IOException e * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer) */ - protected int channelRead(ReadableByteChannel channel, - ByteBuffer buffer) throws IOException { + protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException { - int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? - channel.read(buffer) : channelIO(channel, null, buffer); + int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) + ? channel.read(buffer) + : channelIO(channel, null, buffer); if (count > 0) { metrics.receivedBytes(count); } @@ -633,17 +616,15 @@ public abstract class RpcServer implements RpcServerInterface, /** * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}. * Only one of readCh or writeCh should be non-null. 
- * - * @param readCh read channel + * @param readCh read channel * @param writeCh write channel - * @param buf buffer to read or write into/out of + * @param buf buffer to read or write into/out of * @return bytes written * @throws java.io.IOException e * @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer) */ - private static int channelIO(ReadableByteChannel readCh, - WritableByteChannel writeCh, - ByteBuffer buf) throws IOException { + private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, + ByteBuffer buf) throws IOException { int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); @@ -670,9 +651,8 @@ public abstract class RpcServer implements RpcServerInterface, } /** - * Needed for features such as delayed calls. We need to be able to store the current call - * so that we can complete it later or ask questions of what is supported by the current ongoing - * call. + * Needed for features such as delayed calls. We need to be able to store the current call so that + * we can complete it later or ask questions of what is supported by the current ongoing call. * @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local) */ public static Optional getCurrentCall() { @@ -696,7 +676,6 @@ public abstract class RpcServer implements RpcServerInterface, return rpcCall; } - /** * Used by {@link org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore}. Set the * rpc call back after mutate region. @@ -722,8 +701,8 @@ public abstract class RpcServer implements RpcServerInterface, abstract public int getNumOpenConnections(); /** - * Returns the username for any user associated with the current RPC - * request or not present if no user is set. + * Returns the username for any user associated with the current RPC request or not present if no + * user is set. */ public static Optional getRequestUserName() { return getRequestUser().map(User::getShortName); @@ -738,11 +717,11 @@ public abstract class RpcServer implements RpcServerInterface, /** * @param serviceName Some arbitrary string that represents a 'service'. - * @param services Available service instances + * @param services Available service instances * @return Matching BlockingServiceAndInterface pair */ protected static BlockingServiceAndInterface getServiceAndInterface( - final List services, final String serviceName) { + final List services, final String serviceName) { for (BlockingServiceAndInterface bs : services) { if (bs.getBlockingService().getDescriptorForType().getName().equals(serviceName)) { return bs; @@ -753,32 +732,28 @@ public abstract class RpcServer implements RpcServerInterface, /** * @param serviceName Some arbitrary string that represents a 'service'. - * @param services Available services and their service interfaces. + * @param services Available services and their service interfaces. * @return Service interface class for serviceName */ - protected static Class getServiceInterface( - final List services, - final String serviceName) { - BlockingServiceAndInterface bsasi = - getServiceAndInterface(services, serviceName); - return bsasi == null? null: bsasi.getServiceInterface(); + protected static Class getServiceInterface(final List services, + final String serviceName) { + BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName); + return bsasi == null ? null : bsasi.getServiceInterface(); } /** * @param serviceName Some arbitrary string that represents a 'service'. 
- * @param services Available services and their service interfaces. + * @param services Available services and their service interfaces. * @return BlockingService that goes with the passed serviceName */ - protected static BlockingService getService( - final List services, - final String serviceName) { - BlockingServiceAndInterface bsasi = - getServiceAndInterface(services, serviceName); - return bsasi == null? null: bsasi.getBlockingService(); + protected static BlockingService getService(final List services, + final String serviceName) { + BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName); + return bsasi == null ? null : bsasi.getBlockingService(); } protected static MonitoredRPCHandler getStatus() { - // It is ugly the way we park status up in RpcServer. Let it be for now. TODO. + // It is ugly the way we park status up in RpcServer. Let it be for now. TODO. MonitoredRPCHandler status = RpcServer.MONITORED_RPC.get(); if (status != null) { return status; @@ -789,9 +764,9 @@ public abstract class RpcServer implements RpcServerInterface, return status; } - /** Returns the remote side ip address when invoked inside an RPC - * Returns null incase of an error. - * @return InetAddress + /** + * Returns the remote side ip address when invoked inside an RPC Returns null incase of an error. + * n */ public static InetAddress getRemoteIp() { RpcCall call = CurCall.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java index 298b4723116..83da2cb4a98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,15 +20,15 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; import java.net.InetSocketAddress; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor; -import org.apache.hadoop.hbase.util.ReflectionUtils; @InterfaceAudience.Private public class RpcServerFactory { @@ -44,19 +44,18 @@ public class RpcServerFactory { } public static RpcServer createRpcServer(final Server server, final String name, - final List services, final InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler) throws IOException { return createRpcServer(server, name, services, bindAddress, conf, scheduler, true); } public static RpcServer createRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { - String rpcServerClass = conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY, - NettyRpcServer.class.getName()); + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + String rpcServerClass = + conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY, NettyRpcServer.class.getName()); StringBuilder servicesList = new StringBuilder(); - for (BlockingServiceAndInterface s: services) { + for (BlockingServiceAndInterface s : services) { ServiceDescriptor sd = s.getBlockingService().getDescriptorForType(); if (sd == null) continue; // Can be null for certain tests like TestTokenAuthentication if (servicesList.length() > 0) servicesList.append(", "); @@ -64,8 +63,8 @@ public class RpcServerFactory { } LOG.info("Creating " + rpcServerClass + " hosting " + servicesList); return ReflectionUtils.instantiateWithCustomCtor(rpcServerClass, - new Class[] { Server.class, String.class, List.class, - InetSocketAddress.class, Configuration.class, RpcScheduler.class, boolean.class }, - new Object[] { server, name, services, bindAddress, conf, scheduler, reservoirEnabled }); + new Class[] { Server.class, String.class, List.class, InetSocketAddress.class, + Configuration.class, RpcScheduler.class, boolean.class }, + new Object[] { server, name, services, bindAddress, conf, scheduler, reservoirEnabled }); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index e06daacf5fe..0b06d792c8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.io.IOException; @@ -25,8 +23,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.yetus.audience.InterfaceAudience; @@ -38,34 +36,36 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Message; @InterfaceAudience.Private public interface RpcServerInterface { void start(); + boolean isStarted(); void stop(); + void join() throws InterruptedException; void setSocketSendBufSize(int size); + InetSocketAddress getListenerAddress(); /** * @deprecated As of release 1.3, this will be removed in HBase 3.0 */ @Deprecated - Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) - throws IOException; + Pair call(BlockingService service, MethodDescriptor md, Message param, + CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) throws IOException; /** * @deprecated As of release 2.0, this will be removed in HBase 3.0 */ @Deprecated Pair call(BlockingService service, MethodDescriptor md, Message param, - CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime, - int timeout) throws IOException; + CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime, + int timeout) throws IOException; - Pair call(RpcCall call, MonitoredRPCHandler status) - throws IOException; + Pair call(RpcCall call, MonitoredRPCHandler status) throws IOException; void setErrorHandler(HBaseRPCErrorHandler handler); + HBaseRPCErrorHandler getErrorHandler(); /** @@ -74,15 +74,14 @@ public interface RpcServerInterface { MetricsHBaseServer getMetrics(); /** - * Add/subtract from the current size of all outstanding calls. Called on setup of a call to add + * Add/subtract from the current size of all outstanding calls. Called on setup of a call to add * call total size and then again at end of a call to remove the call size. * @param diff Change (plus or minus) */ void addCallSize(long diff); /** - * Refresh authentication manager policy. - * @param pp + * Refresh authentication manager policy. 
n */ void refreshAuthManager(Configuration conf, PolicyProvider pp); @@ -98,7 +97,6 @@ public interface RpcServerInterface { /** * Set Online SlowLog Provider - * * @param namedQueueRecorder instance of {@link NamedQueueRecorder} */ void setNamedQueueRecorder(final NamedQueueRecorder namedQueueRecorder); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index d827efea66f..5adc520d904 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,10 +42,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; @@ -54,22 +56,22 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; /** - * Datastructure that holds all necessary to a method invocation and then afterward, carries - * the result. + * Datastructure that holds all necessary to a method invocation and then afterward, carries the + * result. */ @InterfaceAudience.Private public abstract class ServerCall implements RpcCall, RpcResponse { - protected final int id; // the client's call id + protected final int id; // the client's call id protected final BlockingService service; protected final MethodDescriptor md; protected final RequestHeader header; - protected Message param; // the parameter passed + protected Message param; // the parameter passed // Optional cell data passed outside of protobufs. protected final CellScanner cellScanner; - protected final T connection; // connection to client - protected final long receiveTime; // the time received when response is null - // the time served when response is not null + protected final T connection; // connection to client + protected final long receiveTime; // the time received when response is null + // the time served when response is not null protected final int timeout; protected long startTime; protected final long deadline;// the deadline to handle this call, if exceed we can drop it. 
@@ -83,7 +85,7 @@ public abstract class ServerCall implements RpcCa */ protected BufferChain response; - protected final long size; // size of current call + protected final long size; // size of current call protected boolean isError; protected ByteBufferListOutputStream cellBlockStream = null; protected CallCleanup reqCleanup = null; @@ -111,9 +113,9 @@ public abstract class ServerCall implements RpcCa @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "Can't figure why this complaint is happening... see below") ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, - Message param, CellScanner cellScanner, T connection, long size, InetAddress remoteAddress, - long receiveTime, int timeout, ByteBuffAllocator byteBuffAllocator, - CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { + Message param, CellScanner cellScanner, T connection, long size, InetAddress remoteAddress, + long receiveTime, int timeout, ByteBuffAllocator byteBuffAllocator, + CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { this.id = id; this.service = service; this.md = md; @@ -126,7 +128,7 @@ public abstract class ServerCall implements RpcCa this.isError = false; this.size = size; if (connection != null) { - this.user = connection.user; + this.user = connection.user; this.retryImmediatelySupported = connection.retryImmediatelySupported; } else { this.user = null; @@ -142,8 +144,7 @@ public abstract class ServerCall implements RpcCa } /** - * Call is done. Execution happened and we returned results to client. It is - * now safe to cleanup. + * Call is done. Execution happened and we returned results to client. It is now safe to cleanup. */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", justification = "Presume the lock on processing request held by caller is protection enough") @@ -196,9 +197,9 @@ public abstract class ServerCall implements RpcCa @Override public String toString() { - return toShortString() + " param: " + - (this.param != null? ProtobufUtil.getShortTextFormat(this.param): "") + - " connection: " + connection.toString(); + return toShortString() + " param: " + + (this.param != null ? ProtobufUtil.getShortTextFormat(this.param) : "") + " connection: " + + connection.toString(); } @Override @@ -217,12 +218,13 @@ public abstract class ServerCall implements RpcCa */ @Override public String toShortString() { - String serviceName = this.connection.service != null ? - this.connection.service.getDescriptorForType().getName() : "null"; - return "callId: " + this.id + " service: " + serviceName + - " methodName: " + ((this.md != null) ? this.md.getName() : "n/a") + - " size: " + StringUtils.TraditionalBinaryPrefix.long2String(this.size, "", 1) + - " connection: " + connection + " deadline: " + deadline; + String serviceName = this.connection.service != null + ? this.connection.service.getDescriptorForType().getName() + : "null"; + return "callId: " + this.id + " service: " + serviceName + " methodName: " + + ((this.md != null) ? 
this.md.getName() : "n/a") + " size: " + + StringUtils.TraditionalBinaryPrefix.long2String(this.size, "", 1) + " connection: " + + connection + " deadline: " + deadline; } @Override @@ -274,8 +276,7 @@ public abstract class ServerCall implements RpcCa headerBuilder.setCellBlockMeta(cellBlockBuilder.build()); } Message header = headerBuilder.build(); - ByteBuffer headerBuf = - createHeaderAndMessageBytes(m, header, cellBlockSize, cellBlock); + ByteBuffer headerBuf = createHeaderAndMessageBytes(m, header, cellBlockSize, cellBlock); ByteBuffer[] responseBufs = null; int cellBlockBufferSize = 0; if (cellBlock != null) { @@ -309,16 +310,16 @@ public abstract class ServerCall implements RpcCa } static void setExceptionResponse(Throwable t, String errorMsg, - ResponseHeader.Builder headerBuilder) { + ResponseHeader.Builder headerBuilder) { ExceptionResponse.Builder exceptionBuilder = ExceptionResponse.newBuilder(); exceptionBuilder.setExceptionClassName(t.getClass().getName()); exceptionBuilder.setStackTrace(errorMsg); exceptionBuilder.setDoNotRetry(t instanceof DoNotRetryIOException); if (t instanceof RegionMovedException) { - // Special casing for this exception. This is only one carrying a payload. + // Special casing for this exception. This is only one carrying a payload. // Do this instead of build a generic system for allowing exceptions carry // any kind of payload. - RegionMovedException rme = (RegionMovedException)t; + RegionMovedException rme = (RegionMovedException) t; exceptionBuilder.setHostname(rme.getHostname()); exceptionBuilder.setPort(rme.getPort()); } else if (t instanceof HBaseServerException) { @@ -329,8 +330,8 @@ public abstract class ServerCall implements RpcCa headerBuilder.setException(exceptionBuilder.build()); } - static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, - int cellBlockSize, List cellBlock) throws IOException { + static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, int cellBlockSize, + List cellBlock) throws IOException { // Organize the response as a set of bytebuffers rather than collect it all together inside // one big byte array; save on allocations. // for writing the header, we check if there is available space in the buffers @@ -338,10 +339,8 @@ public abstract class ServerCall implements RpcCa // the last buffer in the cellblock. This applies to the cellblock created from the // pool or even the onheap cellblock buffer in case there is no pool enabled. // Possible reuse would avoid creating a temporary array for storing the header every time. - ByteBuffer possiblePBBuf = - (cellBlockSize > 0) ? cellBlock.get(cellBlock.size() - 1) : null; - int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 0, - resultVintSize = 0; + ByteBuffer possiblePBBuf = (cellBlockSize > 0) ? 
cellBlock.get(cellBlock.size() - 1) : null; + int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 0, resultVintSize = 0; if (header != null) { headerSerializedSize = header.getSerializedSize(); headerVintSize = CodedOutputStream.computeUInt32SizeNoTag(headerSerializedSize); @@ -351,15 +350,13 @@ public abstract class ServerCall implements RpcCa resultVintSize = CodedOutputStream.computeUInt32SizeNoTag(resultSerializedSize); } // calculate the total size - int totalSize = headerSerializedSize + headerVintSize - + (resultSerializedSize + resultVintSize) - + cellBlockSize; - int totalPBSize = headerSerializedSize + headerVintSize + resultSerializedSize - + resultVintSize + Bytes.SIZEOF_INT; + int totalSize = headerSerializedSize + headerVintSize + (resultSerializedSize + resultVintSize) + + cellBlockSize; + int totalPBSize = headerSerializedSize + headerVintSize + resultSerializedSize + resultVintSize + + Bytes.SIZEOF_INT; // Only if the last buffer has enough space for header use it. Else allocate // a new buffer. Assume they are all flipped - if (possiblePBBuf != null - && possiblePBBuf.limit() + totalPBSize <= possiblePBBuf.capacity()) { + if (possiblePBBuf != null && possiblePBBuf.limit() + totalPBSize <= possiblePBBuf.capacity()) { // duplicate the buffer. This is where the header is going to be written ByteBuffer pbBuf = possiblePBBuf.duplicate(); // get the current limit @@ -380,7 +377,7 @@ public abstract class ServerCall implements RpcCa } private static void writeToCOS(Message result, Message header, int totalSize, ByteBuffer pbBuf) - throws IOException { + throws IOException { ByteBufferUtils.putInt(pbBuf, totalSize); // create COS that works on BB CodedOutputStream cos = CodedOutputStream.newInstance(pbBuf); @@ -395,7 +392,7 @@ public abstract class ServerCall implements RpcCa } private static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, - int totalSize, int totalPBSize) throws IOException { + int totalSize, int totalPBSize) throws IOException { ByteBuffer pbBuf = ByteBuffer.allocate(totalPBSize); writeToCOS(result, header, totalSize, pbBuf); pbBuf.flip(); @@ -406,10 +403,10 @@ public abstract class ServerCall implements RpcCa if (!this.connection.useSasl) { return bc; } - // Looks like no way around this; saslserver wants a byte array. I have to make it one. + // Looks like no way around this; saslserver wants a byte array. I have to make it one. // THIS IS A BIG UGLY COPY. - byte [] responseBytes = bc.getBytes(); - byte [] token; + byte[] responseBytes = bc.getBytes(); + byte[] token; // synchronization may be needed since there can be multiple Handler // threads using saslServer or Crypto AES to wrap responses. 
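// Illustrative note (not part of the patch): the framing assembled by
// createHeaderAndMessageBytes() above, before any SASL wrapping is applied here, is:
//   [4-byte total length]                      -> the Bytes.SIZEOF_INT written by putInt(totalSize)
//   [vint header size][ResponseHeader bytes]   -> headerVintSize + headerSerializedSize
//   [vint result size][result Message bytes]   -> resultVintSize + resultSerializedSize
//   [cell block buffers, if any]               -> cellBlockSize, kept as separate ByteBuffers
// totalSize counts everything after the length prefix, which is why totalPBSize adds
// Bytes.SIZEOF_INT on top of the header/result portion.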
if (connection.useCryptoAesWrap) { @@ -423,8 +420,8 @@ public abstract class ServerCall implements RpcCa } } if (RpcServer.LOG.isTraceEnabled()) { - RpcServer.LOG.trace("Adding saslServer wrapped token of size " + token.length - + " as call response."); + RpcServer.LOG + .trace("Adding saslServer wrapped token of size " + token.length + " as call response."); } ByteBuffer[] responseBufs = new ByteBuffer[2]; @@ -471,6 +468,7 @@ public abstract class ServerCall implements RpcCa public long getResponseExceptionSize() { return exceptionSize; } + @Override public void incrementResponseExceptionSize(long exSize) { exceptionSize += exSize; @@ -560,8 +558,8 @@ public abstract class ServerCall implements RpcCa public synchronized BufferChain getResponse() { if (connection.useWrap) { /* - * wrapping result with SASL as the last step just before sending it out, so - * every message must have the right increasing sequence number + * wrapping result with SASL as the last step just before sending it out, so every message + * must have the right increasing sequence number */ try { return wrapWithSasl(response); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 2b3c898ba09..f527d31a314 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -89,9 +89,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformati import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo; /** Reads calls from a connection and queues them for handling. */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="VO_VOLATILE_INCREMENT", - justification="False positive according to http://sourceforge.net/p/findbugs/bugs/1032/") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT", + justification = "False positive according to http://sourceforge.net/p/findbugs/bugs/1032/") @InterfaceAudience.Private abstract class ServerRpcConnection implements Closeable { @@ -172,25 +171,23 @@ abstract class ServerRpcConnection implements Closeable { } private String getFatalConnectionString(final int version, final byte authByte) { - return "serverVersion=" + RpcServer.CURRENT_VERSION + - ", clientVersion=" + version + ", authMethod=" + authByte + - // The provider may be null if we failed to parse the header of the request - ", authName=" + (provider == null ? "unknown" : provider.getSaslAuthMethod().getName()) + - " from " + toString(); + return "serverVersion=" + RpcServer.CURRENT_VERSION + ", clientVersion=" + version + + ", authMethod=" + authByte + + // The provider may be null if we failed to parse the header of the request + ", authName=" + (provider == null ? "unknown" : provider.getSaslAuthMethod().getName()) + + " from " + toString(); } /** - * Set up cell block codecs - * @throws FatalConnectionException + * Set up cell block codecs n */ - private void setupCellBlockCodecs(final ConnectionHeader header) - throws FatalConnectionException { + private void setupCellBlockCodecs(final ConnectionHeader header) throws FatalConnectionException { // TODO: Plug in other supported decoders. 
if (!header.hasCellBlockCodecClass()) return; String className = header.getCellBlockCodecClass(); if (className == null || className.length() == 0) return; try { - this.codec = (Codec)Class.forName(className).getDeclaredConstructor().newInstance(); + this.codec = (Codec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCellCodecException(className, e); } @@ -198,50 +195,46 @@ abstract class ServerRpcConnection implements Closeable { className = header.getCellBlockCompressorClass(); try { this.compressionCodec = - (CompressionCodec)Class.forName(className).getDeclaredConstructor().newInstance(); + (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCompressionCodecException(className, e); } } /** - * Set up cipher for rpc encryption with Apache Commons Crypto - * - * @throws FatalConnectionException + * Set up cipher for rpc encryption with Apache Commons Crypto n */ private void setupCryptoCipher(final ConnectionHeader header, - RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) - throws FatalConnectionException { + RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) throws FatalConnectionException { // If simple auth, return if (saslServer == null) return; // check if rpc encryption with Crypto AES String qop = saslServer.getNegotiatedQop(); - boolean isEncryption = SaslUtil.QualityOfProtection.PRIVACY - .getSaslQop().equalsIgnoreCase(qop); - boolean isCryptoAesEncryption = isEncryption && this.rpcServer.conf.getBoolean( - "hbase.rpc.crypto.encryption.aes.enabled", false); + boolean isEncryption = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop().equalsIgnoreCase(qop); + boolean isCryptoAesEncryption = isEncryption + && this.rpcServer.conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false); if (!isCryptoAesEncryption) return; if (!header.hasRpcCryptoCipherTransformation()) return; String transformation = header.getRpcCryptoCipherTransformation(); if (transformation == null || transformation.length() == 0) return; - // Negotiates AES based on complete saslServer. - // The Crypto metadata need to be encrypted and send to client. + // Negotiates AES based on complete saslServer. + // The Crypto metadata need to be encrypted and send to client. 
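// Illustrative note (not part of the patch): Crypto AES wrapping is only negotiated when
// the SASL handshake settled on QoP privacy ("auth-conf") and the server enables it via
// hbase.rpc.crypto.encryption.aes.enabled. A hedged configuration sketch, using only keys
// that appear in this method plus the standard hbase.rpc.protection setting:
//   conf.set("hbase.rpc.protection", "privacy");                            // negotiate QoP auth-conf
//   conf.setBoolean("hbase.rpc.crypto.encryption.aes.enabled", true);       // allow AES wrapping
//   conf.setInt("hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128); // must be a multiple of 8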
Properties properties = new Properties(); // the property for SecureRandomFactory properties.setProperty(CryptoRandomFactory.CLASSES_KEY, - this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random", - "org.apache.commons.crypto.random.JavaCryptoRandom")); + this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random", + "org.apache.commons.crypto.random.JavaCryptoRandom")); // the property for cipher class properties.setProperty(CryptoCipherFactory.CLASSES_KEY, - this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", - "org.apache.commons.crypto.cipher.JceCipher")); + this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", + "org.apache.commons.crypto.cipher.JceCipher")); - int cipherKeyBits = this.rpcServer.conf.getInt( - "hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128); + int cipherKeyBits = + this.rpcServer.conf.getInt("hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128); // generate key and iv if (cipherKeyBits % 8 != 0) { - throw new IllegalArgumentException("The AES cipher key size in bits" + - " should be a multiple of byte"); + throw new IllegalArgumentException( + "The AES cipher key size in bits" + " should be a multiple of byte"); } int len = cipherKeyBits / 8; byte[] inKey = new byte[len]; @@ -258,10 +251,9 @@ abstract class ServerRpcConnection implements Closeable { secureRandom.nextBytes(outIv); // create CryptoAES for server - cryptoAES = new CryptoAES(transformation, properties, - inKey, outKey, inIv, outIv); + cryptoAES = new CryptoAES(transformation, properties, inKey, outKey, inIv, outIv); // create SaslCipherMeta and send to client, - // for client, the [inKey, outKey], [inIv, outIv] should be reversed + // for client, the [inKey, outKey], [inIv, outIv] should be reversed RPCProtos.CryptoCipherMeta.Builder ccmBuilder = RPCProtos.CryptoCipherMeta.newBuilder(); ccmBuilder.setTransformation(transformation); ccmBuilder.setInIv(getByteString(outIv)); @@ -297,8 +289,7 @@ abstract class ServerRpcConnection implements Closeable { } if (effectiveUser != null) { if (realUser != null) { - UserGroupInformation realUserUgi = - UserGroupInformation.createRemoteUser(realUser); + UserGroupInformation realUserUgi = UserGroupInformation.createRemoteUser(realUser); ugi = UserGroupInformation.createProxyUser(effectiveUser, realUserUgi); } else { ugi = UserGroupInformation.createRemoteUser(effectiveUser); @@ -317,13 +308,13 @@ abstract class ServerRpcConnection implements Closeable { /** * No protobuf encoding of raw sasl messages */ - protected final void doRawSaslReply(SaslStatus status, Writable rv, - String errorClass, String error) throws IOException { + protected final void doRawSaslReply(SaslStatus status, Writable rv, String errorClass, + String error) throws IOException { BufferChain bc; // In my testing, have noticed that sasl messages are usually // in the ballpark of 100-200. That's why the initial capacity is 256. 
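// Illustrative note (not part of the patch): the raw, non-protobuf SASL reply written in
// this try block is simply [int status][payload]; on SUCCESS the payload is the Writable
// token, and in the error case (handled outside this hunk) the error class name and the
// error message are written instead.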
try (ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256); - DataOutputStream out = new DataOutputStream(saslResponse)) { + DataOutputStream out = new DataOutputStream(saslResponse)) { out.writeInt(status.state); // write status if (status == SaslStatus.SUCCESS) { rv.write(out); @@ -336,8 +327,7 @@ abstract class ServerRpcConnection implements Closeable { doRespond(() -> bc); } - public void saslReadAndProcess(ByteBuff saslToken) throws IOException, - InterruptedException { + public void saslReadAndProcess(ByteBuff saslToken) throws IOException, InterruptedException { if (saslContextEstablished) { RpcServer.LOG.trace("Read input token of size={} for processing by saslServer.unwrap()", saslToken.limit()); @@ -345,7 +335,7 @@ abstract class ServerRpcConnection implements Closeable { processOneRpc(saslToken); } else { byte[] b = saslToken.hasArray() ? saslToken.array() : saslToken.toBytes(); - byte [] plaintextData; + byte[] plaintextData; if (useCryptoAesWrap) { // unwrap with CryptoAES plaintextData = cryptoAES.unwrap(b, 0, b.length); @@ -361,18 +351,19 @@ abstract class ServerRpcConnection implements Closeable { try { saslServer = new HBaseSaslRpcServer(provider, rpcServer.saslProps, rpcServer.secretManager); - } catch (Exception e){ + } catch (Exception e) { RpcServer.LOG.error("Error when trying to create instance of HBaseSaslRpcServer " + "with sasl provider: " + provider, e); throw e; } RpcServer.LOG.debug("Created SASL server with mechanism={}", - provider.getSaslAuthMethod().getAuthMethod()); + provider.getSaslAuthMethod().getAuthMethod()); } - RpcServer.LOG.debug("Read input token of size={} for processing by saslServer." + - "evaluateResponse()", saslToken.limit()); - replyToken = saslServer.evaluateResponse(saslToken.hasArray()? - saslToken.array() : saslToken.toBytes()); + RpcServer.LOG.debug( + "Read input token of size={} for processing by saslServer." + "evaluateResponse()", + saslToken.limit()); + replyToken = saslServer + .evaluateResponse(saslToken.hasArray() ? saslToken.array() : saslToken.toBytes()); } catch (IOException e) { RpcServer.LOG.debug("Failed to execute SASL handshake", e); IOException sendToClient = e; @@ -389,26 +380,24 @@ abstract class ServerRpcConnection implements Closeable { this.rpcServer.metrics.authenticationFailure(); String clientIP = this.toString(); // attempting user could be null - RpcServer.AUDITLOG - .warn("{} {}: {}", RpcServer.AUTH_FAILED_FOR, clientIP, saslServer.getAttemptingUser()); + RpcServer.AUDITLOG.warn("{} {}: {}", RpcServer.AUTH_FAILED_FOR, clientIP, + saslServer.getAttemptingUser()); throw e; } if (replyToken != null) { if (RpcServer.LOG.isDebugEnabled()) { - RpcServer.LOG.debug("Will send token of size " + replyToken.length - + " from saslServer."); + RpcServer.LOG.debug("Will send token of size " + replyToken.length + " from saslServer."); } - doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, - null); + doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, null); } if (saslServer.isComplete()) { String qop = saslServer.getNegotiatedQop(); useWrap = qop != null && !"auth".equalsIgnoreCase(qop); - ugi = provider.getAuthorizedUgi(saslServer.getAuthorizationID(), - this.rpcServer.secretManager); + ugi = + provider.getAuthorizedUgi(saslServer.getAuthorizationID(), this.rpcServer.secretManager); RpcServer.LOG.debug( - "SASL server context established. Authenticated client: {}. Negotiated QoP is {}", - ugi, qop); + "SASL server context established. Authenticated client: {}. 
Negotiated QoP is {}", ugi, + qop); this.rpcServer.metrics.authenticationSuccess(); RpcServer.AUDITLOG.info(RpcServer.AUTH_SUCCESSFUL_FOR + ugi); saslContextEstablished = true; @@ -423,8 +412,7 @@ abstract class ServerRpcConnection implements Closeable { int count; if (unwrappedDataLengthBuffer.remaining() > 0) { count = this.rpcServer.channelRead(ch, unwrappedDataLengthBuffer); - if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) - return; + if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) return; } if (unwrappedData == null) { @@ -432,8 +420,7 @@ abstract class ServerRpcConnection implements Closeable { int unwrappedDataLength = unwrappedDataLengthBuffer.getInt(); if (unwrappedDataLength == RpcClient.PING_CALL_ID) { - if (RpcServer.LOG.isDebugEnabled()) - RpcServer.LOG.debug("Received ping message"); + if (RpcServer.LOG.isDebugEnabled()) RpcServer.LOG.debug("Received ping message"); unwrappedDataLengthBuffer.clear(); continue; // ping message } @@ -441,8 +428,7 @@ abstract class ServerRpcConnection implements Closeable { } count = this.rpcServer.channelRead(ch, unwrappedData); - if (count <= 0 || unwrappedData.remaining() > 0) - return; + if (count <= 0 || unwrappedData.remaining() > 0) return; if (unwrappedData.remaining() == 0) { unwrappedDataLengthBuffer.clear(); @@ -453,8 +439,7 @@ abstract class ServerRpcConnection implements Closeable { } } - public void processOneRpc(ByteBuff buf) throws IOException, - InterruptedException { + public void processOneRpc(ByteBuff buf) throws IOException, InterruptedException { if (connectionHeaderRead) { processRequest(buf); } else { @@ -463,8 +448,8 @@ abstract class ServerRpcConnection implements Closeable { if (rpcServer.needAuthorization() && !authorizeConnection()) { // Throw FatalConnectionException wrapping ACE so client does right thing and closes // down the connection instead of trying to read non-existent retun. - throw new AccessDeniedException("Connection from " + this + " for service " + - connectionHeader.getServiceName() + " is unauthorized for user: " + ugi); + throw new AccessDeniedException("Connection from " + this + " for service " + + connectionHeader.getServiceName() + " is unauthorized for user: " + ugi); } this.user = this.rpcServer.userProvider.create(this.ugi); } @@ -476,8 +461,7 @@ abstract class ServerRpcConnection implements Closeable { // real user for the effective user, therefore not required to // authorize real user. 
doAs is allowed only for simple or kerberos // authentication - if (ugi != null && ugi.getRealUser() != null - && provider.supportsProtocolAuthentication()) { + if (ugi != null && ugi.getRealUser() != null && provider.supportsProtocolAuthentication()) { ProxyUsers.authorize(ugi, this.getHostAddress(), this.rpcServer.conf); } this.rpcServer.authorize(ugi, connectionHeader, getHostInetAddress()); @@ -498,8 +482,8 @@ abstract class ServerRpcConnection implements Closeable { if (buf.hasArray()) { this.connectionHeader = ConnectionHeader.parseFrom(buf.array()); } else { - CodedInputStream cis = UnsafeByteOperations.unsafeWrap( - new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); + CodedInputStream cis = UnsafeByteOperations + .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); cis.enableAliasing(true); this.connectionHeader = ConnectionHeader.parseFrom(cis); } @@ -509,7 +493,7 @@ abstract class ServerRpcConnection implements Closeable { if (this.service == null) throw new UnknownServiceException(serviceName); setupCellBlockCodecs(this.connectionHeader); RPCProtos.ConnectionHeaderResponse.Builder chrBuilder = - RPCProtos.ConnectionHeaderResponse.newBuilder(); + RPCProtos.ConnectionHeaderResponse.newBuilder(); setupCryptoCipher(this.connectionHeader, chrBuilder); responseConnectionHeader(chrBuilder); UserGroupInformation protocolUser = createUser(connectionHeader); @@ -520,29 +504,26 @@ abstract class ServerRpcConnection implements Closeable { } // audit logging for SASL authenticated users happens in saslReadAndProcess() if (authenticatedWithFallback) { - RpcServer.LOG.warn("Allowed fallback to SIMPLE auth for {} connecting from {}", - ugi, getHostAddress()); + RpcServer.LOG.warn("Allowed fallback to SIMPLE auth for {} connecting from {}", ugi, + getHostAddress()); } } else { // user is authenticated ugi.setAuthenticationMethod(provider.getSaslAuthMethod().getAuthMethod()); - //Now we check if this is a proxy user case. If the protocol user is - //different from the 'user', it is a proxy user scenario. However, - //this is not allowed if user authenticated with DIGEST. - if ((protocolUser != null) - && (!protocolUser.getUserName().equals(ugi.getUserName()))) { + // Now we check if this is a proxy user case. If the protocol user is + // different from the 'user', it is a proxy user scenario. However, + // this is not allowed if user authenticated with DIGEST. + if ((protocolUser != null) && (!protocolUser.getUserName().equals(ugi.getUserName()))) { if (!provider.supportsProtocolAuthentication()) { // Not allowed to doAs if token authentication is used throw new AccessDeniedException("Authenticated user (" + ugi - + ") doesn't match what the client claims to be (" - + protocolUser + ")"); + + ") doesn't match what the client claims to be (" + protocolUser + ")"); } else { // Effective user can be different from authenticated user // for simple auth or kerberos auth // The user is the real user. Now we create a proxy user UserGroupInformation realUser = ugi; - ugi = UserGroupInformation.createProxyUser(protocolUser - .getUserName(), realUser); + ugi = UserGroupInformation.createProxyUser(protocolUser.getUserName(), realUser); // Now the user is a proxy user, set Authentication method Proxy. 
ugi.setAuthenticationMethod(AuthenticationMethod.PROXY); } @@ -551,21 +532,20 @@ abstract class ServerRpcConnection implements Closeable { String version; if (this.connectionHeader.hasVersionInfo()) { // see if this connection will support RetryImmediatelyException - this.retryImmediatelySupported = - VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2); + this.retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2); version = this.connectionHeader.getVersionInfo().getVersion(); } else { version = "UNKNOWN"; } RpcServer.AUDITLOG.info("Connection from {}:{}, version={}, sasl={}, ugi={}, service={}", - this.hostAddress, this.remotePort, version, this.useSasl, this.ugi, serviceName); + this.hostAddress, this.remotePort, version, this.useSasl, this.ugi, serviceName); } /** * Send the response for connection header */ private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) - throws FatalConnectionException { + throws FatalConnectionException { // Response the connection header if Crypto AES is enabled if (!chrBuilder.hasCryptoCipherMeta()) return; try { @@ -577,7 +557,7 @@ abstract class ServerRpcConnection implements Closeable { byte[] wrapped = saslServer.wrap(unwrapped, 0, unwrapped.length); BufferChain bc; try (ByteBufferOutputStream response = new ByteBufferOutputStream(wrapped.length + 4); - DataOutputStream out = new DataOutputStream(response)) { + DataOutputStream out = new DataOutputStream(response)) { out.writeInt(wrapped.length); out.write(wrapped); bc = new BufferChain(response.getByteBuffer()); @@ -591,14 +571,10 @@ abstract class ServerRpcConnection implements Closeable { protected abstract void doRespond(RpcResponse resp) throws IOException; /** - * @param buf - * Has the request header and the request param and optionally - * encoded data buffer all in this one array. - * @throws IOException - * @throws InterruptedException + * n * Has the request header and the request param and optionally encoded data buffer all in this + * one array. nn */ - protected void processRequest(ByteBuff buf) throws IOException, - InterruptedException { + protected void processRequest(ByteBuff buf) throws IOException, InterruptedException { long totalRequestSize = buf.limit(); int offset = 0; // Here we read in the header. We avoid having pb @@ -609,7 +585,7 @@ abstract class ServerRpcConnection implements Closeable { cis = UnsafeByteOperations.unsafeWrap(buf.array(), 0, buf.limit()).newCodedInput(); } else { cis = UnsafeByteOperations - .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); + .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); } cis.enableAliasing(true); int headerSize = cis.readRawVarint32(); @@ -631,20 +607,22 @@ abstract class ServerRpcConnection implements Closeable { try (Scope ignored = span.makeCurrent()) { int id = header.getCallId(); if (RpcServer.LOG.isTraceEnabled()) { - RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + - " totalRequestSize: " + totalRequestSize + " bytes"); + RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + + " totalRequestSize: " + totalRequestSize + " bytes"); } // Enforcing the call queue size, this triggers a retry in the client // This is a bit late to be doing this check - we have already read in the // total request. 
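// Illustrative note (not part of the patch): the cap enforced just below is
// hbase.ipc.server.max.callqueue.size, measured in bytes of queued request data. When it
// is exceeded the server answers with CALL_QUEUE_TOO_BIG_EXCEPTION, which the client
// treats as a signal to retry, so raising the cap trades server memory for fewer retries.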
- if ((totalRequestSize + - this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) { + if ( + (totalRequestSize + this.rpcServer.callQueueSizeInBytes.sum()) + > this.rpcServer.maxQueueSizeInBytes + ) { final ServerCall callTooBig = createCall(id, this.service, null, null, null, null, totalRequestSize, null, 0, this.callCleanup); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, - "Call queue is full on " + this.rpcServer.server.getServerName() + - ", is hbase.ipc.server.max.callqueue.size too small?"); + "Call queue is full on " + this.rpcServer.server.getServerName() + + ", is hbase.ipc.server.max.callqueue.size too small?"); TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); callTooBig.sendResponseIfReady(); return; @@ -670,8 +648,8 @@ abstract class ServerRpcConnection implements Closeable { } else { // currently header must have request param, so we directly throw // exception here - String msg = "Invalid request header: " + TextFormat.shortDebugString(header) + - ", should have param set in it"; + String msg = "Invalid request header: " + TextFormat.shortDebugString(header) + + ", should have param set in it"; RpcServer.LOG.warn(msg); throw new DoNotRetryIOException(msg); } @@ -684,8 +662,8 @@ abstract class ServerRpcConnection implements Closeable { } } catch (Throwable thrown) { InetSocketAddress address = this.rpcServer.getListenerAddress(); - String msg = (address != null ? address : "(channel closed)") + - " is unable to read call parameter from client " + getHostAddress(); + String msg = (address != null ? address : "(channel closed)") + + " is unable to read call parameter from client " + getHostAddress(); RpcServer.LOG.warn(msg, thrown); this.rpcServer.metrics.exception(thrown); @@ -703,8 +681,8 @@ abstract class ServerRpcConnection implements Closeable { ServerCall readParamsFailedCall = createCall(id, this.service, null, null, null, null, totalRequestSize, null, 0, this.callCleanup); - readParamsFailedCall.setResponse(null, null, responseThrowable, msg + "; " - + responseThrowable.getMessage()); + readParamsFailedCall.setResponse(null, null, responseThrowable, + msg + "; " + responseThrowable.getMessage()); TraceUtil.setError(span, responseThrowable); readParamsFailedCall.sendResponseIfReady(); return; @@ -724,8 +702,8 @@ abstract class ServerRpcConnection implements Closeable { this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize()); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); call.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, - "Call queue is full on " + this.rpcServer.server.getServerName() + - ", too many items queued ?"); + "Call queue is full on " + this.rpcServer.server.getServerName() + + ", too many items queued ?"); TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); call.sendResponseIfReady(); } @@ -740,7 +718,7 @@ abstract class ServerRpcConnection implements Closeable { ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder().setCallId(-1); ServerCall.setExceptionResponse(e, msg, headerBuilder); ByteBuffer headerBuf = - ServerCall.createHeaderAndMessageBytes(null, headerBuilder.build(), 0, null); + ServerCall.createHeaderAndMessageBytes(null, headerBuilder.build(), 0, null); BufferChain buf = new BufferChain(headerBuf); return () -> buf; } @@ -759,9 +737,9 @@ abstract class ServerRpcConnection implements Closeable { for (int i = 0; i < 
RPC_HEADER.length; i++) { if (RPC_HEADER[i] != preambleBuffer.get()) { doBadPreambleHandling( - "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" + - Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from " + - toString()); + "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" + + Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from " + + toString()); return false; } } @@ -810,8 +788,8 @@ abstract class ServerRpcConnection implements Closeable { public abstract boolean isConnectionOpen(); public abstract ServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, - InetAddress remoteAddress, int timeout, CallCleanup reqCleanup); + RequestHeader header, Message param, CellScanner cellScanner, long size, + InetAddress remoteAddress, int timeout, CallCleanup reqCleanup); private static class ByteBuffByteInput extends ByteInput { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java index 117b72a5b11..bb8025ca911 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java @@ -29,11 +29,13 @@ import org.apache.yetus.audience.InterfaceStability; /** * The default scheduler. Configurable. Maintains isolated handler pools for general ('default'), * high-priority ('priority'), and replication ('replication') requests. Default behavior is to - * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc. - * See below article for explanation of options. - * @see Overview on Request Queuing + * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc. See + * below article for explanation of options. + * @see Overview + * on Request Queuing */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObserver { private int port; @@ -53,34 +55,26 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs private Abortable abortable = null; /** - * @param conf - * @param handlerCount the number of handler threads that will be used to process calls - * @param priorityHandlerCount How many threads for priority handling. - * @param replicationHandlerCount How many threads for replication handling. - * @param highPriorityLevel - * @param priority Function to extract request priority. + * n * @param handlerCount the number of handler threads that will be used to process calls + * @param priorityHandlerCount How many threads for priority handling. + * @param replicationHandlerCount How many threads for replication handling. n * @param priority + * Function to extract request priority. 
*/ - public SimpleRpcScheduler( - Configuration conf, - int handlerCount, - int priorityHandlerCount, - int replicationHandlerCount, - int metaTransitionHandler, - PriorityFunction priority, - Abortable server, - int highPriorityLevel) { + public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount, + int replicationHandlerCount, int metaTransitionHandler, PriorityFunction priority, + Abortable server, int highPriorityLevel) { int maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, - handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); int maxPriorityQueueLength = - conf.getInt(RpcScheduler.IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH, maxQueueLength); + conf.getInt(RpcScheduler.IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH, maxQueueLength); this.priority = priority; this.highPriorityLevel = highPriorityLevel; this.abortable = server; - String callQueueType = conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, - RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); + String callQueueType = + conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); float callqReadShare = conf.getFloat(RWQueueRpcExecutor.CALL_QUEUE_READ_SHARE_CONF_KEY, 0); if (callqReadShare > 0) { @@ -88,48 +82,49 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs callExecutor = new FastPathRWQueueRpcExecutor("default.FPRWQ", Math.max(2, handlerCount), maxQueueLength, priority, conf, server); } else { - if (RpcExecutor.isFifoQueueType(callQueueType) || - RpcExecutor.isCodelQueueType(callQueueType) || - RpcExecutor.isPluggableQueueWithFastPath(callQueueType, conf)) { + if ( + RpcExecutor.isFifoQueueType(callQueueType) || RpcExecutor.isCodelQueueType(callQueueType) + || RpcExecutor.isPluggableQueueWithFastPath(callQueueType, conf) + ) { callExecutor = new FastPathBalancedQueueRpcExecutor("default.FPBQ", handlerCount, - maxQueueLength, priority, conf, server); + maxQueueLength, priority, conf, server); } else { callExecutor = new BalancedQueueRpcExecutor("default.BQ", handlerCount, maxQueueLength, - priority, conf, server); + priority, conf, server); } } float metaCallqReadShare = - conf.getFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, - MetaRWQueueRpcExecutor.DEFAULT_META_CALL_QUEUE_READ_SHARE); + conf.getFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, + MetaRWQueueRpcExecutor.DEFAULT_META_CALL_QUEUE_READ_SHARE); if (metaCallqReadShare > 0) { // different read/write handler for meta, at least 1 read handler and 1 write handler - this.priorityExecutor = - new MetaRWQueueRpcExecutor("priority.RWQ", Math.max(2, priorityHandlerCount), - maxPriorityQueueLength, priority, conf, server); + this.priorityExecutor = new MetaRWQueueRpcExecutor("priority.RWQ", + Math.max(2, priorityHandlerCount), maxPriorityQueueLength, priority, conf, server); } else { // Create 2 queues to help priorityExecutor be more scalable. - this.priorityExecutor = priorityHandlerCount > 0 ? - new FastPathBalancedQueueRpcExecutor("priority.FPBQ", priorityHandlerCount, - RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, - abortable) : - null; + this.priorityExecutor = priorityHandlerCount > 0 + ? 
new FastPathBalancedQueueRpcExecutor("priority.FPBQ", priorityHandlerCount, + RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, + abortable) + : null; } - this.replicationExecutor = replicationHandlerCount > 0 ? new FastPathBalancedQueueRpcExecutor( - "replication.FPBQ", replicationHandlerCount, RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, - maxQueueLength, priority, conf, abortable) : null; + this.replicationExecutor = replicationHandlerCount > 0 + ? new FastPathBalancedQueueRpcExecutor("replication.FPBQ", replicationHandlerCount, + RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxQueueLength, priority, conf, abortable) + : null; - this.metaTransitionExecutor = metaTransitionHandler > 0 ? - new FastPathBalancedQueueRpcExecutor("metaPriority.FPBQ", metaTransitionHandler, - RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, - abortable) : - null; + this.metaTransitionExecutor = metaTransitionHandler > 0 + ? new FastPathBalancedQueueRpcExecutor("metaPriority.FPBQ", metaTransitionHandler, + RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, + abortable) + : null; } public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount, - int replicationHandlerCount, PriorityFunction priority, int highPriorityLevel) { + int replicationHandlerCount, PriorityFunction priority, int highPriorityLevel) { this(conf, handlerCount, priorityHandlerCount, replicationHandlerCount, 0, priority, null, - highPriorityLevel); + highPriorityLevel); } /** @@ -149,10 +144,11 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs metaTransitionExecutor.resizeQueues(conf); } - String callQueueType = conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, - RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); - if (RpcExecutor.isCodelQueueType(callQueueType) || - RpcExecutor.isPluggableQueueType(callQueueType)) { + String callQueueType = + conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); + if ( + RpcExecutor.isCodelQueueType(callQueueType) || RpcExecutor.isPluggableQueueType(callQueueType) + ) { callExecutor.onConfigurationChange(conf); } } @@ -195,13 +191,15 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs @Override public boolean dispatch(CallRunner callTask) { RpcCall call = callTask.getRpcCall(); - int level = priority.getPriority(call.getHeader(), call.getParam(), - call.getRequestUser().orElse(null)); + int level = + priority.getPriority(call.getHeader(), call.getParam(), call.getRequestUser().orElse(null)); if (level == HConstants.PRIORITY_UNSET) { level = HConstants.NORMAL_QOS; } - if (metaTransitionExecutor != null && - level == MasterAnnotationReadingPriorityFunction.META_TRANSITION_QOS) { + if ( + metaTransitionExecutor != null + && level == MasterAnnotationReadingPriorityFunction.META_TRANSITION_QOS + ) { return metaTransitionExecutor.dispatch(callTask); } else if (priorityExecutor != null && level > highPriorityLevel) { return priorityExecutor.dispatch(callTask); @@ -235,7 +233,7 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs @Override public int getActiveRpcHandlerCount() { return callExecutor.getActiveHandlerCount() + getActivePriorityRpcHandlerCount() - + getActiveReplicationRpcHandlerCount() + getActiveMetaPriorityRpcHandlerCount(); + + getActiveReplicationRpcHandlerCount() + getActiveMetaPriorityRpcHandlerCount(); } @Override @@ -325,7 +323,7 @@ public class 
SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs if (null != metaTransitionExecutor) { queueName = "Meta Transition Queue"; callQueueInfo.setCallMethodCount(queueName, - metaTransitionExecutor.getCallQueueCountsSummary()); + metaTransitionExecutor.getCallQueueCountsSummary()); callQueueInfo.setCallMethodSize(queueName, metaTransitionExecutor.getCallQueueSizeSummary()); } @@ -333,4 +331,3 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 825c01610d6..156981ebd2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,47 +60,41 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescrip import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** - * The RPC server with native java NIO implementation deriving from Hadoop to - * host protobuf described Services. It's the original one before HBASE-17262, - * and the default RPC server for now. - * - * An RpcServer instance has a Listener that hosts the socket. Listener has fixed number - * of Readers in an ExecutorPool, 10 by default. The Listener does an accept and then - * round robin a Reader is chosen to do the read. The reader is registered on Selector. Read does - * total read off the channel and the parse from which it makes a Call. The call is wrapped in a - * CallRunner and passed to the scheduler to be run. Reader goes back to see if more to be done - * and loops till done. - * - *
<p>
          Scheduler can be variously implemented but default simple scheduler has handlers to which it - * has given the queues into which calls (i.e. CallRunner instances) are inserted. Handlers run - * taking from the queue. They run the CallRunner#run method on each item gotten from queue - * and keep taking while the server is up. - * - * CallRunner#run executes the call. When done, asks the included Call to put itself on new - * queue for Responder to pull from and return result to client. - * + * The RPC server with native java NIO implementation deriving from Hadoop to host protobuf + * described Services. It's the original one before HBASE-17262, and the default RPC server for now. + * An RpcServer instance has a Listener that hosts the socket. Listener has fixed number of Readers + * in an ExecutorPool, 10 by default. The Listener does an accept and then round robin a Reader is + * chosen to do the read. The reader is registered on Selector. Read does total read off the channel + * and the parse from which it makes a Call. The call is wrapped in a CallRunner and passed to the + * scheduler to be run. Reader goes back to see if more to be done and loops till done. + *
<p>
          + * Scheduler can be variously implemented but default simple scheduler has handlers to which it has + * given the queues into which calls (i.e. CallRunner instances) are inserted. Handlers run taking + * from the queue. They run the CallRunner#run method on each item gotten from queue and keep taking + * while the server is up. CallRunner#run executes the call. When done, asks the included Call to + * put itself on new queue for Responder to pull from and return result to client. * @see BlockingRpcClient */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.CONFIG }) public class SimpleRpcServer extends RpcServer { - protected int port; // port we listen on - protected InetSocketAddress address; // inet address we listen on - private int readThreads; // number of read threads + protected int port; // port we listen on + protected InetSocketAddress address; // inet address we listen on + private int readThreads; // number of read threads protected int socketSendBufferSize; - protected final long purgeTimeout; // in milliseconds + protected final long purgeTimeout; // in milliseconds // maintains the set of client connections and handles idle timeouts private ConnectionManager connectionManager; private Listener listener = null; protected SimpleRpcServerResponder responder = null; - /** Listens on the socket. Creates jobs for the handler threads*/ + /** Listens on the socket. Creates jobs for the handler threads */ private class Listener extends Thread { - private ServerSocketChannel acceptChannel = null; //the accept channel - private Selector selector = null; //the selector that we use for the server + private ServerSocketChannel acceptChannel = null; // the accept channel + private Selector selector = null; // the selector that we use for the server private Reader[] readers = null; private int currentReader = 0; private final int readerPendingConnectionQueueLength; @@ -112,15 +106,15 @@ public class SimpleRpcServer extends RpcServer { // The backlog of requests that we will have the serversocket carry. int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128); readerPendingConnectionQueueLength = - conf.getInt("hbase.ipc.server.read.connection-queue.size", 100); + conf.getInt("hbase.ipc.server.read.connection-queue.size", 100); // Create a new server socket and set to non blocking mode acceptChannel = ServerSocketChannel.open(); acceptChannel.configureBlocking(false); // Bind the server socket to the binding addrees (can be different from the default interface) bind(acceptChannel.socket(), bindAddress, backlogLength); - port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port - address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); + port = acceptChannel.socket().getLocalPort(); // Could be an ephemeral port + address = (InetSocketAddress) acceptChannel.socket().getLocalSocketAddress(); // create a selector; selector = Selector.open(); @@ -128,10 +122,9 @@ public class SimpleRpcServer extends RpcServer { // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it // has an advantage in that it is easy to shutdown the pool. 
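// Illustrative note (not part of the patch): the pool created below holds the "fixed
// number of Readers, 10 by default" described in the class javadoc. The related knobs
// visible in this file are:
//   hbase.ipc.server.read.threadpool.size       (default 10)  - number of Reader threads
//   hbase.ipc.server.listen.queue.size          (default 128) - ServerSocket accept backlog
//   hbase.ipc.server.read.connection-queue.size (default 100) - pending connections queued per Reader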
readPool = Executors.newFixedThreadPool(readThreads, - new ThreadFactoryBuilder().setNameFormat( - "Reader=%d,bindAddress=" + bindAddress.getHostName() + - ",port=" + port).setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder() + .setNameFormat("Reader=%d,bindAddress=" + bindAddress.getHostName() + ",port=" + port) + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); for (int i = 0; i < readThreads; ++i) { Reader reader = new Reader(); readers[i] = reader; @@ -145,7 +138,6 @@ public class SimpleRpcServer extends RpcServer { this.setDaemon(true); } - private class Reader implements Runnable { final private LinkedBlockingQueue pendingConnections; private final Selector readSelector; @@ -174,7 +166,7 @@ public class SimpleRpcServer extends RpcServer { // Consume as many connections as currently queued to avoid // unbridled acceptance of connections that starves the select int size = pendingConnections.size(); - for (int i=size; i>0; i--) { + for (int i = size; i > 0; i--) { SimpleServerRpcConnection conn = pendingConnections.take(); conn.channel.register(readSelector, SelectionKey.OP_READ, conn); } @@ -191,7 +183,7 @@ public class SimpleRpcServer extends RpcServer { key = null; } } catch (InterruptedException e) { - if (running) { // unexpected -- log it + if (running) { // unexpected -- log it LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e); } } catch (CancelledKeyException e) { @@ -203,9 +195,9 @@ public class SimpleRpcServer extends RpcServer { } /** - * Updating the readSelector while it's being used is not thread-safe, - * so the connection must be queued. The reader will drain the queue - * and update its readSelector before performing the next select + * Updating the readSelector while it's being used is not thread-safe, so the connection must + * be queued. 
The reader will drain the queue and update its readSelector before performing + * the next select */ public void addConnection(SimpleServerRpcConnection conn) throws IOException { pendingConnections.add(conn); @@ -214,9 +206,9 @@ public class SimpleRpcServer extends RpcServer { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC", - justification="selector access is not synchronized; seems fine but concerned changing " + - "it will have per impact") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", + justification = "selector access is not synchronized; seems fine but concerned changing " + + "it will have per impact") public void run() { LOG.info(getName() + ": starting"); connectionManager.startIdleScan(); @@ -230,8 +222,7 @@ public class SimpleRpcServer extends RpcServer { iter.remove(); try { if (key.isValid()) { - if (key.isAcceptable()) - doAccept(key); + if (key.isAcceptable()) doAccept(key); } } catch (IOException ignored) { if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored); @@ -272,8 +263,8 @@ public class SimpleRpcServer extends RpcServer { if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored); } - selector= null; - acceptChannel= null; + selector = null; + acceptChannel = null; // close all connections connectionManager.stopIdleScan(); @@ -283,7 +274,7 @@ public class SimpleRpcServer extends RpcServer { private void closeCurrentConnection(SelectionKey key, Throwable e) { if (key != null) { - SimpleServerRpcConnection c = (SimpleServerRpcConnection)key.attachment(); + SimpleServerRpcConnection c = (SimpleServerRpcConnection) key.attachment(); if (c != null) { closeConnection(c); key.attach(null); @@ -311,7 +302,7 @@ public class SimpleRpcServer extends RpcServer { } continue; } - key.attach(c); // so closeCurrentConnection can get the object + key.attach(c); // so closeCurrentConnection can get the object reader.addConnection(c); } } @@ -326,13 +317,14 @@ public class SimpleRpcServer extends RpcServer { try { count = c.readAndProcess(); } catch (InterruptedException ieo) { - LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo); + LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", + ieo); throw ieo; } catch (Exception e) { if (LOG.isDebugEnabled()) { LOG.debug("Caught exception while reading:", e); } - count = -1; //so that the (count < 0) block is executed + count = -1; // so that the (count < 0) block is executed } if (count < 0) { closeConnection(c); @@ -367,24 +359,20 @@ public class SimpleRpcServer extends RpcServer { /** * Constructs a server listening on the named port and address. - * @param server hosting instance of {@link Server}. We will do authentications if an - * instance else pass null for no authentication check. - * @param name Used keying this rpc servers' metrics and for naming the Listener thread. - * @param services A list of services. - * @param bindAddress Where to listen - * @param conf - * @param scheduler - * @param reservoirEnabled Enable ByteBufferPool or not. + * @param server hosting instance of {@link Server}. We will do authentications if an + * instance else pass null for no authentication check. + * @param name Used keying this rpc servers' metrics and for naming the Listener thread. + * @param services A list of services. + * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not. 
*/ public SimpleRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { super(server, name, services, bindAddress, conf, scheduler, reservoirEnabled); this.socketSendBufferSize = 0; this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10); - this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout", - 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.purgeTimeout = + conf.getLong("hbase.ipc.client.call.purge.timeout", 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); // Start the listener here and let it bind to the port listener = new Listener(name); @@ -399,8 +387,7 @@ public class SimpleRpcServer extends RpcServer { } /** - * Subclasses of HBaseServer can override this to provide their own - * Connection implementations. + * Subclasses of HBaseServer can override this to provide their own Connection implementations. */ protected SimpleServerRpcConnection getConnection(SocketChannel channel, long time) { return new SimpleServerRpcConnection(this, channel, time); @@ -410,11 +397,14 @@ public class SimpleRpcServer extends RpcServer { connectionManager.close(connection); } - /** Sets the socket buffer size used for responding to RPCs. + /** + * Sets the socket buffer size used for responding to RPCs. * @param size send size */ @Override - public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } + public void setSocketSendBufSize(int size) { + this.socketSendBufferSize = size; + } /** Starts the service. Must be called before any calls will be handled. */ @Override @@ -467,10 +457,10 @@ public class SimpleRpcServer extends RpcServer { } /** - * Return the socket (ip+port) on which the RPC server is listening to. May return null if - * the listener channel is closed. + * Return the socket (ip+port) on which the RPC server is listening to. May return null if the + * listener channel is closed. 
* @return the socket (ip+port) on which the RPC server is listening to, or null if this - * information cannot be determined + * information cannot be determined */ @Override public synchronized InetSocketAddress getListenerAddress() { @@ -482,30 +472,28 @@ public class SimpleRpcServer extends RpcServer { @Override public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) - throws IOException { + Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) + throws IOException { return call(service, md, param, cellScanner, receiveTime, status, EnvironmentEdgeManager.currentTime(), 0); } @Override public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, - long startTime, int timeout) throws IOException { + Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, + long startTime, int timeout) throws IOException { SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner, - null, -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null, null); + null, -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null, null); return call(fakeCall, status); } /** - * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of - * buffer increases. This also minimizes extra copies in NIO layer - * as a result of multiple write operations required to write a large - * buffer. - * - * @param channel writable byte channel to write to + * This is a wrapper around + * {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. If the amount of data + * is large, it writes to channel in smaller chunks. This is to avoid jdk from creating many + * direct buffers as the size of buffer increases. This also minimizes extra copies in NIO layer + * as a result of multiple write operations required to write a large buffer. + * @param channel writable byte channel to write to * @param bufferChain Chain of buffers to write * @return number of bytes written * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer) @@ -520,14 +508,14 @@ public class SimpleRpcServer extends RpcServer { } /** - * A convenience method to bind to a given address and report - * better exceptions if the address is not a valid host. - * @param socket the socket to bind + * A convenience method to bind to a given address and report better exceptions if the address is + * not a valid host. 
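Editorial sketch: the channelWrite javadoc above describes writing a large buffer chain in bounded chunks so the JDK does not allocate oversized temporary direct buffers and extra NIO copies are avoided. A rough standalone equivalent for a single ByteBuffer is shown below; the 64 KB chunk size is an arbitrary illustration, not the server's actual constant, and ChunkedWrite is an invented helper, not part of the patch.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

final class ChunkedWrite {
  // Illustrative chunk size; the real server derives its limit elsewhere.
  private static final int CHUNK = 64 * 1024;

  private ChunkedWrite() {
  }

  // Writes the buffer in slices of at most CHUNK bytes and returns the total written.
  static long write(WritableByteChannel channel, ByteBuffer buf) throws IOException {
    long total = 0;
    while (buf.hasRemaining()) {
      int originalLimit = buf.limit();
      // Temporarily cap the limit so each write() sees at most CHUNK bytes.
      buf.limit(Math.min(buf.position() + CHUNK, originalLimit));
      int written = channel.write(buf);
      buf.limit(originalLimit);
      total += written;
      if (written == 0) {
        break; // non-blocking channel is full; the caller should retry later
      }
    }
    return total;
  }
}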
+ * @param socket the socket to bind * @param address the address to bind to * @param backlog the number of connections allowed in the queue - * @throws BindException if the address can't be bound + * @throws BindException if the address can't be bound * @throws UnknownHostException if the address isn't a valid host name - * @throws IOException other random errors from bind + * @throws IOException other random errors from bind */ public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { @@ -571,18 +559,18 @@ public class SimpleRpcServer extends RpcServer { this.idleScanTimer = new Timer("RpcServer idle connection scanner for port " + port, true); this.idleScanThreshold = conf.getInt("hbase.ipc.client.idlethreshold", 4000); this.idleScanInterval = - conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000); + conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000); this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10); int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); int maxConnectionQueueSize = - handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100); + handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100); // create a set with concurrency -and- a thread-safe iterator, add 2 // for listener and idle closer threads - this.connections = Collections.newSetFromMap( - new ConcurrentHashMap( - maxConnectionQueueSize, 0.75f, readThreads+2)); + this.connections = + Collections.newSetFromMap(new ConcurrentHashMap( + maxConnectionQueueSize, 0.75f, readThreads + 2)); } private boolean add(SimpleServerRpcConnection connection) { @@ -610,16 +598,15 @@ public class SimpleRpcServer extends RpcServer { } SimpleServerRpcConnection register(SocketChannel channel) { - SimpleServerRpcConnection connection = getConnection(channel, - EnvironmentEdgeManager.currentTime()); + SimpleServerRpcConnection connection = + getConnection(channel, EnvironmentEdgeManager.currentTime()); add(connection); if (LOG.isTraceEnabled()) { - LOG.trace("Connection from " + connection + - "; connections=" + size() + - ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() + - ", general queued calls=" + scheduler.getGeneralQueueLength() + - ", priority queued calls=" + scheduler.getPriorityQueueLength() + - ", meta priority queued calls=" + scheduler.getMetaPriorityQueueLength()); + LOG.trace("Connection from " + connection + "; connections=" + size() + + ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() + ", general queued calls=" + + scheduler.getGeneralQueueLength() + ", priority queued calls=" + + scheduler.getPriorityQueueLength() + ", meta priority queued calls=" + + scheduler.getMetaPriorityQueueLength()); } return connection; } @@ -628,9 +615,8 @@ public class SimpleRpcServer extends RpcServer { boolean exists = remove(connection); if (exists) { if (LOG.isTraceEnabled()) { - LOG.trace(Thread.currentThread().getName() + - ": disconnecting client " + connection + - ". Number of active connections: "+ size()); + LOG.trace(Thread.currentThread().getName() + ": disconnecting client " + connection + + ". 
Number of active connections: " + size()); } // only close if actually removed to avoid double-closing due // to possible races @@ -653,10 +639,10 @@ public class SimpleRpcServer extends RpcServer { break; } // stop if not scanning all and max connections are closed - if (connection.isIdle() && - connection.getLastContact() < minLastContact && - close(connection) && - !scanAll && (++closed == maxIdleToClose)) { + if ( + connection.isIdle() && connection.getLastContact() < minLastContact && close(connection) + && !scanAll && (++closed == maxIdleToClose) + ) { break; } } @@ -682,7 +668,7 @@ public class SimpleRpcServer extends RpcServer { if (!running) { return; } - TimerTask idleScanTask = new TimerTask(){ + TimerTask idleScanTask = new TimerTask() { @Override public void run() { if (!running) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java index d6d5dd09a85..200c4ebd1af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Collections; import java.util.Iterator; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; @@ -43,7 +42,7 @@ class SimpleRpcServerResponder extends Thread { private final SimpleRpcServer simpleRpcServer; private final Selector writeSelector; private final Set writingCons = - Collections.newSetFromMap(new ConcurrentHashMap<>()); + Collections.newSetFromMap(new ConcurrentHashMap<>()); SimpleRpcServerResponder(SimpleRpcServer simpleRpcServer) throws IOException { this.simpleRpcServer = simpleRpcServer; @@ -152,7 +151,7 @@ class SimpleRpcServerResponder extends Thread { } } catch (Exception e) { SimpleRpcServer.LOG - .warn(getName() + ": exception in Responder " + StringUtils.stringifyException(e), e); + .warn(getName() + ": exception in Responder " + StringUtils.stringifyException(e), e); } } SimpleRpcServer.LOG.info(getName() + ": stopped"); @@ -176,8 +175,10 @@ class SimpleRpcServerResponder extends Thread { if (connection == null) { throw new IllegalStateException("Coding error: SelectionKey key without attachment."); } - if (connection.lastSentTime > 0 && - now > connection.lastSentTime + this.simpleRpcServer.purgeTimeout) { + if ( + connection.lastSentTime > 0 + && now > connection.lastSentTime + this.simpleRpcServer.purgeTimeout + ) { conWithOldCalls.add(connection); } } @@ -218,17 +219,15 @@ class SimpleRpcServerResponder extends Thread { /** * Process the response for this call. You need to have the lock on * {@link org.apache.hadoop.hbase.ipc.SimpleServerRpcConnection#responseWriteLock} - * @return true if we proceed the call fully, false otherwise. - * @throws IOException + * @return true if we proceed the call fully, false otherwise. 
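Editorial sketch: the ConnectionManager hunks above wire an idle-scan TimerTask to closeIdle(): roughly, once the connection count crosses idleScanThreshold, connections with no contact for longer than maxIdleTime are closed, capped at maxIdleToClose per sweep unless a full scan is requested. The simplified sweep below uses an invented Conn interface and assumes a concurrent set (as the patch builds via Collections.newSetFromMap over a ConcurrentHashMap); it is not the patch's ConnectionManager.

import java.util.Set;

// Hypothetical connection handle; the real code tracks lastContact on each RPC connection.
interface Conn {
  boolean isIdle();
  long getLastContact();
  void close();
}

class IdleSweepSketch {
  private final Set<Conn> connections; // must be a concurrent set for safe removal while iterating
  private final int idleScanThreshold; // only keep sweeping while this many connections exist
  private final long maxIdleTime;      // ms without contact before a connection counts as idle
  private final int maxIdleToClose;    // cap per sweep unless scanAll is set

  IdleSweepSketch(Set<Conn> connections, int idleScanThreshold, long maxIdleTime,
      int maxIdleToClose) {
    this.connections = connections;
    this.idleScanThreshold = idleScanThreshold;
    this.maxIdleTime = maxIdleTime;
    this.maxIdleToClose = maxIdleToClose;
  }

  void closeIdle(boolean scanAll) {
    long minLastContact = System.currentTimeMillis() - maxIdleTime;
    int closed = 0;
    for (Conn c : connections) {
      // Stop early if we dropped below the threshold and are not scanning everything.
      if (!scanAll && connections.size() < idleScanThreshold) {
        break;
      }
      if (c.isIdle() && c.getLastContact() < minLastContact) {
        c.close();
        connections.remove(c);
        if (!scanAll && ++closed == maxIdleToClose) {
          break;
        }
      }
    }
  }
}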
n */ private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp) - throws IOException { + throws IOException { boolean error = true; BufferChain buf = resp.getResponse(); try { // Send as much data as we can in the non-blocking fashion - long numBytes = - this.simpleRpcServer.channelWrite(conn.channel, buf); + long numBytes = this.simpleRpcServer.channelWrite(conn.channel, buf); if (numBytes < 0) { throw new HBaseIOException("Error writing on the socket " + conn); } @@ -256,11 +255,10 @@ class SimpleRpcServerResponder extends Thread { /** * Process all the responses for this connection * @return true if all the calls were processed or that someone else is doing it. false if there * - * is still some work to do. In this case, we expect the caller to delay us. - * @throws IOException + * is still some work to do. In this case, we expect the caller to delay us. n */ private boolean processAllResponses(final SimpleServerRpcConnection connection) - throws IOException { + throws IOException { // We want only one writer on the channel for a connection at a time. connection.responseWriteLock.lock(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java index 311b4c7b1a9..861da8055d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,15 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; import java.net.InetAddress; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -41,13 +42,12 @@ class SimpleServerCall extends ServerCall { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "Can't figure why this complaint is happening... 
see below") SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, - SimpleServerRpcConnection connection, long size, final InetAddress remoteAddress, - long receiveTime, int timeout, ByteBuffAllocator bbAllocator, - CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, - SimpleRpcServerResponder responder) { + RequestHeader header, Message param, CellScanner cellScanner, + SimpleServerRpcConnection connection, long size, final InetAddress remoteAddress, + long receiveTime, int timeout, ByteBuffAllocator bbAllocator, CellBlockBuilder cellBlockBuilder, + CallCleanup reqCleanup, SimpleRpcServerResponder responder) { super(id, service, md, header, param, cellScanner, connection, size, remoteAddress, receiveTime, - timeout, bbAllocator, cellBlockBuilder, reqCleanup); + timeout, bbAllocator, cellBlockBuilder, reqCleanup); this.responder = responder; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index 622e67ab781..f59c002e6bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection { long lastSentTime = -1L; public SimpleServerRpcConnection(SimpleRpcServer rpcServer, SocketChannel channel, - long lastContact) { + long lastContact) { super(rpcServer); this.channel = channel; this.lastContact = lastContact; @@ -144,9 +144,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection { /** * Read off the wire. If there is not enough data to read, update the connection state with what * we have and returns. - * @return Returns -1 if failure (and caller will close connection), else zero or more. - * @throws IOException - * @throws InterruptedException + * @return Returns -1 if failure (and caller will close connection), else zero or more. nn */ public int readAndProcess() throws IOException, InterruptedException { // If we have not read the connection setup preamble, look to see if that is on the wire. @@ -177,14 +175,14 @@ class SimpleServerRpcConnection extends ServerRpcConnection { } if (dataLength < 0) { // A data length of zero is legal. throw new DoNotRetryIOException( - "Unexpected data length " + dataLength + "!! from " + getHostAddress()); + "Unexpected data length " + dataLength + "!! from " + getHostAddress()); } if (dataLength > this.rpcServer.maxRequestSize) { - String msg = "RPC data length of " + dataLength + " received from " + getHostAddress() + - " is greater than max allowed " + this.rpcServer.maxRequestSize + ". Set \"" + - SimpleRpcServer.MAX_REQUEST_SIZE + - "\" on server to override this limit (not recommended)"; + String msg = "RPC data length of " + dataLength + " received from " + getHostAddress() + + " is greater than max allowed " + this.rpcServer.maxRequestSize + ". 
Set \"" + + SimpleRpcServer.MAX_REQUEST_SIZE + + "\" on server to override this limit (not recommended)"; SimpleRpcServer.LOG.warn(msg); if (connectionHeaderRead && connectionPreambleRead) { @@ -211,14 +209,16 @@ class SimpleServerRpcConnection extends ServerRpcConnection { // Notify the client about the offending request SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null, - null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0, - this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder); + null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0, + this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder); RequestTooBigException reqTooBigEx = new RequestTooBigException(msg); this.rpcServer.metrics.exception(reqTooBigEx); // Make sure the client recognizes the underlying exception // Otherwise, throw a DoNotRetryIOException. - if (VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(), - RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)) { + if ( + VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(), + RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION) + ) { reqTooBig.setResponse(null, null, reqTooBigEx, msg); } else { reqTooBig.setResponse(null, null, new DoNotRetryIOException(msg), msg); @@ -327,11 +327,11 @@ class SimpleServerRpcConnection extends ServerRpcConnection { @Override public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, - InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { + RequestHeader header, Message param, CellScanner cellScanner, long size, + InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, - remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, - this.rpcServer.cellBlockBuilder, reqCleanup, this.responder); + remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, + this.rpcServer.cellBlockBuilder, reqCleanup, this.responder); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java index 5b4a2c241b4..d848a7ae495 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 6f5412f2d58..bee91d53e93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.nio.ByteBuffer; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; @@ -49,7 +47,7 @@ public class LoadIncrementalHFiles extends org.apache.hadoop.hbase.tool.LoadIncr @Deprecated @InterfaceAudience.Public public static class LoadQueueItem - extends org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem { + extends org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem { public LoadQueueItem(byte[] family, Path hfilePath) { super(family, hfilePath); @@ -61,7 +59,7 @@ public class LoadIncrementalHFiles extends org.apache.hadoop.hbase.tool.LoadIncr } public Map run(String dirPath, Map> map, - TableName tableName) throws IOException { + TableName tableName) throws IOException { Map originRet; if (dirPath != null) { originRet = run(dirPath, tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java index ebe24463ea4..50628f8717b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.master; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.List; @@ -36,22 +36,24 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * Handles everything on master-side related to master election. Keeps track of - * currently active master and registered backup masters. - * - *

<p>Listens and responds to ZooKeeper notifications on the master znodes,
- * both nodeCreated and nodeDeleted.
- *
- * <p>Contains blocking methods which will hold up backup masters, waiting
- * for the active master to fail.
- *
- * <p>This class is instantiated in the HMaster constructor and the method
- * #blockUntilBecomingActiveMaster() is called to wait until becoming
- * the active master of the cluster.
+ * Handles everything on master-side related to master election. Keeps track of currently active
+ * master and registered backup masters.
+ * <p>
+ * Listens and responds to ZooKeeper notifications on the master znodes, both
+ * nodeCreated and nodeDeleted.
+ * <p>
+ * Contains blocking methods which will hold up backup masters, waiting for the active master to
+ * fail.
+ * <p>
          + * This class is instantiated in the HMaster constructor and the method + * #blockUntilBecomingActiveMaster() is called to wait until becoming the active master of the + * cluster. */ @InterfaceAudience.Private public class ActiveMasterManager extends ZKListener { @@ -75,11 +77,11 @@ public class ActiveMasterManager extends ZKListener { /** * @param watcher ZK watcher - * @param sn ServerName - * @param master In an instance of a Master. + * @param sn ServerName + * @param master In an instance of a Master. */ ActiveMasterManager(ZKWatcher watcher, ServerName sn, Server master) - throws InterruptedIOException { + throws InterruptedIOException { super(watcher); watcher.registerListener(this); this.sn = sn; @@ -117,7 +119,7 @@ public class ActiveMasterManager extends ZKListener { // shut down, so that state is now irrelevant. This means that the shutdown // state must be set while we wait on the active master in order // to shutdown this master. See HBASE-8519. - if(path.equals(watcher.getZNodePaths().clusterStateZNode) && !master.isStopped()) { + if (path.equals(watcher.getZNodePaths().clusterStateZNode) && !master.isStopped()) { clusterShutDown.set(true); } handle(path); @@ -131,7 +133,7 @@ public class ActiveMasterManager extends ZKListener { private void updateBackupMasters() throws InterruptedIOException { backupMasters = - ImmutableList.copyOf(MasterAddressTracker.getBackupMastersAndRenewWatch(watcher)); + ImmutableList.copyOf(MasterAddressTracker.getBackupMastersAndRenewWatch(watcher)); } /** @@ -177,22 +179,21 @@ public class ActiveMasterManager extends ZKListener { } /** - * Handle a change in the master node. Doesn't matter whether this was called - * from a nodeCreated or nodeDeleted event because there are no guarantees - * that the current state of the master node matches the event at the time of - * our next ZK request. - * - *

<p>Uses the watchAndCheckExists method which watches the master address node
- * regardless of whether it exists or not. If it does exist (there is an
- * active master), it returns true. Otherwise it returns false.
- *
- * <p>A watcher is set which guarantees that this method will get called again if
- * there is another change in the master node.
+ * Handle a change in the master node. Doesn't matter whether this was called from a nodeCreated
+ * or nodeDeleted event because there are no guarantees that the current state of the master node
+ * matches the event at the time of our next ZK request.
+ * <p>
+ * Uses the watchAndCheckExists method which watches the master address node regardless of whether
+ * it exists or not. If it does exist (there is an active master), it returns true. Otherwise it
+ * returns false.
+ * <p>
          + * A watcher is set which guarantees that this method will get called again if there is another + * change in the master node. */ private void handleMasterNodeChange() { // Watch the node and check if it exists. try { - synchronized(clusterHasActiveMaster) { + synchronized (clusterHasActiveMaster) { if (ZKUtil.watchAndCheckExists(watcher, watcher.getZNodePaths().masterAddressZNode)) { // A master node exists, there is an active master LOG.trace("A master is now available"); @@ -214,30 +215,26 @@ public class ActiveMasterManager extends ZKListener { } /** - * Block until becoming the active master. - * - * Method blocks until there is not another active master and our attempt - * to become the new active master is successful. - * - * This also makes sure that we are watching the master znode so will be - * notified if another master dies. + * Block until becoming the active master. Method blocks until there is not another active master + * and our attempt to become the new active master is successful. This also makes sure that we are + * watching the master znode so will be notified if another master dies. * @param checkInterval the interval to check if the master is stopped * @param startupStatus the monitor status to track the progress - * @return True if no issue becoming active master else false if another - * master was running or if some other problem (zookeeper, stop flag has been - * set on this Master) + * @return True if no issue becoming active master else false if another master was running or if + * some other problem (zookeeper, stop flag has been set on this Master) */ - boolean blockUntilBecomingActiveMaster( - int checkInterval, MonitoredTask startupStatus) { - String backupZNode = ZNodePaths.joinZNode( - this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString()); + boolean blockUntilBecomingActiveMaster(int checkInterval, MonitoredTask startupStatus) { + String backupZNode = ZNodePaths + .joinZNode(this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString()); while (!(master.isAborted() || master.isStopped())) { startupStatus.setStatus("Trying to register in ZK as active master"); // Try to become the active master, watch if there is another master. // Write out our ServerName as versioned bytes. try { - if (MasterAddressTracker.setMasterAddress(this.watcher, - this.watcher.getZNodePaths().masterAddressZNode, this.sn, infoPort)) { + if ( + MasterAddressTracker.setMasterAddress(this.watcher, + this.watcher.getZNodePaths().masterAddressZNode, this.sn, infoPort) + ) { // If we were a backup master before, delete our ZNode from the backup // master directory since we are the active now) @@ -267,30 +264,30 @@ public class ActiveMasterManager extends ZKListener { byte[] bytes = ZKUtil.getDataAndWatch(this.watcher, this.watcher.getZNodePaths().masterAddressZNode); if (bytes == null) { - msg = ("A master was detected, but went down before its address " + - "could be read. Attempting to become the next active master"); + msg = ("A master was detected, but went down before its address " + + "could be read. Attempting to become the next active master"); } else { ServerName currentMaster; try { currentMaster = ProtobufUtil.parseServerNameFrom(bytes); } catch (DeserializationException e) { LOG.warn("Failed parse", e); - // Hopefully next time around we won't fail the parse. Dangerous. + // Hopefully next time around we won't fail the parse. Dangerous. 
continue; } if (ServerName.isSameAddress(currentMaster, this.sn)) { - msg = ("Current master has this master's address, " + - currentMaster + "; master was restarted? Deleting node."); + msg = ("Current master has this master's address, " + currentMaster + + "; master was restarted? Deleting node."); // Hurry along the expiration of the znode. ZKUtil.deleteNode(this.watcher, this.watcher.getZNodePaths().masterAddressZNode); // We may have failed to delete the znode at the previous step, but - // we delete the file anyway: a second attempt to delete the znode is likely to fail - // again. + // we delete the file anyway: a second attempt to delete the znode is likely to fail + // again. ZNodeClearer.deleteMyEphemeralNodeOnDisk(); } else { - msg = "Another master is the active master, " + currentMaster + - "; waiting to become the next active master"; + msg = "Another master is the active master, " + currentMaster + + "; waiting to become the next active master"; } } LOG.info(msg); @@ -305,13 +302,12 @@ public class ActiveMasterManager extends ZKListener { clusterHasActiveMaster.wait(checkInterval); } catch (InterruptedException e) { // We expect to be interrupted when a master dies, - // will fall out if so + // will fall out if so LOG.debug("Interrupted waiting for master to die", e); } } if (clusterShutDown.get()) { - this.master.stop( - "Cluster went down before this master became active"); + this.master.stop("Cluster went down before this master became active"); } } } @@ -326,10 +322,8 @@ public class ActiveMasterManager extends ZKListener { if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().masterAddressZNode) >= 0) { return true; } - } - catch (KeeperException ke) { - LOG.info("Received an unexpected KeeperException when checking " + - "isActiveMaster : "+ ke); + } catch (KeeperException ke) { + LOG.info("Received an unexpected KeeperException when checking " + "isActiveMaster : " + ke); } return false; } @@ -348,15 +342,14 @@ public class ActiveMasterManager extends ZKListener { } catch (IOException e) { LOG.warn("Failed get of master address: " + e.toString()); } - if (activeMaster != null && activeMaster.equals(this.sn)) { + if (activeMaster != null && activeMaster.equals(this.sn)) { ZKUtil.deleteNode(watcher, watcher.getZNodePaths().masterAddressZNode); // We may have failed to delete the znode at the previous step, but - // we delete the file anyway: a second attempt to delete the znode is likely to fail again. + // we delete the file anyway: a second attempt to delete the znode is likely to fail again. ZNodeClearer.deleteMyEphemeralNodeOnDisk(); } } catch (KeeperException e) { - LOG.debug(this.watcher.prefix("Failed delete of our master address node; " + - e.getMessage())); + LOG.debug(this.watcher.prefix("Failed delete of our master address node; " + e.getMessage())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java index 420eef7dbaf..ff4903026f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
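Editorial sketch: the ActiveMasterManager changes above reformat the election loop, in which each backup master tries to write its address into the master znode and, if another master already holds it, blocks on the clusterHasActiveMaster monitor until a znode change wakes it up. Stripped of the ZooKeeper specifics, the shape of that loop looks roughly like the sketch below; tryClaimLeadership() is a placeholder for MasterAddressTracker.setMasterAddress and the lifecycle checks are simplified, so this is an illustration, not the class itself.

// Sketch of the block-until-active pattern; tryClaimLeadership() and the stopped flag
// stand in for the real ZK write and master lifecycle checks.
class ElectionLoopSketch {
  private final Object clusterHasActiveMaster = new Object();
  private volatile boolean stopped;

  boolean blockUntilBecomingActive(long checkIntervalMs) throws InterruptedException {
    while (!stopped) {
      if (tryClaimLeadership()) {
        return true; // we wrote our address into the master znode
      }
      // Another master is active; wait until a znode change (or the timeout) wakes us up.
      synchronized (clusterHasActiveMaster) {
        clusterHasActiveMaster.wait(checkIntervalMs);
      }
    }
    return false;
  }

  // Invoked from the ZK watcher when the master znode is created or deleted.
  void onMasterNodeChange() {
    synchronized (clusterHasActiveMaster) {
      clusterHasActiveMaster.notifyAll();
    }
  }

  private boolean tryClaimLeadership() {
    return false; // placeholder for the atomic znode creation
  }
}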
See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -34,15 +32,15 @@ import org.apache.hadoop.hbase.favored.FavoredNodesPlan; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** - * Helper class that is used by {@link RegionPlacementMaintainer} to print - * information for favored nodes - * + * Helper class that is used by {@link RegionPlacementMaintainer} to print information for favored + * nodes */ @InterfaceAudience.Private public class AssignmentVerificationReport { - private static final Logger LOG = LoggerFactory.getLogger( - AssignmentVerificationReport.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(AssignmentVerificationReport.class.getName()); private TableName tableName = null; private boolean enforceLocality = false; @@ -63,7 +61,7 @@ public class AssignmentVerificationReport { private int totalFavoredAssignments = 0; private int[] favoredNodes = new int[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; private float[] favoredNodesLocalitySummary = - new float[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; + new float[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; private float actualLocalitySummary = 0; // For region balancing information @@ -86,21 +84,19 @@ public class AssignmentVerificationReport { private Set minDispersionNumServerSet = new HashSet<>(); public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, - Map> regionLocalityMap) { + Map> regionLocalityMap) { // Set the table name this.tableName = tableName; // Get all the regions for this table - List regionInfoList = - snapshot.getTableToRegionMap().get(tableName); + List regionInfoList = snapshot.getTableToRegionMap().get(tableName); // Get the total region num for the current table this.totalRegions = regionInfoList.size(); // Get the existing assignment plan FavoredNodesPlan favoredNodesAssignment = snapshot.getExistingAssignmentPlan(); // Get the region to region server mapping - Map currentAssignment = - snapshot.getRegionToRegionServerMap(); + Map currentAssignment = snapshot.getRegionToRegionServerMap(); // Initialize the server to its hosing region counter map Map serverToHostingRegionCounterMap = new HashMap<>(); @@ -128,18 +124,17 @@ public class AssignmentVerificationReport { // Get the favored nodes from the assignment plan and verify it. 
List favoredNodes = favoredNodesAssignment.getFavoredNodes(region); - if (favoredNodes == null || - favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + if ( + favoredNodes == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM + ) { regionsWithoutValidFavoredNodes.add(region); continue; } // Get the primary, secondary and tertiary region server - ServerName primaryRS = - favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); - ServerName secondaryRS = - favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); - ServerName tertiaryRS = - favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); // Update the primary rs to its region set map Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); @@ -187,43 +182,37 @@ public class AssignmentVerificationReport { for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { ServerName favoredNode = favoredNodes.get(p.ordinal()); // Get the locality for the current favored nodes - Float locality = - regionDegreeLocalityMap.get(favoredNode.getHostname()); + Float locality = regionDegreeLocalityMap.get(favoredNode.getHostname()); if (locality != null) { this.favoredNodesLocalitySummary[p.ordinal()] += locality; } } // Get the locality summary for the current region server - Float actualLocality = - regionDegreeLocalityMap.get(currentRS.getHostname()); + Float actualLocality = regionDegreeLocalityMap.get(currentRS.getHostname()); if (actualLocality != null) { this.actualLocalitySummary += actualLocality; } } } catch (Exception e) { - LOG.error("Cannot verify the region assignment for region " + - ((region == null) ? " null " : region.getRegionNameAsString()) + - "because of " + e); + LOG.error("Cannot verify the region assignment for region " + + ((region == null) ? 
" null " : region.getRegionNameAsString()) + "because of " + e); } } float dispersionScoreSummary = 0; float dispersionNumSummary = 0; // Calculate the secondary score for each primary region server - for (Map.Entry entry : - primaryRSToRegionCounterMap.entrySet()) { + for (Map.Entry entry : primaryRSToRegionCounterMap.entrySet()) { ServerName primaryRS = entry.getKey(); Integer regionsOnPrimary = entry.getValue(); // Process the dispersion number and score float dispersionScore = 0; int dispersionNum = 0; - if (primaryToSecTerRSMap.get(primaryRS) != null - && regionsOnPrimary.intValue() != 0) { + if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) { dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); - dispersionScore = dispersionNum / - ((float) regionsOnPrimary.intValue() * 2); + dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2); } // Update the max dispersion score if (dispersionScore > this.maxDispersionScore) { @@ -267,15 +256,14 @@ public class AssignmentVerificationReport { // Update the avg dispersion score if (primaryRSToRegionCounterMap.keySet().size() != 0) { - this.avgDispersionScore = dispersionScoreSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); - this.avgDispersionNum = dispersionNumSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionScore = + dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = + dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size(); } // Fill up the most loaded and least loaded region server information - for (Map.Entry entry : - serverToHostingRegionCounterMap.entrySet()) { + for (Map.Entry entry : serverToHostingRegionCounterMap.entrySet()) { ServerName currentRS = entry.getKey(); int hostRegionCounter = entry.getValue().intValue(); @@ -300,25 +288,21 @@ public class AssignmentVerificationReport { // and total region servers this.totalRegionServers = serverToHostingRegionCounterMap.keySet().size(); - this.avgRegionsOnRS = (totalRegionServers == 0) ? 0 : - (totalRegions / (float) totalRegionServers); + this.avgRegionsOnRS = + (totalRegionServers == 0) ? 0 : (totalRegions / (float) totalRegionServers); // Set the isFilledUp as true isFilledUp = true; } /** - * Use this to project the dispersion scores - * @param tableName - * @param snapshot - * @param newPlan + * Use this to project the dispersion scores nnn */ - public void fillUpDispersion(TableName tableName, - SnapshotOfRegionAssignmentFromMeta snapshot, FavoredNodesPlan newPlan) { + public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, + FavoredNodesPlan newPlan) { // Set the table name this.tableName = tableName; // Get all the regions for this table - List regionInfoList = snapshot.getTableToRegionMap().get( - tableName); + List regionInfoList = snapshot.getTableToRegionMap().get(tableName); // Get the total region num for the current table this.totalRegions = regionInfoList.size(); FavoredNodesPlan plan = null; @@ -337,18 +321,17 @@ public class AssignmentVerificationReport { try { // Get the favored nodes from the assignment plan and verify it. 
List favoredNodes = plan.getFavoredNodes(region); - if (favoredNodes == null - || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + if ( + favoredNodes == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM + ) { regionsWithoutValidFavoredNodes.add(region); continue; } // Get the primary, secondary and tertiary region server - ServerName primaryRS = favoredNodes - .get(FavoredNodesPlan.Position.PRIMARY.ordinal()); - ServerName secondaryRS = favoredNodes - .get(FavoredNodesPlan.Position.SECONDARY.ordinal()); - ServerName tertiaryRS = favoredNodes - .get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); // Update the primary rs to its region set map Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); @@ -368,26 +351,22 @@ public class AssignmentVerificationReport { primaryToSecTerRSMap.put(primaryRS, secAndTerSet); } catch (Exception e) { LOG.error("Cannot verify the region assignment for region " - + ((region == null) ? " null " : region.getRegionNameAsString()) - + "because of " + e); + + ((region == null) ? " null " : region.getRegionNameAsString()) + "because of " + e); } } float dispersionScoreSummary = 0; float dispersionNumSummary = 0; // Calculate the secondary score for each primary region server - for (Map.Entry entry : - primaryRSToRegionCounterMap.entrySet()) { + for (Map.Entry entry : primaryRSToRegionCounterMap.entrySet()) { ServerName primaryRS = entry.getKey(); Integer regionsOnPrimary = entry.getValue(); // Process the dispersion number and score float dispersionScore = 0; int dispersionNum = 0; - if (primaryToSecTerRSMap.get(primaryRS) != null - && regionsOnPrimary.intValue() != 0) { + if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) { dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); - dispersionScore = dispersionNum / - ((float) regionsOnPrimary.intValue() * 2); + dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2); } // Update the max dispersion num @@ -423,18 +402,16 @@ public class AssignmentVerificationReport { // Update the avg dispersion score if (primaryRSToRegionCounterMap.keySet().size() != 0) { - this.avgDispersionScore = dispersionScoreSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); - this.avgDispersionNum = dispersionNumSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionScore = + dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = + dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size(); } } /** - * @return list which contains just 3 elements: average dispersion score, max - * dispersion score and min dispersion score as first, second and third element - * respectively. - * + * @return list which contains just 3 elements: average dispersion score, max dispersion score and + * min dispersion score as first, second and third element respectively. 
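Editorial sketch: both fillUp() and fillUpDispersion() above compute the same per-primary dispersion metrics: the dispersion number is how many distinct secondary/tertiary servers back the regions whose primary is a given server, and the dispersion score divides that by twice the region count on the primary. A compact restatement of that arithmetic, using plain maps instead of the report's fields (DispersionSketch is an invented helper), is:

import java.util.Map;
import java.util.Set;

final class DispersionSketch {
  private DispersionSketch() {
  }

  // regionsOnPrimary: primary server -> number of regions it is primary for
  // secTerServers:    primary server -> distinct secondary/tertiary servers backing those regions
  // Returns the average dispersion score across all primaries.
  static float averageDispersionScore(Map<String, Integer> regionsOnPrimary,
      Map<String, Set<String>> secTerServers) {
    float sum = 0;
    for (Map.Entry<String, Integer> e : regionsOnPrimary.entrySet()) {
      int regions = e.getValue();
      Set<String> backups = secTerServers.get(e.getKey());
      if (regions != 0 && backups != null) {
        // Each region contributes two favored-node slots (secondary + tertiary),
        // so a fully dispersed primary scores 1.0.
        sum += backups.size() / (regions * 2.0f);
      }
    }
    return regionsOnPrimary.isEmpty() ? 0 : sum / regionsOnPrimary.size();
  }
}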
*/ public List getDispersionInformation() { List dispersion = new ArrayList<>(); @@ -446,41 +423,38 @@ public class AssignmentVerificationReport { public void print(boolean isDetailMode) { if (!isFilledUp) { - System.err.println("[Error] Region assignment verification report" + - "hasn't been filled up"); + System.err.println("[Error] Region assignment verification report" + "hasn't been filled up"); } - DecimalFormat df = new java.text.DecimalFormat( "#.##"); + DecimalFormat df = new java.text.DecimalFormat("#.##"); // Print some basic information - System.out.println("Region Assignment Verification for Table: " + tableName + - "\n\tTotal regions : " + totalRegions); + System.out.println("Region Assignment Verification for Table: " + tableName + + "\n\tTotal regions : " + totalRegions); // Print the number of regions on each kinds of the favored nodes - System.out.println("\tTotal regions on favored nodes " + - totalFavoredAssignments); + System.out.println("\tTotal regions on favored nodes " + totalFavoredAssignments); for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { - System.out.println("\t\tTotal regions on "+ p.toString() + - " region servers: " + favoredNodes[p.ordinal()]); + System.out.println( + "\t\tTotal regions on " + p.toString() + " region servers: " + favoredNodes[p.ordinal()]); } // Print the number of regions in each kinds of invalid assignment - System.out.println("\tTotal unassigned regions: " + - unAssignedRegionsList.size()); + System.out.println("\tTotal unassigned regions: " + unAssignedRegionsList.size()); if (isDetailMode) { for (RegionInfo region : unAssignedRegionsList) { System.out.println("\t\t" + region.getRegionNameAsString()); } } - System.out.println("\tTotal regions NOT on favored nodes: " + - nonFavoredAssignedRegionList.size()); + System.out + .println("\tTotal regions NOT on favored nodes: " + nonFavoredAssignedRegionList.size()); if (isDetailMode) { for (RegionInfo region : nonFavoredAssignedRegionList) { System.out.println("\t\t" + region.getRegionNameAsString()); } } - System.out.println("\tTotal regions without favored nodes: " + - regionsWithoutValidFavoredNodes.size()); + System.out + .println("\tTotal regions without favored nodes: " + regionsWithoutValidFavoredNodes.size()); if (isDetailMode) { for (RegionInfo region : regionsWithoutValidFavoredNodes) { System.out.println("\t\t" + region.getRegionNameAsString()); @@ -490,77 +464,68 @@ public class AssignmentVerificationReport { // Print the locality information if enabled if (this.enforceLocality && totalRegions != 0) { // Print the actual locality for this table - float actualLocality = 100 * - this.actualLocalitySummary / (float) totalRegions; - System.out.println("\n\tThe actual avg locality is " + - df.format(actualLocality) + " %"); + float actualLocality = 100 * this.actualLocalitySummary / (float) totalRegions; + System.out.println("\n\tThe actual avg locality is " + df.format(actualLocality) + " %"); // Print the expected locality if regions are placed on the each kinds of // favored nodes for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { - float avgLocality = 100 * - (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions); - System.out.println("\t\tThe expected avg locality if all regions" + - " on the " + p.toString() + " region servers: " - + df.format(avgLocality) + " %"); + float avgLocality = 100 * (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions); + System.out.println("\t\tThe expected avg locality if all 
regions" + " on the " + + p.toString() + " region servers: " + df.format(avgLocality) + " %"); } } // Print the region balancing information - System.out.println("\n\tTotal hosting region servers: " + - totalRegionServers); + System.out.println("\n\tTotal hosting region servers: " + totalRegionServers); // Print the region balance information if (totalRegionServers != 0) { - System.out.println( - "\tAvg dispersion num: " +df.format(avgDispersionNum) + - " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + - " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + - " hosts;"); + System.out.println("\tAvg dispersion num: " + df.format(avgDispersionNum) + + " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + + " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + " hosts;"); - System.out.println("\t\tThe number of the region servers with the max" + - " dispersion num: " + this.maxDispersionNumServerSet.size()); + System.out.println("\t\tThe number of the region servers with the max" + " dispersion num: " + + this.maxDispersionNumServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionNumServerSet); } - System.out.println("\t\tThe number of the region servers with the min" + - " dispersion num: " + this.minDispersionNumServerSet.size()); + System.out.println("\t\tThe number of the region servers with the min" + " dispersion num: " + + this.minDispersionNumServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionNumServerSet); } - System.out.println( - "\tAvg dispersion score: " + df.format(avgDispersionScore) + - ";\tMax dispersion score: " + df.format(maxDispersionScore) + - ";\tMin dispersion score: " + df.format(minDispersionScore) + ";"); + System.out.println("\tAvg dispersion score: " + df.format(avgDispersionScore) + + ";\tMax dispersion score: " + df.format(maxDispersionScore) + ";\tMin dispersion score: " + + df.format(minDispersionScore) + ";"); - System.out.println("\t\tThe number of the region servers with the max" + - " dispersion score: " + this.maxDispersionScoreServerSet.size()); + System.out.println("\t\tThe number of the region servers with the max" + " dispersion score: " + + this.maxDispersionScoreServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionScoreServerSet); } - System.out.println("\t\tThe number of the region servers with the min" + - " dispersion score: " + this.minDispersionScoreServerSet.size()); + System.out.println("\t\tThe number of the region servers with the min" + " dispersion score: " + + this.minDispersionScoreServerSet.size()); if (isDetailMode) { printHServerAddressSet(minDispersionScoreServerSet); } - System.out.println( - "\tAvg regions/region server: " + df.format(avgRegionsOnRS) + - ";\tMax regions/region server: " + maxRegionsOnRS + - ";\tMin regions/region server: " + minRegionsOnRS + ";"); + System.out.println("\tAvg regions/region server: " + df.format(avgRegionsOnRS) + + ";\tMax regions/region server: " + maxRegionsOnRS + ";\tMin regions/region server: " + + minRegionsOnRS + ";"); // Print the details about the most loaded region servers - System.out.println("\t\tThe number of the most loaded region servers: " - + mostLoadedRSSet.size()); + System.out + .println("\t\tThe number of the most loaded region servers: " + mostLoadedRSSet.size()); if (isDetailMode) { printHServerAddressSet(mostLoadedRSSet); } // Print the details about the least loaded region servers - System.out.println("\t\tThe number of the least loaded region servers: " - + 
leastLoadedRSSet.size()); + System.out + .println("\t\tThe number of the least loaded region servers: " + leastLoadedRSSet.size()); if (isDetailMode) { printHServerAddressSet(leastLoadedRSSet); } @@ -601,10 +566,8 @@ public class AssignmentVerificationReport { } /** - * Return the number of regions based on the position (primary/secondary/ - * tertiary) assigned to their favored nodes - * @param position - * @return the number of regions + * Return the number of regions based on the position (primary/secondary/ tertiary) assigned to + * their favored nodes n * @return the number of regions */ int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) { return favoredNodes[position.ordinal()]; @@ -612,10 +575,10 @@ public class AssignmentVerificationReport { private void printHServerAddressSet(Set serverSet) { if (serverSet == null) { - return ; + return; } int i = 0; - for (ServerName addr : serverSet){ + for (ServerName addr : serverSet) { if ((i++) % 3 == 0) { System.out.print("\n\t\t\t"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java index 2f75560dae8..67d8ef80ce6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import java.io.IOException; @@ -35,11 +34,9 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Caches the cluster ID of the cluster. For standby masters, this is used to serve the client - * RPCs that fetch the cluster ID. ClusterID is only created by an active master if one does not - * already exist. Standby masters just read the information from the file system. This class is - * thread-safe. - * + * Caches the cluster ID of the cluster. For standby masters, this is used to serve the client RPCs + * that fetch the cluster ID. ClusterID is only created by an active master if one does not already + * exist. Standby masters just read the information from the file system. This class is thread-safe. * TODO: Make it a singleton without affecting concurrent junit tests. */ @InterfaceAudience.Private @@ -95,8 +92,8 @@ public class CachedClusterId { /** * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress, - * synchronously fetches the cluster ID and sets it. If an attempt is already in progress, - * returns right away and the caller is expected to wait for the fetch to finish. + * synchronously fetches the cluster ID and sets it. If an attempt is already in progress, returns + * right away and the caller is expected to wait for the fetch to finish. * @return true if the attempt is done, false if another thread is already fetching it. */ private boolean attemptFetch() { @@ -130,12 +127,11 @@ public class CachedClusterId { } /** - * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached - * copy and is thread-safe. Optimized to do a single fetch when there are multiple threads are - * trying get from a clean cache. - * - * @return ClusterId by reading from FileSystem or null in any error case or cluster ID does - * not exist on the file system or if the server initiated a tear down. 
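Editorial sketch: the CachedClusterId javadoc above describes a fetch-once cache around attemptFetch(): many threads may ask for the cluster ID, but only one should read it from the file system while the rest wait for that read to finish. The generic coordination below is a sketch under that description, with a placeholder readFromFileSystem() in place of the real file-system call; it is not the class from the patch.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical single-flight cache; readFromFileSystem() stands in for the real FS read.
class ClusterIdCacheSketch {
  private final AtomicReference<String> cached = new AtomicReference<>();
  private final AtomicBoolean fetchInProgress = new AtomicBoolean(false);

  String getFromCacheOrFetch() throws InterruptedException {
    String id = cached.get();
    if (id != null) {
      return id;
    }
    if (fetchInProgress.compareAndSet(false, true)) {
      try {
        // We won the race: do the (possibly slow) read and publish the result.
        cached.compareAndSet(null, readFromFileSystem());
      } finally {
        synchronized (this) {
          fetchInProgress.set(false);
          notifyAll(); // wake any waiters below
        }
      }
    } else {
      // Another thread is fetching; wait for it to finish.
      synchronized (this) {
        while (fetchInProgress.get()) {
          wait();
        }
      }
    }
    return cached.get();
  }

  private String readFromFileSystem() {
    return "hypothetical-cluster-id";
  }
}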
+ * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached copy + * and is thread-safe. Optimized to do a single fetch when there are multiple threads are trying + * get from a clean cache. + * @return ClusterId by reading from FileSystem or null in any error case or cluster ID does not + * exist on the file system or if the server initiated a tear down. */ public String getFromCacheOrFetch() { if (server.isStopping() || server.isStopped()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java index 0f7153ba801..1121101024d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,20 +19,17 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServiceNotRunningException; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.NonceKey; +import org.apache.yetus.audience.InterfaceAudience; /** - * View and edit the current cluster schema. Use this API making any modification to - * namespaces, tables, etc. - * - *

<h2>Implementation Notes</h2>
- * Nonces are for when operation is non-idempotent to ensure once-only semantic, even
- * across process failures.
+ * View and edit the current cluster schema. Use this API making any modification to namespaces,
+ * tables, etc.
+ * <h2>Implementation Notes</h2>
          Nonces are for when operation is non-idempotent to ensure once-only + * semantic, even across process failures. */ // ClusterSchema is introduced to encapsulate schema modification. Currently the different aspects // are spread about the code base. This effort is about cleanup, shutting down access, and @@ -61,16 +58,15 @@ public interface ClusterSchema { * Timeout for cluster operations in milliseconds. */ public static final String HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY = - "hbase.master.cluster.schema.operation.timeout"; + "hbase.master.cluster.schema.operation.timeout"; /** * Default operation timeout in milliseconds. */ - public static final int DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT = - 5 * 60 * 1000; + public static final int DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT = 5 * 60 * 1000; /** - * For internals use only. Do not use! Provisionally part of this Interface. - * Prefer the high-level APIs available elsewhere in this API. + * For internals use only. Do not use! Provisionally part of this Interface. Prefer the high-level + * APIs available elsewhere in this API. * @return Instance of {@link TableNamespaceManager} */ // TODO: Remove from here. Keep internal. This Interface is too high-level to host this accessor. @@ -79,34 +75,33 @@ public interface ClusterSchema { /** * Create a new Namespace. * @param namespaceDescriptor descriptor for new Namespace - * @param nonceKey A unique identifier for this operation from the client or process. - * @param latch A latch to block on for precondition validation + * @param nonceKey A unique identifier for this operation from the client or process. + * @param latch A latch to block on for precondition validation * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ - long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, + ProcedurePrepareLatch latch) throws IOException; /** * Modify an existing Namespace. * @param nonceKey A unique identifier for this operation from the client or process. - * @param latch A latch to block on for precondition validation + * @param latch A latch to block on for precondition validation * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ - long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, + ProcedurePrepareLatch latch) throws IOException; /** - * Delete an existing Namespace. - * Only empty Namespaces (no tables) can be removed. + * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed. * @param nonceKey A unique identifier for this operation from the client or process. 
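Editorial sketch: the ClusterSchema javadoc above says nonces give non-idempotent namespace operations once-only semantics across retries and process failures. The essence is that the server keys submissions by the client-supplied nonce and hands back the previously started procedure instead of launching a second one; the toy registry below (the string key and procedure ids are simplified stand-ins for the real ProcedureExecutor bookkeeping) only illustrates that idea.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Toy nonce registry: the same (group, nonce) pair always maps to the same procedure id,
// so a client retry after a timeout does not run the operation twice.
class NonceRegistrySketch {
  private final ConcurrentMap<String, Long> submitted = new ConcurrentHashMap<>();
  private final AtomicLong nextProcId = new AtomicLong(1);

  long submit(long nonceGroup, long nonce, Runnable operation) {
    String key = nonceGroup + ":" + nonce;
    return submitted.computeIfAbsent(key, k -> {
      long procId = nextProcId.getAndIncrement();
      operation.run(); // in the real system this schedules a Procedure
      return procId;
    });
  }
}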
- * @param latch A latch to block on for precondition validation + * @param latch A latch to block on for precondition validation * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ long deleteNamespace(String name, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + throws IOException; /** * Get a Namespace diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java index fadb28ccca9..4e4e1d1e5a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,12 @@ package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service; /** * Mixes in ClusterSchema and Service */ @InterfaceAudience.Private -public interface ClusterSchemaService extends ClusterSchema, Service {} +public interface ClusterSchemaService extends ClusterSchema, Service { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java index 97597218471..ca320871bee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,11 +22,9 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceNotFoundException; import org.apache.hadoop.hbase.ServiceNotRunningException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure; import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; @@ -34,8 +32,10 @@ import org.apache.hadoop.hbase.master.procedure.ModifyNamespaceProcedure; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService; import org.apache.hadoop.hbase.util.NonceKey; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService; @InterfaceAudience.Private class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaService { @@ -51,7 +51,6 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS // All below are synchronized so consistent view on whether running or not. 
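Aside on the ClusterSchema API whose signatures are reflowed above: the namespace DDL calls take a NonceKey so that a retried request resolves to the same procedure. A minimal caller sketch, assuming a ClusterSchema handle obtained from the master and the no-op ProcedurePrepareLatch; anything not in the interface shown above is illustrative only:

import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.master.ClusterSchema;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.util.NonceKey;

/** Sketch only: submits a namespace create through the interface reformatted above. */
final class ClusterSchemaUsageSketch {
  static long createNamespaceOnceOnly(ClusterSchema schema, String name, long nonceGroup,
    long nonce) throws IOException {
    // Re-submitting with the same NonceKey after a process failure maps back to the same
    // procedure, which is the "once-only semantic" the class javadoc describes.
    NonceKey nonceKey = new NonceKey(nonceGroup, nonce);
    NamespaceDescriptor ns = NamespaceDescriptor.create(name).build();
    // No precondition latch is needed for the sketch; getNoopLatch() is assumed here.
    return schema.createNamespace(ns, nonceKey, ProcedurePrepareLatch.getNoopLatch());
  }
}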
- private synchronized void checkIsRunning() throws ServiceNotRunningException { if (!isRunning()) throw new ServiceNotRunningException(); } @@ -82,7 +81,7 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS } private long submitProcedure(final Procedure procedure, - final NonceKey nonceKey) throws ServiceNotRunningException { + final NonceKey nonceKey) throws ServiceNotRunningException { checkIsRunning(); ProcedureExecutor pe = this.masterServices.getMasterProcedureExecutor(); return pe.submitProcedure(procedure, nonceKey); @@ -90,27 +89,25 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS @Override public long createNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey, - final ProcedurePrepareLatch latch) - throws IOException { + final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new CreateNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, + latch), nonceKey); } @Override public long modifyNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey, - final ProcedurePrepareLatch latch) throws IOException { + final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new ModifyNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, + latch), nonceKey); } @Override - public long deleteNamespace(String name, final NonceKey nonceKey, final ProcedurePrepareLatch latch) - throws IOException { + public long deleteNamespace(String name, final NonceKey nonceKey, + final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new DeleteNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), name, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), name, latch), nonceKey); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index dd67c05eae0..adbbac0dfba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -15,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - - package org.apache.hadoop.hbase.master; import java.io.Closeable; @@ -47,8 +45,11 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -64,27 +65,24 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.InternetProtocolFamil import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageEncoder; import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class to publish the cluster status to the client. This allows them to know immediately - * the dead region servers, hence to cut the connection they have with them, eventually stop - * waiting on the socket. This improves the mean time to recover, and as well allows to increase - * on the client the different timeouts, as the dead servers will be detected separately. + * Class to publish the cluster status to the client. This allows them to know immediately the dead + * region servers, hence to cut the connection they have with them, eventually stop waiting on the + * socket. This improves the mean time to recover, and as well allows to increase on the client the + * different timeouts, as the dead servers will be detected separately. */ @InterfaceAudience.Private public class ClusterStatusPublisher extends ScheduledChore { private static Logger LOG = LoggerFactory.getLogger(ClusterStatusPublisher.class); /** - * The implementation class used to publish the status. Default is null (no publish). - * Use org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher to multicast the + * The implementation class used to publish the status. Default is null (no publish). Use + * org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher to multicast the * status. */ public static final String STATUS_PUBLISHER_CLASS = "hbase.status.publisher.class"; - public static final Class - DEFAULT_STATUS_PUBLISHER_CLASS = + public static final Class< + ? extends ClusterStatusPublisher.Publisher> DEFAULT_STATUS_PUBLISHER_CLASS = org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher.class; /** @@ -101,8 +99,8 @@ public class ClusterStatusPublisher extends ScheduledChore { private boolean connected = false; /** - * We want to limit the size of the protobuf message sent, do fit into a single packet. - * a reasonable size for ip / ethernet is less than 1Kb. + * We want to limit the size of the protobuf message sent, do fit into a single packet. a + * reasonable size for ip / ethernet is less than 1Kb. 
*/ public final static int MAX_SERVER_PER_MESSAGE = 10; @@ -113,10 +111,9 @@ public class ClusterStatusPublisher extends ScheduledChore { public final static int NB_SEND = 5; public ClusterStatusPublisher(HMaster master, Configuration conf, - Class publisherClass) - throws IOException { - super("ClusterStatusPublisher for=" + master.getName(), master, conf.getInt( - STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD)); + Class publisherClass) throws IOException { + super("ClusterStatusPublisher for=" + master.getName(), master, + conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD)); this.master = master; this.messagePeriod = conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD); try { @@ -162,13 +159,10 @@ public class ClusterStatusPublisher extends ScheduledChore { // We're reusing an existing protobuf message, but we don't send everything. // This could be extended in the future, for example if we want to send stuff like the - // hbase:meta server name. - publisher.publish(ClusterMetricsBuilder.newBuilder() - .setHBaseVersion(VersionInfo.getVersion()) + // hbase:meta server name. + publisher.publish(ClusterMetricsBuilder.newBuilder().setHBaseVersion(VersionInfo.getVersion()) .setClusterId(master.getMasterFileSystem().getClusterId().toString()) - .setMasterName(master.getServerName()) - .setDeadServerNames(sns) - .build()); + .setMasterName(master.getServerName()).setDeadServerNames(sns).build()); } @Override @@ -183,8 +177,8 @@ public class ClusterStatusPublisher extends ScheduledChore { /** * Create the dead server to send. A dead server is sent NB_SEND times. We send at max - * MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly - * dead first. + * MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly dead + * first. */ protected List generateDeadServersListToSend() { // We're getting the message sent since last time, and add them to the list @@ -221,8 +215,8 @@ public class ClusterStatusPublisher extends ScheduledChore { } /** - * Get the servers which died since a given timestamp. - * protected because it can be subclassed by the tests. + * Get the servers which died since a given timestamp. protected because it can be subclassed by + * the tests. 
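Aside: the publishing policy reflowed in this hunk (each dead server announced NB_SEND times, at most MAX_SERVER_PER_MESSAGE servers per datagram, most recently dead first) can be pictured with this standalone sketch; it uses plain Strings and timestamps instead of ServerName and is not the HBase implementation:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Sketch only: batching policy for dead-server announcements, as described above. */
final class DeadServerBatchSketch {
  static final int MAX_SERVER_PER_MESSAGE = 10; // keep each datagram well under ~1KB
  static final int NB_SEND = 5;                 // re-announce each server a few times

  private final Map<String, Integer> sendCounts = new HashMap<>();

  /** @param deadSince dead server name -> death timestamp */
  List<String> nextBatch(Map<String, Long> deadSince) {
    List<Map.Entry<String, Long>> entries = new ArrayList<>(deadSince.entrySet());
    entries.sort((a, b) -> Long.compare(b.getValue(), a.getValue())); // newest deaths first
    List<String> batch = new ArrayList<>();
    for (Map.Entry<String, Long> e : entries) {
      if (batch.size() >= MAX_SERVER_PER_MESSAGE) {
        break;
      }
      int sent = sendCounts.getOrDefault(e.getKey(), 0);
      if (sent < NB_SEND) {
        batch.add(e.getKey());
        sendCounts.put(e.getKey(), sent + 1);
      }
    }
    return batch;
  }
}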
*/ protected List> getDeadServers(long since) { if (master.getServerManager() == null) { @@ -232,7 +226,6 @@ public class ClusterStatusPublisher extends ScheduledChore { return master.getServerManager().getDeadServers().copyDeadServersSince(since); } - public interface Publisher extends Closeable { void connect(Configuration conf) throws IOException; @@ -260,10 +253,10 @@ public class ClusterStatusPublisher extends ScheduledChore { @Override public void connect(Configuration conf) throws IOException { - String mcAddress = conf.get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); - int port = conf.getInt(HConstants.STATUS_MULTICAST_PORT, - HConstants.DEFAULT_STATUS_MULTICAST_PORT); + String mcAddress = + conf.get(HConstants.STATUS_MULTICAST_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); + int port = + conf.getInt(HConstants.STATUS_MULTICAST_PORT, HConstants.DEFAULT_STATUS_MULTICAST_PORT); String bindAddress = conf.get(HConstants.STATUS_MULTICAST_PUBLISHER_BIND_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_PUBLISHER_BIND_ADDRESS); String niName = conf.get(HConstants.STATUS_MULTICAST_NI_NAME); @@ -300,8 +293,7 @@ public class ClusterStatusPublisher extends ScheduledChore { Bootstrap b = new Bootstrap(); b.group(group) .channelFactory(new HBaseDatagramChannelFactory(NioDatagramChannel.class, family)) - .option(ChannelOption.SO_REUSEADDR, true) - .handler(new ClusterMetricsEncoder(isa)); + .option(ChannelOption.SO_REUSEADDR, true).handler(new ClusterMetricsEncoder(isa)); try { LOG.debug("Channel bindAddress={}, networkInterface={}, INA={}", bindAddress, ni, ina); channel = (DatagramChannel) b.bind(bindAddress, 0).sync().channel(); @@ -321,7 +313,7 @@ public class ClusterStatusPublisher extends ScheduledChore { } private static final class HBaseDatagramChannelFactory - implements ChannelFactory { + implements ChannelFactory { private final Class clazz; private final InternetProtocolFamily family; @@ -348,7 +340,7 @@ public class ClusterStatusPublisher extends ScheduledChore { } private static final class ClusterMetricsEncoder - extends MessageToMessageEncoder { + extends MessageToMessageEncoder { final private InetSocketAddress isa; private ClusterMetricsEncoder(InetSocketAddress isa) { @@ -358,8 +350,8 @@ public class ClusterStatusPublisher extends ScheduledChore { @Override protected void encode(ChannelHandlerContext channelHandlerContext, ClusterMetrics clusterStatus, List objects) { - objects.add(new DatagramPacket(Unpooled.wrappedBuffer( - ClusterMetricsBuilder.toClusterStatus(clusterStatus).toByteArray()), isa)); + objects.add(new DatagramPacket(Unpooled + .wrappedBuffer(ClusterMetricsBuilder.toClusterStatus(clusterStatus).toByteArray()), isa)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index c527bc02826..84d660e66ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -35,15 +35,13 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - /** - * Class to hold dead servers list and utility querying dead server list. - * Servers are added when they expire or when we find them in filesystem on startup. - * When a server crash procedure is queued, it will populate the processing list and - * then remove the server from processing list when done. 
Servers are removed from - * dead server list when a new instance is started over the old on same hostname and - * port or when new Master comes online tidying up after all initialization. Processing - * list and deadserver list are not tied together (you don't have to be in deadservers + * Class to hold dead servers list and utility querying dead server list. Servers are added when + * they expire or when we find them in filesystem on startup. When a server crash procedure is + * queued, it will populate the processing list and then remove the server from processing list when + * done. Servers are removed from dead server list when a new instance is started over the old on + * same hostname and port or when new Master comes online tidying up after all initialization. + * Processing list and deadserver list are not tied together (you don't have to be in deadservers * list to be processing and vice versa). */ @InterfaceAudience.Private @@ -51,18 +49,16 @@ public class DeadServer { private static final Logger LOG = LoggerFactory.getLogger(DeadServer.class); /** - * Set of known dead servers. On znode expiration, servers are added here. - * This is needed in case of a network partitioning where the server's lease - * expires, but the server is still running. After the network is healed, - * and it's server logs are recovered, it will be told to call server startup - * because by then, its regions have probably been reassigned. + * Set of known dead servers. On znode expiration, servers are added here. This is needed in case + * of a network partitioning where the server's lease expires, but the server is still running. + * After the network is healed, and it's server logs are recovered, it will be told to call server + * startup because by then, its regions have probably been reassigned. */ private final Map deadServers = new HashMap<>(); /** - * Set of dead servers currently being processed by a SCP. - * Added to this list at the start of SCP and removed after it is done - * processing the crash. + * Set of dead servers currently being processed by a SCP. Added to this list at the start of SCP + * and removed after it is done processing the crash. */ private final Set processingServers = new HashSet<>(); @@ -75,10 +71,8 @@ public class DeadServer { } /** - * Checks if there are currently any dead servers being processed by the - * master. Returns true if at least one region server is currently being - * processed as dead. - * + * Checks if there are currently any dead servers being processed by the master. Returns true if + * at least one region server is currently being processed as dead. * @return true if any RS are being processed as dead */ synchronized boolean areDeadServersInProgress() { @@ -130,10 +124,9 @@ public class DeadServer { } /** - * Handles restart of a server. The new server instance has a different start code. - * The new start code should be greater than the old one. We don't check that here. - * Removes the old server from deadserver list. - * + * Handles restart of a server. The new server instance has a different start code. The new start + * code should be greater than the old one. We don't check that here. Removes the old server from + * deadserver list. * @param newServerName Servername as either host:port or * host,port,startcode. * @return true if this server was dead before and coming back alive again @@ -156,14 +149,13 @@ public class DeadServer { } /** - * @param newServerName Server to match port and hostname against. 
+ * @param newServerName Server to match port and hostname against. * @param deadServerIterator Iterator primed so can call 'next' on it. - * @return True if newServerName and current primed - * iterator ServerName have same host and port and we removed old server - * from iterator and from processing list. + * @return True if newServerName and current primed iterator ServerName have same + * host and port and we removed old server from iterator and from processing list. */ private boolean cleanOldServerName(ServerName newServerName, - Iterator deadServerIterator) { + Iterator deadServerIterator) { ServerName sn = deadServerIterator.next(); if (ServerName.isSameAddress(sn, newServerName)) { // Remove from dead servers list. Don't remove from the processing list -- @@ -200,10 +192,10 @@ public class DeadServer { * @return a sorted array list, by death time, lowest values first. */ synchronized List> copyDeadServersSince(long ts) { - List> res = new ArrayList<>(size()); + List> res = new ArrayList<>(size()); - for (Map.Entry entry:deadServers.entrySet()){ - if (entry.getValue() >= ts){ + for (Map.Entry entry : deadServers.entrySet()) { + if (entry.getValue() >= ts) { res.add(new Pair<>(entry.getKey(), entry.getValue())); } } @@ -211,13 +203,13 @@ public class DeadServer { Collections.sort(res, (o1, o2) -> o1.getSecond().compareTo(o2.getSecond())); return res; } - + /** * Get the time when a server died * @param deadServerName the dead server name - * @return the date when the server died + * @return the date when the server died */ - public synchronized Date getTimeOfDeath(final ServerName deadServerName){ + public synchronized Date getTimeOfDeath(final ServerName deadServerName) { Long time = deadServers.get(deadServerName); return time == null ? null : new Date(time); } @@ -229,8 +221,8 @@ public class DeadServer { */ public synchronized boolean removeDeadServer(final ServerName deadServerName) { Preconditions.checkState(!processingServers.contains(deadServerName), - "Asked to remove server still in processingServers set " + deadServerName + - " (numProcessing=" + processingServers.size() + ")"); + "Asked to remove server still in processingServers set " + deadServerName + " (numProcessing=" + + processingServers.size() + ")"); return this.deadServers.remove(deadServerName) != null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java index 14c4a3ec85f..28795eab28e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,33 +21,30 @@ import java.io.IOException; import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; - +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ServerName; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Tracks the list of draining region servers via ZK. - * - *
<p>
          This class is responsible for watching for changes to the draining - * servers list. It handles adds/deletes in the draining RS list and - * watches each node. - * - *
<p>
          If an RS gets deleted from draining list, we call + *
<p>
          + * This class is responsible for watching for changes to the draining servers list. It handles + * adds/deletes in the draining RS list and watches each node. + *
<p>
          + * If an RS gets deleted from draining list, we call * {@link ServerManager#removeServerFromDrainList(ServerName)} - * - *
<p>
          If an RS gets added to the draining list, we add a watcher to it and call + *
<p>
          + * If an RS gets added to the draining list, we add a watcher to it and call * {@link ServerManager#addServerToDrainList(ServerName)} - * - *
<p>
          This class is deprecated in 2.0 because decommission/draining API goes through - * master in 2.0. Can remove this class in 3.0. - * + *
<p>
          + * This class is deprecated in 2.0 because decommission/draining API goes through master in 2.0. Can + * remove this class in 3.0. */ @InterfaceAudience.Private public class DrainingServerTracker extends ZKListener { @@ -57,8 +54,8 @@ public class DrainingServerTracker extends ZKListener { private final NavigableSet drainingServers = new TreeSet<>(); private Abortable abortable; - public DrainingServerTracker(ZKWatcher watcher, - Abortable abortable, ServerManager serverManager) { + public DrainingServerTracker(ZKWatcher watcher, Abortable abortable, + ServerManager serverManager) { super(watcher); this.abortable = abortable; this.serverManager = serverManager; @@ -66,10 +63,8 @@ public class DrainingServerTracker extends ZKListener { /** * Starts the tracking of draining RegionServers. - * - *
<p>
          All Draining RSs will be tracked after this method is called. - * - * @throws KeeperException + *
<p>
          + * All Draining RSs will be tracked after this method is called. n */ public void start() throws KeeperException, IOException { watcher.registerListener(this); @@ -77,7 +72,7 @@ public class DrainingServerTracker extends ZKListener { serverManager.registerListener(new ServerListener() { @Override public void serverAdded(ServerName sn) { - if (drainingServers.contains(sn)){ + if (drainingServers.contains(sn)) { serverManager.addServerToDrainList(sn); } } @@ -88,21 +83,20 @@ public class DrainingServerTracker extends ZKListener { } private void add(final List servers) throws IOException { - synchronized(this.drainingServers) { + synchronized (this.drainingServers) { this.drainingServers.clear(); - for (String n: servers) { + for (String n : servers) { final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(n)); this.drainingServers.add(sn); this.serverManager.addServerToDrainList(sn); - LOG.info("Draining RS node created, adding to list [" + - sn + "]"); + LOG.info("Draining RS node created, adding to list [" + sn + "]"); } } } private void remove(final ServerName sn) { - synchronized(this.drainingServers) { + synchronized (this.drainingServers) { this.drainingServers.remove(sn); this.serverManager.removeServerFromDrainList(sn); } @@ -110,17 +104,16 @@ public class DrainingServerTracker extends ZKListener { @Override public void nodeDeleted(final String path) { - if(path.startsWith(watcher.getZNodePaths().drainingZNode)) { + if (path.startsWith(watcher.getZNodePaths().drainingZNode)) { final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(path)); - LOG.info("Draining RS node deleted, removing from list [" + - sn + "]"); + LOG.info("Draining RS node deleted, removing from list [" + sn + "]"); remove(sn); } } @Override public void nodeChildrenChanged(final String path) { - if(path.equals(watcher.getZNodePaths().drainingZNode)) { + if (path.equals(watcher.getZNodePaths().drainingZNode)) { try { final List newNodes = ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java index d37a80a1c3d..f8860f7ccca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,17 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import java.util.Map; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.locking.LockManager; @@ -33,10 +28,13 @@ import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.LockType; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * The Class ExpiredMobFileCleanerChore for running cleaner regularly to remove the expired - * mob files. + * The Class ExpiredMobFileCleanerChore for running cleaner regularly to remove the expired mob + * files. */ @InterfaceAudience.Private public class ExpiredMobFileCleanerChore extends ScheduledChore { @@ -46,18 +44,20 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore { private ExpiredMobFileCleaner cleaner; public ExpiredMobFileCleanerChore(HMaster master) { - super(master.getServerName() + "-ExpiredMobFileCleanerChore", master, master.getConfiguration() - .getInt(MobConstants.MOB_CLEANER_PERIOD, MobConstants.DEFAULT_MOB_CLEANER_PERIOD), master - .getConfiguration().getInt(MobConstants.MOB_CLEANER_PERIOD, - MobConstants.DEFAULT_MOB_CLEANER_PERIOD), TimeUnit.SECONDS); + super(master.getServerName() + "-ExpiredMobFileCleanerChore", master, + master.getConfiguration().getInt(MobConstants.MOB_CLEANER_PERIOD, + MobConstants.DEFAULT_MOB_CLEANER_PERIOD), + master.getConfiguration().getInt(MobConstants.MOB_CLEANER_PERIOD, + MobConstants.DEFAULT_MOB_CLEANER_PERIOD), + TimeUnit.SECONDS); this.master = master; cleaner = new ExpiredMobFileCleaner(); cleaner.setConf(master.getConfiguration()); } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Intentional") protected void chore() { try { TableDescriptors htds = master.getTableDescriptors(); @@ -68,8 +68,8 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore { // clean only for mob-enabled column. // obtain a read table lock before cleaning, synchronize with MobFileCompactionChore. 
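Aside: the chore being reindented here follows the usual ScheduledChore pattern of reading its period from configuration and doing its work under a shared lock. A minimal sketch of that shape, with a made-up config key; only ScheduledChore, Stoppable and Configuration are real APIs here, and the constructor overload with an initial delay and TimeUnit is assumed to match the one used above:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

/** Sketch only: shape of a periodic master-side cleaner chore. */
class ExampleCleanerChore extends ScheduledChore {
  ExampleCleanerChore(Stoppable stopper, Configuration conf) {
    // Period and initial delay both come from configuration, in seconds, mirroring
    // how ExpiredMobFileCleanerChore reads MobConstants.MOB_CLEANER_PERIOD above.
    super("ExampleCleanerChore", stopper, conf.getInt("example.cleaner.period", 86400),
      conf.getInt("example.cleaner.period", 86400), TimeUnit.SECONDS);
  }

  @Override
  protected void chore() {
    // The real chore takes a shared table lock before cleaning (see the hunk below),
    // so it does not race with MOB compaction touching the same files.
  }
}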
final LockManager.MasterLock lock = master.getLockManager().createMasterLock( - MobUtils.getTableLockName(htd.getTableName()), LockType.SHARED, - this.getClass().getSimpleName() + ": Cleaning expired mob files"); + MobUtils.getTableLockName(htd.getTableName()), LockType.SHARED, + this.getClass().getSimpleName() + ": Cleaning expired mob files"); try { lock.acquire(); cleaner.cleanExpiredMobFiles(htd.getTableName().getNameAsString(), hcd); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 75eed376a51..ac5faecf47f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -247,6 +247,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder; import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext; import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; @@ -433,8 +434,8 @@ public class HMaster extends HRegionServer implements MasterServices { * *
<p>
          * Remaining steps of initialization occur in - * {@link #finishActiveMasterInitialization(MonitoredTask)} after the master becomes the - * active one. + * {@link #finishActiveMasterInitialization(MonitoredTask)} after the master becomes the active + * one. */ public HMaster(final Configuration conf) throws IOException { super(conf); @@ -449,16 +450,16 @@ public class HMaster extends HRegionServer implements MasterServices { maintenanceMode = false; } this.rsFatals = new MemoryBoundedLogMessageBuffer( - conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024)); + conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024)); LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}", getDataRootDir(), - this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false)); + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false)); // Disable usage of meta replicas in the master this.conf.setBoolean(HConstants.USE_META_REPLICAS, false); decorateMasterConfiguration(this.conf); - // Hack! Maps DFSClient => Master for logs. HDFS made this + // Hack! Maps DFSClient => Master for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. if (this.conf.get("mapreduce.task.attempt.id") == null) { this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString()); @@ -471,22 +472,22 @@ public class HMaster extends HRegionServer implements MasterServices { this.maxBalancingTime = getMaxBalancingTime(); this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT, - HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT); + HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT); // Do we publish the status? - boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED, - HConstants.STATUS_PUBLISHED_DEFAULT); + boolean shouldPublish = + conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT); Class publisherClass = - conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS, - ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS, - ClusterStatusPublisher.Publisher.class); + conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS, + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS, + ClusterStatusPublisher.Publisher.class); if (shouldPublish) { if (publisherClass == null) { - LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + - ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS + - " is not set - not publishing status"); + LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS + + " is not set - not publishing status"); } else { clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass); LOG.debug("Created {}", this.clusterStatusPublisherChore); @@ -512,7 +513,7 @@ public class HMaster extends HRegionServer implements MasterServices { * implementation. */ protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn, - org.apache.hadoop.hbase.Server server) throws InterruptedIOException { + org.apache.hadoop.hbase.Server server) throws InterruptedIOException { return new ActiveMasterManager(zk, sn, server); } @@ -548,8 +549,8 @@ public class HMaster extends HRegionServer implements MasterServices { // If on way out, then we are no longer active master. 
this.clusterSchemaService.stopAsync(); try { - this.clusterSchemaService.awaitTerminated( - getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, + this.clusterSchemaService + .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException te) { LOG.warn("Failed shutdown of clusterSchemaService", te); @@ -564,8 +565,8 @@ public class HMaster extends HRegionServer implements MasterServices { if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) { return -1; } - final int infoPort = conf.getInt("hbase.master.info.port.orig", - HConstants.DEFAULT_MASTER_INFOPORT); + final int infoPort = + conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT); // -1 is for disabling info server, so no redirecting if (infoPort < 0 || infoServer == null) { return -1; @@ -576,10 +577,9 @@ public class HMaster extends HRegionServer implements MasterServices { } final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0"); if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) { - String msg = - "Failed to start redirecting jetty server. Address " + addr - + " does not belong to this host. Correct configuration parameter: " - + "hbase.master.info.bindAddress"; + String msg = "Failed to start redirecting jetty server. Address " + addr + + " does not belong to this host. Correct configuration parameter: " + + "hbase.master.info.bindAddress"; LOG.error(msg); throw new IOException(msg); } @@ -596,10 +596,11 @@ public class HMaster extends HRegionServer implements MasterServices { masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler())); final String redirectHostname = - StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; + StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname); - final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); + final WebAppContext context = + new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); context.addServlet(new ServletHolder(redirect), "/*"); context.setServer(masterJettyServer); @@ -619,18 +620,17 @@ public class HMaster extends HRegionServer implements MasterServices { try { super.login(user, host); } catch (IOException ie) { - user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, - SecurityConstants.MASTER_KRB_PRINCIPAL, host); + user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL, + host); } } /** - * If configured to put regions on active master, - * wait till a backup master becomes active. + * If configured to put regions on active master, wait till a backup master becomes active. * Otherwise, loop till the server is stopped or aborted. 
*/ @Override - protected void waitForMasterActive(){ + protected void waitForMasterActive() { if (maintenanceMode) { return; } @@ -642,7 +642,7 @@ public class HMaster extends HRegionServer implements MasterServices { @InterfaceAudience.Private public MasterRpcServices getMasterRpcServices() { - return (MasterRpcServices)rpcServices; + return (MasterRpcServices) rpcServices; } public boolean balanceSwitch(final boolean b) throws IOException { @@ -738,8 +738,8 @@ public class HMaster extends HRegionServer implements MasterServices { // we need to take care of the ZK information synchronization // if given client ZK are not observer nodes ZKWatcher clientZkWatcher = new ZKWatcher(conf, - getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this, - false, true); + getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this, + false, true); this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this); this.metaLocationSyncer.start(); this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this); @@ -748,15 +748,14 @@ public class HMaster extends HRegionServer implements MasterServices { ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId()); } - // Set the cluster as up. If new RSs, they'll be waiting on this before + // Set the cluster as up. If new RSs, they'll be waiting on this before // going ahead with their startup. boolean wasUp = this.clusterStatusTracker.isClusterUp(); if (!wasUp) this.clusterStatusTracker.setClusterUp(); - LOG.info("Active/primary master=" + this.serverName + - ", sessionid=0x" + - Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + - ", setting cluster-up flag (Was=" + wasUp + ")"); + LOG.info("Active/primary master=" + this.serverName + ", sessionid=0x" + + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + + ", setting cluster-up flag (Was=" + wasUp + ")"); // create/initialize the snapshot manager and other procedure managers this.snapshotManager = new SnapshotManager(); @@ -853,7 +852,7 @@ public class HMaster extends HRegionServer implements MasterServices { * time where meta has not been created yet), we will rely on SCP to bring meta online. */ private void finishActiveMasterInitialization(MonitoredTask status) - throws IOException, InterruptedException, KeeperException, ReplicationException { + throws IOException, InterruptedException, KeeperException, ReplicationException { /* * We are active master now... go initialize components we need to run. 
*/ @@ -889,7 +888,7 @@ public class HMaster extends HRegionServer implements MasterServices { Pair result = null; try { result = HBaseFsck.checkAndMarkRunningHbck(this.conf, - HBaseFsck.createLockRetryCounterFactory(this.conf).create()); + HBaseFsck.createLockRetryCounterFactory(this.conf).create()); } finally { if (result != null) { Closeables.close(result.getSecond(), true); @@ -905,19 +904,17 @@ public class HMaster extends HRegionServer implements MasterServices { rsListStorage = new MasterRegionServerList(masterRegion, this); this.serverManager = createServerManager(this, rsListStorage); - if (!conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, - DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) { + if ( + !conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK) + ) { this.splitWALManager = new SplitWALManager(this); } - - tryMigrateMetaLocationsFromZooKeeper(); createProcedureExecutor(); - Map, List>> procsByType = - procedureExecutor.getActiveProceduresNoCopy().stream() - .collect(Collectors.groupingBy(p -> p.getClass())); + Map, List>> procsByType = procedureExecutor + .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass())); // Create Assignment Manager this.assignmentManager = createAssignmentManager(this, masterRegion); @@ -946,9 +943,8 @@ public class HMaster extends HRegionServer implements MasterServices { // state from zookeeper while hbase2 reads it from hbase:meta. Disable if no hbase1 clients. this.tableStateManager = this.conf.getBoolean(MirroringTableStateManager.MIRROR_TABLE_STATE_TO_ZK_KEY, true) - ? - new MirroringTableStateManager(this): - new TableStateManager(this); + ? new MirroringTableStateManager(this) + : new TableStateManager(this); status.setStatus("Initializing ZK system trackers"); initializeZKBasedSystemTrackers(); @@ -957,7 +953,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Start the Zombie master detector after setting master as active, see HBASE-21535 Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), - "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime()); + "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime()); zombieDetector.setDaemon(true); zombieDetector.start(); @@ -965,7 +961,7 @@ public class HMaster extends HRegionServer implements MasterServices { // See HBASE-11393 status.setStatus("Update TableCFs node in ZNode"); ReplicationPeerConfigUpgrader tableCFsUpdater = - new ReplicationPeerConfigUpgrader(zooKeeper, conf); + new ReplicationPeerConfigUpgrader(zooKeeper, conf); tableCFsUpdater.copyTableCFs(); if (!maintenanceMode) { @@ -1038,12 +1034,11 @@ public class HMaster extends HRegionServer implements MasterServices { if (!waitForMetaOnline()) { return; } - TableDescriptor metaDescriptor = tableDescriptors.get( - TableName.META_TABLE_NAME); - final ColumnFamilyDescriptor tableFamilyDesc = metaDescriptor - .getColumnFamily(HConstants.TABLE_FAMILY); + TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME); + final ColumnFamilyDescriptor tableFamilyDesc = + metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY); final ColumnFamilyDescriptor replBarrierFamilyDesc = - metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY); + metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY); this.assignmentManager.joinCluster(); // The below depends on hbase:meta being online. 
@@ -1052,7 +1047,7 @@ public class HMaster extends HRegionServer implements MasterServices { } catch (NoSuchColumnFamilyException e) { if (tableFamilyDesc == null && replBarrierFamilyDesc == null) { LOG.info("TableStates manager could not be started. This is expected" - + " during HBase 1 to 2 upgrade.", e); + + " during HBase 1 to 2 upgrade.", e); } else { throw e; } @@ -1075,8 +1070,8 @@ public class HMaster extends HRegionServer implements MasterServices { int existingReplicasCount = assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); if (existingReplicasCount > metaDesc.getRegionReplication()) { - LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + - " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(existingReplicasCount).build(); tableDescriptors.update(metaDesc); @@ -1084,8 +1079,8 @@ public class HMaster extends HRegionServer implements MasterServices { // check again, and issue a ModifyTableProcedure if needed if (metaDesc.getRegionReplication() != replicasNumInConf) { LOG.info( - "The {} config is {} while the replica count in TableDescriptor is {}" + - " for hbase:meta, altering...", + "The {} config is {} while the replica count in TableDescriptor is {}" + + " for hbase:meta, altering...", HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); procedureExecutor.submitProcedure(new ModifyTableProcedure( procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) @@ -1097,7 +1092,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Initialize after meta is up as below scans meta if (favoredNodesManager != null && !maintenanceMode) { SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment = - new SnapshotOfRegionAssignmentFromMeta(getConnection()); + new SnapshotOfRegionAssignmentFromMeta(getConnection()); snapshotOfRegionAssignment.initialize(); favoredNodesManager.initialize(snapshotOfRegionAssignment); } @@ -1132,10 +1127,12 @@ public class HMaster extends HRegionServer implements MasterServices { try { initClusterSchemaService(); } catch (IllegalStateException e) { - if (e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException - && tableFamilyDesc == null && replBarrierFamilyDesc == null) { + if ( + e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException + && tableFamilyDesc == null && replBarrierFamilyDesc == null + ) { LOG.info("ClusterSchema service could not be initialized. 
This is " - + "expected during HBase 1 to 2 upgrade", e); + + "expected during HBase 1 to 2 upgrade", e); } else { throw e; } @@ -1151,7 +1148,7 @@ public class HMaster extends HRegionServer implements MasterServices { status.markComplete("Initialization successful"); LOG.info(String.format("Master has completed initialization %.3fsec", - (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); + (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime(); configurationManager.registerObserver(this.balancer); configurationManager.registerObserver(this.cleanerPool); @@ -1180,8 +1177,8 @@ public class HMaster extends HRegionServer implements MasterServices { // next active master init will not face any issues and all mandatory // services will be started during master init phase. throw new PleaseRestartMasterException("Aborting active master after missing" - + " CFs are successfully added in meta. Subsequent active master " - + "initialization should be uninterrupted"); + + " CFs are successfully added in meta. Subsequent active master " + + "initialization should be uninterrupted"); } if (maintenanceMode) { @@ -1232,32 +1229,31 @@ public class HMaster extends HRegionServer implements MasterServices { zombieDetector.interrupt(); /* - * After master has started up, lets do balancer post startup initialization. Since this runs - * in activeMasterManager thread, it should be fine. + * After master has started up, lets do balancer post startup initialization. Since this runs in + * activeMasterManager thread, it should be fine. */ long start = EnvironmentEdgeManager.currentTime(); this.balancer.postMasterStartupInitialize(); if (LOG.isDebugEnabled()) { - LOG.debug("Balancer post startup initialization complete, took " + ( - (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds"); + LOG.debug("Balancer post startup initialization complete, took " + + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds"); } this.rollingUpgradeChore = new RollingUpgradeChore(this); getChoreService().scheduleChore(rollingUpgradeChore); } - private void createMissingCFsInMetaDuringUpgrade( - TableDescriptor metaDescriptor) throws IOException { - TableDescriptor newMetaDesc = - TableDescriptorBuilder.newBuilder(metaDescriptor) - .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf)) - .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()) - .build(); - long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, - 0, 0, false); + private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor) + throws IOException { + TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor) + .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf)) + .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build(); + long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false); int tries = 30; - while (!(getMasterProcedureExecutor().isFinished(pid)) - && getMasterProcedureExecutor().isRunning() && tries > 0) { + while ( + !(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning() + && tries > 0 + ) { try { Thread.sleep(1000); } catch (InterruptedException e) { @@ -1267,13 +1263,12 @@ public class HMaster extends HRegionServer implements MasterServices { } if (tries <= 0) { throw new HBaseIOException( - "Failed to add table and rep_barrier CFs 
to meta in a given time."); + "Failed to add table and rep_barrier CFs to meta in a given time."); } else { Procedure result = getMasterProcedureExecutor().getResult(pid); if (result != null && result.isFailed()) { - throw new IOException( - "Failed to add table and rep_barrier CFs to meta. " - + MasterProcedureUtil.unwrapRemoteIOException(result)); + throw new IOException("Failed to add table and rep_barrier CFs to meta. " + + MasterProcedureUtil.unwrapRemoteIOException(result)); } } } @@ -1281,7 +1276,7 @@ public class HMaster extends HRegionServer implements MasterServices { /** * Check hbase:meta is up and ready for reading. For use during Master startup only. * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online - * and we will hold here until operator intervention. + * and we will hold here until operator intervention. */ @InterfaceAudience.Private public boolean waitForMetaOnline() { @@ -1289,8 +1284,8 @@ public class HMaster extends HRegionServer implements MasterServices { } /** - * @return True if region is online and scannable else false if an error or shutdown (Otherwise - * we just block in here holding up all forward-progess). + * @return True if region is online and scannable else false if an error or shutdown (Otherwise we + * just block in here holding up all forward-progess). */ private boolean isRegionOnline(RegionInfo ri) { RetryCounter rc = null; @@ -1302,14 +1297,15 @@ public class HMaster extends HRegionServer implements MasterServices { } } // Region is not OPEN. - Optional> optProc = this.procedureExecutor.getProcedures(). - stream().filter(p -> p instanceof ServerCrashProcedure).findAny(); + Optional> optProc = this.procedureExecutor.getProcedures() + .stream().filter(p -> p instanceof ServerCrashProcedure).findAny(); // TODO: Add a page to refguide on how to do repair. Have this log message point to it. // Page will talk about loss of edits, how to schedule at least the meta WAL recovery, and // then how to assign including how to break region lock if one held. - LOG.warn("{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot " + - "progress, in holding-pattern until region onlined.", - ri.getRegionNameAsString(), rs, optProc.isPresent()); + LOG.warn( + "{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot " + + "progress, in holding-pattern until region onlined.", + ri.getRegionNameAsString(), rs, optProc.isPresent()); // Check once-a-minute. if (rc == null) { rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create(); @@ -1326,15 +1322,15 @@ public class HMaster extends HRegionServer implements MasterServices { */ @InterfaceAudience.Private public boolean waitForNamespaceOnline() { - List ris = this.assignmentManager.getRegionStates(). - getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME); + List ris = + this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME); if (ris.isEmpty()) { // If empty, means we've not assigned the namespace table yet... Just return true so startup // continues and the namespace table gets created. return true; } // Else there are namespace regions up in meta. Ensure they are assigned before we go on. 
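Aside: waitForMetaOnline()/waitForNamespaceOnline() in this hunk implement a deliberate holding pattern, re-checking region state and logging until the region comes online or the master stops. Reduced to plain Java below; the real code uses RetryCounterFactory to cap the sleep at roughly a minute, and this helper is illustrative only:

import java.util.function.BooleanSupplier;

/** Sketch only: the check-log-sleep-recheck holding pattern described above. */
final class WaitUntilOnlineSketch {
  static boolean waitUntil(BooleanSupplier online, BooleanSupplier stopped)
    throws InterruptedException {
    long sleepMs = 1000;
    while (!stopped.getAsBoolean()) {
      if (online.getAsBoolean()) {
        return true; // region (or namespace region) is online, startup can progress
      }
      Thread.sleep(sleepMs);
      sleepMs = Math.min(sleepMs * 2, 60_000L); // back off, but re-check at least once a minute
    }
    return false; // shutdown requested while still waiting
  }
}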
- for (RegionInfo ri: ris) { + for (RegionInfo ri : ris) { if (!isRegionOnline(ri)) { return false; } @@ -1349,9 +1345,10 @@ public class HMaster extends HRegionServer implements MasterServices { @InterfaceAudience.Private public void updateConfigurationForQuotasObserver(Configuration conf) { // We're configured to not delete quotas on table deletion, so we don't need to add the obs. - if (!conf.getBoolean( - MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE, - MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) { + if ( + !conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE, + MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT) + ) { return; } String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); @@ -1369,7 +1366,7 @@ public class HMaster extends HRegionServer implements MasterServices { getChoreService().scheduleChore(expiredMobFileCleanerChore); int mobCompactionPeriod = conf.getInt(MobConstants.MOB_COMPACTION_CHORE_PERIOD, - MobConstants.DEFAULT_MOB_COMPACTION_CHORE_PERIOD); + MobConstants.DEFAULT_MOB_COMPACTION_CHORE_PERIOD); this.mobCompactChore = new MobCompactionChore(this, mobCompactionPeriod); getChoreService().scheduleChore(mobCompactChore); this.mobCompactThread = new MasterMobCompactionThread(this); @@ -1384,8 +1381,8 @@ public class HMaster extends HRegionServer implements MasterServices { *
<p>
          */ @InterfaceAudience.Private - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { // We put this out here in a method so can do a Mockito.spy and stub it out // w/ a mocked up ServerManager. setupClusterConnection(); @@ -1393,7 +1390,7 @@ public class HMaster extends HRegionServer implements MasterServices { } private void waitForRegionServers(final MonitoredTask status) - throws IOException, InterruptedException { + throws IOException, InterruptedException { this.serverManager.waitForRegionServers(status); } @@ -1403,9 +1400,9 @@ public class HMaster extends HRegionServer implements MasterServices { this.clusterSchemaService = new ClusterSchemaServiceImpl(this); this.clusterSchemaService.startAsync(); try { - this.clusterSchemaService.awaitRunning(getConfiguration().getInt( - HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, - DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); + this.clusterSchemaService + .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, + DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException toe) { throw new IOException("Timedout starting ClusterSchemaService", toe); } @@ -1419,7 +1416,7 @@ public class HMaster extends HRegionServer implements MasterServices { private SpaceQuotaSnapshotNotifier createQuotaSnapshotNotifier() { SpaceQuotaSnapshotNotifier notifier = - SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration()); + SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration()); return notifier; } @@ -1467,53 +1464,55 @@ public class HMaster extends HRegionServer implements MasterServices { } /* - * Start up all services. If any of these threads gets an unhandled exception - * then they just die with a logged message. This should be fine because - * in general, we do not expect the master to get such unhandled exceptions - * as OOMEs; it should be lightly loaded. See what HRegionServer does if - * need to install an unexpected exception handler. + * Start up all services. If any of these threads gets an unhandled exception then they just die + * with a logged message. This should be fine because in general, we do not expect the master to + * get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer + * does if need to install an unexpected exception handler. 
*/ private void startServiceThreads() throws IOException { // Start the executor service pools - final int masterOpenRegionPoolSize = conf.getInt( - HConstants.MASTER_OPEN_REGION_THREADS, HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize)); - final int masterCloseRegionPoolSize = conf.getInt( - HConstants.MASTER_CLOSE_REGION_THREADS, HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_CLOSE_REGION).setCorePoolSize(masterCloseRegionPoolSize)); + final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS, + HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize)); + final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS, + HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION) + .setCorePoolSize(masterCloseRegionPoolSize)); final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS, - HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SERVER_OPERATIONS).setCorePoolSize(masterServerOpThreads)); - final int masterServerMetaOpsThreads = conf.getInt( - HConstants.MASTER_META_SERVER_OPERATIONS_THREADS, + HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS) + .setCorePoolSize(masterServerOpThreads)); + final int masterServerMetaOpsThreads = + conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS, HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_META_SERVER_OPERATIONS).setCorePoolSize(masterServerMetaOpsThreads)); - final int masterLogReplayThreads = conf.getInt( - HConstants.MASTER_LOG_REPLAY_OPS_THREADS, HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads)); - final int masterSnapshotThreads = conf.getInt( - SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SNAPSHOT_OPERATIONS).setCorePoolSize(masterSnapshotThreads) - .setAllowCoreThreadTimeout(true)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS) + .setCorePoolSize(masterServerMetaOpsThreads)); + final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS, + HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads)); + final int 
masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, + SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS) + .setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true)); final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS, - HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_MERGE_OPERATIONS).setCorePoolSize(masterMergeDispatchThreads) - .setAllowCoreThreadTimeout(true)); + HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS) + .setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true)); // We depend on there being only one instance of this executor running // at a time. To do concurrency, would need fencing of enable/disable of // tables. // Any time changing this maxThreads to > 1, pls see the comment at // AccessController#postCompletedCreateTableAction - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1)); startProcedureExecutor(); // Create cleaner thread pool @@ -1523,8 +1522,9 @@ public class HMaster extends HRegionServer implements MasterServices { // Start log cleaner thread int cleanerInterval = conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); - this.logCleaner = new LogCleaner(cleanerInterval, this, conf, - getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool, params); + this.logCleaner = + new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(), + getMasterWalManager().getOldLogDir(), cleanerPool, params); getChoreService().scheduleChore(logCleaner); // start the hfile archive cleaner thread @@ -1535,26 +1535,25 @@ public class HMaster extends HRegionServer implements MasterServices { // Regions Reopen based on very high storeFileRefCount is considered enabled // only if hbase.regions.recovery.store.file.ref.count has value > 0 - final int maxStoreFileRefCount = conf.getInt( - HConstants.STORE_FILE_REF_COUNT_THRESHOLD, + final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD, HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD); if (maxStoreFileRefCount > 0) { this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this); getChoreService().scheduleChore(this.regionsRecoveryChore); } else { - LOG.info("Reopening regions with very high storeFileRefCount is disabled. " + - "Provide threshold value > 0 for {} to enable it.", + LOG.info( + "Reopening regions with very high storeFileRefCount is disabled. 
" + + "Provide threshold value > 0 for {} to enable it.", HConstants.STORE_FILE_REF_COUNT_THRESHOLD); } this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this); - replicationBarrierCleaner = new ReplicationBarrierCleaner(conf, this, getConnection(), - replicationPeerManager); + replicationBarrierCleaner = + new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager); getChoreService().scheduleChore(replicationBarrierCleaner); - final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker - .isSnapshotCleanupEnabled(); + final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker.isSnapshotCleanupEnabled(); this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager()); if (isSnapshotChoreEnabled) { getChoreService().scheduleChore(this.snapshotCleanerChore); @@ -1632,8 +1631,8 @@ public class HMaster extends HRegionServer implements MasterServices { private void createProcedureExecutor() throws IOException { MasterProcedureEnv procEnv = new MasterProcedureEnv(this); - procedureStore = - new RegionProcedureStore(this, masterRegion, new MasterProcedureEnv.FsUtilsLeaseRecovery(this)); + procedureStore = new RegionProcedureStore(this, masterRegion, + new MasterProcedureEnv.FsUtilsLeaseRecovery(this)); procedureStore.registerListener(new ProcedureStoreListener() { @Override @@ -1668,7 +1667,6 @@ public class HMaster extends HRegionServer implements MasterServices { /** * Turn on/off Snapshot Cleanup Chore - * * @param on indicates whether Snapshot Cleanup Chore is to be run */ void switchSnapshotCleanup(final boolean on, final boolean synchronous) { @@ -1694,7 +1692,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - private void stopProcedureExecutor() { if (procedureExecutor != null) { configurationManager.deregisterObserver(procedureExecutor.getEnvironment()); @@ -1735,8 +1732,8 @@ public class HMaster extends HRegionServer implements MasterServices { /** * @return Get remote side's InetAddress */ - InetAddress getRemoteInetAddress(final int port, - final long serverStartCode) throws UnknownHostException { + InetAddress getRemoteInetAddress(final int port, final long serverStartCode) + throws UnknownHostException { // Do it out here in its own little method so can fake an address when // mocking up in tests. InetAddress ia = RpcServer.getRemoteIp(); @@ -1757,8 +1754,8 @@ public class HMaster extends HRegionServer implements MasterServices { */ private int getMaxBalancingTime() { // if max balancing time isn't set, defaulting it to period time - int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, - getConfiguration() + int maxBalancingTime = + getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration() .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); return maxBalancingTime; } @@ -1772,20 +1769,22 @@ public class HMaster extends HRegionServer implements MasterServices { } /** - * It first sleep to the next balance plan start time. Meanwhile, throttling by the max - * number regions in transition to protect availability. - * @param nextBalanceStartTime The next balance plan start time + * It first sleep to the next balance plan start time. Meanwhile, throttling by the max number + * regions in transition to protect availability. 
+ * @param nextBalanceStartTime The next balance plan start time * @param maxRegionsInTransition max number of regions in transition - * @param cutoffTime when to exit balancer + * @param cutoffTime when to exit balancer */ private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition, - long cutoffTime) { + long cutoffTime) { boolean interrupted = false; // Sleep to next balance plan start time // But if there are zero regions in transition, it can skip sleep to speed up. - while (!interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime - && this.assignmentManager.getRegionStates().hasRegionsInTransition()) { + while ( + !interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime + && this.assignmentManager.getRegionStates().hasRegionsInTransition() + ) { try { Thread.sleep(100); } catch (InterruptedException ie) { @@ -1794,10 +1793,12 @@ public class HMaster extends HRegionServer implements MasterServices { } // Throttling by max number regions in transition - while (!interrupted - && maxRegionsInTransition > 0 + while ( + !interrupted && maxRegionsInTransition > 0 && this.assignmentManager.getRegionStates().getRegionsInTransitionCount() - >= maxRegionsInTransition && EnvironmentEdgeManager.currentTime() <= cutoffTime) { + >= maxRegionsInTransition + && EnvironmentEdgeManager.currentTime() <= cutoffTime + ) { try { // sleep if the number of regions in transition exceeds the limit Thread.sleep(100); @@ -1814,14 +1815,13 @@ public class HMaster extends HRegionServer implements MasterServices { } /** - * Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed - * this time, the metrics related to the balance will be updated. - * - * When balance is running, related metrics will be updated at the same time. But if some - * checking logic failed and cause the balancer exit early, we lost the chance to update - * balancer metrics. This will lead to user missing the latest balancer info. - * */ - public BalanceResponse balanceOrUpdateMetrics() throws IOException{ + * Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed this + * time, the metrics related to the balance will be updated. When balance is running, related + * metrics will be updated at the same time. But if some checking logic failed and cause the + * balancer exit early, we lost the chance to update balancer metrics. This will lead to user + * missing the latest balancer info. + */ + public BalanceResponse balanceOrUpdateMetrics() throws IOException { synchronized (this.balancer) { BalanceResponse response = balance(); if (!response.isBalancerRan()) { @@ -1865,8 +1865,9 @@ public class HMaster extends HRegionServer implements MasterServices { BalanceResponse.Builder responseBuilder = BalanceResponse.newBuilder(); - if (loadBalancerTracker == null - || !(loadBalancerTracker.isBalancerOn() || request.isDryRun())) { + if ( + loadBalancerTracker == null || !(loadBalancerTracker.isBalancerOn() || request.isDryRun()) + ) { return responseBuilder.build(); } @@ -1875,7 +1876,7 @@ public class HMaster extends HRegionServer implements MasterServices { } synchronized (this.balancer) { - // Only allow one balance run at at time. + // Only allow one balance run at at time. 
if (this.assignmentManager.hasRegionsInTransition()) { List regionsInTransition = assignmentManager.getRegionsInTransition(); // if hbase:meta region is in transition, result of assignment cannot be recorded @@ -1890,15 +1891,15 @@ public class HMaster extends HRegionServer implements MasterServices { } if (!request.isIgnoreRegionsInTransition() || metaInTransition) { - LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition + - ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint - + (truncated? "(truncated list)": "")); + LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition + + ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint + + (truncated ? "(truncated list)" : "")); return responseBuilder.build(); } } if (this.serverManager.areDeadServersInProgress()) { - LOG.info("Not running balancer because processing dead regionserver(s): " + - this.serverManager.getDeadServers()); + LOG.info("Not running balancer because processing dead regionserver(s): " + + this.serverManager.getDeadServers()); return responseBuilder.build(); } @@ -1915,13 +1916,13 @@ public class HMaster extends HRegionServer implements MasterServices { } Map>> assignments = - this.assignmentManager.getRegionStates() - .getAssignmentsForBalancer(tableStateManager, this.serverManager.getOnlineServersList()); + this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager, + this.serverManager.getOnlineServersList()); for (Map> serverMap : assignments.values()) { serverMap.keySet().removeAll(this.serverManager.getDrainingServersList()); } - //Give the balancer the current cluster state. + // Give the balancer the current cluster state. this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor()); List plans = this.balancer.balanceCluster(assignments); @@ -1935,9 +1936,8 @@ public class HMaster extends HRegionServer implements MasterServices { // For dry run we don't actually want to execute the moves, but we do want // to execute the coprocessor below - List sucRPs = request.isDryRun() - ? Collections.emptyList() - : executeRegionPlansWithThrottling(plans); + List sucRPs = + request.isDryRun() ? 
Collections.emptyList() : executeRegionPlansWithThrottling(plans); if (this.cpHost != null) { try { @@ -1966,24 +1966,24 @@ public class HMaster extends HRegionServer implements MasterServices { int maxRegionsInTransition = getMaxRegionsInTransition(); long balanceStartTime = EnvironmentEdgeManager.currentTime(); long cutoffTime = balanceStartTime + this.maxBalancingTime; - int rpCount = 0; // number of RegionPlans balanced so far + int rpCount = 0; // number of RegionPlans balanced so far if (plans != null && !plans.isEmpty()) { int balanceInterval = this.maxBalancingTime / plans.size(); - LOG.info("Balancer plans size is " + plans.size() + ", the balance interval is " - + balanceInterval + " ms, and the max number regions in transition is " - + maxRegionsInTransition); + LOG.info( + "Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval + + " ms, and the max number regions in transition is " + maxRegionsInTransition); - for (RegionPlan plan: plans) { + for (RegionPlan plan : plans) { LOG.info("balance " + plan); - //TODO: bulk assign + // TODO: bulk assign try { this.assignmentManager.balance(plan); } catch (HBaseIOException hioe) { - //should ignore failed plans here, avoiding the whole balance plans be aborted - //later calls of balance() can fetch up the failed and skipped plans + // should ignore failed plans here, avoiding the whole balance plans be aborted + // later calls of balance() can fetch up the failed and skipped plans LOG.warn("Failed balance plan {}, skipping...", plan, hioe); } - //rpCount records balance plans processed, does not care if a plan succeeds + // rpCount records balance plans processed, does not care if a plan succeeds rpCount++; successRegionPlans.add(plan); @@ -1993,12 +1993,14 @@ public class HMaster extends HRegionServer implements MasterServices { } // if performing next balance exceeds cutoff time, exit the loop - if (this.maxBalancingTime > 0 && rpCount < plans.size() - && EnvironmentEdgeManager.currentTime() > cutoffTime) { + if ( + this.maxBalancingTime > 0 && rpCount < plans.size() + && EnvironmentEdgeManager.currentTime() > cutoffTime + ) { // TODO: After balance, there should not be a cutoff time (keeping it as // a security net for now) - LOG.debug("No more balancing till next balance run; maxBalanceTime=" - + this.maxBalancingTime); + LOG.debug( + "No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime); break; } } @@ -2014,10 +2016,8 @@ public class HMaster extends HRegionServer implements MasterServices { } @Override - public boolean normalizeRegions( - final NormalizeTableFilterParams ntfp, - final boolean isHighPriority - ) throws IOException { + public boolean normalizeRegions(final NormalizeTableFilterParams ntfp, + final boolean isHighPriority) throws IOException { if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) { LOG.debug("Region normalization is disabled, don't run region normalizer."); return false; @@ -2030,10 +2030,8 @@ public class HMaster extends HRegionServer implements MasterServices { } final Set matchingTables = getTableDescriptors(new LinkedList<>(), - ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) - .stream() - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); + ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false).stream() + .map(TableDescriptor::getTableName).collect(Collectors.toSet()); final Set allEnabledTables = tableStateManager.getTablesInStates(TableState.State.ENABLED); 
final List targetTables = @@ -2047,14 +2045,13 @@ public class HMaster extends HRegionServer implements MasterServices { */ @Override public String getClientIdAuditPrefix() { - return "Client=" + RpcServer.getRequestUserName().orElse(null) - + "/" + RpcServer.getRemoteAddress().orElse(null); + return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/" + + RpcServer.getRemoteAddress().orElse(null); } /** - * Switch for the background CatalogJanitor thread. - * Used for testing. The thread will continue to run. It will just be a noop - * if disabled. + * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to + * run. It will just be a noop if disabled. * @param b If false, the catalog janitor won't do anything. */ public void setCatalogJanitorEnabled(final boolean b) { @@ -2062,18 +2059,15 @@ public class HMaster extends HRegionServer implements MasterServices { } @Override - public long mergeRegions( - final RegionInfo[] regionsToMerge, - final boolean forcible, - final long ng, - final long nonce) throws IOException { + public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible, final long ng, + final long nonce) throws IOException { checkInitialized(); if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) { String regionsStr = Arrays.deepToString(regionsToMerge); LOG.warn("Merge switch is off! skip merge of " + regionsStr); - throw new DoNotRetryIOException("Merge of " + regionsStr + - " failed because merge switch is off"); + throw new DoNotRetryIOException( + "Merge of " + regionsStr + " failed because merge switch is off"); } final String mergeRegionsStr = Arrays.stream(regionsToMerge).map(RegionInfo::getEncodedName) @@ -2085,7 +2079,7 @@ public class HMaster extends HRegionServer implements MasterServices { String aid = getClientIdAuditPrefix(); LOG.info("{} merge regions {}", aid, mergeRegionsStr); submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), - regionsToMerge, forcible)); + regionsToMerge, forcible)); getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge); } @@ -2097,33 +2091,32 @@ public class HMaster extends HRegionServer implements MasterServices { } @Override - public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, - final long nonceGroup, final long nonce) - throws IOException { + public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup, + final long nonce) throws IOException { checkInitialized(); if (!isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) { LOG.warn("Split switch is off! 
skip split of " + regionInfo); - throw new DoNotRetryIOException("Split region " + regionInfo.getRegionNameAsString() + - " failed due to split switch off"); + throw new DoNotRetryIOException( + "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off"); } - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); - LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); + LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); - // Execute the operation asynchronously - submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); - } + // Execute the operation asynchronously + submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); + } - @Override - protected String getDescription() { - return "SplitTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "SplitTableProcedure"; + } + }); } // Public so can be accessed by tests. Blocks until move is done. @@ -2131,8 +2124,8 @@ public class HMaster extends HRegionServer implements MasterServices { // a success/failure result. @InterfaceAudience.Private public void move(final byte[] encodedRegionName, byte[] destServerName) throws HBaseIOException { - RegionState regionState = assignmentManager.getRegionStates(). - getRegionState(Bytes.toString(encodedRegionName)); + RegionState regionState = + assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName)); RegionInfo hri; if (regionState != null) { @@ -2142,17 +2135,18 @@ public class HMaster extends HRegionServer implements MasterServices { } ServerName dest; - List exclude = hri.getTable().isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() - : new ArrayList<>(1); - if (destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))) { - LOG.info( - Bytes.toString(encodedRegionName) + " can not move to " + Bytes.toString(destServerName) - + " because the server is in exclude list"); + List exclude = hri.getTable().isSystemTable() + ? assignmentManager.getExcludedServersForSystemTable() + : new ArrayList<>(1); + if ( + destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName))) + ) { + LOG.info(Bytes.toString(encodedRegionName) + " can not move to " + + Bytes.toString(destServerName) + " because the server is in exclude list"); destServerName = null; } if (destServerName == null || destServerName.length == 0) { - LOG.info("Passed destination servername is null/empty so " + - "choosing a server at random"); + LOG.info("Passed destination servername is null/empty so " + "choosing a server at random"); exclude.add(regionState.getServerName()); final List destServers = this.serverManager.createDestinationServersList(exclude); dest = balancer.randomAssignment(hri, destServers); @@ -2168,8 +2162,10 @@ public class HMaster extends HRegionServer implements MasterServices { return; } // TODO: What is this? I don't get it. 
- if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer - && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) { + if ( + dest.equals(serverName) && balancer instanceof BaseLoadBalancer + && !((BaseLoadBalancer) balancer).shouldBeOnMaster(hri) + ) { // To avoid unnecessary region moving later by balancer. Don't put user // regions on master. LOG.debug("Skipping move of region " + hri.getRegionNameAsString() @@ -2187,7 +2183,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Now we can do the move RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest); - assert rp.getDestination() != null: rp.toString() + " " + dest; + assert rp.getDestination() != null : rp.toString() + " " + dest; try { checkInitialized(); @@ -2196,13 +2192,13 @@ public class HMaster extends HRegionServer implements MasterServices { } TransitRegionStateProcedure proc = - this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination()); + this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination()); if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) { // Warmup the region on the destination before initiating the move. this call // is synchronous and takes some time. doing it before the source region gets // closed - LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on " + - rp.getDestination()); + LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on " + + rp.getDestination()); serverManager.sendRegionWarmup(rp.getDestination(), hri); } LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer"); @@ -2219,7 +2215,7 @@ public class HMaster extends HRegionServer implements MasterServices { } } catch (IOException ioe) { if (ioe instanceof HBaseIOException) { - throw (HBaseIOException)ioe; + throw (HBaseIOException) ioe; } throw new HBaseIOException(ioe); } @@ -2227,7 +2223,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor); if (desc == null) { @@ -2283,7 +2279,7 @@ public class HMaster extends HRegionServer implements MasterServices { LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor); - // This special create table is called locally to master. Therefore, no RPC means no need + // This special create table is called locally to master. Therefore, no RPC means no need // to use nonce to detect duplicated RPC call. long procId = this.procedureExecutor.submitProcedure( new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions)); @@ -2292,18 +2288,15 @@ public class HMaster extends HRegionServer implements MasterServices { } private void startActiveMasterManager(int infoPort) throws KeeperException { - String backupZNode = ZNodePaths.joinZNode( - zooKeeper.getZNodePaths().backupMasterAddressesZNode, serverName.toString()); + String backupZNode = ZNodePaths.joinZNode(zooKeeper.getZNodePaths().backupMasterAddressesZNode, + serverName.toString()); /* - * Add a ZNode for ourselves in the backup master directory since we - * may not become the active master. 
If so, we want the actual active - * master to know we are backup masters, so that it won't assign - * regions to us if so configured. - * - * If we become the active master later, ActiveMasterManager will delete - * this node explicitly. If we crash before then, ZooKeeper will delete - * this node for us since it is ephemeral. - */ + * Add a ZNode for ourselves in the backup master directory since we may not become the active + * master. If so, we want the actual active master to know we are backup masters, so that it + * won't assign regions to us if so configured. If we become the active master later, + * ActiveMasterManager will delete this node explicitly. If we crash before then, ZooKeeper will + * delete this node for us since it is ephemeral. + */ LOG.info("Adding backup master ZNode " + backupZNode); if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) { LOG.warn("Failed create of " + backupZNode + " by " + serverName); @@ -2330,12 +2323,14 @@ public class HMaster extends HRegionServer implements MasterServices { status.setStatus("Failed to become active: " + t.getMessage()); LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t); // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility - if (t instanceof NoClassDefFoundError && t.getMessage(). - contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) { + if ( + t instanceof NoClassDefFoundError + && t.getMessage().contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction") + ) { // improved error message for this special case - abort("HBase is having a problem with its Hadoop jars. You may need to recompile " + - "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + - " or change your hadoop jars to start properly", t); + abort("HBase is having a problem with its Hadoop jars. You may need to recompile " + + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + + " or change your hadoop jars to start properly", t); } else { abort("Unhandled exception. Starting shutdown.", t); } @@ -2349,72 +2344,67 @@ public class HMaster extends HRegionServer implements MasterServices { } @Override - public long deleteTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { + public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) + throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); - LOG.info(getClientIdAuditPrefix() + " delete " + tableName); + LOG.info(getClientIdAuditPrefix() + " delete " + tableName); - // TODO: We can handle/merge duplicate request - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. 
- ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new DeleteTableProcedure(procedureExecutor.getEnvironment(), - tableName, latch)); - latch.await(); + // TODO: We can handle/merge duplicate request + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure( + new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); + latch.await(); - getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); - } + getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); + } - @Override - protected String getDescription() { - return "DeleteTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "DeleteTableProcedure"; + } + }); } @Override - public long truncateTable( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) throws IOException { + public long truncateTable(final TableName tableName, final boolean preserveSplits, + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); - LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); - submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), - tableName, preserveSplits, latch)); - latch.await(); + LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); + submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName, + preserveSplits, latch)); + latch.await(); - getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); - } + getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); + } - @Override - protected String getDescription() { - return "TruncateTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "TruncateTableProcedure"; + } + }); } @Override public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); checkTableExists(tableName); @@ -2425,7 +2415,7 @@ public class HMaster extends HRegionServer implements MasterServices { TableDescriptor old = getTableDescriptors().get(tableName); if (old.hasColumnFamily(column.getName())) { throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString() - + "' in table '" + tableName + "' already exists so cannot be added"); + + "' in table '" + tableName + "' already exists so cannot be added"); } return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build(); @@ -2442,7 +2432,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override 
public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); checkTableExists(tableName); return modifyTable(tableName, new TableDescriptorGetter() { @@ -2452,7 +2442,7 @@ public class HMaster extends HRegionServer implements MasterServices { TableDescriptor old = getTableDescriptors().get(tableName); if (!old.hasColumnFamily(descriptor.getName())) { throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString() - + "' does not exist, so it cannot be modified"); + + "' does not exist, so it cannot be modified"); } return TableDescriptorBuilder.newBuilder(old).modifyColumnFamily(descriptor).build(); @@ -2460,7 +2450,6 @@ public class HMaster extends HRegionServer implements MasterServices { }, nonceGroup, nonce, true); } - @Override public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT, long nonceGroup, long nonce) throws IOException { @@ -2489,7 +2478,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public long deleteColumn(final TableName tableName, final byte[] columnName, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); checkTableExists(tableName); @@ -2500,12 +2489,12 @@ public class HMaster extends HRegionServer implements MasterServices { TableDescriptor old = getTableDescriptors().get(tableName); if (!old.hasColumnFamily(columnName)) { - throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName) - + "' does not exist, so it cannot be deleted"); + throw new InvalidFamilyOperationException( + "Family '" + Bytes.toString(columnName) + "' does not exist, so it cannot be deleted"); } if (old.getColumnFamilyCount() == 1) { throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName) - + "' is the only column family in the table, so it cannot be deleted"); + + "' is the only column family in the table, so it cannot be deleted"); } return TableDescriptorBuilder.newBuilder(old).removeColumnFamily(columnName).build(); } @@ -2514,134 +2503,137 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public long enableTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException { + throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preEnableTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preEnableTable(tableName); - // Normally, it would make sense for this authorization check to exist inside - // AccessController, but because the authorization check is done based on internal state - // (rather than explicit permissions) we'll do the check here instead of in the - // coprocessor. 
- MasterQuotaManager quotaManager = getMasterQuotaManager(); - if (quotaManager != null) { - if (quotaManager.isQuotaInitialized()) { + // Normally, it would make sense for this authorization check to exist inside + // AccessController, but because the authorization check is done based on internal state + // (rather than explicit permissions) we'll do the check here instead of in the + // coprocessor. + MasterQuotaManager quotaManager = getMasterQuotaManager(); + if (quotaManager != null) { + if (quotaManager.isQuotaInitialized()) { SpaceQuotaSnapshot currSnapshotOfTable = - QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); + QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); if (currSnapshotOfTable != null) { SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus(); - if (quotaStatus.isInViolation() - && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)) { - throw new AccessDeniedException("Enabling the table '" + tableName + if ( + quotaStatus.isInViolation() + && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null) + ) { + throw new AccessDeniedException("Enabling the table '" + tableName + "' is disallowed due to a violated space quota."); + } } + } else if (LOG.isTraceEnabled()) { + LOG + .trace("Unable to check for space quotas as the MasterQuotaManager is not enabled"); } - } else if (LOG.isTraceEnabled()) { - LOG.trace("Unable to check for space quotas as the MasterQuotaManager is not enabled"); } + + LOG.info(getClientIdAuditPrefix() + " enable " + tableName); + + // Execute the operation asynchronously - client will check the progress of the operation + // In case the request is from a <1.1 client before returning, + // we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // Note: if the procedure throws exception, we will catch it and rethrow. + final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); + submitProcedure( + new EnableTableProcedure(procedureExecutor.getEnvironment(), tableName, prepareLatch)); + prepareLatch.await(); + + getMaster().getMasterCoprocessorHost().postEnableTable(tableName); } - LOG.info(getClientIdAuditPrefix() + " enable " + tableName); - - // Execute the operation asynchronously - client will check the progress of the operation - // In case the request is from a <1.1 client before returning, - // we want to make sure that the table is prepared to be - // enabled (the table is locked and the table state is set). - // Note: if the procedure throws exception, we will catch it and rethrow. 
- final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); - submitProcedure(new EnableTableProcedure(procedureExecutor.getEnvironment(), - tableName, prepareLatch)); - prepareLatch.await(); - - getMaster().getMasterCoprocessorHost().postEnableTable(tableName); - } - - @Override - protected String getDescription() { - return "EnableTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "EnableTableProcedure"; + } + }); } @Override public long disableTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException { + throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDisableTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preDisableTable(tableName); - LOG.info(getClientIdAuditPrefix() + " disable " + tableName); + LOG.info(getClientIdAuditPrefix() + " disable " + tableName); - // Execute the operation asynchronously - client will check the progress of the operation - // In case the request is from a <1.1 client before returning, - // we want to make sure that the table is prepared to be - // enabled (the table is locked and the table state is set). - // Note: if the procedure throws exception, we will catch it and rethrow. - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), - tableName, false, prepareLatch)); - prepareLatch.await(); + // Execute the operation asynchronously - client will check the progress of the operation + // In case the request is from a <1.1 client before returning, + // we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // Note: if the procedure throws exception, we will catch it and rethrow. + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. 
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), tableName, + false, prepareLatch)); + prepareLatch.await(); - getMaster().getMasterCoprocessorHost().postDisableTable(tableName); - } + getMaster().getMasterCoprocessorHost().postDisableTable(tableName); + } - @Override - protected String getDescription() { - return "DisableTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "DisableTableProcedure"; + } + }); } private long modifyTable(final TableName tableName, - final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, - final boolean shouldCheckDescriptor) throws IOException { + final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, + final boolean shouldCheckDescriptor) throws IOException { return MasterProcedureUtil - .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName); - TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost() - .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get()); - TableDescriptorChecker.sanityCheck(conf, newDescriptor); - LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName, - oldDescriptor, newDescriptor); + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName); + TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost() + .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get()); + TableDescriptorChecker.sanityCheck(conf, newDescriptor); + LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName, + oldDescriptor, newDescriptor); - // Execute the operation synchronously - wait for the operation completes before - // continuing. - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), - newDescriptor, latch, oldDescriptor, shouldCheckDescriptor)); - latch.await(); + // Execute the operation synchronously - wait for the operation completes before + // continuing. + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. 
+ ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), + newDescriptor, latch, oldDescriptor, shouldCheckDescriptor)); + latch.await(); - getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor, - newDescriptor); - } + getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor, + newDescriptor); + } - @Override - protected String getDescription() { - return "ModifyTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "ModifyTableProcedure"; + } + }); } @Override public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); return modifyTable(tableName, new TableDescriptorGetter() { @Override @@ -2686,13 +2678,12 @@ public class HMaster extends HRegionServer implements MasterServices { final TableName dstTable = TableName.valueOf(snapshotDesc.getTable()); getClusterSchema().getNamespace(dstTable.getNamespaceAsString()); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { @Override protected void run() throws IOException { - setProcId( - getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), restoreAcl, - customSFT)); + setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), + restoreAcl, customSFT)); } @Override @@ -2711,7 +2702,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public void checkTableModifiable(final TableName tableName) - throws IOException, TableNotFoundException, TableNotDisabledException { + throws IOException, TableNotFoundException, TableNotDisabledException { if (isCatalogTable(tableName)) { throw new IOException("Can't modify catalog tables"); } @@ -2727,7 +2718,7 @@ public class HMaster extends HRegionServer implements MasterServices { } public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet
      =-.}) - * If reference, then the regex has more than just one group. - * Group 1, hfile/hfilelink pattern, is this file's id. - * Group 2 '(.+)' is the reference's parent region name. + * Regex that will work for straight reference names ({@code .}) and + * hfilelink reference names ({@code + * +
      + * =-.}) If reference, then the regex has more than just one + * group. Group 1, hfile/hfilelink pattern, is this file's id. Group 2 '(.+)' is the reference's + * parent region name. */ private static final Pattern REF_NAME_PATTERN = - Pattern.compile(String.format("^(%s|%s)\\.(.+)$", - HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX)); + Pattern.compile(String.format("^(%s|%s)\\.(.+)$", HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX)); public static final String STORE_FILE_READER_NO_READAHEAD = "hbase.store.reader.no-readahead"; public static final boolean DEFAULT_STORE_FILE_READER_NO_READAHEAD = false; @@ -124,18 +120,18 @@ public class StoreFileInfo implements Configurable { /** * Create a Store File Info - * @param conf the {@link Configuration} to use - * @param fs The current file system to use. - * @param initialPath The {@link Path} of the file + * @param conf the {@link Configuration} to use + * @param fs The current file system to use. + * @param initialPath The {@link Path} of the file * @param primaryReplica true if this is a store file for primary replica, otherwise false. */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final Path initialPath, - final boolean primaryReplica) throws IOException { + final boolean primaryReplica) throws IOException { this(conf, fs, null, initialPath, primaryReplica); } private StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Path initialPath, final boolean primaryReplica) throws IOException { + final Path initialPath, final boolean primaryReplica) throws IOException { assert fs != null; assert initialPath != null; assert conf != null; @@ -144,8 +140,8 @@ public class StoreFileInfo implements Configurable { this.conf = conf; this.initialPath = initialPath; this.primaryReplica = primaryReplica; - this.noReadahead = this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, - DEFAULT_STORE_FILE_READER_NO_READAHEAD); + this.noReadahead = + this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD); Path p = initialPath; if (HFileLink.isHFileLink(p)) { // HFileLink @@ -182,57 +178,57 @@ public class StoreFileInfo implements Configurable { /** * Create a Store File Info - * @param conf the {@link Configuration} to use - * @param fs The current file system to use. + * @param conf the {@link Configuration} to use + * @param fs The current file system to use. 
* @param fileStatus The {@link FileStatus} of the file */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus) - throws IOException { + throws IOException { this(conf, fs, fileStatus, fileStatus.getPath(), true); } /** * Create a Store File Info from an HFileLink - * @param conf The {@link Configuration} to use - * @param fs The current file system to use + * @param conf The {@link Configuration} to use + * @param fs The current file system to use * @param fileStatus The {@link FileStatus} of the file */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final HFileLink link) { + final HFileLink link) { this(conf, fs, fileStatus, null, link); } /** * Create a Store File Info from an HFileLink - * @param conf The {@link Configuration} to use - * @param fs The current file system to use + * @param conf The {@link Configuration} to use + * @param fs The current file system to use * @param fileStatus The {@link FileStatus} of the file - * @param reference The reference instance + * @param reference The reference instance */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Reference reference) { + final Reference reference) { this(conf, fs, fileStatus, reference, null); } /** * Create a Store File Info from an HFileLink and a Reference - * @param conf The {@link Configuration} to use - * @param fs The current file system to use + * @param conf The {@link Configuration} to use + * @param fs The current file system to use * @param fileStatus The {@link FileStatus} of the file - * @param reference The reference instance - * @param link The link instance + * @param reference The reference instance + * @param link The link instance */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Reference reference, final HFileLink link) { + final Reference reference, final HFileLink link) { this.fs = fs; this.conf = conf; this.primaryReplica = false; this.initialPath = (fileStatus == null) ? null : fileStatus.getPath(); - this.createdTimestamp = (fileStatus == null) ? 0 :fileStatus.getModificationTime(); + this.createdTimestamp = (fileStatus == null) ? 0 : fileStatus.getModificationTime(); this.reference = reference; this.link = link; - this.noReadahead = this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, - DEFAULT_STORE_FILE_READER_NO_READAHEAD); + this.noReadahead = + this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD); } @Override @@ -246,24 +242,22 @@ public class StoreFileInfo implements Configurable { } /** - * Size of the Hfile - * @return size + * Size of the Hfile n */ public long getSize() { return size; } /** - * Sets the region coprocessor env. - * @param coprocessorHost + * Sets the region coprocessor env. n */ public void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) { this.coprocessorHost = coprocessorHost; } /* - * @return the Reference object associated to this StoreFileInfo. - * null if the StoreFile is not a reference. + * @return the Reference object associated to this StoreFileInfo. null if the StoreFile is not a + * reference. 
*/ public Reference getReference() { return this.reference; @@ -289,8 +283,7 @@ public class StoreFileInfo implements Configurable { return this.hdfsBlocksDistribution; } - StoreFileReader createReader(ReaderContext context, CacheConfig cacheConf) - throws IOException { + StoreFileReader createReader(ReaderContext context, CacheConfig cacheConf) throws IOException { StoreFileReader reader = null; if (this.reference != null) { reader = new HalfStoreFileReader(context, hfileInfo, cacheConf, reference, refCount, conf); @@ -301,7 +294,7 @@ public class StoreFileInfo implements Configurable { } ReaderContext createReaderContext(boolean doDropBehind, long readahead, ReaderType type) - throws IOException { + throws IOException { FSDataInputStreamWrapper in; FileStatus status; if (this.link != null) { @@ -327,12 +320,9 @@ public class StoreFileInfo implements Configurable { status = fs.getFileStatus(initialPath); } long length = status.getLen(); - ReaderContextBuilder contextBuilder = new ReaderContextBuilder() - .withInputStreamWrapper(in) - .withFileSize(length) - .withPrimaryReplicaReader(this.primaryReplica) - .withReaderType(type) - .withFileSystem(fs); + ReaderContextBuilder contextBuilder = + new ReaderContextBuilder().withInputStreamWrapper(in).withFileSize(length) + .withPrimaryReplicaReader(this.primaryReplica).withReaderType(type).withFileSystem(fs); if (this.reference != null) { contextBuilder.withFilePath(this.getPath()); } else { @@ -345,7 +335,7 @@ public class StoreFileInfo implements Configurable { * Compute the HDFS Block Distribution for this StoreFile */ public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs) - throws IOException { + throws IOException { // guard against the case where we get the FileStatus from link, but by the time we // call compute the file is moved again if (this.link != null) { @@ -365,7 +355,7 @@ public class StoreFileInfo implements Configurable { } private HDFSBlocksDistribution computeHDFSBlocksDistributionInternal(final FileSystem fs) - throws IOException { + throws IOException { FileStatus status = getReferencedFileStatus(fs); if (this.reference != null) { return computeRefFileHDFSBlockDistribution(fs, reference, status); @@ -436,8 +426,8 @@ public class StoreFileInfo implements Configurable { @Override public String toString() { - return this.getPath() + - (isReference() ? "->" + getReferredToFile(this.getPath()) + "-" + reference : ""); + return this.getPath() + + (isReference() ? "->" + getReferredToFile(this.getPath()) + "-" + reference : ""); } /** @@ -495,8 +485,8 @@ public class StoreFileInfo implements Configurable { } /* - * Return path to the file referred to by a Reference. Presumes a directory - * hierarchy of ${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname. + * Return path to the file referred to by a Reference. Presumes a directory hierarchy of + * ${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname. * @param p Path to a Reference file. * @return Calculated path to parent region file. * @throws IllegalArgumentException when path regex fails to match. 
@@ -505,8 +495,7 @@ public class StoreFileInfo implements Configurable { Matcher m = REF_NAME_PATTERN.matcher(p.getName()); if (m == null || !m.matches()) { LOG.warn("Failed match of store file name {}", p.toString()); - throw new IllegalArgumentException("Failed match of store file name " + - p.toString()); + throw new IllegalArgumentException("Failed match of store file name " + p.toString()); } // Other region name is suffix on the passed Reference file name @@ -517,9 +506,9 @@ public class StoreFileInfo implements Configurable { LOG.trace("reference {} to region={} hfile={}", p, otherRegion, nameStrippedOfSuffix); // Build up new path with the referenced region in place of our current - // region in the reference path. Also strip regionname suffix from name. - return new Path(new Path(new Path(tableDir, otherRegion), - p.getParent().getName()), nameStrippedOfSuffix); + // region in the reference path. Also strip regionname suffix from name. + return new Path(new Path(new Path(tableDir, otherRegion), p.getParent().getName()), + nameStrippedOfSuffix); } /** @@ -528,8 +517,7 @@ public class StoreFileInfo implements Configurable { * @return true if the file could be a valid store file, false otherwise */ public static boolean validateStoreFileName(final String fileName) { - if (HFileLink.isHFileLink(fileName) || isReference(fileName)) - return(true); + if (HFileLink.isHFileLink(fileName) || isReference(fileName)) return (true); return !fileName.contains("-"); } @@ -538,12 +526,10 @@ public class StoreFileInfo implements Configurable { * @param fileStatus The {@link FileStatus} of the file * @return true if the file is valid */ - public static boolean isValid(final FileStatus fileStatus) - throws IOException { + public static boolean isValid(final FileStatus fileStatus) throws IOException { final Path p = fileStatus.getPath(); - if (fileStatus.isDirectory()) - return false; + if (fileStatus.isDirectory()) return false; // Check for empty hfile. Should never be the case but can happen // after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646 @@ -557,21 +543,19 @@ public class StoreFileInfo implements Configurable { } /** - * helper function to compute HDFS blocks distribution of a given reference - * file.For reference file, we don't compute the exact value. We use some - * estimate instead given it might be good enough. we assume bottom part - * takes the first half of reference file, top part takes the second half - * of the reference file. This is just estimate, given - * midkey ofregion != midkey of HFile, also the number and size of keys vary. - * If this estimate isn't good enough, we can improve it later. - * @param fs The FileSystem - * @param reference The reference - * @param status The reference FileStatus + * helper function to compute HDFS blocks distribution of a given reference file.For reference + * file, we don't compute the exact value. We use some estimate instead given it might be good + * enough. we assume bottom part takes the first half of reference file, top part takes the second + * half of the reference file. This is just estimate, given midkey ofregion != midkey of HFile, + * also the number and size of keys vary. If this estimate isn't good enough, we can improve it + * later. 
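// Illustrative aside: the half-file estimate described in the javadoc above (bottom reference
// covers the first half of the parent HFile, top reference covers the rest) reduces to simple
// offset/length arithmetic. Method and parameter names here are assumptions for the sketch, not
// the production signature.
class HalfFileRangeSketch {
  /** @return {offset, length} of the region of the parent file attributed to this Reference. */
  static long[] halfFileRange(long referencedFileLen, boolean isTopReference) {
    if (isTopReference) {
      // top half: second part of the file; "len - len / 2" keeps the extra byte for odd lengths
      return new long[] { referencedFileLen / 2, referencedFileLen - referencedFileLen / 2 };
    }
    // bottom half: first part of the file
    return new long[] { 0, referencedFileLen / 2 };
  }
}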
+ * @param fs The FileSystem + * @param reference The reference + * @param status The reference FileStatus * @return HDFS blocks distribution */ - private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution( - final FileSystem fs, final Reference reference, final FileStatus status) - throws IOException { + private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(final FileSystem fs, + final Reference reference, final FileStatus status) throws IOException { if (status == null) { return null; } @@ -580,11 +564,11 @@ public class StoreFileInfo implements Configurable { long length = 0; if (Reference.isTopFileRegion(reference.getFileRegion())) { - start = status.getLen()/2; - length = status.getLen() - status.getLen()/2; + start = status.getLen() / 2; + length = status.getLen() - status.getLen() / 2; } else { start = 0; - length = status.getLen()/2; + length = status.getLen() / 2; } return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length); } @@ -596,16 +580,16 @@ public class StoreFileInfo implements Configurable { if (!(that instanceof StoreFileInfo)) return false; - StoreFileInfo o = (StoreFileInfo)that; + StoreFileInfo o = (StoreFileInfo) that; if (initialPath != null && o.initialPath == null) return false; if (initialPath == null && o.initialPath != null) return false; - if (initialPath != o.initialPath && initialPath != null - && !initialPath.equals(o.initialPath)) return false; + if (initialPath != o.initialPath && initialPath != null && !initialPath.equals(o.initialPath)) + return false; if (reference != null && o.reference == null) return false; if (reference == null && o.reference != null) return false; - if (reference != o.reference && reference != null - && !reference.equals(o.reference)) return false; + if (reference != o.reference && reference != null && !reference.equals(o.reference)) + return false; if (link != null && o.link == null) return false; if (link == null && o.link != null) return false; @@ -614,14 +598,13 @@ public class StoreFileInfo implements Configurable { return true; }; - @Override public int hashCode() { int hash = 17; hash = hash * 31 + ((reference == null) ? 0 : reference.hashCode()); - hash = hash * 31 + ((initialPath == null) ? 0 : initialPath.hashCode()); + hash = hash * 31 + ((initialPath == null) ? 0 : initialPath.hashCode()); hash = hash * 31 + ((link == null) ? 
0 : link.hashCode()); - return hash; + return hash; } /** @@ -656,23 +639,21 @@ public class StoreFileInfo implements Configurable { } StoreFileReader preStoreFileReaderOpen(ReaderContext context, CacheConfig cacheConf) - throws IOException { + throws IOException { StoreFileReader reader = null; if (this.coprocessorHost != null) { reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), - context.getInputStreamWrapper(), context.getFileSize(), - cacheConf, reference); + context.getInputStreamWrapper(), context.getFileSize(), cacheConf, reference); } return reader; } StoreFileReader postStoreFileReaderOpen(ReaderContext context, CacheConfig cacheConf, - StoreFileReader reader) throws IOException { + StoreFileReader reader) throws IOException { StoreFileReader res = reader; if (this.coprocessorHost != null) { res = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(), - context.getInputStreamWrapper(), context.getFileSize(), - cacheConf, reference, reader); + context.getInputStreamWrapper(), context.getFileSize(), cacheConf, reference, reader); } return res; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java index a40b209c6eb..f276f5d76a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Optional; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.yetus.audience.InterfaceAudience; @@ -51,7 +49,7 @@ public interface StoreFileManager { * @param storeFiles The files to load. */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void loadFiles(List storeFiles); /** @@ -59,16 +57,16 @@ public interface StoreFileManager { * @param sfs New store files. */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void insertNewFiles(Collection sfs); /** * Adds only the new compaction results into the structure. * @param compactedFiles The input files for the compaction. - * @param results The resulting files for the compaction. + * @param results The resulting files for the compaction. 
*/ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void addCompactionResults(Collection compactedFiles, Collection results); /** @@ -76,7 +74,7 @@ public interface StoreFileManager { * @param compactedFiles the list of compacted files */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void removeCompactedFiles(Collection compactedFiles); /** @@ -86,24 +84,23 @@ public interface StoreFileManager { ImmutableCollection clearFiles(); /** - * Clears all the compacted files and returns them. This method is expected to be - * accessed single threaded. + * Clears all the compacted files and returns them. This method is expected to be accessed single + * threaded. * @return The files compacted previously. */ Collection clearCompactedFiles(); /** - * Gets the snapshot of the store files currently in use. Can be used for things like metrics - * and checks; should not assume anything about relations between store files in the list. + * Gets the snapshot of the store files currently in use. Can be used for things like metrics and + * checks; should not assume anything about relations between store files in the list. * @return The list of StoreFiles. */ Collection getStorefiles(); /** - * List of compacted files inside this store that needs to be excluded in reads - * because further new reads will be using only the newly created files out of compaction. - * These compacted files will be deleted/cleared once all the existing readers on these - * compacted files are done. + * List of compacted files inside this store that needs to be excluded in reads because further + * new reads will be using only the newly created files out of compaction. These compacted files + * will be deleted/cleared once all the existing readers on these compacted files are done. * @return the list of compacted files */ Collection getCompactedfiles(); @@ -123,34 +120,33 @@ public interface StoreFileManager { /** * Gets the store files to scan for a Scan or Get request. * @param startRow Start row of the request. - * @param stopRow Stop row of the request. + * @param stopRow Stop row of the request. * @return The list of files that are to be read for this request. */ Collection getFilesForScan(byte[] startRow, boolean includeStartRow, byte[] stopRow, - boolean includeStopRow); + boolean includeStopRow); /** * Gets initial, full list of candidate store files to check for row-key-before. * @param targetKey The key that is the basis of the search. - * @return The files that may have the key less than or equal to targetKey, in reverse - * order of new-ness, and preference for target key. + * @return The files that may have the key less than or equal to targetKey, in reverse order of + * new-ness, and preference for target key. */ Iterator getCandidateFilesForRowKeyBefore(KeyValue targetKey); /** * Updates the candidate list for finding row key before. Based on the list of candidates - * remaining to check from getCandidateFilesForRowKeyBefore, targetKey and current candidate, - * may trim and reorder the list to remove the files where a better candidate cannot be found. 
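// Illustrative aside: a hedged sketch of the call order implied by the StoreFileManager javadocs
// above -- flush output goes in through insertNewFiles, compaction output through
// addCompactionResults, and the inputs are physically dropped later via removeCompactedFiles once
// no reader still uses them. The interface below is a simplified, hypothetical mirror of the real
// one, which is generic over HStoreFile and restricted to StoreEngine callers.
import java.util.Collection;
import java.util.List;

interface SimpleStoreFileManager<F> {
  void insertNewFiles(Collection<F> flushedFiles);
  void addCompactionResults(Collection<F> compactedAway, Collection<F> compactionOutput);
  void removeCompactedFiles(Collection<F> noLongerReferenced);
}

class StoreFileLifecycleSketch {
  static <F> void afterCompaction(SimpleStoreFileManager<F> manager, List<F> inputs,
      List<F> outputs) {
    // 1. publish the new files and mark the inputs as compacted away (readers may still hold them)
    manager.addCompactionResults(inputs, outputs);
    // 2. once every scanner on the inputs has finished, they can actually be removed/archived
    manager.removeCompactedFiles(inputs);
  }
}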
- * @param candidateFiles The candidate files not yet checked for better candidates - return - * value from {@link #getCandidateFilesForRowKeyBefore(KeyValue)}, - * with some files already removed. - * @param targetKey The key to search for. - * @param candidate The current best candidate found. + * remaining to check from getCandidateFilesForRowKeyBefore, targetKey and current candidate, may + * trim and reorder the list to remove the files where a better candidate cannot be found. + * @param candidateFiles The candidate files not yet checked for better candidates - return value + * from {@link #getCandidateFilesForRowKeyBefore(KeyValue)}, with some files + * already removed. + * @param targetKey The key to search for. + * @param candidate The current best candidate found. * @return The list to replace candidateFiles. */ Iterator updateCandidateFilesForRowKeyBefore(Iterator candidateFiles, - KeyValue targetKey, Cell candidate); - + KeyValue targetKey, Cell candidate); /** * Gets the split point for the split of this set of store files (approx. middle). @@ -164,7 +160,7 @@ public interface StoreFileManager { int getStoreCompactionPriority(); /** - * @param maxTs Maximum expired timestamp. + * @param maxTs Maximum expired timestamp. * @param filesCompacting Files that are currently compacting. * @return The files which don't have any necessary data according to TTL and other criteria. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 32ee47e21f1..8454f4ee79e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,14 +28,13 @@ import java.util.Map; import java.util.Optional; import java.util.SortedSet; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockType; @@ -84,7 +83,7 @@ public class StoreFileReader { private final ReaderContext context; private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, ReaderContext context, - Configuration conf) { + Configuration conf) { this.reader = reader; bloomFilterType = BloomType.NONE; this.refCount = refCount; @@ -93,7 +92,7 @@ public class StoreFileReader { } public StoreFileReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, - AtomicInteger refCount, Configuration conf) throws IOException { + AtomicInteger refCount, Configuration conf) throws IOException { this(HFile.createReader(context, fileInfo, cacheConf, conf), refCount, context, conf); } @@ -130,24 +129,24 @@ public class StoreFileReader { /** * Get a scanner to scan over this StoreFile. - * @param cacheBlocks should this scanner cache blocks? 
- * @param pread use pread (for highly concurrent small readers) - * @param isCompaction is scanner being used for compaction? - * @param scannerOrder Order of this scanner relative to other scanners. See - * {@link KeyValueScanner#getScannerOrder()}. + * @param cacheBlocks should this scanner cache blocks? + * @param pread use pread (for highly concurrent small readers) + * @param isCompaction is scanner being used for compaction? + * @param scannerOrder Order of this scanner relative to other scanners. See + * {@link KeyValueScanner#getScannerOrder()}. * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column, - * otherwise {@code false}. This is a hint for optimization. + * otherwise {@code false}. This is a hint for optimization. * @return a scanner */ public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { - return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), - !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); + boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { + return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction, + reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); } /** - * Return the ref count associated with the reader whenever a scanner associated with the - * reader is opened. + * Return the ref count associated with the reader whenever a scanner associated with the reader + * is opened. */ int getRefCount() { return refCount.get(); @@ -178,11 +177,11 @@ public class StoreFileReader { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level concepts. - * + * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner + * class/interface which is the preferred way to scan a store with higher level + * concepts. * @param cacheBlocks should we cache the blocks? - * @param pread use pread (for concurrent small readers) + * @param pread use pread (for concurrent small readers) * @return the underlying HFileScanner * @see HBASE-15296 */ @@ -193,21 +192,15 @@ public class StoreFileReader { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level concepts. - * - * @param cacheBlocks - * should we cache the blocks? - * @param pread - * use pread (for concurrent small readers) - * @param isCompaction - * is scanner being used for compaction? + * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner + * class/interface which is the preferred way to scan a store with higher level + * concepts. n * should we cache the blocks? n * use pread (for concurrent small + * readers) n * is scanner being used for compaction? 
* @return the underlying HFileScanner * @see HBASE-15296 */ @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction) { + public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) { return reader.getScanner(conf, cacheBlocks, pread, isCompaction); } @@ -216,33 +209,31 @@ public class StoreFileReader { } /** - * Check if this storeFile may contain keys within the TimeRange that - * have not expired (i.e. not older than oldestUnexpiredTS). - * @param tr the timeRange to restrict - * @param oldestUnexpiredTS the oldest timestamp that is not expired, as - * determined by the column family's TTL + * Check if this storeFile may contain keys within the TimeRange that have not expired (i.e. not + * older than oldestUnexpiredTS). + * @param tr the timeRange to restrict + * @param oldestUnexpiredTS the oldest timestamp that is not expired, as determined by the column + * family's TTL * @return false if queried keys definitely don't exist in this StoreFile */ boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { - return this.timeRange == null? true: - this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; + return this.timeRange == null + ? true + : this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; } /** - * Checks whether the given scan passes the Bloom filter (if present). Only - * checks Bloom filters for single-row or single-row-column scans. Bloom - * filter checking for multi-gets is implemented as part of the store - * scanner system (see {@link StoreFileScanner#seek(Cell)} and uses - * the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} - * and {@link #passesGeneralRowColBloomFilter(Cell)}. - * - * @param scan the scan specification. Used to determine the row, and to - * check whether this is a single-row ("get") scan. - * @param columns the set of columns. Only used for row-column Bloom - * filters. - * @return true if the scan with the given column set passes the Bloom - * filter, or if the Bloom filter is not applicable for the scan. - * False if the Bloom filter is applicable and the scan fails it. + * Checks whether the given scan passes the Bloom filter (if present). Only checks Bloom filters + * for single-row or single-row-column scans. Bloom filter checking for multi-gets is implemented + * as part of the store scanner system (see {@link StoreFileScanner#seek(Cell)} and uses the + * lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} and + * {@link #passesGeneralRowColBloomFilter(Cell)}. + * @param scan the scan specification. Used to determine the row, and to check whether this is + * a single-row ("get") scan. + * @param columns the set of columns. Only used for row-column Bloom filters. + * @return true if the scan with the given column set passes the Bloom filter, or if the Bloom + * filter is not applicable for the scan. False if the Bloom filter is applicable and the + * scan fails it. */ boolean passesBloomFilter(Scan scan, final SortedSet columns) { byte[] row = scan.getStartRow(); @@ -274,8 +265,7 @@ public class StoreFileReader { } } - public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, - int rowLen) { + public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, int rowLen) { // Cache Bloom filter as a local variable in case it is set to null by // another thread on an IO error. 
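// Illustrative aside: a minimal sketch of the time-range pruning that passesTimerangeFilter above
// performs -- a store file can be skipped when its recorded [min, max] timestamp range cannot hold
// any queried, unexpired cell. TimeRangeLite is a hypothetical stand-in for
// org.apache.hadoop.hbase.io.TimeRange.
class TimeRangePruneSketch {
  static final class TimeRangeLite {
    final long min, max;
    TimeRangeLite(long min, long max) { this.min = min; this.max = max; }
    boolean overlaps(TimeRangeLite other) { return min <= other.max && max >= other.min; }
  }

  static boolean mayContainLiveCells(TimeRangeLite fileRange, TimeRangeLite queryRange,
      long oldestUnexpiredTS) {
    if (fileRange == null) {
      return true; // no metadata recorded: the file cannot be ruled out
    }
    // must overlap the queried range AND still contain something newer than the TTL cutoff
    return fileRange.overlaps(queryRange) && fileRange.max >= oldestUnexpiredTS;
  }
}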
BloomFilter bloomFilter = this.deleteFamilyBloomFilter; @@ -295,8 +285,7 @@ public class StoreFileReader { } return bloomFilter.contains(row, rowOffset, rowLen, null); } catch (IllegalArgumentException e) { - LOG.error("Bad Delete Family bloom filter data -- proceeding without", - e); + LOG.error("Bad Delete Family bloom filter data -- proceeding without", e); setDeleteFamilyBloomFilterFaulty(); } @@ -304,9 +293,8 @@ public class StoreFileReader { } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. * @return True if passes */ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLen) { @@ -318,19 +306,15 @@ public class StoreFileReader { // Used in ROW bloom byte[] key = null; if (rowOffset != 0 || rowLen != row.length) { - throw new AssertionError( - "For row-only Bloom filters the row must occupy the whole array"); + throw new AssertionError("For row-only Bloom filters the row must occupy the whole array"); } key = row; return checkGeneralBloomFilter(key, null, bloomFilter); } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * - * @param cell - * the cell to check if present in BloomFilter + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. n * the cell to check if present in BloomFilter * @return True if passes */ public boolean passesGeneralRowColBloomFilter(Cell cell) { @@ -350,9 +334,8 @@ public class StoreFileReader { } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. * @return True if passes */ private boolean passesGeneralRowPrefixBloomFilter(Scan scan) { @@ -369,7 +352,7 @@ public class StoreFileReader { // For non-get scans // Find out the common prefix of startRow and stopRow. int commonLength = Bytes.findCommonPrefix(scan.getStartRow(), scan.getStopRow(), - scan.getStartRow().length, scan.getStopRow().length, 0, 0); + scan.getStartRow().length, scan.getStopRow().length, 0, 0); // startRow and stopRow don't have the common prefix. // Or the common prefix length is less than prefixLength if (commonLength <= 0 || commonLength < prefixLength) { @@ -406,7 +389,7 @@ public class StoreFileReader { // a sufficient condition to return false. boolean keyIsAfterLast = (lastBloomKey != null); // hbase:meta does not have blooms. So we need not have special interpretation - // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom + // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom if (keyIsAfterLast) { if (bloomFilterType == BloomType.ROWCOL) { keyIsAfterLast = (CellComparator.getInstance().compare(kvKey, lastBloomKeyOnlyKV)) > 0; @@ -422,25 +405,24 @@ public class StoreFileReader { // required looking only for a row bloom. Cell rowBloomKey = PrivateCellUtil.createFirstOnRow(kvKey); // hbase:meta does not have blooms. So we need not have special interpretation - // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom - if (keyIsAfterLast - && (CellComparator.getInstance().compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) { + // of the hbase:meta cells. 
We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom + if ( + keyIsAfterLast + && (CellComparator.getInstance().compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0 + ) { exists = false; } else { - exists = - bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL) || - bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL); + exists = bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL) + || bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL); } } else { - exists = !keyIsAfterLast - && bloomFilter.contains(key, 0, key.length, bloom); + exists = !keyIsAfterLast && bloomFilter.contains(key, 0, key.length, bloom); } return exists; } } catch (IOException e) { - LOG.error("Error reading bloom filter data -- proceeding without", - e); + LOG.error("Error reading bloom filter data -- proceeding without", e); setGeneralBloomFilterFaulty(); } catch (IllegalArgumentException e) { LOG.error("Bad bloom filter data -- proceeding without", e); @@ -466,23 +448,25 @@ public class StoreFileReader { // the file is empty return false; } - if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) && - Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { + if ( + Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) + && Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW) + ) { return true; } byte[] smallestScanRow = scan.isReversed() ? scan.getStopRow() : scan.getStartRow(); byte[] largestScanRow = scan.isReversed() ? scan.getStartRow() : scan.getStopRow(); - boolean nonOverLapping = (getComparator() - .compareRows(firstKeyKV.get(), largestScanRow, 0, largestScanRow.length) > 0 && - !Bytes.equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(), - HConstants.EMPTY_END_ROW)) || - getComparator().compareRows(lastKeyKV.get(), smallestScanRow, 0, - smallestScanRow.length) < 0; + boolean nonOverLapping = + (getComparator().compareRows(firstKeyKV.get(), largestScanRow, 0, largestScanRow.length) > 0 + && !Bytes.equals(scan.isReversed() ? 
scan.getStartRow() : scan.getStopRow(), + HConstants.EMPTY_END_ROW)) + || getComparator().compareRows(lastKeyKV.get(), smallestScanRow, 0, smallestScanRow.length) + < 0; return !nonOverLapping; } public Map loadFileInfo() throws IOException { - Map fi = reader.getHFileInfo(); + Map fi = reader.getHFileInfo(); byte[] b = fi.get(BLOOM_FILTER_TYPE_KEY); if (b != null) { @@ -490,12 +474,12 @@ public class StoreFileReader { } byte[] p = fi.get(BLOOM_FILTER_PARAM_KEY); - if (bloomFilterType == BloomType.ROWPREFIX_FIXED_LENGTH) { + if (bloomFilterType == BloomType.ROWPREFIX_FIXED_LENGTH) { prefixLength = Bytes.toInt(p); } lastBloomKey = fi.get(LAST_BLOOM_KEY); - if(bloomFilterType == BloomType.ROWCOL) { + if (bloomFilterType == BloomType.ROWCOL) { lastBloomKeyOnlyKV = new KeyValue.KeyOnlyKeyValue(lastBloomKey, 0, lastBloomKey.length); } byte[] cnt = fi.get(DELETE_FAMILY_COUNT); @@ -514,48 +498,41 @@ public class StoreFileReader { public void loadBloomfilter(BlockType blockType) { try { if (blockType == BlockType.GENERAL_BLOOM_META) { - if (this.generalBloomFilter != null) - return; // Bloom has been loaded + if (this.generalBloomFilter != null) return; // Bloom has been loaded DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); if (bloomMeta != null) { // sanity check for NONE Bloom filter if (bloomFilterType == BloomType.NONE) { - throw new IOException( - "valid bloom filter type not found in FileInfo"); + throw new IOException("valid bloom filter type not found in FileInfo"); } else { - generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, - reader); + generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); if (LOG.isTraceEnabled()) { LOG.trace("Loaded " + bloomFilterType.toString() + " " - + generalBloomFilter.getClass().getSimpleName() - + " metadata for " + reader.getName()); + + generalBloomFilter.getClass().getSimpleName() + " metadata for " + + reader.getName()); } } } } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { - if (this.deleteFamilyBloomFilter != null) - return; // Bloom has been loaded + if (this.deleteFamilyBloomFilter != null) return; // Bloom has been loaded DataInput bloomMeta = reader.getDeleteBloomFilterMetadata(); if (bloomMeta != null) { - deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta( - bloomMeta, reader); - LOG.info("Loaded Delete Family Bloom (" - + deleteFamilyBloomFilter.getClass().getSimpleName() + deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + LOG.info( + "Loaded Delete Family Bloom (" + deleteFamilyBloomFilter.getClass().getSimpleName() + ") metadata for " + reader.getName()); } } else { - throw new RuntimeException("Block Type: " + blockType.toString() - + "is not supported for Bloom filter"); + throw new RuntimeException( + "Block Type: " + blockType.toString() + "is not supported for Bloom filter"); } } catch (IOException e) { - LOG.error("Error reading bloom filter meta for " + blockType - + " -- proceeding without", e); + LOG.error("Error reading bloom filter meta for " + blockType + " -- proceeding without", e); setBloomFilterFaulty(blockType); } catch (IllegalArgumentException e) { - LOG.error("Bad bloom filter meta " + blockType - + " -- proceeding without", e); + LOG.error("Bad bloom filter meta " + blockType + " -- proceeding without", e); setBloomFilterFaulty(blockType); } } @@ -569,15 +546,12 @@ public class StoreFileReader { } /** - * The number of Bloom filter entries in this store file, or an estimate - * thereof, if the Bloom filter is not loaded. 
This always returns an upper - * bound of the number of Bloom filter entries. - * + * The number of Bloom filter entries in this store file, or an estimate thereof, if the Bloom + * filter is not loaded. This always returns an upper bound of the number of Bloom filter entries. * @return an estimate of the number of Bloom filter entries in this file */ public long getFilterEntries() { - return generalBloomFilter != null ? generalBloomFilter.getKeyCount() - : reader.getEntries(); + return generalBloomFilter != null ? generalBloomFilter.getKeyCount() : reader.getEntries(); } public void setGeneralBloomFilterFaulty() { @@ -653,8 +627,7 @@ public class StoreFileReader { } public long getTotalBloomSize() { - if (generalBloomFilter == null) - return 0; + if (generalBloomFilter == null) return 0; return generalBloomFilter.getByteSize(); } @@ -676,7 +649,7 @@ public class StoreFileReader { } public long getMaxTimestamp() { - return timeRange == null ? TimeRange.INITIAL_MAX_TIMESTAMP: timeRange.getMax(); + return timeRange == null ? TimeRange.INITIAL_MAX_TIMESTAMP : timeRange.getMax(); } boolean isSkipResetSeqId() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 6e70c5b68de..ce2a3d6f249 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.FileNotFoundException; @@ -28,23 +26,21 @@ import java.util.List; import java.util.Optional; import java.util.PriorityQueue; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * KeyValueScanner adaptor over the Reader. It also provides hooks into - * bloom filter things. + * KeyValueScanner adaptor over the Reader. It also provides hooks into bloom filter things. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX) @InterfaceStability.Evolving @@ -77,16 +73,18 @@ public class StoreFileScanner implements KeyValueScanner { /** * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} - * @param useMVCC If true, scanner will filter out updates with MVCC larger than {@code readPt}. - * @param readPt MVCC value to use to filter out the updates newer than this scanner. - * @param hasMVCC Set to true if underlying store file reader has MVCC info. 
- * @param scannerOrder Order of the scanner relative to other scanners. See - * {@link KeyValueScanner#getScannerOrder()}. + * @param useMVCC If true, scanner will filter out updates with MVCC larger + * than {@code readPt}. + * @param readPt MVCC value to use to filter out the updates newer than this + * scanner. + * @param hasMVCC Set to true if underlying store file reader has MVCC info. + * @param scannerOrder Order of the scanner relative to other scanners. See + * {@link KeyValueScanner#getScannerOrder()}. * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column, - * otherwise {@code false}. This is a hint for optimization. + * otherwise {@code false}. This is a hint for optimization. */ public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVCC, - boolean hasMVCC, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { + boolean hasMVCC, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { this.readPt = readPt; this.reader = reader; this.hfs = hfs; @@ -101,8 +99,8 @@ public class StoreFileScanner implements KeyValueScanner { * Return an array of scanners corresponding to the given set of store files. */ public static List getScannersForStoreFiles(Collection files, - boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean useDropBehind, - long readPt) throws IOException { + boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean useDropBehind, long readPt) + throws IOException { return getScannersForStoreFiles(files, cacheBlocks, usePread, isCompaction, useDropBehind, null, readPt); } @@ -112,15 +110,15 @@ public class StoreFileScanner implements KeyValueScanner { * ScanQueryMatcher for each store file scanner for further optimization */ public static List getScannersForStoreFiles(Collection files, - boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop, - ScanQueryMatcher matcher, long readPt) throws IOException { + boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop, + ScanQueryMatcher matcher, long readPt) throws IOException { if (files.isEmpty()) { return Collections.emptyList(); } List scanners = new ArrayList<>(files.size()); boolean canOptimizeForNonNullColumn = matcher != null ? !matcher.hasNullColumnInQuery() : false; PriorityQueue sortedFiles = - new PriorityQueue<>(files.size(), StoreFileComparators.SEQ_ID); + new PriorityQueue<>(files.size(), StoreFileComparators.SEQ_ID); for (HStoreFile file : files) { // The sort function needs metadata so we need to open reader first before sorting the list. file.initReader(); @@ -135,7 +133,7 @@ public class StoreFileScanner implements KeyValueScanner { scanner = sf.getPreadScanner(cacheBlocks, readPt, i, canOptimizeForNonNullColumn); } else { scanner = sf.getStreamScanner(canUseDrop, cacheBlocks, isCompaction, readPt, i, - canOptimizeForNonNullColumn); + canOptimizeForNonNullColumn); } scanners.add(scanner); } @@ -155,7 +153,7 @@ public class StoreFileScanner implements KeyValueScanner { * contention with normal read request. 
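// Illustrative aside: the useMVCC/readPt contract described above boils down to a visibility rule
// -- cells whose sequence id is greater than the scanner's read point are skipped. A hedged,
// self-contained sketch with a hypothetical CellLite type in place of the real Cell:
import java.util.Iterator;

class MvccVisibilitySketch {
  static final class CellLite {
    final String row; final long seqId;
    CellLite(String row, long seqId) { this.row = row; this.seqId = seqId; }
  }

  /** @return the first cell visible at the given read point, or null if none is. */
  static CellLite firstVisible(Iterator<CellLite> cells, long readPt) {
    while (cells.hasNext()) {
      CellLite c = cells.next();
      if (c.seqId <= readPt) {
        return c; // committed at or before the read point: visible to this scanner
      }
      // otherwise: written after this scanner's read point, keep skipping
    }
    return null;
  }
}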
*/ public static List getScannersForCompaction(Collection files, - boolean canUseDropBehind, long readPt) throws IOException { + boolean canUseDropBehind, long readPt) throws IOException { List scanners = new ArrayList<>(files.size()); List sortedFiles = new ArrayList<>(files); Collections.sort(sortedFiles, StoreFileComparators.SEQ_ID); @@ -201,7 +199,7 @@ public class StoreFileScanner implements KeyValueScanner { } } catch (FileNotFoundException e) { throw e; - } catch(IOException e) { + } catch (IOException e) { throw new IOException("Could not iterate " + this, e); } return retKey; @@ -213,7 +211,7 @@ public class StoreFileScanner implements KeyValueScanner { try { try { - if(!seekAtOrAfter(hfs, key)) { + if (!seekAtOrAfter(hfs, key)) { this.cur = null; return false; } @@ -258,8 +256,7 @@ public class StoreFileScanner implements KeyValueScanner { } catch (FileNotFoundException e) { throw e; } catch (IOException ioe) { - throw new IOException("Could not reseek " + this + " to key " + key, - ioe); + throw new IOException("Could not reseek " + this + " to key " + key, ioe); } } @@ -274,13 +271,12 @@ public class StoreFileScanner implements KeyValueScanner { // We want to ignore all key-values that are newer than our current // readPoint Cell startKV = cur; - while(enforceMVCC - && cur != null - && (cur.getSequenceId() > readPt)) { + while (enforceMVCC && cur != null && (cur.getSequenceId() > readPt)) { boolean hasNext = hfs.next(); setCurrentCell(hfs.getCell()); - if (hasNext && this.stopSkippingKVsIfNextRow - && getComparator().compareRows(cur, startKV) > 0) { + if ( + hasNext && this.stopSkippingKVsIfNextRow && getComparator().compareRows(cur, startKV) > 0 + ) { return false; } } @@ -304,23 +300,18 @@ public class StoreFileScanner implements KeyValueScanner { } /** - * - * @param s - * @param k - * @return false if not found or if k is after the end. - * @throws IOException + * nn * @return false if not found or if k is after the end. n */ - public static boolean seekAtOrAfter(HFileScanner s, Cell k) - throws IOException { + public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { int result = s.seekTo(k); - if(result < 0) { + if (result < 0) { if (result == HConstants.INDEX_KEY_MAGIC) { // using faked key return true; } // Passed KV is smaller than first KV in file, work from start of file return s.seekTo(); - } else if(result > 0) { + } else if (result > 0) { // Passed KV is larger than current KV in file, if there is a next // it is the "after", if not then this scanner is done. return s.next(); @@ -329,9 +320,8 @@ public class StoreFileScanner implements KeyValueScanner { return true; } - static boolean reseekAtOrAfter(HFileScanner s, Cell k) - throws IOException { - //This function is similar to seekAtOrAfter function + static boolean reseekAtOrAfter(HFileScanner s, Cell k) throws IOException { + // This function is similar to seekAtOrAfter function int result = s.reseekTo(k); if (result <= 0) { if (result == HConstants.INDEX_KEY_MAGIC) { @@ -342,7 +332,7 @@ public class StoreFileScanner implements KeyValueScanner { // than first KV in file, and it is the first time we seek on this file. // So we also need to work from the start of file. if (!s.isSeeked()) { - return s.seekTo(); + return s.seekTo(); } return true; } @@ -360,22 +350,19 @@ public class StoreFileScanner implements KeyValueScanner { } /** - * Pretend we have done a seek but don't do it yet, if possible. 
The hope is - * that we find requested columns in more recent files and won't have to seek - * in older files. Creates a fake key/value with the given row/column and the - * highest (most recent) possible timestamp we might get from this file. When - * users of such "lazy scanner" need to know the next KV precisely (e.g. when - * this scanner is at the top of the heap), they run {@link #enforceSeek()}. + * Pretend we have done a seek but don't do it yet, if possible. The hope is that we find + * requested columns in more recent files and won't have to seek in older files. Creates a fake + * key/value with the given row/column and the highest (most recent) possible timestamp we might + * get from this file. When users of such "lazy scanner" need to know the next KV precisely (e.g. + * when this scanner is at the top of the heap), they run {@link #enforceSeek()}. *

      - * Note that this function does guarantee that the current KV of this scanner - * will be advanced to at least the given KV. Because of this, it does have - * to do a real seek in cases when the seek timestamp is older than the - * highest timestamp of the file, e.g. when we are trying to seek to the next - * row/column and use OLDEST_TIMESTAMP in the seek key. + * Note that this function does guarantee that the current KV of this scanner will be advanced to + * at least the given KV. Because of this, it does have to do a real seek in cases when the seek + * timestamp is older than the highest timestamp of the file, e.g. when we are trying to seek to + * the next row/column and use OLDEST_TIMESTAMP in the seek key. */ @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) - throws IOException { + public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { if (kv.getFamilyLength() == 0) { useBloom = false; } @@ -385,9 +372,10 @@ public class StoreFileScanner implements KeyValueScanner { // check ROWCOL Bloom filter first. if (reader.getBloomFilterType() == BloomType.ROWCOL) { haveToSeek = reader.passesGeneralRowColBloomFilter(kv); - } else if (canOptimizeForNonNullColumn - && ((PrivateCellUtil.isDeleteFamily(kv) - || PrivateCellUtil.isDeleteFamilyVersion(kv)))) { + } else if ( + canOptimizeForNonNullColumn + && ((PrivateCellUtil.isDeleteFamily(kv) || PrivateCellUtil.isDeleteFamilyVersion(kv))) + ) { // if there is no such delete family kv in the store file, // then no need to seek. haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(), @@ -450,8 +438,7 @@ public class StoreFileScanner implements KeyValueScanner { @Override public void enforceSeek() throws IOException { - if (realSeekDone) - return; + if (realSeekDone) return; if (delayedReseek) { reseek(delayedSeekKV); @@ -487,8 +474,9 @@ public class StoreFileScanner implements KeyValueScanner { if (timeRange == null) { timeRange = scan.getTimeRange(); } - return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) && reader - .passesKeyRangeFilter(scan) && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf)); + return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) + && reader.passesKeyRangeFilter(scan) + && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf)); } @Override @@ -521,8 +509,7 @@ public class StoreFileScanner implements KeyValueScanner { } finally { this.stopSkippingKVsIfNextRow = false; } - if (!resultOfSkipKVs - || getComparator().compareRows(cur, firstKeyOfPreviousRow) > 0) { + if (!resultOfSkipKVs || getComparator().compareRows(cur, firstKeyOfPreviousRow) > 0) { keepSeeking = true; key = firstKeyOfPreviousRow; continue; @@ -537,8 +524,7 @@ public class StoreFileScanner implements KeyValueScanner { } catch (FileNotFoundException e) { throw e; } catch (IOException ioe) { - throw new IOException("Could not seekToPreviousRow " + this + " to key " - + originalKey, ioe); + throw new IOException("Could not seekToPreviousRow " + this + " to key " + originalKey, ioe); } } @@ -559,8 +545,7 @@ public class StoreFileScanner implements KeyValueScanner { @Override public boolean backwardSeek(Cell key) throws IOException { seek(key); - if (cur == null - || getComparator().compareRows(cur, key) > 0) { + if (cur == null || getComparator().compareRows(cur, key) > 0) { return seekToPreviousRow(key); } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index 829028eec63..2355f29958a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,13 +64,12 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Strings; -import org.apache.hbase.thirdparty.com.google.common.collect.SetMultimap; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A StoreFile writer. Use this to read/write HBase Store Files. It is package - * local because it is an implementation detail of the HBase regionserver. + * A StoreFile writer. Use this to read/write HBase Store Files. It is package local because it is + * an implementation detail of the HBase regionserver. */ @InterfaceAudience.Private public class StoreFileWriter implements CellSink, ShipperListener { @@ -91,13 +90,12 @@ public class StoreFileWriter implements CellSink, ShipperListener { /** * Creates an HFile.Writer that also write helpful meta data. - * * @param fs file system to write to * @param path file name to create * @param conf user configuration * @param bloomType bloom filter setting - * @param maxKeys the expected maximum number of keys to be added. Was used - * for Bloom filter size in {@link HFile} format version 1. + * @param maxKeys the expected maximum number of keys to be added. Was used for + * Bloom filter size in {@link HFile} format version 1. * @param favoredNodes an array of favored nodes or possibly null * @param fileContext The HFile context * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file. 
@@ -105,31 +103,28 @@ public class StoreFileWriter implements CellSink, ShipperListener { * @throws IOException problem writing to FS */ private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, - BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext, - boolean shouldDropCacheBehind, Supplier> compactedFilesSupplier) - throws IOException { + BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext, + boolean shouldDropCacheBehind, Supplier> compactedFilesSupplier) + throws IOException { this.compactedFilesSupplier = compactedFilesSupplier; this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC); // TODO : Change all writers to be specifically created for compaction context - writer = HFile.getWriterFactory(conf, cacheConf) - .withPath(fs, path) - .withFavoredNodes(favoredNodes) - .withFileContext(fileContext) - .withShouldDropCacheBehind(shouldDropCacheBehind) - .create(); + writer = + HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withFavoredNodes(favoredNodes) + .withFileContext(fileContext).withShouldDropCacheBehind(shouldDropCacheBehind).create(); - generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( - conf, cacheConf, bloomType, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, + bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); if (generalBloomFilterWriter != null) { this.bloomType = bloomType; this.bloomParam = BloomFilterUtil.getBloomFilterParam(bloomType, conf); if (LOG.isTraceEnabled()) { LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", param: " - + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH? - Bytes.toInt(bloomParam):Bytes.toStringBinary(bloomParam)) - + ", " + generalBloomFilterWriter.getClass().getSimpleName()); + + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH + ? Bytes.toInt(bloomParam) + : Bytes.toStringBinary(bloomParam)) + + ", " + generalBloomFilterWriter.getClass().getSimpleName()); } // init bloom context switch (bloomType) { @@ -147,7 +142,7 @@ public class StoreFileWriter implements CellSink, ShipperListener { break; default: throw new IOException( - "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)"); + "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)"); } } else { // Not using Bloom filters. 
@@ -157,42 +152,39 @@ public class StoreFileWriter implements CellSink, ShipperListener { // initialize delete family Bloom filter when there is NO RowCol Bloom // filter if (this.bloomType != BloomType.ROWCOL) { - this.deleteFamilyBloomFilterWriter = BloomFilterFactory - .createDeleteBloomAtWrite(conf, cacheConf, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf, + cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); deleteFamilyBloomContext = new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator()); } else { deleteFamilyBloomFilterWriter = null; } if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) { - LOG.trace("Delete Family Bloom filter type for " + path + ": " + - deleteFamilyBloomFilterWriter.getClass().getSimpleName()); + LOG.trace("Delete Family Bloom filter type for " + path + ": " + + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); } } /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction * @throws IOException problem writing to FS */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction) - throws IOException { + throws IOException { appendMetadata(maxSequenceId, majorCompaction, Collections.emptySet()); } /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction - * @param storeFiles The compacted store files to generate this new file + * @param storeFiles The compacted store files to generate this new file * @throws IOException problem writing to FS */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction, - final Collection storeFiles) throws IOException { + final Collection storeFiles) throws IOException { writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); writer.appendFileInfo(COMPACTION_EVENT_KEY, toCompactionEventTrackerBytes(storeFiles)); @@ -206,16 +198,14 @@ public class StoreFileWriter implements CellSink, ShipperListener { * recursively. If file A, B, C compacted to new file D, and file D compacted to new file E, will * write A, B, C, D to file E's compacted files. So if file E compacted to new file F, will add E * to F's compacted files first, then add E's compacted files: A, B, C, D to it. And no need to - * add D's compacted file, as D's compacted files has been in E's compacted files, too. - * See HBASE-20724 for more details. - * + * add D's compacted file, as D's compacted files has been in E's compacted files, too. See + * HBASE-20724 for more details. 
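// Illustrative aside: the transitive rule spelled out above (HBASE-20724) -- if A, B, C were
// compacted into D and D is now compacted into E, then E records {A, B, C, D} -- amounts to a
// simple union. The names and the Map-based bookkeeping are assumptions for this sketch.
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class CompactionEventSketch {
  /**
   * @param inputs      file names being compacted into the new file
   * @param compactedOf previously recorded "compacted from" sets, keyed by file name
   */
  static Set<String> compactedFilesFor(Collection<String> inputs,
      Map<String, Set<String>> compactedOf) {
    Set<String> result = new HashSet<>();
    for (String input : inputs) {
      result.add(input); // the direct input itself
      result.addAll(compactedOf.getOrDefault(input, Set.of())); // plus what it already carried
    }
    return result;
  }
}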
* @param storeFiles The compacted store files to generate this new file * @return bytes of CompactionEventTracker */ private byte[] toCompactionEventTrackerBytes(Collection storeFiles) { - Set notArchivedCompactedStoreFiles = - this.compactedFilesSupplier.get().stream().map(sf -> sf.getPath().getName()) - .collect(Collectors.toSet()); + Set notArchivedCompactedStoreFiles = this.compactedFilesSupplier.get().stream() + .map(sf -> sf.getPath().getName()).collect(Collectors.toSet()); Set compactedStoreFiles = new HashSet<>(); for (HStoreFile storeFile : storeFiles) { compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName()); @@ -229,15 +219,14 @@ public class StoreFileWriter implements CellSink, ShipperListener { } /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction - * @param mobCellsCount The number of mob cells. + * @param mobCellsCount The number of mob cells. * @throws IOException problem writing to FS */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction, - final long mobCellsCount) throws IOException { + final long mobCellsCount) throws IOException { writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); writer.appendFileInfo(MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount)); @@ -255,10 +244,8 @@ public class StoreFileWriter implements CellSink, ShipperListener { } /** - * Record the earlest Put timestamp. - * - * If the timeRangeTracker is not set, - * update TimeRangeTracker to include the timestamp of this key + * Record the earlest Put timestamp. If the timeRangeTracker is not set, update TimeRangeTracker + * to include the timestamp of this key */ public void trackTimestamps(final Cell cell) { if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { @@ -271,19 +258,15 @@ public class StoreFileWriter implements CellSink, ShipperListener { if (this.generalBloomFilterWriter != null) { /* * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png - * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp - * - * 3 Types of Filtering: - * 1. Row = Row - * 2. RowCol = Row + Qualifier - * 3. RowPrefixFixedLength = Fixed Length Row Prefix + * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp 3 Types of + * Filtering: 1. Row = Row 2. RowCol = Row + Qualifier 3. RowPrefixFixedLength = Fixed Length + * Row Prefix */ bloomContext.writeBloom(cell); } } - private void appendDeleteFamilyBloomFilter(final Cell cell) - throws IOException { + private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { if (!PrivateCellUtil.isDeleteFamily(cell) && !PrivateCellUtil.isDeleteFamilyVersion(cell)) { return; } @@ -326,7 +309,6 @@ public class StoreFileWriter implements CellSink, ShipperListener { /** * For unit testing only. - * * @return the Bloom filter used by this writer. */ BloomFilterWriter getGeneralBloomWriter() { @@ -380,9 +362,9 @@ public class StoreFileWriter implements CellSink, ShipperListener { // Log final Bloom filter statistics. This needs to be done after close() // because compound Bloom filters might be finalized as part of closing. 
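// Illustrative aside: the three Bloom key shapes listed in the comment above, reduced to plain
// byte-array handling. The real writer goes through RowBloomContext / RowColBloomContext and
// works on Cells, so this is a simplified, assumption-laden sketch.
import java.util.Arrays;

class BloomKeySketch {
  enum Type { ROW, ROWCOL, ROWPREFIX_FIXED_LENGTH }

  static byte[] bloomKey(byte[] row, byte[] qualifier, int prefixLength, Type type) {
    switch (type) {
      case ROW:
        return row; // whole row key
      case ROWCOL: {
        byte[] key = Arrays.copyOf(row, row.length + qualifier.length);
        System.arraycopy(qualifier, 0, key, row.length, qualifier.length);
        return key; // row + qualifier (family is elided in this sketch)
      }
      case ROWPREFIX_FIXED_LENGTH:
        return Arrays.copyOf(row, Math.min(prefixLength, row.length)); // fixed-length row prefix
      default:
        throw new AssertionError("unreachable");
    }
  }
}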
if (LOG.isTraceEnabled()) { - LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " + - (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + - getPath()); + LOG.trace( + (hasGeneralBloom ? "" : "NO ") + "General Bloom and " + (hasDeleteFamilyBloom ? "" : "NO ") + + "DeleteFamily" + " was added to HFile " + getPath()); } } @@ -391,7 +373,8 @@ public class StoreFileWriter implements CellSink, ShipperListener { writer.appendFileInfo(key, value); } - /** For use in testing. + /** + * For use in testing. */ HFile.Writer getHFileWriter() { return writer; @@ -408,8 +391,8 @@ public class StoreFileWriter implements CellSink, ShipperListener { return new Path(dir, dash.matcher(UUID.randomUUID().toString()).replaceAll("")); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", - justification="Will not overflow") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", + justification = "Will not overflow") public static class Builder { private final Configuration conf; private final CacheConfig cacheConf; @@ -448,9 +431,8 @@ public class StoreFileWriter implements CellSink, ShipperListener { /** * Use either this method or {@link #withFilePath}, but not both. - * @param dir Path to column family directory. The directory is created if - * does not exist. The file is given a unique name within this - * directory. + * @param dir Path to column family directory. The directory is created if does not exist. The + * file is given a unique name within this directory. * @return this (for chained invocation) */ public Builder withOutputDir(Path dir) { @@ -504,8 +486,8 @@ public class StoreFileWriter implements CellSink, ShipperListener { return this; } - public Builder withCompactedFilesSupplier( - Supplier> compactedFilesSupplier) { + public Builder + withCompactedFilesSupplier(Supplier> compactedFilesSupplier) { this.compactedFilesSupplier = compactedFilesSupplier; return this; } @@ -521,14 +503,12 @@ public class StoreFileWriter implements CellSink, ShipperListener { } /** - * Create a store file writer. Client is responsible for closing file when - * done. If metadata, add BEFORE closing using - * {@link StoreFileWriter#appendMetadata}. + * Create a store file writer. Client is responsible for closing file when done. If metadata, + * add BEFORE closing using {@link StoreFileWriter#appendMetadata}. */ public StoreFileWriter build() throws IOException { if ((dir == null ? 0 : 1) + (filePath == null ? 
0 : 1) != 1) { - throw new IllegalArgumentException("Either specify parent directory " + - "or file path"); + throw new IllegalArgumentException("Either specify parent directory " + "or file path"); } if (dir == null) { @@ -576,14 +556,8 @@ public class StoreFileWriter implements CellSink, ShipperListener { if (writerCreationTracker != null) { writerCreationTracker.accept(filePath); } - return new StoreFileWriter( - fs, - filePath, - conf, - cacheConf, - bloomType, - maxKeyCount, - favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier); + return new StoreFileWriter(fs, filePath, conf, cacheConf, bloomType, maxKeyCount, + favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java index e53fdc0de2a..07db4427641 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; @@ -25,56 +24,43 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.yetus.audience.InterfaceAudience; /** - * A package protected interface for a store flushing. - * A store flush context carries the state required to prepare/flush/commit the store's cache. + * A package protected interface for a store flushing. A store flush context carries the state + * required to prepare/flush/commit the store's cache. */ @InterfaceAudience.Private interface StoreFlushContext { /** - * Prepare for a store flush (create snapshot) - * Requires pausing writes. - * A very short operation. + * Prepare for a store flush (create snapshot) Requires pausing writes. A very short operation. * @return The size of snapshot to flush */ MemStoreSize prepare(); /** - * Flush the cache (create the new store file) - * - * A length operation which doesn't require locking out any function - * of the store. - * + * Flush the cache (create the new store file) A length operation which doesn't require locking + * out any function of the store. * @throws IOException in case the flush fails */ void flushCache(MonitoredTask status) throws IOException; /** - * Commit the flush - add the store file to the store and clear the - * memstore snapshot. - * - * Requires pausing scans. - * - * A very short operation - * - * @return whether compaction is required - * @throws IOException + * Commit the flush - add the store file to the store and clear the memstore snapshot. Requires + * pausing scans. A very short operation + * @return whether compaction is required n */ boolean commit(MonitoredTask status) throws IOException; /** - * Similar to commit, but called in secondary region replicas for replaying the - * flush cache from primary region. Adds the new files to the store, and drops the - * snapshot depending on dropMemstoreSnapshot argument. - * @param fileNames names of the flushed files - * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot - * @throws IOException + * Similar to commit, but called in secondary region replicas for replaying the flush cache from + * primary region. 
Adds the new files to the store, and drops the snapshot depending on + * dropMemstoreSnapshot argument. + * @param fileNames names of the flushed files + * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot n */ void replayFlush(List fileNames, boolean dropMemstoreSnapshot) throws IOException; /** - * Abort the snapshot preparation. Drops the snapshot if any. - * @throws IOException + * Abort the snapshot preparation. Drops the snapshot if any. n */ void abort() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index d6461f7729a..82ec1b0c5bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; @@ -48,9 +47,10 @@ abstract class StoreFlusher { /** * Turns a snapshot of memstore into a set of store files. - * @param snapshot Memstore snapshot. - * @param cacheFlushSeqNum Log cache flush sequence number. - * @param status Task that represents the flush operation and may be updated with status. + * @param snapshot Memstore snapshot. + * @param cacheFlushSeqNum Log cache flush sequence number. + * @param status Task that represents the flush operation and may be updated with + * status. * @param throughputController A controller to avoid flush too fast * @return List of files written. Can be empty; must not be null. */ @@ -58,8 +58,8 @@ abstract class StoreFlusher { MonitoredTask status, ThroughputController throughputController, FlushLifeCycleTracker tracker, Consumer writerCreationTracker) throws IOException; - protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, - MonitoredTask status) throws IOException { + protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, MonitoredTask status) + throws IOException { // Write out the log sequence number that corresponds to this output // hfile. Also write current time in metadata as minFlushTime. // The hfile is current up to and including cacheFlushSeqNum. 
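[Editorial aside, not part of the patch] A minimal sketch of the ordering contract that the StoreFileWriter javadoc and the finalizeWriter comment above both rely on: flush metadata must be appended before close(), since appendMetadata writes file-info entries that are persisted with the file. The helper name and the majorCompaction value are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

public final class FlushFinishSketch {
  // Append flush metadata, then close; reversing the order would lose the metadata.
  static void finishFlush(StoreFileWriter writer, long cacheFlushSeqNum) throws IOException {
    writer.appendMetadata(cacheFlushSeqNum, false /* not a major compaction */);
    writer.close();
  }
}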
@@ -72,14 +72,10 @@ abstract class StoreFlusher { protected final StoreFileWriter createWriter(MemStoreSnapshot snapshot, boolean alwaysIncludesTag, Consumer writerCreationTracker) throws IOException { return store.getStoreEngine() - .createWriter( - CreateStoreFileWriterParams.create() - .maxKeyCount(snapshot.getCellsCount()) - .compression(store.getColumnFamilyDescriptor().getCompressionType()) - .isCompaction(false) - .includeMVCCReadpoint(true) - .includesTag(alwaysIncludesTag || snapshot.isTagsPresent()) - .shouldDropBehind(false).writerCreationTracker(writerCreationTracker)); + .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(snapshot.getCellsCount()) + .compression(store.getColumnFamilyDescriptor().getCompressionType()).isCompaction(false) + .includeMVCCReadpoint(true).includesTag(alwaysIncludesTag || snapshot.isTagsPresent()) + .shouldDropBehind(false).writerCreationTracker(writerCreationTracker)); } /** @@ -96,7 +92,7 @@ abstract class StoreFlusher { } final long smallestReadPoint = store.getSmallestReadPoint(); InternalScanner scanner = new StoreScanner(store, scanInfo, snapshotScanners, - ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, HConstants.OLDEST_TIMESTAMP); + ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, HConstants.OLDEST_TIMESTAMP); if (store.getCoprocessorHost() != null) { try { @@ -111,24 +107,24 @@ abstract class StoreFlusher { /** * Performs memstore flush, writing data from scanner into sink. - * @param scanner Scanner to get data from. - * @param sink Sink to write data to. Could be StoreFile.Writer. + * @param scanner Scanner to get data from. + * @param sink Sink to write data to. Could be StoreFile.Writer. * @param throughputController A controller to avoid flush too fast */ protected void performFlush(InternalScanner scanner, CellSink sink, - ThroughputController throughputController) throws IOException { + ThroughputController throughputController) throws IOException { int compactionKVMax = - conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); + conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); List kvs = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); // no control on system table (such as meta, namespace, etc) flush boolean control = - throughputController != null && !store.getRegionInfo().getTable().isSystemTable(); + throughputController != null && !store.getRegionInfo().getTable().isSystemTable(); if (control) { throughputController.start(flushName); } @@ -150,7 +146,7 @@ abstract class StoreFlusher { } while (hasMore); } catch (InterruptedException e) { throw new InterruptedIOException( - "Interrupted while control throughput of flushing " + flushName); + "Interrupted while control throughput of flushing " + flushName); } finally { if (control) { throughputController.finish(flushName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index de4325ad844..451465c1b2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,9 +29,9 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -61,7 +60,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUti */ @InterfaceAudience.Private public class StoreScanner extends NonReversedNonLazyKeyValueScanner - implements KeyValueScanner, InternalScanner, ChangedReadersObserver { + implements KeyValueScanner, InternalScanner, ChangedReadersObserver { private static final Logger LOG = LoggerFactory.getLogger(StoreScanner.class); // In unit tests, the store could be null protected final HStore store; @@ -94,15 +93,15 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner long mixedReads; // 1) Collects all the KVHeap that are eagerly getting closed during the - // course of a scan + // course of a scan // 2) Collects the unused memstore scanners. If we close the memstore scanners - // before sending data to client, the chunk may be reclaimed by other - // updates and the data will be corrupt. + // before sending data to client, the chunk may be reclaimed by other + // updates and the data will be corrupt. private final List scannersForDelayedClose = new ArrayList<>(); /** - * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not - * KVs skipped via seeking to next row/column. TODO: estimate them? + * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not KVs skipped via + * seeking to next row/column. TODO: estimate them? */ private long kvsScanned = 0; private Cell prevCell = null; @@ -113,7 +112,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** We don't ever expect to change this, the constant is just for clarity. */ static final boolean LAZY_SEEK_ENABLED_BY_DEFAULT = true; public static final String STORESCANNER_PARALLEL_SEEK_ENABLE = - "hbase.storescanner.parallel.seek.enable"; + "hbase.storescanner.parallel.seek.enable"; /** Used during unit testing to ensure that lazy seek does save seek ops */ private static boolean lazySeekEnabledGlobally = LAZY_SEEK_ENABLED_BY_DEFAULT; @@ -124,7 +123,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * timeout checks. */ public static final String HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK = - "hbase.cells.scanned.per.heartbeat.check"; + "hbase.cells.scanned.per.heartbeat.check"; /** * Default value of {@link #HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK}. @@ -134,9 +133,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** * If the read type is Scan.ReadType.DEFAULT, we will start with pread, and if the kvs we scanned * reaches this limit, we will reopen the scanner with stream. The default value is 4 times of - * block size for this store. - * If configured with a value <0, for all scans with ReadType DEFAULT, we will open scanner with - * stream mode itself. + * block size for this store. 
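[Editorial aside, not part of the patch] A hedged note on the pread/stream switch this javadoc describes: a caller that already knows a scan will be long-running can bypass the DEFAULT heuristic by pinning the read type on the Scan itself, assuming the public Scan API. The scan names below are made up.

import org.apache.hadoop.hbase.client.Scan;

public final class ReadTypeChoice {
  static Scan longRunningScan() {
    // Stream from the start instead of waiting for the pread byte threshold.
    return new Scan().setReadType(Scan.ReadType.STREAM);
  }

  static Scan shortScan() {
    // Stick to positional reads regardless of how many bytes get scanned.
    return new Scan().setReadType(Scan.ReadType.PREAD);
  }
}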
If configured with a value <0, for all scans with ReadType DEFAULT, + * we will open scanner with stream mode itself. */ public static final String STORESCANNER_PREAD_MAX_BYTES = "hbase.storescanner.pread.max.bytes"; @@ -162,8 +160,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner private boolean topChanged = false; /** An internal constructor. */ - private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, - int numColumns, long readPt, boolean cacheBlocks, ScanType scanType) { + private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, long readPt, + boolean cacheBlocks, ScanType scanType) { this.readPt = readPt; this.store = store; this.cacheBlocks = cacheBlocks; @@ -179,8 +177,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // the seek operation. However, we also look the row-column Bloom filter // for multi-row (non-"get") scans because this is not done in // StoreFile.passesBloomFilter(Scan, SortedSet). - this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1) - && (store == null || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); + this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1) && (store == null + || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); this.maxRowSize = scanInfo.getTableMaxRowSize(); this.preadMaxBytes = scanInfo.getPreadMaxBytes(); if (get) { @@ -223,18 +221,16 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } /** - * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we - * are not in a compaction. - * - * @param store who we scan - * @param scan the spec - * @param columns which columns we are scanning - * @throws IOException + * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we are not in a + * compaction. + * @param store who we scan + * @param scan the spec + * @param columns which columns we are scanning n */ public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns, - long readPt) throws IOException { - this(store, scan, scanInfo, columns != null ? columns.size() : 0, readPt, - scan.getCacheBlocks(), ScanType.USER_SCAN); + long readPt) throws IOException { + this(store, scan, scanInfo, columns != null ? columns.size() : 0, readPt, scan.getCacheBlocks(), + ScanType.USER_SCAN); if (columns != null && scan.isRaw()) { throw new DoNotRetryIOException("Cannot specify any column for a raw scan"); } @@ -281,12 +277,12 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * Used for store file compaction and memstore compaction. *
<p>
      * Opens a scanner across specified StoreFiles/MemStoreSegments. - * @param store who we scan - * @param scanners ancillary scanners + * @param store who we scan + * @param scanners ancillary scanners * @param smallestReadPoint the readPoint that we should use for tracking versions */ public StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException { + ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException { this(store, scanInfo, scanners, scanType, smallestReadPoint, earliestPutTs, null, null); } @@ -294,28 +290,28 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * Used for compactions that drop deletes from a limited range of rows. *
<p>
      * Opens a scanner across specified StoreFiles. - * @param store who we scan - * @param scanners ancillary scanners - * @param smallestReadPoint the readPoint that we should use for tracking versions + * @param store who we scan + * @param scanners ancillary scanners + * @param smallestReadPoint the readPoint that we should use for tracking versions * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW. - * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW. + * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW. */ public StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, - byte[] dropDeletesToRow) throws IOException { + long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) + throws IOException { this(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, - earliestPutTs, dropDeletesFromRow, dropDeletesToRow); + earliestPutTs, dropDeletesFromRow, dropDeletesToRow); } private StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, - byte[] dropDeletesToRow) throws IOException { + ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, + byte[] dropDeletesToRow) throws IOException { this(store, SCAN_FOR_COMPACTION, scanInfo, 0, - store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType); + store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType); assert scanType != ScanType.USER_SCAN; matcher = - CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint, earliestPutTs, - oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost()); + CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint, earliestPutTs, + oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost()); // Filter the list of scanners using Bloom filters, time range, TTL, etc. scanners = selectScannersFrom(store, scanners); @@ -328,7 +324,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } private void seekAllScanner(ScanInfo scanInfo, List scanners) - throws IOException { + throws IOException { // Seek all scanners to the initial key seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled); addCurrentScanners(scanners); @@ -337,7 +333,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // For mob compaction only as we do not have a Store instance when doing mob compaction. public StoreScanner(ScanInfo scanInfo, ScanType scanType, - List scanners) throws IOException { + List scanners) throws IOException { this(null, SCAN_FOR_COMPACTION, scanInfo, 0, Long.MAX_VALUE, false, scanType); assert scanType != ScanType.USER_SCAN; this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, 0L, @@ -347,24 +343,24 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Used to instantiate a scanner for user scan in test StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, - List scanners) throws IOException { + List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store - this(null, scan, scanInfo, columns != null ? 
columns.size() : 0, 0L, - scan.getCacheBlocks(), ScanType.USER_SCAN); + this(null, scan, scanInfo, columns != null ? columns.size() : 0, 0L, scan.getCacheBlocks(), + ScanType.USER_SCAN); this.matcher = - UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); + UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); seekAllScanner(scanInfo, scanners); } // Used to instantiate a scanner for user scan in test StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, - List scanners, ScanType scanType) throws IOException { + List scanners, ScanType scanType) throws IOException { // 0 is passed as readpoint because the test bypasses Store this(null, scan, scanInfo, columns != null ? columns.size() : 0, 0L, scan.getCacheBlocks(), - scanType); + scanType); if (scanType == ScanType.USER_SCAN) { this.matcher = - UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); + UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); } else { this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); @@ -374,10 +370,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Used to instantiate a scanner for compaction in test StoreScanner(ScanInfo scanInfo, int maxVersions, ScanType scanType, - List scanners) throws IOException { + List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store - this(null, maxVersions > 0 ? new Scan().readVersions(maxVersions) - : SCAN_FOR_COMPACTION, scanInfo, 0, 0L, false, scanType); + this(null, maxVersions > 0 ? new Scan().readVersions(maxVersions) : SCAN_FOR_COMPACTION, + scanInfo, 0, 0L, false, scanType); this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); seekAllScanner(scanInfo, scanners); @@ -388,16 +384,11 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } /** - * Seek the specified scanners with the given key - * @param scanners - * @param seekKey - * @param isLazy true if using lazy seek - * @param isParallelSeek true if using parallel seek - * @throws IOException + * Seek the specified scanners with the given key nn * @param isLazy true if using lazy seek + * @param isParallelSeek true if using parallel seek n */ - protected void seekScanners(List scanners, - Cell seekKey, boolean isLazy, boolean isParallelSeek) - throws IOException { + protected void seekScanners(List scanners, Cell seekKey, + boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the next matching Row). 
// Always check bloom filter to optimize the top row seek for delete @@ -411,8 +402,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner long totalScannersSoughtBytes = 0; for (KeyValueScanner scanner : scanners) { if (matcher.isUserScan() && totalScannersSoughtBytes >= maxRowSize) { - throw new RowTooBigException("Max row size allowed: " + maxRowSize - + ", but row is bigger than that"); + throw new RowTooBigException( + "Max row size allowed: " + maxRowSize + ", but row is bigger than that"); } scanner.seek(seekKey); Cell c = scanner.peek(); @@ -426,14 +417,14 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } } - protected void resetKVHeap(List scanners, - CellComparator comparator) throws IOException { + protected void resetKVHeap(List scanners, CellComparator comparator) + throws IOException { // Combine all seeked scanners with a heap heap = newKVHeap(scanners, comparator); } protected KeyValueHeap newKVHeap(List scanners, - CellComparator comparator) throws IOException { + CellComparator comparator) throws IOException { return new KeyValueHeap(scanners, comparator); } @@ -443,7 +434,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * Will be overridden by testcase so declared as protected. */ protected List selectScannersFrom(HStore store, - List allScanners) { + List allScanners) { boolean memOnly; boolean filesOnly; if (scan instanceof InternalScan) { @@ -539,10 +530,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } /** - * Get the next row of values from this Store. - * @param outResult - * @param scannerContext - * @return true if there are more rows, false if scanner is done + * Get the next row of values from this Store. nn * @return true if there are more rows, false if + * scanner is done */ @Override public boolean next(List outResult, ScannerContext scannerContext) throws IOException { @@ -592,8 +581,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Or if the preadMaxBytes is reached and we may want to return so we can switch to stream // in // the shipped method below. - if (kvsScanned % cellsPerHeartbeatCheck == 0 - || (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes)) { + if ( + kvsScanned % cellsPerHeartbeatCheck == 0 + || (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes) + ) { if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { return scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues(); } @@ -643,8 +634,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner totalBytesRead += cellSize; /** - * Increment the metric if all the cells are from memstore. - * If not we will account it for mixed reads + * Increment the metric if all the cells are from memstore. 
If not we will account it + * for mixed reads */ onlyFromMemstore = onlyFromMemstore && heap.isLatestCellFromMemstore(); // Update the progress of the scanner context @@ -653,10 +644,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner if (matcher.isUserScan() && totalBytesRead > maxRowSize) { String message = "Max row size allowed: " + maxRowSize - + ", but the row is bigger than that, the row info: " - + CellUtil.toString(cell, false) + ", already have process row cells = " - + outResult.size() + ", it belong to region = " - + store.getHRegion().getRegionInfo().getRegionNameAsString(); + + ", but the row is bigger than that, the row info: " + + CellUtil.toString(cell, false) + ", already have process row cells = " + + outResult.size() + ", it belong to region = " + + store.getHRegion().getRegionInfo().getRegionNameAsString(); LOG.warn(message); throw new RowTooBigException(message); } @@ -740,8 +731,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner Cell nextKV = matcher.getNextKeyHint(cell); if (nextKV != null) { int difference = comparator.compare(nextKV, cell); - if (((!scan.isReversed() && difference > 0) - || (scan.isReversed() && difference < 0))) { + if ( + ((!scan.isReversed() && difference > 0) || (scan.isReversed() && difference < 0)) + ) { seekAsDirection(nextKV); NextState stateAfterSeekByHint = needToReturn(outResult); if (stateAfterSeekByHint != null) { @@ -793,15 +785,13 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } /** - * If the top cell won't be flushed into disk, the new top cell may be - * changed after #reopenAfterFlush. Because the older top cell only exist - * in the memstore scanner but the memstore scanner is replaced by hfile - * scanner after #reopenAfterFlush. If the row of top cell is changed, - * we should return the current cells. Otherwise, we may return - * the cells across different rows. + * If the top cell won't be flushed into disk, the new top cell may be changed after + * #reopenAfterFlush. Because the older top cell only exist in the memstore scanner but the + * memstore scanner is replaced by hfile scanner after #reopenAfterFlush. If the row of top cell + * is changed, we should return the current cells. Otherwise, we may return the cells across + * different rows. * @param outResult the cells which are visible for user scan - * @return null is the top cell doesn't change. Otherwise, the NextState - * to return + * @return null is the top cell doesn't change. Otherwise, the NextState to return */ private NextState needToReturn(List outResult) { if (!outResult.isEmpty() && topChanged) { @@ -829,30 +819,31 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** * See if we should actually SEEK or rather just SKIP to the next Cell (see HBASE-13109). - * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, - * or seek to an arbitrary seek key. This method decides whether a seek is the most efficient - * _actual_ way to get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, - * SKIP inside the current, loaded block). - * It does this by looking at the next indexed key of the current HFile. This key - * is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible key - * on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work with - * the current Cell but compare as though it were a seek key; see down in - * matcher.compareKeyForNextRow, etc). 
If the compare gets us onto the
-   * next block we *_SEEK, otherwise we just SKIP to the next requested cell.
-   *
-   * <p>Other notes:
+   * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, or seek to an
+   * arbitrary seek key. This method decides whether a seek is the most efficient _actual_ way to
+   * get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, SKIP inside the
+   * current, loaded block). It does this by looking at the next indexed key of the current HFile.
+   * This key is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible
+   * key on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work
+   * with the current Cell but compare as though it were a seek key; see down in
+   * matcher.compareKeyForNextRow, etc). If the compare gets us onto the next block we *_SEEK,
+   * otherwise we just SKIP to the next requested cell.
+   * <p>
+   * Other notes:
    * <ul>
    * <li>Rows can straddle block boundaries</li>
    * <li>Versions of columns can straddle block boundaries (i.e. column C1 at T1 might be in a
    * different block than column C1 at T2)</li>
-   * <li>We want to SKIP if the chance is high that we'll find the desired Cell after a
-   * few SKIPs...</li>
-   * <li>We want to SEEK when the chance is high that we'll be able to seek
-   * past many Cells, especially if we know we need to go to the next block.</li>
+   * <li>We want to SKIP if the chance is high that we'll find the desired Cell after a few
+   * SKIPs...</li>
+   * <li>We want to SEEK when the chance is high that we'll be able to seek past many Cells,
+   * especially if we know we need to go to the next block.</li>
    * </ul>
-   * <p>A good proxy (best effort) to determine whether SKIP is better than SEEK is whether
-   * we'll likely end up seeking to the next block (or past the next block) to get our next column.
+   * <p>
+   * A good proxy (best effort) to determine whether SKIP is better than SEEK is whether we'll
+   * likely end up seeking to the next block (or past the next block) to get our next column.
    * Example:
+   *
    * <pre>
    * |    BLOCK 1              |     BLOCK 2                   |
    * |  r1/c1, r1/c2, r1/c3    |    r1/c4, r1/c5, r2/c1        |
@@ -867,6 +858,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
    *                                            |              |
    *                                    Next Index Key        SEEK_NEXT_COL
    * </pre>
      + * * Now imagine we want columns c1 and c3 (see first diagram above), the 'Next Index Key' of r1/c4 * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we only * want one version of c1, after we have it, a SEEK_COL will be issued to get to c2. Looking at @@ -883,9 +875,11 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner Cell previousIndexedKey = null; do { Cell nextIndexedKey = getNextIndexedKey(); - if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && - (nextIndexedKey == previousIndexedKey || - matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) { + if ( + nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + && (nextIndexedKey == previousIndexedKey + || matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) + ) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -908,9 +902,11 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner Cell previousIndexedKey = null; do { Cell nextIndexedKey = getNextIndexedKey(); - if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && - (nextIndexedKey == previousIndexedKey || - matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) { + if ( + nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + && (nextIndexedKey == previousIndexedKey + || matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) + ) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -945,7 +941,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Implementation of ChangedReadersObserver @Override public void updateReaders(List sfs, List memStoreScanners) - throws IOException { + throws IOException { if (CollectionUtils.isEmpty(sfs) && CollectionUtils.isEmpty(memStoreScanners)) { return; } @@ -1008,7 +1004,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner flushLock.lock(); try { List allScanners = - new ArrayList<>(flushedstoreFileScanners.size() + memStoreScannersAfterFlush.size()); + new ArrayList<>(flushedstoreFileScanners.size() + memStoreScannersAfterFlush.size()); allScanners.addAll(flushedstoreFileScanners); allScanners.addAll(memStoreScannersAfterFlush); scanners = selectScannersFrom(store, allScanners); @@ -1022,7 +1018,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Seek the new scanners to the last key seekScanners(scanners, lastTop, false, parallelSeekEnabled); // remove the older memstore scanner - for (int i = currentScanners.size() - 1; i >=0; i--) { + for (int i = currentScanners.size() - 1; i >= 0; i--) { if (!currentScanners.get(i).isFileScanner()) { scannersForDelayedClose.add(currentScanners.remove(i)); } else { @@ -1036,8 +1032,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner resetKVHeap(this.currentScanners, store.getComparator()); resetQueryMatcher(lastTop); if (heap.peek() == null || store.getComparator().compareRows(lastTop, this.heap.peek()) != 0) { - LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString() + - ",and after = " + heap.peek()); + LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString() + + ",and after = " + heap.peek()); topChanged = true; } else { topChanged = false; @@ -1061,17 +1057,13 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } /** - * Check whether scan as expected order - * @param prevKV - * @param kv - * @param 
comparator - * @throws IOException + * Check whether scan as expected order nnnn */ - protected void checkScanOrder(Cell prevKV, Cell kv, - CellComparator comparator) throws IOException { + protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) + throws IOException { // Check that the heap gives us KVs in an increasing order. - assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 : "Key " - + prevKV + " followed by a smaller key " + kv + " in cf " + store; + assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 + : "Key " + prevKV + " followed by a smaller key " + kv + " in cf " + store; } protected boolean seekToNextRow(Cell c) throws IOException { @@ -1079,13 +1071,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } /** - * Do a reseek in a normal StoreScanner(scan forward) - * @param kv - * @return true if scanner has values left, false if end of scanner - * @throws IOException + * Do a reseek in a normal StoreScanner(scan forward) n * @return true if scanner has values left, + * false if end of scanner n */ - protected boolean seekAsDirection(Cell kv) - throws IOException { + protected boolean seekAsDirection(Cell kv) throws IOException { return reseek(kv); } @@ -1101,12 +1090,14 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } void trySwitchToStreamRead() { - if (readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || - heap.peek() == null || bytesRead < preadMaxBytes) { + if ( + readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || heap.peek() == null + || bytesRead < preadMaxBytes + ) { return; } LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead, - this.store.getColumnFamilyName()); + this.store.getColumnFamilyName()); scanUsePread = false; Cell lastTop = heap.peek(); List memstoreScanners = new ArrayList<>(); @@ -1125,9 +1116,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner try { // We must have a store instance here so no null check // recreate the scanners on the current file scanners - fileScanners = store.recreateScanners(scannersToClose, cacheBlocks, false, false, - matcher, scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), - scan.includeStopRow(), readPt, false); + fileScanners = store.recreateScanners(scannersToClose, cacheBlocks, false, false, matcher, + scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), scan.includeStopRow(), + readPt, false); if (fileScanners == null) { return; } @@ -1169,23 +1160,20 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner return false; } - /** * Seek storefiles in parallel to optimize IO latency as much as possible * @param scanners the list {@link KeyValueScanner}s to be read from - * @param kv the KeyValue on which the operation is being requested - * @throws IOException + * @param kv the KeyValue on which the operation is being requested n */ - private void parallelSeek(final List - scanners, final Cell kv) throws IOException { + private void parallelSeek(final List scanners, final Cell kv) + throws IOException { if (scanners.isEmpty()) return; int storeFileScannerCount = scanners.size(); CountDownLatch latch = new CountDownLatch(storeFileScannerCount); List handlers = new ArrayList<>(storeFileScannerCount); for (KeyValueScanner scanner : scanners) { if (scanner instanceof StoreFileScanner) { - ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, - this.readPt, latch); + 
ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, this.readPt, latch); executor.submit(seekHandler); handlers.add(seekHandler); } else { @@ -1197,7 +1185,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner try { latch.await(); } catch (InterruptedException ie) { - throw (InterruptedIOException)new InterruptedIOException().initCause(ie); + throw (InterruptedIOException) new InterruptedIOException().initCause(ie); } for (ParallelSeekHandler handler : handlers) { @@ -1214,8 +1202,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner List getAllScannersForTesting() { List allScanners = new ArrayList<>(); KeyValueScanner current = heap.getCurrentForTesting(); - if (current != null) - allScanners.add(current); + if (current != null) allScanners.add(current); for (KeyValueScanner scanner : heap.getHeap()) allScanners.add(scanner); return allScanners; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 10a9330f832..98bb68f31fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -86,7 +85,7 @@ public final class StoreUtils { */ static Optional getLargestFile(Collection candidates) { return candidates.stream().filter(f -> f.getReader() != null) - .max((f1, f2) -> Long.compare(f1.getReader().length(), f2.getReader().length())); + .max((f1, f2) -> Long.compare(f1.getReader().length(), f2.getReader().length())); } /** @@ -96,7 +95,7 @@ public final class StoreUtils { */ public static OptionalLong getMaxMemStoreTSInList(Collection sfs) { return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS) - .max(); + .max(); } /** @@ -108,12 +107,12 @@ public final class StoreUtils { /** * Gets the approximate mid-point of the given file that is optimal for use in splitting it. - * @param file the store file + * @param file the store file * @param comparator Comparator used to compare KVs. * @return The split point row, or null if splitting is not possible, or reader is null. */ static Optional getFileSplitPoint(HStoreFile file, CellComparator comparator) - throws IOException { + throws IOException { StoreFileReader reader = file.getReader(); if (reader == null) { LOG.warn("Storefile " + file + " Reader is null; cannot get split point"); @@ -130,8 +129,9 @@ public final class StoreUtils { Cell firstKey = reader.getFirstKey().get(); Cell lastKey = reader.getLastKey().get(); // if the midkey is the same as the first or last keys, we cannot (ever) split this region. - if (comparator.compareRows(midKey, firstKey) == 0 || - comparator.compareRows(midKey, lastKey) == 0) { + if ( + comparator.compareRows(midKey, firstKey) == 0 || comparator.compareRows(midKey, lastKey) == 0 + ) { if (LOG.isDebugEnabled()) { LOG.debug("cannot split {} because midkey is the same as first or last row", file); } @@ -144,10 +144,11 @@ public final class StoreUtils { * Gets the mid point of the largest file passed in as split point. 
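[Editorial aside, not part of the patch] The guard in getFileSplitPoint above reduces to "the mid row must differ from both boundary rows, otherwise the file gives no usable split point". The sketch compares plain row byte[]s rather than Cells, so it simplifies the comparator-based check.

import java.util.Optional;
import org.apache.hadoop.hbase.util.Bytes;

public final class SplitPointRule {
  // Returns the usable split row, or empty when the mid key matches a boundary row.
  static Optional<byte[]> splitRow(byte[] firstRow, byte[] midRow, byte[] lastRow) {
    if (Bytes.equals(midRow, firstRow) || Bytes.equals(midRow, lastRow)) {
      return Optional.empty();
    }
    return Optional.of(midRow);
  }
}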
*/ static Optional getSplitPoint(Collection storefiles, - CellComparator comparator) throws IOException { + CellComparator comparator) throws IOException { Optional largestFile = StoreUtils.getLargestFile(storefiles); - return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) - : Optional.empty(); + return largestFile.isPresent() + ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) + : Optional.empty(); } /** @@ -166,16 +167,15 @@ public final class StoreUtils { * @return The bytesPerChecksum that is set in the configuration */ public static int getBytesPerChecksum(Configuration conf) { - return conf.getInt(HConstants.BYTES_PER_CHECKSUM, - HFile.DEFAULT_BYTES_PER_CHECKSUM); + return conf.getInt(HConstants.BYTES_PER_CHECKSUM, HFile.DEFAULT_BYTES_PER_CHECKSUM); } public static Configuration createStoreConfiguration(Configuration conf, TableDescriptor td, - ColumnFamilyDescriptor cfd) { + ColumnFamilyDescriptor cfd) { // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. return new CompoundConfiguration().add(conf).addBytesMap(td.getValues()) - .addStringMap(cfd.getConfiguration()).addBytesMap(cfd.getValues()); + .addStringMap(cfd.getConfiguration()).addBytesMap(cfd.getValues()); } public static List toStoreFileInfo(Collection storefiles) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index 18f7e185eed..40108e346d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,31 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; - import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A chore for refreshing the store files for secondary regions hosted in the region server. - * - * This chore should run periodically with a shorter interval than HFile TTL - * ("hbase.master.hfilecleaner.ttl", default 5 minutes). - * It ensures that if we cannot refresh files longer than that amount, the region - * will stop serving read requests because the referenced files might have been deleted (by the - * primary region). + * A chore for refreshing the store files for secondary regions hosted in the region server. This + * chore should run periodically with a shorter interval than HFile TTL + * ("hbase.master.hfilecleaner.ttl", default 5 minutes). 
It ensures that if we cannot refresh files + * longer than that amount, the region will stop serving read requests because the referenced files + * might have been deleted (by the primary region). */ @InterfaceAudience.Private public class StorefileRefresherChore extends ScheduledChore { @@ -49,35 +45,35 @@ public class StorefileRefresherChore extends ScheduledChore { /** * The period (in milliseconds) for refreshing the store files for the secondary regions. */ - public static final String REGIONSERVER_STOREFILE_REFRESH_PERIOD - = "hbase.regionserver.storefile.refresh.period"; - static final int DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD = 0; //disabled by default + public static final String REGIONSERVER_STOREFILE_REFRESH_PERIOD = + "hbase.regionserver.storefile.refresh.period"; + static final int DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD = 0; // disabled by default /** - * Whether all storefiles should be refreshed, as opposed to just hbase:meta's - * Meta region doesn't have WAL replication for replicas enabled yet + * Whether all storefiles should be refreshed, as opposed to just hbase:meta's Meta region doesn't + * have WAL replication for replicas enabled yet */ - public static final String REGIONSERVER_META_STOREFILE_REFRESH_PERIOD - = "hbase.regionserver.meta.storefile.refresh.period"; + public static final String REGIONSERVER_META_STOREFILE_REFRESH_PERIOD = + "hbase.regionserver.meta.storefile.refresh.period"; private HRegionServer regionServer; private long hfileTtl; private int period; private boolean onlyMetaRefresh = true; - //ts of last time regions store files are refreshed + // ts of last time regions store files are refreshed private Map lastRefreshTimes; // encodedName -> long public StorefileRefresherChore(int period, boolean onlyMetaRefresh, HRegionServer regionServer, - Stoppable stoppable) { + Stoppable stoppable) { super("StorefileRefresherChore", stoppable, period); this.period = period; this.regionServer = regionServer; - this.hfileTtl = this.regionServer.getConfiguration().getLong( - TimeToLiveHFileCleaner.TTL_CONF_KEY, TimeToLiveHFileCleaner.DEFAULT_TTL); + this.hfileTtl = this.regionServer.getConfiguration() + .getLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, TimeToLiveHFileCleaner.DEFAULT_TTL); this.onlyMetaRefresh = onlyMetaRefresh; if (period > hfileTtl / 2) { - throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD + - " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY); + throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD + + " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY); } lastRefreshTimes = new HashMap<>(); } @@ -108,14 +104,15 @@ public class StorefileRefresherChore extends ScheduledChore { LOG.warn("Exception while trying to refresh store files for region:" + r.getRegionInfo() + ", exception:" + StringUtils.stringifyException(ex)); - // Store files have a TTL in the archive directory. If we fail to refresh for that long, we stop serving reads + // Store files have a TTL in the archive directory. 
If we fail to refresh for that long, we + // stop serving reads if (isRegionStale(encodedName, time)) { - ((HRegion)r).setReadsEnabled(false); // stop serving reads + ((HRegion) r).setReadsEnabled(false); // stop serving reads } continue; } lastRefreshTimes.put(encodedName, time); - ((HRegion)r).setReadsEnabled(true); // restart serving reads + ((HRegion) r).setReadsEnabled(true); // restart serving reads } // remove closed regions diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index fc0598d89ac..386f64166ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,15 +23,14 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; /** * Base class for cell sink that separates the provided cells into multiple files for stripe @@ -94,11 +93,13 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { * @param cell The cell whose row has to be checked. */ protected void sanityCheckLeft(byte[] left, Cell cell) throws IOException { - if (!Arrays.equals(StripeStoreFileManager.OPEN_KEY, left) - && comparator.compareRows(cell, left, 0, left.length) < 0) { + if ( + !Arrays.equals(StripeStoreFileManager.OPEN_KEY, left) + && comparator.compareRows(cell, left, 0, left.length) < 0 + ) { String error = - "The first row is lower than the left boundary of [" + Bytes.toString(left) + "]: [" - + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; + "The first row is lower than the left boundary of [" + Bytes.toString(left) + "]: [" + + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; LOG.error(error); throw new IOException(error); } @@ -109,12 +110,13 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { * @param right The right boundary of the writer. 
*/ protected void sanityCheckRight(byte[] right, Cell cell) throws IOException { - if (!Arrays.equals(StripeStoreFileManager.OPEN_KEY, right) - && comparator.compareRows(cell, right, 0, right.length) >= 0) { - String error = - "The last row is higher or equal than the right boundary of [" + Bytes.toString(right) - + "]: [" - + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; + if ( + !Arrays.equals(StripeStoreFileManager.OPEN_KEY, right) + && comparator.compareRows(cell, right, 0, right.length) >= 0 + ) { + String error = "The last row is higher or equal than the right boundary of [" + + Bytes.toString(right) + "]: [" + + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; LOG.error(error); throw new IOException(error); } @@ -136,12 +138,13 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { /** * @param targetBoundaries The boundaries on which writers/files are separated. - * @param majorRangeFrom Major range is the range for which at least one file should be written - * (because all files are included in compaction). majorRangeFrom is the left boundary. - * @param majorRangeTo The right boundary of majorRange (see majorRangeFrom). + * @param majorRangeFrom Major range is the range for which at least one file should be + * written (because all files are included in compaction). + * majorRangeFrom is the left boundary. + * @param majorRangeTo The right boundary of majorRange (see majorRangeFrom). */ public BoundaryMultiWriter(CellComparator comparator, List targetBoundaries, - byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException { + byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException { super(comparator); this.boundaries = targetBoundaries; this.existingWriters = new ArrayList<>(this.boundaries.size() - 1); @@ -149,16 +152,16 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { // must match some target boundaries, let's find them. assert (majorRangeFrom == null) == (majorRangeTo == null); if (majorRangeFrom != null) { - majorRangeFromIndex = - Arrays.equals(majorRangeFrom, StripeStoreFileManager.OPEN_KEY) ? 0 : Collections - .binarySearch(boundaries, majorRangeFrom, Bytes.BYTES_COMPARATOR); - majorRangeToIndex = - Arrays.equals(majorRangeTo, StripeStoreFileManager.OPEN_KEY) ? boundaries.size() - : Collections.binarySearch(boundaries, majorRangeTo, Bytes.BYTES_COMPARATOR); + majorRangeFromIndex = Arrays.equals(majorRangeFrom, StripeStoreFileManager.OPEN_KEY) + ? 0 + : Collections.binarySearch(boundaries, majorRangeFrom, Bytes.BYTES_COMPARATOR); + majorRangeToIndex = Arrays.equals(majorRangeTo, StripeStoreFileManager.OPEN_KEY) + ? boundaries.size() + : Collections.binarySearch(boundaries, majorRangeTo, Bytes.BYTES_COMPARATOR); if (this.majorRangeFromIndex < 0 || this.majorRangeToIndex < 0) { throw new IOException("Major range does not match writer boundaries: [" - + Bytes.toString(majorRangeFrom) + "] [" + Bytes.toString(majorRangeTo) + "]; from " - + majorRangeFromIndex + " to " + majorRangeToIndex); + + Bytes.toString(majorRangeFrom) + "] [" + Bytes.toString(majorRangeTo) + "]; from " + + majorRangeFromIndex + " to " + majorRangeToIndex); } } } @@ -223,9 +226,9 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { boolean needEmptyFile = isInMajorRange || isLastWriter; existingWriters.add(needEmptyFile ? 
writerFactory.createWriter() : null); hasAnyWriter |= needEmptyFile; - currentWriterEndKey = - (existingWriters.size() + 1 == boundaries.size()) ? null : boundaries.get(existingWriters - .size() + 1); + currentWriterEndKey = (existingWriters.size() + 1 == boundaries.size()) + ? null + : boundaries.get(existingWriters.size() + 1); } private void checkCanCreateWriter() throws IOException { @@ -233,7 +236,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { assert existingWriters.size() <= maxWriterCount; if (existingWriters.size() >= maxWriterCount) { throw new IOException("Cannot create any more writers (created " + existingWriters.size() - + " out of " + maxWriterCount + " - row might be out of range of all valid writers"); + + " out of " + maxWriterCount + " - row might be out of range of all valid writers"); } } @@ -241,14 +244,14 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { if (currentWriter != null) { if (LOG.isDebugEnabled()) { LOG.debug("Stopping to use a writer after [" + Bytes.toString(currentWriterEndKey) - + "] row; wrote out " + cellsInCurrentWriter + " kvs"); + + "] row; wrote out " + cellsInCurrentWriter + " kvs"); } cellsInCurrentWriter = 0; } currentWriter = null; - currentWriterEndKey = - (existingWriters.size() + 1 == boundaries.size()) ? null : boundaries.get(existingWriters - .size() + 1); + currentWriterEndKey = (existingWriters.size() + 1 == boundaries.size()) + ? null + : boundaries.get(existingWriters.size() + 1); } } @@ -272,12 +275,12 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { /** * @param targetCount The maximum count of writers that can be created. - * @param targetKvs The number of KVs to read from source before starting each new writer. - * @param left The left boundary of the first writer. - * @param right The right boundary of the last writer. + * @param targetKvs The number of KVs to read from source before starting each new writer. + * @param left The left boundary of the first writer. + * @param right The right boundary of the last writer. */ public SizeMultiWriter(CellComparator comparator, int targetCount, long targetKvs, byte[] left, - byte[] right) { + byte[] right) { super(comparator); this.targetCount = targetCount; this.targetCells = targetKvs; @@ -297,12 +300,13 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { // First append ever, do a sanity check. 
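The reflowed BoundaryMultiWriter constructor above resolves each major-range edge to an index in the target boundary list: OPEN_KEY maps to the edge of the list, anything else must match an existing boundary via binary search. A minimal standalone sketch of that lookup, assuming OPEN_KEY is an empty byte array (the class and method names here are illustrative, not part of the patch):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.util.Bytes;

    final class BoundaryIndexSketch {
      private static final byte[] OPEN_KEY = new byte[0]; // stand-in for StripeStoreFileManager.OPEN_KEY

      // Same shape as the majorRangeFromIndex / majorRangeToIndex computation above.
      static int resolveBoundaryIndex(List<byte[]> boundaries, byte[] edge, boolean isFromEdge)
          throws IOException {
        if (Arrays.equals(edge, OPEN_KEY)) {
          return isFromEdge ? 0 : boundaries.size();
        }
        int idx = Collections.binarySearch(boundaries, edge, Bytes.BYTES_COMPARATOR);
        if (idx < 0) {
          throw new IOException(
              "Major range edge [" + Bytes.toString(edge) + "] does not match any writer boundary");
        }
        return idx;
      }
    }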
sanityCheckLeft(left, cell); doCreateWriter = true; - } else if (lastRowInCurrentWriter != null - && !PrivateCellUtil.matchingRows(cell, lastRowInCurrentWriter, 0, - lastRowInCurrentWriter.length)) { + } else if ( + lastRowInCurrentWriter != null && !PrivateCellUtil.matchingRows(cell, + lastRowInCurrentWriter, 0, lastRowInCurrentWriter.length) + ) { if (LOG.isDebugEnabled()) { LOG.debug("Stopping to use a writer after [" + Bytes.toString(lastRowInCurrentWriter) - + "] row; wrote out " + cellsInCurrentWriter + " kvs"); + + "] row; wrote out " + cellsInCurrentWriter + " kvs"); } lastRowInCurrentWriter = null; cellsInCurrentWriter = 0; @@ -325,20 +329,21 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { ++cellsInCurrentWriter; cellsSeen = cellsInCurrentWriter; if (this.sourceScanner != null) { - cellsSeen = - Math.max(cellsSeen, this.sourceScanner.getEstimatedNumberOfKvsScanned() - - cellsSeenInPrevious); + cellsSeen = Math.max(cellsSeen, + this.sourceScanner.getEstimatedNumberOfKvsScanned() - cellsSeenInPrevious); } // If we are not already waiting for opportunity to close, start waiting if we can // create any more writers and if the current one is too big. - if (lastRowInCurrentWriter == null && existingWriters.size() < targetCount - && cellsSeen >= targetCells) { + if ( + lastRowInCurrentWriter == null && existingWriters.size() < targetCount + && cellsSeen >= targetCells + ) { lastRowInCurrentWriter = CellUtil.cloneRow(cell); // make a copy if (LOG.isDebugEnabled()) { LOG.debug("Preparing to start a new writer after [" - + Bytes.toString(lastRowInCurrentWriter) + "] row; observed " + cellsSeen - + " kvs and wrote out " + cellsInCurrentWriter + " kvs"); + + Bytes.toString(lastRowInCurrentWriter) + "] row; observed " + cellsSeen + + " kvs and wrote out " + cellsInCurrentWriter + " kvs"); } } } @@ -346,11 +351,11 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { @Override protected void preCommitWritersInternal() throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("Stopping with " - + cellsInCurrentWriter - + " kvs in last writer" - + ((this.sourceScanner == null) ? "" : ("; observed estimated " - + this.sourceScanner.getEstimatedNumberOfKvsScanned() + " KVs total"))); + LOG.debug("Stopping with " + cellsInCurrentWriter + " kvs in last writer" + + ((this.sourceScanner == null) + ? "" + : ("; observed estimated " + this.sourceScanner.getEstimatedNumberOfKvsScanned() + + " KVs total"))); } if (lastCell != null) { sanityCheckRight(right, lastCell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java index 61deb0b93ce..b354eda2e79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,16 +17,15 @@ */ package org.apache.hadoop.hbase.regionserver; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; /** - * Configuration class for stripe store and compactions. - * See {@link StripeStoreFileManager} for general documentation. - * See getters for the description of each setting. + * Configuration class for stripe store and compactions. See {@link StripeStoreFileManager} for + * general documentation. See getters for the description of each setting. */ @InterfaceAudience.Private public class StripeStoreConfig { @@ -38,33 +36,42 @@ public class StripeStoreConfig { /** The minimum number of files to compact within a stripe; same as for regular compaction. */ public static final String MIN_FILES_KEY = "hbase.store.stripe.compaction.minFiles"; - /** The minimum number of files to compact when compacting L0; same as minFiles for regular + /** + * The minimum number of files to compact when compacting L0; same as minFiles for regular * compaction. Given that L0 causes unnecessary overwriting of the data, should be higher than - * regular minFiles. */ + * regular minFiles. + */ public static final String MIN_FILES_L0_KEY = "hbase.store.stripe.compaction.minFilesL0"; - /** The size the stripe should achieve to be considered for splitting into multiple stripes. - Stripe will be split when it can be fully compacted, and it is above this size. */ + /** + * The size the stripe should achieve to be considered for splitting into multiple stripes. Stripe + * will be split when it can be fully compacted, and it is above this size. + */ public static final String SIZE_TO_SPLIT_KEY = "hbase.store.stripe.sizeToSplit"; - /** The target count of new stripes to produce when splitting a stripe. A floating point - number, default is 2. Values less than 1 will be converted to 1/x. Non-whole numbers will - produce unbalanced splits, which may be good for some cases. In this case the "smaller" of - the new stripes will always be the rightmost one. If the stripe is bigger than sizeToSplit - when splitting, this will be adjusted by a whole increment. */ + /** + * The target count of new stripes to produce when splitting a stripe. A floating point number, + * default is 2. Values less than 1 will be converted to 1/x. Non-whole numbers will produce + * unbalanced splits, which may be good for some cases. In this case the "smaller" of the new + * stripes will always be the rightmost one. If the stripe is bigger than sizeToSplit when + * splitting, this will be adjusted by a whole increment. + */ public static final String SPLIT_PARTS_KEY = "hbase.store.stripe.splitPartCount"; - /** The initial stripe count to create. If the row distribution is roughly the same over time, - it's good to set this to a count of stripes that is expected to be achieved in most regions, - to get this count from the outset and prevent unnecessary splitting. */ + /** + * The initial stripe count to create. If the row distribution is roughly the same over time, it's + * good to set this to a count of stripes that is expected to be achieved in most regions, to get + * this count from the outset and prevent unnecessary splitting. 
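For reference, a minimal sketch of setting the stripe keys documented above on a Configuration. The stripe key literals are the ones defined in this class; the "hbase.hstore.engine.class" switch is an assumption here and should be checked against StoreEngine before use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class StripeStoreConfigSketch {
      static Configuration stripeTunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed engine switch; verify the key name against StoreEngine in your HBase version.
        conf.set("hbase.hstore.engine.class",
            "org.apache.hadoop.hbase.regionserver.StripeStoreEngine");
        conf.setInt("hbase.store.stripe.compaction.minFilesL0", 4);              // MIN_FILES_L0_KEY
        conf.setLong("hbase.store.stripe.sizeToSplit", 2L * 1024 * 1024 * 1024); // SIZE_TO_SPLIT_KEY
        conf.setFloat("hbase.store.stripe.splitPartCount", 2f);                  // SPLIT_PARTS_KEY
        conf.setInt("hbase.store.stripe.initialStripeCount", 8);                 // INITIAL_STRIPE_COUNT_KEY
        return conf;
      }
    }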
+ */ public static final String INITIAL_STRIPE_COUNT_KEY = "hbase.store.stripe.initialStripeCount"; /** Whether to flush memstore to L0 files, or directly to stripes. */ public static final String FLUSH_TO_L0_KEY = "hbase.store.stripe.compaction.flushToL0"; - /** When splitting region, the maximum size imbalance to allow in an attempt to split at a - stripe boundary, so that no files go to both regions. Most users won't need to change that. */ + /** + * When splitting region, the maximum size imbalance to allow in an attempt to split at a stripe + * boundary, so that no files go to both regions. Most users won't need to change that. + */ public static final String MAX_REGION_SPLIT_IMBALANCE_KEY = - "hbase.store.stripe.region.split.max.imbalance"; - + "hbase.store.stripe.region.split.max.imbalance"; private final float maxRegionSplitImbalance; private final int level0CompactMinFiles; @@ -78,6 +85,7 @@ public class StripeStoreConfig { private final long splitPartSize; // derived from sizeToSplitAt and splitPartCount private static final double EPSILON = 0.001; // good enough for this, not a real epsilon. + public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4); this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false); @@ -85,7 +93,7 @@ public class StripeStoreConfig { int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1); this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles)); this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY, - config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10)); + config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10)); this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true); float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true); @@ -100,7 +108,7 @@ public class StripeStoreConfig { if (flushSize == 0) { flushSize = 128 * 1024 * 1024; } - long defaultSplitSize = (long)(flushSize * getLevel0MinFiles() * 4 * splitPartCount); + long defaultSplitSize = (long) (flushSize * getLevel0MinFiles() * 4 * splitPartCount); this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize); int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1); if (initialCount == 0) { @@ -108,15 +116,15 @@ public class StripeStoreConfig { initialCount = 1; } this.initialCount = initialCount; - this.splitPartSize = (long)(this.sizeToSplitAt / this.splitPartCount); + this.splitPartSize = (long) (this.sizeToSplitAt / this.splitPartCount); } - private static float getFloat( - Configuration config, String key, float defaultValue, boolean moreThanOne) { + private static float getFloat(Configuration config, String key, float defaultValue, + boolean moreThanOne) { float value = config.getFloat(key, defaultValue); if (value < EPSILON) { - LOG.warn(String.format( - "%s is set to 0 or negative; using default value of %f", key, defaultValue)); + LOG.warn( + String.format("%s is set to 0 or negative; using default value of %f", key, defaultValue)); value = defaultValue; } else if ((value > 1f) != moreThanOne) { value = 1f / value; @@ -157,8 +165,8 @@ public class StripeStoreConfig { } /** - * @return the desired size of the target stripe when splitting, in bytes. - * Derived from {@link #getSplitSize()} and {@link #getSplitCount()}. + * @return the desired size of the target stripe when splitting, in bytes. 
Derived from + * {@link #getSplitSize()} and {@link #getSplitCount()}. */ public long getSplitPartSize() { return splitPartSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index bfb3f649ff2..348c4e61be0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,8 +40,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; * The storage engine that implements the stripe-based store/compaction scheme. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class StripeStoreEngine extends StoreEngine { +public class StripeStoreEngine extends + StoreEngine { private static final Logger LOG = LoggerFactory.getLogger(StripeStoreEngine.class); private StripeStoreConfig config; @@ -56,13 +56,13 @@ public class StripeStoreEngine extends StoreEngine filesCompacting, boolean isUserCompaction, - boolean mayUseOffPeak, boolean forceMajor) throws IOException { - this.stripeRequest = compactionPolicy.selectCompaction( - storeFileManager, filesCompacting, mayUseOffPeak); + boolean mayUseOffPeak, boolean forceMajor) throws IOException { + this.stripeRequest = + compactionPolicy.selectCompaction(storeFileManager, filesCompacting, mayUseOffPeak); this.request = (this.stripeRequest == null) - ? new CompactionRequestImpl(new ArrayList<>()) : this.stripeRequest.getRequest(); + ? new CompactionRequestImpl(new ArrayList<>()) + : this.stripeRequest.getRequest(); return this.stripeRequest != null; } @@ -100,7 +101,7 @@ public class StripeStoreEngine extends StoreEngine compact(ThroughputController throughputController, User user) - throws IOException { + throws IOException { Preconditions.checkArgument(this.stripeRequest != null, "Cannot compact without selection"); return this.stripeRequest.execute(compactor, throughputController, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java index 0c06564f405..37cea227d89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; @@ -44,24 +42,22 @@ import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * Stripe implementation of StoreFileManager. - * Not thread safe - relies on external locking (in HStore). Collections that this class - * returns are immutable or unique to the call, so they should be safe. - * Stripe store splits the key space of the region into non-overlapping stripes, as well as - * some recent files that have all the keys (level 0). Each stripe contains a set of files. - * When L0 is compacted, it's split into the files corresponding to existing stripe boundaries, - * that can thus be added to stripes. - * When scan or get happens, it only has to read the files from the corresponding stripes. - * See StripeCompationPolicy on how the stripes are determined; this class doesn't care. - * - * This class should work together with StripeCompactionPolicy and StripeCompactor. - * With regard to how they work, we make at least the following (reasonable) assumptions: - * - Compaction produces one file per new stripe (if any); that is easy to change. - * - Compaction has one contiguous set of stripes both in and out, except if L0 is involved. + * Stripe implementation of StoreFileManager. Not thread safe - relies on external locking (in + * HStore). Collections that this class returns are immutable or unique to the call, so they should + * be safe. Stripe store splits the key space of the region into non-overlapping stripes, as well as + * some recent files that have all the keys (level 0). Each stripe contains a set of files. When L0 + * is compacted, it's split into the files corresponding to existing stripe boundaries, that can + * thus be added to stripes. When scan or get happens, it only has to read the files from the + * corresponding stripes. See StripeCompationPolicy on how the stripes are determined; this class + * doesn't care. This class should work together with StripeCompactionPolicy and StripeCompactor. + * With regard to how they work, we make at least the following (reasonable) assumptions: - + * Compaction produces one file per new stripe (if any); that is easy to change. - Compaction has + * one contiguous set of stripes both in and out, except if L0 is involved. */ @InterfaceAudience.Private public class StripeStoreFileManager @@ -83,8 +79,8 @@ public class StripeStoreFileManager final static byte[] INVALID_KEY = null; /** - * The state class. Used solely to replace results atomically during - * compactions and avoid complicated error handling. + * The state class. Used solely to replace results atomically during compactions and avoid + * complicated error handling. */ private static class State { /** @@ -95,9 +91,9 @@ public class StripeStoreFileManager public byte[][] stripeEndRows = new byte[0][]; /** - * Files by stripe. Each element of the list corresponds to stripeEndRow element with the - * same index, except the last one. 
Inside each list, the files are in reverse order by - * seqNum. Note that the length of this is one higher than that of stripeEndKeys. + * Files by stripe. Each element of the list corresponds to stripeEndRow element with the same + * index, except the last one. Inside each list, the files are in reverse order by seqNum. Note + * that the length of this is one higher than that of stripeEndKeys. */ public ArrayList> stripeFiles = new ArrayList<>(); /** Level 0. The files are in reverse order by seqNum. */ @@ -107,14 +103,17 @@ public class StripeStoreFileManager public ImmutableList allFilesCached = ImmutableList.of(); private ImmutableList allCompactedFilesCached = ImmutableList.of(); } + private State state = null; /** Cached file metadata (or overrides as the case may be) */ private HashMap fileStarts = new HashMap<>(); private HashMap fileEnds = new HashMap<>(); - /** Normally invalid key is null, but in the map null is the result for "no key"; so use - * the following constant value in these maps instead. Note that this is a constant and - * we use it to compare by reference when we read from the map. */ + /** + * Normally invalid key is null, but in the map null is the result for "no key"; so use the + * following constant value in these maps instead. Note that this is a constant and we use it to + * compare by reference when we read from the map. + */ private static final byte[] INVALID_KEY_IN_MAP = new byte[0]; private final CellComparator cellComparator; @@ -122,12 +121,12 @@ public class StripeStoreFileManager private final int blockingFileCount; - public StripeStoreFileManager( - CellComparator kvComparator, Configuration conf, StripeStoreConfig config) { + public StripeStoreFileManager(CellComparator kvComparator, Configuration conf, + StripeStoreConfig config) { this.cellComparator = kvComparator; this.config = config; - this.blockingFileCount = conf.getInt( - HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT); + this.blockingFileCount = + conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT); } @Override @@ -178,8 +177,10 @@ public class StripeStoreFileManager return state.allFilesCached.size(); } - /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} - * for details on this methods. */ + /** + * See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} for details on this + * methods. + */ @Override public Iterator getCandidateFilesForRowKeyBefore(final KeyValue targetKey) { KeyBeforeConcatenatedLists result = new KeyBeforeConcatenatedLists(); @@ -194,14 +195,16 @@ public class StripeStoreFileManager return result.iterator(); } - /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and - * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} - * for details on this methods. */ + /** + * See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and + * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} for + * details on this methods. 
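The class comment above describes the layout this manager relies on: each stripe covers a half-open row range bounded by consecutive entries of stripeEndRows, the last stripe is open-ended, and L0 files cover the whole key space. A deliberately simple sketch of the row-to-stripe mapping that layout implies (the real manager uses its own search helpers; this linear version is only illustrative):

    import org.apache.hadoop.hbase.util.Bytes;

    final class StripeLookupSketch {
      // Returns the index of the stripe whose half-open range contains the row; the array
      // holds the exclusive end rows of every stripe except the last, open-ended one.
      static int findStripeForRow(byte[][] stripeEndRows, byte[] row) {
        for (int i = 0; i < stripeEndRows.length; i++) {
          if (Bytes.compareTo(row, stripeEndRows[i]) < 0) {
            return i;
          }
        }
        return stripeEndRows.length; // falls into the last, open-ended stripe
      }
    }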
+ */ @Override public Iterator updateCandidateFilesForRowKeyBefore( - Iterator candidateFiles, final KeyValue targetKey, final Cell candidate) { + Iterator candidateFiles, final KeyValue targetKey, final Cell candidate) { KeyBeforeConcatenatedLists.Iterator original = - (KeyBeforeConcatenatedLists.Iterator)candidateFiles; + (KeyBeforeConcatenatedLists.Iterator) candidateFiles; assert original != null; ArrayList> components = original.getComponents(); for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) { @@ -210,8 +213,7 @@ public class StripeStoreFileManager // Entries are ordered as such: L0, then stripes in reverse order. We never remove // level 0; we remove the stripe, and all subsequent ones, as soon as we find the // first one that cannot possibly have better candidates. - if (!isInvalid(endKey) && !isOpen(endKey) - && (nonOpenRowCompare(targetKey, endKey) >= 0)) { + if (!isInvalid(endKey) && !isOpen(endKey) && (nonOpenRowCompare(targetKey, endKey) >= 0)) { original.removeComponents(firstIrrelevant); break; } @@ -220,10 +222,9 @@ public class StripeStoreFileManager } /** - * Override of getSplitPoint that determines the split point as the boundary between two - * stripes, unless it causes significant imbalance between split sides' sizes. In that - * case, the split boundary will be chosen from the middle of one of the stripes to - * minimize imbalance. + * Override of getSplitPoint that determines the split point as the boundary between two stripes, + * unless it causes significant imbalance between split sides' sizes. In that case, the split + * boundary will be chosen from the middle of one of the stripes to minimize imbalance. * @return The split point, or null if no split is possible. */ @Override @@ -249,13 +250,14 @@ public class StripeStoreFileManager } } if (leftSize == 0 || rightSize == 0) { - String errMsg = String.format("Cannot split on a boundary - left index %d size %d, " - + "right index %d size %d", leftIndex, leftSize, rightIndex, rightSize); + String errMsg = String.format( + "Cannot split on a boundary - left index %d size %d, " + "right index %d size %d", + leftIndex, leftSize, rightIndex, rightSize); debugDumpState(errMsg); LOG.warn(errMsg); return getSplitPointFromAllFiles(); } - double ratio = (double)rightSize / leftSize; + double ratio = (double) rightSize / leftSize; if (ratio < 1) { ratio = 1 / ratio; } @@ -269,16 +271,16 @@ public class StripeStoreFileManager // See if we can achieve better ratio if we split the bigger side in half. boolean isRightLarger = rightSize >= leftSize; double newRatio = isRightLarger - ? getMidStripeSplitRatio(leftSize, rightSize, lastRightSize) - : getMidStripeSplitRatio(rightSize, leftSize, lastLeftSize); + ? getMidStripeSplitRatio(leftSize, rightSize, lastRightSize) + : getMidStripeSplitRatio(rightSize, leftSize, lastLeftSize); if (newRatio < 1) { newRatio = 1 / newRatio; } if (newRatio >= ratio) { return Optional.of(state.stripeEndRows[leftIndex]); } - LOG.debug("Splitting the stripe - ratio w/o split " + ratio + ", ratio with split " - + newRatio + " configured ratio " + config.getMaxSplitImbalance()); + LOG.debug("Splitting the stripe - ratio w/o split " + ratio + ", ratio with split " + newRatio + + " configured ratio " + config.getMaxSplitImbalance()); // OK, we may get better ratio, get it. return StoreUtils.getSplitPoint(state.stripeFiles.get(isRightLarger ? 
rightIndex : leftIndex), cellComparator); @@ -292,12 +294,12 @@ public class StripeStoreFileManager } private double getMidStripeSplitRatio(long smallerSize, long largerSize, long lastLargerSize) { - return (double)(largerSize - lastLargerSize / 2f) / (smallerSize + lastLargerSize / 2f); + return (double) (largerSize - lastLargerSize / 2f) / (smallerSize + lastLargerSize / 2f); } @Override public Collection getFilesForScan(byte[] startRow, boolean includeStartRow, - byte[] stopRow, boolean includeStopRow) { + byte[] stopRow, boolean includeStopRow) { if (state.stripeFiles.isEmpty()) { return state.level0Files; // There's just L0. } @@ -322,8 +324,8 @@ public class StripeStoreFileManager public void addCompactionResults(Collection compactedFiles, Collection results) { // See class comment for the assumptions we make here. - LOG.debug("Attempting to merge compaction results: " + compactedFiles.size() + - " files replaced by " + results.size()); + LOG.debug("Attempting to merge compaction results: " + compactedFiles.size() + + " files replaced by " + results.size()); // In order to be able to fail in the middle of the operation, we'll operate on lazy // copies and apply the result at the end. CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(false); @@ -366,7 +368,7 @@ public class StripeStoreFileManager // many files we have, so do an approximate mapping to normal priority range; L0 counts // for all stripes. int l0 = state.level0Files.size(), sc = state.stripeFiles.size(); - int priority = (int)Math.ceil(((double)(this.blockingFileCount - fc + l0) / sc) - l0); + int priority = (int) Math.ceil(((double) (this.blockingFileCount - fc + l0) / sc) - l0); return (priority <= HStore.PRIORITY_USER) ? (HStore.PRIORITY_USER + 1) : priority; } @@ -384,10 +386,9 @@ public class StripeStoreFileManager } /** - * Loads initial store files that were picked up from some physical location pertaining to - * this store (presumably). Unlike adding files after compaction, assumes empty initial - * sets, and is forgiving with regard to stripe constraints - at worst, many/all files will - * go to level 0. + * Loads initial store files that were picked up from some physical location pertaining to this + * store (presumably). Unlike adding files after compaction, assumes empty initial sets, and is + * forgiving with regard to stripe constraints - at worst, many/all files will go to level 0. * @param storeFiles Store files to add. */ private void loadUnclassifiedStoreFiles(List storeFiles) { @@ -402,8 +403,7 @@ public class StripeStoreFileManager if (isInvalid(startRow) || isInvalid(endRow)) { insertFileIntoStripe(level0Files, sf); // No metadata - goes to L0. ensureLevel0Metadata(sf); - } else if (!isOpen(startRow) && !isOpen(endRow) && - nonOpenRowCompare(startRow, endRow) >= 0) { + } else if (!isOpen(startRow) && !isOpen(endRow) && nonOpenRowCompare(startRow, endRow) >= 0) { LOG.error("Unexpected metadata - start row [" + Bytes.toString(startRow) + "], end row [" + Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0"); insertFileIntoStripe(level0Files, sf); // Bad metadata - goes to L0 also. 
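To make the split-imbalance check above concrete: with a hypothetical 10 GB left side, 40 GB right side and a 20 GB stripe adjacent to the boundary on the larger side, the boundary split ratio is 4.0, which exceeds the default 1.5 cap read from MAX_REGION_SPLIT_IMBALANCE_KEY, while moving half of that adjacent stripe across the split point brings the ratio down to 1.5. A self-contained sketch of that arithmetic:

    final class SplitRatioSketch {
      // Same formula as getMidStripeSplitRatio above: shift half of the boundary-adjacent
      // stripe of the larger side to the smaller side and recompute the ratio.
      static double midStripeSplitRatio(long smallerSize, long largerSize, long lastLargerSize) {
        return (double) (largerSize - lastLargerSize / 2f) / (smallerSize + lastLargerSize / 2f);
      }

      public static void main(String[] args) {
        long left = 10L << 30, right = 40L << 30, lastRight = 20L << 30;    // hypothetical sizes
        double boundaryRatio = (double) right / left;                       // 4.0, too imbalanced
        double midSplitRatio = midStripeSplitRatio(left, right, lastRight); // (40-10)/(10+10) = 1.5
        System.out.println(boundaryRatio + " -> " + midSplitRatio);
      }
    }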
@@ -423,7 +423,7 @@ public class StripeStoreFileManager boolean hasOverlaps = false; byte[] expectedStartRow = null; // first stripe can start wherever Iterator>> entryIter = - candidateStripes.entrySet().iterator(); + candidateStripes.entrySet().iterator(); while (entryIter.hasNext()) { Map.Entry> entry = entryIter.next(); ArrayList files = entry.getValue(); @@ -436,8 +436,8 @@ public class StripeStoreFileManager } else if (!rowEquals(expectedStartRow, startRow)) { hasOverlaps = true; LOG.warn("Store file doesn't fit into the tentative stripes - expected to start at [" - + Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow) - + "], to L0 it goes"); + + Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow) + + "], to L0 it goes"); HStoreFile badSf = files.remove(i); insertFileIntoStripe(level0Files, badSf); ensureLevel0Metadata(badSf); @@ -462,8 +462,8 @@ public class StripeStoreFileManager boolean isOpen = isOpen(startOf(firstFile)) && isOpen(candidateStripes.lastKey()); if (!isOpen) { LOG.warn("The range of the loaded files does not cover full key space: from [" - + Bytes.toString(startOf(firstFile)) + "], to [" - + Bytes.toString(candidateStripes.lastKey()) + "]"); + + Bytes.toString(startOf(firstFile)) + "], to [" + + Bytes.toString(candidateStripes.lastKey()) + "]"); if (!hasOverlaps) { ensureEdgeStripeMetadata(candidateStripes.firstEntry().getValue(), true); ensureEdgeStripeMetadata(candidateStripes.lastEntry().getValue(), false); @@ -516,23 +516,17 @@ public class StripeStoreFileManager if (!LOG.isDebugEnabled()) return; StringBuilder sb = new StringBuilder(); sb.append("\n" + string + "; current stripe state is as such:"); - sb.append("\n level 0 with ") - .append(state.level0Files.size()) - .append( - " files: " - + TraditionalBinaryPrefix.long2String( - StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";"); + sb.append("\n level 0 with ").append(state.level0Files.size()) + .append(" files: " + TraditionalBinaryPrefix + .long2String(StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";"); for (int i = 0; i < state.stripeFiles.size(); ++i) { String endRow = (i == state.stripeEndRows.length) - ? "(end)" : "[" + Bytes.toString(state.stripeEndRows[i]) + "]"; - sb.append("\n stripe ending in ") - .append(endRow) - .append(" with ") - .append(state.stripeFiles.get(i).size()) - .append( - " files: " - + TraditionalBinaryPrefix.long2String( - StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";"); + ? "(end)" + : "[" + Bytes.toString(state.stripeEndRows[i]) + "]"; + sb.append("\n stripe ending in ").append(endRow).append(" with ") + .append(state.stripeFiles.get(i).size()) + .append(" files: " + TraditionalBinaryPrefix.long2String( + StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";"); } sb.append("\n").append(state.stripeFiles.size()).append(" stripes total."); sb.append("\n").append(getStorefileCount()).append(" files total."); @@ -605,25 +599,25 @@ public class StripeStoreFileManager @Override public final byte[] getStartRow(int stripeIndex) { - return (stripeIndex == 0 ? OPEN_KEY : state.stripeEndRows[stripeIndex - 1]); + return (stripeIndex == 0 ? OPEN_KEY : state.stripeEndRows[stripeIndex - 1]); } @Override public final byte[] getEndRow(int stripeIndex) { return (stripeIndex == state.stripeEndRows.length - ? OPEN_KEY : state.stripeEndRows[stripeIndex]); + ? 
OPEN_KEY + : state.stripeEndRows[stripeIndex]); } - private byte[] startOf(HStoreFile sf) { byte[] result = fileStarts.get(sf); // result and INVALID_KEY_IN_MAP are compared _only_ by reference on purpose here as the latter // serves only as a marker and is not to be confused with other empty byte arrays. // See Javadoc of INVALID_KEY_IN_MAP for more information - return (result == null) - ? sf.getMetadataValue(STRIPE_START_KEY) - : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; + return (result == null) ? sf.getMetadataValue(STRIPE_START_KEY) + : result == INVALID_KEY_IN_MAP ? INVALID_KEY + : result; } private byte[] endOf(HStoreFile sf) { @@ -632,22 +626,24 @@ public class StripeStoreFileManager // result and INVALID_KEY_IN_MAP are compared _only_ by reference on purpose here as the latter // serves only as a marker and is not to be confused with other empty byte arrays. // See Javadoc of INVALID_KEY_IN_MAP for more information - return (result == null) - ? sf.getMetadataValue(STRIPE_END_KEY) - : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; + return (result == null) ? sf.getMetadataValue(STRIPE_END_KEY) + : result == INVALID_KEY_IN_MAP ? INVALID_KEY + : result; } /** * Inserts a file in the correct place (by seqnum) in a stripe copy. * @param stripe Stripe copy to insert into. - * @param sf File to insert. + * @param sf File to insert. */ private static void insertFileIntoStripe(ArrayList stripe, HStoreFile sf) { // The only operation for which sorting of the files matters is KeyBefore. Therefore, // we will store the file in reverse order by seqNum from the outset. - for (int insertBefore = 0; ; ++insertBefore) { - if (insertBefore == stripe.size() - || (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0)) { + for (int insertBefore = 0;; ++insertBefore) { + if ( + insertBefore == stripe.size() + || (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0) + ) { stripe.add(insertBefore, sf); break; } @@ -655,13 +651,12 @@ public class StripeStoreFileManager } /** - * An extension of ConcatenatedLists that has several peculiar properties. - * First, one can cut the tail of the logical list by removing last several sub-lists. - * Second, items can be removed thru iterator. - * Third, if the sub-lists are immutable, they are replaced with mutable copies when needed. - * On average KeyBefore operation will contain half the stripes as potential candidates, - * but will quickly cut down on them as it finds something in the more likely ones; thus, - * the above allow us to avoid unnecessary copying of a bunch of lists. + * An extension of ConcatenatedLists that has several peculiar properties. First, one can cut the + * tail of the logical list by removing last several sub-lists. Second, items can be removed thru + * iterator. Third, if the sub-lists are immutable, they are replaced with mutable copies when + * needed. On average KeyBefore operation will contain half the stripes as potential candidates, + * but will quickly cut down on them as it finds something in the more likely ones; thus, the + * above allow us to avoid unnecessary copying of a bunch of lists. */ private static class KeyBeforeConcatenatedLists extends ConcatenatedLists { @Override @@ -705,9 +700,9 @@ public class StripeStoreFileManager } /** - * Non-static helper class for merging compaction or flush results. - * Since we want to merge them atomically (more or less), it operates on lazy copies, - * then creates a new state object and puts it in place. 
+ * Non-static helper class for merging compaction or flush results. Since we want to merge them + * atomically (more or less), it operates on lazy copies, then creates a new state object and puts + * it in place. */ private class CompactionOrFlushMergeCopy { private ArrayList> stripeFiles = null; @@ -758,14 +753,16 @@ public class StripeStoreFileManager // Stripe count should be the same unless the end rows changed. assert oldState.stripeFiles.size() == this.stripeFiles.size() || this.stripeEndRows != null; State newState = new State(); - newState.level0Files = (this.level0Files == null) ? oldState.level0Files - : ImmutableList.copyOf(this.level0Files); - newState.stripeEndRows = (this.stripeEndRows == null) ? oldState.stripeEndRows - : this.stripeEndRows.toArray(new byte[this.stripeEndRows.size()][]); + newState.level0Files = + (this.level0Files == null) ? oldState.level0Files : ImmutableList.copyOf(this.level0Files); + newState.stripeEndRows = (this.stripeEndRows == null) + ? oldState.stripeEndRows + : this.stripeEndRows.toArray(new byte[this.stripeEndRows.size()][]); newState.stripeFiles = new ArrayList<>(this.stripeFiles.size()); for (List newStripe : this.stripeFiles) { newState.stripeFiles.add(newStripe instanceof ImmutableList - ? (ImmutableList)newStripe : ImmutableList.copyOf(newStripe)); + ? (ImmutableList) newStripe + : ImmutableList.copyOf(newStripe)); } List newAllFiles = new ArrayList<>(oldState.allFilesCached); @@ -812,7 +809,7 @@ public class StripeStoreFileManager result = new ArrayList<>(stripeCopy); this.stripeFiles.set(index, result); } else { - result = (ArrayList)stripeCopy; + result = (ArrayList) stripeCopy; } return result; } @@ -860,8 +857,8 @@ public class StripeStoreFileManager HStoreFile oldSf = newStripes.put(endRow, sf); if (oldSf != null) { throw new IllegalStateException( - "Compactor has produced multiple files for the stripe ending in [" + - Bytes.toString(endRow) + "], found " + sf.getPath() + " and " + oldSf.getPath()); + "Compactor has produced multiple files for the stripe ending in [" + + Bytes.toString(endRow) + "], found " + sf.getPath() + " and " + oldSf.getPath()); } } return newStripes; @@ -880,8 +877,8 @@ public class StripeStoreFileManager int stripeIndex = findStripeIndexByEndRow(oldEndRow); if (stripeIndex < 0) { throw new IllegalStateException( - "An allegedly compacted file [" + oldFile + "] does not belong" + - " to a known stripe (end row - [" + Bytes.toString(oldEndRow) + "])"); + "An allegedly compacted file [" + oldFile + "] does not belong" + + " to a known stripe (end row - [" + Bytes.toString(oldEndRow) + "])"); } source = getStripeCopy(stripeIndex); } @@ -892,14 +889,15 @@ public class StripeStoreFileManager } /** - * See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with - * new candidate stripes/removes old stripes; produces new set of stripe end rows. - * @param newStripes New stripes - files by end row. + * See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with new + * candidate stripes/removes old stripes; produces new set of stripe end rows. + * @param newStripes New stripes - files by end row. */ private void processNewCandidateStripes(TreeMap newStripes) { // Validate that the removed and added aggregate ranges still make for a full key space. 
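The CompactionOrFlushMergeCopy helper introduced above follows a copy-on-write shape: mutate private copies of the file lists, then publish one new immutable State in a single step so a half-applied merge is never visible. A generic sketch of that pattern, not the class's actual fields:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;

    final class SnapshotSwapSketch<F> {
      private volatile ImmutableList<F> files = ImmutableList.of();

      // Build the next snapshot off to the side; the only shared mutation is the final assignment.
      void replace(List<F> removed, List<F> added) {
        List<F> next = new ArrayList<>(files);
        next.removeAll(removed);
        next.addAll(added);
        files = ImmutableList.copyOf(next);
      }

      ImmutableList<F> snapshot() {
        return files;
      }
    }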
boolean hasStripes = !this.stripeFiles.isEmpty(); - this.stripeEndRows = new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); + this.stripeEndRows = + new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); int removeFrom = 0; byte[] firstStartRow = startOf(newStripes.firstEntry().getValue()); byte[] lastEndRow = newStripes.lastKey(); @@ -937,13 +935,13 @@ public class StripeStoreFileManager if (isFlush) { long newSize = StripeCompactionPolicy.getTotalFileSize(newStripes.values()); LOG.warn("Stripes were created by a flush, but results of size " + newSize - + " cannot be added because the stripes have changed"); + + " cannot be added because the stripes have changed"); canAddNewStripes = false; filesForL0 = newStripes.values(); } else { long oldSize = StripeCompactionPolicy.getTotalFileSize(conflictingFiles); LOG.info(conflictingFiles.size() + " conflicting files (likely created by a flush) " - + " of size " + oldSize + " are moved to L0 due to concurrent stripe change"); + + " of size " + oldSize + " are moved to L0 due to concurrent stripe change"); filesForL0 = conflictingFiles; } if (filesForL0 != null) { @@ -979,8 +977,8 @@ public class StripeStoreFileManager assert !isOpen(previousEndRow); byte[] startRow = startOf(newStripe.getValue()); if (!rowEquals(previousEndRow, startRow)) { - throw new IllegalStateException("The new stripes produced by " + - (isFlush ? "flush" : "compaction") + " are not contiguous"); + throw new IllegalStateException("The new stripes produced by " + + (isFlush ? "flush" : "compaction") + " are not contiguous"); } } // Add the new stripe. @@ -1036,7 +1034,7 @@ public class StripeStoreFileManager } private Collection findExpiredFiles(ImmutableList stripe, long maxTs, - List filesCompacting, Collection expiredStoreFiles) { + List filesCompacting, Collection expiredStoreFiles) { // Order by seqnum is reversed. for (int i = 1; i < stripe.size(); ++i) { HStoreFile sf = stripe.get(i); @@ -1044,7 +1042,7 @@ public class StripeStoreFileManager long fileTs = sf.getReader().getMaxTimestamp(); if (fileTs < maxTs && !filesCompacting.contains(sf)) { LOG.info("Found an expired store file: " + sf.getPath() + " whose maxTimestamp is " - + fileTs + ", which is below " + maxTs); + + fileTs + ", which is below " + maxTs); if (expiredStoreFiles == null) { expiredStoreFiles = new ArrayList<>(); } @@ -1072,9 +1070,8 @@ public class StripeStoreFileManager double max = 0.0; for (ImmutableList stripeFile : stateLocal.stripeFiles) { int stripeFileCount = stripeFile.size(); - double normCount = - (double) (stripeFileCount + delta - config.getStripeCompactMinFiles()) - / (blockingFilePerStripe - config.getStripeCompactMinFiles()); + double normCount = (double) (stripeFileCount + delta - config.getStripeCompactMinFiles()) + / (blockingFilePerStripe - config.getStripeCompactMinFiles()); if (normCount >= 1.0) { // This could happen if stripe is not split evenly. Do not return values that larger than // 1.0 because we have not reached the blocking file count actually. 
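The normalisation at the end of this class maps a stripe's file count onto a 0..1 scale against its share of the blocking file count. With hypothetical numbers (7 files in the stripe, 1 in flight, a minimum of 3 and a per-stripe blocking share of 10) it evaluates to (7+1-3)/(10-3) ≈ 0.71; a tiny sketch:

    final class StripeBlockingSketch {
      // Same normalisation as above: (files + delta - minFiles) / (blockingShare - minFiles),
      // clamped to 1.0 by the caller when a stripe is unevenly loaded.
      static double normalizedStripeFill(int stripeFileCount, int delta, int stripeCompactMinFiles,
          int blockingFilePerStripe) {
        return (double) (stripeFileCount + delta - stripeCompactMinFiles)
            / (blockingFilePerStripe - stripeCompactMinFiles);
      }

      public static void main(String[] args) {
        System.out.println(normalizedStripeFill(7, 1, 3, 10)); // ≈ 0.714
      }
    }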
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index fb9115e01ec..6df7bd34e3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,8 +34,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Stripe implementation of StoreFlusher. Flushes files either into L0 file w/o metadata, or - * into separate striped files, avoiding L0. + * Stripe implementation of StoreFlusher. Flushes files either into L0 file w/o metadata, or into + * separate striped files, avoiding L0. */ @InterfaceAudience.Private public class StripeStoreFlusher extends StoreFlusher { @@ -45,8 +44,8 @@ public class StripeStoreFlusher extends StoreFlusher { private final StripeCompactionPolicy policy; private final StripeCompactionPolicy.StripeInformationProvider stripes; - public StripeStoreFlusher(Configuration conf, HStore store, - StripeCompactionPolicy policy, StripeStoreFileManager stripes) { + public StripeStoreFlusher(Configuration conf, HStore store, StripeCompactionPolicy policy, + StripeStoreFileManager stripes) { super(conf, store); this.policy = policy; this.stripes = stripes; @@ -66,8 +65,8 @@ public class StripeStoreFlusher extends StoreFlusher { InternalScanner scanner = createScanner(snapshot.getScanners(), tracker); // Let policy select flush method. - StripeFlushRequest req = this.policy.selectFlush(store.getComparator(), this.stripes, - cellsCount); + StripeFlushRequest req = + this.policy.selectFlush(store.getComparator(), this.stripes, cellsCount); boolean success = false; StripeMultiFileWriter mw = null; @@ -124,7 +123,7 @@ public class StripeStoreFlusher extends StoreFlusher { public StripeMultiFileWriter createWriter() throws IOException { StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter(comparator, 1, - Long.MAX_VALUE, OPEN_KEY, OPEN_KEY); + Long.MAX_VALUE, OPEN_KEY, OPEN_KEY); writer.setNoStripeMetadata(); return writer; } @@ -143,7 +142,7 @@ public class StripeStoreFlusher extends StoreFlusher { @Override public StripeMultiFileWriter createWriter() throws IOException { return new StripeMultiFileWriter.BoundaryMultiWriter(comparator, targetBoundaries, null, - null); + null); } } @@ -154,8 +153,8 @@ public class StripeStoreFlusher extends StoreFlusher { /** * @param targetCount The maximum number of stripes to flush into. - * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than - * total number of kvs, all the overflow data goes into the last stripe. + * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than total + * number of kvs, all the overflow data goes into the last stripe. 
*/ public SizeStripeFlushRequest(CellComparator comparator, int targetCount, long targetKvs) { super(comparator); @@ -166,7 +165,7 @@ public class StripeStoreFlusher extends StoreFlusher { @Override public StripeMultiFileWriter createWriter() throws IOException { return new StripeMultiFileWriter.SizeMultiWriter(comparator, this.targetCount, this.targetKvs, - OPEN_KEY, OPEN_KEY); + OPEN_KEY, OPEN_KEY); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java index e8eaf452d01..a49df9378d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java @@ -19,12 +19,11 @@ package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; /** - * Accounting of current heap and data sizes. - * Thread-safe. Many threads can do updates against this single instance. + * Accounting of current heap and data sizes. Thread-safe. Many threads can do updates against this + * single instance. * @see NonThreadSafeMemStoreSizing * @see MemStoreSize */ @@ -56,7 +55,7 @@ class ThreadSafeMemStoreSizing implements MemStoreSizing { @Override public long incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, - int cellsCountDelta) { + int cellsCountDelta) { this.offHeapSize.addAndGet(offHeapSizeDelta); this.heapSize.addAndGet(heapSizeDelta); this.cellsCount.addAndGet(cellsCountDelta); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index 6a38be1f3b3..702e014f005 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,14 +35,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** * Stores minimum and maximum timestamp values, it is [minimumTimestamp, maximumTimestamp] in - * interval notation. - * Use this class at write-time ONLY. Too much synchronization to use at read time - * Use {@link TimeRange} at read time instead of this. See toTimeRange() to make TimeRange to use. - * MemStores use this class to track minimum and maximum timestamps. The TimeRangeTracker made by - * the MemStore is passed to the StoreFile for it to write out as part a flush in the the file + * interval notation. Use this class at write-time ONLY. Too much synchronization to use at read + * time Use {@link TimeRange} at read time instead of this. See toTimeRange() to make TimeRange to + * use. MemStores use this class to track minimum and maximum timestamps. The TimeRangeTracker made + * by the MemStore is passed to the StoreFile for it to write out as part a flush in the the file * metadata. If no memstore involved -- i.e. a compaction -- then the StoreFile will calculate its - * own TimeRangeTracker as it appends. 
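The tracker described above only ever widens a [min, max] timestamp interval at write time; its thread-safe variant further down does so with compare-and-set retry loops over AtomicLongs. A minimal sketch of that update loop, using plain Long.MAX_VALUE / Long.MIN_VALUE sentinels instead of the class's INITIAL_* constants:

    import java.util.concurrent.atomic.AtomicLong;

    final class MinMaxTimestampSketch {
      private final AtomicLong min = new AtomicLong(Long.MAX_VALUE);
      private final AtomicLong max = new AtomicLong(Long.MIN_VALUE);

      // Retry until either the timestamp no longer improves the bound or the CAS wins.
      void include(long ts) {
        long cur;
        while (ts < (cur = min.get()) && !min.compareAndSet(cur, ts)) {
          // lost the race, re-read and try again
        }
        while (ts > (cur = max.get()) && !max.compareAndSet(cur, ts)) {
          // lost the race, re-read and try again
        }
      }

      long getMin() { return min.get(); }
      long getMax() { return max.get(); }
    }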
The StoreFile serialized TimeRangeTracker is used - * at read time via an instance of {@link TimeRange} to test if Cells fit the StoreFile TimeRange. + * own TimeRangeTracker as it appends. The StoreFile serialized TimeRangeTracker is used at read + * time via an instance of {@link TimeRange} to test if Cells fit the StoreFile TimeRange. */ @InterfaceAudience.Private public abstract class TimeRangeTracker { @@ -92,13 +90,17 @@ public abstract class TimeRangeTracker { } protected abstract void setMax(long ts); + protected abstract void setMin(long ts); + protected abstract boolean compareAndSetMin(long expect, long update); + protected abstract boolean compareAndSetMax(long expect, long update); + /** - * Update the current TimestampRange to include the timestamp from cell. - * If the Key is of type DeleteColumn or DeleteFamily, it includes the - * entire time range from 0 to timestamp of the key. + * Update the current TimestampRange to include the timestamp from cell. If the Key + * is of type DeleteColumn or DeleteFamily, it includes the entire time range from 0 to timestamp + * of the key. * @param cell the Cell to include */ public void includeTimestamp(final Cell cell) { @@ -112,8 +114,8 @@ public abstract class TimeRangeTracker { * If required, update the current TimestampRange to include timestamp * @param timestamp the timestamp value to include */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MT_CORRECTNESS", + justification = "Intentional") void includeTimestamp(final long timestamp) { long initialMinTimestamp = getMin(); if (timestamp < initialMinTimestamp) { @@ -128,14 +130,14 @@ public abstract class TimeRangeTracker { } // When it reaches here, there are two possibilities: - // 1). timestamp >= curMinTimestamp, someone already sets the minimumTimestamp. In this case, - // it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP to see - // if it needs to update minimumTimestamp. Someone may already set both - // minimumTimestamp/minimumTimestamp to the same value(curMinTimestamp), - // need to check if maximumTimestamp needs to be updated. - // 2). timestamp < curMinTimestamp, it sets the minimumTimestamp successfully. - // In this case,it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP - // to see if it needs to set maximumTimestamp. + // 1). timestamp >= curMinTimestamp, someone already sets the minimumTimestamp. In this case, + // it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP to see + // if it needs to update minimumTimestamp. Someone may already set both + // minimumTimestamp/minimumTimestamp to the same value(curMinTimestamp), + // need to check if maximumTimestamp needs to be updated. + // 2). timestamp < curMinTimestamp, it sets the minimumTimestamp successfully. + // In this case,it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP + // to see if it needs to set maximumTimestamp. if (initialMinTimestamp != INITIAL_MIN_TIMESTAMP) { // Someone already sets minimumTimestamp and timestamp is less than minimumTimestamp. // In this case, no need to set maximumTimestamp as it will be set to at least @@ -185,8 +187,7 @@ public abstract class TimeRangeTracker { /** * @param data the serialization data. It can't be null! * @return An instance of NonSyncTimeRangeTracker filled w/ the content of serialized - * NonSyncTimeRangeTracker in timeRangeTrackerBytes. 
- * @throws IOException + * NonSyncTimeRangeTracker in timeRangeTrackerBytes. n */ public static TimeRangeTracker parseFrom(final byte[] data) throws IOException { return parseFrom(data, Type.NON_SYNC); @@ -207,11 +208,11 @@ public abstract class TimeRangeTracker { } /** - * This method used to serialize TimeRangeTracker (TRT) by protobuf while this breaks the - * forward compatibility on HFile.(See HBASE-21008) In previous hbase version ( < 2.0.0 ) we use - * DataOutput to serialize TRT, these old versions don't have capability to deserialize TRT - * which is serialized by protobuf. So we need to revert the change of serializing - * TimeRangeTracker back to DataOutput. For more information, please check HBASE-21012. + * This method used to serialize TimeRangeTracker (TRT) by protobuf while this breaks the forward + * compatibility on HFile.(See HBASE-21008) In previous hbase version ( < 2.0.0 ) we use + * DataOutput to serialize TRT, these old versions don't have capability to deserialize TRT which + * is serialized by protobuf. So we need to revert the change of serializing TimeRangeTracker back + * to DataOutput. For more information, please check HBASE-21012. * @param tracker TimeRangeTracker needed to be serialized. * @return byte array filled with serialized TimeRangeTracker. * @throws IOException if something goes wrong in writeLong. @@ -242,7 +243,7 @@ public abstract class TimeRangeTracker { return new TimeRange(min, max); } - //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. + // In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. public static class NonSyncTimeRangeTracker extends TimeRangeTracker { private long minimumTimestamp = INITIAL_MIN_TIMESTAMP; private long maximumTimestamp = INITIAL_MAX_TIMESTAMP; @@ -299,7 +300,7 @@ public abstract class TimeRangeTracker { } } - //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. + // In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. public static class SyncTimeRangeTracker extends TimeRangeTracker { private final AtomicLong minimumTimestamp = new AtomicLong(INITIAL_MIN_TIMESTAMP); private final AtomicLong maximumTimestamp = new AtomicLong(INITIAL_MAX_TIMESTAMP); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java index d5be356f93f..2d80fad37a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,13 +22,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** * A list of segment managers coupled with the version of the memstore (version at the time it was - * created). - * This structure helps to guarantee that the compaction pipeline updates after the compaction is - * updated in a consistent (atomic) way. - * Specifically, swapping some of the elements in a compaction pipeline with a new compacted - * element is permitted only if the pipeline version is the same as the version attached to the - * elements. - * + * created). 
This structure helps to guarantee that the compaction pipeline updates after the + * compaction is updated in a consistent (atomic) way. Specifically, swapping some of the elements + * in a compaction pipeline with a new compacted element is permitted only if the pipeline version + * is the same as the version attached to the elements. */ @InterfaceAudience.Private public class VersionedSegmentsList { @@ -70,9 +66,9 @@ public class VersionedSegmentsList { for (ImmutableSegment s : storeSegments) { double segmentUniques = s.getNumUniqueKeys(); - if(segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) { + if (segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) { segmentCells = s.getCellsCount(); - if(segmentCells > maxCells) { + if (segmentCells > maxCells) { maxCells = segmentCells; est = segmentUniques / segmentCells; } @@ -80,7 +76,7 @@ public class VersionedSegmentsList { // else ignore this segment specifically since if the unique number is unknown counting // cells can be expensive } - if(maxCells == 0) { + if (maxCells == 0) { return 1.0; } return est; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java index 23d16934b65..f5a662ffe14 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; */ @InterfaceAudience.Private public abstract class AbstractMultiOutputCompactor - extends Compactor { + extends Compactor { private static final Logger LOG = LoggerFactory.getLogger(AbstractMultiOutputCompactor.class); @@ -52,15 +52,15 @@ public abstract class AbstractMultiOutputCompactor - * Compaction configuration for a particular instance of HStore. - * Takes into account both global settings and ones set on the column family/store. - * Control knobs for default compaction algorithm: + * Compaction configuration for a particular instance of HStore. Takes into account both global + * settings and ones set on the column family/store. Control knobs for default compaction algorithm: *
      *
      - * maxCompactSize - upper bound on file size to be included in minor compactions - * minCompactSize - lower bound below which compaction is selected without ratio test - * minFilesToCompact - lower bound on number of files in any minor compaction - * maxFilesToCompact - upper bound on number of files in any minor compaction - * compactionRatio - Ratio used for compaction - * minLocalityToForceCompact - Locality threshold for a store file to major compact (HBASE-11195) + * maxCompactSize - upper bound on file size to be included in minor compactions minCompactSize - + * lower bound below which compaction is selected without ratio test minFilesToCompact - lower bound + * on number of files in any minor compaction maxFilesToCompact - upper bound on number of files in + * any minor compaction compactionRatio - Ratio used for compaction minLocalityToForceCompact - + * Locality threshold for a store file to major compact (HBASE-11195) *
      * Set parameter as "hbase.hstore.compaction.<attribute>" */ @@ -60,14 +56,14 @@ public class CompactionConfiguration { public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY = "hbase.hstore.compaction.max.size"; public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY = - "hbase.hstore.compaction.max.size.offpeak"; + "hbase.hstore.compaction.max.size.offpeak"; public static final String HBASE_HSTORE_OFFPEAK_END_HOUR = "hbase.offpeak.end.hour"; public static final String HBASE_HSTORE_OFFPEAK_START_HOUR = "hbase.offpeak.start.hour"; public static final String HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT = - "hbase.hstore.min.locality.to.skip.major.compact"; + "hbase.hstore.min.locality.to.skip.major.compact"; public static final String HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT = - "hbase.hfile.compaction.discharger.thread.count"; + "hbase.hfile.compaction.discharger.thread.count"; /* * The epoch time length for the windows we no longer compact @@ -81,14 +77,16 @@ public class CompactionConfiguration { public static final String DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY = "hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction"; - private static final Class - DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS = ExploringCompactionPolicy.class; + private static final Class< + ? extends RatioBasedCompactionPolicy> DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS = + ExploringCompactionPolicy.class; public static final String DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY = "hbase.hstore.compaction.date.tiered.window.factory.class"; - private static final Class - DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS = ExponentialCompactionWindowFactory.class; + private static final Class< + ? 
extends CompactionWindowFactory> DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS = + ExponentialCompactionWindowFactory.class; public static final String DATE_TIERED_STORAGE_POLICY_ENABLE_KEY = "hbase.hstore.compaction.date.tiered.storage.policy.enable"; @@ -137,34 +135,34 @@ public class CompactionConfiguration { this.storeConfigInfo = storeConfigInfo; maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE); - offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, - maxCompactSize); - minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, - storeConfigInfo.getMemStoreFlushSize()); + offPeakMaxCompactSize = + conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, maxCompactSize); + minCompactSize = + conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, storeConfigInfo.getMemStoreFlushSize()); minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY, - /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3))); + /* old name */ conf.getInt("hbase.hstore.compactionThreshold", 3))); maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10); compactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F); offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F); throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle", - 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()); - majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, - HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD); - majorCompactionJitter = conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER, - HConstants.DEFAULT_MAJOR_COMPACTION_JITTER); + 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()); + majorCompactionPeriod = + conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD); + majorCompactionJitter = + conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER, HConstants.DEFAULT_MAJOR_COMPACTION_JITTER); minLocalityToForceCompact = conf.getFloat(HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT, 0f); dateTieredMaxStoreFileAgeMillis = conf.getLong(DATE_TIERED_MAX_AGE_MILLIS_KEY, Long.MAX_VALUE); dateTieredIncomingWindowMin = conf.getInt(DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 6); - compactionPolicyForDateTieredWindow = conf.get( - COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY, - DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName()); - dateTieredSingleOutputForMinorCompaction = conf - .getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true); - this.dateTieredCompactionWindowFactory = conf.get( - DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY, - DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName()); + compactionPolicyForDateTieredWindow = + conf.get(COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY, + DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName()); + dateTieredSingleOutputForMinorCompaction = + conf.getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true); + this.dateTieredCompactionWindowFactory = + conf.get(DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY, + DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName()); // for Heterogeneous Storage dateTieredStoragePolicyEnable = conf.getBoolean(DATE_TIERED_STORAGE_POLICY_ENABLE_KEY, false); hotWindowAgeMillis = conf.getLong(DATE_TIERED_HOT_WINDOW_AGE_MILLIS_KEY, 86400000L); @@ -179,32 +177,20 @@ public class CompactionConfiguration { public String toString() { return 
String.format( "size [minCompactSize:%s, maxCompactSize:%s, offPeakMaxCompactSize:%s);" - + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" - + " ratio %f; off-peak ratio %f; throttle point %d;" - + " major period %d, major jitter %f, min locality to compact %f;" - + " tiered compaction: max_age %d, incoming window min %d," - + " compaction policy for tiered window %s, single output for minor %b," - + " compaction window factory %s," - + " region %s columnFamilyName %s", - StringUtils.byteDesc(minCompactSize), - StringUtils.byteDesc(maxCompactSize), - StringUtils.byteDesc(offPeakMaxCompactSize), - minFilesToCompact, - maxFilesToCompact, - compactionRatio, - offPeakCompactionRatio, - throttlePoint, - majorCompactionPeriod, - majorCompactionJitter, - minLocalityToForceCompact, - dateTieredMaxStoreFileAgeMillis, - dateTieredIncomingWindowMin, - compactionPolicyForDateTieredWindow, - dateTieredSingleOutputForMinorCompaction, - dateTieredCompactionWindowFactory, + + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" + + " ratio %f; off-peak ratio %f; throttle point %d;" + + " major period %d, major jitter %f, min locality to compact %f;" + + " tiered compaction: max_age %d, incoming window min %d," + + " compaction policy for tiered window %s, single output for minor %b," + + " compaction window factory %s," + " region %s columnFamilyName %s", + StringUtils.byteDesc(minCompactSize), StringUtils.byteDesc(maxCompactSize), + StringUtils.byteDesc(offPeakMaxCompactSize), minFilesToCompact, maxFilesToCompact, + compactionRatio, offPeakCompactionRatio, throttlePoint, majorCompactionPeriod, + majorCompactionJitter, minLocalityToForceCompact, dateTieredMaxStoreFileAgeMillis, + dateTieredIncomingWindowMin, compactionPolicyForDateTieredWindow, + dateTieredSingleOutputForMinorCompaction, dateTieredCompactionWindowFactory, RegionInfo.prettyPrint(storeConfigInfo.getRegionInfo().getEncodedName()), - storeConfigInfo.getColumnFamilyName() - ); + storeConfigInfo.getColumnFamilyName()); } /** @@ -265,16 +251,16 @@ public class CompactionConfiguration { } /** - * @return Major compaction period from compaction. - * Major compactions are selected periodically according to this parameter plus jitter + * @return Major compaction period from compaction. Major compactions are selected periodically + * according to this parameter plus jitter */ public long getMajorCompactionPeriod() { return majorCompactionPeriod; } /** - * @return Major the jitter fraction, the fraction within which the major compaction - * period is randomly chosen from the majorCompactionPeriod in each store. + * @return Major the jitter fraction, the fraction within which the major compaction period is + * randomly chosen from the majorCompactionPeriod in each store. */ public float getMajorCompactionJitter() { return majorCompactionJitter; @@ -282,8 +268,8 @@ public class CompactionConfiguration { /** * @return Block locality ratio, the ratio at which we will include old regions with a single - * store file for major compaction. Used to improve block locality for regions that - * haven't had writes in a while but are still being read. + * store file for major compaction. Used to improve block locality for regions that + * haven't had writes in a while but are still being read. 
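The knobs reformatted in the CompactionConfiguration javadoc above are ordinary Configuration entries. A minimal sketch of supplying a few of them, using only key strings that appear verbatim in this hunk; the values are arbitrary illustrations, not recommended settings:

import org.apache.hadoop.conf.Configuration;

final class CompactionTuningSketch {
  static Configuration example() {
    Configuration conf = new Configuration();
    conf.setLong("hbase.hstore.compaction.max.size", 8L * 1024 * 1024 * 1024); // cap file size in minor compactions
    conf.setLong("hbase.hstore.compaction.max.size.offpeak", Long.MAX_VALUE);  // relax the cap off-peak
    conf.setInt("hbase.offpeak.start.hour", 0);
    conf.setInt("hbase.offpeak.end.hour", 6);
    conf.setInt("hbase.hstore.compactionThreshold", 3); // legacy name, still read as a fallback
    conf.setFloat("hbase.hstore.min.locality.to.skip.major.compact", 0.7f);
    return conf;
  }
}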
*/ public float getMinLocalityToForceCompact() { return minLocalityToForceCompact; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java index 9aa383c4e66..df65777ab5d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,27 +19,24 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; - /** - * This class holds all "physical" details necessary to run a compaction, - * and abstracts away the details specific to a particular compaction. - * It also has compaction request with all the logical details. - * Hence, this class is basically the compaction. + * This class holds all "physical" details necessary to run a compaction, and abstracts away the + * details specific to a particular compaction. It also has compaction request with all the logical + * details. Hence, this class is basically the compaction. */ @InterfaceAudience.Private public abstract class CompactionContext { protected CompactionRequestImpl request = null; /** - * Called before coprocessor preCompactSelection and should filter the candidates - * for coprocessor; i.e. exclude the files that definitely cannot be compacted at this time. + * Called before coprocessor preCompactSelection and should filter the candidates for coprocessor; + * i.e. exclude the files that definitely cannot be compacted at this time. * @param filesCompacting files currently compacting * @return the list of files that can theoretically be compacted. */ @@ -48,14 +44,14 @@ public abstract class CompactionContext { /** * Called to select files for compaction. Must fill in the request field if successful. - * @param filesCompacting Files currently being compacted by other compactions. + * @param filesCompacting Files currently being compacted by other compactions. * @param isUserCompaction Whether this is a user compaction. - * @param mayUseOffPeak Whether the underlying policy may assume it's off-peak hours. - * @param forceMajor Whether to force major compaction. + * @param mayUseOffPeak Whether the underlying policy may assume it's off-peak hours. + * @param forceMajor Whether to force major compaction. * @return Whether the selection succeeded. Selection may be empty and lead to no compaction. */ public abstract boolean select(List filesCompacting, boolean isUserCompaction, - boolean mayUseOffPeak, boolean forceMajor) throws IOException; + boolean mayUseOffPeak, boolean forceMajor) throws IOException; /** * Forces external selection to be applied for this compaction. 
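For orientation, a sketch of how a caller drives the contract documented above: narrow the candidates with preSelect, fill in the request with select, then run compact. The driver method itself is hypothetical; the CompactionContext signatures are the ones visible in this diff, and preSelect's return type is assumed to be the filtered candidate list:

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.User;

final class CompactionDriverSketch {
  static List<Path> run(CompactionContext context, List<HStoreFile> filesCompacting,
    ThroughputController throttle, User user) throws IOException {
    // Drop files that definitely cannot be compacted right now.
    List<HStoreFile> candidates = context.preSelect(filesCompacting);
    if (candidates.isEmpty()) {
      return Collections.emptyList();
    }
    // Not a user compaction, no off-peak assumption, no forced major.
    if (!context.select(filesCompacting, false, false, false)) {
      return Collections.emptyList();
    }
    // Runs the compaction described by the request that select() filled in.
    return context.compact(throttle, user);
  }
}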
@@ -66,7 +62,7 @@ public abstract class CompactionContext { } public abstract List compact(ThroughputController throughputController, User user) - throws IOException; + throws IOException; public CompactionRequestImpl getRequest() { assert hasSelection(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java index dfff2f980fb..5feaf15b631 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java index 755b9d39cb2..772e21f0424 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.yetus.audience.InterfaceAudience; /** - * A compaction policy determines how to select files for compaction, - * how to compact them, and how to generate the compacted files. + * A compaction policy determines how to select files for compaction, how to compact them, and how + * to generate the compacted files. */ @InterfaceAudience.Private public abstract class CompactionPolicy { @@ -46,7 +43,7 @@ public abstract class CompactionPolicy { * @return True if we should run a major compaction. */ public abstract boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException; + throws IOException; /** * @param compactionSize Total size of some compaction @@ -55,8 +52,8 @@ public abstract class CompactionPolicy { public abstract boolean throttleCompaction(long compactionSize); /** - * Inform the policy that some configuration has been change, - * so cached value should be updated it any. + * Inform the policy that some configuration has been change, so cached value should be updated it + * any. 
*/ public void setConf(Configuration conf) { this.comConf = new CompactionConfiguration(conf, this.storeConfigInfo); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java index 2ccdd150cd2..72634bbf2ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import org.apache.yetus.audience.InterfaceAudience; @@ -24,13 +22,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This class holds information relevant for tracking the progress of a - * compaction. - * - *
The metrics tracked allow one to calculate the percent completion of the
- * compaction based on the number of Key/Value pairs already compacted vs.
- * total amount scheduled to be compacted.
- *
+ * This class holds information relevant for tracking the progress of a compaction.
+ * <p>
      + * The metrics tracked allow one to calculate the percent completion of the compaction based on the + * number of Key/Value pairs already compacted vs. total amount scheduled to be compacted. */ @InterfaceAudience.Private public class CompactionProgress { @@ -43,18 +38,19 @@ public class CompactionProgress { /** the total size of data processed by the currently running compaction, in bytes */ public long totalCompactedSize = 0; - /** Constructor + /** + * Constructor * @param totalCompactingKVs the total Key/Value pairs to be compacted */ public CompactionProgress(long totalCompactingKVs) { this.totalCompactingKVs = totalCompactingKVs; } - /** getter for calculated percent complete - * @return float + /** + * getter for calculated percent complete n */ public float getProgressPct() { - return (float)currentCompactedKVs / getTotalCompactingKVs(); + return (float) currentCompactedKVs / getTotalCompactingKVs(); } /** @@ -65,8 +61,8 @@ public class CompactionProgress { } /** - * Marks the compaction as complete by setting total to current KV count; - * Total KV count is an estimate, so there might be a discrepancy otherwise. + * Marks the compaction as complete by setting total to current KV count; Total KV count is an + * estimate, so there might be a discrepancy otherwise. */ public void complete() { this.totalCompactingKVs = this.currentCompactedKVs; @@ -77,8 +73,8 @@ public class CompactionProgress { */ public long getTotalCompactingKVs() { if (totalCompactingKVs < currentCompactedKVs) { - LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", - totalCompactingKVs, currentCompactedKVs); + LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", totalCompactingKVs, + currentCompactedKVs); return currentCompactedKVs; } return totalCompactingKVs; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 73f36837f9e..723cefb73d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +17,11 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import java.util.Collection; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.yetus.audience.InterfaceAudience; -import java.util.Collection; - /** * Coprocessors use this interface to get details about compaction. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java index 5d8285aecdb..a553ba27997 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
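The percent-complete figure described above is simple arithmetic over two counters, with the same guard getTotalCompactingKVs() applies when the estimated total lags what has already been compacted. A standalone sketch; all names are local to the example:

final class ProgressPctSketch {
  static float progressPct(long totalCompactingKVs, long currentCompactedKVs) {
    // The total is only an estimate, so never let it fall below the work already done.
    long total = Math.max(totalCompactingKVs, currentCompactedKVs);
    return total == 0 ? 0f : (float) currentCompactedKVs / total;
  }
}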
See the NOTICE file * distributed with this work for additional information @@ -40,7 +39,13 @@ public class CompactionRequestImpl implements CompactionRequest { // was this compaction promoted to an off-peak private boolean isOffPeak = false; - private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR } + + private enum DisplayCompactionType { + MINOR, + ALL_FILES, + MAJOR + } + private DisplayCompactionType isMajor = DisplayCompactionType.MINOR; private int priority = NO_PRIORITY; private Collection filesToCompact; @@ -87,7 +92,7 @@ public class CompactionRequestImpl implements CompactionRequest { @Override public boolean isAllFiles() { return this.isMajor == DisplayCompactionType.MAJOR - || this.isMajor == DisplayCompactionType.ALL_FILES; + || this.isMajor == DisplayCompactionType.ALL_FILES; } @Override @@ -123,12 +128,13 @@ public class CompactionRequestImpl implements CompactionRequest { /** * Specify if this compaction should be a major compaction based on the state of the store * @param isMajor true if the system determines that this compaction should be a major - * compaction + * compaction */ public void setIsMajor(boolean isMajor, boolean isAllFiles) { assert isAllFiles || !isMajor; - this.isMajor = !isAllFiles ? DisplayCompactionType.MINOR - : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); + this.isMajor = !isAllFiles + ? DisplayCompactionType.MINOR + : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); } public void setTracker(CompactionLifeCycleTracker tracker) { @@ -236,14 +242,14 @@ public class CompactionRequestImpl implements CompactionRequest { @Override public String toString() { String fsList = filesToCompact.stream().filter(f -> f.getReader() != null) - .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1)) - .collect(Collectors.joining(", ")); + .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1)) + .collect(Collectors.joining(", ")); - return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + - this.getFiles().size() + ", fileSize=" + - TraditionalBinaryPrefix.long2String(totalSize, "", 1) + - ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + - selectionTime; + return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + + this.getFiles().size() + ", fileSize=" + + TraditionalBinaryPrefix.long2String(totalSize, "", 1) + + ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + + selectionTime; } /** @@ -251,6 +257,6 @@ public class CompactionRequestImpl implements CompactionRequest { */ private void recalculateSize() { this.totalSize = filesToCompact.stream().map(HStoreFile::getReader) - .mapToLong(r -> r != null ? r.length() : 0L).sum(); + .mapToLong(r -> r != null ? r.length() : 0L).sum(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java index 31a7ca7ea4e..b55c173dda9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
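The nested ternary that setIsMajor(...) gains above encodes a three-way classification. Here is the same mapping written out as a plain method; the enum mirrors DisplayCompactionType from the hunk but is local to this sketch:

final class CompactionKindSketch {
  enum Kind { MINOR, ALL_FILES, MAJOR }

  static Kind classify(boolean isMajor, boolean isAllFiles) {
    assert isAllFiles || !isMajor; // a major compaction always covers all files
    if (!isAllFiles) {
      return Kind.MINOR;
    }
    return isMajor ? Kind.MAJOR : Kind.ALL_FILES;
  }
}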
See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; - import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; -import edu.umd.cs.findbugs.annotations.Nullable; - /** * Request a compaction. */ @@ -36,19 +34,19 @@ public interface CompactionRequester { * Request compaction on all the stores of the given region. */ void requestCompaction(HRegion region, String why, int priority, - CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; + CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; /** * Request compaction on the given store. */ void requestCompaction(HRegion region, HStore store, String why, int priority, - CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; + CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; /** * Request system compaction on the given store. */ void requestSystemCompaction(HRegion region, HStore store, String why, - boolean giveUpIfRequestedOrCompacting) throws IOException; + boolean giveUpIfRequestedOrCompacting) throws IOException; /** * on/off compaction diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java index ad0cfb4cb39..d71a9c0593c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java index bd5c85c5770..9689464f1ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 5ac64823ae0..0de3eeb024d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
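The CompactionRequester methods reindented above are the entry points a region-server component calls to queue work. A sketch of a per-store request using exactly the signature shown in this hunk; the wrapper, the reason string, and the pass-through tracker/user are assumptions of the example:

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
import org.apache.hadoop.hbase.security.User;

final class RequestCompactionSketch {
  static void requestStoreCompaction(CompactionRequester requester, HRegion region, HStore store,
    int priority, CompactionLifeCycleTracker tracker, User user) throws IOException {
    // The overload without the HStore argument requests compaction on every store of the region.
    requester.requestCompaction(region, store, "too many store files", priority, tracker, user);
  }
}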
See the NOTICE file * distributed with this work for additional information @@ -71,10 +71,9 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; * A compactor is a compaction algorithm associated a given policy. Base class also contains * reusable parts for implementing compactors (what is common and what isn't is evolving). *
      - * Compactions might be concurrent against a given store and the Compactor is shared among - * them. Do not put mutable state into class fields. All Compactor class fields should be - * final or effectively final. - * 'keepSeqIdPeriod' is an exception to this rule because unit tests may set it. + * Compactions might be concurrent against a given store and the Compactor is shared among them. Do + * not put mutable state into class fields. All Compactor class fields should be final or + * effectively final. 'keepSeqIdPeriod' is an exception to this rule because unit tests may set it. */ @InterfaceAudience.Private public abstract class Compactor { @@ -90,10 +89,10 @@ public abstract class Compactor { protected int keepSeqIdPeriod; // Configs that drive whether we drop page cache behind compactions - protected static final String MAJOR_COMPACTION_DROP_CACHE = - "hbase.regionserver.majorcompaction.pagecache.drop"; + protected static final String MAJOR_COMPACTION_DROP_CACHE = + "hbase.regionserver.majorcompaction.pagecache.drop"; protected static final String MINOR_COMPACTION_DROP_CACHE = - "hbase.regionserver.minorcompaction.pagecache.drop"; + "hbase.regionserver.minorcompaction.pagecache.drop"; protected final boolean dropCacheMajor; protected final boolean dropCacheMinor; @@ -109,12 +108,15 @@ public abstract class Compactor { this.store = store; this.compactionKVMax = this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); - this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ? - Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType(); - this.minorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ? - Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMinorCompactionCompressionType(); - this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, - HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD); + this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null) + ? Compression.Algorithm.NONE + : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType(); + this.minorCompactionCompression = (store.getColumnFamilyDescriptor() == null) + ? Compression.Algorithm.NONE + : store.getColumnFamilyDescriptor().getMinorCompactionCompressionType(); + this.keepSeqIdPeriod = + Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, HConstants.MIN_KEEP_SEQID_PERIOD), + HConstants.MIN_KEEP_SEQID_PERIOD); this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true); this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true); } @@ -136,7 +138,7 @@ public abstract class Compactor { public long maxSeqId = 0; /** Latest memstore read point found in any of the involved files */ public long maxMVCCReadpoint = 0; - /** Max tags length**/ + /** Max tags length **/ public int maxTagsLength = 0; /** Min SeqId to keep during a major compaction **/ public long minSeqIdToKeep = 0; @@ -147,21 +149,21 @@ public abstract class Compactor { /** * Extracts some details about the files to compact that are commonly needed by compactors. * @param filesToCompact Files. - * @param allFiles Whether all files are included for compaction + * @param allFiles Whether all files are included for compaction * @parma major If major compaction * @return The result. 
*/ - private FileDetails getFileDetails( - Collection filesToCompact, boolean allFiles, boolean major) throws IOException { + private FileDetails getFileDetails(Collection filesToCompact, boolean allFiles, + boolean major) throws IOException { FileDetails fd = new FileDetails(); - long oldestHFileTimestampToKeepMVCC = EnvironmentEdgeManager.currentTime() - - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod); + long oldestHFileTimestampToKeepMVCC = + EnvironmentEdgeManager.currentTime() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod); for (HStoreFile file : filesToCompact) { - if(allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) { + if (allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) { // when isAllFiles is true, all files are compacted so we can calculate the smallest // MVCC value to keep - if(fd.minSeqIdToKeep < file.getMaxMemStoreTS()) { + if (fd.minSeqIdToKeep < file.getMaxMemStoreTS()) { fd.minSeqIdToKeep = file.getMaxMemStoreTS(); } } @@ -188,8 +190,7 @@ public abstract class Compactor { // SeqId number. if (r.isBulkLoaded()) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID()); - } - else { + } else { tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY); if (tmp != null) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp)); @@ -214,17 +215,16 @@ public abstract class Compactor { } } tmp = fileInfo.get(TIMERANGE_KEY); - fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax(); - LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, " - + "encoding={}, compression={}, seqNum={}{}", - (file.getPath() == null? null: file.getPath().getName()), - keyCount, - r.getBloomFilterType().toString(), - TraditionalBinaryPrefix.long2String(r.length(), "", 1), - r.getHFileReader().getDataBlockEncoding(), - major ? majorCompactionCompression : minorCompactionCompression, - seqNum, - (allFiles? ", earliestPutTs=" + earliestPutTs: "")); + fd.latestPutTs = + tmp == null ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax(); + LOG.debug( + "Compacting {}, keycount={}, bloomtype={}, size={}, " + + "encoding={}, compression={}, seqNum={}{}", + (file.getPath() == null ? null : file.getPath().getName()), keyCount, + r.getBloomFilterType().toString(), TraditionalBinaryPrefix.long2String(r.length(), "", 1), + r.getHFileReader().getDataBlockEncoding(), + major ? majorCompactionCompression : minorCompactionCompression, seqNum, + (allFiles ? ", earliestPutTs=" + earliestPutTs : "")); } return fd; } @@ -235,7 +235,7 @@ public abstract class Compactor { * @return Scanners. 
*/ private List createFileScanners(Collection filesToCompact, - long smallestReadPoint, boolean useDropBehind) throws IOException { + long smallestReadPoint, boolean useDropBehind) throws IOException { return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind, smallestReadPoint); } @@ -248,8 +248,8 @@ public abstract class Compactor { ScanType getScanType(CompactionRequestImpl request); - InternalScanner createScanner(ScanInfo scanInfo, List scanners, ScanType scanType, - FileDetails fd, long smallestReadPoint) throws IOException; + InternalScanner createScanner(ScanInfo scanInfo, List scanners, + ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException; } protected final InternalScannerFactory defaultScannerFactory = new InternalScannerFactory() { @@ -261,7 +261,7 @@ public abstract class Compactor { @Override public InternalScanner createScanner(ScanInfo scanInfo, List scanners, - ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { + ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { return Compactor.this.createScanner(store, scanInfo, scanners, scanType, smallestReadPoint, fd.earliestPutTs); } @@ -295,13 +295,12 @@ public abstract class Compactor { String fileStoragePolicy, boolean major, Consumer writerCreationTracker) throws IOException { return store.getStoreEngine() - .createWriter( - createParams(fd, shouldDropBehind, major, writerCreationTracker) - .fileStoragePolicy(fileStoragePolicy)); + .createWriter(createParams(fd, shouldDropBehind, major, writerCreationTracker) + .fileStoragePolicy(fileStoragePolicy)); } private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType scanType, - User user) throws IOException { + User user) throws IOException { if (store.getCoprocessorHost() == null) { return store.getScanInfo(); } @@ -311,13 +310,13 @@ public abstract class Compactor { /** * Calls coprocessor, if any, to create scanners - after normal scanner creation. - * @param request Compaction request. + * @param request Compaction request. * @param scanType Scan type. - * @param scanner The default scanner created for compaction. + * @param scanner The default scanner created for compaction. * @return Scanner scanner to use (usually the default); null if compaction should not proceed. */ private InternalScanner postCompactScannerOpen(CompactionRequestImpl request, ScanType scanType, - InternalScanner scanner, User user) throws IOException { + InternalScanner scanner, User user) throws IOException { if (store.getCoprocessorHost() == null) { return scanner; } @@ -326,8 +325,8 @@ public abstract class Compactor { } protected final List compact(final CompactionRequestImpl request, - InternalScannerFactory scannerFactory, CellSinkFactory sinkFactory, - ThroughputController throughputController, User user) throws IOException { + InternalScannerFactory scannerFactory, CellSinkFactory sinkFactory, + ThroughputController throughputController, User user) throws IOException { FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles(), request.isMajor()); // Find the smallest read point across all the Scanners. 
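The cutoff computed in getFileDetails above decides which files keep their memstore timestamps: anything modified within the last keepSeqIdPeriod days retains MVCC/seqId information. The arithmetic in isolation; the method and parameter names are illustrative:

final class SeqIdCutoffSketch {
  static long oldestTimestampToKeepMvcc(long nowMillis, int keepSeqIdPeriodDays) {
    // Files modified after this instant keep their MVCC read points.
    return nowMillis - (1000L * 60 * 60 * 24 * keepSeqIdPeriodDays);
  }
}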
@@ -359,17 +358,13 @@ public abstract class Compactor { smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint); cleanSeqId = true; } - writer = sinkFactory.createWriter( - scanner, - fd, - dropCache, - request.isMajor(), + writer = sinkFactory.createWriter(scanner, fd, dropCache, request.isMajor(), request.getWriterCreationTracker()); finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId, throughputController, request.isAllFiles(), request.getFiles().size(), progress); if (!finished) { throw new InterruptedIOException("Aborting compaction of store " + store + " in region " - + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted."); + + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted."); } } finally { // createScanner may fail when seeking hfiles encounter Exception, e.g. even only one hfile @@ -399,26 +394,26 @@ public abstract class Compactor { } protected abstract List commitWriter(T writer, FileDetails fd, - CompactionRequestImpl request) throws IOException; + CompactionRequestImpl request) throws IOException; protected abstract void abortWriter(T writer) throws IOException; /** * Performs the compaction. - * @param fd FileDetails of cell sink writer - * @param scanner Where to read from. - * @param writer Where to write to. - * @param smallestReadPoint Smallest read point. - * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= - * smallestReadPoint - * @param major Is a major compaction. + * @param fd FileDetails of cell sink writer + * @param scanner Where to read from. + * @param writer Where to write to. + * @param smallestReadPoint Smallest read point. + * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= + * smallestReadPoint + * @param major Is a major compaction. * @param numofFilesToCompact the number of files to compact - * @param progress Progress reporter. + * @param progress Progress reporter. * @return Whether compaction ended; false if it was interrupted for some reason. */ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, - long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, - boolean major, int numofFilesToCompact, CompactionProgress progress) throws IOException { + long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, + boolean major, int numofFilesToCompact, CompactionProgress progress) throws IOException { assert writer instanceof ShipperListener; long bytesWrittenProgressForLog = 0; long bytesWrittenProgressForShippedCall = 0; @@ -435,12 +430,12 @@ public abstract class Compactor { long now = 0; boolean hasMore; ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); throughputController.start(compactionName); KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? 
(KeyValueScanner) scanner : null; long shippedCallSizeLimit = - (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize(); + (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize(); try { do { hasMore = scanner.next(cells, scannerContext); @@ -518,7 +513,7 @@ public abstract class Compactor { } catch (InterruptedException e) { progress.cancel(); throw new InterruptedIOException( - "Interrupted while control throughput of compacting " + compactionName); + "Interrupted while control throughput of compacting " + compactionName); } finally { // Clone last cell in the final because writer will append last cell when committing. If // don't clone here and once the scanner get closed, then the memory of last cell will be @@ -531,33 +526,33 @@ public abstract class Compactor { } /** - * @param store store - * @param scanners Store file scanners. - * @param scanType Scan type. + * @param store store + * @param scanners Store file scanners. + * @param scanType Scan type. * @param smallestReadPoint Smallest MVCC read point. - * @param earliestPutTs Earliest put across all files. + * @param earliestPutTs Earliest put across all files. * @return A compaction scanner. */ protected InternalScanner createScanner(HStore store, ScanInfo scanInfo, - List scanners, ScanType scanType, long smallestReadPoint, - long earliestPutTs) throws IOException { + List scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) + throws IOException { return new StoreScanner(store, scanInfo, scanners, scanType, smallestReadPoint, earliestPutTs); } /** - * @param store The store. - * @param scanners Store file scanners. - * @param smallestReadPoint Smallest MVCC read point. - * @param earliestPutTs Earliest put across all files. + * @param store The store. + * @param scanners Store file scanners. + * @param smallestReadPoint Smallest MVCC read point. + * @param earliestPutTs Earliest put across all files. * @param dropDeletesFromRow Drop deletes starting with this row, inclusive. Can be null. - * @param dropDeletesToRow Drop deletes ending with this row, exclusive. Can be null. + * @param dropDeletesToRow Drop deletes ending with this row, exclusive. Can be null. * @return A compaction scanner. 
*/ protected InternalScanner createScanner(HStore store, ScanInfo scanInfo, - List scanners, long smallestReadPoint, long earliestPutTs, - byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException { + List scanners, long smallestReadPoint, long earliestPutTs, + byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException { return new StoreScanner(store, scanInfo, scanners, smallestReadPoint, earliestPutTs, - dropDeletesFromRow, dropDeletesToRow); + dropDeletesFromRow, dropDeletesToRow); } /** @@ -568,7 +563,7 @@ public abstract class Compactor { long totalCompactingKVs = 0; long currentCompactedKVs = 0; long totalCompactedSize = 0; - for (CompactionProgress progress: progressSet) { + for (CompactionProgress progress : progressSet) { totalCompactingKVs += progress.totalCompactingKVs; currentCompactedKVs += progress.currentCompactedKVs; totalCompactedSize += progress.totalCompactedSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java index ebbaa456047..8c2a65395fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java @@ -68,7 +68,7 @@ public class CurrentHourProvider { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") static void advanceTick() { tick = nextTick(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java index 2c6046befd7..812bb96430c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -72,24 +71,24 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { private final CompactionWindowFactory windowFactory; public DateTieredCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) - throws IOException { + throws IOException { super(conf, storeConfigInfo); try { - compactionPolicyPerWindow = ReflectionUtils.instantiateWithCustomCtor( - comConf.getCompactionPolicyForDateTieredWindow(), - new Class[] { Configuration.class, StoreConfigInformation.class }, - new Object[] { conf, storeConfigInfo }); + compactionPolicyPerWindow = + ReflectionUtils.instantiateWithCustomCtor(comConf.getCompactionPolicyForDateTieredWindow(), + new Class[] { Configuration.class, StoreConfigInformation.class }, + new Object[] { conf, storeConfigInfo }); } catch (Exception e) { throw new IOException("Unable to load configured compaction policy '" - + comConf.getCompactionPolicyForDateTieredWindow() + "'", e); + + comConf.getCompactionPolicyForDateTieredWindow() + "'", e); } try { - windowFactory = ReflectionUtils.instantiateWithCustomCtor( - comConf.getDateTieredCompactionWindowFactory(), - new Class[] { CompactionConfiguration.class }, new Object[] { comConf }); + windowFactory = + ReflectionUtils.instantiateWithCustomCtor(comConf.getDateTieredCompactionWindowFactory(), + new Class[] { CompactionConfiguration.class }, new Object[] { comConf }); } catch (Exception e) { throw new IOException("Unable to load configured window factory '" - + comConf.getDateTieredCompactionWindowFactory() + "'", e); + + comConf.getDateTieredCompactionWindowFactory() + "'", e); } } @@ -99,7 +98,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { @InterfaceAudience.Private @Override public boolean needsCompaction(Collection storeFiles, - List filesCompacting) { + List filesCompacting) { ArrayList candidates = new ArrayList<>(storeFiles); try { return !selectMinorCompaction(candidates, false, true).getFiles().isEmpty(); @@ -111,7 +110,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { @Override public boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException { + throws IOException { long mcTime = getNextMajorCompactTime(filesToCompact); if (filesToCompact == null || mcTime == 0) { if (LOG.isDebugEnabled()) { @@ -125,8 +124,8 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { long now = EnvironmentEdgeManager.currentTime(); if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) { if (LOG.isDebugEnabled()) { - LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " + - now + " mcTime: " + mcTime); + LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " + + now + " mcTime: " + mcTime); } return false; } @@ -136,12 +135,11 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { List boundaries = getCompactBoundariesForMajor(filesToCompact, now); boolean[] filesInWindow = new boolean[boundaries.size()]; - for (HStoreFile file: filesToCompact) { + for (HStoreFile file : filesToCompact) { OptionalLong minTimestamp = file.getMinimumTimestamp(); long oldest = minTimestamp.isPresent() ? 
now - minTimestamp.getAsLong() : Long.MIN_VALUE; if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) { - LOG.debug("Major compaction triggered on store " + this - + "; for TTL maintenance"); + LOG.debug("Major compaction triggered on store " + this + "; for TTL maintenance"); return true; } if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) { @@ -152,19 +150,19 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { } int lowerWindowIndex = - Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE)); + Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE)); int upperWindowIndex = - Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE)); + Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE)); // Handle boundary conditions and negative values of binarySearch lowerWindowIndex = (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex; upperWindowIndex = (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex; if (lowerWindowIndex != upperWindowIndex) { - LOG.debug("Major compaction triggered on store " + this + "; because file " - + file.getPath() + " has data with timestamps cross window boundaries"); + LOG.debug("Major compaction triggered on store " + this + "; because file " + file.getPath() + + " has data with timestamps cross window boundaries"); return true; } else if (filesInWindow[upperWindowIndex]) { - LOG.debug("Major compaction triggered on store " + this + - "; because there are more than one file in some windows"); + LOG.debug("Major compaction triggered on store " + this + + "; because there are more than one file in some windows"); return true; } else { filesInWindow[upperWindowIndex] = true; @@ -173,23 +171,24 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { } float blockLocalityIndex = hdfsBlocksDistribution - .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); + .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) { LOG.debug("Major compaction triggered on store " + this - + "; to make hdfs blocks local, current blockLocalityIndex is " - + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + "; to make hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex + + " (min " + comConf.getMinLocalityToForceCompact() + ")"); return true; } - LOG.debug("Skipping major compaction of " + this + - ", because the files are already major compacted"); + LOG.debug( + "Skipping major compaction of " + this + ", because the files are already major compacted"); return false; } @Override protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { - CompactionRequestImpl result = tryingMajor ? selectMajorCompaction(candidateSelection) + CompactionRequestImpl result = tryingMajor + ? 
selectMajorCompaction(candidateSelection) : selectMinorCompaction(candidateSelection, mayUseOffPeak, mayBeStuck); if (LOG.isDebugEnabled()) { LOG.debug("Generated compaction request: " + result); @@ -201,8 +200,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { long now = EnvironmentEdgeManager.currentTime(); List boundaries = getCompactBoundariesForMajor(candidateSelection, now); Map boundariesPolicies = getBoundariesStoragePolicyForMajor(boundaries, now); - return new DateTieredCompactionRequest(candidateSelection, - boundaries, boundariesPolicies); + return new DateTieredCompactionRequest(candidateSelection, boundaries, boundariesPolicies); } /** @@ -214,18 +212,18 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { * data into the same compaction windows, guaranteeing contiguous compaction based on sequence id. */ public CompactionRequestImpl selectMinorCompaction(ArrayList candidateSelection, - boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { + boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { long now = EnvironmentEdgeManager.currentTime(); long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now); List> storefileMaxTimestampPairs = - Lists.newArrayListWithCapacity(candidateSelection.size()); + Lists.newArrayListWithCapacity(candidateSelection.size()); long maxTimestampSeen = Long.MIN_VALUE; for (HStoreFile storeFile : candidateSelection) { // if there is out-of-order data, // we put them in the same window as the last file in increasing order maxTimestampSeen = - Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp().orElse(Long.MIN_VALUE)); + Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp().orElse(Long.MIN_VALUE)); storefileMaxTimestampPairs.add(new Pair<>(storeFile, maxTimestampSeen)); } Collections.reverse(storefileMaxTimestampPairs); @@ -233,7 +231,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { CompactionWindow window = getIncomingWindow(now); int minThreshold = comConf.getDateTieredIncomingWindowMin(); PeekingIterator> it = - Iterators.peekingIterator(storefileMaxTimestampPairs.iterator()); + Iterators.peekingIterator(storefileMaxTimestampPairs.iterator()); while (it.hasNext()) { if (window.compareToTimestamp(oldestToCompact) < 0) { break; @@ -268,27 +266,28 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { } private DateTieredCompactionRequest generateCompactionRequest(ArrayList storeFiles, - CompactionWindow window, boolean mayUseOffPeak, boolean mayBeStuck, int minThreshold, - long now) throws IOException { + CompactionWindow window, boolean mayUseOffPeak, boolean mayBeStuck, int minThreshold, long now) + throws IOException { // The files has to be in ascending order for ratio-based compaction to work right // and removeExcessFile to exclude youngest files. Collections.reverse(storeFiles); // Compact everything in the window if have more files than comConf.maxBlockingFiles compactionPolicyPerWindow.setMinThreshold(minThreshold); - ArrayList storeFileSelection = mayBeStuck ? storeFiles + ArrayList storeFileSelection = mayBeStuck + ? storeFiles : compactionPolicyPerWindow.applyCompactionPolicy(storeFiles, mayUseOffPeak, false); if (storeFileSelection != null && !storeFileSelection.isEmpty()) { // If there is any file in the window excluded from compaction, // only one file will be output from compaction. 
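The boundary lookups in shouldPerformMajorCompaction above rely on Collections.binarySearch returning (-(insertionPoint) - 1) on a miss; Math.abs(index + 2) then yields the index of the window whose start is at or below the timestamp. A runnable sketch over an ascending boundary list (the sample boundaries are made up):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

final class WindowIndexSketch {
  static int windowIndex(List<Long> ascendingBoundaries, long timestamp) {
    int idx = Collections.binarySearch(ascendingBoundaries, timestamp);
    // Exact hit: use it. Miss: map the encoded insertion point onto the enclosing window.
    return idx < 0 ? Math.abs(idx + 2) : idx;
  }

  public static void main(String[] args) {
    List<Long> boundaries = Arrays.asList(Long.MIN_VALUE, 0L, 100L, 200L);
    System.out.println(windowIndex(boundaries, 150L)); // prints 2 -> the window starting at 100
  }
}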
- boolean singleOutput = storeFiles.size() != storeFileSelection.size() || - comConf.useDateTieredSingleOutputForMinorCompaction(); + boolean singleOutput = storeFiles.size() != storeFileSelection.size() + || comConf.useDateTieredSingleOutputForMinorCompaction(); List boundaries = getCompactionBoundariesForMinor(window, singleOutput); // we want to generate policy to boundaries for minor compaction Map boundaryPolicyMap = getBoundariesStoragePolicyForMinor(singleOutput, window, now); - DateTieredCompactionRequest result = new DateTieredCompactionRequest(storeFileSelection, - boundaries, boundaryPolicyMap); + DateTieredCompactionRequest result = + new DateTieredCompactionRequest(storeFileSelection, boundaries, boundaryPolicyMap); return result; } return null; @@ -298,15 +297,14 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { * Return a list of boundaries for multiple compaction output in ascending order. */ private List getCompactBoundariesForMajor(Collection filesToCompact, long now) { - long minTimestamp = - filesToCompact.stream().mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min() - .orElse(Long.MAX_VALUE); + long minTimestamp = filesToCompact.stream() + .mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min().orElse(Long.MAX_VALUE); List boundaries = new ArrayList<>(); // Add startMillis of all windows between now and min timestamp - for (CompactionWindow window = getIncomingWindow(now); window - .compareToTimestamp(minTimestamp) > 0; window = window.nextEarlierWindow()) { + for (CompactionWindow window = getIncomingWindow(now); window.compareToTimestamp(minTimestamp) + > 0; window = window.nextEarlierWindow()) { boundaries.add(window.startMillis()); } boundaries.add(Long.MIN_VALUE); @@ -318,7 +316,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { * @return a list of boundaries for multiple compaction output from minTimestamp to maxTimestamp. */ private static List getCompactionBoundariesForMinor(CompactionWindow window, - boolean singleOutput) { + boolean singleOutput) { List boundaries = new ArrayList<>(); boundaries.add(Long.MIN_VALUE); if (!singleOutput) { @@ -336,13 +334,13 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { return LongMath.checkedSubtract(now, maxAgeMillis); } catch (ArithmeticException ae) { LOG.warn("Value for " + CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY + ": " - + maxAgeMillis + ". All the files will be eligible for minor compaction."); + + maxAgeMillis + ". All the files will be eligible for minor compaction."); return Long.MIN_VALUE; } } private Map getBoundariesStoragePolicyForMinor(boolean singleOutput, - CompactionWindow window, long now) { + CompactionWindow window, long now) { Map boundariesPolicy = new HashMap<>(); if (!comConf.isDateTieredStoragePolicyEnable()) { return boundariesPolicy; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java index ddf9a0ce2ef..8311b6d3664 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,8 +24,8 @@ import java.util.Map; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.yetus.audience.InterfaceAudience; -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS", - justification="It is intended to use the same equal method as superclass") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_DOESNT_OVERRIDE_EQUALS", + justification = "It is intended to use the same equal method as superclass") @InterfaceAudience.Private public class DateTieredCompactionRequest extends CompactionRequestImpl { private List boundaries; @@ -33,7 +33,7 @@ public class DateTieredCompactionRequest extends CompactionRequestImpl { private Map boundariesPolicies; public DateTieredCompactionRequest(Collection files, List boundaryList, - Map boundaryPolicyMap) { + Map boundaryPolicyMap) { super(files); boundaries = boundaryList; boundariesPolicies = boundaryPolicyMap; @@ -50,6 +50,6 @@ public class DateTieredCompactionRequest extends CompactionRequestImpl { @Override public String toString() { return super.toString() + " boundaries=" + Arrays.toString(boundaries.toArray()) - + " boundariesPolicies="+boundariesPolicies.toString(); + + " boundariesPolicies=" + boundariesPolicies.toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java index c8c10e16ff1..b5911b0cec4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,16 +51,16 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor compact(final CompactionRequestImpl request, final List lowerBoundaries, - final Map lowerBoundariesPolicies, - ThroughputController throughputController, User user) throws IOException { + final Map lowerBoundariesPolicies, ThroughputController throughputController, + User user) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Executing compaction with " + lowerBoundaries.size() - + "windows, lower boundaries: " + lowerBoundaries); + + "windows, lower boundaries: " + lowerBoundaries); } return compact(request, defaultScannerFactory, @@ -70,21 +70,17 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor writerCreationTracker) throws IOException { - DateTieredMultiFileWriter writer = new DateTieredMultiFileWriter( - lowerBoundaries, - lowerBoundariesPolicies, - needEmptyFile(request)); + DateTieredMultiFileWriter writer = new DateTieredMultiFileWriter(lowerBoundaries, + lowerBoundariesPolicies, needEmptyFile(request)); initMultiWriter(writer, scanner, fd, shouldDropBehind, major, writerCreationTracker); return writer; } - }, - throughputController, - user); + }, throughputController, user); } @Override protected List commitWriter(DateTieredMultiFileWriter writer, FileDetails fd, - CompactionRequestImpl request) throws IOException { + CompactionRequestImpl request) throws IOException { List pathList = writer.commitWriters(fd.maxSeqId, request.isAllFiles(), request.getFiles()); return pathList; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 0e91d8870b6..eb803c3e2a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,8 +51,8 @@ public class DefaultCompactor extends Compactor { public StoreFileWriter createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind, boolean major, Consumer writerCreationTracker) throws IOException { - return DefaultCompactor.this - .createWriter(fd, shouldDropBehind, major, writerCreationTracker); + return DefaultCompactor.this.createWriter(fd, shouldDropBehind, major, + writerCreationTracker); } }; @@ -60,13 +60,13 @@ public class DefaultCompactor extends Compactor { * Do a minor/major compaction on an explicit set of storefiles from a Store. 
*/ public List compact(final CompactionRequestImpl request, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, User user) throws IOException { return compact(request, defaultScannerFactory, writerFactory, throughputController, user); } @Override protected List commitWriter(StoreFileWriter writer, FileDetails fd, - CompactionRequestImpl request) throws IOException { + CompactionRequestImpl request) throws IOException { List newFiles = Lists.newArrayList(writer.getPath()); writer.appendMetadata(fd.maxSeqId, request.isAllFiles(), request.getFiles()); writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java index 76bf1d7ac47..2b54081642f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; @@ -30,10 +28,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Class to pick which files if any to compact together. - * - * This class will search all possibilities for different and if it gets stuck it will choose - * the smallest set of files to compact. + * Class to pick which files if any to compact together. This class will search all possibilities + * for different and if it gets stuck it will choose the smallest set of files to compact. */ @InterfaceAudience.Private public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { @@ -41,25 +37,25 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { /** * Constructor for ExploringCompactionPolicy. - * @param conf The configuration object + * @param conf The configuration object * @param storeConfigInfo An object to provide info about the store. */ public ExploringCompactionPolicy(final Configuration conf, - final StoreConfigInformation storeConfigInfo) { + final StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); } @Override protected final ArrayList applyCompactionPolicy(ArrayList candidates, - boolean mayUseOffPeak, boolean mightBeStuck) throws IOException { + boolean mayUseOffPeak, boolean mightBeStuck) throws IOException { return new ArrayList<>(applyCompactionPolicy(candidates, mightBeStuck, mayUseOffPeak, comConf.getMinFilesToCompact(), comConf.getMaxFilesToCompact())); } public List applyCompactionPolicy(List candidates, boolean mightBeStuck, - boolean mayUseOffPeak, int minFiles, int maxFiles) { - final double currentRatio = mayUseOffPeak - ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); + boolean mayUseOffPeak, int minFiles, int maxFiles) { + final double currentRatio = + mayUseOffPeak ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); // Start off choosing nothing. 
List bestSelection = new ArrayList<>(0); @@ -71,8 +67,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { // Consider every starting place. for (int start = 0; start < candidates.size(); start++) { // Consider every different sub list permutation in between start and end with min files. - for (int currentEnd = start + minFiles - 1; - currentEnd < candidates.size(); currentEnd++) { + for (int currentEnd = start + minFiles - 1; currentEnd < candidates.size(); currentEnd++) { List potentialMatchFiles = candidates.subList(start, currentEnd + 1); // Sanity checks @@ -87,7 +82,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { // have to be read if this set of files is compacted. long size = getTotalStoreSize(potentialMatchFiles); - // Store the smallest set of files. This stored set of files will be used + // Store the smallest set of files. This stored set of files will be used // if it looks like the algorithm is stuck. if (mightBeStuck && size < smallestSize) { smallest = potentialMatchFiles; @@ -99,8 +94,9 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { } ++opts; - if (size >= comConf.getMinCompactSize() - && !filesInRatio(potentialMatchFiles, currentRatio)) { + if ( + size >= comConf.getMinCompactSize() && !filesInRatio(potentialMatchFiles, currentRatio) + ) { continue; } @@ -113,25 +109,26 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { } } if (bestSelection.isEmpty() && mightBeStuck) { - LOG.debug("Exploring compaction algorithm has selected " + smallest.size() - + " files of size "+ smallestSize + " because the store might be stuck"); + LOG.debug("Exploring compaction algorithm has selected " + smallest.size() + " files of size " + + smallestSize + " because the store might be stuck"); return new ArrayList<>(smallest); } - LOG.debug("Exploring compaction algorithm has selected {} files of size {} starting at " + - "candidate #{} after considering {} permutations with {} in ratio", bestSelection.size(), - bestSize, bestStart, opts, optsInRatio); + LOG.debug( + "Exploring compaction algorithm has selected {} files of size {} starting at " + + "candidate #{} after considering {} permutations with {} in ratio", + bestSelection.size(), bestSize, bestStart, opts, optsInRatio); return new ArrayList<>(bestSelection); } /** - * Select at least one file in the candidates list to compact, through choosing files - * from the head to the index that the accumulation length larger the max compaction size. - * This method is a supplementary of the selectSimpleCompaction() method, aims to make sure - * at least one file can be selected to compact, for compactions like L0 files, which need to - * compact all files and as soon as possible. + * Select at least one file in the candidates list to compact, through choosing files from the + * head to the index that the accumulation length larger the max compaction size. This method is a + * supplementary of the selectSimpleCompaction() method, aims to make sure at least one file can + * be selected to compact, for compactions like L0 files, which need to compact all files and as + * soon as possible. 
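To make the loop above easier to follow: ExploringCompactionPolicy tries every contiguous run of candidates with at least minFiles members, discards runs that violate the size ratio, and keeps the run with the most files, breaking ties by smaller total size. A self-contained sketch of that search over plain file sizes; ExploringSelectionSketch and its methods are invented for illustration and leave out the off-peak, max-size and "stuck" handling:

    import java.util.Arrays;

    public class ExploringSelectionSketch {
      /** Every file must be no larger than ratio * (sum of the other files in the selection). */
      static boolean filesInRatio(long[] sizes, int from, int to, double ratio) {
        long total = 0;
        for (int i = from; i <= to; i++) {
          total += sizes[i];
        }
        for (int i = from; i <= to; i++) {
          if (sizes[i] > (total - sizes[i]) * ratio) {
            return false;
          }
        }
        return true;
      }

      /** Enumerates contiguous runs and keeps the one with the most files, breaking ties by smaller total size. */
      static int[] explore(long[] sizes, int minFiles, int maxFiles, double ratio) {
        int bestStart = -1, bestCount = 0;
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < sizes.length; start++) {
          for (int end = start + minFiles - 1; end < sizes.length; end++) {
            int count = end - start + 1;
            if (count > maxFiles) {
              break;
            }
            long size = 0;
            for (int i = start; i <= end; i++) {
              size += sizes[i];
            }
            if (!filesInRatio(sizes, start, end, ratio)) {
              continue;
            }
            if (count > bestCount || (count == bestCount && size < bestSize)) {
              bestStart = start;
              bestCount = count;
              bestSize = size;
            }
          }
        }
        return new int[] { bestStart, bestCount }; // start index and length of the chosen run, or {-1, 0}
      }

      public static void main(String[] args) {
        long[] sizes = { 100, 12, 10, 9, 8 }; // oldest first
        System.out.println(Arrays.toString(explore(sizes, 3, 10, 1.2))); // [1, 4]
      }
    }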
*/ public List selectCompactFiles(final List candidates, int maxFiles, - boolean isOffpeak) { + boolean isOffpeak) { long selectedSize = 0L; for (int end = 0; end < Math.min(candidates.size(), maxFiles); end++) { selectedSize += candidates.get(end).getReader().length(); @@ -143,17 +140,17 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { } private boolean isBetterSelection(List bestSelection, long bestSize, - List selection, long size, boolean mightBeStuck) { + List selection, long size, boolean mightBeStuck) { if (mightBeStuck && bestSize > 0 && size > 0) { // Keep the selection that removes most files for least size. That penaltizes adding // large files to compaction, but not small files, so we don't become totally inefficient // (might want to tweak that in future). Also, given the current order of looking at // permutations, prefer earlier files and smaller selection if the difference is small. final double REPLACE_IF_BETTER_BY = 1.05; - double thresholdQuality = ((double)bestSelection.size() / bestSize) * REPLACE_IF_BETTER_BY; - return thresholdQuality < ((double)selection.size() / size); + double thresholdQuality = ((double) bestSelection.size() / bestSize) * REPLACE_IF_BETTER_BY; + return thresholdQuality < ((double) selection.size() / size); } - // Keep if this gets rid of more files. Or the same number of files for less io. + // Keep if this gets rid of more files. Or the same number of files for less io. return selection.size() > bestSelection.size() || (selection.size() == bestSelection.size() && size < bestSize); } @@ -168,10 +165,9 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { } /** - * Check that all files satisfy the constraint - * FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio. - * - * @param files List of store files to consider as a compaction candidate. + * Check that all files satisfy the constraint FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) + * ) * Ratio. + * @param files List of store files to consider as a compaction candidate. * @param currentRatio The ratio to use. * @return a boolean if these files satisfy the ratio constraints. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java index 2ec010807ce..b5e86c589cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
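selectCompactFiles above is the fallback that guarantees progress for L0-style compactions: it keeps taking files from the head of the oldest-first candidate list until the accumulated size reaches the maximum compaction size. A hedged sketch of that accumulation rule (SelectCompactFilesSketch is a made-up name, and the real method returns the sublist rather than a count):

    public class SelectCompactFilesSketch {
      /**
       * Takes files from the head of the oldest-first candidate list until the accumulated size
       * reaches maxCompactSize, always keeping at least one file when candidates exist.
       */
      static int filesToTake(long[] sizes, int maxFiles, long maxCompactSize) {
        long selected = 0;
        int taken = 0;
        for (int i = 0; i < Math.min(sizes.length, maxFiles); i++) {
          selected += sizes[i];
          taken = i + 1;
          if (selected >= maxCompactSize) {
            break;
          }
        }
        return taken;
      }

      public static void main(String[] args) {
        long[] sizes = { 40, 30, 20, 10 };
        System.out.println(filesToTake(sizes, 10, 60)); // 2
      }
    }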
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.apache.hbase.thirdparty.com.google.common.math.LongMath; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ExponentialCompactionWindowFactory extends CompactionWindowFactory { private static final Logger LOG = - LoggerFactory.getLogger(ExponentialCompactionWindowFactory.class); + LoggerFactory.getLogger(ExponentialCompactionWindowFactory.class); public static final String BASE_WINDOW_MILLIS_KEY = "hbase.hstore.compaction.date.tiered.base.window.millis"; @@ -80,12 +80,14 @@ public class ExponentialCompactionWindowFactory extends CompactionWindowFactory public Window nextEarlierWindow() { // Don't promote to the next tier if there is not even 1 window at current tier // or if the next window crosses the max age. - if (divPosition % windowsPerTier > 0 - || startMillis() - windowMillis * windowsPerTier < maxTierAgeCutoff) { + if ( + divPosition % windowsPerTier > 0 + || startMillis() - windowMillis * windowsPerTier < maxTierAgeCutoff + ) { return new Window(windowMillis, divPosition - 1, maxTierAgeCutoff); } else { return new Window(windowMillis * windowsPerTier, divPosition / windowsPerTier - 1, - maxTierAgeCutoff); + maxTierAgeCutoff); } } @@ -126,8 +128,8 @@ public class ExponentialCompactionWindowFactory extends CompactionWindowFactory Configuration conf = comConf.conf; baseWindowMillis = conf.getLong(BASE_WINDOW_MILLIS_KEY, 3600000 * 6); windowsPerTier = conf.getInt(WINDOWS_PER_TIER_KEY, 4); - maxTierAgeMillis = conf.getLong(MAX_TIER_AGE_MILLIS_KEY, - comConf.getDateTieredMaxStoreFileAgeMillis()); + maxTierAgeMillis = + conf.getLong(MAX_TIER_AGE_MILLIS_KEY, comConf.getDateTieredMaxStoreFileAgeMillis()); LOG.info(toString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java index 344b90d5f85..716f95a8dac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; @@ -33,22 +31,18 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * - * FIFO compaction policy selects only files which have all cells expired. - * The column family MUST have non-default TTL. One of the use cases for this - * policy is when we need to store raw data which will be post-processed later - * and discarded completely after quite short period of time. Raw time-series vs. - * time-based roll up aggregates and compacted time-series. We collect raw time-series - * and store them into CF with FIFO compaction policy, periodically we run task - * which creates roll up aggregates and compacts time-series, the original raw data - * can be discarded after that. - * + * FIFO compaction policy selects only files which have all cells expired. 
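The Window.nextEarlierWindow hunk above is the heart of the exponential tiering: windows one tier older are windowsPerTier times wider, and promotion to the wider tier only happens once a full tier of windows has been crossed. A simplified sketch that ignores the max-tier-age cutoff; the defaults used in main (6 h base window, 4 windows per tier) come from the configuration shown above, everything else is illustrative:

    public class ExponentialWindowSketch {
      /** A window covers [div * millis, (div + 1) * millis); tier width grows by windowsPerTier. */
      static final class Window {
        final long millis;
        final long div;

        Window(long millis, long div) {
          this.millis = millis;
          this.div = div;
        }

        long start() {
          return div * millis;
        }

        /** Moves one window earlier, widening by windowsPerTier once a full tier has been crossed. */
        Window nextEarlier(int windowsPerTier) {
          if (div % windowsPerTier != 0) {
            return new Window(millis, div - 1);
          }
          return new Window(millis * windowsPerTier, div / windowsPerTier - 1);
        }
      }

      public static void main(String[] args) {
        long base = 6L * 3600 * 1000; // 6h base window, 4 windows per tier (the documented defaults)
        Window w = new Window(base, Math.floorDiv(System.currentTimeMillis(), base));
        for (int i = 0; i < 8; i++) {
          System.out.println("window " + i + ": width=" + w.millis / 3600_000L + "h, start=" + w.start());
          w = w.nextEarlier(4);
        }
      }
    }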
The column family MUST + * have non-default TTL. One of the use cases for this policy is when we need to store raw data + * which will be post-processed later and discarded completely after quite short period of time. Raw + * time-series vs. time-based roll up aggregates and compacted time-series. We collect raw + * time-series and store them into CF with FIFO compaction policy, periodically we run task which + * creates roll up aggregates and compacts time-series, the original raw data can be discarded after + * that. */ @InterfaceAudience.Private public class FIFOCompactionPolicy extends ExploringCompactionPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(FIFOCompactionPolicy.class); + private static final Logger LOG = LoggerFactory.getLogger(FIFOCompactionPolicy.class); public FIFOCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); @@ -56,15 +50,15 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { @Override public CompactionRequestImpl selectCompaction(Collection candidateFiles, - List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, - boolean forceMajor) throws IOException { - if(forceMajor){ + List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, + boolean forceMajor) throws IOException { + if (forceMajor) { LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag."); } boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate selection to the parent policy."); - return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, + return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor); } @@ -78,7 +72,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { public boolean shouldPerformMajorCompaction(Collection filesToCompact) throws IOException { boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate to the parent policy."); return super.shouldPerformMajorCompaction(filesToCompact); } @@ -87,9 +81,9 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { @Override public boolean needsCompaction(Collection storeFiles, - List filesCompacting) { + List filesCompacting) { boolean isAfterSplit = StoreUtils.hasReferences(storeFiles); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate to the parent policy."); return super.needsCompaction(storeFiles, filesCompacting); } @@ -126,7 +120,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { } private Collection getExpiredStores(Collection files, - Collection filesCompacting) { + Collection filesCompacting) { long currentTime = EnvironmentEdgeManager.currentTime(); Collection expiredStores = new ArrayList<>(); for (HStoreFile sf : files) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java index b920de2b57d..979ec368cc4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java @@ -17,18 +17,25 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import 
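As the FIFOCompactionPolicy javadoc above says, the policy only ever "compacts" files in which every cell has expired, which amounts to comparing each file's maximum timestamp against the column-family TTL. A minimal sketch of that expiry test (FifoExpirySketch is a made-up name; the real code also skips files that are currently being compacted):

    import java.util.ArrayList;
    import java.util.List;

    public class FifoExpirySketch {
      /** FIFO compaction only selects files whose newest cell has already passed the column family TTL. */
      static List<Integer> expiredFileIndexes(long[] maxTimestamps, long ttlMillis, long now) {
        List<Integer> expired = new ArrayList<>();
        for (int i = 0; i < maxTimestamps.length; i++) {
          if (maxTimestamps[i] < now - ttlMillis) {
            expired.add(i); // every cell in this file is older than the TTL, so the file can be dropped
          }
        }
        return expired;
      }

      public static void main(String[] args) {
        long[] maxTs = { 10_000L, 60_000L, 95_000L };
        System.out.println(expiredFileIndexes(maxTs, 30_000L, 100_000L)); // [0, 1]
      }
    }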
org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; @InterfaceAudience.Private public abstract class OffPeakHours { private static final Logger LOG = LoggerFactory.getLogger(OffPeakHours.class); public static final OffPeakHours DISABLED = new OffPeakHours() { - @Override public boolean isOffPeakHour() { return false; } - @Override public boolean isOffPeakHour(int targetHour) { return false; } + @Override + public boolean isOffPeakHour() { + return false; + } + + @Override + public boolean isOffPeakHour(int targetHour) { + return false; + } }; public static OffPeakHours getInstance(Configuration conf) { @@ -39,18 +46,17 @@ public abstract class OffPeakHours { /** * @param startHour inclusive - * @param endHour exclusive + * @param endHour exclusive */ public static OffPeakHours getInstance(int startHour, int endHour) { if (startHour == -1 && endHour == -1) { return DISABLED; } - if (! isValidHour(startHour) || ! isValidHour(endHour)) { + if (!isValidHour(startHour) || !isValidHour(endHour)) { if (LOG.isWarnEnabled()) { - LOG.warn("Ignoring invalid start/end hour for peak hour : start = " + - startHour + " end = " + endHour + - ". Valid numbers are [0-23]"); + LOG.warn("Ignoring invalid start/end hour for peak hour : start = " + startHour + " end = " + + endHour + ". Valid numbers are [0-23]"); } return DISABLED; } @@ -82,7 +88,7 @@ public abstract class OffPeakHours { /** * @param startHour inclusive - * @param endHour exclusive + * @param endHour exclusive */ OffPeakHoursImpl(int startHour, int endHour) { this.startHour = startHour; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index 425df1bb10b..932b18d6180 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.OptionalLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.HStore; @@ -37,16 +35,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * The default algorithm for selecting files for compaction. - * Combines the compaction configuration and the provisional file selection that - * it's given to produce the list of suitable candidates for compaction. + * The default algorithm for selecting files for compaction. Combines the compaction configuration + * and the provisional file selection that it's given to produce the list of suitable candidates for + * compaction. 
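OffPeakHours above treats startHour as inclusive and endHour as exclusive, and the configured range may wrap past midnight. A small sketch of the wrap-around check, assuming both hours have already been validated to [0, 23]; the class and method names are illustrative:

    public class OffPeakHoursSketch {
      /** startHour is inclusive, endHour exclusive; the range may wrap around midnight (e.g. 22 -> 6). */
      static boolean isOffPeakHour(int startHour, int endHour, int targetHour) {
        if (startHour <= endHour) {
          return startHour <= targetHour && targetHour < endHour;
        }
        return targetHour < endHour || startHour <= targetHour;
      }

      public static void main(String[] args) {
        System.out.println(isOffPeakHour(22, 6, 23)); // true, wraps past midnight
        System.out.println(isOffPeakHour(22, 6, 7));  // false
        System.out.println(isOffPeakHour(0, 5, 3));   // true
      }
    }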
*/ @InterfaceAudience.Private public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { private static final Logger LOG = LoggerFactory.getLogger(RatioBasedCompactionPolicy.class); - public RatioBasedCompactionPolicy(Configuration conf, - StoreConfigInformation storeConfigInfo) { + public RatioBasedCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); } @@ -68,14 +65,14 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) { String regionInfo; if (this.storeConfigInfo != null && this.storeConfigInfo instanceof HStore) { - regionInfo = ((HStore)this.storeConfigInfo).getRegionInfo().getRegionNameAsString(); + regionInfo = ((HStore) this.storeConfigInfo).getRegionInfo().getRegionNameAsString(); } else { regionInfo = this.toString(); } // Major compaction time has elapsed. long cfTTL = HConstants.FOREVER; if (this.storeConfigInfo != null) { - cfTTL = this.storeConfigInfo.getStoreFileTtl(); + cfTTL = this.storeConfigInfo.getStoreFileTtl(); } if (filesToCompact.size() == 1) { // Single file @@ -83,19 +80,18 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { OptionalLong minTimestamp = sf.getMinimumTimestamp(); long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE; if (sf.isMajorCompactionResult() && (cfTTL == Long.MAX_VALUE || oldest < cfTTL)) { - float blockLocalityIndex = - sf.getHDFSBlockDistribution().getBlockLocalityIndex( - DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); + float blockLocalityIndex = sf.getHDFSBlockDistribution() + .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) { LOG.debug("Major compaction triggered on only store " + regionInfo - + "; to make hdfs blocks local, current blockLocalityIndex is " - + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + "; to make hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex + + " (min " + comConf.getMinLocalityToForceCompact() + ")"); result = true; } else { LOG.debug("Skipping major compaction of " + regionInfo - + " because one (major) compacted file only, oldestTime " + oldest - + "ms is < TTL=" + cfTTL + " and blockLocalityIndex is " + blockLocalityIndex - + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + " because one (major) compacted file only, oldestTime " + oldest + "ms is < TTL=" + + cfTTL + " and blockLocalityIndex is " + blockLocalityIndex + " (min " + + comConf.getMinLocalityToForceCompact() + ")"); } } else if (cfTTL != HConstants.FOREVER && oldest > cfTTL) { LOG.debug("Major compaction triggered on store " + regionInfo @@ -113,48 +109,33 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { } @Override - protected CompactionRequestImpl createCompactionRequest(ArrayList - candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) - throws IOException { + protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, + boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (!tryingMajor) { filterBulk(candidateSelection); candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck); - candidateSelection = checkMinFilesCriteria(candidateSelection, - comConf.getMinFilesToCompact()); + candidateSelection = + 
checkMinFilesCriteria(candidateSelection, comConf.getMinFilesToCompact()); } return new CompactionRequestImpl(candidateSelection); } /** - * -- Default minor compaction selection algorithm: - * choose CompactSelection from candidates -- - * First exclude bulk-load files if indicated in configuration. - * Start at the oldest file and stop when you find the first file that - * meets compaction criteria: - * (1) a recently-flushed, small file (i.e. <= minCompactSize) - * OR - * (2) within the compactRatio of sum(newer_files) - * Given normal skew, any newer files will also meet this criteria - *

      - * Additional Note: - * If fileSizes.size() >> maxFilesToCompact, we will recurse on - * compact(). Consider the oldest files first to avoid a - * situation where we always compact [end-threshold,end). Then, the - * last file becomes an aggregate of the previous compactions. - * - * normal skew: - * - * older ----> newer (increasing seqID) - * _ - * | | _ - * | | | | _ - * --|-|- |-|- |-|---_-------_------- minCompactSize - * | | | | | | | | _ | | - * | | | | | | | | | | | | - * | | | | | | | | | | | | - * @param candidates pre-filtrate - * @return filtered subset - */ + * -- Default minor compaction selection algorithm: choose CompactSelection from candidates -- + * First exclude bulk-load files if indicated in configuration. Start at the oldest file and stop + * when you find the first file that meets compaction criteria: (1) a recently-flushed, small file + * (i.e. <= minCompactSize) OR (2) within the compactRatio of sum(newer_files) Given normal skew, + * any newer files will also meet this criteria + *

      + * Additional Note: If fileSizes.size() >> maxFilesToCompact, we will recurse on compact(). + * Consider the oldest files first to avoid a situation where we always compact + * [end-threshold,end). Then, the last file becomes an aggregate of the previous compactions. + * normal skew: older ----> newer (increasing seqID) _ | | _ | | | | _ --|-|- |-|- + * |-|---_-------_------- minCompactSize | | | | | | | | _ | | | | | | | | | | | | | | | | | | | | + * | | | | | | + * @param candidates pre-filtrate + * @return filtered subset + */ protected ArrayList applyCompactionPolicy(ArrayList candidates, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (candidates.isEmpty()) { @@ -178,15 +159,14 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { fileSizes[i] = file.getReader().length(); // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo int tooFar = i + comConf.getMaxFilesToCompact() - 1; - sumSize[i] = fileSizes[i] - + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0) + sumSize[i] = fileSizes[i] + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0) - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0); } - - while (countOfFiles - start >= comConf.getMinFilesToCompact() && - fileSizes[start] > Math.max(comConf.getMinCompactSize(), - (long) (sumSize[start + 1] * ratio))) { + while ( + countOfFiles - start >= comConf.getMinFilesToCompact() && fileSizes[start] + > Math.max(comConf.getMinCompactSize(), (long) (sumSize[start + 1] * ratio)) + ) { ++start; } if (start < countOfFiles) { @@ -205,13 +185,13 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { /** * A heuristic method to decide whether to schedule a compaction request - * @param storeFiles files in the store. + * @param storeFiles files in the store. * @param filesCompacting files being scheduled to compact. * @return true to schedule a request. */ @Override public boolean needsCompaction(Collection storeFiles, - List filesCompacting) { + List filesCompacting) { int numCandidates = storeFiles.size() - filesCompacting.size(); return numCandidates >= comConf.getMinFilesToCompact(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java index db469c420ca..1d039de96fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
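The applyCompactionPolicy hunk above implements the classic ratio rule: walk from the oldest file and skip it while it exceeds both minCompactSize and ratio times the combined size of the next maxFilesToCompact - 1 newer files. A runnable sketch of that precomputation and skip loop; RatioBasedSelectionSketch is a made-up name and the bounds checks are reduced to the essentials:

    public class RatioBasedSelectionSketch {
      /**
       * Skips over the oldest files while each one is larger than both minCompactSize and
       * ratio * (size of the next maxFiles - 1 newer files), i.e. the "ratio" minor-compaction rule.
       */
      static int firstEligibleIndex(long[] fileSizes, int minFiles, int maxFiles, long minCompactSize,
        double ratio) {
        int n = fileSizes.length;
        long[] sumSize = new long[n + 1];
        for (int i = n - 1; i >= 0; i--) {
          int tooFar = i + maxFiles - 1;
          sumSize[i] = fileSizes[i] + sumSize[i + 1] - (tooFar < n ? fileSizes[tooFar] : 0);
        }
        int start = 0;
        while (n - start >= minFiles
          && fileSizes[start] > Math.max(minCompactSize, (long) (sumSize[start + 1] * ratio))) {
          start++;
        }
        return start; // candidates[start..n) would be handed on for further checks
      }

      public static void main(String[] args) {
        long[] sizes = { 1000, 500, 60, 50, 40 }; // oldest first
        System.out.println(firstEligibleIndex(sizes, 3, 10, 16, 1.2)); // 2
      }
    }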
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver.compactions; @@ -23,6 +30,7 @@ import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -41,19 +49,19 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { } public List preSelectCompactionForCoprocessor(Collection candidates, - List filesCompacting) { + List filesCompacting) { return getCurrentEligibleFiles(new ArrayList<>(candidates), filesCompacting); } /** * @param candidateFiles candidate files, ordered from oldest to newest by seqId. We rely on - * DefaultStoreFileManager to sort the files by seqId to guarantee contiguous compaction based - * on seqId for data consistency. + * DefaultStoreFileManager to sort the files by seqId to guarantee + * contiguous compaction based on seqId for data consistency. * @return subset copy of candidate list that meets compaction criteria */ public CompactionRequestImpl selectCompaction(Collection candidateFiles, - List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, - boolean forceMajor) throws IOException { + List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, + boolean forceMajor) throws IOException { // Preliminary compaction subject to filters ArrayList candidateSelection = new ArrayList<>(candidateFiles); // Stuck and not compacting enough (estimate). 
It is not guaranteed that we will be @@ -64,9 +72,9 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { >= storeConfigInfo.getBlockingFileCount(); candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting); - LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " + - filesCompacting.size() + " compacting, " + candidateSelection.size() + - " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking"); + LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " + + filesCompacting.size() + " compacting, " + candidateSelection.size() + " eligible, " + + storeConfigInfo.getBlockingFileCount() + " blocking"); // If we can't have all files, we cannot do major anyway boolean isAllFiles = candidateFiles.size() == candidateSelection.size(); @@ -78,8 +86,8 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { // Try a major compaction if this is a user-requested major compaction, // or if we do not have too many files to compact and this was requested as a major compaction boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction) - || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection)) - && (candidateSelection.size() < comConf.getMaxFilesToCompact())); + || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection)) + && (candidateSelection.size() < comConf.getMaxFilesToCompact())); // Or, if there are any references among the candidates. boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection); @@ -99,8 +107,8 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { } protected abstract CompactionRequestImpl createCompactionRequest( - ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, - boolean mayBeStuck) throws IOException; + ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, + boolean mayBeStuck) throws IOException; /** * @param filesToCompact Files to compact. Can be null. @@ -108,11 +116,10 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { */ @Override public abstract boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException; + throws IOException; /** - * @param filesToCompact - * @return When to run next major compaction + * n * @return When to run next major compaction */ public long getNextMajorCompactTime(Collection filesToCompact) { /** Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_PERIOD}. */ @@ -122,8 +129,8 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { } /** - * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, - * that is, +/- 3.5 days (7 days * 0.5). + * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, that + * is, +/- 3.5 days (7 days * 0.5). 
*/ double jitterPct = comConf.getMajorCompactionJitter(); if (jitterPct <= 0) { @@ -154,10 +161,10 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { } public abstract boolean needsCompaction(Collection storeFiles, - List filesCompacting); + List filesCompacting); protected ArrayList getCurrentEligibleFiles(ArrayList candidateFiles, - final List filesCompacting) { + final List filesCompacting) { // candidates = all storefiles not already in compaction queue if (!filesCompacting.isEmpty()) { // exclude all files older than the newest file we're currently @@ -172,19 +179,20 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { /** * @param candidates pre-filtrate - * @return filtered subset exclude all files above maxCompactSize - * Also save all references. We MUST compact them + * @return filtered subset exclude all files above maxCompactSize Also save all references. We + * MUST compact them */ protected ArrayList skipLargeFiles(ArrayList candidates, boolean mayUseOffpeak) { int pos = 0; - while (pos < candidates.size() && !candidates.get(pos).isReference() - && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))) { + while ( + pos < candidates.size() && !candidates.get(pos).isReference() + && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak)) + ) { ++pos; } if (pos > 0) { - LOG.debug("Some files are too large. Excluding " + pos - + " files from compaction candidates"); + LOG.debug("Some files are too large. Excluding " + pos + " files from compaction candidates"); candidates.subList(0, pos).clear(); } return candidates; @@ -200,16 +208,16 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { /** * @param candidates pre-filtrate */ - protected void removeExcessFiles(ArrayList candidates, - boolean isUserCompaction, boolean isMajorCompaction) { + protected void removeExcessFiles(ArrayList candidates, boolean isUserCompaction, + boolean isMajorCompaction) { int excess = candidates.size() - comConf.getMaxFilesToCompact(); if (excess > 0) { if (isMajorCompaction && isUserCompaction) { LOG.debug("Warning, compacting more than " + comConf.getMaxFilesToCompact() - + " files because of a user-requested major compaction"); + + " files because of a user-requested major compaction"); } else { - LOG.debug("Too many admissible files. Excluding " + excess - + " files from compaction candidates"); + LOG.debug( + "Too many admissible files. Excluding " + excess + " files from compaction candidates"); candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear(); } } @@ -220,11 +228,11 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { * @return filtered subset forget the compactionSelection if we don't have enough files */ protected ArrayList checkMinFilesCriteria(ArrayList candidates, - int minFiles) { + int minFiles) { if (candidates.size() < minFiles) { if (LOG.isDebugEnabled()) { - LOG.debug("Not compacting files because we only have " + candidates.size() + - " files ready for compaction. Need " + minFiles + " to initiate."); + LOG.debug("Not compacting files because we only have " + candidates.size() + + " files ready for compaction. 
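getNextMajorCompactTime above spreads periodic major compactions by a jitter of +/- jitterPct * period (by default +/- 3.5 days on a 7-day period) so that many stores do not all major-compact at once. A hedged sketch of that calculation; in the real policy the random choice is seeded from the store files rather than from an arbitrary storeSeed parameter:

    import java.util.Random;

    public class MajorCompactionJitterSketch {
      /**
       * Spreads periodic major compactions by +/- (jitterPct * period). Seeding from something
       * stable (here: a store identifier) keeps the chosen offset consistent across calls.
       */
      static long nextMajorCompactionDelay(long periodMillis, double jitterPct, long storeSeed) {
        if (periodMillis <= 0 || jitterPct <= 0) {
          return periodMillis;
        }
        long jitter = Math.round(periodMillis * jitterPct);
        Random rnd = new Random(storeSeed);
        return periodMillis - jitter + (long) (rnd.nextDouble() * 2 * jitter);
      }

      public static void main(String[] args) {
        long week = 7L * 24 * 3600 * 1000;
        System.out.println(nextMajorCompactionDelay(week, 0.5, 42L)); // somewhere in [3.5d, 10.5d)
      }
    }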
Need " + minFiles + " to initiate."); } candidates.clear(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index 19c5b24a4f6..575b7c352ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,11 @@ package org.apache.hadoop.hbase.regionserver.compactions; import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.OPEN_KEY; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; @@ -41,6 +40,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** @@ -54,15 +54,15 @@ public class StripeCompactionPolicy extends CompactionPolicy { private StripeStoreConfig config; - public StripeCompactionPolicy( - Configuration conf, StoreConfigInformation storeConfigInfo, StripeStoreConfig config) { + public StripeCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo, + StripeStoreConfig config) { super(conf, storeConfigInfo); this.config = config; stripePolicy = new ExploringCompactionPolicy(conf, storeConfigInfo); } public List preSelectFilesForCoprocessor(StripeInformationProvider si, - List filesCompacting) { + List filesCompacting) { // We sincerely hope nobody is messing with us with their coprocessors. // If they do, they are very likely to shoot themselves in the foot. // We'll just exclude all the filesCompacting from the list. @@ -71,20 +71,20 @@ public class StripeCompactionPolicy extends CompactionPolicy { return candidateFiles; } - public StripeCompactionRequest createEmptyRequest( - StripeInformationProvider si, CompactionRequestImpl request) { + public StripeCompactionRequest createEmptyRequest(StripeInformationProvider si, + CompactionRequestImpl request) { // Treat as L0-ish compaction with fixed set of files, and hope for the best. if (si.getStripeCount() > 0) { return new BoundaryStripeCompactionRequest(request, si.getStripeBoundaries()); } - Pair targetKvsAndCount = estimateTargetKvs( - request.getFiles(), this.config.getInitialCount()); - return new SplitStripeCompactionRequest( - request, OPEN_KEY, OPEN_KEY, targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst()); + Pair targetKvsAndCount = + estimateTargetKvs(request.getFiles(), this.config.getInitialCount()); + return new SplitStripeCompactionRequest(request, OPEN_KEY, OPEN_KEY, + targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst()); } public StripeStoreFlusher.StripeFlushRequest selectFlush(CellComparator comparator, - StripeInformationProvider si, int kvCount) { + StripeInformationProvider si, int kvCount) { if (this.config.isUsingL0Flush()) { // L0 is used, return dumb request. 
return new StripeStoreFlusher.StripeFlushRequest(comparator); @@ -93,16 +93,16 @@ public class StripeCompactionPolicy extends CompactionPolicy { // No stripes - start with the requisite count, derive KVs per stripe. int initialCount = this.config.getInitialCount(); return new StripeStoreFlusher.SizeStripeFlushRequest(comparator, initialCount, - kvCount / initialCount); + kvCount / initialCount); } // There are stripes - do according to the boundaries. return new StripeStoreFlusher.BoundaryStripeFlushRequest(comparator, si.getStripeBoundaries()); } public StripeCompactionRequest selectCompaction(StripeInformationProvider si, - List filesCompacting, boolean isOffpeak) throws IOException { + List filesCompacting, boolean isOffpeak) throws IOException { // TODO: first cut - no parallel compactions. To have more fine grained control we - // probably need structure more sophisticated than a list. + // probably need structure more sophisticated than a list. if (!filesCompacting.isEmpty()) { LOG.debug("Not selecting compaction: " + filesCompacting.size() + " files compacting"); return null; @@ -118,8 +118,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { if (StoreUtils.hasReferences(allFiles)) { LOG.debug("There are references in the store; compacting all files"); long targetKvs = estimateTargetKvs(allFiles, config.getInitialCount()).getFirst(); - SplitStripeCompactionRequest request = new SplitStripeCompactionRequest( - allFiles, OPEN_KEY, OPEN_KEY, targetKvs); + SplitStripeCompactionRequest request = + new SplitStripeCompactionRequest(allFiles, OPEN_KEY, OPEN_KEY, targetKvs); request.setMajorRangeFull(); request.getRequest().setAfterSplit(true); return request; @@ -130,7 +130,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { // See if we need to make new stripes. boolean shouldCompactL0 = - this.config.getLevel0MinFiles() <= l0Files.size() || allL0FilesExpired(si); + this.config.getLevel0MinFiles() <= l0Files.size() || allL0FilesExpired(si); if (stripeCount == 0) { if (!shouldCompactL0) { return null; // nothing to do. @@ -142,8 +142,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { if (shouldCompactL0) { if (!canDropDeletesNoL0) { // If we need to compact L0, see if we can add something to it, and drop deletes. - StripeCompactionRequest result = selectSingleStripeCompaction( - si, !shouldSelectL0Files(si), canDropDeletesNoL0, isOffpeak); + StripeCompactionRequest result = + selectSingleStripeCompaction(si, !shouldSelectL0Files(si), canDropDeletesNoL0, isOffpeak); if (result != null) { return result; } @@ -165,10 +165,9 @@ public class StripeCompactionPolicy extends CompactionPolicy { public boolean needsCompactions(StripeInformationProvider si, List filesCompacting) { // Approximation on whether we need compaction. 
- return filesCompacting.isEmpty() - && (StoreUtils.hasReferences(si.getStorefiles()) - || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles()) - || needsSingleStripeCompaction(si) || hasExpiredStripes(si) || allL0FilesExpired(si)); + return filesCompacting.isEmpty() && (StoreUtils.hasReferences(si.getStorefiles()) + || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles()) + || needsSingleStripeCompaction(si) || hasExpiredStripes(si) || allL0FilesExpired(si)); } @Override @@ -195,7 +194,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { } protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformationProvider si, - boolean includeL0, boolean canDropDeletesWithoutL0, boolean isOffpeak) throws IOException { + boolean includeL0, boolean canDropDeletesWithoutL0, boolean isOffpeak) throws IOException { ArrayList> stripes = si.getStripes(); int bqIndex = -1; @@ -206,14 +205,16 @@ public class StripeCompactionPolicy extends CompactionPolicy { // If we want to compact L0 to drop deletes, we only want whole-stripe compactions. // So, pass includeL0 as 2nd parameter to indicate that. List selection = selectSimpleCompaction(stripes.get(i), - !canDropDeletesWithoutL0 && includeL0, isOffpeak, false); + !canDropDeletesWithoutL0 && includeL0, isOffpeak, false); if (selection.isEmpty()) continue; long size = 0; for (HStoreFile sf : selection) { size += sf.getReader().length(); } - if (bqSelection == null || selection.size() > bqSelection.size() || - (selection.size() == bqSelection.size() && size < bqTotalSize)) { + if ( + bqSelection == null || selection.size() > bqSelection.size() + || (selection.size() == bqSelection.size() && size < bqTotalSize) + ) { bqSelection = selection; bqIndex = i; bqTotalSize = size; @@ -238,13 +239,12 @@ public class StripeCompactionPolicy extends CompactionPolicy { Pair kvsAndCount = estimateTargetKvs(filesToCompact, config.getSplitCount()); targetKvs = kvsAndCount.getFirst(); targetCount = kvsAndCount.getSecond(); - splitString = "; the stripe will be split into at most " - + targetCount + " stripes with " + targetKvs + " target KVs"; + splitString = "; the stripe will be split into at most " + targetCount + " stripes with " + + targetKvs + " target KVs"; } - LOG.debug("Found compaction in a stripe with end key [" - + Bytes.toString(si.getEndRow(bqIndex)) + "], with " - + filesToCompact.size() + " files of total size " + bqTotalSize + splitString); + LOG.debug("Found compaction in a stripe with end key [" + Bytes.toString(si.getEndRow(bqIndex)) + + "], with " + filesToCompact.size() + " files of total size " + bqTotalSize + splitString); // See if we can drop deletes. StripeCompactionRequest req; @@ -257,8 +257,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { sfs.addSublist(l0Files); req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries()); } else { - req = new SplitStripeCompactionRequest( - filesToCompact, si.getStartRow(bqIndex), si.getEndRow(bqIndex), targetCount, targetKvs); + req = new SplitStripeCompactionRequest(filesToCompact, si.getStartRow(bqIndex), + si.getEndRow(bqIndex), targetCount, targetKvs); } if (hasAllFiles && (canDropDeletesWithoutL0 || includeL0)) { req.setMajorRange(si.getStartRow(bqIndex), si.getEndRow(bqIndex)); @@ -269,17 +269,17 @@ public class StripeCompactionPolicy extends CompactionPolicy { /** * Selects the compaction of a single stripe using default policy. - * @param sfs Files. + * @param sfs Files. 
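selectSingleStripeCompaction above runs the per-stripe selection for every stripe and keeps the stripe whose selection contains the most files, preferring the smaller total size on ties. A compact sketch of just that choice, taking the per-stripe selections as plain size arrays; StripeChoiceSketch is an invented name and the L0/delete-drop handling is omitted:

    public class StripeChoiceSketch {
      /** Picks the stripe whose selection removes the most files, preferring the smaller total size on ties. */
      static int bestStripe(long[][] selectionSizesPerStripe) {
        int bestIndex = -1, bestCount = 0;
        long bestSize = Long.MAX_VALUE;
        for (int i = 0; i < selectionSizesPerStripe.length; i++) {
          long[] selection = selectionSizesPerStripe[i];
          if (selection.length == 0) {
            continue;
          }
          long size = 0;
          for (long s : selection) {
            size += s;
          }
          if (selection.length > bestCount || (selection.length == bestCount && size < bestSize)) {
            bestIndex = i;
            bestCount = selection.length;
            bestSize = size;
          }
        }
        return bestIndex; // -1 means no stripe produced an eligible selection
      }

      public static void main(String[] args) {
        long[][] perStripe = {
          { 10, 10 },        // stripe 0: 2 files
          { 5, 5, 5 },       // stripe 1: 3 files, 15 bytes -> chosen
          { 100, 100, 100 }  // stripe 2: 3 files but bigger
        };
        System.out.println(bestStripe(perStripe)); // 1
      }
    }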
* @param allFilesOnly Whether a compaction of all-or-none files is needed. * @return The resulting selection. */ - private List selectSimpleCompaction( - List sfs, boolean allFilesOnly, boolean isOffpeak, boolean forceCompact) { - int minFilesLocal = Math.max( - allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles()); + private List selectSimpleCompaction(List sfs, boolean allFilesOnly, + boolean isOffpeak, boolean forceCompact) { + int minFilesLocal = + Math.max(allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles()); int maxFilesLocal = Math.max(this.config.getStripeCompactMaxFiles(), minFilesLocal); - List selected = stripePolicy.applyCompactionPolicy(sfs, false, - isOffpeak, minFilesLocal, maxFilesLocal); + List selected = + stripePolicy.applyCompactionPolicy(sfs, false, isOffpeak, minFilesLocal, maxFilesLocal); if (forceCompact && (selected == null || selected.isEmpty()) && !sfs.isEmpty()) { return stripePolicy.selectCompactFiles(sfs, maxFilesLocal, isOffpeak); } @@ -287,8 +287,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { } private boolean shouldSelectL0Files(StripeInformationProvider si) { - return si.getLevel0Files().size() > this.config.getStripeCompactMaxFiles() || - getTotalFileSize(si.getLevel0Files()) > comConf.getMaxCompactSize(); + return si.getLevel0Files().size() > this.config.getStripeCompactMaxFiles() + || getTotalFileSize(si.getLevel0Files()) > comConf.getMaxCompactSize(); } private StripeCompactionRequest selectL0OnlyCompaction(StripeInformationProvider si) { @@ -317,8 +317,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { return request; } - private StripeCompactionRequest selectExpiredMergeCompaction( - StripeInformationProvider si, boolean canDropDeletesNoL0) { + private StripeCompactionRequest selectExpiredMergeCompaction(StripeInformationProvider si, + boolean canDropDeletesNoL0) { long cfTtl = this.storeConfigInfo.getStoreFileTtl(); if (cfTtl == Long.MAX_VALUE) { return null; // minversion might be set, cannot delete old files @@ -362,7 +362,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addAllSublists(stripes.subList(bestStart, endIndex + 1)); SplitStripeCompactionRequest result = new SplitStripeCompactionRequest(sfs, - si.getStartRow(bestStart), si.getEndRow(endIndex), 1, Long.MAX_VALUE); + si.getStartRow(bestStart), si.getEndRow(endIndex), 1, Long.MAX_VALUE); if (canDropDeletesNoL0) { result.setMajorRangeFull(); } @@ -395,8 +395,10 @@ public class StripeCompactionPolicy extends CompactionPolicy { long timestampCutoff = EnvironmentEdgeManager.currentTime() - cfTtl; for (HStoreFile storeFile : storeFiles) { // Check store file is not empty and has not expired - if (storeFile.getReader().getMaxTimestamp() >= timestampCutoff - && storeFile.getReader().getEntries() != 0) { + if ( + storeFile.getReader().getMaxTimestamp() >= timestampCutoff + && storeFile.getReader().getEntries() != 0 + ) { return false; } } @@ -435,8 +437,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { ratio = newRatio; splitCount += 1.0; } - long kvCount = (long)(getTotalKvCount(files) / splitCount); - return new Pair<>(kvCount, (int)Math.ceil(splitCount)); + long kvCount = (long) (getTotalKvCount(files) / splitCount); + return new Pair<>(kvCount, (int) Math.ceil(splitCount)); } /** Stripe compaction request wrapper. 
*/ @@ -444,28 +446,29 @@ public class StripeCompactionPolicy extends CompactionPolicy { protected CompactionRequestImpl request; protected byte[] majorRangeFromRow = null, majorRangeToRow = null; - public List execute(StripeCompactor compactor, - ThroughputController throughputController) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController) + throws IOException { return execute(compactor, throughputController, null); } + /** - * Executes the request against compactor (essentially, just calls correct overload of - * compact method), to simulate more dynamic dispatch. + * Executes the request against compactor (essentially, just calls correct overload of compact + * method), to simulate more dynamic dispatch. * @param compactor Compactor. * @return result of compact(...) */ public abstract List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException; + ThroughputController throughputController, User user) throws IOException; public StripeCompactionRequest(CompactionRequestImpl request) { this.request = request; } /** - * Sets compaction "major range". Major range is the key range for which all - * the files are included, so they can be treated like major-compacted files. + * Sets compaction "major range". Major range is the key range for which all the files are + * included, so they can be treated like major-compacted files. * @param startRow Left boundary, inclusive. - * @param endRow Right boundary, exclusive. + * @param endRow Right boundary, exclusive. */ public void setMajorRange(byte[] startRow, byte[] endRow) { this.majorRangeFromRow = startRow; @@ -484,40 +487,40 @@ public class StripeCompactionPolicy extends CompactionPolicy { } /** - * Request for stripe compactor that will cause it to split the source files into several - * separate files at the provided boundaries. + * Request for stripe compactor that will cause it to split the source files into several separate + * files at the provided boundaries. */ private static class BoundaryStripeCompactionRequest extends StripeCompactionRequest { private final List targetBoundaries; /** - * @param request Original request. + * @param request Original request. * @param targetBoundaries New files should be written with these boundaries. */ public BoundaryStripeCompactionRequest(CompactionRequestImpl request, - List targetBoundaries) { + List targetBoundaries) { super(request); this.targetBoundaries = targetBoundaries; } public BoundaryStripeCompactionRequest(Collection files, - List targetBoundaries) { + List targetBoundaries) { this(new CompactionRequestImpl(files), targetBoundaries); } @Override - public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController, + User user) throws IOException { return compactor.compact(this.request, this.targetBoundaries, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } } /** - * Request for stripe compactor that will cause it to split the source files into several - * separate files into based on key-value count, as well as file count limit. - * Most of the files will be roughly the same size. The last file may be smaller or larger - * depending on the interplay of the amount of data and maximum number of files allowed. 
+ * Request for stripe compactor that will cause it to split the source files into several separate + * files into based on key-value count, as well as file count limit. Most of the files will be + * roughly the same size. The last file may be smaller or larger depending on the interplay of the + * amount of data and maximum number of files allowed. */ private static class SplitStripeCompactionRequest extends StripeCompactionRequest { private final byte[] startRow, endRow; @@ -525,15 +528,15 @@ public class StripeCompactionPolicy extends CompactionPolicy { private final long targetKvs; /** - * @param request Original request. - * @param startRow Left boundary of the range to compact, inclusive. - * @param endRow Right boundary of the range to compact, exclusive. + * @param request Original request. + * @param startRow Left boundary of the range to compact, inclusive. + * @param endRow Right boundary of the range to compact, exclusive. * @param targetCount The maximum number of stripe to compact into. - * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than - * total number of kvs, all the overflow data goes into the last stripe. + * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than total + * number of kvs, all the overflow data goes into the last stripe. */ - public SplitStripeCompactionRequest(CompactionRequestImpl request, - byte[] startRow, byte[] endRow, int targetCount, long targetKvs) { + public SplitStripeCompactionRequest(CompactionRequestImpl request, byte[] startRow, + byte[] endRow, int targetCount, long targetKvs) { super(request); this.startRow = startRow; this.endRow = endRow; @@ -541,25 +544,27 @@ public class StripeCompactionPolicy extends CompactionPolicy { this.targetKvs = targetKvs; } - public SplitStripeCompactionRequest( - Collection files, byte[] startRow, byte[] endRow, long targetKvs) { + public SplitStripeCompactionRequest(Collection files, byte[] startRow, + byte[] endRow, long targetKvs) { this(files, startRow, endRow, Integer.MAX_VALUE, targetKvs); } - public SplitStripeCompactionRequest(Collection files, - byte[] startRow, byte[] endRow, int targetCount, long targetKvs) { + public SplitStripeCompactionRequest(Collection files, byte[] startRow, + byte[] endRow, int targetCount, long targetKvs) { this(new CompactionRequestImpl(files), startRow, endRow, targetCount, targetKvs); } @Override - public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController, + User user) throws IOException { return compactor.compact(this.request, this.targetCount, this.targetKvs, this.startRow, this.endRow, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } - /** Set major range of the compaction to the entire compaction range. - * See {@link #setMajorRange(byte[], byte[])}. */ + /** + * Set major range of the compaction to the entire compaction range. See + * {@link #setMajorRange(byte[], byte[])}. 
+ */ public void setMajorRangeFull() { setMajorRange(this.startRow, this.endRow); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java index 6413a304d55..96c317b60ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,18 +68,18 @@ public class StripeCompactor extends AbstractMultiOutputCompactor scanners, - ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { + ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { return (majorRangeFromRow == null) - ? StripeCompactor.this.createScanner(store, scanInfo, scanners, scanType, - smallestReadPoint, fd.earliestPutTs) - : StripeCompactor.this.createScanner(store, scanInfo, scanners, smallestReadPoint, - fd.earliestPutTs, majorRangeFromRow, majorRangeToRow); + ? StripeCompactor.this.createScanner(store, scanInfo, scanners, scanType, smallestReadPoint, + fd.earliestPutTs) + : StripeCompactor.this.createScanner(store, scanInfo, scanners, smallestReadPoint, + fd.earliestPutTs, majorRangeFromRow, majorRangeToRow); } } public List compact(CompactionRequestImpl request, final List targetBoundaries, - final byte[] majorRangeFromRow, final byte[] majorRangeToRow, - ThroughputController throughputController, User user) throws IOException { + final byte[] majorRangeFromRow, final byte[] majorRangeToRow, + ThroughputController throughputController, User user) throws IOException { if (LOG.isDebugEnabled()) { StringBuilder sb = new StringBuilder(); sb.append("Executing compaction with " + targetBoundaries.size() + " boundaries:"); @@ -88,9 +88,7 @@ public class StripeCompactor extends AbstractMultiOutputCompactor() { @Override @@ -98,25 +96,21 @@ public class StripeCompactor extends AbstractMultiOutputCompactor writerCreationTracker) throws IOException { StripeMultiFileWriter writer = new StripeMultiFileWriter.BoundaryMultiWriter( - store.getComparator(), - targetBoundaries, - majorRangeFromRow, - majorRangeToRow); + store.getComparator(), targetBoundaries, majorRangeFromRow, majorRangeToRow); initMultiWriter(writer, scanner, fd, shouldDropBehind, major, writerCreationTracker); return writer; } - }, - throughputController, - user); + }, throughputController, user); } - public List compact(CompactionRequestImpl request, final int targetCount, final long targetSize, - final byte[] left, final byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow, - ThroughputController throughputController, User user) throws IOException { + public List compact(CompactionRequestImpl request, final int targetCount, + final long targetSize, final byte[] left, final byte[] right, byte[] majorRangeFromRow, + byte[] majorRangeToRow, ThroughputController throughputController, User user) + throws IOException { if (LOG.isDebugEnabled()) { LOG.debug( "Executing compaction with " + targetSize + " target file size, no more than " + targetCount - + " files, in [" + Bytes.toString(left) + "] [" + Bytes.toString(right) + "] range"); + + " files, in [" + Bytes.toString(left) + "] [" + Bytes.toString(right) + "] 
range"); } return compact(request, new StripeInternalScannerFactory(majorRangeFromRow, majorRangeToRow), new CellSinkFactory() { @@ -126,22 +120,16 @@ public class StripeCompactor extends AbstractMultiOutputCompactor writerCreationTracker) throws IOException { StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter( - store.getComparator(), - targetCount, - targetSize, - left, - right); + store.getComparator(), targetCount, targetSize, left, right); initMultiWriter(writer, scanner, fd, shouldDropBehind, major, writerCreationTracker); return writer; } - }, - throughputController, - user); + }, throughputController, user); } @Override protected List commitWriter(StripeMultiFileWriter writer, FileDetails fd, - CompactionRequestImpl request) throws IOException { + CompactionRequestImpl request) throws IOException { List newFiles = writer.commitWriters(fd.maxSeqId, request.isMajor(), request.getFiles()); assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata."; return newFiles; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index 5d9819ca56a..7f9f95bee68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; /** @@ -63,7 +64,7 @@ public class AssignRegionHandler extends EventHandler { private final RetryCounter retryCounter; public AssignRegionHandler(HRegionServer server, RegionInfo regionInfo, long openProcId, - @Nullable TableDescriptor tableDesc, long masterSystemTime, EventType eventType) { + @Nullable TableDescriptor tableDesc, long masterSystemTime, EventType eventType) { super(server, eventType); this.regionInfo = regionInfo; this.openProcId = openProcId; @@ -81,8 +82,10 @@ public class AssignRegionHandler extends EventHandler { error); HRegionServer rs = getServer(); rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); - if (!rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN, - HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo))) { + if ( + !rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN, + HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo)) + ) { throw new IOException( "Failed to report failed open to master: " + regionInfo.getRegionNameAsString()); } @@ -108,16 +111,15 @@ public class AssignRegionHandler extends EventHandler { if (previous != null) { if (previous) { // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it. - LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + - " - ignoring this new request for this region.", regionName); + LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + + " - ignoring this new request for this region.", regionName); } else { // The region is closing. 
This is possible as we will update the region state to CLOSED when // calling reportRegionStateTransition, so the HMaster will think the region is offline, // before we actually close the region, as reportRegionStateTransition is part of the // closing process. long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.info( - "Receiving OPEN for {} which we are trying to close, try again after {}ms", + LOG.info("Receiving OPEN for {} which we are trying to close, try again after {}ms", regionName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } @@ -138,8 +140,8 @@ public class AssignRegionHandler extends EventHandler { if (ServerRegionReplicaUtil.isMetaRegionReplicaReplicationEnabled(conf, tn)) { if (RegionReplicaUtil.isDefaultReplica(this.regionInfo.getReplicaId())) { // Add the hbase:meta replication source on replica zero/default. - rs.getReplicationSourceService().getReplicationManager(). - addCatalogReplicationSource(this.regionInfo); + rs.getReplicationSourceService().getReplicationManager() + .addCatalogReplicationSource(this.regionInfo); } } region = HRegion.openHRegion(regionInfo, htd, rs.getWAL(regionInfo), conf, rs, null); @@ -176,12 +178,14 @@ public class AssignRegionHandler extends EventHandler { } public static AssignRegionHandler create(HRegionServer server, RegionInfo regionInfo, - long openProcId, TableDescriptor tableDesc, long masterSystemTime) { + long openProcId, TableDescriptor tableDesc, long masterSystemTime) { EventType eventType; if (regionInfo.isMetaRegion()) { eventType = EventType.M_RS_OPEN_META; - } else if (regionInfo.getTable().isSystemTable() || - (tableDesc != null && tableDesc.getPriority() >= HConstants.ADMIN_QOS)) { + } else if ( + regionInfo.getTable().isSystemTable() + || (tableDesc != null && tableDesc.getPriority() >= HConstants.ADMIN_QOS) + ) { eventType = EventType.M_RS_OPEN_PRIORITY_REGION; } else { eventType = EventType.M_RS_OPEN_REGION; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java index 38097bafd6e..a5336430dec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,10 +30,8 @@ import org.apache.yetus.audience.InterfaceAudience; public class CloseMetaHandler extends CloseRegionHandler { // Called when regionserver determines its to go down; not master orchestrated - public CloseMetaHandler(final Server server, - final RegionServerServices rsServices, - final RegionInfo regionInfo, - final boolean abort) { + public CloseMetaHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final boolean abort) { super(server, rsServices, regionInfo, abort, EventType.M_RS_CLOSE_META, null); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java index f9f0e91cc46..2301b9b8b49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,25 +31,28 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTran import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; /** * Handles closing of a region on a region server. *

 * In normal operation, we use {@link UnassignRegionHandler} closing Regions but when shutting down
- * the region server and closing out Regions, we use this handler instead; it does not expect to
- * be able to communicate the close back to the Master.
- * Expects that the close *has* been registered in the hosting RegionServer before
- * submitting this Handler; i.e. rss.getRegionsInTransitionInRS().putIfAbsent(
- * this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE); has been called first.
- * In here when done, we do the deregister.
+ * the region server and closing out Regions, we use this handler instead; it does not expect to be
+ * able to communicate the close back to the Master.
+ *
+ * Expects that the close *has* been registered in the hosting RegionServer before submitting this
+ * Handler; i.e. rss.getRegionsInTransitionInRS().putIfAbsent(
+ * this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE); has been called first. In here
+ * when done, we do the deregister.
+ *
      * @see UnassignRegionHandler */ @InterfaceAudience.Private public class CloseRegionHandler extends EventHandler { - // NOTE on priorities shutting down. There are none for close. There are some - // for open. I think that is right. On shutdown, we want the meta to close - // after the user regions have closed. What + // NOTE on priorities shutting down. There are none for close. There are some + // for open. I think that is right. On shutdown, we want the meta to close + // after the user regions have closed. What // about the case where master tells us to shutdown a catalog region and we // have a running queue of user regions to close? private static final Logger LOG = LoggerFactory.getLogger(CloseRegionHandler.class); @@ -58,7 +60,7 @@ public class CloseRegionHandler extends EventHandler { private final RegionServerServices rsServices; private final RegionInfo regionInfo; - // If true, the hosting server is aborting. Region close process is different + // If true, the hosting server is aborting. Region close process is different // when we are aborting. private final boolean abort; private ServerName destination; @@ -67,17 +69,13 @@ public class CloseRegionHandler extends EventHandler { * This method used internally by the RegionServer to close out regions. * @param abort If the regionserver is aborting. */ - public CloseRegionHandler(final Server server, - final RegionServerServices rsServices, - final RegionInfo regionInfo, final boolean abort, - ServerName destination) { - this(server, rsServices, regionInfo, abort, - EventType.M_RS_CLOSE_REGION, destination); + public CloseRegionHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final boolean abort, ServerName destination) { + this(server, rsServices, regionInfo, abort, EventType.M_RS_CLOSE_REGION, destination); } - protected CloseRegionHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - boolean abort, EventType eventType, ServerName destination) { + protected CloseRegionHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, boolean abort, EventType eventType, ServerName destination) { super(server, eventType); this.server = server; this.rsServices = rsServices; @@ -95,7 +93,7 @@ public class CloseRegionHandler extends EventHandler { String name = regionInfo.getEncodedName(); LOG.trace("Processing close of {}", name); // Check that this region is being served here - HRegion region = (HRegion)rsServices.getRegion(name); + HRegion region = (HRegion) rsServices.getRegion(name); try { if (region == null) { LOG.warn("Received CLOSE for region {} but currently not serving - ignoring", name); @@ -115,7 +113,7 @@ public class CloseRegionHandler extends EventHandler { rsServices.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); - // Done! Region is closed on this RS + // Done! 
Region is closed on this RS LOG.debug("Closed {}", region.getRegionInfo().getRegionNameAsString()); } finally { // Clear any reference in getServer().getRegionsInTransitionInRS() on success or failure, @@ -125,8 +123,9 @@ public class CloseRegionHandler extends EventHandler { } } - @Override protected void handleException(Throwable t) { - server.abort("Unrecoverable exception while closing " + - this.regionInfo.getRegionNameAsString(), t); + @Override + protected void handleException(Throwable t) { + server.abort("Unrecoverable exception while closing " + this.regionInfo.getRegionNameAsString(), + t); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java index 02ed0ef71c5..43051893376 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java index ca5f9e179a9..1107b60bf15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,9 +31,8 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public class OpenMetaHandler extends OpenRegionHandler { - public OpenMetaHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - final TableDescriptor htd, long masterSystemTime) { + public OpenMetaHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, final TableDescriptor htd, long masterSystemTime) { super(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_META); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java index 1861a2bba33..36f91068341 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.handler; import org.apache.hadoop.hbase.Server; @@ -33,8 +32,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class OpenPriorityRegionHandler extends OpenRegionHandler { public OpenPriorityRegionHandler(Server server, RegionServerServices rsServices, - RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { + RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { super(server, rsServices, regionInfo, htd, masterSystemTime, - EventType.M_RS_OPEN_PRIORITY_REGION); + EventType.M_RS_OPEN_PRIORITY_REGION); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index 3ae38864ba1..bde771f6ab8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase.regionserver.handler; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.client.RegionInfo; @@ -36,7 +34,9 @@ import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; + /** * Handles opening of a region on a region server. *

      @@ -55,15 +55,14 @@ public class OpenRegionHandler extends EventHandler { private final TableDescriptor htd; private final long masterSystemTime; - public OpenRegionHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - TableDescriptor htd, long masterSystemTime) { + public OpenRegionHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION); } - protected OpenRegionHandler(final Server server, - final RegionServerServices rsServices, final RegionInfo regionInfo, - final TableDescriptor htd, long masterSystemTime, EventType eventType) { + protected OpenRegionHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final TableDescriptor htd, long masterSystemTime, + EventType eventType) { super(server, eventType); this.rsServices = rsServices; this.regionInfo = regionInfo; @@ -93,28 +92,30 @@ public class OpenRegionHandler extends EventHandler { // Check that this region is not already online if (this.rsServices.getRegion(encodedName) != null) { - LOG.error("Region " + encodedName + - " was already online when we started processing the opening. " + - "Marking this new attempt as failed"); + LOG.error( + "Region " + encodedName + " was already online when we started processing the opening. " + + "Marking this new attempt as failed"); return; } // Check that we're still supposed to open the region. - // If fails, just return. Someone stole the region from under us. - if (!isRegionStillOpening()){ + // If fails, just return. Someone stole the region from under us. + if (!isRegionStillOpening()) { LOG.error("Region " + encodedName + " opening cancelled"); return; } - // Open region. After a successful open, failures in subsequent + // Open region. After a successful open, failures in subsequent // processing needs to do a close as part of cleanup. region = openRegion(); if (region == null) { return; } - if (!updateMeta(region, masterSystemTime) || this.server.isStopped() || - this.rsServices.isStopping()) { + if ( + !updateMeta(region, masterSystemTime) || this.server.isStopped() + || this.rsServices.isStopping() + ) { return; } @@ -126,30 +127,30 @@ public class OpenRegionHandler extends EventHandler { this.rsServices.addRegion(region); openSuccessful = true; - // Done! Successful region open + // Done! Successful region open LOG.debug("Opened " + regionName + " on " + this.server.getServerName()); } finally { // Do all clean up here if (!openSuccessful) { doCleanUpOnFailedOpen(region); } - final Boolean current = this.rsServices.getRegionsInTransitionInRS(). - remove(this.regionInfo.getEncodedNameAsBytes()); + final Boolean current = this.rsServices.getRegionsInTransitionInRS() + .remove(this.regionInfo.getEncodedNameAsBytes()); // Let's check if we have met a race condition on open cancellation.... // A better solution would be to not have any race condition. // this.rsServices.getRegionsInTransitionInRS().remove( - // this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); + // this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); // would help. if (openSuccessful) { if (current == null) { // Should NEVER happen, but let's be paranoid. LOG.error("Bad state: we've just opened a region that was NOT in transition. 
Region=" - + regionName); + + regionName); } else if (Boolean.FALSE.equals(current)) { // Can happen, if we're // really unlucky. LOG.error("Race condition: we've finished to open a region, while a close was requested " - + " on region=" + regionName + ". It can be a critical error, as a region that" - + " should be closed is now opened. Closing it now"); + + " on region=" + regionName + ". It can be a critical error, as a region that" + + " should be closed is now opened. Closing it now"); cleanupFailedOpen(region); } } @@ -168,27 +169,28 @@ public class OpenRegionHandler extends EventHandler { } /** - * Update ZK or META. This can take a while if for example the - * hbase:meta is not available -- if server hosting hbase:meta crashed and we are - * waiting on it to come back -- so run in a thread and keep updating znode - * state meantime so master doesn't timeout our region-in-transition. + * Update ZK or META. This can take a while if for example the hbase:meta is not available -- if + * server hosting hbase:meta crashed and we are waiting on it to come back -- so run in a thread + * and keep updating znode state meantime so master doesn't timeout our region-in-transition. * Caller must cleanup region if this fails. */ private boolean updateMeta(final HRegion r, long masterSystemTime) { if (this.server.isStopped() || this.rsServices.isStopping()) { return false; } - // Object we do wait/notify on. Make it boolean. If set, we're done. + // Object we do wait/notify on. Make it boolean. If set, we're done. // Else, wait. final AtomicBoolean signaller = new AtomicBoolean(false); - PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r, - this.server, this.rsServices, signaller, masterSystemTime); + PostOpenDeployTasksThread t = + new PostOpenDeployTasksThread(r, this.server, this.rsServices, signaller, masterSystemTime); t.start(); // Post open deploy task: - // meta => update meta location in ZK - // other region => update meta - while (!signaller.get() && t.isAlive() && !this.server.isStopped() && - !this.rsServices.isStopping() && isRegionStillOpening()) { + // meta => update meta location in ZK + // other region => update meta + while ( + !signaller.get() && t.isAlive() && !this.server.isStopped() && !this.rsServices.isStopping() + && isRegionStillOpening() + ) { synchronized (signaller) { try { // Wait for 10 seconds, so that server shutdown @@ -199,8 +201,8 @@ public class OpenRegionHandler extends EventHandler { } } } - // Is thread still alive? We may have left above loop because server is - // stopping or we timed out the edit. Is so, interrupt it. + // Is thread still alive? We may have left above loop because server is + // stopping or we timed out the edit. Is so, interrupt it. if (t.isAlive()) { if (!signaller.get()) { // Thread still running; interrupt @@ -210,20 +212,19 @@ public class OpenRegionHandler extends EventHandler { try { t.join(); } catch (InterruptedException ie) { - LOG.warn("Interrupted joining " + - r.getRegionInfo().getRegionNameAsString(), ie); + LOG.warn("Interrupted joining " + r.getRegionInfo().getRegionNameAsString(), ie); Thread.currentThread().interrupt(); } } - // Was there an exception opening the region? This should trigger on - // InterruptedException too. If so, we failed. + // Was there an exception opening the region? This should trigger on + // InterruptedException too. If so, we failed. return (!Thread.interrupted() && t.getException() == null); } /** - * Thread to run region post open tasks. 
Call {@link #getException()} after the thread finishes - * to check for exceptions running + * Thread to run region post open tasks. Call {@link #getException()} after the thread finishes to + * check for exceptions running * {@link RegionServerServices#postOpenDeployTasks(PostOpenDeployContext)} */ static class PostOpenDeployTasksThread extends Thread { @@ -235,7 +236,7 @@ public class OpenRegionHandler extends EventHandler { private final long masterSystemTime; PostOpenDeployTasksThread(final HRegion region, final Server server, - final RegionServerServices services, final AtomicBoolean signaller, long masterSystemTime) { + final RegionServerServices services, final AtomicBoolean signaller, long masterSystemTime) { super("PostOpenDeployTasks:" + region.getRegionInfo().getEncodedName()); this.setDaemon(true); this.server = server; @@ -251,8 +252,8 @@ public class OpenRegionHandler extends EventHandler { this.services.postOpenDeployTasks( new PostOpenDeployContext(region, Procedure.NO_PROC_ID, masterSystemTime)); } catch (Throwable e) { - String msg = "Exception running postOpenDeployTasks; region=" + - this.region.getRegionInfo().getEncodedName(); + String msg = "Exception running postOpenDeployTasks; region=" + + this.region.getRegionInfo().getEncodedName(); this.exception = e; if (e instanceof IOException && isRegionStillOpening(region.getRegionInfo(), services)) { server.abort(msg, e); @@ -281,28 +282,25 @@ public class OpenRegionHandler extends EventHandler { private HRegion openRegion() { HRegion region = null; try { - // Instantiate the region. This also periodically tickles OPENING + // Instantiate the region. This also periodically tickles OPENING // state so master doesn't timeout this region in transition. - region = HRegion.openHRegion(this.regionInfo, this.htd, - this.rsServices.getWAL(this.regionInfo), - this.server.getConfiguration(), - this.rsServices, - new CancelableProgressable() { - @Override - public boolean progress() { - if (!isRegionStillOpening()) { - LOG.warn("Open region aborted since it isn't opening any more"); - return false; + region = + HRegion.openHRegion(this.regionInfo, this.htd, this.rsServices.getWAL(this.regionInfo), + this.server.getConfiguration(), this.rsServices, new CancelableProgressable() { + @Override + public boolean progress() { + if (!isRegionStillOpening()) { + LOG.warn("Open region aborted since it isn't opening any more"); + return false; + } + return true; } - return true; - } - }); + }); } catch (Throwable t) { // We failed open. Our caller will see the 'null' return value // and transition the node back to FAILED_OPEN. If that fails, // we rely on the Timeout Monitor in the master to reassign. 
- LOG.error( - "Failed open of region=" + this.regionInfo.getRegionNameAsString(), t); + LOG.error("Failed open of region=" + this.regionInfo.getRegionNameAsString(), t); } return region; } @@ -314,8 +312,8 @@ public class OpenRegionHandler extends EventHandler { } } - private static boolean isRegionStillOpening( - RegionInfo regionInfo, RegionServerServices rsServices) { + private static boolean isRegionStillOpening(RegionInfo regionInfo, + RegionServerServices rsServices) { byte[] encodedName = regionInfo.getEncodedNameAsBytes(); Boolean action = rsServices.getRegionsInTransitionInRS().get(encodedName); return Boolean.TRUE.equals(action); // true means opening for RIT diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java index ed1b2c760f9..41fb3e7bf12 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,13 @@ package org.apache.hadoop.hbase.regionserver.handler; import java.io.IOException; import java.util.concurrent.CountDownLatch; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Handler to seek storefiles in parallel. @@ -41,8 +39,8 @@ public class ParallelSeekHandler extends EventHandler { private CountDownLatch latch; private Throwable err = null; - public ParallelSeekHandler(KeyValueScanner scanner,Cell keyValue, - long readPoint, CountDownLatch latch) { + public ParallelSeekHandler(KeyValueScanner scanner, Cell keyValue, long readPoint, + CountDownLatch latch) { super(null, EventType.RS_PARALLEL_SEEK); this.scanner = scanner; this.keyValue = keyValue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java index b5204af5e53..79bdc1e4934 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.handler; import java.io.IOException; @@ -24,9 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.FlushRegionCallable; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -34,21 +30,25 @@ import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.RetryCounterFactory; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; /** * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to WAL in * secondary region replicas. This means that a secondary region replica can serve some edits from - * it's memstore that are still not flushed from primary. We do not want to allow secondary - * region's seqId to go back in time, when this secondary region is opened elsewhere after a - * crash or region move. We will trigger a flush cache in the primary region replica and wait - * for observing a complete flush cycle before marking the region readsEnabled. This handler does - * the flushing of the primary region replica and ensures that regular region opening is not - * blocked while the secondary replica is blocked on flush. + * it's memstore that are still not flushed from primary. We do not want to allow secondary region's + * seqId to go back in time, when this secondary region is opened elsewhere after a crash or region + * move. We will trigger a flush cache in the primary region replica and wait for observing a + * complete flush cycle before marking the region readsEnabled. This handler does the flushing of + * the primary region replica and ensures that regular region opening is not blocked while the + * secondary replica is blocked on flush. 
*/ @InterfaceAudience.Private public class RegionReplicaFlushHandler extends EventHandler { @@ -61,8 +61,8 @@ public class RegionReplicaFlushHandler extends EventHandler { private final HRegion region; public RegionReplicaFlushHandler(Server server, ClusterConnection connection, - RpcRetryingCallerFactory rpcRetryingCallerFactory, RpcControllerFactory rpcControllerFactory, - int operationTimeout, HRegion region) { + RpcRetryingCallerFactory rpcRetryingCallerFactory, RpcControllerFactory rpcControllerFactory, + int operationTimeout, HRegion region) { super(server, EventType.RS_REGION_REPLICA_FLUSH); this.connection = connection; this.rpcRetryingCallerFactory = rpcRetryingCallerFactory; @@ -106,28 +106,31 @@ public class RegionReplicaFlushHandler extends EventHandler { HConstants.DEFAULT_HBASE_CLIENT_PAUSE); int maxAttempts = getRetriesCount(connection.getConfiguration()); - RetryCounter counter = new RetryCounterFactory(maxAttempts, (int)pause).create(); + RetryCounter counter = new RetryCounterFactory(maxAttempts, (int) pause).create(); if (LOG.isDebugEnabled()) { - LOG.debug("RPC'ing to primary " + ServerRegionReplicaUtil. - getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionNameAsString() + - " from " + region.getRegionInfo().getRegionNameAsString() + " to trigger FLUSH"); + LOG.debug("RPC'ing to primary " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getRegionNameAsString() + + " from " + region.getRegionInfo().getRegionNameAsString() + " to trigger FLUSH"); } - while (!region.isClosing() && !region.isClosed() - && !server.isAborted() && !server.isStopped()) { - FlushRegionCallable flushCallable = new FlushRegionCallable( - connection, rpcControllerFactory, + while ( + !region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped() + ) { + FlushRegionCallable flushCallable = new FlushRegionCallable(connection, rpcControllerFactory, RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true); // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we // do not have to wait for the whole flush here, just initiate it. FlushRegionResponse response = null; try { - response = rpcRetryingCallerFactory.newCaller() + response = rpcRetryingCallerFactory. 
newCaller() .callWithRetries(flushCallable, this.operationTimeout); } catch (IOException ex) { - if (ex instanceof TableNotFoundException - || connection.isTableDisabled(region.getRegionInfo().getTable())) { + if ( + ex instanceof TableNotFoundException + || connection.isTableDisabled(region.getRegionInfo().getTable()) + ) { return; } throw ex; @@ -138,20 +141,21 @@ public class RegionReplicaFlushHandler extends EventHandler { // a complete flush cycle or replay a region open event if (LOG.isDebugEnabled()) { LOG.debug("Triggered flush of primary region replica " - + ServerRegionReplicaUtil - .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() - + " for " + region.getRegionInfo().getEncodedName() - + "; now waiting and blocking reads until completes a full flush cycle"); + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getEncodedName() + + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until completes a full flush cycle"); } region.setReadsEnabled(true); break; } else { if (response.hasWroteFlushWalMarker()) { - if(response.getWroteFlushWalMarker()) { + if (response.getWroteFlushWalMarker()) { if (LOG.isDebugEnabled()) { - LOG.debug("Triggered empty flush marker (memstore empty) on primary " - + "region replica " + ServerRegionReplicaUtil - .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + LOG.debug( + "Triggered empty flush marker (memstore empty) on primary " + "region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getEncodedName() + " for " + region.getRegionInfo().getEncodedName() + "; now waiting and " + "blocking reads until observing a flush marker"); } @@ -161,16 +165,16 @@ public class RegionReplicaFlushHandler extends EventHandler { // somehow we were not able to get the primary to write the flush request. It may be // closing or already flushing. Retry flush again after some sleep. if (!counter.shouldRetry()) { - throw new IOException("Cannot cause primary to flush or drop a wal marker after " + - "retries. Failing opening of this region replica " - + region.getRegionInfo().getEncodedName()); + throw new IOException("Cannot cause primary to flush or drop a wal marker after " + + "retries. Failing opening of this region replica " + + region.getRegionInfo().getEncodedName()); } } } else { // nothing to do. Are we dealing with an old server? LOG.warn("Was not able to trigger a flush from primary region due to old server version? 
" - + "Continuing to open the secondary region replica: " - + region.getRegionInfo().getEncodedName()); + + "Continuing to open the secondary region replica: " + + region.getRegionInfo().getEncodedName()); region.setReadsEnabled(true); break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index 0d02f30e5ab..33eaf675c6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; /** @@ -62,7 +63,7 @@ public class UnassignRegionHandler extends EventHandler { private final RetryCounter retryCounter; public UnassignRegionHandler(HRegionServer server, String encodedName, long closeProcId, - boolean abort, @Nullable ServerName destination, EventType eventType) { + boolean abort, @Nullable ServerName destination, EventType eventType) { super(server, eventType); this.encodedName = encodedName; this.closeProcId = closeProcId; @@ -86,12 +87,14 @@ public class UnassignRegionHandler extends EventHandler { // reportRegionStateTransition, so the HMaster will think the region is online, before we // actually open the region, as reportRegionStateTransition is part of the opening process. long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Received CLOSE for {} which we are already " + - "trying to OPEN; try again after {}ms", encodedName, backoff); + LOG.warn( + "Received CLOSE for {} which we are already " + "trying to OPEN; try again after {}ms", + encodedName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } else { - LOG.info("Received CLOSE for {} which we are already trying to CLOSE," + - " but not completed yet", encodedName); + LOG.info( + "Received CLOSE for {} which we are already trying to CLOSE," + " but not completed yet", + encodedName); } return; } @@ -121,18 +124,21 @@ public class UnassignRegionHandler extends EventHandler { } rs.removeRegion(region, destination); - if (ServerRegionReplicaUtil.isMetaRegionReplicaReplicationEnabled(rs.getConfiguration(), - region.getTableDescriptor().getTableName())) { + if ( + ServerRegionReplicaUtil.isMetaRegionReplicaReplicationEnabled(rs.getConfiguration(), + region.getTableDescriptor().getTableName()) + ) { if (RegionReplicaUtil.isDefaultReplica(region.getRegionInfo().getReplicaId())) { // If hbase:meta read replicas enabled, remove replication source for hbase:meta Regions. // See assign region handler where we add the replication source on open. - rs.getReplicationSourceService().getReplicationManager(). 
- removeCatalogReplicationSource(region.getRegionInfo()); + rs.getReplicationSourceService().getReplicationManager() + .removeCatalogReplicationSource(region.getRegionInfo()); } } - if (!rs.reportRegionStateTransition( - new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, closeProcId, - -1, region.getRegionInfo()))) { + if ( + !rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, + HConstants.NO_SEQNUM, closeProcId, -1, region.getRegionInfo())) + ) { throw new IOException("Failed to report close to master: " + regionName); } // Cache the close region procedure id after report region transition succeed. @@ -151,13 +157,13 @@ public class UnassignRegionHandler extends EventHandler { } public static UnassignRegionHandler create(HRegionServer server, String encodedName, - long closeProcId, boolean abort, @Nullable ServerName destination) { + long closeProcId, boolean abort, @Nullable ServerName destination) { // Just try our best to determine whether it is for closing meta. It is not the end of the world // if we put the handler into a wrong executor. Region region = server.getRegion(encodedName); - EventType eventType = - region != null && region.getRegionInfo().isMetaRegion() ? EventType.M_RS_CLOSE_META - : EventType.M_RS_CLOSE_REGION; + EventType eventType = region != null && region.getRegionInfo().isMetaRegion() + ? EventType.M_RS_CLOSE_META + : EventType.M_RS_CLOSE_REGION; return new UnassignRegionHandler(server, encodedName, closeProcId, abort, destination, eventType); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java index ffdade15372..dadcf8b410f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,6 @@ package org.apache.hadoop.hbase.regionserver.handler; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; @@ -35,13 +30,16 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Handles log splitting a wal - * Used by the zk-based distributed log splitting. Created by ZKSplitLogWorkerCoordination. + * Handles log splitting a wal Used by the zk-based distributed log splitting. Created by + * ZKSplitLogWorkerCoordination. 
* @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager - */ + * distributed WAL splitter, see SplitWALManager + */ @Deprecated @InterfaceAudience.Private public class WALSplitterHandler extends EventHandler { @@ -53,10 +51,9 @@ public class WALSplitterHandler extends EventHandler { private final SplitLogWorkerCoordination.SplitTaskDetails splitTaskDetails; private final SplitLogWorkerCoordination coordination; - public WALSplitterHandler(final Server server, SplitLogWorkerCoordination coordination, - SplitLogWorkerCoordination.SplitTaskDetails splitDetails, CancelableProgressable reporter, - AtomicInteger inProgressTasks, TaskExecutor splitTaskExecutor) { + SplitLogWorkerCoordination.SplitTaskDetails splitDetails, CancelableProgressable reporter, + AtomicInteger inProgressTasks, TaskExecutor splitTaskExecutor) { super(server, EventType.RS_LOG_REPLAY); this.splitTaskDetails = splitDetails; this.coordination = coordination; @@ -74,35 +71,35 @@ public class WALSplitterHandler extends EventHandler { try { status = this.splitTaskExecutor.exec(splitTaskDetails.getWALFile(), reporter); switch (status) { - case DONE: - coordination.endTask(new SplitLogTask.Done(this.serverName), - SplitLogCounters.tot_wkr_task_done, splitTaskDetails); - break; - case PREEMPTED: - SplitLogCounters.tot_wkr_preempt_task.increment(); - LOG.warn("task execution preempted " + splitTaskDetails.getWALFile()); - break; - case ERR: - if (server != null && !server.isStopped()) { - coordination.endTask(new SplitLogTask.Err(this.serverName), - SplitLogCounters.tot_wkr_task_err, splitTaskDetails); + case DONE: + coordination.endTask(new SplitLogTask.Done(this.serverName), + SplitLogCounters.tot_wkr_task_done, splitTaskDetails); break; - } - // if the RS is exiting then there is probably a tons of stuff - // that can go wrong. Resign instead of signaling error. - //$FALL-THROUGH$ - case RESIGNED: - if (server != null && server.isStopped()) { - LOG.info("task execution interrupted because worker is exiting " + case PREEMPTED: + SplitLogCounters.tot_wkr_preempt_task.increment(); + LOG.warn("task execution preempted " + splitTaskDetails.getWALFile()); + break; + case ERR: + if (server != null && !server.isStopped()) { + coordination.endTask(new SplitLogTask.Err(this.serverName), + SplitLogCounters.tot_wkr_task_err, splitTaskDetails); + break; + } + // if the RS is exiting then there is probably a tons of stuff + // that can go wrong. Resign instead of signaling error. + // $FALL-THROUGH$ + case RESIGNED: + if (server != null && server.isStopped()) { + LOG.info("task execution interrupted because worker is exiting " + splitTaskDetails.toString()); - } - coordination.endTask(new SplitLogTask.Resigned(this.serverName), - SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails); - break; + } + coordination.endTask(new SplitLogTask.Resigned(this.serverName), + SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails); + break; } } finally { LOG.info("Worker " + serverName + " done with task " + splitTaskDetails.toString() + " in " - + (EnvironmentEdgeManager.currentTime() - startTime) + "ms. Status = " + status); + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms. 
Status = " + status); this.inProgressTasks.decrementAndGet(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java index cc48d9ef18a..fe9e41a960c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,14 +38,12 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class RSDumpServlet extends StateDumpServlet { private static final long serialVersionUID = 1L; - private static final String LINE = - "==========================================================="; + private static final String LINE = "==========================================================="; @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException { - HRegionServer hrs = (HRegionServer)getServletContext().getAttribute( - HRegionServer.REGIONSERVER); + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { + HRegionServer hrs = + (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); assert hrs != null : "No RS in context!"; response.setContentType("text/plain"); @@ -60,8 +57,7 @@ public class RSDumpServlet extends StateDumpServlet { OutputStream os = response.getOutputStream(); try (PrintWriter out = new PrintWriter(os)) { - out.println("RegionServer status for " + hrs.getServerName() - + " as of " + new Date()); + out.println("RegionServer status for " + hrs.getServerName() + " as of " + new Date()); out.println("\n\nVersion Info:"); out.println(LINE); @@ -128,29 +124,28 @@ public class RSDumpServlet extends StateDumpServlet { } } - public static void dumpCallQueues(HRegionServer hrs, PrintWriter out) { CallQueueInfo callQueueInfo = hrs.getRpcServer().getScheduler().getCallQueueInfo(); - for(String queueName: callQueueInfo.getCallQueueNames()) { + for (String queueName : callQueueInfo.getCallQueueNames()) { out.println("\nQueue Name: " + queueName); long totalCallCount = 0L, totalCallSize = 0L; - for (String methodName: callQueueInfo.getCalledMethodNames(queueName)) { + for (String methodName : callQueueInfo.getCalledMethodNames(queueName)) { long thisMethodCount, thisMethodSize; thisMethodCount = callQueueInfo.getCallMethodCount(queueName, methodName); thisMethodSize = callQueueInfo.getCallMethodSize(queueName, methodName); - out.println("Method in call: "+methodName); - out.println("Total call count for method: "+thisMethodCount); - out.println("Total call size for method (bytes): "+thisMethodSize); + out.println("Method in call: " + methodName); + out.println("Total call count for method: " + thisMethodCount); + out.println("Total call size for method (bytes): " + thisMethodSize); totalCallCount += thisMethodCount; totalCallSize += thisMethodSize; } - out.println("Total call count for queue: "+totalCallCount); - out.println("Total call size for queue (bytes): "+totalCallSize); + out.println("Total call count for queue: " + totalCallCount); + out.println("Total call size for queue (bytes): " + totalCallSize); } } } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java index f2d8d48865c..b9bf2da6080 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,15 +18,13 @@ package org.apache.hadoop.hbase.regionserver.http; import java.io.IOException; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class RSStatusServlet extends HttpServlet { @@ -35,8 +32,9 @@ public class RSStatusServlet extends HttpServlet { @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) - throws ServletException, IOException { - HRegionServer hrs = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER); + throws ServletException, IOException { + HRegionServer hrs = + (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); assert hrs != null : "No RS in context!"; resp.setContentType("text/html"); @@ -48,14 +46,10 @@ public class RSStatusServlet extends HttpServlet { } RSStatusTmpl tmpl = new RSStatusTmpl(); - if (req.getParameter("format") != null) - tmpl.setFormat(req.getParameter("format")); - if (req.getParameter("filter") != null) - tmpl.setFilter(req.getParameter("filter")); - if (req.getParameter("bcn") != null) - tmpl.setBcn(req.getParameter("bcn")); - if (req.getParameter("bcv") != null) - tmpl.setBcv(req.getParameter("bcv")); + if (req.getParameter("format") != null) tmpl.setFormat(req.getParameter("format")); + if (req.getParameter("filter") != null) tmpl.setFilter(req.getParameter("filter")); + if (req.getParameter("bcn") != null) tmpl.setBcn(req.getParameter("bcn")); + if (req.getParameter("bcv") != null) tmpl.setBcv(req.getParameter("bcv")); tmpl.render(resp.getWriter(), hrs); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java index 35726ab0f2e..b23a016c964 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +42,7 @@ class ColumnCount { /** * Constructor * @param column the qualifier to count the versions for - * @param count initial count + * @param count initial count */ public ColumnCount(byte[] column, int count) { this(column, 0, column.length, count); @@ -54,7 +53,7 @@ class ColumnCount { * @param column the qualifier to count the versions for * @param offset in the passed buffer where to start the qualifier from * @param length of the qualifier - * @param count initial count + * @param count initial count */ public ColumnCount(byte[] column, int offset, int length, int count) { this.bytes = column; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java index bd6cb20d829..1fd47888986 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,34 +18,32 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** - * Implementing classes of this interface will be used for the tracking - * and enforcement of columns and numbers of versions and timeToLive during - * the course of a Get or Scan operation. + * Implementing classes of this interface will be used for the tracking and enforcement of columns + * and numbers of versions and timeToLive during the course of a Get or Scan operation. *

<p> * Currently there are two different types of Store/Family-level queries. - * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies - * one or more column qualifiers to return in the family.</li> - * <li>{@link ScanWildcardColumnTracker} is used when no columns are - * explicitly specified.</li>
+ * <ul> + * <li>{@link ExplicitColumnTracker} is used when the query specifies one or more column qualifiers + * to return in the family.</li> + * <li>{@link ScanWildcardColumnTracker} is used when no columns are explicitly specified.</li> * </ul> * <p>
* This class is utilized by {@link ScanQueryMatcher} mainly through two methods: - * <ul><li>{@link #checkColumn} is called when a Put satisfies all other - * conditions of the query.</li> - * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher - * believes that the current column should be skipped (by timestamp, filter etc.)</li>
+ * <ul> + * <li>{@link #checkColumn} is called when a Put satisfies all other conditions of the query.</li> + * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher believes that the current + * column should be skipped (by timestamp, filter etc.)</li> * </ul> * <p>
* These two methods returns a - * {@link org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode} - * to define what action should be taken. + * {@link org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode} to define + * what action should be taken. * <p>
          * This class is NOT thread-safe as queries are never multi-threaded */ @@ -56,82 +53,73 @@ public interface ColumnTracker extends ShipperListener { /** * Checks if the column is present in the list of requested columns by returning the match code * instance. It does not check against the number of versions for the columns asked for. To do the - * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} - * method based on the return type (INCLUDE) of this method. The values that can be returned by - * this method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and - * {@link MatchCode#SEEK_NEXT_ROW}. + * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} method based + * on the return type (INCLUDE) of this method. The values that can be returned by this method are + * {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and {@link MatchCode#SEEK_NEXT_ROW}. * @param cell a cell with the column to match against * @param type The type of the Cell * @return The match code instance. * @throws IOException in case there is an internal consistency problem caused by a data - * corruption. + * corruption. */ ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) throws IOException; /** * Keeps track of the number of versions for the columns asked for. It assumes that the user has * already checked if the cell needs to be included by calling the - * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method - * are {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, - * {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. - * Implementations which include all the columns could just return {@link MatchCode#INCLUDE} in - * the {@link #checkColumn(Cell, byte)} method and perform all the operations in this - * checkVersions method. - * @param cell a cell with the column to match against - * @param timestamp The timestamp of the cell. - * @param type the type of the key value (Put/Delete) + * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method are + * {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} + * and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. Implementations which include all the columns + * could just return {@link MatchCode#INCLUDE} in the {@link #checkColumn(Cell, byte)} method and + * perform all the operations in this checkVersions method. + * @param cell a cell with the column to match against + * @param timestamp The timestamp of the cell. + * @param type the type of the key value (Put/Delete) * @param ignoreCount indicates if the KV needs to be excluded while counting (used during - * compactions. We only count KV's that are older than all the scanners' read points.) + * compactions. We only count KV's that are older than all the scanners' read + * points.) * @return the scan query matcher match code instance * @throws IOException in case there is an internal consistency problem caused by a data - * corruption. + * corruption. */ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException; + boolean ignoreCount) throws IOException; + /** * Resets the Matcher */ void reset(); /** - * * @return true when done. */ boolean done(); /** - * Used by matcher and scan/get to get a hint of the next column - * to seek to after checkColumn() returns SKIP. 
Returns the next interesting - * column we want, or NULL there is none (wildcard scanner). - * - * Implementations aren't required to return anything useful unless the most recent - * call was to checkColumn() and the return code was SKIP. This is pretty implementation - * detail-y, but optimizations are like that. - * + * Used by matcher and scan/get to get a hint of the next column to seek to after checkColumn() + * returns SKIP. Returns the next interesting column we want, or NULL there is none (wildcard + * scanner). Implementations aren't required to return anything useful unless the most recent call + * was to checkColumn() and the return code was SKIP. This is pretty implementation detail-y, but + * optimizations are like that. * @return null, or a ColumnCount that we should seek to */ ColumnCount getColumnHint(); /** - * Retrieve the MatchCode for the next row or column - * @param cell + * Retrieve the MatchCode for the next row or column n */ MatchCode getNextRowOrNextColumn(Cell cell); /** - * Give the tracker a chance to declare it's done based on only the timestamp - * to allow an early out. - * - * @param timestamp - * @return true to early out based on timestamp. + * Give the tracker a chance to declare it's done based on only the timestamp to allow an early + * out. n * @return true to early out based on timestamp. */ boolean isDone(long timestamp); /** * This method is used to inform the column tracker that we are done with this column. We may get * this information from external filters or timestamp range and we then need to indicate this - * information to tracker. It is currently implemented for ExplicitColumnTracker. - * @param cell + * information to tracker. It is currently implemented for ExplicitColumnTracker. n */ default void doneWithColumn(Cell cell) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java index f9fb6029db3..9a4361a956a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,15 +20,14 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for compaction. 
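(A minimal sketch of the ColumnTracker protocol documented above, for orientation: checkColumn() is asked first, and checkVersions() only on an INCLUDE answer. The helper class and method names below are hypothetical, not HBase code; only the interface signatures visible in this patch are assumed.)

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.querymatcher.ColumnTracker;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode;

// Hypothetical helper, not part of HBase: shows the documented call order on a ColumnTracker.
public final class ColumnTrackerUsageSketch {

  static MatchCode toyMatch(ColumnTracker columns, Cell cell) throws IOException {
    byte type = cell.getTypeByte();
    // Step 1: is this qualifier wanted at all? (INCLUDE / SEEK_NEXT_COL / SEEK_NEXT_ROW)
    MatchCode code = columns.checkColumn(cell, type);
    if (code != MatchCode.INCLUDE) {
      return code; // the tracker asked us to seek past this column or row
    }
    // Step 2: only after INCLUDE do we ask the version question
    // (SKIP / INCLUDE / INCLUDE_AND_SEEK_NEXT_COL / INCLUDE_AND_SEEK_NEXT_ROW).
    return columns.checkVersions(cell, cell.getTimestamp(), type, false);
  }

  private ColumnTrackerUsageSketch() {
  }
}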
@@ -46,9 +45,9 @@ public abstract class CompactionScanQueryMatcher extends ScanQueryMatcher { protected final KeepDeletedCells keepDeletedCells; protected CompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columnTracker, long readPointToUse, long oldestUnexpiredTS, long now) { + ColumnTracker columnTracker, long readPointToUse, long oldestUnexpiredTS, long now) { super(createStartKeyFromRow(EMPTY_START_ROW, scanInfo), scanInfo, columnTracker, - oldestUnexpiredTS, now); + oldestUnexpiredTS, now); this.maxReadPointToTrackVersions = readPointToUse; this.deletes = deletes; this.keepDeletedCells = scanInfo.getKeepDeletedCells(); @@ -98,37 +97,39 @@ public abstract class CompactionScanQueryMatcher extends ScanQueryMatcher { // If keepDeletedCells is TTL and the delete marker is expired, then we can make sure that the // minVerions is larger than 0(otherwise we will just return at preCheck). So here we still // need to track the delete marker to see if it masks some cells. - if (keepDeletedCells == KeepDeletedCells.FALSE - || (keepDeletedCells == KeepDeletedCells.TTL && cell.getTimestamp() < oldestUnexpiredTS)) { + if ( + keepDeletedCells == KeepDeletedCells.FALSE + || (keepDeletedCells == KeepDeletedCells.TTL && cell.getTimestamp() < oldestUnexpiredTS) + ) { deletes.add(cell); } } public static CompactionScanQueryMatcher create(ScanInfo scanInfo, ScanType scanType, - long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, - byte[] dropDeletesFromRow, byte[] dropDeletesToRow, - RegionCoprocessorHost regionCoprocessorHost) throws IOException { - Pair trackers = getTrackers(regionCoprocessorHost, null, - scanInfo,oldestUnexpiredTS, null); + long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, + byte[] dropDeletesFromRow, byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) + throws IOException { + Pair trackers = + getTrackers(regionCoprocessorHost, null, scanInfo, oldestUnexpiredTS, null); DeleteTracker deleteTracker = trackers.getFirst(); ColumnTracker columnTracker = trackers.getSecond(); if (dropDeletesFromRow == null) { if (scanType == ScanType.COMPACT_RETAIN_DELETES) { if (scanInfo.isNewVersionBehavior()) { return new IncludeAllCompactionQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, oldestUnexpiredTS, now); + readPointToUse, oldestUnexpiredTS, now); } else { return new MinorCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, oldestUnexpiredTS, now); + readPointToUse, oldestUnexpiredTS, now); } } else { return new MajorCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, earliestPutTs, oldestUnexpiredTS, now); + readPointToUse, earliestPutTs, oldestUnexpiredTS, now); } } else { return new StripeCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, - dropDeletesToRow); + readPointToUse, earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, + dropDeletesToRow); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java index be9c51eca85..0b48e1ca3b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java @@ -1,4 +1,4 @@ 
-/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.ShipperListener; +import org.apache.yetus.audience.InterfaceAudience; /** * This interface is used for the tracking and enforcement of Deletes during the course of a Get or @@ -81,8 +81,8 @@ public interface DeleteTracker extends ShipperListener { COLUMN_DELETED, // The Cell is deleted by a delete column. VERSION_DELETED, // The Cell is deleted by a version delete. NOT_DELETED, - VERSION_MASKED // The Cell is masked by max number of versions which is considered as - // deleted in strong semantics of versions(See MvccTracker) + VERSION_MASKED // The Cell is masked by max number of versions which is considered as + // deleted in strong semantics of versions(See MvccTracker) } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java index c9899d51041..397e2631a44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * A query matcher for compaction which can drop delete markers. @@ -53,8 +53,8 @@ public abstract class DropDeletesCompactionScanQueryMatcher extends CompactionSc protected final long earliestPutTs; protected DropDeletesCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, - long now) { + ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, + long now) { super(scanInfo, deletes, columns, readPointToUse, oldestUnexpiredTS, now); this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes(); this.earliestPutTs = earliestPutTs; @@ -66,8 +66,10 @@ public abstract class DropDeletesCompactionScanQueryMatcher extends CompactionSc if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) { return MatchCode.INCLUDE; } - if (keepDeletedCells == KeepDeletedCells.TRUE - || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS)) { + if ( + keepDeletedCells == KeepDeletedCells.TRUE + || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS) + ) { // If keepDeletedCell is true, or the delete marker is not expired yet, we should include it // in version counting to see if we can drop it. 
The only exception is that, we can make // sure that no put is older than this delete marker. And under this situation, all later diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java index c0f13c0ac55..1ce2c6136cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is used for the tracking and enforcement of columns and numbers of versions during the @@ -68,13 +67,13 @@ public class ExplicitColumnTracker implements ColumnTracker { /** * Default constructor. - * @param columns columns specified user in query - * @param minVersions minimum number of versions to keep - * @param maxVersions maximum versions to return per column + * @param columns columns specified user in query + * @param minVersions minimum number of versions to keep + * @param maxVersions maximum versions to return per column * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL */ public ExplicitColumnTracker(NavigableSet columns, int minVersions, int maxVersions, - long oldestUnexpiredTS) { + long oldestUnexpiredTS) { this.maxVersions = maxVersions; this.minVersions = minVersions; this.oldestStamp = oldestUnexpiredTS; @@ -154,7 +153,7 @@ public class ExplicitColumnTracker implements ColumnTracker { @Override public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException { + boolean ignoreCount) throws IOException { assert !PrivateCellUtil.isDelete(type); if (ignoreCount) { return ScanQueryMatcher.MatchCode.INCLUDE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java index a486bec4377..c6776a05a41 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,19 +18,18 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * A compaction query matcher that always return INCLUDE and drops nothing. */ @InterfaceAudience.Private -public class IncludeAllCompactionQueryMatcher extends MinorCompactionScanQueryMatcher{ +public class IncludeAllCompactionQueryMatcher extends MinorCompactionScanQueryMatcher { public IncludeAllCompactionQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { + ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { super(scanInfo, deletes, columns, readPointToUse, oldestUnexpiredTS, now); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java index 2f02d77d0f5..7d3d973779c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for major compaction. @@ -31,8 +30,8 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo; public class MajorCompactionScanQueryMatcher extends DropDeletesCompactionScanQueryMatcher { public MajorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, - long now) { + ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, + long now) { super(scanInfo, deletes, columns, readPointToUse, earliestPutTs, oldestUnexpiredTS, now); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java index b3815dae1e7..70e474e106b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for minor compaction. @@ -31,7 +30,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo; public class MinorCompactionScanQueryMatcher extends CompactionScanQueryMatcher { public MinorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { + ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { super(scanInfo, deletes, columns, readPointToUse, oldestUnexpiredTS, now); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java index d62e2aa1f5b..146f67dbd2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
          + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -26,18 +26,17 @@ import java.util.SortedMap; import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue.Type; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** - * A tracker both implementing ColumnTracker and DeleteTracker, used for mvcc-sensitive scanning. - * We should make sure in one QueryMatcher the ColumnTracker and DeleteTracker is the same instance. + * A tracker both implementing ColumnTracker and DeleteTracker, used for mvcc-sensitive scanning. We + * should make sure in one QueryMatcher the ColumnTracker and DeleteTracker is the same instance. */ @InterfaceAudience.Private public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { @@ -71,7 +70,6 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { /** * Note maxVersion and minVersion must set according to cf's conf, not user's scan parameter. - * * @param columns columns specified user in query * @param comparartor the cell comparator * @param minVersion The minimum number of versions to keep(used when TTL is set). @@ -81,7 +79,7 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL */ public NewVersionBehaviorTracker(NavigableSet columns, CellComparator comparartor, - int minVersion, int maxVersion, int resultMaxVersions, long oldestUnexpiredTS) { + int minVersion, int maxVersion, int resultMaxVersions, long oldestUnexpiredTS) { this.maxVersions = maxVersion; this.minVersions = minVersion; this.resultMaxVersions = resultMaxVersions; @@ -103,8 +101,8 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { } /** - * A data structure which contains infos we need that happens before this node's mvcc and - * after the previous node's mvcc. A node means there is a version deletion at the mvcc and ts. + * A data structure which contains infos we need that happens before this node's mvcc and after + * the previous node's mvcc. A node means there is a version deletion at the mvcc and ts. */ protected class DeleteVersionsNode { public long ts; @@ -158,11 +156,10 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { } /** - * Reset the map if it is different with the last Cell. - * Save the cq array/offset/length for next Cell. - * - * @return If this put has duplicate ts with last cell, return the mvcc of last cell. - * Else return MAX_VALUE. + * Reset the map if it is different with the last Cell. Save the cq array/offset/length for next + * Cell. + * @return If this put has duplicate ts with last cell, return the mvcc of last cell. Else return + * MAX_VALUE. 
*/ protected long prepare(Cell cell) { if (isColumnQualifierChanged(cell)) { @@ -173,8 +170,10 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { delColMap.put(e.getKey(), e.getValue().getDeepCopy()); } countCurrentCol = 0; - } else if (!PrivateCellUtil.isDelete(lastCqType) && lastCqType == cell.getTypeByte() - && lastCqTs == cell.getTimestamp()) { + } else if ( + !PrivateCellUtil.isDelete(lastCqType) && lastCqType == cell.getTypeByte() + && lastCqTs == cell.getTimestamp() + ) { // Put with duplicate timestamp, ignore. return lastCqMvcc; } @@ -188,8 +187,10 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { } private boolean isColumnQualifierChanged(Cell cell) { - if (delColMap.isEmpty() && lastCqArray == null && cell.getQualifierLength() == 0 - && (PrivateCellUtil.isDeleteColumns(cell) || PrivateCellUtil.isDeleteColumnVersion(cell))) { + if ( + delColMap.isEmpty() && lastCqArray == null && cell.getQualifierLength() == 0 + && (PrivateCellUtil.isDeleteColumns(cell) || PrivateCellUtil.isDeleteColumnVersion(cell)) + ) { // for null columnQualifier return true; } @@ -202,25 +203,25 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { prepare(cell); byte type = cell.getTypeByte(); switch (Type.codeToType(type)) { - // By the order of seen. We put null cq at first. - case DeleteFamily: // Delete all versions of all columns of the specified family - delFamMap.put(cell.getSequenceId(), + // By the order of seen. We put null cq at first. + case DeleteFamily: // Delete all versions of all columns of the specified family + delFamMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId())); - break; - case DeleteFamilyVersion: // Delete all columns of the specified family and specified version - delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; + break; + case DeleteFamilyVersion: // Delete all columns of the specified family and specified version + delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; - // These two kinds of markers are mix with Puts. - case DeleteColumn: // Delete all versions of the specified column - delColMap.put(cell.getSequenceId(), + // These two kinds of markers are mix with Puts. + case DeleteColumn: // Delete all versions of the specified column + delColMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId())); - break; - case Delete: // Delete the specified version of the specified column. - delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - default: - throw new AssertionError("Unknown delete marker type for " + cell); + break; + case Delete: // Delete the specified version of the specified column. 
+ delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + default: + throw new AssertionError("Unknown delete marker type for " + cell); } } @@ -234,7 +235,7 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { long duplicateMvcc = prepare(cell); for (Map.Entry e : delColMap.tailMap(cell.getSequenceId()) - .entrySet()) { + .entrySet()) { DeleteVersionsNode node = e.getValue(); long deleteMvcc = Long.MAX_VALUE; SortedSet deleteVersionMvccs = node.deletesMap.get(cell.getTimestamp()); @@ -244,9 +245,8 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { deleteMvcc = tail.first(); } } - SortedMap> subMap = - node.mvccCountingMap - .subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); + SortedMap> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), + true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; @@ -270,7 +270,7 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { @Override public boolean isEmpty() { return delColMap.size() == 1 && delColMap.get(Long.MAX_VALUE).mvccCountingMap.size() == 1 - && delFamMap.size() == 1 && delFamMap.get(Long.MAX_VALUE).mvccCountingMap.size() == 1; + && delFamMap.size() == 1 && delFamMap.get(Long.MAX_VALUE).mvccCountingMap.size() == 1; } @Override @@ -278,17 +278,17 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { // ignore } - //ColumnTracker + // ColumnTracker @Override public MatchCode checkColumn(Cell cell, byte type) throws IOException { if (columns == null) { - return MatchCode.INCLUDE; + return MatchCode.INCLUDE; } while (!done()) { - int c = CellUtil.compareQualifiers(cell, - columns[columnIndex], 0, columns[columnIndex].length); + int c = + CellUtil.compareQualifiers(cell, columns[columnIndex], 0, columns[columnIndex].length); if (c < 0) { return MatchCode.SEEK_NEXT_COL; } @@ -305,8 +305,8 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { } @Override - public MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException { + public MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) + throws IOException { assert !PrivateCellUtil.isDelete(type); // We drop old version in #isDeleted, so here we won't SKIP because of versioning. But we should // consider TTL. @@ -350,7 +350,7 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { resetInternal(); } - protected void resetInternal(){ + protected void resetInternal() { delFamMap.put(Long.MAX_VALUE, new DeleteVersionsNode()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java index c755ff5c455..93288cba8cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,12 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for normal user scan. @@ -42,7 +41,7 @@ public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher { protected final boolean seePastDeleteMarkers; protected NormalUserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, DeleteTracker deletes, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, DeleteTracker deletes, long oldestUnexpiredTS, long now) { super(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, now); this.deletes = deletes; this.get = scan.isGetScan(); @@ -67,8 +66,8 @@ public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher { long timestamp = cell.getTimestamp(); byte typeByte = cell.getTypeByte(); if (PrivateCellUtil.isDelete(typeByte)) { - boolean includeDeleteMarker = seePastDeleteMarkers ? tr.withinTimeRange(timestamp) - : tr.withinOrAfterTimeRange(timestamp); + boolean includeDeleteMarker = + seePastDeleteMarkers ? tr.withinTimeRange(timestamp) : tr.withinOrAfterTimeRange(timestamp); if (includeDeleteMarker) { this.deletes.add(cell); } @@ -92,12 +91,12 @@ public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher { } public static NormalUserScanQueryMatcher create(Scan scan, ScanInfo scanInfo, - ColumnTracker columns, DeleteTracker deletes, boolean hasNullColumn, long oldestUnexpiredTS, - long now) throws IOException { + ColumnTracker columns, DeleteTracker deletes, boolean hasNullColumn, long oldestUnexpiredTS, + long now) throws IOException { if (scan.isReversed()) { if (scan.includeStopRow()) { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -106,7 +105,7 @@ public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher { }; } else { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -117,7 +116,7 @@ public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher { } else { if (scan.includeStopRow()) { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -126,7 +125,7 @@ public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher { }; } else { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java index ed9ba58c990..180d2dd2ed3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for raw scan. @@ -31,7 +30,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo; public abstract class RawScanQueryMatcher extends UserScanQueryMatcher { protected RawScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, long oldestUnexpiredTS, long now) { super(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, now); } @@ -61,11 +60,11 @@ public abstract class RawScanQueryMatcher extends UserScanQueryMatcher { } public static RawScanQueryMatcher create(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, long oldestUnexpiredTS, long now) { if (scan.isReversed()) { if (scan.includeStopRow()) { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -74,7 +73,7 @@ public abstract class RawScanQueryMatcher extends UserScanQueryMatcher { }; } else { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -85,7 +84,7 @@ public abstract class RawScanQueryMatcher extends UserScanQueryMatcher { } else { if (scan.includeStopRow()) { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -94,7 +93,7 @@ public abstract class RawScanQueryMatcher extends UserScanQueryMatcher { }; } else { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java index 26da698f477..3557973aae5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; import java.util.SortedSet; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is responsible for the tracking and enforcement of Deletes during the course of a Scan @@ -98,8 +96,7 @@ public class ScanDeleteTracker implements DeleteTracker { /** * Check if the specified Cell buffer has been deleted by a previously seen delete. - * @param cell - current cell to check if deleted by a previously seen delete - * @return deleteResult + * @param cell - current cell to check if deleted by a previously seen delete n */ @Override public DeleteResult isDeleted(Cell cell) { @@ -133,12 +130,12 @@ public class ScanDeleteTracker implements DeleteTracker { deleteCell = null; } else { throw new IllegalStateException("isDelete failed: deleteBuffer=" - + Bytes.toStringBinary(deleteCell.getQualifierArray(), - deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) - + ", qualifier=" - + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) - + ", timestamp=" + timestamp + ", comparison result: " + ret); + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), + deleteCell.getQualifierLength()) + + ", qualifier=" + + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + + ", timestamp=" + timestamp + ", comparison result: " + ret); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java index 96d3bab6149..77f4dcee7c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; import java.util.Iterator; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -130,7 +129,7 @@ public abstract class ScanQueryMatcher implements ShipperListener { protected Cell currentRow; protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker columns, - long oldestUnexpiredTS, long now) { + long oldestUnexpiredTS, long now) { this.rowComparator = scanInfo.getComparator(); this.startKey = startKey; this.oldestUnexpiredTS = oldestUnexpiredTS; @@ -139,12 +138,10 @@ public abstract class ScanQueryMatcher implements ShipperListener { } /** - * @param cell - * @param oldestTimestamp - * @return true if the cell is expired + * nn * @return true if the cell is expired */ private static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp, - final long now) { + final long now) { // Look for a TTL tag first. Use it instead of the family setting if // found. If a cell has multiple TTLs, resolve the conflict by using the // first tag encountered. @@ -225,7 +222,6 @@ public abstract class ScanQueryMatcher implements ShipperListener { } } - /** * Determines if the caller should do one of several things: *
<ul>
            @@ -238,7 +234,7 @@ public abstract class ScanQueryMatcher implements ShipperListener { * @param cell KeyValue to check * @return The match code instance. * @throws IOException in case there is an internal consistency problem caused by a data - * corruption. + * corruption. */ public abstract MatchCode match(Cell cell) throws IOException; @@ -271,8 +267,7 @@ public abstract class ScanQueryMatcher implements ShipperListener { protected abstract void reset(); /** - * Set the row when there is change in row - * @param currentRow + * Set the row when there is change in row n */ public void setToNewRow(Cell currentRow) { this.currentRow = currentRow; @@ -300,7 +295,8 @@ public abstract class ScanQueryMatcher implements ShipperListener { if (nextKey != cell) { return nextKey; } - // The cell is at the end of row/family/qualifier, so it is impossible to find any DeleteFamily cells. + // The cell is at the end of row/family/qualifier, so it is impossible to find any + // DeleteFamily cells. // Let us seek to next column. } ColumnCount nextColumn = columns.getColumnHint(); @@ -318,8 +314,8 @@ public abstract class ScanQueryMatcher implements ShipperListener { * @return result of the compare between the indexed key and the key portion of the passed cell */ public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { - return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, 0, - 0, HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, + null, 0, 0, HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } /** @@ -330,8 +326,8 @@ public abstract class ScanQueryMatcher implements ShipperListener { public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { - return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, - 0, 0, HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, + null, 0, 0, HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } else { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(), @@ -365,8 +361,8 @@ public abstract class ScanQueryMatcher implements ShipperListener { } protected static Pair getTrackers(RegionCoprocessorHost host, - NavigableSet columns, ScanInfo scanInfo, long oldestUnexpiredTS, Scan userScan) - throws IOException { + NavigableSet columns, ScanInfo scanInfo, long oldestUnexpiredTS, Scan userScan) + throws IOException { int resultMaxVersion = scanInfo.getMaxVersions(); int maxVersionToCheck = resultMaxVersion; if (userScan != null) { @@ -382,8 +378,7 @@ public abstract class ScanQueryMatcher implements ShipperListener { DeleteTracker deleteTracker; if (scanInfo.isNewVersionBehavior() && (userScan == null || !userScan.isRaw())) { deleteTracker = new NewVersionBehaviorTracker(columns, scanInfo.getComparator(), - scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, - oldestUnexpiredTS); + scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, oldestUnexpiredTS); } else { deleteTracker = new ScanDeleteTracker(scanInfo.getComparator()); } @@ -391,8 +386,8 @@ public abstract class ScanQueryMatcher implements ShipperListener { 
deleteTracker = host.postInstantiateDeleteTracker(deleteTracker); if (deleteTracker instanceof VisibilityScanDeleteTracker && scanInfo.isNewVersionBehavior()) { deleteTracker = new VisibilityNewVersionBehaivorTracker(columns, scanInfo.getComparator(), - scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, - oldestUnexpiredTS); + scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, + oldestUnexpiredTS); } } @@ -402,7 +397,7 @@ public abstract class ScanQueryMatcher implements ShipperListener { columnTracker = (NewVersionBehaviorTracker) deleteTracker; } else if (columns == null || columns.size() == 0) { columnTracker = new ScanWildcardColumnTracker(scanInfo.getMinVersions(), maxVersionToCheck, - oldestUnexpiredTS, scanInfo.getComparator()); + oldestUnexpiredTS, scanInfo.getComparator()); } else { columnTracker = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersionToCheck, oldestUnexpiredTS); @@ -412,7 +407,7 @@ public abstract class ScanQueryMatcher implements ShipperListener { // Used only for testing purposes static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset, int length, - long ttl, byte type, boolean ignoreCount) throws IOException { + long ttl, byte type, boolean ignoreCount) throws IOException { KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, HConstants.EMPTY_BYTE_ARRAY, 0, 0, bytes, offset, length); MatchCode matchCode = columnTracker.checkColumn(kv, type); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java index f2ad1e6b87c..4d84e5a0fdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Keeps track of the columns for a scan if they are not explicitly specified @@ -50,15 +48,16 @@ public class ScanWildcardColumnTracker implements ColumnTracker { private long oldestStamp; private final CellComparator comparator; + /** * Return maxVersions of every row. 
- * @param minVersion Minimum number of versions to keep - * @param maxVersion Maximum number of versions to return + * @param minVersion Minimum number of versions to keep + * @param maxVersion Maximum number of versions to return * @param oldestUnexpiredTS oldest timestamp that has not expired according to the TTL. - * @param comparator used to compare the qualifier of cell + * @param comparator used to compare the qualifier of cell */ - public ScanWildcardColumnTracker(int minVersion, int maxVersion, - long oldestUnexpiredTS, CellComparator comparator) { + public ScanWildcardColumnTracker(int minVersion, int maxVersion, long oldestUnexpiredTS, + CellComparator comparator) { this.maxVersions = maxVersion; this.minVersions = minVersion; this.oldestStamp = oldestUnexpiredTS; @@ -79,7 +78,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { */ @Override public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException { + boolean ignoreCount) throws IOException { if (columnCell == null) { // first iteration. resetCell(cell); @@ -119,8 +118,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { // was incorrectly stored into the store for this one. Throw an exception, // because this might lead to data corruption. throw new IOException("ScanWildcardColumnTracker.checkColumn ran into a column actually " - + "smaller than the previous column: " - + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))); + + "smaller than the previous column: " + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))); } private void resetCell(Cell columnCell) { @@ -186,8 +184,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { } /** - * We can never know a-priori if we are done, so always return false. - * @return false + * We can never know a-priori if we are done, so always return false. n */ @Override public boolean done() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java index 763735e1078..370164c8a0d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for stripe compaction if range drop deletes is used. 
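(The entered()/left() checks reindented in the next hunk bound the rows for which delete markers may be dropped to the half-open range [dropDeletesFromRow, dropDeletesToRow). A standalone sketch of just that boundary rule, using plain Bytes comparisons and hypothetical names instead of the matcher's CellComparator:)

import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical illustration of the half-open drop-deletes row range; not the matcher code itself.
public final class StripeDropDeletesRangeSketch {

  // Empty from-row means "from the start of the table"; empty to-row means "to the end".
  static boolean dropDeletesForRow(byte[] row, byte[] fromRow, byte[] toRow) {
    boolean entered = fromRow.length == 0 || Bytes.compareTo(row, fromRow) >= 0;
    boolean left = toRow.length > 0 && Bytes.compareTo(row, toRow) >= 0;
    return entered && !left; // inside [fromRow, toRow): deletes may be dropped for this row
  }

  public static void main(String[] args) {
    byte[] from = Bytes.toBytes("bbb");
    byte[] to = Bytes.toBytes("ddd");
    System.out.println(dropDeletesForRow(Bytes.toBytes("aaa"), from, to)); // false: before the stripe
    System.out.println(dropDeletesForRow(Bytes.toBytes("ccc"), from, to)); // true: inside the stripe
    System.out.println(dropDeletesForRow(Bytes.toBytes("ddd"), from, to)); // false: at/after the end
  }

  private StripeDropDeletesRangeSketch() {
  }
}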
@@ -35,14 +34,16 @@ public class StripeCompactionScanQueryMatcher extends DropDeletesCompactionScanQ private final byte[] dropDeletesToRow; private enum DropDeletesInOutput { - BEFORE, IN, AFTER + BEFORE, + IN, + AFTER } private DropDeletesInOutput dropDeletesInOutput = DropDeletesInOutput.BEFORE; public StripeCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, - long now, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) { + ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, + long now, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) { super(scanInfo, deletes, columns, readPointToUse, earliestPutTs, oldestUnexpiredTS, now); this.dropDeletesFromRow = dropDeletesFromRow; this.dropDeletesToRow = dropDeletesToRow; @@ -83,13 +84,14 @@ public class StripeCompactionScanQueryMatcher extends DropDeletesCompactionScanQ } private boolean entered() { - return dropDeletesFromRow.length == 0 || rowComparator.compareRows(currentRow, - dropDeletesFromRow, 0, dropDeletesFromRow.length) >= 0; + return dropDeletesFromRow.length == 0 + || rowComparator.compareRows(currentRow, dropDeletesFromRow, 0, dropDeletesFromRow.length) + >= 0; } private boolean left() { return dropDeletesToRow.length > 0 - && rowComparator.compareRows(currentRow, dropDeletesToRow, 0, dropDeletesToRow.length) >= 0; + && rowComparator.compareRows(currentRow, dropDeletesToRow, 0, dropDeletesToRow.length) >= 0; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java index cc994466b33..6c3d002b092 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; @@ -32,6 +30,7 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for user scan. 
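(The UserScanQueryMatcher constructor reformatted in the next hunk sets versionsAfterFilter to scan.getMaxVersions() for raw scans and to Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions()) otherwise, and only when a filter is present. A hypothetical standalone illustration of that cap, not HBase code:)

// Hypothetical illustration of the version cap mirrored from the constructor change; not HBase code.
public final class VersionsAfterFilterSketch {

  static int versionsAfterFilter(boolean rawScan, int scanMaxVersions, int familyMaxVersions,
    boolean hasFilter) {
    if (!hasFilter) {
      return 0; // mirrors the constructor: the field is only meaningful when a filter is set
    }
    // Raw scans honour only the scan's limit; normal scans are also capped by the family's VERSIONS.
    return rawScan ? scanMaxVersions : Math.min(scanMaxVersions, familyMaxVersions);
  }

  public static void main(String[] args) {
    System.out.println(versionsAfterFilter(false, 10, 3, true)); // 3: family setting wins
    System.out.println(versionsAfterFilter(true, 10, 3, true)); // 10: raw scan ignores the family cap
    System.out.println(versionsAfterFilter(false, 2, 3, true)); // 2: the scan asked for fewer
  }

  private VersionsAfterFilterSketch() {
  }
}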
@@ -69,14 +68,14 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { } protected UserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, long oldestUnexpiredTS, long now) { super(createStartKey(scan, scanInfo), scanInfo, columns, oldestUnexpiredTS, now); this.hasNullColumn = hasNullColumn; this.filter = scan.getFilter(); if (this.filter != null) { - this.versionsAfterFilter = - scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(), - scanInfo.getMaxVersions()); + this.versionsAfterFilter = scan.isRaw() + ? scan.getMaxVersions() + : Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions()); } else { this.versionsAfterFilter = 0; } @@ -122,7 +121,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { } protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) - throws IOException { + throws IOException { int tsCmp = tr.compare(timestamp); if (tsCmp > 0) { return MatchCode.SKIP; @@ -148,12 +147,13 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { default: // It means it is INCLUDE, INCLUDE_AND_SEEK_NEXT_COL or INCLUDE_AND_SEEK_NEXT_ROW. assert matchCode == MatchCode.INCLUDE || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL - || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; + || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; break; } - return filter == null ? matchCode : mergeFilterResponse(cell, matchCode, - filter.filterCell(cell)); + return filter == null + ? matchCode + : mergeFilterResponse(cell, matchCode, filter.filterCell(cell)); } /** @@ -188,7 +188,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { * */ private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, - ReturnCode filterResponse) { + ReturnCode filterResponse) { switch (filterResponse) { case SKIP: if (matchCode == MatchCode.INCLUDE) { @@ -226,7 +226,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { // It means it is INCLUDE, INCLUDE_AND_SEEK_NEXT_COL or INCLUDE_AND_SEEK_NEXT_ROW. assert matchCode == MatchCode.INCLUDE || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL - || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; + || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; // We need to make sure that the number of cells returned will not exceed max version in scan // when the match code is INCLUDE* case. 
@@ -276,12 +276,12 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { } public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo, - NavigableSet columns, long oldestUnexpiredTS, long now, - RegionCoprocessorHost regionCoprocessorHost) throws IOException { + NavigableSet columns, long oldestUnexpiredTS, long now, + RegionCoprocessorHost regionCoprocessorHost) throws IOException { boolean hasNullColumn = - !(columns != null && columns.size() != 0 && columns.first().length != 0); - Pair trackers = getTrackers(regionCoprocessorHost, columns, - scanInfo, oldestUnexpiredTS, scan); + !(columns != null && columns.size() != 0 && columns.first().length != 0); + Pair trackers = + getTrackers(regionCoprocessorHost, columns, scanInfo, oldestUnexpiredTS, scan); DeleteTracker deleteTracker = trackers.getFirst(); ColumnTracker columnTracker = trackers.getSecond(); if (scan.isRaw()) { @@ -289,7 +289,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { oldestUnexpiredTS, now); } else { return NormalUserScanQueryMatcher.create(scan, scanInfo, columnTracker, deleteTracker, - hasNullColumn, oldestUnexpiredTS, now); + hasNullColumn, oldestUnexpiredTS, now); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java index a20a001e27c..c9fd11d0680 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,6 @@ package org.apache.hadoop.hbase.regionserver.snapshot; import java.io.IOException; import java.util.List; import java.util.concurrent.Callable; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -34,15 +29,20 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** - * This online snapshot implementation uses the distributed procedure framework to force a - * store flush and then records the hfiles. Its enter stage does nothing. Its leave stage then - * flushes the memstore, builds the region server's snapshot manifest from its hfiles list, and - * copies .regioninfos into the snapshot working directory. 
At the master side, there is an atomic - * rename of the working dir into the proper snapshot directory. + * This online snapshot implementation uses the distributed procedure framework to force a store + * flush and then records the hfiles. Its enter stage does nothing. Its leave stage then flushes the + * memstore, builds the region server's snapshot manifest from its hfiles list, and copies + * .regioninfos into the snapshot working directory. At the master side, there is an atomic rename + * of the working dir into the proper snapshot directory. */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -57,10 +57,9 @@ public class FlushSnapshotSubprocedure extends Subprocedure { // the maximum number of attempts we flush final static int MAX_RETRIES = 3; - public FlushSnapshotSubprocedure(ProcedureMember member, - ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, - List regions, SnapshotDescription snapshot, - SnapshotSubprocedurePool taskManager) { + public FlushSnapshotSubprocedure(ProcedureMember member, ForeignExceptionDispatcher errorListener, + long wakeFrequency, long timeout, List regions, SnapshotDescription snapshot, + SnapshotSubprocedurePool taskManager) { super(member, snapshot.getName(), errorListener, wakeFrequency, timeout); this.snapshot = snapshot; @@ -72,7 +71,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure { } /** - * Callable for adding files to snapshot manifest working dir. Ready for multithreading. + * Callable for adding files to snapshot manifest working dir. Ready for multithreading. */ public static class RegionSnapshotTask implements Callable { private HRegion region; @@ -80,8 +79,8 @@ public class FlushSnapshotSubprocedure extends Subprocedure { private ForeignExceptionDispatcher monitor; private SnapshotDescription snapshotDesc; - public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, - boolean skipFlush, ForeignExceptionDispatcher monitor) { + public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, boolean skipFlush, + ForeignExceptionDispatcher monitor) { this.region = region; this.skipFlush = skipFlush; this.monitor = monitor; @@ -91,21 +90,21 @@ public class FlushSnapshotSubprocedure extends Subprocedure { @Override public Void call() throws Exception { // Taking the region read lock prevents the individual region from being closed while a - // snapshot is in progress. This is helpful but not sufficient for preventing races with - // snapshots that involve multiple regions and regionservers. It is still possible to have + // snapshot is in progress. This is helpful but not sufficient for preventing races with + // snapshots that involve multiple regions and regionservers. It is still possible to have // an interleaving such that globally regions are missing, so we still need the verification // step. LOG.debug("Starting snapshot operation on " + region); region.startRegionOperation(Operation.SNAPSHOT); try { if (skipFlush) { - /* - * This is to take an online-snapshot without force a coordinated flush to prevent pause - * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure - * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be - * turned on/off based on the flush type. - * To minimized the code change, class name is not changed. - */ + /* + * This is to take an online-snapshot without force a coordinated flush to prevent pause + * The snapshot type is defined inside the snapshot description. 
FlushSnapshotSubprocedure + * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be + * turned on/off based on the flush type. To minimized the code change, class name is not + * changed. + */ LOG.debug("take snapshot without flush memstore first"); } else { LOG.debug("Flush Snapshotting region " + region.toString() + " started..."); @@ -155,8 +154,8 @@ public class FlushSnapshotSubprocedure extends Subprocedure { // assert that the taskManager is empty. if (taskManager.hasTasks()) { - throw new IllegalStateException("Attempting to take snapshot " - + ClientSnapshotDescriptionUtils.toString(snapshot) + throw new IllegalStateException( + "Attempting to take snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " but we currently have outstanding tasks"); } @@ -200,7 +199,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure { @Override public void cleanup(Exception e) { LOG.info("Aborting all online FLUSH snapshot subprocedure task threads for '" - + snapshot.getName() + "' due to error", e); + + snapshot.getName() + "' due to error", e); try { taskManager.cancelTasks(); } catch (InterruptedException e1) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index f2df1a0e0a8..2cb5b7e6f47 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -63,11 +62,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot /** * This manager class handles the work dealing with snapshots for a {@link HRegionServer}. *
<p>
            - * This provides the mechanism necessary to kick off a online snapshot specific - * {@link Subprocedure} that is responsible for the regions being served by this region server. - * If any failures occur with the subprocedure, the RegionSeverSnapshotManager's subprocedure - * handler, {@link ProcedureMember}, notifies the master's ProcedureCoordinator to abort all - * others. + * This provides the mechanism necessary to kick off a online snapshot specific {@link Subprocedure} + * that is responsible for the regions being served by this region server. If any failures occur + * with the subprocedure, the RegionSeverSnapshotManager's subprocedure handler, + * {@link ProcedureMember}, notifies the master's ProcedureCoordinator to abort all others. *
<p>
            * On startup, requires {@link #start()} to be called. *
<p>
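The constants re-wrapped in the hunks that follow are read straight from the server Configuration. A minimal sketch of how they resolve, assuming a hypothetical SnapshotTaskConfigExample wrapper; the key names and defaults are the ones visible in those hunks:

import org.apache.hadoop.conf.Configuration;

public class SnapshotTaskConfigExample {

  /** Maximum number of snapshot region tasks that may run concurrently (defaults to 3). */
  static int concurrentSnapshotTasks(Configuration conf) {
    return conf.getInt("hbase.snapshot.region.concurrentTasks", 3);
  }

  /** Millis between checks for snapshot completion or errors (defaults to 500). */
  static long snapshotWakeMillis(Configuration conf) {
    return conf.getLong("hbase.snapshot.region.wakefrequency", 500L);
  }
}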
            @@ -79,7 +77,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { private static final Logger LOG = LoggerFactory.getLogger(RegionServerSnapshotManager.class); /** Maximum number of snapshot region tasks that can run concurrently */ - private static final String CONCURENT_SNAPSHOT_TASKS_KEY = "hbase.snapshot.region.concurrentTasks"; + private static final String CONCURENT_SNAPSHOT_TASKS_KEY = + "hbase.snapshot.region.concurrentTasks"; private static final int DEFAULT_CONCURRENT_SNAPSHOT_TASKS = 3; /** Conf key for number of request threads to start snapshots on regionservers */ @@ -92,8 +91,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** Keep threads alive in request pool for max of 300 seconds */ public static final long SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 5 * 60000; - /** Conf key for millis between checks to see if snapshot completed or if there are errors*/ - public static final String SNAPSHOT_REQUEST_WAKE_MILLIS_KEY = "hbase.snapshot.region.wakefrequency"; + /** Conf key for millis between checks to see if snapshot completed or if there are errors */ + public static final String SNAPSHOT_REQUEST_WAKE_MILLIS_KEY = + "hbase.snapshot.region.wakefrequency"; /** Default amount of time to check for errors while regions finish snapshotting */ private static final long SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT = 500; @@ -103,19 +103,20 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** * Exposed for testing. - * @param conf HBase configuration. - * @param parent parent running the snapshot handler - * @param memberRpc use specified memberRpc instance + * @param conf HBase configuration. + * @param parent parent running the snapshot handler + * @param memberRpc use specified memberRpc instance * @param procMember use specified ProcedureMember */ - RegionServerSnapshotManager(Configuration conf, HRegionServer parent, - ProcedureMemberRpcs memberRpc, ProcedureMember procMember) { + RegionServerSnapshotManager(Configuration conf, HRegionServer parent, + ProcedureMemberRpcs memberRpc, ProcedureMember procMember) { this.rss = parent; this.memberRpcs = memberRpc; this.member = procMember; } - public RegionServerSnapshotManager() {} + public RegionServerSnapshotManager() { + } /** * Start accepting snapshot requests. @@ -128,8 +129,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** * Close this and all running snapshot tasks - * @param force forcefully stop all running tasks - * @throws IOException + * @param force forcefully stop all running tasks n */ @Override public void stop(boolean force) throws IOException { @@ -145,20 +145,16 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** * If in a running state, creates the specified subprocedure for handling an online snapshot. - * - * Because this gets the local list of regions to snapshot and not the set the master had, - * there is a possibility of a race where regions may be missed. This detected by the master in - * the snapshot verification step. - * - * @param snapshot - * @return Subprocedure to submit to the ProcedureMember. + * Because this gets the local list of regions to snapshot and not the set the master had, there + * is a possibility of a race where regions may be missed. This detected by the master in the + * snapshot verification step. n * @return Subprocedure to submit to the ProcedureMember. 
*/ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { // don't run a snapshot if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { - throw new IllegalStateException("Can't start snapshot on RS: " + rss.getServerName() - + ", because stopping/stopped!"); + throw new IllegalStateException( + "Can't start snapshot on RS: " + rss.getServerName() + ", because stopping/stopped!"); } // check to see if this server is hosting any regions for the snapshots @@ -168,64 +164,57 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { involvedRegions = getRegionsToSnapshot(snapshot); } catch (IOException e1) { throw new IllegalStateException("Failed to figure out if we should handle a snapshot - " - + "something has gone awry with the online regions.", e1); + + "something has gone awry with the online regions.", e1); } - // We need to run the subprocedure even if we have no relevant regions. The coordinator + // We need to run the subprocedure even if we have no relevant regions. The coordinator // expects participation in the procedure and without sending message the snapshot attempt // will hang and fail. LOG.debug("Launching subprocedure for snapshot " + snapshot.getName() + " from table " - + snapshot.getTable() + " type " + snapshot.getType()); + + snapshot.getTable() + " type " + snapshot.getType()); ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(snapshot.getName()); Configuration conf = rss.getConfiguration(); - long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, - SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); - long wakeMillis = conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, - SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT); + long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); + long wakeMillis = + conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT); switch (snapshot.getType()) { - case FLUSH: - SnapshotSubprocedurePool taskManager = - new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); - return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, - timeoutMillis, involvedRegions, snapshot, taskManager); - case SKIPFLUSH: + case FLUSH: + SnapshotSubprocedurePool taskManager = + new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); + return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, + involvedRegions, snapshot, taskManager); + case SKIPFLUSH: /* - * This is to take an online-snapshot without force a coordinated flush to prevent pause - * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure + * This is to take an online-snapshot without force a coordinated flush to prevent pause The + * snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be - * turned on/off based on the flush type. - * To minimized the code change, class name is not changed. + * turned on/off based on the flush type. To minimized the code change, class name is not + * changed. 
*/ SnapshotSubprocedurePool taskManager2 = - new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); - return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, - timeoutMillis, involvedRegions, snapshot, taskManager2); + new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); + return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, + involvedRegions, snapshot, taskManager2); - default: - throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType()); + default: + throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType()); } } /** - * Determine if the snapshot should be handled on this server - * - * NOTE: This is racy -- the master expects a list of regionservers. - * This means if a region moves somewhere between the calls we'll miss some regions. - * For example, a region move during a snapshot could result in a region to be skipped or done - * twice. This is manageable because the {@link MasterSnapshotVerifier} will double check the - * region lists after the online portion of the snapshot completes and will explicitly fail the - * snapshot. - * - * @param snapshot - * @return the list of online regions. Empty list is returned if no regions are responsible for - * the given snapshot. - * @throws IOException + * Determine if the snapshot should be handled on this server NOTE: This is racy -- the master + * expects a list of regionservers. This means if a region moves somewhere between the calls we'll + * miss some regions. For example, a region move during a snapshot could result in a region to be + * skipped or done twice. This is manageable because the {@link MasterSnapshotVerifier} will + * double check the region lists after the online portion of the snapshot completes and will + * explicitly fail the snapshot. n * @return the list of online regions. Empty list is returned if + * no regions are responsible for the given snapshot. n */ private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { - List onlineRegions = (List) rss - .getRegions(TableName.valueOf(snapshot.getTable())); + List onlineRegions = + (List) rss.getRegions(TableName.valueOf(snapshot.getTable())); Iterator iterator = onlineRegions.iterator(); // remove the non-default regions while (iterator.hasNext()) { @@ -257,16 +246,13 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** * We use the SnapshotSubprocedurePool, a class specific thread pool instead of - * {@link org.apache.hadoop.hbase.executor.ExecutorService}. - * - * It uses a {@link java.util.concurrent.ExecutorCompletionService} which provides queuing of - * completed tasks which lets us efficiently cancel pending tasks upon the earliest operation - * failures. - * + * {@link org.apache.hadoop.hbase.executor.ExecutorService}. It uses a + * {@link java.util.concurrent.ExecutorCompletionService} which provides queuing of completed + * tasks which lets us efficiently cancel pending tasks upon the earliest operation failures. * HBase's ExecutorService (different from {@link java.util.concurrent.ExecutorService}) isn't - * really built for coordinated tasks where multiple threads as part of one larger task. In - * RS's the HBase Executor services are only used for open and close and not other threadpooled - * operations such as compactions and replication sinks. + * really built for coordinated tasks where multiple threads as part of one larger task. 
In RS's + * the HBase Executor services are only used for open and close and not other threadpooled + * operations such as compactions and replication sinks. */ static class SnapshotSubprocedurePool { private final Abortable abortable; @@ -279,8 +265,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { SnapshotSubprocedurePool(String name, Configuration conf, Abortable abortable) { this.abortable = abortable; // configure the executor service - long keepAlive = conf.getLong( - RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY, + long keepAlive = conf.getLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY, RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS); this.name = name; @@ -295,10 +280,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { } /** - * Submit a task to the pool. - * - * NOTE: all must be submitted before you can safely {@link #waitForOutstandingTasks()}. This - * version does not support issuing tasks from multiple concurrent table snapshots requests. + * Submit a task to the pool. NOTE: all must be submitted before you can safely + * {@link #waitForOutstandingTasks()}. This version does not support issuing tasks from multiple + * concurrent table snapshots requests. */ void submitTask(final Callable task) { Future f = this.taskPool.submit(task); @@ -308,10 +292,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}. * This *must* be called after all tasks are submitted via submitTask. - * - * @return true on success, false otherwise - * @throws InterruptedException - * @throws SnapshotCreationException if the snapshot failed while we were waiting + * @return true on success, false otherwise n * @throws + * SnapshotCreationException if the snapshot failed while we were waiting */ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException { LOG.debug("Waiting for local region snapshots to finish."); @@ -325,9 +307,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { if (!futures.remove(f)) { LOG.warn("unexpected future" + f); } - LOG.debug("Completed " + (i+1) + "/" + sz + " local region snapshots."); + LOG.debug("Completed " + (i + 1) + "/" + sz + " local region snapshots."); } - LOG.debug("Completed " + sz + " local region snapshots."); + LOG.debug("Completed " + sz + " local region snapshots."); return true; } catch (InterruptedException e) { LOG.warn("Got InterruptedException in SnapshotSubprocedurePool", e); @@ -340,7 +322,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { Throwable cause = e.getCause(); if (cause instanceof ForeignException) { LOG.warn("Rethrowing ForeignException from SnapshotSubprocedurePool", e); - throw (ForeignException)e.getCause(); + throw (ForeignException) e.getCause(); } else if (cause instanceof DroppedSnapshotException) { // we have to abort the region server according to contract of flush abortable.abort("Received DroppedSnapshotException, aborting", cause); @@ -354,28 +336,28 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { } /** - * This attempts to cancel out all pending and in progress tasks (interruptions issues) - * @throws InterruptedException + * This attempts to cancel out all pending and in progress tasks 
(interruptions issues) n */ void cancelTasks() throws InterruptedException { Collection> tasks = futures; LOG.debug("cancelling " + tasks.size() + " tasks for snapshot " + name); - for (Future f: tasks) { - // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there + for (Future f : tasks) { + // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there // are places in the HBase code where row/region locks are taken and not released in a - // finally block. Thus we cancel without interrupting. Cancellations will be slower to + // finally block. Thus we cancel without interrupting. Cancellations will be slower to // complete but we won't suffer from unreleased locks due to poor code discipline. f.cancel(false); } // evict remaining tasks and futures from taskPool. futures.clear(); - while (taskPool.poll() != null) {} + while (taskPool.poll() != null) { + } stop(); } /** - * Abruptly shutdown the thread pool. Call when exiting a region server. + * Abruptly shutdown the thread pool. Call when exiting a region server. */ void stop() { if (this.stopped) return; @@ -394,8 +376,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; ZKWatcher zkw = rss.getZooKeeper(); - this.memberRpcs = new ZKProcedureMemberRpcs(zkw, - SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); + this.memberRpcs = + new ZKProcedureMemberRpcs(zkw, SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); // read in the snapshot request configuration properties Configuration conf = rss.getConfiguration(); @@ -403,8 +385,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { int opThreads = conf.getInt(SNAPSHOT_REQUEST_THREADS_KEY, SNAPSHOT_REQUEST_THREADS_DEFAULT); // create the actual snapshot procedure member - ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), - opThreads, keepAlive); + ThreadPoolExecutor pool = + ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new SnapshotSubprocedureBuilder()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java index b1e298dbbe2..161af02c627 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java index 8d9b66e53d2..a669d90a0db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,9 +56,9 @@ class FileBasedStoreFileTracker extends StoreFileTrackerBase { public FileBasedStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) { super(conf, isPrimaryReplica, ctx); - //CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table - //descriptors with the SFT impl specific configs. By the time this happens, the table has no - //regions nor stores yet, so it can't create a proper StoreContext. + // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table + // descriptors with the SFT impl specific configs. By the time this happens, the table has no + // regions nor stores yet, so it can't create a proper StoreContext. if (ctx != null) { backedFile = new StoreFileListFile(ctx); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java index 5a88f99588b..838b3db95c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class InitializeStoreFileTrackerProcedure extends ModifyTableDescriptorProcedure { - public InitializeStoreFileTrackerProcedure(){} + public InitializeStoreFileTrackerProcedure() { + } public InitializeStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tableName) { super(env, tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java index 53a474d3bde..7353f0a08d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java index 1ecfee26e25..0244a09d66a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java index a7d8e703acc..2c3434365d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -71,8 +71,10 @@ public abstract class ModifyStoreFileTrackerProcedure } private void checkDstSFT(String dstSFT) throws DoNotRetryIOException { - if (MigrationStoreFileTracker.class - .isAssignableFrom(StoreFileTrackerFactory.getTrackerClass(dstSFT))) { + if ( + MigrationStoreFileTracker.class + .isAssignableFrom(StoreFileTrackerFactory.getTrackerClass(dstSFT)) + ) { throw new DoNotRetryIOException("Do not need to transfer to " + dstSFT); } } @@ -88,7 +90,9 @@ public abstract class ModifyStoreFileTrackerProcedure } private enum StoreFileTrackerState { - NEED_FINISH_PREVIOUS_MIGRATION_FIRST, NEED_START_MIGRATION, NEED_FINISH_MIGRATION, + NEED_FINISH_PREVIOUS_MIGRATION_FIRST, + NEED_START_MIGRATION, + NEED_FINISH_MIGRATION, ALREADY_FINISHED } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java index 096f38fa36e..ccb153b3b1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java index 42233e4aa7a..c842337c64c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -87,8 +87,8 @@ class StoreFileListFile { try (FSDataInputStream in = fs.open(path)) { int length = in.readInt(); if (length <= 0 || length > MAX_FILE_SIZE) { - throw new IOException("Invalid file length " + length + - ", either less than 0 or greater then max allowed size " + MAX_FILE_SIZE); + throw new IOException("Invalid file length " + length + + ", either less than 0 or greater then max allowed size " + MAX_FILE_SIZE); } data = new byte[length]; in.readFully(data); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java index aabbe8d8749..b0024b73786 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java index 300b6e9bf36..16a4aa60b44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTra import java.io.IOException; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -124,12 +123,14 @@ abstract class StoreFileTrackerBase implements StoreFileTracker { // if data blocks are to be cached on write // during compaction, we should forcefully // cache index and bloom blocks as well - if (cacheCompactedBlocksOnWrite && - totalCompactedFilesSize <= cacheConf.getCacheCompactedBlocksOnWriteThreshold()) { + if ( + cacheCompactedBlocksOnWrite + && totalCompactedFilesSize <= cacheConf.getCacheCompactedBlocksOnWriteThreshold() + ) { writerCacheConf.enableCacheOnWrite(); if (!cacheOnWriteLogged) { - LOG.info("For {} , cacheCompactedBlocksOnWrite is true, hence enabled " + - "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks", this); + LOG.info("For {} , cacheCompactedBlocksOnWrite is true, hence enabled " + + "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks", this); cacheOnWriteLogged = true; } } else { @@ -137,8 +138,8 @@ abstract class StoreFileTrackerBase implements StoreFileTracker { if (totalCompactedFilesSize > cacheConf.getCacheCompactedBlocksOnWriteThreshold()) { // checking condition once again for logging LOG.debug( - "For {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted " + - "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}", + "For {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted " + + "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}", this, totalCompactedFilesSize, cacheConf.getCacheCompactedBlocksOnWriteThreshold()); } } @@ -147,8 +148,8 @@ abstract class StoreFileTrackerBase implements StoreFileTracker { if (shouldCacheDataOnWrite) { writerCacheConf.enableCacheOnWrite(); if (!cacheOnWriteLogged) { - LOG.info("For {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for " + - "Index blocks and Bloom filter blocks", this); + LOG.info("For {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for " + + "Index blocks and Bloom filter blocks", this); cacheOnWriteLogged = true; } } @@ -165,12 +166,9 @@ abstract class StoreFileTrackerBase implements StoreFileTracker { } StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, ctx.getRegionFileSystem().getFileSystem()) - .withOutputDir(outputDir) - .withBloomType(ctx.getBloomFilterType()) - .withMaxKeyCount(params.maxKeyCount()) - .withFavoredNodes(ctx.getFavoredNodes()) - .withFileContext(hFileContext) - .withShouldDropCacheBehind(params.shouldDropBehind()) + 
.withOutputDir(outputDir).withBloomType(ctx.getBloomFilterType()) + .withMaxKeyCount(params.maxKeyCount()).withFavoredNodes(ctx.getFavoredNodes()) + .withFileContext(hFileContext).withShouldDropCacheBehind(params.shouldDropBehind()) .withCompactedFilesSupplier(ctx.getCompactedFilesSupplier()) .withFileStoragePolicy(params.fileStoragePolicy()) .withWriterCreationTracker(params.writerCreationTracker()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java index 85c5ee24f3b..4c7c19eebb9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -57,7 +59,8 @@ public final class StoreFileTrackerFactory { * Maps between configuration names for trackers and implementation classes. */ public enum Trackers { - DEFAULT(DefaultStoreFileTracker.class), FILE(FileBasedStoreFileTracker.class), + DEFAULT(DefaultStoreFileTracker.class), + FILE(FileBasedStoreFileTracker.class), MIGRATION(MigrationStoreFileTracker.class); final Class clazz; @@ -172,13 +175,12 @@ public final class StoreFileTrackerFactory { } public static TableDescriptor updateWithTrackerConfigs(Configuration conf, - TableDescriptor descriptor) { - //CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table - //descriptors with the SFT impl specific configs. By the time this happens, the table has no - //regions nor stores yet, so it can't create a proper StoreContext. + TableDescriptor descriptor) { + // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table + // descriptors with the SFT impl specific configs. By the time this happens, the table has no + // regions nor stores yet, so it can't create a proper StoreContext. 
if (StringUtils.isEmpty(descriptor.getValue(TRACKER_IMPL))) { - StoreFileTracker tracker = - StoreFileTrackerFactory.create(conf, true, null); + StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, null); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(descriptor); return tracker.updateWithTrackerConfigs(builder).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java index 38040bc4f00..fcddb982147 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,8 +41,8 @@ public final class StoreFileTrackerValidationUtils { Class tracker = StoreFileTrackerFactory.getTrackerClass(mergedConf); if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) { throw new DoNotRetryIOException( - "Should not use " + Trackers.MIGRATION + " as store file tracker for new family " + - family.getNameAsString() + " of table " + table.getTableName()); + "Should not use " + Trackers.MIGRATION + " as store file tracker for new family " + + family.getNameAsString() + " of table " + table.getTableName()); } } @@ -51,7 +51,7 @@ public final class StoreFileTrackerValidationUtils { *
<p/>
            * For now, only make sure that we do not use {@link Trackers#MIGRATION} for newly created tables. * @throws IOException when there are check errors, the upper layer should fail the - * {@code CreateTableProcedure}. + * {@code CreateTableProcedure}. */ public static void checkForCreateTable(Configuration conf, TableDescriptor table) throws IOException { @@ -92,7 +92,7 @@ public final class StoreFileTrackerValidationUtils { * *

          * @throws IOException when there are check errors, the upper layer should fail the - * {@code ModifyTableProcedure}. + * {@code ModifyTableProcedure}. */ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTable, TableDescriptor newTable, boolean isTableDisabled) throws IOException { @@ -120,18 +120,18 @@ public final class StoreFileTrackerValidationUtils { Class newSrcTracker = MigrationStoreFileTracker.getSrcTrackerClass(newConf); if (!oldSrcTracker.equals(newSrcTracker)) { - throw new DoNotRetryIOException("The src tracker has been changed from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The src tracker has been changed from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } Class newDstTracker = MigrationStoreFileTracker.getDstTrackerClass(newConf); if (!oldDstTracker.equals(newDstTracker)) { - throw new DoNotRetryIOException("The dst tracker has been changed from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to " + - StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The dst tracker has been changed from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to " + + StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } else { // do not allow changing from MIGRATION to its dst SFT implementation while the table is @@ -140,16 +140,16 @@ public final class StoreFileTrackerValidationUtils { // details. 
if (isTableDisabled) { throw new TableNotEnabledException( - "Should not change store file tracker implementation from " + - StoreFileTrackerFactory.Trackers.MIGRATION.name() + " while table " + - newTable.getTableName() + " is disabled"); + "Should not change store file tracker implementation from " + + StoreFileTrackerFactory.Trackers.MIGRATION.name() + " while table " + + newTable.getTableName() + " is disabled"); } // we can only change to the dst tracker if (!newTracker.equals(oldDstTracker)) { - throw new DoNotRetryIOException("Should migrate tracker to " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got " + - StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("Should migrate tracker to " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got " + + StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } } else { @@ -158,9 +158,9 @@ public final class StoreFileTrackerValidationUtils { // tracker if (!MigrationStoreFileTracker.class.isAssignableFrom(newTracker)) { throw new DoNotRetryIOException( - "Should change to " + Trackers.MIGRATION + " first when migrating from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + "Should change to " + Trackers.MIGRATION + " first when migrating from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } // here we do not check whether the table is disabled, as after changing to MIGRATION, we // still rely on the src SFT implementation to actually load the store files, so there @@ -168,20 +168,20 @@ public final class StoreFileTrackerValidationUtils { Class newSrcTracker = MigrationStoreFileTracker.getSrcTrackerClass(newConf); if (!oldTracker.equals(newSrcTracker)) { - throw new DoNotRetryIOException("Should use src tracker " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + - " when migrating from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("Should use src tracker " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + + " when migrating from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } Class newDstTracker = MigrationStoreFileTracker.getDstTrackerClass(newConf); // the src and dst tracker should not be the same if (newSrcTracker.equals(newDstTracker)) { - throw new DoNotRetryIOException("The src tracker and dst tracker are both " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The src tracker and dst tracker are both " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + + newFamily.getNameAsString() + " of table " + 
newTable.getTableName()); } } } @@ -191,9 +191,9 @@ public final class StoreFileTrackerValidationUtils { /** * Makes sure restoring a snapshot does not break the current SFT setup follows * StoreUtils.createStoreConfiguration - * @param currentTableDesc Existing Table's TableDescriptor + * @param currentTableDesc Existing Table's TableDescriptor * @param snapshotTableDesc Snapshot's TableDescriptor - * @param baseConf Current global configuration + * @param baseConf Current global configuration * @throws RestoreSnapshotException if restore would break the current SFT setup */ public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, @@ -216,9 +216,9 @@ public final class StoreFileTrackerValidationUtils { // restoration is not possible if there is an SFT mismatch if (currentSFT != snapSFT) { throw new RestoreSnapshotException( - "Restoring Snapshot is not possible because " + " the config for column family " + - cfDesc.getNameAsString() + " has incompatible configuration. Current SFT: " + - currentSFT + " SFT from snapshot: " + snapSFT); + "Restoring Snapshot is not possible because " + " the config for column family " + + cfDesc.getNameAsString() + " has incompatible configuration. Current SFT: " + + currentSFT + " SFT from snapshot: " + snapSFT); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java index 45e7267ed26..03c922ce0a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,25 +19,25 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public final class CompactionThroughputControllerFactory { private static final Logger LOG = - LoggerFactory.getLogger(CompactionThroughputControllerFactory.class); + LoggerFactory.getLogger(CompactionThroughputControllerFactory.class); public static final String HBASE_THROUGHPUT_CONTROLLER_KEY = - "hbase.regionserver.throughput.controller"; + "hbase.regionserver.throughput.controller"; private CompactionThroughputControllerFactory() { } - private static final Class - DEFAULT_THROUGHPUT_CONTROLLER_CLASS = PressureAwareCompactionThroughputController.class; + private static final Class DEFAULT_THROUGHPUT_CONTROLLER_CLASS = + PressureAwareCompactionThroughputController.class; // for backward compatibility and may not be supported in the future private static final String DEPRECATED_NAME_OF_PRESSURE_AWARE_THROUGHPUT_CONTROLLER_CLASS = @@ -45,26 +45,24 @@ public final class CompactionThroughputControllerFactory { private static final String DEPRECATED_NAME_OF_NO_LIMIT_THROUGHPUT_CONTROLLER_CLASS = "org.apache.hadoop.hbase.regionserver.compactions.NoLimitThroughputController"; - public static ThroughputController create(RegionServerServices server, - Configuration conf) { + public static ThroughputController create(RegionServerServices server, Configuration conf) { Class clazz = getThroughputControllerClass(conf); ThroughputController controller = ReflectionUtils.newInstance(clazz, conf); controller.setup(server); return controller; } - public static Class getThroughputControllerClass( - Configuration conf) { + public static Class + getThroughputControllerClass(Configuration conf) { String className = - conf.get(HBASE_THROUGHPUT_CONTROLLER_KEY, DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName()); + conf.get(HBASE_THROUGHPUT_CONTROLLER_KEY, DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName()); className = resolveDeprecatedClassName(className); try { return Class.forName(className).asSubclass(ThroughputController.class); } catch (Exception e) { - LOG.warn( - "Unable to load configured throughput controller '" + className - + "', load default throughput controller " - + DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); + LOG.warn("Unable to load configured throughput controller '" + className + + "', load default throughput controller " + DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName() + + " instead", e); return DEFAULT_THROUGHPUT_CONTROLLER_CLASS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java index fc75c583583..5b998e5b72f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java @@ 
-1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public final class FlushThroughputControllerFactory { @@ -31,34 +31,32 @@ public final class FlushThroughputControllerFactory { private static final Logger LOG = LoggerFactory.getLogger(FlushThroughputControllerFactory.class); public static final String HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY = - "hbase.regionserver.flush.throughput.controller"; + "hbase.regionserver.flush.throughput.controller"; - private static final Class - DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS = NoLimitThroughputController.class; + private static final Class< + ? extends ThroughputController> DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS = + NoLimitThroughputController.class; private FlushThroughputControllerFactory() { } - public static ThroughputController create(RegionServerServices server, - Configuration conf) { + public static ThroughputController create(RegionServerServices server, Configuration conf) { Class clazz = getThroughputControllerClass(conf); ThroughputController controller = ReflectionUtils.newInstance(clazz, conf); controller.setup(server); return controller; } - public static Class getThroughputControllerClass( - Configuration conf) { - String className = - conf.get(HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, - DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName()); + public static Class + getThroughputControllerClass(Configuration conf) { + String className = conf.get(HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName()); try { return Class.forName(className).asSubclass(ThroughputController.class); } catch (Exception e) { - LOG.warn( - "Unable to load configured flush throughput controller '" + className - + "', load default throughput controller " - + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); + LOG.warn("Unable to load configured flush throughput controller '" + className + + "', load default throughput controller " + + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); return DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java index 4b1b2610852..31a424d5e99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class NoLimitThroughputController implements ThroughputController { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java index 1c3952ed049..4ccabde07f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; /** * A throughput controller which uses the follow schema to limit throughput @@ -42,28 +42,28 @@ import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class PressureAwareCompactionThroughputController extends PressureAwareThroughputController { - private final static Logger LOG = LoggerFactory - .getLogger(PressureAwareCompactionThroughputController.class); + private final static Logger LOG = + LoggerFactory.getLogger(PressureAwareCompactionThroughputController.class); public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND = - "hbase.hstore.compaction.throughput.higher.bound"; + "hbase.hstore.compaction.throughput.higher.bound"; private static final long DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND = - 100L * 1024 * 1024; + 100L * 1024 * 1024; public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND = - "hbase.hstore.compaction.throughput.lower.bound"; + "hbase.hstore.compaction.throughput.lower.bound"; private static final long DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND = - 50L * 1024 * 1024; + 50L * 1024 * 1024; public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK = - "hbase.hstore.compaction.throughput.offpeak"; + "hbase.hstore.compaction.throughput.offpeak"; private static final long DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK = Long.MAX_VALUE; public static final String HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD = - "hbase.hstore.compaction.throughput.tune.period"; + "hbase.hstore.compaction.throughput.tune.period"; private static final int 
DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD = 60 * 1000; @@ -75,8 +75,8 @@ public class PressureAwareCompactionThroughputController extends PressureAwareTh @Override public void setup(final RegionServerServices server) { - server.getChoreService().scheduleChore( - new ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) { + server.getChoreService() + .scheduleChore(new ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) { @Override protected void chore() { @@ -95,17 +95,16 @@ public class PressureAwareCompactionThroughputController extends PressureAwareTh } else { // compactionPressure is between 0.0 and 1.0, we use a simple linear formula to // calculate the throughput limitation. - maxThroughputToSet = - maxThroughputLowerBound + (maxThroughputUpperBound - maxThroughputLowerBound) - * compactionPressure; + maxThroughputToSet = maxThroughputLowerBound + + (maxThroughputUpperBound - maxThroughputLowerBound) * compactionPressure; } if (LOG.isDebugEnabled()) { if (Math.abs(maxThroughputToSet - getMaxThroughput()) < .0000001) { LOG.debug("CompactionPressure is " + compactionPressure + ", tune throughput to " - + throughputDesc(maxThroughputToSet)); + + throughputDesc(maxThroughputToSet)); } else if (LOG.isTraceEnabled()) { LOG.trace("CompactionPressure is " + compactionPressure + ", keep throughput throttling to " - + throughputDesc(maxThroughputToSet)); + + throughputDesc(maxThroughputToSet)); } } this.setMaxThroughput(maxThroughputToSet); @@ -117,33 +116,27 @@ public class PressureAwareCompactionThroughputController extends PressureAwareTh if (conf == null) { return; } - this.maxThroughputUpperBound = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND); - this.maxThroughputLowerBound = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND); - this.maxThroughputOffpeak = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK); + this.maxThroughputUpperBound = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND); + this.maxThroughputLowerBound = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND); + this.maxThroughputOffpeak = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK); this.offPeakHours = OffPeakHours.getInstance(conf); - this.controlPerSize = - conf.getLong(HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL, - this.maxThroughputLowerBound); + this.controlPerSize = conf.getLong(HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL, + this.maxThroughputLowerBound); this.setMaxThroughput(this.maxThroughputLowerBound); - this.tuningPeriod = - getConf().getInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, - DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD); + this.tuningPeriod = getConf().getInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, + DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD); LOG.info("Compaction throughput configurations, higher bound: " - + throughputDesc(maxThroughputUpperBound) + ", lower bound " - + throughputDesc(maxThroughputLowerBound) + ", off peak: " - + throughputDesc(maxThroughputOffpeak) + ", tuning period: " + tuningPeriod + " ms"); + + throughputDesc(maxThroughputUpperBound) + ", lower 
bound " + + throughputDesc(maxThroughputLowerBound) + ", off peak: " + + throughputDesc(maxThroughputOffpeak) + ", tuning period: " + tuningPeriod + " ms"); } @Override public String toString() { return "DefaultCompactionThroughputController [maxThroughput=" - + throughputDesc(getMaxThroughput()) + ", activeCompactions=" + activeOperations.size() - + "]"; + + throughputDesc(getMaxThroughput()) + ", activeCompactions=" + activeOperations.size() + "]"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java index 51e7b42bf9d..4720d26d231 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; /** * A throughput controller which uses the follow schema to limit throughput @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; *
 * <li>If flush pressure is greater than or equal to 1.0, no limitation.</li>
 * <li>In normal case, the max throughput is tuned between
 * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND} and
- * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND}, using the formula "lower +
- * (upper - lower) * flushPressure", where flushPressure is in range [0.0, 1.0)</li>
+ * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND}, using the formula "lower + (upper -
+ * lower) * flushPressure", where flushPressure is in range [0.0, 1.0)</li>
 * </ul>
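The formula quoted above is a plain linear interpolation between the two bounds. Below is a small worked example using the default flush bounds defined in this same file (100 MB/s lower, 200 MB/s upper); the class name and the sample pressure value are illustrative only.

// Worked example of "lower + (upper - lower) * flushPressure" with the default flush bounds.
public final class FlushTuningExample {
  public static void main(String[] args) {
    double lower = 100L * 1024 * 1024; // default lower bound, 100 MB/s
    double upper = 200L * 1024 * 1024; // default upper bound, 200 MB/s
    double flushPressure = 0.25;       // sample pressure in [0.0, 1.0)
    double maxThroughput = lower + (upper - lower) * flushPressure;
    // Prints "125.0 MB/s": pressure 0.0 keeps the lower bound, values near 1.0 approach the upper bound.
    System.out.println(maxThroughput / (1024 * 1024) + " MB/s");
  }
}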
        * @see org.apache.hadoop.hbase.regionserver.HRegionServer#getFlushPressure() */ @@ -41,31 +41,31 @@ import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; public class PressureAwareFlushThroughputController extends PressureAwareThroughputController { private static final Logger LOG = - LoggerFactory.getLogger(PressureAwareFlushThroughputController.class); + LoggerFactory.getLogger(PressureAwareFlushThroughputController.class); public static final String HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND = - "hbase.hstore.flush.throughput.upper.bound"; + "hbase.hstore.flush.throughput.upper.bound"; private static final long DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND = - 200L * 1024 * 1024; + 200L * 1024 * 1024; public static final String HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND = - "hbase.hstore.flush.throughput.lower.bound"; + "hbase.hstore.flush.throughput.lower.bound"; private static final long DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND = - 100L * 1024 * 1024; + 100L * 1024 * 1024; public static final String HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD = - "hbase.hstore.flush.throughput.tune.period"; + "hbase.hstore.flush.throughput.tune.period"; private static final int DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD = 20 * 1000; // check flush throughput every this size public static final String HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL = - "hbase.hstore.flush.throughput.control.check.interval"; + "hbase.hstore.flush.throughput.control.check.interval"; private static final long DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL = - 10L * 1024 * 1024;// 10MB + 10L * 1024 * 1024;// 10MB @Override public void setup(final RegionServerServices server) { @@ -87,13 +87,12 @@ public class PressureAwareFlushThroughputController extends PressureAwareThrough } else { // flushPressure is between 0.0 and 1.0, we use a simple linear formula to // calculate the throughput limitation. 
- maxThroughputToSet = - maxThroughputLowerBound + (maxThroughputUpperBound - maxThroughputLowerBound) - * flushPressure; + maxThroughputToSet = maxThroughputLowerBound + + (maxThroughputUpperBound - maxThroughputLowerBound) * flushPressure; } if (LOG.isDebugEnabled()) { LOG.debug("flushPressure is " + flushPressure + ", tune flush throughput to " - + throughputDesc(maxThroughputToSet)); + + throughputDesc(maxThroughputToSet)); } this.setMaxThroughput(maxThroughputToSet); } @@ -104,28 +103,24 @@ public class PressureAwareFlushThroughputController extends PressureAwareThrough if (conf == null) { return; } - this.maxThroughputUpperBound = - conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, - DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND); - this.maxThroughputLowerBound = - conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, - DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND); + this.maxThroughputUpperBound = conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, + DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND); + this.maxThroughputLowerBound = conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, + DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND); this.offPeakHours = OffPeakHours.getInstance(conf); - this.controlPerSize = - conf.getLong(HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL, - DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL); + this.controlPerSize = conf.getLong(HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL, + DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL); this.setMaxThroughput(this.maxThroughputLowerBound); - this.tuningPeriod = - getConf().getInt(HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD, - DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD); + this.tuningPeriod = getConf().getInt(HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD, + DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD); LOG.info("Flush throughput configurations, upper bound: " - + throughputDesc(maxThroughputUpperBound) + ", lower bound " - + throughputDesc(maxThroughputLowerBound) + ", tuning period: " + tuningPeriod + " ms"); + + throughputDesc(maxThroughputUpperBound) + ", lower bound " + + throughputDesc(maxThroughputLowerBound) + ", tuning period: " + tuningPeriod + " ms"); } @Override public String toString() { return "DefaultFlushController [maxThroughput=" + throughputDesc(getMaxThroughput()) - + ", activeFlushNumber=" + activeOperations.size() + "]"; + + ", activeFlushNumber=" + activeOperations.size() + "]"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java index 306df0b9d5a..c55b507cb51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,22 +19,21 @@ package org.apache.hadoop.hbase.regionserver.throttle; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public abstract class PressureAwareThroughputController extends Configured implements - ThroughputController, Stoppable { +public abstract class PressureAwareThroughputController extends Configured + implements ThroughputController, Stoppable { private static final Logger LOG = - LoggerFactory.getLogger(PressureAwareThroughputController.class); + LoggerFactory.getLogger(PressureAwareThroughputController.class); /** * Stores the information of one controlled compaction. @@ -77,7 +76,8 @@ public abstract class PressureAwareThroughputController extends Configured imple private volatile double maxThroughput; private volatile double maxThroughputPerOperation; - protected final ConcurrentMap activeOperations = new ConcurrentHashMap<>(); + protected final ConcurrentMap activeOperations = + new ConcurrentHashMap<>(); @Override public abstract void setup(final RegionServerServices server); @@ -123,10 +123,10 @@ public abstract class PressureAwareThroughputController extends Configured imple if (now - operation.lastLogTime > 5L * 1000) { LOG.debug("deltaSize: " + deltaSize + " bytes; elapseTime: " + elapsedTime + " ns"); LOG.debug(opName + " sleep=" + sleepTime + "ms because current throughput is " - + throughputDesc(deltaSize, elapsedTime) + ", max allowed is " - + throughputDesc(maxThroughputPerOperation) + ", already slept " - + operation.numberOfSleeps + " time(s) and total slept time is " - + operation.totalSleepTime + " ms till now."); + + throughputDesc(deltaSize, elapsedTime) + ", max allowed is " + + throughputDesc(maxThroughputPerOperation) + ", already slept " + + operation.numberOfSleeps + " time(s) and total slept time is " + + operation.totalSleepTime + " ms till now."); operation.lastLogTime = now; } } @@ -142,11 +142,10 @@ public abstract class PressureAwareThroughputController extends Configured imple ActiveOperation operation = activeOperations.remove(opName); maxThroughputPerOperation = getMaxThroughput() / activeOperations.size(); long elapsedTime = EnvironmentEdgeManager.currentTime() - operation.startTime; - LOG.info(opName + " average throughput is " - + throughputDesc(operation.totalSize, elapsedTime) + ", slept " - + operation.numberOfSleeps + " time(s) and total slept time is " - + operation.totalSleepTime + " ms. " + activeOperations.size() - + " active operations remaining, total limit is " + throughputDesc(getMaxThroughput())); + LOG.info(opName + " average throughput is " + throughputDesc(operation.totalSize, elapsedTime) + + ", slept " + operation.numberOfSleeps + " time(s) and total slept time is " + + operation.totalSleepTime + " ms. 
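The control path in PressureAwareThroughputController throttles by sleeping: it tracks each operation's bytes and elapsed time, and when the observed rate exceeds that operation's share of the limit it sleeps off the difference, logging the totals seen above. A stripped-down sketch of that idea follows; the class, its fields, and the fixed rate are illustrative and not the patch's code.

// Stripped-down sketch of sleep-based throttling: if sizeBytes arrived in elapsedMs and that
// exceeds the allowed rate, sleep long enough to bring the effective rate back under the limit.
// The real controller divides the limit among active operations and retunes it from pressure.
public final class SleepThrottleSketch {

  private final double maxBytesPerSec;

  public SleepThrottleSketch(double maxBytesPerSec) {
    this.maxBytesPerSec = maxBytesPerSec;
  }

  public long control(long sizeBytes, long elapsedMs) throws InterruptedException {
    long minTimeMs = (long) (sizeBytes * 1000.0 / maxBytesPerSec); // time this much data may take
    long sleepMs = minTimeMs - elapsedMs;
    if (sleepMs > 0) {
      Thread.sleep(sleepMs); // slow the caller down to the configured rate
      return sleepMs;
    }
    return 0;
  }
}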
" + activeOperations.size() + + " active operations remaining, total limit is " + throughputDesc(getMaxThroughput())); } private volatile boolean stopped = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java index 5a470976da7..1ab38adb97b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java @@ -47,8 +47,8 @@ import org.slf4j.LoggerFactory; * 2. parallelPutToStoreThreadLimit: The amount of concurrency allowed to write puts to a Store at * the same time. *

        - * 3. parallelPreparePutToStoreThreadLimit: The amount of concurrency allowed to - * prepare writing puts to a Store at the same time. + * 3. parallelPreparePutToStoreThreadLimit: The amount of concurrency allowed to prepare writing + * puts to a Store at the same time. *
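Both limits described above are enforced per column family in the checking hunk later in this file: a prepare-put counter is incremented up front and compared against parallelPreparePutToStoreThreadLimit, the store's current parallel put count is compared against parallelPutToStoreThreadLimit, and RegionTooBusyException is thrown when either is exceeded. A simplified sketch of the per-family accounting follows; HotnessSketch and its fixed limit are illustrative only.

import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.util.Bytes;

// Simplified, illustrative sketch of per-column-family prepare-put accounting, mirroring the
// preparePutToStoreMap pattern in StoreHotnessProtector.
public class HotnessSketch {

  private final ConcurrentMap<byte[], AtomicInteger> preparePutCounts =
    new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
  private final int preparePutLimit = 20; // illustrative limit

  /** Returns false when the family is too busy, the case that raises RegionTooBusyException. */
  public boolean tryPreparePut(byte[] family) {
    int inFlight =
      preparePutCounts.computeIfAbsent(family, key -> new AtomicInteger()).incrementAndGet();
    if (inFlight > preparePutLimit) {
      preparePutCounts.get(family).decrementAndGet(); // roll back the reservation
      return false;
    }
    return true;
  }

  public void finishPreparePut(byte[] family) {
    AtomicInteger counter = preparePutCounts.get(family);
    if (counter != null) {
      counter.decrementAndGet();
    }
  }
}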

        * Notice that our writing pipeline includes three key process: MVCC acquire, writing MemStore, and * WAL. Only limit the concurrency of writing puts to Store(parallelPutToStoreThreadLimit) is not @@ -68,18 +68,18 @@ public class StoreHotnessProtector { private volatile int parallelPreparePutToStoreThreadLimit; public final static String PARALLEL_PUT_STORE_THREADS_LIMIT = - "hbase.region.store.parallel.put.limit"; + "hbase.region.store.parallel.put.limit"; public final static String PARALLEL_PREPARE_PUT_STORE_MULTIPLIER = - "hbase.region.store.parallel.prepare.put.multiplier"; + "hbase.region.store.parallel.prepare.put.multiplier"; private final static int DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT = 0; private volatile int parallelPutToStoreThreadLimitCheckMinColumnCount; public final static String PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_COUNT = - "hbase.region.store.parallel.put.limit.min.column.count"; + "hbase.region.store.parallel.put.limit.min.column.count"; private final static int DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM = 100; private final static int DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER = 2; private final Map preparePutToStoreMap = - new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR); private final Region region; public StoreHotnessProtector(Region region, Configuration conf) { @@ -89,12 +89,12 @@ public class StoreHotnessProtector { public void init(Configuration conf) { this.parallelPutToStoreThreadLimit = - conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT, DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT); + conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT, DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT); this.parallelPreparePutToStoreThreadLimit = conf.getInt(PARALLEL_PREPARE_PUT_STORE_MULTIPLIER, - DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER) * parallelPutToStoreThreadLimit; + DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER) * parallelPutToStoreThreadLimit; this.parallelPutToStoreThreadLimitCheckMinColumnCount = - conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_COUNT, - DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM); + conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_COUNT, + DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM); if (!isEnable()) { logDisabledMessageOnce(); @@ -102,14 +102,14 @@ public class StoreHotnessProtector { } /** - * {@link #init(Configuration)} is called for every Store that opens on a RegionServer. - * Here we make a lightweight attempt to log this message once per RegionServer, rather than - * per-Store. The goal is just to draw attention to this feature if debugging overload due to - * heavy writes. + * {@link #init(Configuration)} is called for every Store that opens on a RegionServer. Here we + * make a lightweight attempt to log this message once per RegionServer, rather than per-Store. + * The goal is just to draw attention to this feature if debugging overload due to heavy writes. */ private static void logDisabledMessageOnce() { if (!loggedDisableMessage) { - LOG.info("StoreHotnessProtector is disabled. Set {} > 0 to enable, " + LOG.info( + "StoreHotnessProtector is disabled. 
Set {} > 0 to enable, " + "which may help mitigate load under heavy write pressure.", PARALLEL_PUT_STORE_THREADS_LIMIT); loggedDisableMessage = true; @@ -139,38 +139,39 @@ public class StoreHotnessProtector { if (e.getValue().size() > this.parallelPutToStoreThreadLimitCheckMinColumnCount) { - //we need to try to add #preparePutCount at first because preparePutToStoreMap will be - //cleared when changing the configuration. + // we need to try to add #preparePutCount at first because preparePutToStoreMap will be + // cleared when changing the configuration. int preparePutCount = preparePutToStoreMap - .computeIfAbsent(e.getKey(), key -> new AtomicInteger()) - .incrementAndGet(); + .computeIfAbsent(e.getKey(), key -> new AtomicInteger()).incrementAndGet(); boolean storeAboveThread = store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit; boolean storeAbovePrePut = preparePutCount > this.parallelPreparePutToStoreThreadLimit; if (storeAboveThread || storeAbovePrePut) { - tooBusyStore = (tooBusyStore == null ? - store.getColumnFamilyName() : - tooBusyStore + "," + store.getColumnFamilyName()); + tooBusyStore = (tooBusyStore == null + ? store.getColumnFamilyName() + : tooBusyStore + "," + store.getColumnFamilyName()); } aboveParallelThreadLimit |= storeAboveThread; aboveParallelPrePutLimit |= storeAbovePrePut; if (LOG.isTraceEnabled()) { LOG.trace(store.getColumnFamilyName() + ": preparePutCount=" + preparePutCount - + "; currentParallelPutCount=" + store.getCurrentParallelPutCount()); + + "; currentParallelPutCount=" + store.getCurrentParallelPutCount()); } } } if (aboveParallelThreadLimit || aboveParallelPrePutLimit) { - String msg = - "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":" + tooBusyStore - + " Above " - + (aboveParallelThreadLimit ? "parallelPutToStoreThreadLimit(" - + this.parallelPutToStoreThreadLimit + ")" : "") - + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "") - + (aboveParallelPrePutLimit ? "parallelPreparePutToStoreThreadLimit(" - + this.parallelPreparePutToStoreThreadLimit + ")" : ""); + String msg = "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":" + + tooBusyStore + " Above " + + (aboveParallelThreadLimit + ? "parallelPutToStoreThreadLimit(" + this.parallelPutToStoreThreadLimit + ")" + : "") + + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "") + + (aboveParallelPrePutLimit + ? "parallelPreparePutToStoreThreadLimit(" + this.parallelPreparePutToStoreThreadLimit + + ")" + : ""); LOG.trace(msg); throw new RegionTooBusyException(msg); } @@ -200,11 +201,10 @@ public class StoreHotnessProtector { public String toString() { return "StoreHotnessProtector, parallelPutToStoreThreadLimit=" - + this.parallelPutToStoreThreadLimit + " ; minColumnNum=" - + this.parallelPutToStoreThreadLimitCheckMinColumnCount + " ; preparePutThreadLimit=" - + this.parallelPreparePutToStoreThreadLimit + " ; hotProtect now " + (this.isEnable() ? - "enable" : - "disable"); + + this.parallelPutToStoreThreadLimit + " ; minColumnNum=" + + this.parallelPutToStoreThreadLimitCheckMinColumnCount + " ; preparePutThreadLimit=" + + this.parallelPreparePutToStoreThreadLimit + " ; hotProtect now " + + (this.isEnable() ? 
"enable" : "disable"); } public boolean isEnable() { @@ -217,5 +217,5 @@ public class StoreHotnessProtector { } public static final long FIXED_SIZE = - ClassSize.align(ClassSize.OBJECT + 2 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT); + ClassSize.align(ClassSize.OBJECT + 2 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java index ad65c59436e..466dc96757d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.throttle; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.yetus.audience.InterfaceAudience; @@ -36,7 +35,7 @@ public final class ThroughputControlUtil { /** * Generate a name for throttling, to prevent name conflict when multiple IO operation running * parallel on the same store. - * @param store the Store instance on which IO operation is happening + * @param store the Store instance on which IO operation is happening * @param opName Name of the IO operation, e.g. "flush", "compaction", etc. * @return The name for throttling */ @@ -49,8 +48,8 @@ public final class ThroughputControlUtil { break; } } - return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + - store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + - NAME_DELIMITER + counter; + return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + + store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + + NAME_DELIMITER + counter; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java index 707d02d5f92..284aa6814dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,12 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.yetus.audience.InterfaceAudience; /** - * A utility that constrains the total throughput of one or more simultaneous flows by - * sleeping when necessary. + * A utility that constrains the total throughput of one or more simultaneous flows by sleeping when + * necessary. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public interface ThroughputController extends Stoppable { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 2707f929087..7b5b31bc455 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -122,7 +122,7 @@ public abstract class AbstractFSWAL implements WAL { private static final Logger LOG = LoggerFactory.getLogger(AbstractFSWAL.class); - protected static final String SLOW_SYNC_TIME_MS ="hbase.regionserver.wal.slowsync.ms"; + protected static final String SLOW_SYNC_TIME_MS = "hbase.regionserver.wal.slowsync.ms"; protected static final int DEFAULT_SLOW_SYNC_TIME_MS = 100; // in ms protected static final String ROLL_ON_SYNC_TIME_MS = "hbase.regionserver.wal.roll.on.sync.ms"; protected static final int DEFAULT_ROLL_ON_SYNC_TIME_MS = 10000; // in ms @@ -281,7 +281,8 @@ public abstract class AbstractFSWAL implements WAL { /** * Map the encoded region name to the highest sequence id. - *

<p/>Contains all the regions it has an entry for.
+ * <p/>
        + * Contains all the regions it has an entry for. */ public final Map encodedName2HighestSequenceId; @@ -339,7 +340,7 @@ public abstract class AbstractFSWAL implements WAL { checkNotNull(fileName, "file name can't be null"); if (!ourFiles.accept(fileName)) { throw new IllegalArgumentException( - "The log file " + fileName + " doesn't belong to this WAL. (" + toString() + ")"); + "The log file " + fileName + " doesn't belong to this WAL. (" + toString() + ")"); } final String fileNameString = fileName.toString(); String chompedPath = fileNameString.substring(prefixPathStr.length(), @@ -358,8 +359,7 @@ public abstract class AbstractFSWAL implements WAL { // be stuck and make no progress if the buffer is filled with appends only and there is no // sync. If no sync, then the handlers will be outstanding just waiting on sync completion // before they return. - int preallocatedEventCount = - this.conf.getInt(RING_BUFFER_SLOT_COUNT, 1024 * 16); + int preallocatedEventCount = this.conf.getInt(RING_BUFFER_SLOT_COUNT, 1024 * 16); checkArgument(preallocatedEventCount >= 0, RING_BUFFER_SLOT_COUNT + " must > 0"); int floor = Integer.highestOneBit(preallocatedEventCount); if (floor == preallocatedEventCount) { @@ -373,17 +373,16 @@ public abstract class AbstractFSWAL implements WAL { } protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) - throws FailedLogCloseException, IOException { + final String archiveDir, final Configuration conf, final List listeners, + final boolean failIfWALExists, final String prefix, final String suffix) + throws FailedLogCloseException, IOException { this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); } protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Path rootDir, - final String logDir, final String archiveDir, final Configuration conf, - final List listeners, final boolean failIfWALExists, final String prefix, - final String suffix) - throws FailedLogCloseException, IOException { + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) throws FailedLogCloseException, IOException { this.fs = fs; this.walDir = new Path(rootDir, logDir); this.walArchiveDir = new Path(rootDir, archiveDir); @@ -405,13 +404,13 @@ public abstract class AbstractFSWAL implements WAL { prefix == null || prefix.isEmpty() ? "wal" : URLEncoder.encode(prefix, "UTF8"); // we only correctly differentiate suffices when numeric ones start with '.' if (suffix != null && !(suffix.isEmpty()) && !(suffix.startsWith(WAL_FILE_NAME_DELIMITER))) { - throw new IllegalArgumentException("WAL suffix must start with '" + WAL_FILE_NAME_DELIMITER + - "' but instead was '" + suffix + "'"); + throw new IllegalArgumentException("WAL suffix must start with '" + WAL_FILE_NAME_DELIMITER + + "' but instead was '" + suffix + "'"); } // Now that it exists, set the storage policy for the entire directory of wal files related to // this FSHLog instance String storagePolicy = - conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); + conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); CommonFSUtils.setStoragePolicy(fs, this.walDir, storagePolicy); this.walFileSuffix = (suffix == null) ? 
"" : URLEncoder.encode(suffix, "UTF8"); this.prefixPathStr = new Path(walDir, walFilePrefix + WAL_FILE_NAME_DELIMITER).toString(); @@ -427,7 +426,7 @@ public abstract class AbstractFSWAL implements WAL { if (walFileSuffix.isEmpty()) { // in the case of the null suffix, we need to ensure the filename ends with a timestamp. return org.apache.commons.lang3.StringUtils - .isNumeric(fileNameString.substring(prefixPathStr.length())); + .isNumeric(fileNameString.substring(prefixPathStr.length())); } else if (!fileNameString.endsWith(walFileSuffix)) { return false; } @@ -463,18 +462,18 @@ public abstract class AbstractFSWAL implements WAL { this.logrollsize = (long) (this.blocksize * multiplier); this.maxLogs = conf.getInt(MAX_LOGS, Math.max(32, calculateMaxLogFiles(conf, logrollsize))); - LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" + - StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", suffix=" + - walFileSuffix + ", logDir=" + this.walDir + ", archiveDir=" + this.walArchiveDir + - ", maxLogs=" + this.maxLogs); + LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" + + StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", suffix=" + + walFileSuffix + ", logDir=" + this.walDir + ", archiveDir=" + this.walArchiveDir + + ", maxLogs=" + this.maxLogs); this.slowSyncNs = TimeUnit.MILLISECONDS.toNanos(conf.getInt(SLOW_SYNC_TIME_MS, conf.getInt("hbase.regionserver.hlog.slowsync.ms", DEFAULT_SLOW_SYNC_TIME_MS))); - this.rollOnSyncNs = TimeUnit.MILLISECONDS.toNanos(conf.getInt(ROLL_ON_SYNC_TIME_MS, - DEFAULT_ROLL_ON_SYNC_TIME_MS)); - this.slowSyncRollThreshold = conf.getInt(SLOW_SYNC_ROLL_THRESHOLD, - DEFAULT_SLOW_SYNC_ROLL_THRESHOLD); - this.slowSyncCheckInterval = conf.getInt(SLOW_SYNC_ROLL_INTERVAL_MS, - DEFAULT_SLOW_SYNC_ROLL_INTERVAL_MS); + this.rollOnSyncNs = TimeUnit.MILLISECONDS + .toNanos(conf.getInt(ROLL_ON_SYNC_TIME_MS, DEFAULT_ROLL_ON_SYNC_TIME_MS)); + this.slowSyncRollThreshold = + conf.getInt(SLOW_SYNC_ROLL_THRESHOLD, DEFAULT_SLOW_SYNC_ROLL_THRESHOLD); + this.slowSyncCheckInterval = + conf.getInt(SLOW_SYNC_ROLL_INTERVAL_MS, DEFAULT_SLOW_SYNC_ROLL_INTERVAL_MS); this.walSyncTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf.getLong(WAL_SYNC_TIMEOUT_MS, conf.getLong("hbase.regionserver.hlog.sync.timeout", DEFAULT_WAL_SYNC_TIMEOUT_MS))); this.syncFutureCache = new SyncFutureCache(conf); @@ -572,10 +571,10 @@ public abstract class AbstractFSWAL implements WAL { protected abstract void doSync(boolean forceSync) throws IOException; protected abstract void doSync(long txid, boolean forceSync) throws IOException; + /** * This is a convenience method that computes a new filename with a given file-number. - * @param filenum to use - * @return Path + * @param filenum to use n */ protected Path computeFilename(final long filenum) { if (filenum < 0) { @@ -587,8 +586,7 @@ public abstract class AbstractFSWAL implements WAL { /** * This is a convenience method that computes a new filename with a given using the current WAL - * file-number - * @return Path + * file-number n */ public Path getCurrentFileName() { return computeFilename(this.filenum.get()); @@ -621,7 +619,7 @@ public abstract class AbstractFSWAL implements WAL { * Tell listeners about pre log roll. 
*/ private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath) - throws IOException { + throws IOException { coprocessorHost.preWALRoll(oldPath, newPath); if (!this.listeners.isEmpty()) { @@ -635,7 +633,7 @@ public abstract class AbstractFSWAL implements WAL { * Tell listeners about post log roll. */ private void tellListenersAboutPostLogRoll(final Path oldPath, final Path newPath) - throws IOException { + throws IOException { if (!this.listeners.isEmpty()) { for (WALActionsListener i : this.listeners) { i.postLogRoll(oldPath, newPath); @@ -659,9 +657,9 @@ public abstract class AbstractFSWAL implements WAL { } /** - * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, - * check the first (oldest) WAL, and return those regions which should be flushed so that - * it can be let-go/'archived'. + * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, check the + * first (oldest) WAL, and return those regions which should be flushed so that it can be + * let-go/'archived'. * @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file. */ Map> findRegionsToForceFlush() throws IOException { @@ -684,9 +682,9 @@ public abstract class AbstractFSWAL implements WAL { } listForPrint.add(Bytes.toStringBinary(r.getKey()) + "[" + families.toString() + "]"); } - LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs + - "; forcing (partial) flush of " + regions.size() + " region(s): " + - StringUtils.join(",", listForPrint)); + LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs + + "; forcing (partial) flush of " + regions.size() + " region(s): " + + StringUtils.join(",", listForPrint)); } return regions; } @@ -740,8 +738,7 @@ public abstract class AbstractFSWAL implements WAL { break; } } else { - LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, - e); + LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, e); } retry++; } @@ -805,8 +802,8 @@ public abstract class AbstractFSWAL implements WAL { *

      • In the case of closing out this FSHLog with no further use newPath and nextWriter will be * null.
      • *
      - * @param oldPath may be null - * @param newPath may be null + * @param oldPath may be null + * @param newPath may be null * @param nextWriter may be null * @return the passed in newPath * @throws IOException if there is a problem flushing or closing the underlying FS @@ -871,8 +868,8 @@ public abstract class AbstractFSWAL implements WAL { newPath = replaceWriter(oldPath, newPath, nextWriter); tellListenersAboutPostLogRoll(oldPath, newPath); if (LOG.isDebugEnabled()) { - LOG.debug("Create new " + implClassName + " writer with pipeline: " + - Arrays.toString(getPipeline())); + LOG.debug("Create new " + implClassName + " writer with pipeline: " + + Arrays.toString(getPipeline())); } // We got a new writer, so reset the slow sync count lastTimeCheckSlowSync = EnvironmentEdgeManager.currentTime(); @@ -988,7 +985,7 @@ public abstract class AbstractFSWAL implements WAL { */ @Override public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, - boolean onlyIfGreater) { + boolean onlyIfGreater) { sequenceIdAccounting.updateStore(encodedRegionName, familyName, sequenceid, onlyIfGreater); } @@ -1081,19 +1078,18 @@ public abstract class AbstractFSWAL implements WAL { protected final void postSync(final long timeInNanos, final int handlerSyncs) { if (timeInNanos > this.slowSyncNs) { String msg = new StringBuilder().append("Slow sync cost: ") - .append(TimeUnit.NANOSECONDS.toMillis(timeInNanos)) - .append(" ms, current pipeline: ") - .append(Arrays.toString(getPipeline())).toString(); + .append(TimeUnit.NANOSECONDS.toMillis(timeInNanos)).append(" ms, current pipeline: ") + .append(Arrays.toString(getPipeline())).toString(); LOG.info(msg); // A single sync took too long. // Elsewhere in checkSlowSync, called from checkLogRoll, we will look at cumulative // effects. Here we have a single data point that indicates we should take immediate // action, so do so. if (timeInNanos > this.rollOnSyncNs) { - LOG.warn("Requesting log roll because we exceeded slow sync threshold; time=" + - TimeUnit.NANOSECONDS.toMillis(timeInNanos) + " ms, threshold=" + - TimeUnit.NANOSECONDS.toMillis(rollOnSyncNs) + " ms, current pipeline: " + - Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because we exceeded slow sync threshold; time=" + + TimeUnit.NANOSECONDS.toMillis(timeInNanos) + " ms, threshold=" + + TimeUnit.NANOSECONDS.toMillis(rollOnSyncNs) + " ms, current pipeline: " + + Arrays.toString(getPipeline())); requestLogRoll(SLOW_SYNC); } slowSyncCount.incrementAndGet(); // it's fine to unconditionally increment this @@ -1193,26 +1189,26 @@ public abstract class AbstractFSWAL implements WAL { * passed in WALKey walKey parameter. Be warned that the WriteEntry is not * immediately available on return from this method. It WILL be available subsequent to a sync of * this append; otherwise, you will just have to wait on the WriteEntry to get filled in. - * @param info the regioninfo associated with append - * @param key Modified by this call; we add to it this edits region edit/sequence id. - * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit - * sequence id that is after all currently appended edits. - * @param inMemstore Always true except for case where we are writing a region event meta - * marker edit, for example, a compaction completion record into the WAL or noting a - * Region Open event. In these cases the entry is just so we can finish an unfinished - * compaction after a crash when the new Server reads the WAL on recovery, etc. 
These - * transition event 'Markers' do not go via the memstore. When memstore is false, - * we presume a Marker event edit. + * @param info the regioninfo associated with append + * @param key Modified by this call; we add to it this edits region edit/sequence id. + * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit + * sequence id that is after all currently appended edits. + * @param inMemstore Always true except for case where we are writing a region event meta marker + * edit, for example, a compaction completion record into the WAL or noting a + * Region Open event. In these cases the entry is just so we can finish an + * unfinished compaction after a crash when the new Server reads the WAL on + * recovery, etc. These transition event 'Markers' do not go via the memstore. + * When memstore is false, we presume a Marker event edit. * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. */ protected abstract long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException; + throws IOException; protected abstract void doAppend(W writer, FSWALEntry entry) throws IOException; protected abstract W createWriterInstance(Path path) - throws IOException, CommonFSUtils.StreamLacksCapabilityException; + throws IOException, CommonFSUtils.StreamLacksCapabilityException; /** * Notice that you need to clear the {@link #rollRequested} flag in this method, as the new writer @@ -1222,15 +1218,14 @@ public abstract class AbstractFSWAL implements WAL { * start writing to the new writer. */ protected abstract void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) - throws IOException; + throws IOException; protected abstract void doShutdown() throws IOException; protected abstract boolean doCheckLogLowReplication(); /** - * @return true if we exceeded the slow sync roll threshold over the last check - * interval + * @return true if we exceeded the slow sync roll threshold over the last check interval */ protected boolean doCheckSlowSync() { boolean result = false; @@ -1244,16 +1239,15 @@ public abstract class AbstractFSWAL implements WAL { // interval from then until the one more that pushed us over. If so, we // should do nothing and let the count reset. 
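The comments above summarize the slow-sync roll policy in doCheckSlowSync: a roll is requested only when the slow sync count reaches the configured threshold within one check interval; if the interval has already elapsed, the count is simply reset. A condensed sketch of that decision follows; the class name, method name, and sample values are illustrative stand-ins for the real fields behind SLOW_SYNC_ROLL_THRESHOLD and SLOW_SYNC_ROLL_INTERVAL_MS.

import java.util.concurrent.atomic.AtomicInteger;

// Condensed, illustrative sketch: roll only when enough slow syncs accumulate within a single
// check interval, otherwise just restart the window.
public class SlowSyncCheckSketch {

  private final AtomicInteger slowSyncCount = new AtomicInteger();
  private final int slowSyncRollThreshold = 100;        // illustrative threshold
  private final long slowSyncCheckIntervalMs = 10_000L; // illustrative interval
  private long lastSlowSyncCheckTime = System.currentTimeMillis();

  public boolean shouldRequestRoll() {
    long now = System.currentTimeMillis();
    long elapsed = now - lastSlowSyncCheckTime;
    lastSlowSyncCheckTime = now;
    boolean roll =
      slowSyncCount.get() >= slowSyncRollThreshold && elapsed <= slowSyncCheckIntervalMs;
    // Either way the window restarts with a fresh count.
    slowSyncCount.set(0);
    return roll;
  }
}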
if (LOG.isDebugEnabled()) { - LOG.debug("checkSlowSync triggered but we decided to ignore it; " + - "count=" + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + - ", elapsedTime=" + elapsedTime + " ms, slowSyncCheckInterval=" + - slowSyncCheckInterval + " ms"); + LOG.debug("checkSlowSync triggered but we decided to ignore it; " + "count=" + + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + ", elapsedTime=" + + elapsedTime + " ms, slowSyncCheckInterval=" + slowSyncCheckInterval + " ms"); } // Fall through to count reset below } else { - LOG.warn("Requesting log roll because we exceeded slow sync threshold; count=" + - slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + - ", current pipeline: " + Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because we exceeded slow sync threshold; count=" + + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + ", current pipeline: " + + Arrays.toString(getPipeline())); result = true; } } @@ -1303,8 +1297,10 @@ public abstract class AbstractFSWAL implements WAL { final Path baseDir = CommonFSUtils.getWALRootDir(conf); Path archiveDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME); - if (conf.getBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, - AbstractFSWALProvider.DEFAULT_SEPARATE_OLDLOGDIR)) { + if ( + conf.getBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, + AbstractFSWALProvider.DEFAULT_SEPARATE_OLDLOGDIR) + ) { archiveDir = new Path(archiveDir, p.getName()); } WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf)); @@ -1314,8 +1310,8 @@ public abstract class AbstractFSWAL implements WAL { System.err.println("Usage: AbstractFSWAL "); System.err.println("Arguments:"); System.err.println(" --dump Dump textual representation of passed one or more files"); - System.err.println(" For example: " + - "AbstractFSWAL --dump hdfs://example.com:9000/hbase/WALs/MACHINE/LOGFILE"); + System.err.println(" For example: " + + "AbstractFSWAL --dump hdfs://example.com:9000/hbase/WALs/MACHINE/LOGFILE"); System.err.println(" --split Split the passed directory of WAL logs"); System.err.println( " For example: AbstractFSWAL --split hdfs://example.com:9000/hbase/WALs/DIR"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java index a56a31a5a63..608032b8e4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; @@ -70,7 +71,7 @@ public abstract class AbstractProtobufLogWriter { protected AtomicLong length = new AtomicLong(); private WALCellCodec getCodec(Configuration conf, CompressionContext compressionContext) - throws IOException { + throws IOException { return WALCellCodec.create(conf, null, compressionContext); } @@ -79,14 +80,13 @@ public abstract class AbstractProtobufLogWriter { builder.setWriterClsName(getWriterClassName()); } if (!builder.hasCellCodecClsName()) { - builder.setCellCodecClsName( - WALCellCodec.getWALCellCodecClass(conf).getName()); + builder.setCellCodecClsName(WALCellCodec.getWALCellCodecClass(conf).getName()); } return builder.build(); } protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { return buildWALHeader0(conf, builder); } @@ -94,7 +94,7 @@ public abstract class AbstractProtobufLogWriter { // environment. Do not forget to override the setEncryptor method as it will be called in this // method to init your encryptor. protected final WALHeader buildSecureWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { builder.setWriterClsName(getWriterClassName()); if (conf.getBoolean(HConstants.ENABLE_WAL_ENCRYPTION, false)) { EncryptionTest.testKeyProvider(conf); @@ -102,7 +102,7 @@ public abstract class AbstractProtobufLogWriter { // Get an instance of our cipher final String cipherName = - conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, cipherName); if (cipher == null) { throw new RuntimeException("Cipher '" + cipherName + "' is not available"); @@ -111,10 +111,9 @@ public abstract class AbstractProtobufLogWriter { // Generate a random encryption key for this WAL Key key = cipher.getRandomKey(); builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(EncryptionUtil.wrapKey(conf, - conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName())), - key))); + conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY, + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName())), + key))); // Set up the encryptor Encryptor encryptor = cipher.getEncryptor(); @@ -144,13 +143,14 @@ public abstract class AbstractProtobufLogWriter { conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); final boolean useValueCompression = conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); - final Compression.Algorithm valueCompressionType = - useValueCompression ? CompressionContext.getValueCompressionAlgorithm(conf) : - Compression.Algorithm.NONE; + final Compression.Algorithm valueCompressionType = useValueCompression + ? 
CompressionContext.getValueCompressionAlgorithm(conf) + : Compression.Algorithm.NONE; if (LOG.isTraceEnabled()) { - LOG.trace("Initializing compression context for {}: isRecoveredEdits={}" + - ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", path, - CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, + LOG.trace( + "Initializing compression context for {}: isRecoveredEdits={}" + + ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", + path, CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, valueCompressionType); } this.compressionContext = @@ -164,8 +164,7 @@ public abstract class AbstractProtobufLogWriter { } public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable, - long blocksize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { + long blocksize, StreamSlowMonitor monitor) throws IOException, StreamLacksCapabilityException { try { this.conf = conf; boolean doCompress = initializeCompressionContext(conf, path); @@ -180,9 +179,8 @@ public abstract class AbstractProtobufLogWriter { doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); boolean doValueCompress = doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); - WALHeader.Builder headerBuilder = - WALHeader.newBuilder().setHasCompression(doCompress).setHasTagCompression(doTagCompress) - .setHasValueCompression(doValueCompress); + WALHeader.Builder headerBuilder = WALHeader.newBuilder().setHasCompression(doCompress) + .setHasTagCompression(doTagCompress).setHasValueCompression(doValueCompress); if (doValueCompress) { headerBuilder.setValueCompressionAlgorithm( CompressionContext.getValueCompressionAlgorithm(conf).ordinal()); @@ -222,7 +220,7 @@ public abstract class AbstractProtobufLogWriter { // should be called in sub classes's initAfterHeader method to init SecureWALCellCodec. protected final void secureInitAfterHeader(boolean doCompress, Encryptor encryptor) - throws IOException { + throws IOException { if (conf.getBoolean(HConstants.ENABLE_WAL_ENCRYPTION, false) && encryptor != null) { WALCellCodec codec = SecureWALCellCodec.getCodec(this.conf, encryptor); this.cellEncoder = codec.getEncoder(getOutputStreamForCellEncoder()); @@ -257,7 +255,7 @@ public abstract class AbstractProtobufLogWriter { } else if ((trailerSize = this.trailer.getSerializedSize()) > this.trailerWarnSize) { // continue writing after warning the user. LOG.warn("Please investigate WALTrailer usage. 
Trailer size > maximum size : " + trailerSize - + " > " + this.trailerWarnSize); + + " > " + this.trailerWarnSize); } length.set(writeWALTrailerAndMagic(trailer, ProtobufLogReader.PB_WAL_COMPLETE_MAGIC)); this.trailerWritten = true; @@ -267,8 +265,8 @@ public abstract class AbstractProtobufLogWriter { } protected abstract void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) - throws IOException, StreamLacksCapabilityException; + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException; /** * simply close the output, do not need to write trailer like the Writer.close @@ -281,7 +279,7 @@ public abstract class AbstractProtobufLogWriter { protected abstract long writeMagicAndWALHeader(byte[] magic, WALHeader header) throws IOException; protected abstract long writeWALTrailerAndMagic(WALTrailer trailer, byte[] magic) - throws IOException; + throws IOException; protected abstract OutputStream getOutputStreamForCellEncoder(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 3af2c3aa44c..9fa36630d49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver.wal; import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.ERROR; import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.SIZE; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import com.lmax.disruptor.RingBuffer; import com.lmax.disruptor.Sequence; import com.lmax.disruptor.Sequencer; @@ -59,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -66,7 +68,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor; - /** * An asynchronous implementation of FSWAL. *
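The AbstractProtobufLogWriter hunks above reformat the logic that decides which compression features are recorded in the WAL header: general compression, tag compression (only when compression is on, default true), and optional value compression with its algorithm (default off, NONE when disabled). As a standalone, hedged illustration of that decision flow using only the JDK, here is a minimal sketch; the WalCompressionSettings class and the property names are hypothetical stand-ins, not HBase configuration keys.

import java.util.Properties;

// Hypothetical stand-in for the compression flags carried in a WAL header.
final class WalCompressionSettings {
  final boolean compression;       // dictionary compression of keys
  final boolean tagCompression;    // only meaningful when compression is on
  final boolean valueCompression;  // only meaningful when compression is on
  final String valueAlgorithm;     // placeholder names such as "NONE" or "GZ"

  WalCompressionSettings(boolean compression, boolean tagCompression,
      boolean valueCompression, String valueAlgorithm) {
    this.compression = compression;
    this.tagCompression = tagCompression;
    this.valueCompression = valueCompression;
    this.valueAlgorithm = valueAlgorithm;
  }

  // Mirrors the pattern in the hunks above: tag/value compression are only
  // enabled when general WAL compression is enabled, and the value algorithm
  // falls back to NONE when value compression is off.
  static WalCompressionSettings fromConfig(Properties conf) {
    boolean compress = Boolean.parseBoolean(conf.getProperty("wal.compression", "false"));
    boolean tagCompress =
        compress && Boolean.parseBoolean(conf.getProperty("wal.tags.compression", "true"));
    boolean valueCompress =
        compress && Boolean.parseBoolean(conf.getProperty("wal.value.compression", "false"));
    String algo = valueCompress ? conf.getProperty("wal.value.compression.type", "GZ") : "NONE";
    return new WalCompressionSettings(compress, tagCompress, valueCompress, algo);
  }

  @Override
  public String toString() {
    return "compression=" + compression + ", tagCompression=" + tagCompression
        + ", valueCompression=" + valueCompression + ", valueAlgorithm=" + valueAlgorithm;
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty("wal.compression", "true");
    conf.setProperty("wal.value.compression", "true");
    System.out.println(fromConfig(conf));
  }
}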

      @@ -129,8 +130,8 @@ public class AsyncFSWAL extends AbstractFSWAL { private static final Logger LOG = LoggerFactory.getLogger(AsyncFSWAL.class); - private static final Comparator SEQ_COMPARATOR = Comparator.comparingLong( - SyncFuture::getTxid).thenComparingInt(System::identityHashCode); + private static final Comparator SEQ_COMPARATOR = + Comparator.comparingLong(SyncFuture::getTxid).thenComparingInt(System::identityHashCode); public static final String WAL_BATCH_SIZE = "hbase.wal.batch.size"; public static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; @@ -202,20 +203,20 @@ public class AsyncFSWAL extends AbstractFSWAL { private final StreamSlowMonitor streamSlowMonitor; public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, - String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { + Configuration conf, List listeners, boolean failIfWALExists, String prefix, + String suffix, EventLoopGroup eventLoopGroup, Class channelClass) + throws FailedLogCloseException, IOException { this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, "monitorForSuffix")); + eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, "monitorForSuffix")); } public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDir, - String archiveDir, Configuration conf, List listeners, - boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) - throws FailedLogCloseException, IOException { + String archiveDir, Configuration conf, List listeners, + boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, + Class channelClass, StreamSlowMonitor monitor) + throws FailedLogCloseException, IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix); + suffix); this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; this.streamSlowMonitor = monitor; @@ -229,19 +230,17 @@ public class AsyncFSWAL extends AbstractFSWAL { Queue queue = (Queue) field.get(consumeExecutor); hasConsumerTask = () -> queue.peek() == consumer; } catch (Exception e) { - LOG.warn("Can not get task queue of " + consumeExecutor + - ", this is not necessary, just give up", e); + LOG.warn("Can not get task queue of " + consumeExecutor + + ", this is not necessary, just give up", e); hasConsumerTask = () -> false; } } else { hasConsumerTask = () -> false; } } else { - ThreadPoolExecutor threadPool = - new ThreadPoolExecutor(1, 1, 0L, - TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), - new ThreadFactoryBuilder().setNameFormat("AsyncFSWAL-%d-" + rootDir.toString()). 
- setDaemon(true).build()); + ThreadPoolExecutor threadPool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), new ThreadFactoryBuilder() + .setNameFormat("AsyncFSWAL-%d-" + rootDir.toString()).setDaemon(true).build()); hasConsumerTask = () -> threadPool.getQueue().peek() == consumer; this.consumeExecutor = threadPool; } @@ -391,8 +390,8 @@ public class AsyncFSWAL extends AbstractFSWAL { // If we haven't already requested a roll, check if we have exceeded logrollsize if (!isLogRollRequested() && writer.getLength() > logrollsize) { if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" + - writer.getLength() + ", logrollsize=" + logrollsize); + LOG.debug("Requesting log roll because of file size threshold; length=" + writer.getLength() + + ", logrollsize=" + logrollsize); } requestLogRoll(SIZE); } @@ -401,9 +400,8 @@ public class AsyncFSWAL extends AbstractFSWAL { // find all the sync futures between these two txids to see if we need to issue a hsync, if no // sync futures then just use the default one. private boolean isHsync(long beginTxid, long endTxid) { - SortedSet futures = - syncFutures.subSet(new SyncFuture().reset(beginTxid, false), - new SyncFuture().reset(endTxid + 1, false)); + SortedSet futures = syncFutures.subSet(new SyncFuture().reset(beginTxid, false), + new SyncFuture().reset(endTxid + 1, false)); if (futures.isEmpty()) { return useHsync; } @@ -509,8 +507,10 @@ public class AsyncFSWAL extends AbstractFSWAL { if (appended) { // This is possible, when we fail to sync, we will add the unackedAppends back to // toWriteAppends, so here we may get an entry which is already in the unackedAppends. - if (addedToUnackedAppends || unackedAppends.isEmpty() || - getLastTxid(unackedAppends) < entry.getTxid()) { + if ( + addedToUnackedAppends || unackedAppends.isEmpty() + || getLastTxid(unackedAppends) < entry.getTxid() + ) { unackedAppends.addLast(entry); addedToUnackedAppends = true; } @@ -522,8 +522,10 @@ public class AsyncFSWAL extends AbstractFSWAL { // There could be other ways to fix, such as changing the logic in the consume method, but // it will break the assumption and then (may) lead to a big refactoring. So here let's use // this way to fix first, can optimize later. - if (writer.getLength() - fileLengthAtLastSync >= batchSize && - (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends))) { + if ( + writer.getLength() - fileLengthAtLastSync >= batchSize + && (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends)) + ) { break; } } @@ -580,8 +582,8 @@ public class AsyncFSWAL extends AbstractFSWAL { consumeLock.unlock(); } long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; - nextCursor++) { + for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor + <= cursorBound; nextCursor++) { if (!waitingConsumePayloads.isPublished(nextCursor)) { break; } @@ -614,8 +616,10 @@ public class AsyncFSWAL extends AbstractFSWAL { // 3. we set consumerScheduled to false and also give up scheduling consumer task. if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) { // we will give up consuming so if there are some unsynced data we need to issue a sync. 
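The AsyncFSWAL hunks above reformat the setup of the single consumer thread: a one-thread ThreadPoolExecutor built from a named daemon ThreadFactory, plus a peek at the executor's queue head to detect whether the consumer task is already scheduled. The following is a minimal, self-contained sketch of that scheduling pattern using only the JDK; the class and method names here are hypothetical, not the HBase implementation.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical sketch: a single-threaded executor whose queue head is peeked
// to decide whether the consumer task is already pending.
final class WalConsumerScheduler {
  private final Runnable consumer = this::consume;
  private final ThreadPoolExecutor consumeExecutor;

  WalConsumerScheduler(String walName) {
    AtomicInteger counter = new AtomicInteger();
    ThreadFactory daemonFactory = r -> {
      Thread t = new Thread(r, "AsyncWAL-consumer-" + counter.getAndIncrement() + "-" + walName);
      t.setDaemon(true);
      return t;
    };
    // One thread, unbounded queue: all consumption happens on a single thread.
    this.consumeExecutor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
        new LinkedBlockingQueue<>(), daemonFactory);
  }

  // True when the consumer task is already sitting at the head of the queue,
  // so there is no need to submit it again.
  boolean hasConsumerTask() {
    return consumeExecutor.getQueue().peek() == consumer;
  }

  void scheduleIfNeeded() {
    if (!hasConsumerTask()) {
      consumeExecutor.execute(consumer);
    }
  }

  private void consume() {
    // The real consumer drains pending appends and issues syncs here.
  }

  void shutdown() {
    consumeExecutor.shutdown();
  }
}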
- if (writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() && - syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) { + if ( + writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() + && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync + ) { // no new data in the ringbuffer and we have at least one sync request sync(writer); } @@ -642,9 +646,9 @@ public class AsyncFSWAL extends AbstractFSWAL { @Override protected long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException { - long txid = stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, - waitingConsumePayloads); + throws IOException { + long txid = + stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, waitingConsumePayloads); if (shouldScheduleConsumer()) { consumeExecutor.execute(consumer); } @@ -735,7 +739,7 @@ public class AsyncFSWAL extends AbstractFSWAL { @Override protected void doReplaceWriter(Path oldPath, Path newPath, AsyncWriter nextWriter) - throws IOException { + throws IOException { Preconditions.checkNotNull(nextWriter); waitForSafePoint(); long oldFileLen = closeWriter(this.writer, oldPath); @@ -769,11 +773,11 @@ public class AsyncFSWAL extends AbstractFSWAL { closeExecutor.shutdown(); try { if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" + - " the close of async writer doesn't complete." + - "Please check the status of underlying filesystem" + - " or increase the wait time by the config \"" + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + - "\""); + LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" + + " the close of async writer doesn't complete." + + "Please check the status of underlying filesystem" + + " or increase the wait time by the config \"" + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + + "\""); } } catch (InterruptedException e) { LOG.error("The wait for close of async writer is interrupted"); @@ -782,8 +786,8 @@ public class AsyncFSWAL extends AbstractFSWAL { IOException error = new IOException("WAL has been closed"); long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; // drain all the pending sync requests - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; - nextCursor++) { + for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor + <= cursorBound; nextCursor++) { if (!waitingConsumePayloads.isPublished(nextCursor)) { break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java index fbd3882d4f7..42d9a1f15f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; @@ -55,7 +56,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; */ @InterfaceAudience.Private public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter - implements AsyncFSWALProvider.AsyncWriter { + implements AsyncFSWALProvider.AsyncWriter { private static final Logger LOG = LoggerFactory.getLogger(AsyncProtobufLogWriter.class); @@ -69,8 +70,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter */ private volatile long finalSyncedLength = -1; - private static final class OutputStreamWrapper extends OutputStream - implements ByteBufferWriter { + private static final class OutputStreamWrapper extends OutputStream implements ByteBufferWriter { private final AsyncFSOutput out; @@ -114,7 +114,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter private long waitTimeout; public AsyncProtobufLogWriter(EventLoopGroup eventLoopGroup, - Class channelClass) { + Class channelClass) { this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; // Reuse WAL_ROLL_WAIT_TIMEOUT here to avoid an infinite wait if somehow a wait on a future @@ -129,7 +129,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter /* * @return class name which is recognized by hbase-1.x to avoid ProtobufLogReader throwing error: - * IOException: Got unknown writer class: AsyncProtobufLogWriter + * IOException: Got unknown writer class: AsyncProtobufLogWriter */ @Override protected String getWriterClassName() { @@ -140,9 +140,8 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter public void append(Entry entry) { int buffered = output.buffered(); try { - entry.getKey(). - getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() - .writeDelimitedTo(asyncOutputWrapper); + entry.getKey().getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() + .writeDelimitedTo(asyncOutputWrapper); } catch (IOException e) { throw new AssertionError("should not happen", e); } @@ -174,9 +173,8 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter output.recoverAndClose(null); } /** - * We have to call {@link AsyncFSOutput#getSyncedLength()} - * after {@link AsyncFSOutput#close()} to get the final length - * synced to underlying filesystem because {@link AsyncFSOutput#close()} + * We have to call {@link AsyncFSOutput#getSyncedLength()} after {@link AsyncFSOutput#close()} + * to get the final length synced to underlying filesystem because {@link AsyncFSOutput#close()} * may also flush some data to underlying filesystem. 
*/ this.finalSyncedLength = this.output.getSyncedLength(); @@ -189,10 +187,10 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException { this.output = AsyncFSOutputHelper.createOutput(fs, path, overwritable, false, replication, - blockSize, eventLoopGroup, channelClass, monitor); + blockSize, eventLoopGroup, channelClass, monitor); this.asyncOutputWrapper = new OutputStreamWrapper(output); } @@ -206,7 +204,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter } } } - + private long writeWALMetadata(Consumer> action) throws IOException { CompletableFuture future = new CompletableFuture<>(); action.accept(future); @@ -270,16 +268,16 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter @Override public long getSyncedLength() { - /** - * The statement "this.output = null;" in {@link AsyncProtobufLogWriter#close} - * is a sync point, if output is null, then finalSyncedLength must set, - * so we can return finalSyncedLength, else we return output.getSyncedLength - */ + /** + * The statement "this.output = null;" in {@link AsyncProtobufLogWriter#close} is a sync point, + * if output is null, then finalSyncedLength must set, so we can return finalSyncedLength, else + * we return output.getSyncedLength + */ AsyncFSOutput outputToUse = this.output; - if(outputToUse == null) { - long finalSyncedLengthToUse = this.finalSyncedLength; - assert finalSyncedLengthToUse >= 0; - return finalSyncedLengthToUse; + if (outputToUse == null) { + long finalSyncedLengthToUse = this.finalSyncedLength; + assert finalSyncedLengthToUse >= 0; + return finalSyncedLengthToUse; } return outputToUse.getSyncedLength(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java index bfb7f9a85a5..e626d9e14a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.ByteArrayOutputStream; @@ -41,10 +40,10 @@ import org.slf4j.LoggerFactory; /** * Context that holds the various dictionaries for compression in WAL. *
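The getSyncedLength() hunk above relies on copying the volatile output reference into a local variable once and then branching: close() records the final synced length before nulling the reference, so any caller that observes a null output can safely return the recorded length. A minimal standalone sketch of that read-once-then-branch pattern follows; the class and field names are hypothetical and a ByteArrayOutputStream stands in for the real async output.

import java.io.ByteArrayOutputStream;

// Hypothetical sketch of the "copy the volatile reference to a local before
// branching" pattern used for the synced-length bookkeeping above.
final class ClosableLengthTracker {
  private volatile ByteArrayOutputStream output = new ByteArrayOutputStream();
  private volatile long finalSyncedLength = -1;

  void write(byte[] data) {
    ByteArrayOutputStream out = this.output;
    if (out != null) {
      out.write(data, 0, data.length);
    }
  }

  // Closing is the "sync point": record the final length first, then null the
  // reference, so a reader that sees null also sees a valid recorded length.
  void close() {
    ByteArrayOutputStream out = this.output;
    if (out != null) {
      this.finalSyncedLength = out.size();
      this.output = null;
    }
  }

  long getSyncedLength() {
    // Read the volatile reference exactly once.
    ByteArrayOutputStream outputToUse = this.output;
    if (outputToUse == null) {
      long finalLengthToUse = this.finalSyncedLength;
      assert finalLengthToUse >= 0;
      return finalLengthToUse;
    }
    return outputToUse.size();
  }

  public static void main(String[] args) {
    ClosableLengthTracker t = new ClosableLengthTracker();
    t.write(new byte[16]);
    t.close();
    System.out.println(t.getSyncedLength()); // 16
  }
}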

      - * CompressionContexts are not expected to be shared among threads. Multithreaded use may - * produce unexpected results. + * CompressionContexts are not expected to be shared among threads. Multithreaded use may produce + * unexpected results. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) public class CompressionContext { private static final Logger LOG = LoggerFactory.getLogger(CompressionContext.class); @@ -59,12 +58,16 @@ public class CompressionContext { "hbase.regionserver.wal.value.compression.type"; public enum DictionaryIndex { - REGION, TABLE, FAMILY, QUALIFIER, ROW + REGION, + TABLE, + FAMILY, + QUALIFIER, + ROW } /** - * Encapsulates the compression algorithm and its streams that we will use for value - * compression in this WAL. + * Encapsulates the compression algorithm and its streams that we will use for value compression + * in this WAL. */ static class ValueCompressor { @@ -86,16 +89,14 @@ public class CompressionContext { return algorithm; } - public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) - throws IOException { + public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) throws IOException { if (compressedOut == null) { // Create the output streams here the first time around. lowerOut = new ByteArrayOutputStream(); if (compressor == null) { compressor = algorithm.getCompressor(); } - compressedOut = algorithm.createCompressionStream(lowerOut, compressor, - IO_BUFFER_SIZE); + compressedOut = algorithm.createCompressionStream(lowerOut, compressor, IO_BUFFER_SIZE); } else { lowerOut.reset(); } @@ -105,7 +106,7 @@ public class CompressionContext { } public int decompress(InputStream in, int inLength, byte[] outArray, int outOffset, - int outLength) throws IOException { + int outLength) throws IOException { // Our input is a sequence of bounded byte ranges (call them segments), with // BoundedDelegatingInputStream providing a way to switch in a new segment when the @@ -117,8 +118,7 @@ public class CompressionContext { if (decompressor == null) { decompressor = algorithm.getDecompressor(); } - compressedIn = algorithm.createDecompressionStream(lowerIn, decompressor, - IO_BUFFER_SIZE); + compressedIn = algorithm.createDecompressionStream(lowerIn, decompressor, IO_BUFFER_SIZE); } else { lowerIn.setDelegate(in, inLength); } @@ -173,23 +173,21 @@ public class CompressionContext { } private final Map dictionaries = - new EnumMap<>(DictionaryIndex.class); + new EnumMap<>(DictionaryIndex.class); // Context used for compressing tags TagCompressionContext tagCompressionContext = null; ValueCompressor valueCompressor = null; - public CompressionContext(Class dictType, - boolean recoveredEdits, boolean hasTagCompression, boolean hasValueCompression, - Compression.Algorithm valueCompressionType) - throws SecurityException, NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException, IOException { - Constructor dictConstructor = - dictType.getConstructor(); + public CompressionContext(Class dictType, boolean recoveredEdits, + boolean hasTagCompression, boolean hasValueCompression, + Compression.Algorithm valueCompressionType) throws SecurityException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException, IOException { + Constructor dictConstructor = dictType.getConstructor(); for (DictionaryIndex 
dictionaryIndex : DictionaryIndex.values()) { Dictionary newDictionary = dictConstructor.newInstance(); dictionaries.put(dictionaryIndex, newDictionary); } - if(recoveredEdits) { + if (recoveredEdits) { getDictionary(DictionaryIndex.REGION).init(1); getDictionary(DictionaryIndex.TABLE).init(1); } else { @@ -210,9 +208,8 @@ public class CompressionContext { } public CompressionContext(Class dictType, boolean recoveredEdits, - boolean hasTagCompression) - throws SecurityException, NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException, IOException { + boolean hasTagCompression) throws SecurityException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException, IOException { this(dictType, recoveredEdits, hasTagCompression, false, null); } @@ -233,7 +230,7 @@ public class CompressionContext { } void clear() { - for(Dictionary dictionary : dictionaries.values()){ + for (Dictionary dictionary : dictionaries.values()) { dictionary.clear(); } if (tagCompressionContext != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java index 13f5d6ef35b..d283a19e45f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -13,15 +13,13 @@ * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and - * limitations under the License + * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -29,17 +27,17 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.util.Dictionary; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.WritableUtils; - -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.io.WritableUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * A set of static functions for running our custom WAL compression/decompression. - * Also contains a command line tool to compress and uncompress WALs. + * A set of static functions for running our custom WAL compression/decompression. Also contains a + * command line tool to compress and uncompress WALs. 
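The ValueCompressor hunks above reformat code that lazily creates its compression machinery on first use and then reuses it, resetting only the underlying buffer between values. The following is a hedged, standalone illustration of that reuse pattern built on java.util.zip rather than the HBase Compression API; it shows the shape of the idea, not the real value-compression codepath.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;

// Hypothetical sketch: keep one Deflater and one lower byte buffer alive
// across many values, mirroring the lazy-create / reset-on-reuse pattern.
final class ReusableValueCompressor {
  private ByteArrayOutputStream lowerOut;
  private Deflater deflater;

  byte[] compress(byte[] value, int offset, int length) throws IOException {
    if (lowerOut == null) {
      // First call: create the reusable pieces.
      lowerOut = new ByteArrayOutputStream();
      deflater = new Deflater();
    } else {
      // Later calls: reuse them, just clear the output buffer and the deflater state.
      lowerOut.reset();
      deflater.reset();
    }
    try (DeflaterOutputStream out = new DeflaterOutputStream(lowerOut, deflater)) {
      out.write(value, offset, length);
    }
    return lowerOut.toByteArray();
  }

  public static void main(String[] args) throws IOException {
    ReusableValueCompressor c = new ReusableValueCompressor();
    byte[] v = "aaaaaaaaaaaaaaaaaaaaaaaa".getBytes(StandardCharsets.UTF_8);
    System.out.println(c.compress(v, 0, v.length).length + " compressed bytes");
    System.out.println(c.compress(v, 0, v.length).length + " compressed bytes (buffers reused)");
  }
}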
*/ @InterfaceAudience.Private public class Compressor { @@ -65,8 +63,7 @@ public class Compressor { return; } - private static void transformFile(Path input, Path output) - throws IOException { + private static void transformFile(Path input, Path output) throws IOException { Configuration conf = HBaseConfiguration.create(); FileSystem inFS = input.getFileSystem(conf); @@ -80,12 +77,13 @@ public class Compressor { System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName()); return; } - boolean compress = ((ReaderBase)in).hasCompression(); + boolean compress = ((ReaderBase) in).hasCompression(); conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress); out = WALFactory.createWALWriter(outFS, output, conf); WAL.Entry e = null; - while ((e = in.next()) != null) out.append(e); + while ((e = in.next()) != null) + out.append(e); } finally { in.close(); if (out != null) { @@ -97,14 +95,12 @@ public class Compressor { /** * Reads the next compressed entry and returns it as a byte array - * - * @param in the DataInput to read from + * @param in the DataInput to read from * @param dict the dictionary we use for our read. * @return the uncompressed array. */ @Deprecated - static byte[] readCompressed(DataInput in, Dictionary dict) - throws IOException { + static byte[] readCompressed(DataInput in, Dictionary dict) throws IOException { byte status = in.readByte(); if (status == Dictionary.NOT_IN_DICTIONARY) { @@ -121,27 +117,23 @@ public class Compressor { short dictIdx = toShort(status, in.readByte()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { - throw new IOException("Missing dictionary entry for index " - + dictIdx); + throw new IOException("Missing dictionary entry for index " + dictIdx); } return entry; } } /** - * Reads a compressed entry into an array. - * The output into the array ends up length-prefixed. - * - * @param to the array to write into + * Reads a compressed entry into an array. The output into the array ends up length-prefixed. + * @param to the array to write into * @param offset array offset to start writing to - * @param in the DataInput to read from - * @param dict the dictionary to use for compression - * + * @param in the DataInput to read from + * @param dict the dictionary to use for compression * @return the length of the uncompressed data */ @Deprecated - static int uncompressIntoArray(byte[] to, int offset, DataInput in, - Dictionary dict) throws IOException { + static int uncompressIntoArray(byte[] to, int offset, DataInput in, Dictionary dict) + throws IOException { byte status = in.readByte(); if (status == Dictionary.NOT_IN_DICTIONARY) { @@ -162,8 +154,7 @@ public class Compressor { throw new IOException("Unable to uncompress the log entry", ex); } if (entry == null) { - throw new IOException("Missing dictionary entry for index " - + dictIdx); + throw new IOException("Missing dictionary entry for index " + dictIdx); } // now we write the uncompressed value. Bytes.putBytes(to, offset, entry, 0, entry.length); @@ -173,15 +164,13 @@ public class Compressor { /** * Compresses and writes an array to a DataOutput - * * @param data the array to write. 
- * @param out the DataOutput to write into + * @param out the DataOutput to write into * @param dict the dictionary to use for compression */ @Deprecated - static void writeCompressed(byte[] data, int offset, int length, - DataOutput out, Dictionary dict) - throws IOException { + static void writeCompressed(byte[] data, int offset, int length, DataOutput out, Dictionary dict) + throws IOException { short dictIdx = Dictionary.NOT_IN_DICTIONARY; if (dict != null) { dictIdx = dict.findEntry(data, offset, length); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java index c38515e0817..5825ba3217f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when a failed append or sync on a WAL. - * Thrown when WAL can no longer be used. Roll the WAL. + * Thrown when a failed append or sync on a WAL. Thrown when WAL can no longer be used. Roll the + * WAL. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 8d61cf940f8..e9e8192d93d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -108,8 +107,10 @@ public class FSHLog extends AbstractFSWAL { // We use ring buffer sequence as txid of FSWALEntry and SyncFuture. 
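The Compressor hunks above touch the dictionary-based encoding: a value is written either as a "not in dictionary" marker followed by its raw bytes (which both sides then add to their dictionaries), or as a short dictionary index on later occurrences. Below is a deliberately simplified, self-contained sketch of that wire idea; the framing (writeShort/writeInt) and class names are made up for illustration and do not match the actual HBase WAL format.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Simplified sketch of dictionary compression: repeated byte[] values are
// replaced by a short index after their first occurrence.
final class TinyDictionaryCodec {
  static final short NOT_IN_DICTIONARY = -1;

  private final List<byte[]> entries = new ArrayList<>();
  private final Map<String, Short> index = new HashMap<>();

  void write(byte[] data, DataOutput out) throws IOException {
    String key = new String(data, StandardCharsets.ISO_8859_1);
    Short idx = index.get(key);
    if (idx == null) {
      // First occurrence: send the raw bytes and add them to the dictionary.
      out.writeShort(NOT_IN_DICTIONARY);
      out.writeInt(data.length);
      out.write(data);
      index.put(key, (short) entries.size());
      entries.add(data);
    } else {
      // Seen before: a two-byte index is enough.
      out.writeShort(idx);
    }
  }

  byte[] read(DataInput in) throws IOException {
    short status = in.readShort();
    if (status == NOT_IN_DICTIONARY) {
      byte[] data = new byte[in.readInt()];
      in.readFully(data);
      entries.add(data); // the reader rebuilds the same dictionary as the writer
      return data;
    }
    if (status < 0 || status >= entries.size()) {
      throw new IOException("Missing dictionary entry for index " + status);
    }
    return entries.get(status);
  }

  public static void main(String[] args) throws IOException {
    TinyDictionaryCodec writerDict = new TinyDictionaryCodec();
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    byte[] family = "cf".getBytes(StandardCharsets.ISO_8859_1);
    writerDict.write(family, out); // raw bytes, added to dictionary
    writerDict.write(family, out); // just a 2-byte index

    TinyDictionaryCodec readerDict = new TinyDictionaryCodec();
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(new String(readerDict.read(in), StandardCharsets.ISO_8859_1));
    System.out.println(new String(readerDict.read(in), StandardCharsets.ISO_8859_1));
  }
}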
private static final Logger LOG = LoggerFactory.getLogger(FSHLog.class); - private static final String TOLERABLE_LOW_REPLICATION = "hbase.regionserver.hlog.tolerable.lowreplication"; - private static final String LOW_REPLICATION_ROLL_LIMIT = "hbase.regionserver.hlog.lowreplication.rolllimit"; + private static final String TOLERABLE_LOW_REPLICATION = + "hbase.regionserver.hlog.tolerable.lowreplication"; + private static final String LOW_REPLICATION_ROLL_LIMIT = + "hbase.regionserver.hlog.lowreplication.rolllimit"; private static final int DEFAULT_LOW_REPLICATION_ROLL_LIMIT = 5; private static final String ROLL_ERRORS_TOLERATED = "hbase.regionserver.logroll.errors.tolerated"; private static final int DEFAULT_ROLL_ERRORS_TOLERATED = 2; @@ -118,7 +119,8 @@ public class FSHLog extends AbstractFSWAL { private static final String MAX_BATCH_COUNT = "hbase.regionserver.wal.sync.batch.count"; private static final int DEFAULT_MAX_BATCH_COUNT = 200; - private static final String FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = "hbase.wal.fshlog.wait.on.shutdown.seconds"; + private static final String FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = + "hbase.wal.fshlog.wait.on.shutdown.seconds"; private static final int DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = 5; /** @@ -168,7 +170,7 @@ public class FSHLog extends AbstractFSWAL { private final int waitOnShutdownInSeconds; private final ExecutorService closeExecutor = Executors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); /** * Exception handler to pass the disruptor ringbuffer. Same as native implementation only it logs @@ -197,25 +199,25 @@ public class FSHLog extends AbstractFSWAL { /** * Constructor. - * @param fs filesystem handle - * @param root path for stored and archived wals + * @param fs filesystem handle + * @param root path for stored and archived wals * @param logDir dir where wals are stored - * @param conf configuration to use + * @param conf configuration to use */ public FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf) - throws IOException { + throws IOException { this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null); } public FSHLog(final FileSystem fs, Abortable abortable, final Path root, final String logDir, - final Configuration conf) throws IOException { + final Configuration conf) throws IOException { this(fs, abortable, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, - null); + null); } public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { + final String archiveDir, final Configuration conf, final List listeners, + final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); } @@ -223,32 +225,33 @@ public class FSHLog extends AbstractFSWAL { * Create an edit log at the given dir location. You should never have to load an * existing log. If there is a log at startup, it should have already been processed and deleted * by the time the WAL object is started up. 
- * @param fs filesystem handle - * @param rootDir path to where logs and oldlogs - * @param logDir dir where wals are stored - * @param archiveDir dir where wals are archived - * @param conf configuration to use - * @param listeners Listeners on WAL events. Listeners passed here will be registered before we do - * anything else; e.g. the Constructor {@link #rollWriter()}. + * @param fs filesystem handle + * @param rootDir path to where logs and oldlogs + * @param logDir dir where wals are stored + * @param archiveDir dir where wals are archived + * @param conf configuration to use + * @param listeners Listeners on WAL events. Listeners passed here will be registered before + * we do anything else; e.g. the Constructor {@link #rollWriter()}. * @param failIfWALExists If true IOException will be thrown if files related to this wal already - * exist. - * @param prefix should always be hostname and port in distributed env and it will be URL encoded - * before being used. If prefix is null, "wal" will be used - * @param suffix will be url encoded. null is treated as empty. non-empty must start with - * {@link org.apache.hadoop.hbase.wal.AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} + * exist. + * @param prefix should always be hostname and port in distributed env and it will be URL + * encoded before being used. If prefix is null, "wal" will be used + * @param suffix will be url encoded. null is treated as empty. non-empty must start with + * {@link org.apache.hadoop.hbase.wal.AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} */ public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir, - final String logDir, final String archiveDir, final Configuration conf, - final List listeners, final boolean failIfWALExists, final String prefix, - final String suffix) throws IOException { + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) throws IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix); - this.minTolerableReplication = conf.getInt(TOLERABLE_LOW_REPLICATION, - CommonFSUtils.getDefaultReplication(fs, this.walDir)); - this.lowReplicationRollLimit = conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); + suffix); + this.minTolerableReplication = + conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); + this.lowReplicationRollLimit = + conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); this.closeErrorsTolerated = conf.getInt(ROLL_ERRORS_TOLERATED, DEFAULT_ROLL_ERRORS_TOLERATED); - this.waitOnShutdownInSeconds = conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, - DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + this.waitOnShutdownInSeconds = + conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); // This is the 'writer' -- a single threaded executor. This single thread 'consumes' what is // put on the ring buffer. 
String hostingThreadName = Thread.currentThread().getName(); @@ -263,7 +266,7 @@ public class FSHLog extends AbstractFSWAL { this.disruptor.getRingBuffer().next(); int syncerCount = conf.getInt(SYNCER_COUNT, DEFAULT_SYNCER_COUNT); int maxBatchCount = conf.getInt(MAX_BATCH_COUNT, - conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, DEFAULT_MAX_BATCH_COUNT)); + conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, DEFAULT_MAX_BATCH_COUNT)); this.ringBufferEventHandler = new RingBufferEventHandler(syncerCount, maxBatchCount); this.disruptor.setDefaultExceptionHandler(new RingBufferExceptionHandler()); this.disruptor.handleEventsWith(new RingBufferEventHandler[] { this.ringBufferEventHandler }); @@ -273,9 +276,9 @@ public class FSHLog extends AbstractFSWAL { /** * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate the - * default behavior (such as setting the maxRecoveryErrorCount value). This is - * done using reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is - * removed. + * default behavior (such as setting the maxRecoveryErrorCount value). This is done using + * reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 + * support is removed. * @return null if underlying stream is not ready. */ OutputStream getOutputStream() { @@ -444,11 +447,11 @@ public class FSHLog extends AbstractFSWAL { boolean hasUnflushedEntries = isUnflushedEntries(); if (syncCloseCall && (hasUnflushedEntries || (errors > this.closeErrorsTolerated))) { LOG.error("Close of WAL " + path + " failed. Cause=\"" + ioe.getMessage() + "\", errors=" - + errors + ", hasUnflushedEntries=" + hasUnflushedEntries); + + errors + ", hasUnflushedEntries=" + hasUnflushedEntries); throw ioe; } LOG.warn("Riding over failed WAL close of " + path - + "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK", ioe); + + "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK", ioe); } finally { inflightWALClosures.remove(path.getName()); } @@ -465,7 +468,7 @@ public class FSHLog extends AbstractFSWAL { this.disruptor.shutdown(timeoutms, TimeUnit.MILLISECONDS); } catch (TimeoutException e) { LOG.warn("Timed out bringing down disruptor after " + timeoutms + "ms; forcing halt " - + "(It is a problem if this is NOT an ABORT! -- DATALOSS!!!!)"); + + "(It is a problem if this is NOT an ABORT! -- DATALOSS!!!!)"); this.disruptor.halt(); this.disruptor.shutdown(); } @@ -481,10 +484,11 @@ public class FSHLog extends AbstractFSWAL { closeExecutor.shutdown(); try { if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error("We have waited {} seconds but the close of writer(s) doesn't complete." + LOG.error( + "We have waited {} seconds but the close of writer(s) doesn't complete." + "Please check the status of underlying filesystem" - + " or increase the wait time by the config \"{}\"", this.waitOnShutdownInSeconds, - FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + + " or increase the wait time by the config \"{}\"", + this.waitOnShutdownInSeconds, FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); } } catch (InterruptedException e) { LOG.error("The wait for termination of FSHLog writer(s) is interrupted"); @@ -552,7 +556,7 @@ public class FSHLog extends AbstractFSWAL { * @return Returns 1. 
*/ private int releaseSyncFuture(final SyncFuture syncFuture, final long currentSequence, - final Throwable t) { + final Throwable t) { if (!syncFuture.done(currentSequence, t)) { throw new IllegalStateException(); } @@ -603,8 +607,7 @@ public class FSHLog extends AbstractFSWAL { boolean areSyncFuturesReleased() { // check whether there is no sync futures offered, and no in-flight sync futures that is being // processed. - return syncFutures.size() <= 0 - && takeSyncFuture == null; + return syncFutures.size() <= 0 && takeSyncFuture == null; } @Override @@ -614,8 +617,8 @@ public class FSHLog extends AbstractFSWAL { int syncCount = 0; try { - // Make a local copy of takeSyncFuture after we get it. We've been running into NPEs - // 2020-03-22 16:54:32,180 WARN [sync.1] wal.FSHLog$SyncRunner(589): UNEXPECTED + // Make a local copy of takeSyncFuture after we get it. We've been running into NPEs + // 2020-03-22 16:54:32,180 WARN [sync.1] wal.FSHLog$SyncRunner(589): UNEXPECTED // java.lang.NullPointerException // at org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:582) // at java.lang.Thread.run(Thread.java:748) @@ -630,7 +633,7 @@ public class FSHLog extends AbstractFSWAL { long syncFutureSequence = sf.getTxid(); if (syncFutureSequence > currentSequence) { throw new IllegalStateException("currentSequence=" + currentSequence - + ", syncFutureSequence=" + syncFutureSequence); + + ", syncFutureSequence=" + syncFutureSequence); } // See if we can process any syncfutures BEFORE we go sync. long currentHighestSyncedSequence = highestSyncedTxid.get(); @@ -694,13 +697,13 @@ public class FSHLog extends AbstractFSWAL { } try { if (doCheckLogLowReplication()) { - LOG.warn("Requesting log roll because of low replication, current pipeline: " + - Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because of low replication, current pipeline: " + + Arrays.toString(getPipeline())); requestLogRoll(LOW_REPLICATION); } else if (writer != null && writer.getLength() > logrollsize) { if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" + - writer.getLength() + ", logrollsize=" + logrollsize); + LOG.debug("Requesting log roll because of file size threshold; length=" + + writer.getLength() + ", logrollsize=" + logrollsize); } requestLogRoll(SIZE); } else if (doCheckSlowSync()) { @@ -726,9 +729,9 @@ public class FSHLog extends AbstractFSWAL { if (this.lowReplicationRollEnabled) { if (this.consecutiveLogRolls.get() < this.lowReplicationRollLimit) { LOG.warn("HDFS pipeline error detected. " + "Found " + numCurrentReplicas - + " replicas but expecting no less than " + this.minTolerableReplication - + " replicas. " + " Requesting close of WAL. current pipeline: " - + Arrays.toString(getPipeline())); + + " replicas but expecting no less than " + this.minTolerableReplication + + " replicas. " + " Requesting close of WAL. current pipeline: " + + Arrays.toString(getPipeline())); logRollNeeded = true; // If rollWriter is requested, increase consecutiveLogRolls. 
Once it // is larger than lowReplicationRollLimit, disable the @@ -736,7 +739,7 @@ public class FSHLog extends AbstractFSWAL { this.consecutiveLogRolls.getAndIncrement(); } else { LOG.warn("Too many consecutive RollWriter requests, it's a sign of " - + "the total number of live datanodes is lower than the tolerable replicas."); + + "the total number of live datanodes is lower than the tolerable replicas."); this.consecutiveLogRolls.set(0); this.lowReplicationRollEnabled = false; } @@ -826,9 +829,9 @@ public class FSHLog extends AbstractFSWAL { return lowReplicationRollEnabled; } - public static final long FIXED_OVERHEAD = ClassSize - .align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * ClassSize.ATOMIC_INTEGER) - + (3 * Bytes.SIZEOF_INT) + (4 * Bytes.SIZEOF_LONG)); + public static final long FIXED_OVERHEAD = + ClassSize.align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * ClassSize.ATOMIC_INTEGER) + + (3 * Bytes.SIZEOF_INT) + (4 * Bytes.SIZEOF_LONG)); /** * This class is used coordinating two threads holding one thread at a 'safe point' while the @@ -846,13 +849,13 @@ public class FSHLog extends AbstractFSWAL { * To start up the drama, Thread A creates an instance of this class each time it would do this * zigzag dance and passes it to Thread B (these classes use Latches so it is one shot only). * Thread B notices the new instance (via reading a volatile reference or how ever) and it starts - * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it cannot proceed - * until the Thread B 'safe point' is attained. Thread A will be held inside in - * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread B frees - * Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the 'safe - * point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it blocks - * here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it needs - * to do while Thread B is paused. When finished, it lets Thread B lose by calling + * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it + * cannot proceed until the Thread B 'safe point' is attained. Thread A will be held inside in + * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread + * B frees Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the + * 'safe point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it + * blocks here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it + * needs to do while Thread B is paused. When finished, it lets Thread B lose by calling * {@link #releaseSafePoint()} and away go both Threads again. */ static class SafePointZigZagLatch { @@ -876,11 +879,11 @@ public class FSHLog extends AbstractFSWAL { * For Thread A to call when it is ready to wait on the 'safe point' to be attained. Thread A * will be held in here until Thread B calls {@link #safePointAttained()} * @param syncFuture We need this as barometer on outstanding syncs. If it comes home with an - * exception, then something is up w/ our syncing. + * exception, then something is up w/ our syncing. 
* @return The passed syncFuture */ - SyncFuture waitSafePoint(SyncFuture syncFuture) throws InterruptedException, - FailedSyncBeforeLogCloseException { + SyncFuture waitSafePoint(SyncFuture syncFuture) + throws InterruptedException, FailedSyncBeforeLogCloseException { while (!this.safePointAttainedLatch.await(1, TimeUnit.MILLISECONDS)) { checkIfSyncFailed(syncFuture); } @@ -919,7 +922,7 @@ public class FSHLog extends AbstractFSWAL { */ boolean isCocked() { return this.safePointAttainedLatch.getCount() > 0 - && this.safePointReleasedLatch.getCount() > 0; + && this.safePointReleasedLatch.getCount() > 0; } } @@ -1011,8 +1014,8 @@ public class FSHLog extends AbstractFSWAL { private boolean isOutstandingSyncsFromRunners() { // Look at SyncFutures in the SyncRunners - for (SyncRunner syncRunner: syncRunners) { - if(syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { + for (SyncRunner syncRunner : syncRunners) { + if (syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { return true; } } @@ -1022,7 +1025,7 @@ public class FSHLog extends AbstractFSWAL { @Override // We can set endOfBatch in the below method if at end of our this.syncFutures array public void onEvent(final RingBufferTruck truck, final long sequence, boolean endOfBatch) - throws Exception { + throws Exception { // Appends and syncs are coming in order off the ringbuffer. We depend on this fact. We'll // add appends to dfsclient as they come in. Batching appends doesn't give any significant // benefit on measurement. Handler sync calls we will batch up. If we get an exception @@ -1050,11 +1053,14 @@ public class FSHLog extends AbstractFSWAL { // Failed append. Record the exception. this.exception = e; // invoking cleanupOutstandingSyncsOnException when append failed with exception, - // it will cleanup existing sync requests recorded in syncFutures but not offered to SyncRunner yet, - // so there won't be any sync future left over if no further truck published to disruptor. + // it will cleanup existing sync requests recorded in syncFutures but not offered to + // SyncRunner yet, + // so there won't be any sync future left over if no further truck published to + // disruptor. cleanupOutstandingSyncsOnException(sequence, - this.exception instanceof DamagedWALException ? this.exception - : new DamagedWALException("On sync", this.exception)); + this.exception instanceof DamagedWALException + ? this.exception + : new DamagedWALException("On sync", this.exception)); // Return to keep processing events coming off the ringbuffer return; } finally { @@ -1077,15 +1083,15 @@ public class FSHLog extends AbstractFSWAL { return; } // syncRunnerIndex is bound to the range [0, Integer.MAX_INT - 1] as follows: - // * The maximum value possible for syncRunners.length is Integer.MAX_INT - // * syncRunnerIndex starts at 0 and is incremented only here - // * after the increment, the value is bounded by the '%' operator to - // [0, syncRunners.length), presuming the value was positive prior to - // the '%' operator. - // * after being bound to [0, Integer.MAX_INT - 1], the new value is stored in - // syncRunnerIndex ensuring that it can't grow without bound and overflow. - // * note that the value after the increment must be positive, because the most it - // could have been prior was Integer.MAX_INT - 1 and we only increment by 1. 
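The SafePointZigZagLatch javadoc reformatted above describes a one-shot, two-latch handshake: Thread A waits until Thread B announces it has reached a safe point, does its exclusive work while B is parked, then releases B. The following is a minimal standalone version of that handshake built on java.util.concurrent.CountDownLatch; it is a sketch of the protocol as described, not the HBase class itself.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// One-shot handshake: A waits for B to reach a safe point, then B waits for A
// to release it. Mirrors the zigzag described in the javadoc above.
final class ZigZagLatch {
  private final CountDownLatch safePointAttained = new CountDownLatch(1);
  private final CountDownLatch safePointReleased = new CountDownLatch(1);

  // Thread A: block until B signals it has reached the safe point.
  void waitSafePoint() throws InterruptedException {
    while (!safePointAttained.await(1, TimeUnit.MILLISECONDS)) {
      // The real code also checks here whether outstanding syncs have failed.
    }
  }

  // Thread B: announce the safe point and park until A releases us.
  void safePointAttained() throws InterruptedException {
    safePointAttained.countDown();
    safePointReleased.await();
  }

  // Thread A: let B continue.
  void releaseSafePoint() {
    safePointReleased.countDown();
  }

  public static void main(String[] args) throws InterruptedException {
    ZigZagLatch latch = new ZigZagLatch();
    Thread b = new Thread(() -> {
      try {
        latch.safePointAttained();          // B parks here at the safe point
        System.out.println("B resumed");
      } catch (InterruptedException ignored) {
      }
    });
    b.start();
    latch.waitSafePoint();                  // A proceeds once B is parked
    System.out.println("A doing exclusive work while B is held");
    latch.releaseSafePoint();
    b.join();
  }
}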
+ // * The maximum value possible for syncRunners.length is Integer.MAX_INT + // * syncRunnerIndex starts at 0 and is incremented only here + // * after the increment, the value is bounded by the '%' operator to + // [0, syncRunners.length), presuming the value was positive prior to + // the '%' operator. + // * after being bound to [0, Integer.MAX_INT - 1], the new value is stored in + // syncRunnerIndex ensuring that it can't grow without bound and overflow. + // * note that the value after the increment must be positive, because the most it + // could have been prior was Integer.MAX_INT - 1 and we only increment by 1. this.syncRunnerIndex = (this.syncRunnerIndex + 1) % this.syncRunners.length; try { // Below expects that the offer 'transfers' responsibility for the outstanding syncs to @@ -1100,8 +1106,10 @@ public class FSHLog extends AbstractFSWAL { } // We may have picked up an exception above trying to offer sync if (this.exception != null) { - cleanupOutstandingSyncsOnException(sequence, this.exception instanceof DamagedWALException - ? this.exception : new DamagedWALException("On sync", this.exception)); + cleanupOutstandingSyncsOnException(sequence, + this.exception instanceof DamagedWALException + ? this.exception + : new DamagedWALException("On sync", this.exception)); } attainSafePoint(sequence); // It is critical that we offer the futures back to the cache for reuse here after the @@ -1132,13 +1140,15 @@ public class FSHLog extends AbstractFSWAL { // Wait on outstanding syncers; wait for them to finish syncing (unless we've been // shutdown or unless our latch has been thrown because we have been aborted or unless // this WAL is broken and we can't get a sync/append to complete). - while ((!this.shutdown && this.zigzagLatch.isCocked() + while ( + (!this.shutdown && this.zigzagLatch.isCocked() && highestSyncedTxid.get() < currentSequence && // We could be in here and all syncs are failing or failed. Check for this. Otherwise // we'll just be stuck here for ever. In other words, ensure there syncs running. isOutstandingSyncs()) // Wait for all SyncRunners to finish their work so that we can replace the writer - || isOutstandingSyncsFromRunners()) { + || isOutstandingSyncsFromRunners() + ) { synchronized (this.safePointWaiter) { this.safePointWaiter.wait(0, 1); } @@ -1161,8 +1171,8 @@ public class FSHLog extends AbstractFSWAL { try { FSHLog.this.appendEntry(writer, entry); } catch (Exception e) { - String msg = "Append sequenceId=" + entry.getKey().getSequenceId() - + ", requesting roll of WAL"; + String msg = + "Append sequenceId=" + entry.getKey().getSequenceId() + ", requesting roll of WAL"; LOG.warn(msg, e); requestLogRoll(ERROR); throw new DamagedWALException(msg, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index ca51ec0c568..24043ab504d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -37,12 +37,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** - * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. 
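The long comment reformatted above explains why syncRunnerIndex = (syncRunnerIndex + 1) % syncRunners.length can never overflow or go negative: the stored value is always bounded to [0, length) before the next increment. A tiny standalone illustration of that bounded round-robin selection follows; the RoundRobinPicker name is hypothetical.

// Hypothetical sketch: round-robin selection over a fixed pool, with the index
// kept in [0, pool.length) by taking the modulo after each increment, so the
// value before '%' is at most pool.length and can never overflow.
final class RoundRobinPicker<T> {
  private final T[] pool;
  private int index = 0;

  RoundRobinPicker(T[] pool) {
    if (pool.length == 0) {
      throw new IllegalArgumentException("pool must not be empty");
    }
    this.pool = pool;
  }

  T next() {
    index = (index + 1) % pool.length;
    return pool[index];
  }

  public static void main(String[] args) {
    RoundRobinPicker<String> picker = new RoundRobinPicker<>(new String[] { "s0", "s1", "s2" });
    for (int i = 0; i < 5; i++) {
      System.out.println(picker.next()); // s1 s2 s0 s1 s2
    }
  }
}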
- * A subclass of {@link Entry} that carries extra info across the ring buffer such as - * region sequenceid (we want to use this later, just before we write the WAL to ensure region - * edits maintain order). The extra info added here is not 'serialized' as part of the WALEdit - * hence marked 'transient' to underline this fact. It also adds mechanism so we can wait on - * the assign of the region sequence id. See #stampRegionSequenceId(). + * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. A subclass of {@link Entry} that + * carries extra info across the ring buffer such as region sequenceid (we want to use this later, + * just before we write the WAL to ensure region edits maintain order). The extra info added here is + * not 'serialized' as part of the WALEdit hence marked 'transient' to underline this fact. It also + * adds mechanism so we can wait on the assign of the region sequence id. See + * #stampRegionSequenceId(). */ @InterfaceAudience.Private class FSWALEntry extends Entry { @@ -51,9 +51,9 @@ class FSWALEntry extends Entry { private final transient long txid; /** - * If false, means this is a meta edit written by the hbase system itself. It was not in - * memstore. HBase uses these edit types to note in the log operational transitions such - * as compactions, flushes, or region open/closes. + * If false, means this is a meta edit written by the hbase system itself. It was not in memstore. + * HBase uses these edit types to note in the log operational transitions such as compactions, + * flushes, or region open/closes. */ private final transient boolean inMemstore; @@ -67,8 +67,8 @@ class FSWALEntry extends Entry { private final transient ServerCall rpcCall; /** - * @param inMemstore If true, then this is a data edit, one that came from client. If false, it - * is a meta edit made by the hbase system itself and is for the WAL only. + * @param inMemstore If true, then this is a data edit, one that came from client. If false, it is + * a meta edit made by the hbase system itself and is for the WAL only. */ FSWALEntry(final long txid, final WALKeyImpl key, final WALEdit edit, final RegionInfo regionInfo, final boolean inMemstore, ServerCall rpcCall) { @@ -95,7 +95,7 @@ class FSWALEntry extends Entry { return Collections.emptySet(); } else { Set set = new TreeSet<>(Bytes.BYTES_COMPARATOR); - for (Cell cell: cells) { + for (Cell cell : cells) { if (!WALEdit.isMetaEditFamily(cell)) { set.add(CellUtil.cloneFamily(cell)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java index 4d0619c79bb..89481161f4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Class used to push numbers about the WAL into the metrics subsystem. This will take a - * single function call and turn it into multiple manipulations of the hadoop metrics system. + * Class used to push numbers about the WAL into the metrics subsystem. This will take a single + * function call and turn it into multiple manipulations of the hadoop metrics system. */ @InterfaceAudience.Private public class MetricsWAL implements WALActionsListener { @@ -51,12 +48,12 @@ public class MetricsWAL implements WALActionsListener { @Override public void postSync(final long timeInNanos, final int handlerSyncs) { - source.incrementSyncTime(timeInNanos/1000000L); + source.incrementSyncTime(timeInNanos / 1000000L); } @Override public void postAppend(final long size, final long time, final WALKey logkey, - final WALEdit logEdit) throws IOException { + final WALEdit logEdit) throws IOException { TableName tableName = logkey.getTableName(); source.incrementAppendCount(tableName); source.incrementAppendTime(time); @@ -66,9 +63,7 @@ public class MetricsWAL implements WALActionsListener { if (time > 1000) { source.incrementSlowAppendCount(); LOG.warn(String.format("%s took %d ms appending an edit to wal; len~=%s", - Thread.currentThread().getName(), - time, - StringUtils.humanReadableInt(size))); + Thread.currentThread().getName(), time, StringUtils.humanReadableInt(size))); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index 8aba943d0fb..42dcb51e1e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.EOFException; @@ -25,23 +23,16 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader.Builder; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,6 +40,12 @@ import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader.Builder; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; + /** * A Protobuf based WAL has the following structure: *

      @@ -56,12 +53,12 @@ import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferExce * <TrailerSize> <PB_WAL_COMPLETE_MAGIC> *
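Editor's note: for the layout sketched in this Javadoc, the footer of a finished WAL is the serialized trailer, a 4-byte trailer length, and the PB_WAL_COMPLETE_MAGIC marker; trailerSize() in the hunk below recomputes that footprint and compares it against fileLength - walEditsStopOffset. A small self-contained illustration of the same arithmetic, using made-up sizes (the 4-byte magic length and all values are assumptions for the example, not the real constants).

/** Illustration of the WAL footer-size check done by ProtobufLogReader#trailerSize(). */
public class TrailerFootprint {
  // Placeholder values; the real reader uses PB_WAL_COMPLETE_MAGIC and the parsed trailer.
  static final int COMPLETE_MAGIC_LEN = 4; // assumed length of the end-of-file magic bytes
  static final int SIZEOF_INT = 4;         // the trailer length is written as a 4-byte int

  static long calculatedFooterSize(long trailerSerializedSize) {
    return (long) COMPLETE_MAGIC_LEN + SIZEOF_INT + trailerSerializedSize;
  }

  public static void main(String[] args) {
    long fileLength = 1_048_576L;         // hypothetical WAL file length
    long walEditsStopOffset = 1_048_560L; // offset where edits stop and the footer begins
    long trailerSerializedSize = 8;       // hypothetical serialized trailer size

    long expected = fileLength - walEditsStopOffset;               // 16
    long calculated = calculatedFooterSize(trailerSerializedSize); // 4 + 4 + 8 = 16
    if (expected != calculated) {
      System.out.println("Footer mismatch: expected=" + expected + " calculated=" + calculated);
    } else {
      System.out.println("Footer size consistent: " + expected + " bytes");
    }
  }
}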

      * The Reader reads meta information (WAL Compression state, WALTrailer, etc) in - * ProtobufLogReader#initReader(FSDataInputStream). A WALTrailer is an extensible structure - * which is appended at the end of the WAL. This is empty for now; it can contain some meta - * information such as Region level stats, etc in future. + * ProtobufLogReader#initReader(FSDataInputStream). A WALTrailer is an extensible structure which is + * appended at the end of the WAL. This is empty for now; it can contain some meta information such + * as Region level stats, etc in future. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, - HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.CONFIG }) public class ProtobufLogReader extends ReaderBase { private static final Logger LOG = LoggerFactory.getLogger(ProtobufLogReader.class); // public for WALFactory until we move everything to o.a.h.h.wal @@ -97,7 +94,7 @@ public class ProtobufLogReader extends ReaderBase { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); writerClsNames.add(AsyncProtobufLogWriter.class.getSimpleName()); } - + // cell codec classname private String codecClsName = null; @@ -105,12 +102,12 @@ public class ProtobufLogReader extends ReaderBase { public long trailerSize() { if (trailerPresent) { // sizeof PB_WAL_COMPLETE_MAGIC + sizof trailerSize + trailer - final long calculatedSize = (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT - + trailer.getSerializedSize(); + final long calculatedSize = + (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT + trailer.getSerializedSize(); final long expectedSize = fileLength - walEditsStopOffset; if (expectedSize != calculatedSize) { LOG.warn("After parsing the trailer, we expect the total footer to be {} bytes, but we " - + "calculate it as being {}", expectedSize, calculatedSize); + + "calculate it as being {}", expectedSize, calculatedSize); } return expectedSize; } else { @@ -119,23 +116,25 @@ public class ProtobufLogReader extends ReaderBase { } enum WALHdrResult { - EOF, // stream is at EOF when method starts + EOF, // stream is at EOF when method starts SUCCESS, - UNKNOWN_WRITER_CLS // name of writer class isn't recognized + UNKNOWN_WRITER_CLS // name of writer class isn't recognized } - + // context for WALHdr carrying information such as Cell Codec classname static class WALHdrContext { WALHdrResult result; String cellCodecClsName; - + WALHdrContext(WALHdrResult result, String cellCodecClsName) { this.result = result; this.cellCodecClsName = cellCodecClsName; } + WALHdrResult getResult() { return result; } + String getCellCodecClsName() { return cellCodecClsName; } @@ -166,7 +165,7 @@ public class ProtobufLogReader extends ReaderBase { @Override public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream stream) - throws IOException { + throws IOException { this.trailerWarnSize = conf.getInt(WAL_TRAILER_WARN_SIZE, DEFAULT_WAL_TRAILER_WARN_SIZE); super.init(fs, path, conf, stream); } @@ -182,31 +181,28 @@ public class ProtobufLogReader extends ReaderBase { public List getWriterClsNames() { return writerClsNames; } - + /* * Returns the cell codec classname */ public String getCodecClsName() { - return codecClsName; + return codecClsName; } - protected WALHdrContext readHeader(Builder builder, FSDataInputStream stream) - throws IOException { - boolean res = builder.mergeDelimitedFrom(stream); - if 
(!res) return new WALHdrContext(WALHdrResult.EOF, null); - if (builder.hasWriterClsName() && - !getWriterClsNames().contains(builder.getWriterClsName())) { - return new WALHdrContext(WALHdrResult.UNKNOWN_WRITER_CLS, null); - } - String clsName = null; - if (builder.hasCellCodecClsName()) { - clsName = builder.getCellCodecClsName(); - } - return new WALHdrContext(WALHdrResult.SUCCESS, clsName); + protected WALHdrContext readHeader(Builder builder, FSDataInputStream stream) throws IOException { + boolean res = builder.mergeDelimitedFrom(stream); + if (!res) return new WALHdrContext(WALHdrResult.EOF, null); + if (builder.hasWriterClsName() && !getWriterClsNames().contains(builder.getWriterClsName())) { + return new WALHdrContext(WALHdrResult.UNKNOWN_WRITER_CLS, null); + } + String clsName = null; + if (builder.hasCellCodecClsName()) { + clsName = builder.getCellCodecClsName(); + } + return new WALHdrContext(WALHdrResult.SUCCESS, clsName); } - private String initInternal(FSDataInputStream stream, boolean isFirst) - throws IOException { + private String initInternal(FSDataInputStream stream, boolean isFirst) throws IOException { close(); if (!isFirst) { // Re-compute the file length. @@ -234,8 +230,7 @@ public class ProtobufLogReader extends ReaderBase { WALProtos.WALHeader header = builder.build(); this.hasCompression = header.hasHasCompression() && header.getHasCompression(); this.hasTagCompression = header.hasHasTagCompression() && header.getHasTagCompression(); - this.hasValueCompression = header.hasHasValueCompression() && - header.getHasValueCompression(); + this.hasValueCompression = header.hasHasValueCompression() && header.getHasValueCompression(); if (header.hasValueCompressionAlgorithm()) { try { this.valueCompressionType = @@ -252,13 +247,13 @@ public class ProtobufLogReader extends ReaderBase { this.seekOnFs(currentPosition); if (LOG.isTraceEnabled()) { LOG.trace("After reading the trailer: walEditsStopOffset: " + this.walEditsStopOffset - + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + - (trailerPresent ? "true, size: " + trailer.getSerializedSize() : "false") + - ", currentPosition: " + currentPosition); + + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + + (trailerPresent ? "true, size: " + trailer.getSerializedSize() : "false") + + ", currentPosition: " + currentPosition); } - + codecClsName = hdrCtxt.getCellCodecClsName(); - + return hdrCtxt.getCellCodecClsName(); } @@ -276,8 +271,7 @@ public class ProtobufLogReader extends ReaderBase { * *

      * In case the trailer size > this.trailerMaxSize, it is read after a WARN message. - * @return true if a valid trailer is present - * @throws IOException + * @return true if a valid trailer is present n */ private boolean setTrailerIfPresent() { try { @@ -316,7 +310,7 @@ public class ProtobufLogReader extends ReaderBase { } protected WALCellCodec getCodec(Configuration conf, String cellCodecClsName, - CompressionContext compressionContext) throws IOException { + CompressionContext compressionContext) throws IOException { return WALCellCodec.create(conf, cellCodecClsName, compressionContext); } @@ -324,7 +318,7 @@ public class ProtobufLogReader extends ReaderBase { protected void initAfterCompression() throws IOException { initAfterCompression(null); } - + @Override protected void initAfterCompression(String cellCodecClsName) throws IOException { WALCellCodec codec = getCodec(this.conf, cellCodecClsName, this.compressionContext); @@ -375,28 +369,25 @@ public class ProtobufLogReader extends ReaderBase { throw new EOFException(); } size = CodedInputStream.readRawVarint32(firstByte, this.inputStream); - // available may be < 0 on local fs for instance. If so, can't depend on it. + // available may be < 0 on local fs for instance. If so, can't depend on it. available = this.inputStream.available(); if (available > 0 && available < size) { - throw new EOFException( - "Available stream not enough for edit, " + "inputStream.available()= " - + this.inputStream.available() + ", " + "entry size= " + size + " at offset = " - + this.inputStream.getPos()); + throw new EOFException("Available stream not enough for edit, " + + "inputStream.available()= " + this.inputStream.available() + ", " + "entry size= " + + size + " at offset = " + this.inputStream.getPos()); } ProtobufUtil.mergeFrom(builder, ByteStreams.limit(this.inputStream, size), (int) size); } catch (InvalidProtocolBufferException ipbe) { resetPosition = true; - throw (EOFException) new EOFException( - "Invalid PB, EOF? Ignoring; originalPosition=" + originalPosition + ", currentPosition=" - + this.inputStream.getPos() + ", messageSize=" + size + ", currentAvailable=" - + available).initCause(ipbe); + throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; originalPosition=" + + originalPosition + ", currentPosition=" + this.inputStream.getPos() + ", messageSize=" + + size + ", currentAvailable=" + available).initCause(ipbe); } if (!builder.isInitialized()) { // TODO: not clear if we should try to recover from corrupt PB that looks semi-legit. - // If we can get the KV count, we could, theoretically, try to get next record. - throw new EOFException( - "Partial PB while reading WAL, " + "probably an unexpected EOF, ignoring. current offset=" - + this.inputStream.getPos()); + // If we can get the KV count, we could, theoretically, try to get next record. + throw new EOFException("Partial PB while reading WAL, " + + "probably an unexpected EOF, ignoring. 
current offset=" + this.inputStream.getPos()); } WALKey walKey = builder.build(); entry.getKey().readFieldsFromPb(walKey, this.byteStringUncompressor); @@ -421,12 +412,11 @@ public class ProtobufLogReader extends ReaderBase { } catch (Throwable t) { LOG.trace("Error getting pos for error message - ignoring", t); } - String message = - " while reading " + expectedCells + " WAL KVs; started reading at " + posBefore - + " and read up to " + posAfterStr; + String message = " while reading " + expectedCells + " WAL KVs; started reading at " + + posBefore + " and read up to " + posAfterStr; IOException realEofEx = extractHiddenEof(ex); - throw (EOFException) new EOFException("EOF " + message). - initCause(realEofEx != null ? realEofEx : ex); + throw (EOFException) new EOFException("EOF " + message) + .initCause(realEofEx != null ? realEofEx : ex); } if (trailerPresent && this.inputStream.getPos() > this.walEditsStopOffset) { LOG.error( @@ -437,8 +427,10 @@ public class ProtobufLogReader extends ReaderBase { } catch (EOFException eof) { // If originalPosition is < 0, it is rubbish and we cannot use it (probably local fs) if (originalPosition < 0) { - LOG.debug("Encountered a malformed edit, but can't seek back to last good position " - + "because originalPosition is negative. last offset={}", this.inputStream.getPos(), eof); + LOG.debug( + "Encountered a malformed edit, but can't seek back to last good position " + + "because originalPosition is negative. last offset={}", + this.inputStream.getPos(), eof); throw eof; } // If stuck at the same place and we got an exception, lets go back at the beginning. @@ -467,12 +459,14 @@ public class ProtobufLogReader extends ReaderBase { // for EOF, not EOFException; and scanner further hides it inside RuntimeException. IOException ioEx = null; if (ex instanceof EOFException) { - return (EOFException)ex; + return (EOFException) ex; } else if (ex instanceof IOException) { - ioEx = (IOException)ex; - } else if (ex instanceof RuntimeException - && ex.getCause() != null && ex.getCause() instanceof IOException) { - ioEx = (IOException)ex.getCause(); + ioEx = (IOException) ex; + } else if ( + ex instanceof RuntimeException && ex.getCause() != null + && ex.getCause() instanceof IOException + ) { + ioEx = (IOException) ex.getCause(); } if ((ioEx != null) && (ioEx.getMessage() != null)) { if (ioEx.getMessage().contains("EOF")) return ioEx; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java index 2df19fd3dd9..7cc16360c3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,8 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; * Writer for protobuf-based WAL. 
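Editor's note: the writer that follows frames each entry as a length-delimited protobuf WALKey carrying the count of cells that follow, then the cells themselves through the cell encoder; the reader walks the file back using the same framing. A toy, JDK-only sketch of that "delimited header plus counted, length-prefixed records" idea (the record format here is invented for illustration and is not the real WALKey/cell encoding).

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

/** Toy framing: a small header that announces how many records follow, then the records. */
public class DelimitedFraming {

  static void writeEntry(DataOutputStream out, List<byte[]> cells) throws IOException {
    out.writeInt(cells.size());          // "followingKvCount" stand-in
    for (byte[] cell : cells) {
      out.writeInt(cell.length);         // each record is length-prefixed
      out.write(cell);
    }
  }

  static int readEntry(DataInputStream in) throws IOException {
    int count = in.readInt();            // the header tells us how many records to expect
    for (int i = 0; i < count; i++) {
      byte[] cell = new byte[in.readInt()];
      in.readFully(cell);                // readFully fails loudly on a truncated record
    }
    return count;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    writeEntry(new DataOutputStream(buf), Arrays.asList("row1".getBytes(), "row2".getBytes()));
    int cells = readEntry(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
    System.out.println("read back " + cells + " cells");
  }
}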
*/ @InterfaceAudience.Private -public class ProtobufLogWriter extends AbstractProtobufLogWriter - implements FSHLogProvider.Writer { +public class ProtobufLogWriter extends AbstractProtobufLogWriter implements FSHLogProvider.Writer { private static final Logger LOG = LoggerFactory.getLogger(ProtobufLogWriter.class); @@ -53,8 +52,8 @@ public class ProtobufLogWriter extends AbstractProtobufLogWriter @Override public void append(Entry entry) throws IOException { - entry.getKey().getBuilder(compressor). - setFollowingKvCount(entry.getEdit().size()).build().writeDelimitedTo(output); + entry.getKey().getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() + .writeDelimitedTo(output); for (Cell cell : entry.getEdit().getCells()) { // cellEncoder must assume little about the stream, since we write PB and cells in turn. cellEncoder.write(cell); @@ -104,10 +103,10 @@ public class ProtobufLogWriter extends AbstractProtobufLogWriter @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { - this.output = CommonFSUtils.createForWal(fs, path, overwritable, bufferSize, replication, - blockSize, false); + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException { + this.output = + CommonFSUtils.createForWal(fs, path, overwritable, bufferSize, replication, blockSize, false); if (fs.getConf().getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true)) { if (!output.hasCapability(StreamCapabilities.HFLUSH)) { throw new StreamLacksCapabilityException(StreamCapabilities.HFLUSH); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java index 90a1653a514..8e84169a7a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -35,7 +33,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) public abstract class ReaderBase implements AbstractFSWALProvider.Reader { private static final Logger LOG = LoggerFactory.getLogger(ReaderBase.class); protected Configuration conf; @@ -44,7 +42,7 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { protected long edit = 0; protected long fileLength; /** - * Compression context to use reading. Can be null if no compression. + * Compression context to use reading. Can be null if no compression. 
*/ protected CompressionContext compressionContext = null; protected boolean emptyCompressionContext = true; @@ -57,7 +55,7 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { @Override public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream stream) - throws IOException { + throws IOException { this.conf = conf; this.path = path; this.fs = fs; @@ -70,14 +68,15 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { try { if (compressionContext == null) { if (LOG.isDebugEnabled()) { - LOG.debug("Initializing compression context for {}: isRecoveredEdits={}" + - ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", path, - CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), hasValueCompression(), - getValueCompressionAlgorithm()); + LOG.debug( + "Initializing compression context for {}: isRecoveredEdits={}" + + ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", + path, CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), + hasValueCompression(), getValueCompressionAlgorithm()); } - compressionContext = new CompressionContext(LRUDictionary.class, - CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), - hasValueCompression(), getValueCompressionAlgorithm()); + compressionContext = + new CompressionContext(LRUDictionary.class, CommonFSUtils.isRecoveredEdits(path), + hasTagCompression(), hasValueCompression(), getValueCompressionAlgorithm()); } else { compressionContext.clear(); } @@ -109,8 +108,7 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { // It is old ROOT table edit, ignore it LOG.info("Got an old ROOT edit, ignoring "); return next(e); - } - else throw iae; + } else throw iae; } edit++; if (compressionContext != null && emptyCompressionContext) { @@ -133,8 +131,8 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { } /** - * Initializes the log reader with a particular stream (may be null). - * Reader assumes ownership of the stream if not null and may use it. Called once. + * Initializes the log reader with a particular stream (may be null). Reader assumes ownership of + * the stream if not null and may use it. Called once. * @return the class name of cell Codec, null if such information is not available */ protected abstract String initReader(FSDataInputStream stream) throws IOException; @@ -149,6 +147,7 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { * @param cellCodecClsName class name of cell Codec */ protected abstract void initAfterCompression(String cellCodecClsName) throws IOException; + /** * @return Whether compression is enabled for this log. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java index dfef429455c..b03540dcd88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,9 @@ import org.apache.yetus.audience.InterfaceAudience; final class RingBufferTruck { public enum Type { - APPEND, SYNC, EMPTY + APPEND, + SYNC, + EMPTY } private Type type = Type.EMPTY; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java index e2d294ac1f2..ea56e0926b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.crypto.Encryptor; @@ -26,6 +25,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -34,21 +34,22 @@ public class SecureAsyncProtobufLogWriter extends AsyncProtobufLogWriter { private Encryptor encryptor = null; public SecureAsyncProtobufLogWriter(EventLoopGroup eventLoopGroup, - Class channelClass) { + Class channelClass) { super(eventLoopGroup, channelClass); } /* * @return class name which is recognized by hbase-1.x to avoid ProtobufLogReader throwing error: - * IOException: Got unknown writer class: SecureAsyncProtobufLogWriter + * IOException: Got unknown writer class: SecureAsyncProtobufLogWriter */ @Override protected String getWriterClassName() { return "SecureProtobufLogWriter"; } + @Override protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { return super.buildSecureWALHeader(conf, builder); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java index e43d140826c..863739c72f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -23,20 +22,20 @@ import java.security.Key; import java.security.KeyException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Decryptor; import org.apache.hadoop.hbase.io.crypto.Encryption; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.EncryptionTest; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SecureProtobufLogReader extends ProtobufLogReader { @@ -59,7 +58,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { @Override protected WALHdrContext readHeader(WALHeader.Builder builder, FSDataInputStream stream) - throws IOException { + throws IOException { WALHdrContext hdrCtxt = super.readHeader(builder, stream); WALHdrResult result = hdrCtxt.getResult(); // We need to unconditionally handle the case where the WAL has a key in @@ -89,8 +88,8 @@ public class SecureProtobufLogReader extends ProtobufLogReader { } } if (key == null) { - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); + String masterKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()); try { // Then, try the cluster master key key = EncryptionUtil.unwrapWALKey(conf, masterKeyName, keyBytes); @@ -100,8 +99,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { key = EncryptionUtil.unwrapWALKey(conf, alternateKeyName, keyBytes); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java index eb8c591a15e..0928f148de0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.crypto.Encryptor; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -33,7 +32,7 @@ public class SecureProtobufLogWriter extends ProtobufLogWriter { @Override protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { return super.buildSecureWALHeader(conf, builder); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java index 6d2bd61a023..4201dd07533 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java @@ -22,14 +22,12 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.commons.io.IOUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.io.ByteBufferWriterOutputStream; import org.apache.hadoop.hbase.io.crypto.Decryptor; @@ -37,6 +35,7 @@ import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.Encryptor; import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A WALCellCodec that encrypts the WALedits. 
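Editor's note: the key-unwrapping code above tries the configured cluster master key first and only falls back to the alternate master key when the first unwrap fails, which is what lets a cluster keep reading WALs written before a key rotation. A JDK-only sketch of that fallback, where unwrap() is a stand-in for the real EncryptionUtil.unwrapWALKey() and the key names are hypothetical.

import java.security.Key;
import java.security.KeyException;
import javax.crypto.spec.SecretKeySpec;

/** Sketch of the "current master key first, then alternate key" unwrap fallback. */
public class KeyUnwrapFallback {

  /** Pretend unwrap: only succeeds for the key name "current-master". */
  static Key unwrap(String keyName, byte[] wrapped) throws KeyException {
    if (!"current-master".equals(keyName)) {
      throw new KeyException("cannot unwrap with key '" + keyName + "'");
    }
    return new SecretKeySpec(wrapped, "AES");
  }

  static Key unwrapWithFallback(String masterKeyName, String alternateKeyName, byte[] wrapped)
      throws KeyException {
    try {
      return unwrap(masterKeyName, wrapped);      // first, the configured cluster master key
    } catch (KeyException e) {
      if (alternateKeyName == null) {
        throw e;                                  // nothing else to try
      }
      return unwrap(alternateKeyName, wrapped);   // then the alternate (previous) master key
    }
  }

  public static void main(String[] args) throws KeyException {
    byte[] wrapped = new byte[16]; // hypothetical wrapped key material
    Key key = unwrapWithFallback("old-master", "current-master", wrapped);
    System.out.println("unwrapped with algorithm " + key.getAlgorithm());
  }
}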
@@ -91,8 +90,7 @@ public class SecureWALCellCodec extends WALCellCodec { // encoder supports that just read the remainder in directly if (ivLength != this.iv.length) { - throw new IOException("Incorrect IV length: expected=" + iv.length + " have=" + - ivLength); + throw new IOException("Incorrect IV length: expected=" + iv.length + " have=" + ivLength); } IOUtils.readFully(in, this.iv); @@ -124,12 +122,12 @@ public class SecureWALCellCodec extends WALCellCodec { // Row int elemLen = StreamUtils.readRawVarint32(cin); - pos = Bytes.putShort(backingArray, pos, (short)elemLen); + pos = Bytes.putShort(backingArray, pos, (short) elemLen); IOUtils.readFully(cin, backingArray, pos, elemLen); pos += elemLen; // Family elemLen = StreamUtils.readRawVarint32(cin); - pos = Bytes.putByte(backingArray, pos, (byte)elemLen); + pos = Bytes.putByte(backingArray, pos, (byte) elemLen); IOUtils.readFully(cin, backingArray, pos, elemLen); pos += elemLen; // Qualifier diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 4618b872ad2..64fe638eca7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,41 +54,42 @@ class SequenceIdAccounting { /** * This lock ties all operations on {@link SequenceIdAccounting#flushingSequenceIds} and - * {@link #lowestUnflushedSequenceIds} Maps. {@link #lowestUnflushedSequenceIds} has the - * lowest outstanding sequence ids EXCEPT when flushing. When we flush, the current - * lowest set for the region/column family are moved (atomically because of this lock) to + * {@link #lowestUnflushedSequenceIds} Maps. {@link #lowestUnflushedSequenceIds} has the lowest + * outstanding sequence ids EXCEPT when flushing. When we flush, the current lowest set for the + * region/column family are moved (atomically because of this lock) to * {@link #flushingSequenceIds}. - * - *

      The two Maps are tied by this locking object EXCEPT when we go to update the lowest - * entry; see {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on - * {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest - * sequence id if we find that there is no entry for the current column family. There will be no - * entry only if we just came up OR we have moved aside current set of lowest sequence ids - * because the current set are being flushed (by putting them into {@link #flushingSequenceIds}). - * This is how we pick up the next 'lowest' sequence id per region per column family to be used - * figuring what is in the next flush. + *

      + * The two Maps are tied by this locking object EXCEPT when we go to update the lowest entry; see + * {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on + * {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest sequence id + * if we find that there is no entry for the current column family. There will be no entry only if + * we just came up OR we have moved aside current set of lowest sequence ids because the current + * set are being flushed (by putting them into {@link #flushingSequenceIds}). This is how we pick + * up the next 'lowest' sequence id per region per column family to be used figuring what is in + * the next flush. */ private final Object tieLock = new Object(); /** - * Map of encoded region names and family names to their OLDEST -- i.e. their first, - * the longest-lived, their 'earliest', the 'lowest' -- sequence id. - * - *

      When we flush, the current lowest sequence ids get cleared and added to - * {@link #flushingSequenceIds}. The next append that comes in, is then added - * here to {@link #lowestUnflushedSequenceIds} as the next lowest sequenceid. - * - *

      If flush fails, currently server is aborted so no need to restore previous sequence ids. - *

      Needs to be concurrent Maps because we use putIfAbsent updating oldest. + * Map of encoded region names and family names to their OLDEST -- i.e. their first, the + * longest-lived, their 'earliest', the 'lowest' -- sequence id. + *

      + * When we flush, the current lowest sequence ids get cleared and added to + * {@link #flushingSequenceIds}. The next append that comes in, is then added here to + * {@link #lowestUnflushedSequenceIds} as the next lowest sequenceid. + *

      + * If flush fails, currently server is aborted so no need to restore previous sequence ids. + *

      + * Needs to be concurrent Maps because we use putIfAbsent updating oldest. */ - private final ConcurrentMap> - lowestUnflushedSequenceIds = new ConcurrentHashMap<>(); + private final ConcurrentMap> lowestUnflushedSequenceIds = new ConcurrentHashMap<>(); /** * Map of encoded region names and family names to their lowest or OLDEST sequence/edit id * currently being flushed out to hfiles. Entries are moved here from - * {@link #lowestUnflushedSequenceIds} while the lock {@link #tieLock} is held - * (so movement between the Maps is atomic). + * {@link #lowestUnflushedSequenceIds} while the lock {@link #tieLock} is held (so movement + * between the Maps is atomic). */ private final Map> flushingSequenceIds = new HashMap<>(); @@ -107,8 +108,8 @@ class SequenceIdAccounting { /** * Returns the lowest unflushed sequence id for the region. - * @return Lowest outstanding unflushed sequenceid for encodedRegionName. Will - * return {@link HConstants#NO_SEQNUM} when none. + * @return Lowest outstanding unflushed sequenceid for encodedRegionName. Will return + * {@link HConstants#NO_SEQNUM} when none. */ long getLowestSequenceId(final byte[] encodedRegionName) { synchronized (this.tieLock) { @@ -149,7 +150,7 @@ class SequenceIdAccounting { /** * Reset the accounting of highest sequenceid by regionname. * @return Return the previous accounting Map of regions to the last sequence id written into - * each. + * each. */ Map resetHighest() { Map old = this.highestSequenceIds; @@ -159,15 +160,11 @@ class SequenceIdAccounting { /** * We've been passed a new sequenceid for the region. Set it as highest seen for this region and - * if we are to record oldest, or lowest sequenceids, save it as oldest seen if nothing - * currently older. - * @param encodedRegionName - * @param families - * @param sequenceid - * @param lowest Whether to keep running account of oldest sequence id. + * if we are to record oldest, or lowest sequenceids, save it as oldest seen if nothing currently + * older. nnn * @param lowest Whether to keep running account of oldest sequence id. */ void update(byte[] encodedRegionName, Set families, long sequenceid, - final boolean lowest) { + final boolean lowest) { Long l = Long.valueOf(sequenceid); this.highestSequenceIds.put(encodedRegionName, l); if (lowest) { @@ -206,7 +203,7 @@ class SequenceIdAccounting { * Update the store sequence id, e.g., upon executing in-memory compaction */ void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceId, - boolean onlyIfGreater) { + boolean onlyIfGreater) { if (sequenceId == null) { return; } @@ -249,8 +246,8 @@ class SequenceIdAccounting { */ private static long getLowestSequenceId(Map sequenceids) { long lowest = HConstants.NO_SEQNUM; - for (Map.Entry entry : sequenceids.entrySet()){ - if (entry.getKey().toString().equals("METAFAMILY")){ + for (Map.Entry entry : sequenceids.entrySet()) { + if (entry.getKey().toString().equals("METAFAMILY")) { continue; } Long sid = entry.getValue(); @@ -262,9 +259,8 @@ class SequenceIdAccounting { } /** - * @param src - * @return New Map that has same keys as src but instead of a Map for a value, it - * instead has found the smallest sequence id and it returns that as the value instead. + * n * @return New Map that has same keys as src but instead of a Map for a value, it + * instead has found the smallest sequence id and it returns that as the value instead. 
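Editor's note: the Javadoc above describes the core of the accounting: the lowest unflushed sequence id per region/family lives in one concurrent map, and at flush time the current lowest entries are moved, under a shared lock, into a second map so the next append can establish a new lowest. A greatly simplified, single-family-per-region sketch of that two-map move (the names and the String region key are illustrative; the real class tracks ids per column family and restores the moved entries if a flush aborts).

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/** Greatly simplified sketch of the two-map accounting described above. */
public class TwoMapAccounting {
  private final Object tieLock = new Object();
  private final ConcurrentMap<String, Long> lowestUnflushed = new ConcurrentHashMap<>();
  private final Map<String, Long> flushing = new HashMap<>();

  /** Record an append; only the first (lowest) unflushed id per region sticks. */
  void onAppend(String region, long sequenceId) {
    lowestUnflushed.putIfAbsent(region, sequenceId);
  }

  /** Move the current lowest id aside; the next append becomes the new lowest. */
  Long startCacheFlush(String region) {
    synchronized (tieLock) {
      Long lowest = lowestUnflushed.remove(region);
      if (lowest != null) {
        flushing.put(region, lowest);
      }
      return lowest; // null means nothing outstanding for this region
    }
  }

  /** Flush finished; the moved-aside ids are no longer outstanding. */
  void completeCacheFlush(String region) {
    synchronized (tieLock) {
      flushing.remove(region);
    }
  }

  public static void main(String[] args) {
    TwoMapAccounting acct = new TwoMapAccounting();
    acct.onAppend("region-a", 10);
    acct.onAppend("region-a", 11);                        // ignored: 10 is still the lowest
    System.out.println(acct.startCacheFlush("region-a")); // 10 moves to the flushing map
    acct.onAppend("region-a", 12);                        // 12 becomes the new lowest unflushed
    acct.completeCacheFlush("region-a");
    System.out.println(acct.lowestUnflushed.get("region-a")); // 12
  }
}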
*/ private > Map flattenToLowestSequenceId(Map src) { if (src == null || src.isEmpty()) { @@ -282,19 +278,19 @@ class SequenceIdAccounting { /** * @param encodedRegionName Region to flush. - * @param families Families to flush. May be a subset of all families in the region. - * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if - * we are flushing a subset of all families but there are no edits in those families not - * being flushed; in other words, this is effectively same as a flush of all of the region - * though we were passed a subset of regions. Otherwise, it returns the sequence id of the - * oldest/lowest outstanding edit. + * @param families Families to flush. May be a subset of all families in the region. + * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if we are + * flushing a subset of all families but there are no edits in those families not being + * flushed; in other words, this is effectively same as a flush of all of the region + * though we were passed a subset of regions. Otherwise, it returns the sequence id of the + * oldest/lowest outstanding edit. */ Long startCacheFlush(final byte[] encodedRegionName, final Set families) { - Map familytoSeq = new HashMap<>(); - for (byte[] familyName : families){ - familytoSeq.put(familyName,HConstants.NO_SEQNUM); + Map familytoSeq = new HashMap<>(); + for (byte[] familyName : families) { + familytoSeq.put(familyName, HConstants.NO_SEQNUM); } - return startCacheFlush(encodedRegionName,familytoSeq); + return startCacheFlush(encodedRegionName, familytoSeq); } Long startCacheFlush(final byte[] encodedRegionName, final Map familyToSeq) { @@ -310,7 +306,7 @@ class SequenceIdAccounting { for (Map.Entry entry : familyToSeq.entrySet()) { ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap((byte[]) entry.getKey()); Long seqId = null; - if(entry.getValue() == HConstants.NO_SEQNUM) { + if (entry.getValue() == HConstants.NO_SEQNUM) { seqId = m.remove(familyNameWrapper); } else { seqId = m.replace(familyNameWrapper, entry.getValue()); @@ -324,8 +320,8 @@ class SequenceIdAccounting { } if (oldSequenceIds != null && !oldSequenceIds.isEmpty()) { if (this.flushingSequenceIds.put(encodedRegionName, oldSequenceIds) != null) { - LOG.warn("Flushing Map not cleaned up for " + Bytes.toString(encodedRegionName) + - ", sequenceid=" + oldSequenceIds); + LOG.warn("Flushing Map not cleaned up for " + Bytes.toString(encodedRegionName) + + ", sequenceid=" + oldSequenceIds); } } if (m.isEmpty()) { @@ -397,7 +393,7 @@ class SequenceIdAccounting { flushing = this.flushingSequenceIds.remove(encodedRegionName); if (flushing != null) { Map unflushed = getOrCreateLowestSequenceIds(encodedRegionName); - for (Map.Entry e: flushing.entrySet()) { + for (Map.Entry e : flushing.entrySet()) { // Set into unflushed the 'old' oldest sequenceid and if any value in flushed with this // value, it will now be in tmpMap. 
tmpMap.put(e.getKey(), unflushed.put(e.getKey(), e.getValue())); @@ -411,9 +407,9 @@ class SequenceIdAccounting { for (Map.Entry e : flushing.entrySet()) { Long currentId = tmpMap.get(e.getKey()); if (currentId != null && currentId.longValue() < e.getValue().longValue()) { - String errorStr = Bytes.toString(encodedRegionName) + " family " - + e.getKey().toString() + " acquired edits out of order current memstore seq=" - + currentId + ", previous oldest unflushed id=" + e.getValue(); + String errorStr = Bytes.toString(encodedRegionName) + " family " + e.getKey().toString() + + " acquired edits out of order current memstore seq=" + currentId + + ", previous oldest unflushed id=" + e.getValue(); LOG.error(errorStr); Runtime.getRuntime().halt(1); } @@ -425,7 +421,7 @@ class SequenceIdAccounting { * See if passed sequenceids are lower -- i.e. earlier -- than any outstanding * sequenceids, sequenceids we are holding on to in this accounting instance. * @param sequenceids Keyed by encoded region name. Cannot be null (doesn't make sense for it to - * be null). + * be null). * @return true if all sequenceids are lower, older than, the old sequenceids in this instance. */ boolean areAllLower(Map sequenceids) { @@ -456,9 +452,8 @@ class SequenceIdAccounting { /** * Iterates over the given Map and compares sequence ids with corresponding entries in - * {@link #lowestUnflushedSequenceIds}. If a region in - * {@link #lowestUnflushedSequenceIds} has a sequence id less than that passed in - * sequenceids then return it. + * {@link #lowestUnflushedSequenceIds}. If a region in {@link #lowestUnflushedSequenceIds} has a + * sequence id less than that passed in sequenceids then return it. * @param sequenceids Sequenceids keyed by encoded region name. * @return stores of regions found in this instance with sequence ids less than those passed in. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java index 862e91826b5..90825d4884c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java @@ -93,9 +93,7 @@ class SyncFuture { /** * Call this method to clear old usage and get it ready for new deploy. - * - * @param txid the new transaction id - * @return this + * @param txid the new transaction id n */ SyncFuture reset(long txid, boolean forceSync) { if (t != null && t != Thread.currentThread()) { @@ -114,8 +112,8 @@ class SyncFuture { @Override public String toString() { - return "done=" + isDone() + ", txid=" + this.txid + " threadID=" + t.getId() + - " threadName=" + t.getName(); + return "done=" + isDone() + ", txid=" + this.txid + " threadID=" + t.getId() + " threadName=" + + t.getName(); } long getTxid() { @@ -137,7 +135,7 @@ class SyncFuture { /** * @param txid the transaction id at which this future 'completed'. - * @param t Can be null. Set if we are 'completing' on error (and this 't' is the error). + * @param t Can be null. Set if we are 'completing' on error (and this 't' is the error). * @return True if we successfully marked this outstanding future as completed/done. Returns false * if this future is already 'done' when this method called. */ @@ -152,7 +150,7 @@ class SyncFuture { // Something badly wrong. 
if (throwable == null) { this.throwable = - new IllegalStateException("done txid=" + txid + ", my txid=" + this.txid); + new IllegalStateException("done txid=" + txid + ", my txid=" + this.txid); } } // Mark done. @@ -164,15 +162,14 @@ class SyncFuture { } } - long get(long timeoutNs) throws InterruptedException, - ExecutionException, TimeoutIOException { + long get(long timeoutNs) throws InterruptedException, ExecutionException, TimeoutIOException { doneLock.lock(); try { while (doneTxid == NOT_DONE) { if (!doneCondition.await(timeoutNs, TimeUnit.NANOSECONDS)) { - throw new TimeoutIOException("Failed to get sync result after " - + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + " ms for txid=" + this.txid - + ", WAL system stuck?"); + throw new TimeoutIOException( + "Failed to get sync result after " + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + " ms for txid=" + this.txid + ", WAL system stuck?"); } } if (this.throwable != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java index 9dd2e0aba93..27b9f4769ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java @@ -21,22 +21,18 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; /** - * A cache of {@link SyncFuture}s. This class supports two methods - * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer(SyncFuture)}. - * - * Usage pattern: - * SyncFuture sf = syncFutureCache.getIfPresentOrNew(); - * sf.reset(...); - * // Use the sync future - * finally: syncFutureCache.offer(sf); - * - * Offering the sync future back to the cache makes it eligible for reuse within the same thread - * context. Cache keyed by the accessing thread instance and automatically invalidated if it remains - * unused for {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. + * A cache of {@link SyncFuture}s. This class supports two methods + * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer(SyncFuture)}. Usage + * pattern: SyncFuture sf = syncFutureCache.getIfPresentOrNew(); sf.reset(...); // Use the sync + * future finally: syncFutureCache.offer(sf); Offering the sync future back to the cache makes it + * eligible for reuse within the same thread context. Cache keyed by the accessing thread instance + * and automatically invalidated if it remains unused for + * {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. 
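Editor's note: the usage pattern spelled out in this Javadoc is get-or-create, reset, use, then offer back in a finally block so the same thread can reuse the instance on its next sync. A simplified JDK-only stand-in for that per-thread reuse (the real cache is a Guava cache with timed expiry; Reusable here plays the role of SyncFuture).

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/** Simplified stand-in for the per-thread reuse pattern described above. */
public class PerThreadReuseCache {

  static final class Reusable {
    long txid;
    Reusable reset(long txid) { this.txid = txid; return this; }
  }

  private final ConcurrentMap<Thread, Reusable> cache = new ConcurrentHashMap<>();

  Reusable getIfPresentOrNew() {
    Reusable r = cache.get(Thread.currentThread());
    return r != null ? r : new Reusable();
  }

  void offer(Reusable r) {
    // Make the instance eligible for reuse by the same thread on its next call.
    cache.put(Thread.currentThread(), r);
  }

  public static void main(String[] args) {
    PerThreadReuseCache cache = new PerThreadReuseCache();
    Reusable first = cache.getIfPresentOrNew().reset(1L);
    try {
      // ... wait on the sync here in the real code ...
    } finally {
      cache.offer(first);
    }
    Reusable second = cache.getIfPresentOrNew(); // same instance, reused
    System.out.println("reused: " + (first == second));
  }
}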
*/ @InterfaceAudience.Private public final class SyncFutureCache { @@ -47,9 +43,9 @@ public final class SyncFutureCache { public SyncFutureCache(final Configuration conf) { final int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); syncFutureCache = CacheBuilder.newBuilder().initialCapacity(handlerCount) - .expireAfterWrite(SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS, TimeUnit.MINUTES).build(); + .expireAfterWrite(SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS, TimeUnit.MINUTES).build(); } public SyncFuture getIfPresentOrNew() { @@ -71,4 +67,4 @@ public final class SyncFutureCache { syncFutureCache.invalidateAll(); } } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java index c109a1b4bdd..901ada78015 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,8 +25,8 @@ import org.apache.hadoop.hbase.wal.WALKey; import org.apache.yetus.audience.InterfaceAudience; /** - * Get notification of WAL events. The invocations are inline - * so make sure your implementation is fast else you'll slow hbase. + * Get notification of WAL events. The invocations are inline so make sure your implementation is + * fast else you'll slow hbase. */ @InterfaceAudience.Private public interface WALActionsListener { @@ -45,67 +44,75 @@ public interface WALActionsListener { }; /** - * The WAL is going to be rolled. The oldPath can be null if this is - * the first log file from the regionserver. + * The WAL is going to be rolled. The oldPath can be null if this is the first log file from the + * regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void preLogRoll(Path oldPath, Path newPath) throws IOException {} + default void preLogRoll(Path oldPath, Path newPath) throws IOException { + } /** - * The WAL has been rolled. The oldPath can be null if this is - * the first log file from the regionserver. + * The WAL has been rolled. The oldPath can be null if this is the first log file from the + * regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void postLogRoll(Path oldPath, Path newPath) throws IOException {} + default void postLogRoll(Path oldPath, Path newPath) throws IOException { + } /** * The WAL is going to be archived. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void preLogArchive(Path oldPath, Path newPath) throws IOException {} + default void preLogArchive(Path oldPath, Path newPath) throws IOException { + } /** * The WAL has been archived. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void postLogArchive(Path oldPath, Path newPath) throws IOException {} + default void postLogArchive(Path oldPath, Path newPath) throws IOException { + } /** * A request was made that the WAL be rolled. 
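Editor's note: the listener interface above ships no-op default bodies precisely so that an implementation such as MetricsWAL can override only the callbacks it cares about; spotless merely reformats those empty defaults onto two lines. A small illustrative sketch of that pattern with invented type names (only the 1000 ms slow-append threshold is taken from the MetricsWAL code earlier in this patch).

/** Sketch of the listener-with-default-no-ops pattern; not the real HBase types. */
public class ListenerSketch {

  interface WalListener {
    default void postSync(long timeInNanos, int handlerSyncs) {
    }

    default void postAppend(long size, long timeMillis) {
    }
  }

  /** Only interested in append latency; postSync keeps its no-op default. */
  static class SlowAppendLogger implements WalListener {
    static final long SLOW_APPEND_MS = 1000; // same threshold MetricsWAL warns at

    @Override
    public void postAppend(long size, long timeMillis) {
      if (timeMillis > SLOW_APPEND_MS) {
        System.out.println("slow append: " + timeMillis + " ms for ~" + size + " bytes");
      }
    }
  }

  public static void main(String[] args) {
    WalListener listener = new SlowAppendLogger();
    listener.postAppend(64, 1500);   // triggers the warning
    listener.postSync(2_000_000, 1); // falls through to the no-op default
  }
}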
*/ - default void logRollRequested(RollRequestReason reason) {} + default void logRollRequested(RollRequestReason reason) { + } /** * The WAL is about to close. */ - default void logCloseRequested() {} + default void logCloseRequested() { + } /** - * Called before each write. - */ - default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {} + * Called before each write. + */ + default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { + } /** - * For notification post append to the writer. Used by metrics system at least. - * TODO: Combine this with above. - * @param entryLen approx length of cells in this append. + * For notification post append to the writer. Used by metrics system at least. TODO: Combine this + * with above. + * @param entryLen approx length of cells in this append. * @param elapsedTimeMillis elapsed time in milliseconds. - * @param logKey A WAL key - * @param logEdit A WAL edit containing list of cells. + * @param logKey A WAL key + * @param logEdit A WAL edit containing list of cells. * @throws IOException if any network or I/O error occurred */ default void postAppend(final long entryLen, final long elapsedTimeMillis, final WALKey logKey, - final WALEdit logEdit) throws IOException {} + final WALEdit logEdit) throws IOException { + } /** - * For notification post writer sync. Used by metrics system at least. - * @param timeInNanos How long the filesystem sync took in nanoseconds. - * @param handlerSyncs How many sync handler calls were released by this call to filesystem - * sync. + * For notification post writer sync. Used by metrics system at least. + * @param timeInNanos How long the filesystem sync took in nanoseconds. + * @param handlerSyncs How many sync handler calls were released by this call to filesystem sync. */ - default void postSync(final long timeInNanos, final int handlerSyncs) {} + default void postSync(final long timeInNanos, final int handlerSyncs) { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 31eccc7a18a..5b60b10e128 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -21,14 +21,12 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.BaseDecoder; import org.apache.hadoop.hbase.codec.BaseEncoder; import org.apache.hadoop.hbase.codec.Codec; @@ -43,20 +41,19 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.io.IOUtils; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; - /** - * Compression in this class is lifted off Compressor/KeyValueCompression. 
- * This is a pure coincidence... they are independent and don't have to be compatible. - * - * This codec is used at server side for writing cells to WAL as well as for sending edits - * as part of the distributed splitting process. + * Compression in this class is lifted off Compressor/KeyValueCompression. This is a pure + * coincidence... they are independent and don't have to be compatible. This codec is used at server + * side for writing cells to WAL as well as for sending edits as part of the distributed splitting + * process. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, - HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.CONFIG }) public class WALCellCodec implements Codec { /** Configuration key for the class to use when encoding cells in the WAL */ public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec"; @@ -73,9 +70,9 @@ public class WALCellCodec implements Codec { /** * Default constructor - all subclasses must implement a constructor with this signature * if they are to be dynamically loaded from the {@link Configuration}. - * @param conf configuration to configure this + * @param conf configuration to configure this * @param compression compression the codec should support, can be null to indicate no - * compression + * compression */ public WALCellCodec(Configuration conf, CompressionContext compression) { this.compression = compression; @@ -87,42 +84,41 @@ public class WALCellCodec implements Codec { /** * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and - * CompressionContext, if {@code cellCodecClsName} is specified. - * Otherwise Cell Codec classname is read from {@link Configuration}. - * Fully prepares the codec for use. - * @param conf {@link Configuration} to read for the user-specified codec. If none is specified, - * uses a {@link WALCellCodec}. + * CompressionContext, if {@code cellCodecClsName} is specified. Otherwise Cell Codec classname is + * read from {@link Configuration}. Fully prepares the codec for use. + * @param conf {@link Configuration} to read for the user-specified codec. If none is + * specified, uses a {@link WALCellCodec}. * @param cellCodecClsName name of codec - * @param compression compression the codec should use + * @param compression compression the codec should use * @return a {@link WALCellCodec} ready for use. * @throws UnsupportedOperationException if the codec cannot be instantiated */ public static WALCellCodec create(Configuration conf, String cellCodecClsName, - CompressionContext compression) throws UnsupportedOperationException { + CompressionContext compression) throws UnsupportedOperationException { if (cellCodecClsName == null) { cellCodecClsName = getWALCellCodecClass(conf).getName(); } - return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[] - { Configuration.class, CompressionContext.class }, new Object[] { conf, compression }); + return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, + new Class[] { Configuration.class, CompressionContext.class }, + new Object[] { conf, compression }); } /** - * Create and setup a {@link WALCellCodec} from the - * CompressionContext. - * Cell Codec classname is read from {@link Configuration}. - * Fully prepares the codec for use. - * @param conf {@link Configuration} to read for the user-specified codec. 
If none is specified, - * uses a {@link WALCellCodec}. + * Create and setup a {@link WALCellCodec} from the CompressionContext. Cell Codec classname is + * read from {@link Configuration}. Fully prepares the codec for use. + * @param conf {@link Configuration} to read for the user-specified codec. If none is + * specified, uses a {@link WALCellCodec}. * @param compression compression the codec should use * @return a {@link WALCellCodec} ready for use. * @throws UnsupportedOperationException if the codec cannot be instantiated */ - public static WALCellCodec create(Configuration conf, - CompressionContext compression) throws UnsupportedOperationException { + public static WALCellCodec create(Configuration conf, CompressionContext compression) + throws UnsupportedOperationException { String cellCodecClsName = getWALCellCodecClass(conf).getName(); - return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[] - { Configuration.class, CompressionContext.class }, new Object[] { conf, compression }); + return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, + new Class[] { Configuration.class, CompressionContext.class }, + new Object[] { conf, compression }); } public interface ByteStringCompressor { @@ -152,6 +148,7 @@ public class WALCellCodec implements Codec { public BaosAndCompressor(CompressionContext compressionContext) { this.compressionContext = compressionContext; } + public ByteString toByteString() { // We need this copy to create the ByteString as the byte[] 'buf' is not immutable. We reuse // them. @@ -200,7 +197,7 @@ public class WALCellCodec implements Codec { private static byte[] uncompressByteString(ByteString bs, Dictionary dict) throws IOException { InputStream in = bs.newInput(); - byte status = (byte)in.read(); + byte status = (byte) in.read(); if (status == Dictionary.NOT_IN_DICTIONARY) { byte[] arr = new byte[StreamUtils.readRawVarint32(in)]; int bytesRead = in.read(arr); @@ -211,7 +208,7 @@ public class WALCellCodec implements Codec { return arr; } else { // Status here is the higher-order byte of index of the dictionary entry. 
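The create(...) factories above instantiate the configured class reflectively through a (Configuration, CompressionContext) constructor, so a replacement codec only needs that constructor plus an entry under WAL_CELL_CODEC_CLASS_KEY. A rough sketch; the subclass name is hypothetical:

    // Hypothetical codec honouring the reflective-construction contract documented above.
    public class TrivialWALCellCodec extends WALCellCodec {
      public TrivialWALCellCodec(Configuration conf, CompressionContext compression) {
        super(conf, compression);
      }
    }

    // Usage sketch: point the WAL at the custom codec, then build it the way the
    // region server would ("hbase.regionserver.wal.codec" is WAL_CELL_CODEC_CLASS_KEY).
    Configuration conf = HBaseConfiguration.create();
    conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, TrivialWALCellCodec.class.getName());
    WALCellCodec codec = WALCellCodec.create(conf, null); // null CompressionContext = no compression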
- short dictIdx = StreamUtils.toShort(status, (byte)in.read()); + short dictIdx = StreamUtils.toShort(status, (byte) in.read()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { throw new IOException("Missing dictionary entry for index " + dictIdx); @@ -224,6 +221,7 @@ public class WALCellCodec implements Codec { private final CompressionContext compression; private final boolean hasValueCompression; private final boolean hasTagCompression; + public CompressedKvEncoder(OutputStream out, CompressionContext compression) { super(out); this.compression = compression; @@ -278,6 +276,7 @@ public class WALCellCodec implements Codec { private final CompressionContext compression; private final boolean hasValueCompression; private final boolean hasTagCompression; + public CompressedKvDecoder(InputStream in, CompressionContext compression) { super(in); this.compression = compression; @@ -291,7 +290,7 @@ public class WALCellCodec implements Codec { int vlength = StreamUtils.readRawVarint32(in); int tagsLength = StreamUtils.readRawVarint32(in); int length = 0; - if(tagsLength == 0) { + if (tagsLength == 0) { length = KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE + keylength + vlength; } else { length = KeyValue.KEYVALUE_WITH_TAGS_INFRASTRUCTURE_SIZE + keylength + vlength + tagsLength; @@ -306,14 +305,14 @@ public class WALCellCodec implements Codec { int elemLen = readIntoArray(backingArray, pos + Bytes.SIZEOF_SHORT, compression.getDictionary(CompressionContext.DictionaryIndex.ROW)); checkLength(elemLen, Short.MAX_VALUE); - pos = Bytes.putShort(backingArray, pos, (short)elemLen); + pos = Bytes.putShort(backingArray, pos, (short) elemLen); pos += elemLen; // family elemLen = readIntoArray(backingArray, pos + Bytes.SIZEOF_BYTE, compression.getDictionary(CompressionContext.DictionaryIndex.FAMILY)); checkLength(elemLen, Byte.MAX_VALUE); - pos = Bytes.putByte(backingArray, pos, (byte)elemLen); + pos = Bytes.putByte(backingArray, pos, (byte) elemLen); pos += elemLen; // qualifier @@ -329,7 +328,7 @@ public class WALCellCodec implements Codec { if (tagsLength > 0) { typeValLen = typeValLen - tagsLength - KeyValue.TAGS_LENGTH_SIZE; } - pos = Bytes.putByte(backingArray, pos, (byte)in.read()); + pos = Bytes.putByte(backingArray, pos, (byte) in.read()); int valLen = typeValLen - 1; if (hasValueCompression) { readCompressedValue(in, backingArray, pos, valLen); @@ -351,7 +350,7 @@ public class WALCellCodec implements Codec { } private int readIntoArray(byte[] to, int offset, Dictionary dict) throws IOException { - byte status = (byte)in.read(); + byte status = (byte) in.read(); if (status == Dictionary.NOT_IN_DICTIONARY) { // status byte indicating that data to be read is not in dictionary. // if this isn't in the dictionary, we need to add to the dictionary. @@ -361,7 +360,7 @@ public class WALCellCodec implements Codec { return length; } else { // the status byte also acts as the higher order byte of the dictionary entry. 
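The two dictionary comments above describe the same framing: a leading status byte either equals Dictionary.NOT_IN_DICTIONARY (and is followed by a varint length plus the literal bytes) or doubles as the high-order half of a two-byte dictionary index whose low half is the next byte on the stream. A simplified sketch of that index arithmetic; the helper names are illustrative rather than HBase's own (the real code delegates to StreamUtils.toShort):

    static byte[] packDictionaryIndex(short dictIdx) {
      // high-order byte first, mirroring how the decoder reconstructs it
      return new byte[] { (byte) (dictIdx >> 8), (byte) dictIdx };
    }

    static short unpackDictionaryIndex(byte status, byte low) {
      return (short) (((status & 0xFF) << 8) | (low & 0xFF));
    }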
- short dictIdx = StreamUtils.toShort(status, (byte)in.read()); + short dictIdx = StreamUtils.toShort(status, (byte) in.read()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { throw new IOException("Missing dictionary entry for index " + dictIdx); @@ -379,10 +378,10 @@ public class WALCellCodec implements Codec { } private void readCompressedValue(InputStream in, byte[] outArray, int outOffset, - int expectedLength) throws IOException { + int expectedLength) throws IOException { int compressedLen = StreamUtils.readRawVarint32(in); - int read = compression.getValueCompressor().decompress(in, compressedLen, outArray, - outOffset, expectedLength); + int read = compression.getValueCompressor().decompress(in, compressedLen, outArray, outOffset, + expectedLength); if (read != expectedLength) { throw new IOException("ValueCompressor state error: short read"); } @@ -394,6 +393,7 @@ public class WALCellCodec implements Codec { public EnsureKvEncoder(OutputStream out) { super(out); } + @Override public void write(Cell cell) throws IOException { checkFlushed(); @@ -406,7 +406,8 @@ public class WALCellCodec implements Codec { @Override public Decoder getDecoder(InputStream is) { return (compression == null) - ? new KeyValueCodecWithTags.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression); + ? new KeyValueCodecWithTags.KeyValueDecoder(is) + : new CompressedKvDecoder(is, compression); } @Override @@ -416,8 +417,7 @@ public class WALCellCodec implements Codec { @Override public Encoder getEncoder(OutputStream os) { - os = (os instanceof ByteBufferWriter) ? os - : new ByteBufferWriterOutputStream(os); + os = (os instanceof ByteBufferWriter) ? os : new ByteBufferWriterOutputStream(os); if (compression == null) { return new EnsureKvEncoder(os); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java index 40d6d0fc948..c594122c29b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java @@ -1,6 +1,4 @@ - /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -9,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; import java.lang.reflect.InvocationTargetException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.RegionInfo; @@ -41,12 +37,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Implements the coprocessor environment and runtime support for coprocessors - * loaded within a {@link WAL}. + * Implements the coprocessor environment and runtime support for coprocessors loaded within a + * {@link WAL}. 
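WAL coprocessors handled by this host are named in the configuration key checked by checkAndGetInstance further down; anything that is not a WALCoprocessor is rejected with the error logged there. A minimal sketch, assuming the Optional-returning getWALObserver() accessor used by walObserverGetter below; the observer class itself is hypothetical:

    public class AuditingWALCoprocessor implements WALCoprocessor, WALObserver {
      @Override
      public java.util.Optional<WALObserver> getWALObserver() {
        return java.util.Optional.of(this);
      }
      // preWALWrite/postWALWrite overrides would go here.
    }

    // Registered through the key named in the error message below:
    // conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, AuditingWALCoprocessor.class.getName());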
*/ @InterfaceAudience.Private -public class WALCoprocessorHost - extends CoprocessorHost { +public class WALCoprocessorHost extends CoprocessorHost { private static final Logger LOG = LoggerFactory.getLogger(WALCoprocessorHost.class); /** @@ -66,18 +61,18 @@ public class WALCoprocessorHost /** * Constructor - * @param impl the coprocessor instance + * @param impl the coprocessor instance * @param priority chaining priority - * @param seq load sequence - * @param conf configuration - * @param wal WAL + * @param seq load sequence + * @param conf configuration + * @param wal WAL */ private WALEnvironment(final WALCoprocessor impl, final int priority, final int seq, - final Configuration conf, final WAL wal) { + final Configuration conf, final WAL wal) { super(impl, priority, seq, conf); this.wal = wal; - this.metricRegistry = MetricsCoprocessor.createRegistryForWALCoprocessor( - impl.getClass().getName()); + this.metricRegistry = + MetricsCoprocessor.createRegistryForWALCoprocessor(impl.getClass().getName()); } @Override @@ -96,7 +91,7 @@ public class WALCoprocessorHost /** * Constructor - * @param log the write ahead log + * @param log the write ahead log * @param conf the configuration */ public WALCoprocessorHost(final WAL log, final Configuration conf) { @@ -114,13 +109,13 @@ public class WALCoprocessorHost @Override public WALEnvironment createEnvironment(final WALCoprocessor instance, final int priority, - final int seq, final Configuration conf) { + final int seq, final Configuration conf) { return new WALEnvironment(instance, priority, seq, conf, this.wal); } @Override - public WALCoprocessor checkAndGetInstance(Class implClass) throws IllegalAccessException, - InstantiationException { + public WALCoprocessor checkAndGetInstance(Class implClass) + throws IllegalAccessException, InstantiationException { if (WALCoprocessor.class.isAssignableFrom(implClass)) { try { return implClass.asSubclass(WALCoprocessor.class).getDeclaredConstructor().newInstance(); @@ -129,23 +124,22 @@ public class WALCoprocessorHost } } else { LOG.error(implClass.getName() + " is not of type WALCoprocessor. Check the " - + "configuration " + CoprocessorHost.WAL_COPROCESSOR_CONF_KEY); + + "configuration " + CoprocessorHost.WAL_COPROCESSOR_CONF_KEY); return null; } } private ObserverGetter walObserverGetter = - WALCoprocessor::getWALObserver; + WALCoprocessor::getWALObserver; - abstract class WALObserverOperation extends - ObserverOperationWithoutResult { + abstract class WALObserverOperation extends ObserverOperationWithoutResult { public WALObserverOperation() { super(walObserverGetter); } } public void preWALWrite(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) - throws IOException { + throws IOException { // Not bypassable. if (this.coprocEnvironments.isEmpty()) { return; @@ -159,7 +153,7 @@ public class WALCoprocessorHost } public void postWALWrite(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) - throws IOException { + throws IOException { execOperation(coprocEnvironments.isEmpty() ? 
null : new WALObserverOperation() { @Override protected void call(WALObserver observer) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java index 221f90c728a..d89d03a145c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -23,7 +22,6 @@ import java.util.ArrayList; import java.util.Map; import java.util.NavigableMap; import java.util.function.Function; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -38,16 +36,18 @@ import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor; /** - * Helper methods to ease Region Server integration with the Write Ahead Log (WAL). - * Note that methods in this class specifically should not require access to anything - * other than the API found in {@link WAL}. For internal use only. + * Helper methods to ease Region Server integration with the Write Ahead Log (WAL). Note that + * methods in this class specifically should not require access to anything other than the API found + * in {@link WAL}. For internal use only. */ @InterfaceAudience.Private public class WALUtil { @@ -100,11 +100,10 @@ public class WALUtil { * only. Not for external client consumption. */ public static WALKeyImpl writeRegionEventMarker(WAL wal, - NavigableMap replicationScope, RegionInfo hri, RegionEventDescriptor r, - MultiVersionConcurrencyControl mvcc) - throws IOException { - WALKeyImpl walKey = writeMarker(wal, replicationScope, hri, - WALEdit.createRegionEventWALEdit(hri, r), mvcc, null); + NavigableMap replicationScope, RegionInfo hri, RegionEventDescriptor r, + MultiVersionConcurrencyControl mvcc) throws IOException { + WALKeyImpl walKey = + writeMarker(wal, replicationScope, hri, WALEdit.createRegionEventWALEdit(hri, r), mvcc, null); if (LOG.isTraceEnabled()) { LOG.trace("Appended region event marker " + TextFormat.shortDebugString(r)); } @@ -112,21 +111,22 @@ public class WALUtil { } /** - * Write a log marker that a bulk load has succeeded and is about to be committed. - * This write is for internal use only. Not for external client consumption. - * @param wal The log to write into. + * Write a log marker that a bulk load has succeeded and is about to be committed. This write is + * for internal use only. Not for external client consumption. + * @param wal The log to write into. * @param replicationScope The replication scope of the families in the HRegion - * @param hri A description of the region in the table that we are bulk loading into. 
- * @param desc A protocol buffers based description of the client's bulk loading request + * @param hri A description of the region in the table that we are bulk loading into. + * @param desc A protocol buffers based description of the client's bulk loading + * request * @return walKey with sequenceid filled out for this bulk load marker * @throws IOException We will throw an IOException if we can not append to the HLog. */ public static WALKeyImpl writeBulkLoadMarkerAndSync(final WAL wal, - final NavigableMap replicationScope, final RegionInfo hri, - final WALProtos.BulkLoadDescriptor desc, final MultiVersionConcurrencyControl mvcc) + final NavigableMap replicationScope, final RegionInfo hri, + final WALProtos.BulkLoadDescriptor desc, final MultiVersionConcurrencyControl mvcc) throws IOException { - WALKeyImpl walKey = writeMarker(wal, replicationScope, hri, - WALEdit.createBulkLoadEvent(hri, desc), mvcc, null); + WALKeyImpl walKey = + writeMarker(wal, replicationScope, hri, WALEdit.createBulkLoadEvent(hri, desc), mvcc, null); if (LOG.isTraceEnabled()) { LOG.trace("Appended Bulk Load marker " + TextFormat.shortDebugString(desc)); } @@ -134,12 +134,12 @@ public class WALUtil { } private static WALKeyImpl writeMarker(final WAL wal, - NavigableMap replicationScope, RegionInfo hri, WALEdit edit, MultiVersionConcurrencyControl mvcc, - Map extendedAttributes) + NavigableMap replicationScope, RegionInfo hri, WALEdit edit, + MultiVersionConcurrencyControl mvcc, Map extendedAttributes) throws IOException { // If sync == true in below, then timeout is not used; safe to pass UNSPECIFIED_TIMEOUT - return doFullMarkerAppendTransaction(wal, replicationScope, hri, edit, mvcc, - extendedAttributes, true); + return doFullMarkerAppendTransaction(wal, replicationScope, hri, edit, mvcc, extendedAttributes, + true); } /** @@ -151,8 +151,8 @@ public class WALUtil { * @return WALKeyImpl that was added to the WAL. */ private static WALKeyImpl doFullMarkerAppendTransaction(WAL wal, - NavigableMap replicationScope, RegionInfo hri, final WALEdit edit, - MultiVersionConcurrencyControl mvcc, Map extendedAttributes, boolean sync) + NavigableMap replicationScope, RegionInfo hri, final WALEdit edit, + MultiVersionConcurrencyControl mvcc, Map extendedAttributes, boolean sync) throws IOException { // TODO: Pass in current time to use? WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(), @@ -181,17 +181,17 @@ public class WALUtil { * @return Blocksize to use writing WALs. */ public static long getWALBlockSize(Configuration conf, FileSystem fs, Path dir) - throws IOException { + throws IOException { return getWALBlockSize(conf, fs, dir, false); } /** * Public because of FSHLog. Should be package-private * @param isRecoverEdits the created writer is for recovered edits or WAL. For recovered edits, it - * is true and for WAL it is false. + * is true and for WAL it is false. 
*/ public static long getWALBlockSize(Configuration conf, FileSystem fs, Path dir, - boolean isRecoverEdits) throws IOException { + boolean isRecoverEdits) throws IOException { long defaultBlockSize = CommonFSUtils.getDefaultBlockSize(fs, dir) * 2; if (isRecoverEdits) { return conf.getLong("hbase.regionserver.recoverededits.blocksize", defaultBlockSize); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java index 56576a6cf3e..1a39dd222a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.io.IOException; import java.util.ArrayList; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService; @@ -38,30 +37,31 @@ public abstract class BaseReplicationEndpoint extends AbstractService implements ReplicationEndpoint { private static final Logger LOG = LoggerFactory.getLogger(BaseReplicationEndpoint.class); - public static final String REPLICATION_WALENTRYFILTER_CONFIG_KEY - = "hbase.replication.source.custom.walentryfilters"; + public static final String REPLICATION_WALENTRYFILTER_CONFIG_KEY = + "hbase.replication.source.custom.walentryfilters"; protected Context ctx; @Override public void init(Context context) throws IOException { this.ctx = context; - if (this.ctx != null){ + if (this.ctx != null) { ReplicationPeer peer = this.ctx.getReplicationPeer(); - if (peer != null){ + if (peer != null) { peer.registerPeerConfigListener(this); } else { - LOG.warn("Not tracking replication peer config changes for Peer Id " + this.ctx.getPeerId() + - " because there's no such peer"); + LOG.warn("Not tracking replication peer config changes for Peer Id " + this.ctx.getPeerId() + + " because there's no such peer"); } } } @Override /** - * No-op implementation for subclasses to override if they wish to execute logic if their config changes + * No-op implementation for subclasses to override if they wish to execute logic if their config + * changes */ - public void peerConfigUpdated(ReplicationPeerConfig rpc){ + public void peerConfigUpdated(ReplicationPeerConfig rpc) { } @@ -78,7 +78,8 @@ public abstract class BaseReplicationEndpoint extends AbstractService filters.add(tableCfFilter); } if (ctx != null && ctx.getPeerConfig() != null) { - String filterNameCSV = ctx.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY); + String filterNameCSV = + ctx.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY); if (filterNameCSV != null && !filterNameCSV.isEmpty()) { String[] filterNames = filterNameCSV.split(","); for (String filterName : filterNames) { @@ -94,14 +95,18 @@ public abstract class BaseReplicationEndpoint extends AbstractService return filters.isEmpty() ? 
null : new ChainWALEntryFilter(filters); } - /** Returns a WALEntryFilter for checking the scope. Subclasses can - * return null if they don't want this filter */ + /** + * Returns a WALEntryFilter for checking the scope. Subclasses can return null if they don't want + * this filter + */ protected WALEntryFilter getScopeWALEntryFilter() { return new ScopeWALEntryFilter(); } - /** Returns a WALEntryFilter for checking replication per table and CF. Subclasses can - * return null if they don't want this filter */ + /** + * Returns a WALEntryFilter for checking replication per table and CF. Subclasses can return null + * if they don't want this filter + */ protected WALEntryFilter getNamespaceTableCfWALEntryFilter() { return new NamespaceTableCfWALEntryFilter(ctx.getReplicationPeer()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java index 6814640dfe5..c06c6d19a65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,10 +40,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescript public class BulkLoadCellFilter { private static final Logger LOG = LoggerFactory.getLogger(BulkLoadCellFilter.class); - private final ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + private final ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + /** * Filters the bulk load cell using the supplied predicate. - * @param cell The WAL cell to filter. + * @param cell The WAL cell to filter. * @param famPredicate Returns true of given family should be removed. * @return The filtered cell. 
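filterCell above rebuilds the BulkLoadDescriptor inside a METAFAMILY/BULK_LOAD marker cell, dropping the store entries of every family the predicate flags and returning null once nothing is left to replicate. A usage sketch, assuming the predicate-taking signature the javadoc describes; the input cell and the family name are placeholders:

    BulkLoadCellFilter bulkLoadFilter = new BulkLoadCellFilter();
    byte[] skippedFamily = Bytes.toBytes("cf_not_replicated");
    Cell filtered = bulkLoadFilter.filterCell(cell, fam -> Bytes.equals(fam, skippedFamily));
    if (filtered == null) {
      // every store descriptor was removed, so the whole bulk load marker is skipped
    }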
*/ @@ -75,19 +77,13 @@ public class BulkLoadCellFilter { } else if (copiedStoresList.isEmpty()) { return null; } - BulkLoadDescriptor.Builder newDesc = - BulkLoadDescriptor.newBuilder().setTableName(bld.getTableName()) - .setEncodedRegionName(bld.getEncodedRegionName()) - .setBulkloadSeqNum(bld.getBulkloadSeqNum()); + BulkLoadDescriptor.Builder newDesc = BulkLoadDescriptor.newBuilder() + .setTableName(bld.getTableName()).setEncodedRegionName(bld.getEncodedRegionName()) + .setBulkloadSeqNum(bld.getBulkloadSeqNum()); newDesc.addAllStores(copiedStoresList); BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build(); - return cellBuilder.clear() - .setRow(CellUtil.cloneRow(cell)) - .setFamily(WALEdit.METAFAMILY) - .setQualifier(WALEdit.BULK_LOAD) - .setTimestamp(cell.getTimestamp()) - .setType(cell.getTypeByte()) - .setValue(newBulkLoadDescriptor.toByteArray()) - .build(); + return cellBuilder.clear().setRow(CellUtil.cloneRow(cell)).setFamily(WALEdit.METAFAMILY) + .setQualifier(WALEdit.BULK_LOAD).setTimestamp(cell.getTimestamp()).setType(cell.getTypeByte()) + .setValue(newBulkLoadDescriptor.toByteArray()).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java index 6f2c764e21a..aa84f4705b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** - * A {@link WALEntryFilter} which contains multiple filters and applies them - * in chain order + * A {@link WALEntryFilter} which contains multiple filters and applies them in chain order */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public class ChainWALEntryFilter implements WALEntryFilter { @@ -37,7 +34,7 @@ public class ChainWALEntryFilter implements WALEntryFilter { private final WALEntryFilter[] filters; private WALCellFilter[] cellFilters; - public ChainWALEntryFilter(WALEntryFilter...filters) { + public ChainWALEntryFilter(WALEntryFilter... filters) { this.filters = filters; initCellFilters(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java index 5f92bbf3a65..e05e79eab5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,18 +18,16 @@ package org.apache.hadoop.hbase.replication; import java.util.UUID; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKeyImpl; -import org.apache.hadoop.hbase.wal.WAL.Entry; - +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * Filters out entries with our peerClusterId (i.e. already replicated) - * and marks all other entries with our clusterID + * Filters out entries with our peerClusterId (i.e. already replicated) and marks all other entries + * with our clusterID */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) @InterfaceStability.Evolving @@ -40,22 +37,26 @@ public class ClusterMarkingEntryFilter implements WALEntryFilter { private ReplicationEndpoint replicationEndpoint; /** - * @param clusterId id of this cluster - * @param peerClusterId of the other cluster + * @param clusterId id of this cluster + * @param peerClusterId of the other cluster * @param replicationEndpoint ReplicationEndpoint which will handle the actual replication */ - public ClusterMarkingEntryFilter(UUID clusterId, UUID peerClusterId, ReplicationEndpoint replicationEndpoint) { + public ClusterMarkingEntryFilter(UUID clusterId, UUID peerClusterId, + ReplicationEndpoint replicationEndpoint) { this.clusterId = clusterId; this.peerClusterId = peerClusterId; this.replicationEndpoint = replicationEndpoint; } + @Override public Entry filter(Entry entry) { // don't replicate if the log entries have already been consumed by the cluster - if (replicationEndpoint.canReplicateToSameCluster() - || !entry.getKey().getClusterIds().contains(peerClusterId)) { + if ( + replicationEndpoint.canReplicateToSameCluster() + || !entry.getKey().getClusterIds().contains(peerClusterId) + ) { WALEdit edit = entry.getEdit(); - WALKeyImpl logKey = (WALKeyImpl)entry.getKey(); + WALKeyImpl logKey = (WALKeyImpl) entry.getKey(); if (edit != null && !edit.isEmpty()) { // Mark that the current cluster has the change diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 15d6fbe1b07..eee379c5201 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
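Before moving into HBaseReplicationEndpoint: because ChainWALEntryFilter above takes a varargs list of WALEntryFilters, the stock filters touched by this patch compose directly with a custom one, and returning null from filter() drops the entry. A small sketch; the table-skipping filter is hypothetical:

    class SkipOneTableFilter implements WALEntryFilter {
      private final TableName skipped = TableName.valueOf("test:skip_me"); // placeholder table

      @Override
      public Entry filter(Entry entry) {
        return skipped.equals(entry.getKey().getTableName()) ? null : entry;
      }
    }

    WALEntryFilter chained = new ChainWALEntryFilter(
      new SystemTableWALEntryFilter(), new ScopeWALEntryFilter(), new SkipOneTableFilter());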
*/ - package org.apache.hadoop.hbase.replication; import java.io.IOException; @@ -23,11 +22,11 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.UUID; -import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -39,8 +38,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A {@link BaseReplicationEndpoint} for replication endpoints whose - * target cluster is an HBase cluster. + * A {@link BaseReplicationEndpoint} for replication endpoints whose target cluster is an HBase + * cluster. */ @InterfaceAudience.Private public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint @@ -60,12 +59,13 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint } /** - * A private method used to re-establish a zookeeper session with a peer cluster. - * @param ke + * A private method used to re-establish a zookeeper session with a peer cluster. n */ protected void reconnect(KeeperException ke) { - if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException - || ke instanceof AuthFailedException) { + if ( + ke instanceof ConnectionLossException || ke instanceof SessionExpiredException + || ke instanceof AuthFailedException + ) { String clusterKey = ctx.getPeerConfig().getClusterKey(); LOG.warn("Lost the ZooKeeper connection for peer " + clusterKey, ke); try { @@ -131,15 +131,14 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint */ synchronized void reloadZkWatcher() throws IOException { if (zkw != null) zkw.close(); - zkw = new ZKWatcher(ctx.getConfiguration(), - "connection to cluster: " + ctx.getPeerId(), this); + zkw = new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); getZkw().registerListener(new PeerRegionServerListener(this)); } @Override public void abort(String why, Throwable e) { LOG.error("The HBaseReplicationEndpoint corresponding to peer " + ctx.getPeerId() - + " was aborted for the following reason(s):" + why, e); + + " was aborted for the following reason(s):" + why, e); } @Override @@ -153,10 +152,9 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint * @param zkw zk connection to use * @return list of region server addresses or an empty list if the slave is unavailable */ - protected static List fetchSlavesAddresses(ZKWatcher zkw) - throws KeeperException { - List children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, - zkw.getZNodePaths().rsZNode); + protected static List fetchSlavesAddresses(ZKWatcher zkw) throws KeeperException { + List children = + ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); if (children == null) { return Collections.emptyList(); } @@ -168,8 +166,8 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint } /** - * Get a list of all the addresses of all the available region servers - * for this peer cluster, or an empty list if no region servers available at peer cluster. 
+ * Get a list of all the addresses of all the available region servers for this peer cluster, or + * an empty list if no region servers available at peer cluster. * @return list of addresses */ // Synchronize peer cluster connection attempts to avoid races and rate diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java index 4fe04cd6ee5..82ac9ebd1f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.Cell; @@ -27,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Filter a WAL Entry by the peer config according to the table and family which it belongs to. - * * @see ReplicationPeerConfig#needToReplicate(TableName, byte[]) */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java index b1d12792b91..c6d9e4c91b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.io.IOException; @@ -23,28 +22,26 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.hbase.Abortable; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; +import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** - * ReplicationEndpoint is a plugin which implements replication - * to other HBase clusters, or other systems. ReplicationEndpoint implementation - * can be specified at the peer creation time by specifying it - * in the {@link ReplicationPeerConfig}. A ReplicationEndpoint is run in a thread - * in each region server in the same process. + * ReplicationEndpoint is a plugin which implements replication to other HBase clusters, or other + * systems. ReplicationEndpoint implementation can be specified at the peer creation time by + * specifying it in the {@link ReplicationPeerConfig}. 
A ReplicationEndpoint is run in a thread in + * each region server in the same process. *
<p>
      - * ReplicationEndpoint is closely tied to ReplicationSource in a producer-consumer - * relation. ReplicationSource is an HBase-private class which tails the logs and manages - * the queue of logs plus management and persistence of all the state for replication. - * ReplicationEndpoint on the other hand is responsible for doing the actual shipping - * and persisting of the WAL entries in the other cluster. + * ReplicationEndpoint is closely tied to ReplicationSource in a producer-consumer relation. + * ReplicationSource is an HBase-private class which tails the logs and manages the queue of logs + * plus management and persistence of all the state for replication. ReplicationEndpoint on the + * other hand is responsible for doing the actual shipping and persisting of the WAL entries in the + * other cluster. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface ReplicationEndpoint extends ReplicationPeerConfigListener { @@ -64,16 +61,10 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { private final Abortable abortable; @InterfaceAudience.Private - public Context( - final Configuration localConf, - final Configuration conf, - final FileSystem fs, - final String peerId, - final UUID clusterId, - final ReplicationPeer replicationPeer, - final MetricsSource metrics, - final TableDescriptors tableDescriptors, - final Abortable abortable) { + public Context(final Configuration localConf, final Configuration conf, final FileSystem fs, + final String peerId, final UUID clusterId, final ReplicationPeer replicationPeer, + final MetricsSource metrics, final TableDescriptors tableDescriptors, + final Abortable abortable) { this.localConf = localConf; this.conf = conf; this.fs = fs; @@ -84,34 +75,46 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { this.tableDescriptors = tableDescriptors; this.abortable = abortable; } + public Configuration getConfiguration() { return conf; } + public Configuration getLocalConfiguration() { return localConf; } + public FileSystem getFilesystem() { return fs; } + public UUID getClusterId() { return clusterId; } + public String getPeerId() { return peerId; } + public ReplicationPeerConfig getPeerConfig() { return replicationPeer.getPeerConfig(); } + public ReplicationPeer getReplicationPeer() { return replicationPeer; } + public MetricsSource getMetrics() { return metrics; } + public TableDescriptors getTableDescriptors() { return tableDescriptors; } - public Abortable getAbortable() { return abortable; } + + public Abortable getAbortable() { + return abortable; + } } /** @@ -121,14 +124,16 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { */ void init(Context context) throws IOException; - /** Whether or not, the replication endpoint can replicate to it's source cluster with the same - * UUID */ + /** + * Whether or not, the replication endpoint can replicate to it's source cluster with the same + * UUID + */ boolean canReplicateToSameCluster(); /** - * Returns a UUID of the provided peer id. Every HBase cluster instance has a persisted - * associated UUID. If the replication is not performed to an actual HBase cluster (but - * some other system), the UUID returned has to uniquely identify the connected target system. + * Returns a UUID of the provided peer id. Every HBase cluster instance has a persisted associated + * UUID. 
If the replication is not performed to an actual HBase cluster (but some other system), + * the UUID returned has to uniquely identify the connected target system. * @return a UUID or null if the peer cluster does not exist or is not connected. */ UUID getPeerUUID(); @@ -149,6 +154,7 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { int size; String walGroupId; int timeout; + @InterfaceAudience.Private public ReplicateContext() { } @@ -157,42 +163,46 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { this.entries = entries; return this; } + public ReplicateContext setSize(int size) { this.size = size; return this; } + public ReplicateContext setWalGroupId(String walGroupId) { this.walGroupId = walGroupId; return this; } + public List getEntries() { return entries; } + public int getSize() { return size; } - public String getWalGroupId(){ + + public String getWalGroupId() { return walGroupId; } + public void setTimeout(int timeout) { this.timeout = timeout; } + public int getTimeout() { return this.timeout; } } /** - * Replicate the given set of entries (in the context) to the other cluster. - * Can block until all the given entries are replicated. Upon this method is returned, - * all entries that were passed in the context are assumed to be persisted in the - * target cluster. - * @param replicateContext a context where WAL entries and other - * parameters can be obtained. + * Replicate the given set of entries (in the context) to the other cluster. Can block until all + * the given entries are replicated. Upon this method is returned, all entries that were passed in + * the context are assumed to be persisted in the target cluster. + * @param replicateContext a context where WAL entries and other parameters can be obtained. */ boolean replicate(ReplicateContext replicateContext); - // The below methods are inspired by Guava Service. See // https://github.com/google/guava/wiki/ServiceExplained for overview of Guava Service. // Below we implement a subset only with different names on some methods so we can implement @@ -218,23 +228,24 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { /** * Waits for the {@link ReplicationEndpoint} to be up and running. - * * @throws IllegalStateException if the service reaches a state from which it is not possible to - * enter the (internal) running state. e.g. if the state is terminated when this method is - * called then this will throw an IllegalStateException. + * enter the (internal) running state. e.g. if the state is + * terminated when this method is called then this will throw an + * IllegalStateException. */ void awaitRunning(); /** - * Waits for the {@link ReplicationEndpoint} to to be up and running for no more - * than the given time. - * + * Waits for the {@link ReplicationEndpoint} to to be up and running for no more than the given + * time. * @param timeout the maximum time to wait - * @param unit the time unit of the timeout argument - * @throws TimeoutException if the service has not reached the given state within the deadline + * @param unit the time unit of the timeout argument + * @throws TimeoutException if the service has not reached the given state within the + * deadline * @throws IllegalStateException if the service reaches a state from which it is not possible to - * enter the (internal) running state. e.g. if the state is terminated when this method is - * called then this will throw an IllegalStateException. 
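replicate() above is a batch contract: once it returns true, every entry carried by the ReplicateContext is assumed durable on the target, and the source may advance its position in the WAL. A sketch of how a caller assembles the context with the fluent setters shown above; the entries, size, wal group and timeout values are placeholders:

    ReplicationEndpoint.ReplicateContext replicateContext =
      new ReplicationEndpoint.ReplicateContext()
        .setEntries(entries)        // List<Entry> tailed from the WAL by the source
        .setSize(batchSizeInBytes)
        .setWalGroupId(walGroupId);
    replicateContext.setTimeout(shipTimeoutMillis); // setTimeout returns void, so it is not chained
    boolean shipped = endpoint.replicate(replicateContext);
    // only a true return lets the shipper move past these entries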
+ * enter the (internal) running state. e.g. if the state is + * terminated when this method is called then this will throw an + * IllegalStateException. */ void awaitRunning(long timeout, TimeUnit unit) throws TimeoutException; @@ -247,25 +258,23 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { /** * Waits for the {@link ReplicationEndpoint} to reach the terminated (internal) state. - * * @throws IllegalStateException if the service FAILED. */ void awaitTerminated(); /** - * Waits for the {@link ReplicationEndpoint} to reach a terminal state for no - * more than the given time. - * + * Waits for the {@link ReplicationEndpoint} to reach a terminal state for no more than the given + * time. * @param timeout the maximum time to wait - * @param unit the time unit of the timeout argument - * @throws TimeoutException if the service has not reached the given state within the deadline + * @param unit the time unit of the timeout argument + * @throws TimeoutException if the service has not reached the given state within the + * deadline * @throws IllegalStateException if the service FAILED. */ void awaitTerminated(long timeout, TimeUnit unit) throws TimeoutException; /** * Returns the {@link Throwable} that caused this service to fail. - * * @throws IllegalStateException if this service's state isn't FAILED. */ Throwable failureCause(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java index edd567914dc..7021bd27cfe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.regionserver.ReplicationSinkService; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSink; import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,17 +66,15 @@ public class ReplicationSinkServiceImpl implements ReplicationSinkService { WALFactory walFactory) throws IOException { this.server = server; this.conf = server.getConfiguration(); - this.statsPeriodInSecond = - this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); } @Override public void startReplicationService() throws IOException { this.replicationSink = new ReplicationSink(this.conf); - this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSinkStatistics", server, - (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); + this.server.getChoreService().scheduleChore(new ReplicationStatisticsChore( + "ReplicationSinkStatistics", server, (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java index f8722eb3da4..6dc41bcc014 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ public class ScopeWALEntryFilter implements WALEntryFilter, WALCellFilter { Integer scope = scopes.get(family); return scope != null && scope.intValue() == HConstants.REPLICATION_SCOPE_GLOBAL; } + @Override public Cell filterCell(Entry entry, Cell cell) { NavigableMap scopes = entry.getKey().getReplicationScopes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java index 3cda94a1c02..d71260cce5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java @@ -15,8 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; + import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; @@ -27,6 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience; public class SystemTableWALEntryFilter implements WALEntryFilter { @Override public Entry filter(Entry entry) { - return entry.getKey().getTableName().isSystemTable()? null: entry; + return entry.getKey().getTableName().isSystemTable() ? null : entry; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java index 088827f4d2e..229cec57e97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java index 46b2f6cb4dd..2e79fa35b0f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** * A filter for WAL entry cells before being sent over to replication. @@ -29,12 +29,12 @@ import org.apache.hadoop.hbase.wal.WAL.Entry; public interface WALCellFilter { /** - * Applies the filter, possibly returning a different Cell instance. - * If null is returned, the cell will be skipped. + * Applies the filter, possibly returning a different Cell instance. If null is returned, the cell + * will be skipped. * @param entry Entry which contains the cell - * @param cell Cell to filter - * @return a (possibly modified) Cell to use. Returning null will cause the cell - * to be skipped for replication. + * @param cell Cell to filter + * @return a (possibly modified) Cell to use. Returning null will cause the cell to be skipped for + * replication. */ public Cell filterCell(Entry entry, Cell cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java index 23c1c60f2db..8aa60f74ebb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java @@ -16,20 +16,22 @@ * limitations under the License. */ package org.apache.hadoop.hbase.replication; + import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; /** - * A Filter for WAL entries before being sent over to replication. Multiple - * filters might be chained together using {@link ChainWALEntryFilter}. - * Applied on the replication source side. - *
<p>
      There is also a filter that can be installed on the sink end of a replication stream. - * See {@link org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter}. Certain - * use-cases may need such a facility but better to filter here on the source side rather - * than later, after the edit arrives at the sink.
</p>
      + * A Filter for WAL entries before being sent over to replication. Multiple filters might be chained + * together using {@link ChainWALEntryFilter}. Applied on the replication source side. + *
<p>
      + * There is also a filter that can be installed on the sink end of a replication stream. See + * {@link org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter}. Certain use-cases + * may need such a facility but better to filter here on the source side rather than later, after + * the edit arrives at the sink. + *
</p>
      * @see org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter for filtering - * replication on the sink-side. + * replication on the sink-side. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface WALEntryFilter { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java index f06b29ccdef..819e4c5e54a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -90,13 +90,13 @@ public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate { @Override public void setConf(Configuration config) { // If either replication or replication of bulk load hfiles is disabled, keep all members null - if (!(config.getBoolean( - HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT))) { - LOG.warn(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY - + " is not enabled. Better to remove " - + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS - + " configuration."); + if ( + !(config.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) + ) { + LOG.warn(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY + " is not enabled. Better to remove " + + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS + + " configuration."); return; } // Make my own Configuration. Then I'll have my own connection to zk that @@ -154,7 +154,7 @@ public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate { hfileRefsFromQueue = rqs.getAllHFileRefs(); } catch (ReplicationException e) { LOG.warn("Failed to read hfile references from zookeeper, skipping checking deletable " - + "file for " + fStat.getPath()); + + "file for " + fStat.getPath()); return false; } return !hfileRefsFromQueue.contains(fStat.getPath().getName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index 705302efcd2..a355c61e621 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,17 +31,17 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Predicate; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; /** - * Implementation of a log cleaner that checks if a log is still scheduled for - * replication before deleting it when its TTL is over. + * Implementation of a log cleaner that checks if a log is still scheduled for replication before + * deleting it when its TTL is over. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationLogCleaner extends BaseLogCleanerDelegate { @@ -125,7 +125,7 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate { LOG.error("Error while configuring " + this.getClass().getName(), e); } } - + @Override public void stop(String why) { if (this.stopped) return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationPeerConfigUpgrader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationPeerConfigUpgrader.java index 8d049e99dd8..ee1a86c3acd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationPeerConfigUpgrader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationPeerConfigUpgrader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -43,13 +42,15 @@ import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; + /** * This class is used to upgrade TableCFs from HBase 1.0, 1.1, 1.2, 1.3 to HBase 1.4 or 2.x. It will * be removed in HBase 3.x. 
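Stepping back to the two cleaner plugins above: ReplicationLogCleaner and ReplicationHFileCleaner only keep files alive while a replication queue still references them, and the HFile variant additionally insists that bulk-load replication is enabled, which is what its setConf warning is about. A configuration sketch using the constants named in that warning (overwriting the plugin list here is a simplification; a real deployment would append to the existing value):

    Configuration conf = HBaseConfiguration.create();
    // Without this flag ReplicationHFileCleaner refuses to track hfile references.
    conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, ReplicationHFileCleaner.class.getName());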
See HBASE-11393 */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class ReplicationPeerConfigUpgrader{ +public class ReplicationPeerConfigUpgrader { private static final String TABLE_CFS_ZNODE = "zookeeper.znode.replication.peers.tableCFs"; private static final String TABLE_CFS_ZNODE_DEFAULT = "tableCFs"; @@ -71,8 +72,10 @@ public class ReplicationPeerConfigUpgrader{ admin.listReplicationPeers().forEach((peerDesc) -> { String peerId = peerDesc.getPeerId(); ReplicationPeerConfig peerConfig = peerDesc.getPeerConfig(); - if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) - || (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) { + if ( + (peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) + || (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty()) + ) { peerConfig.setReplicateAllUserTables(false); try { admin.updateReplicationPeerConfig(peerId, peerConfig); @@ -96,7 +99,7 @@ public class ReplicationPeerConfigUpgrader{ String replicationZNode = ZNodePaths.joinZNode(zookeeper.getZNodePaths().baseZNode, conf.get(REPLICATION_ZNODE, REPLICATION_ZNODE_DEFAULT)); String peersZNode = - ZNodePaths.joinZNode(replicationZNode, conf.get(PEERS_ZNODE, PEERS_ZNODE_DEFAULT)); + ZNodePaths.joinZNode(replicationZNode, conf.get(PEERS_ZNODE, PEERS_ZNODE_DEFAULT)); return ZNodePaths.joinZNode(peersZNode, ZNodePaths.joinZNode(peerId, conf.get(TABLE_CFS_ZNODE, TABLE_CFS_ZNODE_DEFAULT))); } @@ -111,7 +114,7 @@ public class ReplicationPeerConfigUpgrader{ // we copy TableCFs node into PeerNode LOG.info("Copy table ColumnFamilies into peer=" + peerId); ReplicationProtos.TableCF[] tableCFs = - ReplicationPeerConfigUtil.parseTableCFs(ZKUtil.getData(this.zookeeper, tableCFsNode)); + ReplicationPeerConfigUtil.parseTableCFs(ZKUtil.getData(this.zookeeper, tableCFsNode)); if (tableCFs != null && tableCFs.length > 0) { rpc.setTableCFsMap(ReplicationPeerConfigUtil.convert2Map(tableCFs)); peerStorage.updatePeerConfig(peerId, rpc); @@ -136,7 +139,7 @@ public class ReplicationPeerConfigUpgrader{ private static void printUsageAndExit() { System.err.printf( "Usage: hbase org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader" - + " [options]"); + + " [options]"); System.err.println(" where [options] are:"); System.err.println(" -h|-help Show this help and exit."); System.err.println(" copyTableCFs Copy table-cfs to replication peer config"); @@ -155,7 +158,7 @@ public class ReplicationPeerConfigUpgrader{ Configuration conf = HBaseConfiguration.create(); try (ZKWatcher zkw = new ZKWatcher(conf, "ReplicationPeerConfigUpgrader", null)) { ReplicationPeerConfigUpgrader tableCFsUpdater = - new ReplicationPeerConfigUpgrader(zkw, conf); + new ReplicationPeerConfigUpgrader(zkw, conf); tableCFsUpdater.copyTableCFs(); } } else if (args[0].equals("upgrade")) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java index 36628285630..353542ecfed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java @@ -24,10 +24,10 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; /** - * The 
'peer' used internally by Catalog Region Replicas Replication Source. - * The Replication system has 'peer' baked into its core so though we do not need 'peering', we - * need a 'peer' and its configuration else the replication system breaks at a few locales. - * Set "hbase.region.replica.catalog.replication" if you want to change the configured endpoint. + * The 'peer' used internally by Catalog Region Replicas Replication Source. The Replication system + * has 'peer' baked into its core so though we do not need 'peering', we need a 'peer' and its + * configuration else the replication system breaks at a few locales. Set + * "hbase.region.replica.catalog.replication" if you want to change the configured endpoint. */ @InterfaceAudience.Private class CatalogReplicationSourcePeer extends ReplicationPeerImpl { @@ -35,15 +35,11 @@ class CatalogReplicationSourcePeer extends ReplicationPeerImpl { * @param clusterKey Usually the UUID from zk passed in by caller as a String. */ CatalogReplicationSourcePeer(Configuration configuration, String clusterKey) { - super(configuration, ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_PEER + "_catalog", - true, - ReplicationPeerConfig.newBuilder(). - setClusterKey(clusterKey). - setReplicationEndpointImpl( - configuration.get("hbase.region.replica.catalog.replication", - RegionReplicaReplicationEndpoint.class.getName())). - setBandwidth(0). // '0' means no bandwidth. - setSerial(false). - build()); + super(configuration, ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_PEER + "_catalog", true, + ReplicationPeerConfig.newBuilder().setClusterKey(clusterKey) + .setReplicationEndpointImpl(configuration.get("hbase.region.replica.catalog.replication", + RegionReplicaReplicationEndpoint.class.getName())) + .setBandwidth(0). // '0' means no bandwidth. + setSerial(false).build()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java index ddae7311225..b9a7be813af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java index f7040d6fc81..8b334dfb809 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.replication.regionserver; @@ -15,7 +22,6 @@ import java.io.IOException; import java.net.URL; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -26,13 +32,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This will load all the xml configuration files for the source cluster replication ID from - * user configured replication configuration directory. + * This will load all the xml configuration files for the source cluster replication ID from user + * configured replication configuration directory. 
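getConf() in the provider below caches one Configuration per replication cluster id, populating the map lazily under double-checked synchronization. A compact self-contained sketch of that caching pattern, with java.util.Properties standing in for Configuration and a placeholder loader instead of the xml-directory scan.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class SourceConfCacheSketch {

  private final Map<String, Properties> sourceClustersConfs = new HashMap<>();

  /** Lazily load and cache the configuration for a source cluster, as getConf() does. */
  Properties getConf(String replicationClusterId) {
    if (sourceClustersConfs.get(replicationClusterId) == null) {
      synchronized (this.sourceClustersConfs) {
        if (sourceClustersConfs.get(replicationClusterId) == null) {
          sourceClustersConfs.put(replicationClusterId, loadFromDisk(replicationClusterId));
        }
      }
    }
    return sourceClustersConfs.get(replicationClusterId);
  }

  /** Placeholder for reading the per-cluster xml files from the replication conf directory. */
  private Properties loadFromDisk(String clusterId) {
    Properties p = new Properties();
    p.setProperty("loaded.for", clusterId);
    return p;
  }

  public static void main(String[] args) {
    SourceConfCacheSketch cache = new SourceConfCacheSketch();
    System.out.println(cache.getConf("peer-1").getProperty("loaded.for")); // prints peer-1
  }
}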
*/ @InterfaceAudience.Private public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurationProvider { private static final Logger LOG = - LoggerFactory.getLogger(DefaultSourceFSConfigurationProvider.class); + LoggerFactory.getLogger(DefaultSourceFSConfigurationProvider.class); // Map containing all the source clusters configurations against their replication cluster id private final Map sourceClustersConfs = new HashMap<>(); @@ -40,7 +46,7 @@ public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurati @Override public Configuration getConf(Configuration sinkConf, String replicationClusterId) - throws IOException { + throws IOException { if (sourceClustersConfs.get(replicationClusterId) == null) { synchronized (this.sourceClustersConfs) { if (sourceClustersConfs.get(replicationClusterId) == null) { @@ -62,8 +68,7 @@ public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurati File confDir = new File(replicationConfDir, replicationClusterId); LOG.info("Loading source cluster " + replicationClusterId - + " file system configurations from xml " - + "files under directory " + confDir); + + " file system configurations from xml " + "files under directory " + confDir); String[] listofConfFiles = FileUtil.list(confDir); for (String confFile : listofConfFiles) { if (new File(confDir, confFile).isFile() && confFile.endsWith(XML)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index 2f81128a792..dd28b10d184 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,10 +61,9 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongM /** * Provides information about the existing states of replication, replication peers and queues. - * * Usage: hbase org.apache.hadoop.hbase.replication.regionserver.DumpReplicationQueues [args] - * Arguments: --distributed Polls each RS to dump information about the queue - * --hdfs Reports HDFS usage by the replication queues (note: can be overestimated). + * Arguments: --distributed Polls each RS to dump information about the queue --hdfs Reports HDFS + * usage by the replication queues (note: can be overestimated). 
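DumpReplicationQueues, whose usage is described above, accepts --distributed and --hdfs and, as the option check later in this hunk shows, rejects --hdfs unless --distributed is also given. A self-contained sketch of that argument validation; the Opts class here is illustrative.

public class DumpQueuesArgsSketch {

  static final class Opts {
    boolean distributed;
    boolean hdfs;
  }

  // The real tool is invoked as:
  //   hbase org.apache.hadoop.hbase.replication.regionserver.DumpReplicationQueues [--distributed [--hdfs]]
  static Opts parse(String[] args) {
    Opts opts = new Opts();
    for (String arg : args) {
      if (arg.equals("--distributed")) {
        opts.distributed = true;
      } else if (arg.equals("--hdfs")) {
        opts.hdfs = true;
      } else {
        throw new IllegalArgumentException("Unrecognized option: " + arg);
      }
    }
    // Mirrors the check in the hunk below: --hdfs can only be used with --distributed.
    if (opts.hdfs && !opts.distributed) {
      throw new IllegalArgumentException("--hdfs option can only be used with --distributed");
    }
    return opts;
  }

  public static void main(String[] args) {
    Opts opts = parse(new String[] { "--distributed", "--hdfs" });
    System.out.println("distributed=" + opts.distributed + ", hdfs=" + opts.hdfs);
  }
}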
*/ @InterfaceAudience.Private public class DumpReplicationQueues extends Configured implements Tool { @@ -97,7 +96,7 @@ public class DumpReplicationQueues extends Configured implements Tool { this.distributed = that.distributed; } - boolean isHdfs () { + boolean isHdfs() { return hdfs; } @@ -105,7 +104,7 @@ public class DumpReplicationQueues extends Configured implements Tool { return distributed; } - void setHdfs (boolean hdfs) { + void setHdfs(boolean hdfs) { this.hdfs = hdfs; } @@ -137,7 +136,7 @@ public class DumpReplicationQueues extends Configured implements Tool { printUsageAndExit("ERROR: Unrecognized option/command: " + cmd, -1); } // check that --distributed is present when --hdfs is in the arguments - if (!opts.isDistributed() && opts.isHdfs()) { + if (!opts.isDistributed() && opts.isHdfs()) { printUsageAndExit("ERROR: --hdfs option can only be used with --distributed: " + cmd, -1); } } @@ -145,10 +144,7 @@ public class DumpReplicationQueues extends Configured implements Tool { } /** - * Main - * - * @param args - * @throws Exception + * Main nn */ public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); @@ -191,10 +187,10 @@ public class DumpReplicationQueues extends Configured implements Tool { System.err.println("General Options:"); System.err.println(" -h|--h|--help Show this help and exit."); System.err.println(" --distributed Poll each RS and print its own replication queue. " - + "Default only polls ZooKeeper"); + + "Default only polls ZooKeeper"); System.err.println(" --hdfs Use HDFS to calculate usage of WALs by replication." - + " It could be overestimated if replicating to multiple peers." - + " --distributed flag is also needed."); + + " It could be overestimated if replicating to multiple peers." 
+ + " --distributed flag is also needed."); } protected static void printUsageAndExit(final String message, final int exitCode) { @@ -209,9 +205,9 @@ public class DumpReplicationQueues extends Configured implements Tool { ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); - ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + - EnvironmentEdgeManager.currentTime(), - new WarnOnlyAbortable(), true); + ZKWatcher zkw = + new ZKWatcher(conf, "DumpReplicationQueues" + EnvironmentEdgeManager.currentTime(), + new WarnOnlyAbortable(), true); try { // Our zk watcher @@ -219,7 +215,7 @@ public class DumpReplicationQueues extends Configured implements Tool { List replicatedTableCFs = admin.listReplicatedTableCFs(); if (replicatedTableCFs.isEmpty()) { LOG.info("No tables with a configured replication peer were found."); - return(0); + return (0); } else { LOG.info("Replicated Tables: " + replicatedTableCFs); } @@ -235,8 +231,8 @@ public class DumpReplicationQueues extends Configured implements Tool { if (opts.isDistributed()) { LOG.info("Found [--distributed], will poll each RegionServer."); - Set peerIds = peers.stream().map((peer) -> peer.getPeerId()) - .collect(Collectors.toSet()); + Set peerIds = + peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet()); System.out.println(dumpQueues(zkw, peerIds, opts.isHdfs())); System.out.println(dumpReplicationSummary()); } else { @@ -256,14 +252,14 @@ public class DumpReplicationQueues extends Configured implements Tool { StringBuilder sb = new StringBuilder(); if (!deletedQueues.isEmpty()) { sb.append("Found " + deletedQueues.size() + " deleted queues" - + ", run hbck -fixReplication in order to remove the deleted replication queues\n"); + + ", run hbck -fixReplication in order to remove the deleted replication queues\n"); for (String deletedQueue : deletedQueues) { sb.append(" " + deletedQueue + "\n"); } } if (!deadRegionServers.isEmpty()) { sb.append("Found " + deadRegionServers.size() + " dead regionservers" - + ", restart one regionserver to transfer the queues of dead regionservers\n"); + + ", restart one regionserver to transfer the queues of dead regionservers\n"); for (String deadRs : deadRegionServers) { sb.append(" " + deadRs + "\n"); } @@ -271,7 +267,8 @@ public class DumpReplicationQueues extends Configured implements Tool { if (!peersQueueSize.isEmpty()) { sb.append("Dumping all peers's number of WALs in replication queue\n"); for (Map.Entry entry : peersQueueSize.asMap().entrySet()) { - sb.append(" PeerId: " + entry.getKey() + " , sizeOfLogQueue: " + entry.getValue() + "\n"); + sb.append( + " PeerId: " + entry.getKey() + " , sizeOfLogQueue: " + entry.getValue() + "\n"); } } sb.append(" Total size of WALs on HDFS: " + StringUtils.humanSize(totalSizeOfWALs) + "\n"); @@ -335,8 +332,8 @@ public class DumpReplicationQueues extends Configured implements Tool { } private String formatQueue(ServerName regionserver, ReplicationQueueStorage queueStorage, - ReplicationQueueInfo queueInfo, String queueId, List wals, boolean isDeleted, - boolean hdfs) throws Exception { + ReplicationQueueInfo queueInfo, String queueId, List wals, boolean isDeleted, + boolean hdfs) throws Exception { StringBuilder sb = new StringBuilder(); List deadServers; @@ -357,23 +354,23 @@ public class DumpReplicationQueues extends Configured implements Tool { for (String wal : wals) { long position = queueStorage.getWALPosition(regionserver, queueInfo.getPeerId(), wal); - 
sb.append(" Replication position for " + wal + ": " + (position > 0 ? position : "0" - + " (not started or nothing to replicate)") + "\n"); + sb.append(" Replication position for " + wal + ": " + + (position > 0 ? position : "0" + " (not started or nothing to replicate)") + "\n"); } if (hdfs) { FileSystem fs = FileSystem.get(getConf()); sb.append(" Total size of WALs on HDFS for this queue: " - + StringUtils.humanSize(getTotalWALSize(fs, wals, regionserver)) + "\n"); + + StringUtils.humanSize(getTotalWALSize(fs, wals, regionserver)) + "\n"); } return sb.toString(); } /** - * return total size in bytes from a list of WALs + * return total size in bytes from a list of WALs */ private long getTotalWALSize(FileSystem fs, List wals, ServerName server) - throws IOException { + throws IOException { long size = 0; FileStatus fileStatus; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index cb16ac5cca0..a899d37c594 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; @@ -75,29 +74,28 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; /** - * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} - * implementation for replicating to another HBase cluster. - * For the slave cluster it selects a random number of peers - * using a replication ratio. For example, if replication ration = 0.1 - * and slave cluster has 100 region servers, 10 will be selected. + * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} implementation for replicating + * to another HBase cluster. For the slave cluster it selects a random number of peers using a + * replication ratio. For example, if replication ration = 0.1 and slave cluster has 100 region + * servers, 10 will be selected. *

- * A stream is considered down when we cannot contact a region server on the
- * peer cluster for more than 55 seconds by default.
+ * A stream is considered down when we cannot contact a region server on the peer cluster for more
+ * than 55 seconds by default.
  * </p>
      */ @InterfaceAudience.Private public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoint { private static final Logger LOG = - LoggerFactory.getLogger(HBaseInterClusterReplicationEndpoint.class); + LoggerFactory.getLogger(HBaseInterClusterReplicationEndpoint.class); private static final long DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER = 2; /** Drop edits for tables that been deleted from the replication source and target */ public static final String REPLICATION_DROP_ON_DELETED_TABLE_KEY = - "hbase.replication.drop.on.deleted.table"; + "hbase.replication.drop.on.deleted.table"; /** Drop edits for CFs that been deleted from the replication source and target */ public static final String REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY = - "hbase.replication.drop.on.deleted.columnfamily"; + "hbase.replication.drop.on.deleted.columnfamily"; private ClusterConnection conn; private Configuration localConf; @@ -112,7 +110,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private long maxTerminationWait; // Size limit for replication RPCs, in bytes private int replicationRpcLimit; - //Metrics for this source + // Metrics for this source private MetricsSource metrics; // Handles connecting to peer region servers private ReplicationSinkManager replicationSinkMgr; @@ -127,13 +125,13 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private boolean dropOnDeletedTables; private boolean dropOnDeletedColumnFamilies; private boolean isSerial = false; - //Initialising as 0 to guarantee at least one logging message + // Initialising as 0 to guarantee at least one logging message private long lastSinkFetchTime = 0; /* * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiating - * different Connection implementations, or initialize it in a different way, - * so defining createConnection as protected for possible overridings. + * different Connection implementations, or initialize it in a different way, so defining + * createConnection as protected for possible overridings. */ protected Connection createConnection(Configuration conf) throws IOException { return ConnectionFactory.createConnection(conf); @@ -141,12 +139,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi /* * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiating - * different ReplicationSinkManager implementations, or initialize it in a different way, - * so defining createReplicationSinkManager as protected for possible overridings. + * different ReplicationSinkManager implementations, or initialize it in a different way, so + * defining createReplicationSinkManager as protected for possible overridings. 
*/ protected ReplicationSinkManager createReplicationSinkManager(Connection conn) { - return new ReplicationSinkManager((ClusterConnection) conn, this.ctx.getPeerId(), - this, this.conf); + return new ReplicationSinkManager((ClusterConnection) conn, this.ctx.getPeerId(), this, + this.conf); } @Override @@ -156,25 +154,23 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi this.localConf = HBaseConfiguration.create(ctx.getLocalConfiguration()); decorateConf(); this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); - this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", - maxRetriesMultiplier); + this.socketTimeoutMultiplier = + this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier); // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator // tasks to terminate when doStop() is called. long maxTerminationWaitMultiplier = this.conf.getLong( - "replication.source.maxterminationmultiplier", - DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); - this.maxTerminationWait = maxTerminationWaitMultiplier * - this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + "replication.source.maxterminationmultiplier", DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); + this.maxTerminationWait = maxTerminationWaitMultiplier + * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); // TODO: This connection is replication specific or we should make it particular to // replication and make replication specific settings such as compression or codec to use // passing Cells. Connection connection = createConnection(this.conf); - //Since createConnection method may be overridden by extending classes, we need to make sure - //it's indeed returning a ClusterConnection instance. + // Since createConnection method may be overridden by extending classes, we need to make sure + // it's indeed returning a ClusterConnection instance. Preconditions.checkState(connection instanceof ClusterConnection); this.conn = (ClusterConnection) connection; - this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); + this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); // ReplicationQueueInfo parses the peerId out of the znode for us this.replicationSinkMgr = createReplicationSinkManager(conn); @@ -182,21 +178,19 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); this.exec = Threads.getBoundedCachedThreadPool(maxThreads, 60, TimeUnit.SECONDS, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build()); + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build()); this.abortable = ctx.getAbortable(); // Set the size limit for replication RPCs to 95% of the max request size. // We could do with less slop if we have an accurate estimate of encoded size. Being // conservative for now. 
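The class javadoc above describes picking a fraction of the peer cluster's region servers as sinks (ratio 0.1 with 100 region servers selects 10). A stand-alone illustration of that idea, assuming a shuffle-and-truncate selection; the real logic lives in ReplicationSinkManager.chooseSinks() and its rounding may differ.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SinkSelectionSketch {

  /** Pick roughly ratio * size of the peer's region servers as replication sinks. */
  static List<String> chooseSinks(List<String> peerRegionServers, float ratio) {
    List<String> candidates = new ArrayList<>(peerRegionServers);
    Collections.shuffle(candidates);
    int numSinks = Math.max(1, Math.round(candidates.size() * ratio));
    return candidates.subList(0, numSinks);
  }

  public static void main(String[] args) {
    List<String> servers = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
      servers.add("rs-" + i);
    }
    System.out.println(chooseSinks(servers, 0.1f).size()); // prints 10
  }
}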
- this.replicationRpcLimit = (int)(0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE, - RpcServer.DEFAULT_MAX_REQUEST_SIZE)); - this.dropOnDeletedTables = - this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false); - this.dropOnDeletedColumnFamilies = this.conf - .getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false); + this.replicationRpcLimit = + (int) (0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE, RpcServer.DEFAULT_MAX_REQUEST_SIZE)); + this.dropOnDeletedTables = this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false); + this.dropOnDeletedColumnFamilies = + this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false); - this.replicationBulkLoadDataEnabled = - conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); + this.replicationBulkLoadDataEnabled = conf.getBoolean( + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); if (this.replicationBulkLoadDataEnabled) { replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID); } @@ -233,15 +227,15 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi /** * Do the sleeping logic - * @param msg Why we sleep + * @param msg Why we sleep * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { - LOG.trace("{} {}, sleeping {} times {}", - logPeerId(), msg, sleepForRetries, sleepMultiplier); + LOG.trace("{} {}, sleeping {} times {}", logPeerId(), msg, sleepForRetries, + sleepMultiplier); } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { @@ -262,7 +256,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi int numSinks = Math.max(replicationSinkMgr.getNumSinks(), 1); int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks); List> entryLists = - Stream.generate(ArrayList::new).limit(n).collect(Collectors.toList()); + Stream.generate(ArrayList::new).limit(n).collect(Collectors.toList()); int[] sizes = new int[n]; for (Entry e : entries) { int index = Math.abs(Bytes.hashCode(e.getKey().getEncodedRegionName()) % n); @@ -284,7 +278,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi Map> regionEntries = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Entry e : entries) { regionEntries.computeIfAbsent(e.getKey().getEncodedRegionName(), key -> new ArrayList<>()) - .add(e); + .add(e); } return new ArrayList<>(regionEntries.values()); } @@ -344,7 +338,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi List> entryList = new ArrayList<>(); Map existMap = new HashMap<>(); try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration()); - Admin localAdmin = localConn.getAdmin()) { + Admin localAdmin = localConn.getAdmin()) { for (List oldEntries : oldEntryList) { List entries = new ArrayList<>(); for (Entry e : oldEntries) { @@ -369,7 +363,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // and add a table filter there; but that would break the encapsulation, // so we're doing the filtering here. 
LOG.warn("Missing table detected at sink, local table also does not exist, " - + "filtering edits for table '{}'", tableName); + + "filtering edits for table '{}'", tableName); } } if (!entries.isEmpty()) { @@ -387,7 +381,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi List> entryList = new ArrayList<>(); Map> existColumnFamilyMap = new HashMap<>(); try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration()); - Admin localAdmin = localConn.getAdmin()) { + Admin localAdmin = localConn.getAdmin()) { for (List oldEntries : oldEntryList) { List entries = new ArrayList<>(); for (Entry e : oldEntries) { @@ -395,7 +389,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi if (!existColumnFamilyMap.containsKey(tableName)) { try { Set cfs = localAdmin.getDescriptor(tableName).getColumnFamilyNames().stream() - .map(Bytes::toString).collect(Collectors.toSet()); + .map(Bytes::toString).collect(Collectors.toSet()); existColumnFamilyMap.put(tableName, cfs); } catch (Exception ex) { LOG.warn("Exception getting cf names for local table {}", tableName, ex); @@ -429,8 +423,9 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // and add a table filter there; but that would break the encapsulation, // so we're doing the filtering here. LOG.warn( - "Missing column family detected at sink, local column family also does not exist," - + " filtering edits for table '{}',column family '{}'", tableName, missingCFs); + "Missing column family detected at sink, local column family also does not exist," + + " filtering edits for table '{}',column family '{}'", + tableName, missingCFs); } } if (!entries.isEmpty()) { @@ -457,7 +452,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi } private long parallelReplicate(CompletionService pool, ReplicateContext replicateContext, - List> batches) throws IOException { + List> batches) throws IOException { int futures = 0; for (int i = 0; i < batches.size(); i++) { List entries = batches.get(i); @@ -490,8 +485,9 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi } catch (InterruptedException ie) { iox = new IOException(ie); } catch (ExecutionException ee) { - iox = ee.getCause() instanceof IOException? - (IOException)ee.getCause(): new IOException(ee.getCause()); + iox = ee.getCause() instanceof IOException + ? (IOException) ee.getCause() + : new IOException(ee.getCause()); } } if (iox != null) { @@ -516,12 +512,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi int numSinks = replicationSinkMgr.getNumSinks(); if (numSinks == 0) { - if ((EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >= - (maxRetriesMultiplier*1000)) { - LOG.warn( - "No replication sinks found, returning without replicating. " - + "The source should retry with the same set of edits. Not logging this again for " - + "the next {} seconds.", maxRetriesMultiplier); + if ( + (EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >= (maxRetriesMultiplier * 1000) + ) { + LOG.warn("No replication sinks found, returning without replicating. " + + "The source should retry with the same set of edits. 
Not logging this again for " + + "the next {} seconds.", maxRetriesMultiplier); lastSinkFetchTime = EnvironmentEdgeManager.currentTime(); } sleepForRetries("No sinks available at peer", sleepMultiplier); @@ -557,12 +553,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi batches = filterNotExistColumnFamilyEdits(batches); if (batches.isEmpty()) { LOG.warn("After filter not exist column family's edits, 0 edits to replicate, " - + "just return"); + + "just return"); return true; } } else { LOG.warn("{} Peer encountered RemoteException, rechecking all sinks: ", logPeerId(), - ioe); + ioe); replicationSinkMgr.chooseSinks(); } } else { @@ -570,9 +566,10 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // This exception means we waited for more than 60s and nothing // happened, the cluster is alive and calling it right away // even for a test just makes things worse. - sleepForRetries("Encountered a SocketTimeoutException. Since the " + - "call to the remote cluster timed out, which is usually " + - "caused by a machine failure or a massive slowdown", + sleepForRetries( + "Encountered a SocketTimeoutException. Since the " + + "call to the remote cluster timed out, which is usually " + + "caused by a machine failure or a massive slowdown", this.socketTimeoutMultiplier); } else if (ioe instanceof ConnectException || ioe instanceof UnknownHostException) { LOG.warn("{} Peer is unavailable, rechecking all sinks: ", logPeerId(), ioe); @@ -612,16 +609,16 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi } // Abort if the tasks did not terminate in time if (!exec.isTerminated()) { - String errMsg = "HBaseInterClusterReplicationEndpoint termination failed. The " + - "ThreadPoolExecutor failed to finish all tasks within " + maxTerminationWait + "ms. " + - "Aborting to prevent Replication from deadlocking. See HBASE-16081."; + String errMsg = "HBaseInterClusterReplicationEndpoint termination failed. The " + + "ThreadPoolExecutor failed to finish all tasks within " + maxTerminationWait + "ms. " + + "Aborting to prevent Replication from deadlocking. See HBASE-16081."; abortable.abort(errMsg, new IOException(errMsg)); } notifyStopped(); } protected int replicateEntries(List entries, int batchIndex, int timeout) - throws IOException { + throws IOException { SinkPeer sinkPeer = null; try { int entriesHashCode = System.identityHashCode(entries); @@ -655,7 +652,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi } private int serialReplicateRegionEntries(List entries, int batchIndex, int timeout) - throws IOException { + throws IOException { int batchSize = 0, index = 0; List batch = new ArrayList<>(); for (Entry entry : entries) { @@ -675,11 +672,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi } protected Callable createReplicator(List entries, int batchIndex, int timeout) { - return isSerial ? () -> serialReplicateRegionEntries(entries, batchIndex, timeout) - : () -> replicateEntries(entries, batchIndex, timeout); + return isSerial + ? 
() -> serialReplicateRegionEntries(entries, batchIndex, timeout) + : () -> replicateEntries(entries, batchIndex, timeout); } - private String logPeerId(){ + private String logPeerId() { return "[Source for peer " + this.ctx.getPeerId() + "]:"; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java index 2d72d3f7d4a..b3ed68ee00b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
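createBatches() above spreads WAL entries over up to n batches by hashing the encoded region name, so all edits for one region land in the same batch. A self-contained sketch of that partitioning, with plain strings standing in for WAL entries.

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class BatchPartitionSketch {

  /** Partition entries into at most maxThreads/numSinks batches, keyed by region name hash. */
  static List<List<String>> createBatches(List<String> entries, int maxThreads, int numSinks) {
    // Same sizing idea as the endpoint: roughly one batch per 100 entries, capped by the
    // thread pool size and the number of available sinks (never less than one).
    int n = Math.min(Math.min(maxThreads, entries.size() / 100 + 1), Math.max(numSinks, 1));
    List<List<String>> batches =
      Stream.<List<String>> generate(ArrayList::new).limit(n).collect(Collectors.toList());
    for (String regionName : entries) {
      // Same region name => same hash => same batch, preserving per-region ordering.
      batches.get(Math.abs(regionName.hashCode() % n)).add(regionName);
    }
    return batches;
  }

  public static void main(String[] args) {
    List<String> entries = new ArrayList<>();
    for (int i = 0; i < 300; i++) {
      entries.add("region-" + (i % 5)); // 300 edits spread over 5 regions
    }
    for (List<String> batch : createBatches(entries, 8, 3)) {
      System.out.println("batch size: " + batch.size());
    }
  }
}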
*/ package org.apache.hadoop.hbase.replication.regionserver; @@ -64,11 +71,11 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto public class HFileReplicator implements Closeable { /** Maximum number of threads to allow in pool to copy hfiles during replication */ public static final String REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY = - "hbase.replication.bulkload.copy.maxthreads"; + "hbase.replication.bulkload.copy.maxthreads"; public static final int REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT = 10; /** Number of hfiles to copy per thread during replication */ public static final String REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY = - "hbase.replication.bulkload.copy.hfiles.perthread"; + "hbase.replication.bulkload.copy.hfiles.perthread"; public static final int REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT = 10; private static final Logger LOG = LoggerFactory.getLogger(HFileReplicator.class); @@ -90,10 +97,9 @@ public class HFileReplicator implements Closeable { private int copiesPerThread; private List sourceClusterIds; - public HFileReplicator(Configuration sourceClusterConf, - String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath, - Map>>> tableQueueMap, Configuration conf, - Connection connection, List sourceClusterIds) throws IOException { + public HFileReplicator(Configuration sourceClusterConf, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath, Map>>> tableQueueMap, + Configuration conf, Connection connection, List sourceClusterIds) throws IOException { this.sourceClusterConf = sourceClusterConf; this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath; this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath; @@ -106,16 +112,13 @@ public class HFileReplicator implements Closeable { fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); this.hbaseStagingDir = new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME); - this.maxCopyThreads = - this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, - REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); + this.maxCopyThreads = this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, + REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); this.exec = Threads.getBoundedCachedThreadPool(maxCopyThreads, 60, TimeUnit.SECONDS, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath). 
- build()); - this.copiesPerThread = - conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, - REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath).build()); + this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, + REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); sinkFs = FileSystem.get(conf); } @@ -174,8 +177,8 @@ public class HFileReplicator implements Closeable { return null; } - private void doBulkLoad(LoadIncrementalHFiles loadHFiles, Table table, - Deque queue, RegionLocator locator, int maxRetries) throws IOException { + private void doBulkLoad(LoadIncrementalHFiles loadHFiles, Table table, Deque queue, + RegionLocator locator, int maxRetries) throws IOException { int count = 0; Pair startEndKeys; while (!queue.isEmpty()) { @@ -244,7 +247,7 @@ public class HFileReplicator implements Closeable { */ String sourceScheme = sourceClusterPath.toUri().getScheme(); String disableCacheName = - String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme }); + String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme }); sourceClusterConf.setBoolean(disableCacheName, true); sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf); @@ -252,12 +255,11 @@ public class HFileReplicator implements Closeable { User user = userProvider.getCurrent(); // For each table name in the map for (Entry>>> tableEntry : bulkLoadHFileMap - .entrySet()) { + .entrySet()) { String tableName = tableEntry.getKey(); // Create staging directory for each table - Path stagingDir = - createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName)); + Path stagingDir = createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName)); familyHFilePathsPairsList = tableEntry.getValue(); familyHFilePathsPairsListSize = familyHFilePathsPairsList.size(); @@ -279,9 +281,8 @@ public class HFileReplicator implements Closeable { int currentCopied = 0; // Copy the hfiles parallely while (totalNoOfHFiles > currentCopied + this.copiesPerThread) { - c = - new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, - currentCopied + this.copiesPerThread)); + c = new Copier(sourceFs, familyStagingDir, + hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread)); future = exec.submit(c); futures.add(future); currentCopied += this.copiesPerThread; @@ -289,9 +290,8 @@ public class HFileReplicator implements Closeable { int remaining = totalNoOfHFiles - currentCopied; if (remaining > 0) { - c = - new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, - currentCopied + remaining)); + c = new Copier(sourceFs, familyStagingDir, + hfilePaths.subList(currentCopied, currentCopied + remaining)); future = exec.submit(c); futures.add(future); } @@ -300,15 +300,14 @@ public class HFileReplicator implements Closeable { try { f.get(); } catch (InterruptedException e) { - InterruptedIOException iioe = - new InterruptedIOException( - "Failed to copy HFiles to local file system. This will be retried again " - + "by the source cluster."); + InterruptedIOException iioe = new InterruptedIOException( + "Failed to copy HFiles to local file system. This will be retried again " + + "by the source cluster."); iioe.initCause(e); throw iioe; } catch (ExecutionException e) { throw new IOException("Failed to copy HFiles to local file system. 
This will " - + "be retried again by the source cluster.", e); + + "be retried again by the source cluster.", e); } } } @@ -321,7 +320,7 @@ public class HFileReplicator implements Closeable { if (sourceFs != null) { sourceFs.close(); } - if(exec != null) { + if (exec != null) { exec.shutdown(); } } @@ -333,7 +332,7 @@ public class HFileReplicator implements Closeable { int RANDOM_RADIX = 32; String doubleUnderScore = UNDERSCORE + UNDERSCORE; String randomDir = user.getShortName() + doubleUnderScore + tblName + doubleUnderScore - + (new BigInteger(RANDOM_WIDTH, ThreadLocalRandom.current()).toString(RANDOM_RADIX)); + + (new BigInteger(RANDOM_WIDTH, ThreadLocalRandom.current()).toString(RANDOM_RADIX)); return createStagingDir(baseDir, user, randomDir); } @@ -354,7 +353,7 @@ public class HFileReplicator implements Closeable { private List hfiles; public Copier(FileSystem sourceFs, final Path stagingDir, final List hfiles) - throws IOException { + throws IOException { this.sourceFs = sourceFs; this.stagingDir = stagingDir; this.hfiles = hfiles; @@ -374,8 +373,7 @@ public class HFileReplicator implements Closeable { // source will retry to replicate these data. } catch (FileNotFoundException e) { LOG.info("Failed to copy hfile from " + sourceHFilePath + " to " + localHFilePath - + ". Trying to copy from hfile archive directory.", - e); + + ". Trying to copy from hfile archive directory.", e); sourceHFilePath = new Path(sourceHFileArchiveDirPath, hfiles.get(i)); try { @@ -384,8 +382,7 @@ public class HFileReplicator implements Closeable { // This will mean that the hfile does not exists any where in source cluster FS. So we // cannot do anything here just log and continue. LOG.debug("Failed to copy hfile from " + sourceHFilePath + " to " + localHFilePath - + ". Hence ignoring this hfile from replication..", - e1); + + ". Hence ignoring this hfile from replication..", e1); continue; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index 8f07c08c4eb..48964481ce7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is for maintaining the various replication statistics for a sink and publishing them @@ -35,12 +34,11 @@ public class MetricsSink { public MetricsSink() { mss = - CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getSink(); + CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getSink(); } /** * Set the age of the last applied operation - * * @param timestamp The timestamp of the last operation applied. 
* @return the age that was set */ @@ -55,8 +53,8 @@ public class MetricsSink { } /** - * Refreshing the age makes sure the value returned is the actual one and - * not the one set a replication time + * Refreshing the age makes sure the value returned is the actual one and not the one set a + * replication time * @return refreshed age */ public long refreshAgeOfLastAppliedOp() { @@ -64,9 +62,7 @@ public class MetricsSink { } /** - * Convience method to change metrics when a batch of operations are applied. - * - * @param batchSize + * Convience method to change metrics when a batch of operations are applied. n */ public void applyBatch(long batchSize) { mss.incrAppliedBatches(1); @@ -75,7 +71,6 @@ public class MetricsSink { /** * Convience method to change metrics when a batch of operations are applied. - * * @param batchSize total number of mutations that are applied/replicated * @param hfileSize total number of hfiles that are applied/replicated */ @@ -87,21 +82,19 @@ public class MetricsSink { /** * Convenience method to update metrics when batch of operations has failed. */ - public void incrementFailedBatches(){ + public void incrementFailedBatches() { mss.incrFailedBatches(); } /** - * Get the count of the failed bathes - * @return failedBatches + * Get the count of the failed bathes n */ protected long getFailedBatches() { return mss.getFailedBatches(); } /** - * Get the Age of Last Applied Op - * @return ageOfLastAppliedOp + * Get the Age of Last Applied Op n */ public long getAgeOfLastAppliedOp() { return mss.getLastAppliedOpAge(); @@ -129,16 +122,14 @@ public class MetricsSink { } /** - * Gets the time stamp from when the Sink was initialized. - * @return startTimestamp + * Gets the time stamp from when the Sink was initialized. n */ public long getStartTimestamp() { return this.startTimestamp; } /** - * Gets the total number of OPs delivered to this sink. - * @return totalAplliedOps + * Gets the total number of OPs delivered to this sink. n */ public long getAppliedOps() { return this.mss.getSinkAppliedOps(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index 3510254959b..463e618752e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
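The MetricsSink methods above track the age of the last applied operation, and refreshAgeOfLastAppliedOp() recomputes it so callers see the current lag rather than the lag recorded at replication time. A minimal stand-alone sketch of that metric; the class name is illustrative.

public class AgeMetricSketch {

  private long lastTimestamp;
  private long age;

  /** Record the write time of the last applied operation and return the resulting age. */
  long setAgeOfLastAppliedOp(long timestamp) {
    this.lastTimestamp = timestamp;
    this.age = System.currentTimeMillis() - timestamp;
    return this.age;
  }

  /** Recompute the age against the stored timestamp so callers see the current lag. */
  long refreshAgeOfLastAppliedOp() {
    return setAgeOfLastAppliedOp(this.lastTimestamp);
  }

  public static void main(String[] args) throws InterruptedException {
    AgeMetricSketch metric = new AgeMetricSketch();
    metric.setAgeOfLastAppliedOp(System.currentTimeMillis());
    Thread.sleep(50);
    System.out.println(metric.refreshAgeOfLastAppliedOp() >= 50); // prints true
  }
}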
*/ - package org.apache.hadoop.hbase.replication.regionserver; import java.util.HashMap; @@ -53,14 +52,12 @@ public class MetricsSource implements BaseSource { /** * Constructor used to register the metrics - * * @param id Name of the source this class is monitoring */ public MetricsSource(String id) { this.id = id; - singleSourceSource = - CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) - .getSource(id); + singleSourceSource = CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getSource(id); globalSourceSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); singleSourceSourceByTable = new HashMap<>(); @@ -68,13 +65,13 @@ public class MetricsSource implements BaseSource { /** * Constructor for injecting custom (or test) MetricsReplicationSourceSources - * @param id Name of the source this class is monitoring + * @param id Name of the source this class is monitoring * @param singleSourceSource Class to monitor id-scoped metrics * @param globalSourceSource Class to monitor global-scoped metrics */ public MetricsSource(String id, MetricsReplicationSourceSource singleSourceSource, - MetricsReplicationGlobalSourceSource globalSourceSource, - Map singleSourceSourceByTable) { + MetricsReplicationGlobalSourceSource globalSourceSource, + Map singleSourceSourceByTable) { this.id = id; this.singleSourceSource = singleSourceSource; this.globalSourceSource = globalSourceSource; @@ -84,7 +81,7 @@ public class MetricsSource implements BaseSource { /** * Set the age of the last edit that was shipped * @param timestamp target write time of the edit - * @param walGroup which group we are setting + * @param walGroup which group we are setting */ public void setAgeOfLastShippedOp(long timestamp, String walGroup) { long age = EnvironmentEdgeManager.currentTime() - timestamp; @@ -96,7 +93,6 @@ public class MetricsSource implements BaseSource { /** * Update the table level replication metrics per table - * * @param walEntries List of pairs of WAL entry and it's size */ public void updateTableLevelMetrics(List> walEntries) { @@ -109,9 +105,8 @@ public class MetricsSource implements BaseSource { // get the replication metrics source for table at the run time MetricsReplicationTableSource tableSource = this.getSingleSourceSourceByTable() - .computeIfAbsent(tableName, - t -> CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) - .getTableSource(t)); + .computeIfAbsent(tableName, t -> CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)); tableSource.setLastShippedAge(age); tableSource.incrShippedBytes(entrySize); } @@ -124,16 +119,15 @@ public class MetricsSource implements BaseSource { */ public void setAgeOfLastShippedOpByTable(long timestamp, String tableName) { long age = EnvironmentEdgeManager.currentTime() - timestamp; - this.getSingleSourceSourceByTable().computeIfAbsent( - tableName, t -> CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)) - .setLastShippedAge(age); + this.getSingleSourceSourceByTable() + .computeIfAbsent(tableName, t -> CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)) + .setLastShippedAge(age); } /** * get age of last shipped op of given wal group. 
If the walGroup is null, return 0 - * @param walGroup which group we are getting - * @return age + * @param walGroup which group we are getting n */ public long getAgeOfLastShippedOp(String walGroup) { return this.ageOfLastShippedOp.get(walGroup) == null ? 0 : ageOfLastShippedOp.get(walGroup); @@ -186,7 +180,6 @@ public class MetricsSource implements BaseSource { /** * Add on the the number of log edits read - * * @param delta the number of log edits read. */ private void incrLogEditsRead(long delta) { @@ -201,7 +194,6 @@ public class MetricsSource implements BaseSource { /** * Add on the number of log edits filtered - * * @param delta the number filtered. */ public void incrLogEditsFiltered(long delta) { @@ -216,7 +208,6 @@ public class MetricsSource implements BaseSource { /** * Convience method to apply changes to metrics do to shipping a batch of logs. - * * @param batchSize the size of the batch that was shipped to sinks. */ public void shipBatch(long batchSize, int sizeInBytes) { @@ -233,17 +224,16 @@ public class MetricsSource implements BaseSource { /** * Convenience method to update metrics when batch of operations has failed. */ - public void incrementFailedBatches(){ + public void incrementFailedBatches() { singleSourceSource.incrFailedBatches(); globalSourceSource.incrFailedBatches(); } - /** * Gets the number of edits not eligible for replication this source queue logs so far. * @return logEditsFiltered non-replicable edits filtered from this queue logs. */ - public long getEditsFiltered(){ + public long getEditsFiltered() { return this.singleSourceSource.getEditsFiltered(); } @@ -251,7 +241,7 @@ public class MetricsSource implements BaseSource { * Gets the number of edits eligible for replication read from this source queue logs so far. * @return replicableEdits total number of replicable edits read from this queue logs. */ - public long getReplicableEdits(){ + public long getReplicableEdits() { return this.singleSourceSource.getWALEditsRead() - this.singleSourceSource.getEditsFiltered(); } @@ -265,9 +255,8 @@ public class MetricsSource implements BaseSource { /** * Convience method to apply changes to metrics do to shipping a batch of logs. - * * @param batchSize the size of the batch that was shipped to sinks. - * @param hfiles total number of hfiles shipped to sinks. + * @param hfiles total number of hfiles shipped to sinks. */ public void shipBatch(long batchSize, int sizeInBytes, long hfiles) { shipBatch(batchSize, sizeInBytes); @@ -294,25 +283,22 @@ public class MetricsSource implements BaseSource { } /** - * Get AgeOfLastShippedOp - * @return AgeOfLastShippedOp + * Get AgeOfLastShippedOp n */ public Long getAgeOfLastShippedOp() { return singleSourceSource.getLastShippedAge(); } /** - * Get the sizeOfLogQueue - * @return sizeOfLogQueue + * Get the sizeOfLogQueue n */ public int getSizeOfLogQueue() { return singleSourceSource.getSizeOfLogQueue(); } /** - * Get the timeStampsOfLastShippedOp, if there are multiple groups, return the latest one - * @return lastTimestampForAge - * @deprecated Since 2.0.0. Removed in 3.0.0. + * Get the timeStampsOfLastShippedOp, if there are multiple groups, return the latest one n + * * @deprecated Since 2.0.0. Removed in 3.0.0. 
* @see #getTimestampOfLastShippedOp() */ @Deprecated @@ -320,18 +306,15 @@ public class MetricsSource implements BaseSource { return getTimestampOfLastShippedOp(); } - /** - * Get the value of uncleanlyClosedWAL counter - * @return uncleanlyClosedWAL + * Get the value of uncleanlyClosedWAL counter n */ public long getUncleanlyClosedWALs() { return singleSourceSource.getUncleanlyClosedWALs(); } /** - * Get the timestampsOfLastShippedOp, if there are multiple groups, return the latest one - * @return lastTimestampForAge + * Get the timestampsOfLastShippedOp, if there are multiple groups, return the latest one n */ public long getTimestampOfLastShippedOp() { long lastTimestamp = 0L; @@ -352,9 +335,9 @@ public class MetricsSource implements BaseSource { } /** - * TimeStamp of next edit targeted for replication. Used for calculating lag, - * as if this timestamp is greater than timestamp of last shipped, it means there's - * at least one edit pending replication. + * TimeStamp of next edit targeted for replication. Used for calculating lag, as if this timestamp + * is greater than timestamp of last shipped, it means there's at least one edit pending + * replication. * @param timeStampNextToReplicate timestamp of next edit in the queue that should be replicated. */ public void setTimeStampNextToReplicate(long timeStampNextToReplicate) { @@ -362,9 +345,9 @@ public class MetricsSource implements BaseSource { } public long getReplicationDelay() { - if(getTimestampOfLastShippedOp()>=timeStampNextToReplicate){ + if (getTimestampOfLastShippedOp() >= timeStampNextToReplicate) { return 0; - }else{ + } else { return EnvironmentEdgeManager.currentTime() - timeStampNextToReplicate; } } @@ -378,8 +361,7 @@ public class MetricsSource implements BaseSource { } /** - * Get the slave peer ID - * @return peerID + * Get the slave peer ID n */ public String getPeerID() { return id; @@ -440,8 +422,8 @@ public class MetricsSource implements BaseSource { } /* - Sets the age of oldest log file just for source. - */ + * Sets the age of oldest log file just for source. + */ public void setOldestWalAge(long age) { singleSourceSource.setOldestWalAge(age); } @@ -525,8 +507,7 @@ public class MetricsSource implements BaseSource { } /** - * Returns the amount of memory in bytes used in this RegionServer by edits pending replication. - * @return + * Returns the amount of memory in bytes used in this RegionServer by edits pending replication. 
n */ public long getWALReaderEditsBufferUsage() { return globalSourceSource.getWALReaderEditsBufferBytes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/NoopReplicationQueueStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/NoopReplicationQueueStorage.java index 4ad41fc6983..094851f059f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/NoopReplicationQueueStorage.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/NoopReplicationQueueStorage.java @@ -34,22 +34,27 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private class NoopReplicationQueueStorage implements ReplicationQueueStorage { - NoopReplicationQueueStorage() {} + NoopReplicationQueueStorage() { + } @Override - public void removeQueue(ServerName serverName, String queueId) throws ReplicationException {} + public void removeQueue(ServerName serverName, String queueId) throws ReplicationException { + } @Override public void addWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException {} + throws ReplicationException { + } @Override public void removeWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException { } + throws ReplicationException { + } @Override public void setWALPosition(ServerName serverName, String queueId, String fileName, long position, - Map lastSeqIds) throws ReplicationException {} + Map lastSeqIds) throws ReplicationException { + } @Override public long getLastSequenceId(String encodedRegionName, String peerId) @@ -59,14 +64,17 @@ class NoopReplicationQueueStorage implements ReplicationQueueStorage { @Override public void setLastSequenceIds(String peerId, Map lastSeqIds) - throws ReplicationException {} + throws ReplicationException { + } @Override - public void removeLastSequenceIds(String peerId) throws ReplicationException {} + public void removeLastSequenceIds(String peerId) throws ReplicationException { + } @Override public void removeLastSequenceIds(String peerId, List encodedRegionNames) - throws ReplicationException {} + throws ReplicationException { + } @Override public long getWALPosition(ServerName serverName, String queueId, String fileName) @@ -76,7 +84,7 @@ class NoopReplicationQueueStorage implements ReplicationQueueStorage { @Override public List getWALsInQueue(ServerName serverName, String queueId) - throws ReplicationException { + throws ReplicationException { return Collections.EMPTY_LIST; } @@ -92,8 +100,8 @@ class NoopReplicationQueueStorage implements ReplicationQueueStorage { } @Override - public void removeReplicatorIfQueueIsEmpty(ServerName serverName) - throws ReplicationException {} + public void removeReplicatorIfQueueIsEmpty(ServerName serverName) throws ReplicationException { + } @Override public List getListOfReplicators() throws ReplicationException { @@ -106,17 +114,21 @@ class NoopReplicationQueueStorage implements ReplicationQueueStorage { } @Override - public void addPeerToHFileRefs(String peerId) throws ReplicationException {} + public void addPeerToHFileRefs(String peerId) throws ReplicationException { + } @Override - public void removePeerFromHFileRefs(String peerId) throws ReplicationException {} + public void removePeerFromHFileRefs(String peerId) throws ReplicationException { + } @Override public void addHFileRefs(String peerId, List> pairs) - throws ReplicationException {} + throws ReplicationException { + } @Override - 
public void removeHFileRefs(String peerId, List files) throws ReplicationException {} + public void removeHFileRefs(String peerId, List files) throws ReplicationException { + } @Override public List getAllPeersFromHFileRefsQueue() throws ReplicationException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java index a96e86067ea..c4e57367173 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java index f12a6542222..429276806f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -118,9 +118,11 @@ public class PeerProcedureHandlerImpl implements PeerProcedureHandler { // disable it first and then enable it. PeerState newState = peers.refreshPeerState(peerId); // RS need to start work with the new replication config change - if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldConfig, newConfig) || - oldConfig.isSerial() != newConfig.isSerial() || - (oldState.equals(PeerState.ENABLED) && newState.equals(PeerState.DISABLED))) { + if ( + !ReplicationUtils.isNamespacesAndTableCFsEqual(oldConfig, newConfig) + || oldConfig.isSerial() != newConfig.isSerial() + || (oldState.equals(PeerState.ENABLED) && newState.equals(PeerState.DISABLED)) + ) { replicationSourceManager.refreshSources(peerId); } success = true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 516ccf14929..b615eed7d53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -37,8 +37,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Class that handles the recovered source of a replication stream, which is transfered from - * another dead region server. This will be closed when all logs are pushed to peer cluster. + * Class that handles the recovered source of a replication stream, which is transfered from another + * dead region server. This will be closed when all logs are pushed to peer cluster. 
*/ @InterfaceAudience.Private public class RecoveredReplicationSource extends ReplicationSource { @@ -49,9 +49,9 @@ public class RecoveredReplicationSource extends ReplicationSource { @Override public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, - String peerClusterZnode, UUID clusterId, WALFileLengthProvider walFileLengthProvider, - MetricsSource metrics) throws IOException { + ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, + String peerClusterZnode, UUID clusterId, WALFileLengthProvider walFileLengthProvider, + MetricsSource metrics) throws IOException { super.init(conf, fs, manager, queueStorage, replicationPeer, server, peerClusterZnode, clusterId, walFileLengthProvider, metrics); this.actualPeerId = this.replicationQueueInfo.getPeerId(); @@ -87,11 +87,10 @@ public class RecoveredReplicationSource extends ReplicationSource { LOG.info("NB dead servers : " + deadRegionServers.size()); final Path walDir = CommonFSUtils.getWALRootDir(conf); for (ServerName curDeadServerName : deadRegionServers) { - final Path deadRsDirectory = - new Path(walDir, AbstractFSWALProvider.getWALDirectoryName(curDeadServerName - .getServerName())); - Path[] locs = new Path[] { new Path(deadRsDirectory, path.getName()), new Path( - deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT), path.getName()) }; + final Path deadRsDirectory = new Path(walDir, + AbstractFSWALProvider.getWALDirectoryName(curDeadServerName.getServerName())); + Path[] locs = new Path[] { new Path(deadRsDirectory, path.getName()), + new Path(deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT), path.getName()) }; for (Path possibleLogLocation : locs) { LOG.info("Possible location " + possibleLogLocation.toUri().toString()); if (manager.getFs().exists(possibleLogLocation)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java index d9371c81394..471ca68f659 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,19 +27,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Used by a {@link RecoveredReplicationSource}. + * Used by a {@link RecoveredReplicationSource}. 
*/ @InterfaceAudience.Private public class RecoveredReplicationSourceShipper extends ReplicationSourceShipper { private static final Logger LOG = - LoggerFactory.getLogger(RecoveredReplicationSourceShipper.class); + LoggerFactory.getLogger(RecoveredReplicationSourceShipper.class); protected final RecoveredReplicationSource source; private final ReplicationQueueStorage replicationQueues; public RecoveredReplicationSourceShipper(Configuration conf, String walGroupId, - ReplicationSourceLogQueue logQueue, RecoveredReplicationSource source, - ReplicationQueueStorage queueStorage) { + ReplicationSourceLogQueue logQueue, RecoveredReplicationSource source, + ReplicationQueueStorage queueStorage) { super(conf, walGroupId, logQueue, source); this.source = source; this.replicationQueues = queueStorage; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java index 2beb9f56a0b..15dc5ca0725 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index d5ebbe999d2..21a2d51326a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; @@ -65,21 +64,20 @@ import org.apache.hadoop.hbase.wal.OutputSink; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; /** - * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint - * which receives the WAL edits from the WAL, and sends the edits to replicas - * of regions. 
+ * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint which receives the WAL + * edits from the WAL, and sends the edits to replicas of regions. */ @InterfaceAudience.Private public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { @@ -87,8 +85,8 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaReplicationEndpoint.class); // Can be configured differently than hbase.client.retries.number - private static String CLIENT_RETRIES_NUMBER - = "hbase.region.replica.replication.client.retries.number"; + private static String CLIENT_RETRIES_NUMBER = + "hbase.region.replica.replication.client.retries.number"; private Configuration conf; private ClusterConnection connection; @@ -128,11 +126,10 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { int numRetries = conf.getInt(CLIENT_RETRIES_NUMBER, defaultNumRetries); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries); - this.numWriterThreads = this.conf.getInt( - "hbase.region.replica.replication.writer.threads", 3); + this.numWriterThreads = this.conf.getInt("hbase.region.replica.replication.writer.threads", 3); controller = new PipelineController(); entryBuffers = new EntryBuffers(controller, - this.conf.getLong("hbase.region.replica.replication.buffersize", 128 * 1024 * 1024)); + this.conf.getLong("hbase.region.replica.replication.buffersize", 128 * 1024 * 1024)); // use the regular RPC timeout for replica replication RPC's this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, @@ -186,8 +183,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } /** - * Returns a Thread pool for the RPC's to region replicas. Similar to - * Connection's thread pool. + * Returns a Thread pool for the RPC's to region replicas. Similar to Connection's thread pool. */ private ExecutorService getDefaultThreadPool(Configuration conf) { int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256); @@ -196,9 +192,8 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60); LinkedBlockingQueue workQueue = - new LinkedBlockingQueue<>(maxThreads * - conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); + new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); ThreadPoolExecutor tpe = new ThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, new ThreadFactoryBuilder() @@ -210,44 +205,40 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { @Override public boolean replicate(ReplicateContext replicateContext) { - /* A note on batching in RegionReplicaReplicationEndpoint (RRRE): - * - * RRRE relies on batching from two different mechanisms. The first is the batching from - * ReplicationSource since RRRE is a ReplicationEndpoint driven by RS. RS reads from a single - * WAL file filling up a buffer of heap size "replication.source.size.capacity"(64MB) or at most - * "replication.source.nb.capacity" entries or until it sees the end of file (in live tailing). - * Then RS passes all the buffered edits in this replicate() call context. 
RRRE puts the edits - * to the WALSplitter.EntryBuffers which is a blocking buffer space of up to - * "hbase.region.replica.replication.buffersize" (128MB) in size. This buffer splits the edits - * based on regions. - * - * There are "hbase.region.replica.replication.writer.threads"(default 3) writer threads which - * pick largest per-region buffer and send it to the SinkWriter (see RegionReplicaOutputSink). - * The SinkWriter in this case will send the wal edits to all secondary region replicas in - * parallel via a retrying rpc call. EntryBuffers guarantees that while a buffer is - * being written to the sink, another buffer for the same region will not be made available to - * writers ensuring regions edits are not replayed out of order. - * - * The replicate() call won't return until all the buffers are sent and ack'd by the sinks so - * that the replication can assume all edits are persisted. We may be able to do a better - * pipelining between the replication thread and output sinks later if it becomes a bottleneck. + /* + * A note on batching in RegionReplicaReplicationEndpoint (RRRE): RRRE relies on batching from + * two different mechanisms. The first is the batching from ReplicationSource since RRRE is a + * ReplicationEndpoint driven by RS. RS reads from a single WAL file filling up a buffer of heap + * size "replication.source.size.capacity"(64MB) or at most "replication.source.nb.capacity" + * entries or until it sees the end of file (in live tailing). Then RS passes all the buffered + * edits in this replicate() call context. RRRE puts the edits to the WALSplitter.EntryBuffers + * which is a blocking buffer space of up to "hbase.region.replica.replication.buffersize" + * (128MB) in size. This buffer splits the edits based on regions. There are + * "hbase.region.replica.replication.writer.threads"(default 3) writer threads which pick + * largest per-region buffer and send it to the SinkWriter (see RegionReplicaOutputSink). The + * SinkWriter in this case will send the wal edits to all secondary region replicas in parallel + * via a retrying rpc call. EntryBuffers guarantees that while a buffer is being written to the + * sink, another buffer for the same region will not be made available to writers ensuring + * regions edits are not replayed out of order. The replicate() call won't return until all the + * buffers are sent and ack'd by the sinks so that the replication can assume all edits are + * persisted. We may be able to do a better pipelining between the replication thread and output + * sinks later if it becomes a bottleneck. 
*/ while (this.isRunning()) { try { - for (Entry entry: replicateContext.getEntries()) { + for (Entry entry : replicateContext.getEntries()) { entryBuffers.appendEntry(entry); } outputSink.flush(); // make sure everything is flushed - ctx.getMetrics().incrLogEditsFiltered( - outputSink.getSkippedEditsCounter().getAndSet(0)); + ctx.getMetrics().incrLogEditsFiltered(outputSink.getSkippedEditsCounter().getAndSet(0)); return true; } catch (InterruptedException e) { Thread.currentThread().interrupt(); return false; } catch (IOException e) { - LOG.warn("Received IOException while trying to replicate" - + StringUtils.stringifyException(e)); + LOG.warn( + "Received IOException while trying to replicate" + StringUtils.stringifyException(e)); outputSink.restartWriterThreadsIfNeeded(); } } @@ -272,11 +263,11 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { private final Cache memstoreReplicationEnabled; public RegionReplicaOutputSink(PipelineController controller, TableDescriptors tableDescriptors, - EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool, - int numWriters, int operationTimeout) { + EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool, int numWriters, + int operationTimeout) { super(controller, entryBuffers, numWriters); this.sinkWriter = - new RegionReplicaSinkWriter(this, connection, pool, operationTimeout, tableDescriptors); + new RegionReplicaSinkWriter(this, connection, pool, operationTimeout, tableDescriptors); this.tableDescriptors = tableDescriptors; // A cache for the table "memstore replication enabled" flag. @@ -287,9 +278,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { .getInt("hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000); this.memstoreReplicationEnabled = CacheBuilder.newBuilder() .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS) - .initialCapacity(10) - .maximumSize(1000) - .build(); + .initialCapacity(10).maximumSize(1000).build(); } @Override @@ -342,12 +331,12 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } /** - * returns true if the specified entry must be replicated. - * We should always replicate meta operations (e.g. flush) - * and use the user HTD flag to decide whether or not replicate the memstore. + * returns true if the specified entry must be replicated. We should always replicate meta + * operations (e.g. flush) and use the user HTD flag to decide whether or not replicate the + * memstore. 
*/ private boolean requiresReplication(final TableName tableName, final List entries) - throws IOException { + throws IOException { // unit-tests may not the TableDescriptors, bypass the check and always replicate if (tableDescriptors == null) return true; @@ -397,12 +386,12 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { TableDescriptors tableDescriptors; public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection, - ExecutorService pool, int operationTimeout, TableDescriptors tableDescriptors) { + ExecutorService pool, int operationTimeout, TableDescriptors tableDescriptors) { this.sink = sink; this.connection = connection; this.operationTimeout = operationTimeout; - this.rpcRetryingCallerFactory - = RpcRetryingCallerFactory.instantiate(connection.getConfiguration()); + this.rpcRetryingCallerFactory = + RpcRetryingCallerFactory.instantiate(connection.getConfiguration()); this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration()); this.pool = pool; this.tableDescriptors = tableDescriptors; @@ -413,14 +402,12 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { // table is created again with the same name, we might miss to replicate for that amount of // time. But this cache prevents overloading meta requests for every edit from a deleted file. disabledAndDroppedTables = CacheBuilder.newBuilder() - .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS) - .initialCapacity(10) - .maximumSize(1000) - .build(); + .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS).initialCapacity(10) + .maximumSize(1000).build(); } public void append(TableName tableName, byte[] encodedRegionName, byte[] row, - List entries) throws IOException { + List entries) throws IOException { if (disabledAndDroppedTables.getIfPresent(tableName) != null) { if (LOG.isTraceEnabled()) { @@ -442,11 +429,11 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { while (true) { // get the replicas of the primary region try { - locations = RegionReplicaReplayCallable - .getRegionLocations(connection, tableName, row, useCache, 0); + locations = + RegionReplicaReplayCallable.getRegionLocations(connection, tableName, row, useCache, 0); if (locations == null) { - throw new HBaseIOException("Cannot locate locations for " - + tableName + ", row:" + Bytes.toStringBinary(row)); + throw new HBaseIOException( + "Cannot locate locations for " + tableName + ", row:" + Bytes.toStringBinary(row)); } // Replicas can take a while to come online. The cache may have only the primary. If we // keep going to the cache, we will not learn of the replicas and their locations after @@ -477,8 +464,9 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { // check whether we should still replay this entry. If the regions are changed, or the // entry is not coming from the primary region, filter it out. 
HRegionLocation primaryLocation = locations.getDefaultRegionLocation(); - if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(), - encodedRegionName)) { + if ( + !Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(), encodedRegionName) + ) { if (useCache) { useCache = false; continue; // this will retry location lookup @@ -510,15 +498,15 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { HRegionLocation location = locations.getRegionLocation(replicaId); if (!RegionReplicaUtil.isDefaultReplica(replicaId)) { RegionInfo regionInfo = location == null - ? RegionReplicaUtil.getRegionInfoForReplica( - locations.getDefaultRegionLocation().getRegionInfo(), replicaId) - : location.getRegionInfo(); - RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection, - rpcControllerFactory, tableName, location, regionInfo, row, entries, - sink.getSkippedEditsCounter()); - Future task = pool.submit( - new RetryingRpcCallable<>(rpcRetryingCallerFactory, callable, operationTimeout)); - tasks.add(task); + ? RegionReplicaUtil.getRegionInfoForReplica( + locations.getDefaultRegionLocation().getRegionInfo(), replicaId) + : location.getRegionInfo(); + RegionReplicaReplayCallable callable = + new RegionReplicaReplayCallable(connection, rpcControllerFactory, tableName, location, + regionInfo, row, entries, sink.getSkippedEditsCounter()); + Future task = pool.submit( + new RetryingRpcCallable<>(rpcRetryingCallerFactory, callable, operationTimeout)); + tasks.add(task); } } @@ -538,23 +526,23 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { // RPC. So instead we start the replay RPC with retries and check whether the table is // dropped or disabled which might cause SocketTimeoutException, or // RetriesExhaustedException or similar if we get IOE. - if (cause instanceof TableNotFoundException - || connection.isTableDisabled(tableName)) { + if (cause instanceof TableNotFoundException || connection.isTableDisabled(tableName)) { disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later. 
canBeSkipped = true; } else if (tableDescriptors != null) { TableDescriptor tableDescriptor = tableDescriptors.get(tableName); - if (tableDescriptor != null - //(replicaId + 1) as no task is added for primary replica for replication - && tableDescriptor.getRegionReplication() <= (replicaId + 1)) { + if ( + tableDescriptor != null + // (replicaId + 1) as no task is added for primary replica for replication + && tableDescriptor.getRegionReplication() <= (replicaId + 1) + ) { canBeSkipped = true; } } if (canBeSkipped) { if (LOG.isTraceEnabled()) { LOG.trace("Skipping " + entries.size() + " entries in table " + tableName - + " because received exception for dropped or disabled table", - cause); + + " because received exception for dropped or disabled table", cause); for (Entry entry : entries) { LOG.trace("Skipping : " + entry); } @@ -567,7 +555,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } // otherwise rethrow - throw (IOException)cause; + throw (IOException) cause; } // unexpected exception throw new IOException(cause); @@ -580,15 +568,17 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { RpcRetryingCallerFactory factory; RetryingCallable callable; int timeout; + public RetryingRpcCallable(RpcRetryingCallerFactory factory, RetryingCallable callable, - int timeout) { + int timeout) { this.factory = factory; this.callable = callable; this.timeout = timeout; } + @Override public V call() throws Exception { - return factory.newCaller().callWithRetries(callable, timeout); + return factory. newCaller().callWithRetries(callable, timeout); } } @@ -596,16 +586,15 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { * Calls replay on the passed edits for the given set of entries belonging to the region. It skips * the entry if the region boundaries have changed or the region is gone. */ - static class RegionReplicaReplayCallable extends - RegionAdminServiceCallable { + static class RegionReplicaReplayCallable + extends RegionAdminServiceCallable { private final List entries; private final byte[] initialEncodedRegionName; private final AtomicLong skippedEntries; public RegionReplicaReplayCallable(ClusterConnection connection, - RpcControllerFactory rpcControllerFactory, TableName tableName, - HRegionLocation location, RegionInfo regionInfo, byte[] row,List entries, - AtomicLong skippedEntries) { + RpcControllerFactory rpcControllerFactory, TableName tableName, HRegionLocation location, + RegionInfo regionInfo, byte[] row, List entries, AtomicLong skippedEntries) { super(connection, rpcControllerFactory, location, tableName, row, regionInfo.getReplicaId()); this.entries = entries; this.skippedEntries = skippedEntries; @@ -618,8 +607,9 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { // entry is not coming form the primary region, filter it out because we do not need it. 
// Regions can change because of (1) region split (2) region merge (3) table recreated boolean skip = false; - if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(), - initialEncodedRegionName)) { + if ( + !Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(), initialEncodedRegionName) + ) { skip = true; } if (!this.entries.isEmpty() && !skip) { @@ -628,8 +618,8 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { // set the region name for the target region replica Pair p = - ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray, location - .getRegionInfo().getEncodedNameAsBytes(), null, null, null); + ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray, + location.getRegionInfo().getEncodedNameAsBytes(), null, null, null); controller.setCellScanner(p.getSecond()); return stub.replay(controller, p.getFirst()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 842955ec9a1..9859d21bdb6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -51,13 +51,13 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; + /** * Gateway to Replication. Used by {@link org.apache.hadoop.hbase.regionserver.HRegionServer}. */ @InterfaceAudience.Private public class Replication implements ReplicationSourceService, ReplicationSinkService { - private static final Logger LOG = - LoggerFactory.getLogger(Replication.class); + private static final Logger LOG = LoggerFactory.getLogger(Replication.class); private boolean isReplicationForBulkLoadDataEnabled; private ReplicationSourceManager replicationManager; private ReplicationQueueStorage queueStorage; @@ -83,7 +83,7 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, - WALFactory walFactory) throws IOException { + WALFactory walFactory) throws IOException { this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = @@ -91,22 +91,23 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer this.scheduleThreadPool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() .setNameFormat(server.getServerName().toShortString() + "Replication Statistics #%d") - .setDaemon(true) - .build()); + .setDaemon(true).build()); if (this.isReplicationForBulkLoadDataEnabled) { - if (conf.get(HConstants.REPLICATION_CLUSTER_ID) == null - || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty()) { - throw new IllegalArgumentException(HConstants.REPLICATION_CLUSTER_ID - + " cannot be null/empty when " + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY - + " is set to true."); + if ( + conf.get(HConstants.REPLICATION_CLUSTER_ID) == null + || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty() + ) { + throw new IllegalArgumentException( + HConstants.REPLICATION_CLUSTER_ID + " cannot be null/empty when " + + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY + " is set to true."); } } try { this.queueStorage = - 
ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf); + ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf); this.replicationPeers = - ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf); + ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf); this.replicationPeers.init(); } catch (Exception e) { throw new IOException("Failed replication handler create", e); @@ -118,18 +119,17 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer throw new IOException("Could not read cluster id", ke); } this.globalMetricsSource = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); + .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, conf, this.server, fs, logDir, oldLogDir, clusterId, walFactory, globalMetricsSource); // Get the user-space WAL provider - WALProvider walProvider = walFactory != null? walFactory.getWALProvider(): null; + WALProvider walProvider = walFactory != null ? walFactory.getWALProvider() : null; if (walProvider != null) { walProvider .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager)); } - this.statsThreadPeriod = - this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); - LOG.debug("Replication stats-in-log period={} seconds", this.statsThreadPeriod); + this.statsThreadPeriod = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + LOG.debug("Replication stats-in-log period={} seconds", this.statsThreadPeriod); this.replicationLoad = new ReplicationLoad(); this.peerProcedureHandler = new PeerProcedureHandlerImpl(replicationManager); @@ -161,28 +161,27 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer /** * Carry on the list of log entries down to the sink - * @param entries list of entries to replicate - * @param cells The data -- the cells -- that entries describes (the entries do not - * contain the Cells we are replicating; they are passed here on the side in this - * CellScanner). - * @param replicationClusterId Id which will uniquely identify source cluster FS client - * configurations in the replication configuration directory + * @param entries list of entries to replicate + * @param cells The data -- the cells -- that entries describes + * (the entries do not contain the Cells we are replicating; + * they are passed here on the side in this CellScanner). 
+ * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace - * directory required for replicating hfiles - * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory - * @throws IOException + * directory required for replicating hfiles + * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory + * n */ @Override public void replicateLogEntries(List entries, CellScanner cells, - String replicationClusterId, String sourceBaseNamespaceDirPath, - String sourceHFileArchiveDirPath) throws IOException { + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException { this.replicationSink.replicateEntries(entries, cells, replicationClusterId, sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath); } /** - * If replication is enabled and this cluster is a master, - * it starts + * If replication is enabled and this cluster is a master, it starts */ @Override public void startReplicationService() throws IOException { @@ -203,7 +202,7 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer } void addHFileRefsToQueue(TableName tableName, byte[] family, List> pairs) - throws IOException { + throws IOException { try { this.replicationManager.addHFileRefs(tableName, family, pairs); } catch (IOException e) { @@ -221,7 +220,7 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer private final ReplicationSourceManager replicationManager; public ReplicationStatisticsTask(ReplicationSink replicationSink, - ReplicationSourceManager replicationManager) { + ReplicationSourceManager replicationManager) { this.replicationManager = replicationManager; this.replicationSink = replicationSink; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java index e011e0af737..6cfe61f3943 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -7,23 +7,20 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.replication.regionserver; +import java.util.ArrayList; import java.util.Date; import java.util.List; -import java.util.ArrayList; - import org.apache.hadoop.hbase.util.Strings; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; @@ -48,17 +45,16 @@ public class ReplicationLoad { /** * buildReplicationLoad - * @param sources List of ReplicationSource instances for which metrics should be reported - * @param skMetrics + * @param sources List of ReplicationSource instances for which metrics should be reported n */ public void buildReplicationLoad(final List sources, - final MetricsSink skMetrics) { + final MetricsSink skMetrics) { this.sinkMetrics = skMetrics; // build the SinkLoad ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = - ClusterStatusProtos.ReplicationLoadSink.newBuilder(); + ClusterStatusProtos.ReplicationLoadSink.newBuilder(); rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp()); rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimestampOfLastAppliedOp()); rLoadSinkBuild.setTimestampStarted(sinkMetrics.getStartTimestamp()); @@ -81,7 +77,7 @@ public class ReplicationLoad { long timeStampOfNextToReplicate = sm.getTimeStampNextToReplicate(); long replicationLag = sm.getReplicationDelay(); ClusterStatusProtos.ReplicationLoadSource.Builder rLoadSourceBuild = - ClusterStatusProtos.ReplicationLoadSource.newBuilder(); + ClusterStatusProtos.ReplicationLoadSource.newBuilder(); rLoadSourceBuild.setPeerID(peerId); rLoadSourceBuild.setAgeOfLastShippedOp(ageOfLastShippedOp); rLoadSourceBuild.setSizeOfLogQueue(sizeOfLogQueue); @@ -90,12 +86,12 @@ public class ReplicationLoad { rLoadSourceBuild.setTimeStampOfNextToReplicate(timeStampOfNextToReplicate); rLoadSourceBuild.setEditsRead(editsRead); rLoadSourceBuild.setOPsShipped(oPsShipped); - if (source instanceof ReplicationSource){ - ReplicationSource replSource = (ReplicationSource)source; + if (source instanceof ReplicationSource) { + ReplicationSource replSource = (ReplicationSource) source; rLoadSourceBuild.setRecovered(replSource.getReplicationQueueInfo().isQueueRecovered()); rLoadSourceBuild.setQueueId(replSource.getReplicationQueueInfo().getQueueId()); rLoadSourceBuild.setRunning(replSource.isWorkerRunning()); - rLoadSourceBuild.setEditsSinceRestart(timeStampOfNextToReplicate>0); + rLoadSourceBuild.setEditsSinceRestart(timeStampOfNextToReplicate > 0); } this.replicationLoadSourceEntries.add(rLoadSourceBuild.build()); @@ -109,15 +105,13 @@ public class ReplicationLoad { public String sourceToString() { StringBuilder sb = new StringBuilder(); - for (ClusterStatusProtos.ReplicationLoadSource rls : - this.replicationLoadSourceEntries) { + for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceEntries) { sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID()); sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp()); sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue()); - sb = - Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp", - (new Date(rls.getTimeStampOfLastShippedOp()).toString())); + sb = Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp", + (new Date(rls.getTimeStampOfLastShippedOp()).toString())); sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag()); } @@ -132,12 +126,10 @@ public class ReplicationLoad { if (this.replicationLoadSink == null) return null; StringBuilder 
sb = new StringBuilder(); - sb = - Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", - this.replicationLoadSink.getAgeOfLastAppliedOp()); - sb = - Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp", - (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); + sb = Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", + this.replicationLoadSink.getAgeOfLastAppliedOp()); + sb = Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp", + (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); return sb.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java index b7e437f4624..541021f4d5d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; import java.util.List; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -53,24 +50,26 @@ public class ReplicationObserver implements RegionCoprocessor, RegionObserver { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="NPE should never happen; if it does it is a bigger issue") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "NPE should never happen; if it does it is a bigger issue") public void preCommitStoreFile(final ObserverContext ctx, - final byte[] family, final List> pairs) throws IOException { + final byte[] family, final List> pairs) throws IOException { RegionCoprocessorEnvironment env = ctx.getEnvironment(); Configuration c = env.getConfiguration(); - if (pairs == null || pairs.isEmpty() || - !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) { + if ( + pairs == null || pairs.isEmpty() + || !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT) + ) { LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded " - + "data replication."); + + "data replication."); return; } // This is completely cheating AND getting a HRegionServer from a RegionServerEnvironment is // just going to break. This is all private. Not allowed. Regions shouldn't assume they are // hosted in a RegionServer. TODO: fix. 
- RegionServerServices rss = ((HasRegionServerServices)env).getRegionServerServices(); - Replication rep = (Replication)((HRegionServer)rss).getReplicationSourceService(); + RegionServerServices rss = ((HasRegionServerServices) env).getRegionServerServices(); + Replication rep = (Replication) ((HRegionServer) rss).getReplicationSourceService(); rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java index 81ec0d9129c..bbf03620fff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** * This exception is thrown when a replication source is terminated and source threads got - * interrupted. - * - * It is inherited from RuntimeException so that it can skip all the following processing logic - * and be propagated to the most top level and handled there. + * interrupted. It is inherited from RuntimeException so that it can skip all the following + * processing logic and be propagated to the most top level and handled there. */ @InterfaceAudience.Private public class ReplicationRuntimeException extends RuntimeException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 34fbb55b1b7..616dcfeb8cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,9 +39,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; @@ -54,7 +50,12 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; @@ -62,17 +63,17 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescript /** *

- * This class is responsible for replicating the edits coming
- * from another cluster.
- *
- * This replication process is currently waiting for the edits to be applied
- * before the method can return. This means that the replication of edits
- * is synchronized (after reading from WALs in ReplicationSource) and that a
- * single region server cannot receive edits from two sources at the same time
- *
+ * This class is responsible for replicating the edits coming from another cluster.
+ *
+ *
+ * This replication process is currently waiting for the edits to be applied before the method can
+ * return. This means that the replication of edits is synchronized (after reading from WALs in
+ * ReplicationSource) and that a single region server cannot receive edits from two sources at the
+ * same time
+ *
+ *
 * This class uses the native HBase client in order to replicate entries.
 *
      - * * TODO make this class more like ReplicationSource wrt log handling */ @InterfaceAudience.Private @@ -101,34 +102,33 @@ public class ReplicationSink { * @param conf conf object * @throws IOException thrown when HDFS goes bad or bad file name */ - public ReplicationSink(Configuration conf) - throws IOException { + public ReplicationSink(Configuration conf) throws IOException { this.conf = HBaseConfiguration.create(conf); - rowSizeWarnThreshold = conf.getInt( - HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); + rowSizeWarnThreshold = + conf.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); decorateConf(); this.metrics = new MetricsSink(); this.walEntrySinkFilter = setupWALEntrySinkFilter(); - String className = - conf.get("hbase.replication.source.fs.conf.provider", - DefaultSourceFSConfigurationProvider.class.getCanonicalName()); + String className = conf.get("hbase.replication.source.fs.conf.provider", + DefaultSourceFSConfigurationProvider.class.getCanonicalName()); try { @SuppressWarnings("rawtypes") Class c = Class.forName(className); this.provider = (SourceFSConfigurationProvider) c.getDeclaredConstructor().newInstance(); } catch (Exception e) { - throw new IllegalArgumentException("Configured source fs configuration provider class " - + className + " throws error.", e); + throw new IllegalArgumentException( + "Configured source fs configuration provider class " + className + " throws error.", e); } } private WALEntrySinkFilter setupWALEntrySinkFilter() throws IOException { Class walEntryFilterClass = - this.conf.getClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, null); + this.conf.getClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, null); WALEntrySinkFilter filter = null; try { - filter = walEntryFilterClass == null? null: - (WALEntrySinkFilter)walEntryFilterClass.getDeclaredConstructor().newInstance(); + filter = walEntryFilterClass == null + ? null + : (WALEntrySinkFilter) walEntryFilterClass.getDeclaredConstructor().newInstance(); } catch (Exception e) { LOG.warn("Failed to instantiate " + walEntryFilterClass); } @@ -139,14 +139,14 @@ public class ReplicationSink { } /** - * decorate the Configuration object to make replication more receptive to delays: - * lessen the timeout and numTries. + * decorate the Configuration object to make replication more receptive to delays: lessen the + * timeout and numTries. */ private void decorateConf() { this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - this.conf.getInt("replication.sink.client.retries.number", 4)); + this.conf.getInt("replication.sink.client.retries.number", 4)); this.conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - this.conf.getInt("replication.sink.client.ops.timeout", 10000)); + this.conf.getInt("replication.sink.client.ops.timeout", 10000)); String replicationCodec = this.conf.get(HConstants.REPLICATION_CODEC_CONF_KEY); if (StringUtils.isNotEmpty(replicationCodec)) { this.conf.set(HConstants.RPC_CODEC_CONF_KEY, replicationCodec); @@ -155,21 +155,21 @@ public class ReplicationSink { if (this.conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) { this.conf.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM); } - } + } /** * Replicate this array of entries directly into the local cluster using the native client. Only * operates against raw protobuf type saving on a conversion from pb to pojo. 
- * @param replicationClusterId Id which will uniquely identify source cluster FS client - * configurations in the replication configuration directory + * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace - * directory - * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory + * directory + * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory * @throws IOException If failed to replicate the data */ public void replicateEntries(List entries, final CellScanner cells, - String replicationClusterId, String sourceBaseNamespaceDirPath, - String sourceHFileArchiveDirPath) throws IOException { + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException { if (entries.isEmpty()) return; // Very simple optimization where we batch sequences of rows going // to the same table. @@ -181,8 +181,7 @@ public class ReplicationSink { Map, Map>>>> bulkLoadsPerClusters = null; for (WALEntry entry : entries) { - TableName table = - TableName.valueOf(entry.getKey().getTableName().toByteArray()); + TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray()); if (this.walEntrySinkFilter != null) { if (this.walEntrySinkFilter.filter(table, entry.getKey().getWriteTime())) { // Skip Cells in CellScanner associated with this entry. @@ -210,7 +209,7 @@ public class ReplicationSink { // Handle bulk load hfiles replication if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell); - if(bld.getReplicate()) { + if (bld.getReplicate()) { if (bulkLoadsPerClusters == null) { bulkLoadsPerClusters = new HashMap<>(); } @@ -224,10 +223,9 @@ public class ReplicationSink { // Handle wal replication if (isNewRowOrType(previousCell, cell)) { // Create new mutation - mutation = - CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()); + mutation = CellUtil.isDelete(cell) + ? 
new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); List clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size()); for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) { clusterIds.add(toUUID(clusterId)); @@ -255,16 +253,16 @@ public class ReplicationSink { LOG.debug("Finished replicating mutations."); } - if(bulkLoadsPerClusters != null) { - for (Entry, Map>>>> entry : bulkLoadsPerClusters.entrySet()) { + if (bulkLoadsPerClusters != null) { + for (Entry, + Map>>>> entry : bulkLoadsPerClusters.entrySet()) { Map>>> bulkLoadHFileMap = entry.getValue(); if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) { LOG.debug("Replicating {} bulk loaded data", entry.getKey().toString()); Configuration providerConf = this.provider.getConf(this.conf, replicationClusterId); try (HFileReplicator hFileReplicator = new HFileReplicator(providerConf, - sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, - getConnection(), entry.getKey())) { + sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, + getConnection(), entry.getKey())) { hFileReplicator.replicate(); LOG.debug("Finished replicating {} bulk loaded data", entry.getKey().toString()); } @@ -284,8 +282,8 @@ public class ReplicationSink { } private void buildBulkLoadHFileMap( - final Map>>> bulkLoadHFileMap, TableName table, - BulkLoadDescriptor bld) throws IOException { + final Map>>> bulkLoadHFileMap, TableName table, + BulkLoadDescriptor bld) throws IOException { List storesList = bld.getStoresList(); int storesSize = storesList.size(); for (int j = 0; j < storesSize; j++) { @@ -302,7 +300,7 @@ public class ReplicationSink { List>> familyHFilePathsList = bulkLoadHFileMap.get(tableName); if (familyHFilePathsList != null) { boolean foundFamily = false; - for (Pair> familyHFilePathsPair : familyHFilePathsList) { + for (Pair> familyHFilePathsPair : familyHFilePathsList) { if (Bytes.equals(familyHFilePathsPair.getFirst(), family)) { // Found family already present, just add the path to the existing list familyHFilePathsPair.getSecond().add(pathToHfileFromNS); @@ -323,15 +321,15 @@ public class ReplicationSink { } private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS, - List>> familyHFilePathsList) { + List>> familyHFilePathsList) { List hfilePaths = new ArrayList<>(1); hfilePaths.add(pathToHfileFromNS); familyHFilePathsList.add(new Pair<>(family, hfilePaths)); } private void addNewTableEntryInMap( - final Map>>> bulkLoadHFileMap, byte[] family, - String pathToHfileFromNS, String tableName) { + final Map>>> bulkLoadHFileMap, byte[] family, + String pathToHfileFromNS, String tableName) { List hfilePaths = new ArrayList<>(1); hfilePaths.add(pathToHfileFromNS); Pair> newFamilyHFilePathsPair = new Pair<>(family, hfilePaths); @@ -341,21 +339,19 @@ public class ReplicationSink { } private String getHFilePath(TableName table, BulkLoadDescriptor bld, String storeFile, - byte[] family) { + byte[] family) { return new StringBuilder(100).append(table.getNamespaceAsString()).append(Path.SEPARATOR) - .append(table.getQualifierAsString()).append(Path.SEPARATOR) - .append(Bytes.toString(bld.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR) - .append(Bytes.toString(family)).append(Path.SEPARATOR).append(storeFile).toString(); + .append(table.getQualifierAsString()).append(Path.SEPARATOR) + 
.append(Bytes.toString(bld.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR) + .append(Bytes.toString(family)).append(Path.SEPARATOR).append(storeFile).toString(); } /** - * @param previousCell - * @param cell - * @return True if we have crossed over onto a new row or type + * nn * @return True if we have crossed over onto a new row or type */ private boolean isNewRowOrType(final Cell previousCell, final Cell cell) { - return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() || - !CellUtil.matchingRows(previousCell, cell); + return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() + || !CellUtil.matchingRows(previousCell, cell); } private java.util.UUID toUUID(final HBaseProtos.UUID uuid) { @@ -363,16 +359,11 @@ public class ReplicationSink { } /** - * Simple helper to a map from key to (a list of) values - * TODO: Make a general utility method - * @param map - * @param key1 - * @param key2 - * @param value - * @return the list of values corresponding to key1 and key2 + * Simple helper to a map from key to (a list of) values TODO: Make a general utility method nnnn + * * @return the list of values corresponding to key1 and key2 */ private List addToHashMultiMap(Map>> map, K1 key1, K2 key2, - V value) { + V value) { Map> innerMap = map.computeIfAbsent(key1, k -> new HashMap<>()); List values = innerMap.computeIfAbsent(key2, k -> new ArrayList<>()); values.add(value); @@ -397,15 +388,14 @@ public class ReplicationSink { } } - /** * Do the changes and handle the pool - * @param tableName table to insert into - * @param allRows list of actions + * @param tableName table to insert into + * @param allRows list of actions * @param batchRowSizeThreshold rowSize threshold for batch mutation */ private void batch(TableName tableName, Collection> allRows, int batchRowSizeThreshold) - throws IOException { + throws IOException { if (allRows.isEmpty()) { return; } @@ -420,7 +410,7 @@ public class ReplicationSink { } else { batchRows = Collections.singletonList(rows); } - for(List rowList:batchRows){ + for (List rowList : batchRows) { table.batch(rowList, null); } } @@ -456,18 +446,18 @@ public class ReplicationSink { /** * Get a string representation of this sink's metrics - * @return string with the total replicated edits count and the date - * of the last edit that was applied + * @return string with the total replicated edits count and the date of the last edit that was + * applied */ public String getStats() { - return this.totalReplicatedEdits.get() == 0 ? "" : "Sink: " + - "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + - ", total replicated edits: " + this.totalReplicatedEdits; + return this.totalReplicatedEdits.get() == 0 + ? 
"" + : "Sink: " + "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + + ", total replicated edits: " + this.totalReplicatedEdits; } /** - * Get replication Sink Metrics - * @return MetricsSink + * Get replication Sink Metrics n */ public MetricsSink getSinkMetrics() { return this.metrics; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java index 01cfe88bcd2..6c34cd0a5d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java @@ -38,9 +38,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; /** - * Maintains a collection of peers to replicate to, and randomly selects a - * single peer to replicate to per set of data to replicate. Also handles - * keeping track of peer availability. + * Maintains a collection of peers to replicate to, and randomly selects a single peer to replicate + * to per set of data to replicate. Also handles keeping track of peer availability. */ @InterfaceAudience.Private public class ReplicationSinkManager { @@ -48,19 +47,17 @@ public class ReplicationSinkManager { private static final Logger LOG = LoggerFactory.getLogger(ReplicationSinkManager.class); /** - * Default maximum number of times a replication sink can be reported as bad before - * it will no longer be provided as a sink for replication without the pool of - * replication sinks being refreshed. + * Default maximum number of times a replication sink can be reported as bad before it will no + * longer be provided as a sink for replication without the pool of replication sinks being + * refreshed. */ static final int DEFAULT_BAD_SINK_THRESHOLD = 3; /** - * Default ratio of the total number of peer cluster region servers to consider - * replicating to. + * Default ratio of the total number of peer cluster region servers to consider replicating to. */ static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; - private final Connection conn; private final String peerClusterId; @@ -87,27 +84,26 @@ public class ReplicationSinkManager { /** * Instantiate for a single replication peer cluster. 
- * @param conn connection to the peer cluster + * @param conn connection to the peer cluster * @param peerClusterId identifier of the peer cluster - * @param endpoint replication endpoint for inter cluster replication - * @param conf HBase configuration, used for determining replication source ratio and bad peer - * threshold + * @param endpoint replication endpoint for inter cluster replication + * @param conf HBase configuration, used for determining replication source ratio and bad + * peer threshold */ public ReplicationSinkManager(ClusterConnection conn, String peerClusterId, - HBaseReplicationEndpoint endpoint, Configuration conf) { + HBaseReplicationEndpoint endpoint, Configuration conf) { this.conn = conn; this.peerClusterId = peerClusterId; this.endpoint = endpoint; this.badReportCounts = Maps.newHashMap(); this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); - this.badSinkThreshold = conf.getInt("replication.bad.sink.threshold", - DEFAULT_BAD_SINK_THRESHOLD); + this.badSinkThreshold = + conf.getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); this.random = new Random(); } /** * Get a randomly-chosen replication sink to replicate to. - * * @return a replication sink to replicate to */ public synchronized SinkPeer getReplicationSink() throws IOException { @@ -124,18 +120,15 @@ public class ReplicationSinkManager { } /** - * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it - * failed). If a single SinkPeer is reported as bad more than - * replication.bad.sink.threshold times, it will be removed - * from the pool of potential replication targets. - * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it + * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it failed). If a single + * SinkPeer is reported as bad more than replication.bad.sink.threshold times, it will be removed + * from the pool of potential replication targets. n * The SinkPeer that had a failed replication + * attempt on it */ public synchronized void reportBadSink(SinkPeer sinkPeer) { ServerName serverName = sinkPeer.getServerName(); - int badReportCount = (badReportCounts.containsKey(serverName) - ? badReportCounts.get(serverName) : 0) + 1; + int badReportCount = + (badReportCounts.containsKey(serverName) ? badReportCounts.get(serverName) : 0) + 1; badReportCounts.put(serverName, badReportCount); if (badReportCount > badSinkThreshold) { this.sinks.remove(serverName); @@ -146,10 +139,8 @@ public class ReplicationSinkManager { } /** - * Report that a {@code SinkPeer} successfully replicated a chunk of data. - * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it + * Report that a {@code SinkPeer} successfully replicated a chunk of data. n * The SinkPeer that + * had a failed replication attempt on it */ public synchronized void reportSinkSuccess(SinkPeer sinkPeer) { badReportCounts.remove(sinkPeer.getServerName()); @@ -160,7 +151,7 @@ public class ReplicationSinkManager { */ public synchronized void chooseSinks() { List slaveAddresses = endpoint.getRegionServers(); - if(slaveAddresses.isEmpty()){ + if (slaveAddresses.isEmpty()) { LOG.warn("No sinks available at peer. Will not be able to replicate"); } Collections.shuffle(slaveAddresses, random); @@ -179,8 +170,7 @@ public class ReplicationSinkManager { } /** - * Wraps a replication region server sink to provide the ability to identify - * it. 
+ * Wraps a replication region server sink to provide the ability to identify it. */ public static class SinkPeer { private ServerName serverName; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 5ac86970c9d..3d1746edd8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.replication.regionserver; import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.findArchivedLog; + import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.InvocationTargetException; @@ -65,17 +66,17 @@ import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Class that handles the source of a replication stream. - * Currently does not handle more than 1 slave cluster. - * For each slave cluster it selects a random number of peers - * using a replication ratio. For example, if replication ration = 0.1 - * and slave cluster has 100 region servers, 10 will be selected. + * Class that handles the source of a replication stream. Currently does not handle more than 1 + * slave cluster. For each slave cluster it selects a random number of peers using a replication + * ratio. For example, if replication ration = 0.1 and slave cluster has 100 region servers, 10 will + * be selected. *
<p>
      - * A stream is considered down when we cannot contact a region server on the - * peer cluster for more than 55 seconds by default. + * A stream is considered down when we cannot contact a region server on the peer cluster for more + * than 55 seconds by default. *
</p>
      */ @InterfaceAudience.Private @@ -116,13 +117,13 @@ public class ReplicationSource implements ReplicationSourceInterface { private volatile ReplicationEndpoint replicationEndpoint; private boolean abortOnError; - //This is needed for the startup loop to identify when there's already - //an initialization happening (but not finished yet), - //so that it doesn't try submit another initialize thread. - //NOTE: this should only be set to false at the end of initialize method, prior to return. + // This is needed for the startup loop to identify when there's already + // an initialization happening (but not finished yet), + // so that it doesn't try submit another initialize thread. + // NOTE: this should only be set to false at the end of initialize method, prior to return. private AtomicBoolean startupOngoing = new AtomicBoolean(false); - //Flag that signalizes uncaught error happening while starting up the source - //and a retry should be attempted + // Flag that signalizes uncaught error happening while starting up the source + // and a retry should be attempted private AtomicBoolean retryStartup = new AtomicBoolean(false); /** @@ -136,7 +137,7 @@ public class ReplicationSource implements ReplicationSourceInterface { private long currentBandwidth; private WALFileLengthProvider walFileLengthProvider; protected final ConcurrentHashMap workerThreads = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private AtomicLong totalBufferUsed; @@ -148,17 +149,17 @@ public class ReplicationSource implements ReplicationSourceInterface { private Thread initThread; /** - * WALs to replicate. - * Predicate that returns 'true' for WALs to replicate and false for WALs to skip. + * WALs to replicate. Predicate that returns 'true' for WALs to replicate and false for WALs to + * skip. */ private final Predicate filterInWALs; /** - * Base WALEntry filters for this class. Unmodifiable. Set on construction. - * Filters *out* edits we do not want replicated, passed on to replication endpoints. - * This is the basic set. Down in #initializeWALEntryFilter this set is added to the end of - * the WALEntry filter chain. These are put after those that we pick up from the configured - * endpoints and other machinations to create the final {@link #walEntryFilter}. + * Base WALEntry filters for this class. Unmodifiable. Set on construction. Filters *out* edits we + * do not want replicated, passed on to replication endpoints. This is the basic set. Down in + * #initializeWALEntryFilter this set is added to the end of the WALEntry filter chain. These are + * put after those that we pick up from the configured endpoints and other machinations to create + * the final {@link #walEntryFilter}. * @see WALEntryFilter */ private final List baseFilterOutWALEntries; @@ -170,10 +171,11 @@ public class ReplicationSource implements ReplicationSourceInterface { } /** - * @param replicateWAL Pass a filter to run against WAL Path; filter *in* WALs to Replicate; - * i.e. return 'true' if you want to replicate the content of the WAL. + * @param replicateWAL Pass a filter to run against WAL Path; filter *in* WALs to + * Replicate; i.e. return 'true' if you want to replicate the + * content of the WAL. * @param baseFilterOutWALEntries Base set of filters you want applied always; filters *out* - * WALEntries so they never make it out of this ReplicationSource. + * WALEntries so they never make it out of this ReplicationSource. 
*/ ReplicationSource(Predicate replicateWAL, List baseFilterOutWALEntries) { this.filterInWALs = replicateWAL; @@ -182,19 +184,19 @@ public class ReplicationSource implements ReplicationSourceInterface { /** * Instantiation method used by region servers - * @param conf configuration to use - * @param fs file system to use - * @param manager replication manager to ping to - * @param server the server for this region server - * @param queueId the id of our replication queue + * @param conf configuration to use + * @param fs file system to use + * @param manager replication manager to ping to + * @param server the server for this region server + * @param queueId the id of our replication queue * @param clusterId unique UUID for the cluster - * @param metrics metrics for replication source + * @param metrics metrics for replication source */ @Override public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, - String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, - MetricsSource metrics) throws IOException { + ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, + String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, + MetricsSource metrics) throws IOException { this.server = server; this.conf = HBaseConfiguration.create(conf); this.waitOnEndpointSeconds = @@ -225,8 +227,7 @@ public class ReplicationSource implements ReplicationSourceInterface { this.totalBufferUsed = manager.getTotalBufferUsed(); this.walFileLengthProvider = walFileLengthProvider; - this.abortOnError = this.conf.getBoolean("replication.source.regionserver.abort", - true); + this.abortOnError = this.conf.getBoolean("replication.source.regionserver.abort", true); LOG.info("queueId={}, ReplicationSource: {}, currentBandwidth={}", queueId, replicationPeer.getId(), this.currentBandwidth); @@ -270,7 +271,7 @@ public class ReplicationSource implements ReplicationSourceInterface { @Override public void addHFileRefs(TableName tableName, byte[] family, List> pairs) - throws ReplicationException { + throws ReplicationException { String peerId = replicationPeer.getId(); if (replicationPeer.getPeerConfig().needToReplicate(tableName, family)) { this.queueStorage.addHFileRefs(peerId, pairs); @@ -282,7 +283,7 @@ public class ReplicationSource implements ReplicationSourceInterface { } private ReplicationEndpoint createReplicationEndpoint() - throws InstantiationException, IllegalAccessException, ClassNotFoundException, IOException { + throws InstantiationException, IllegalAccessException, ClassNotFoundException, IOException { RegionServerCoprocessorHost rsServerHost = null; if (server instanceof HRegionServer) { rsServerHost = ((HRegionServer) server).getRegionServerCoprocessorHost(); @@ -296,9 +297,7 @@ public class ReplicationSource implements ReplicationSourceInterface { } else { try { replicationEndpoint = Class.forName(replicationEndpointImpl) - .asSubclass(ReplicationEndpoint.class) - .getDeclaredConstructor() - .newInstance(); + .asSubclass(ReplicationEndpoint.class).getDeclaredConstructor().newInstance(); } catch (NoSuchMethodException | InvocationTargetException e) { throw new IllegalArgumentException(e); } @@ -315,14 +314,14 @@ public class ReplicationSource implements ReplicationSourceInterface { } private void initAndStartReplicationEndpoint(ReplicationEndpoint replicationEndpoint) - throws IOException, TimeoutException { + throws 
IOException, TimeoutException { TableDescriptors tableDescriptors = null; if (server instanceof HRegionServer) { tableDescriptors = ((HRegionServer) server).getTableDescriptors(); } replicationEndpoint - .init(new ReplicationEndpoint.Context(conf, replicationPeer.getConfiguration(), fs, peerId, - clusterId, replicationPeer, metrics, tableDescriptors, server)); + .init(new ReplicationEndpoint.Context(conf, replicationPeer.getConfiguration(), fs, peerId, + clusterId, replicationPeer, metrics, tableDescriptors, server)); replicationEndpoint.start(); replicationEndpoint.awaitRunning(waitOnEndpointSeconds, TimeUnit.SECONDS); } @@ -347,13 +346,13 @@ public class ReplicationSource implements ReplicationSourceInterface { LOG.debug("{} starting shipping worker for walGroupId={}", logPeerId(), walGroupId); ReplicationSourceShipper worker = createNewShipper(walGroupId); ReplicationSourceWALReader walReader = - createNewWALReader(walGroupId, worker.getStartPosition()); + createNewWALReader(walGroupId, worker.getStartPosition()); Threads.setDaemonThreadRunning( - walReader, Thread.currentThread().getName() - + ".replicationSource.wal-reader." + walGroupId + "," + queueId, - (t,e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); + walReader, Thread.currentThread().getName() + ".replicationSource.wal-reader." + + walGroupId + "," + queueId, + (t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); worker.setWALReader(walReader); - worker.startup((t,e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); + worker.startup((t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); return worker; } }); @@ -382,14 +381,10 @@ public class ReplicationSource implements ReplicationSourceInterface { LOG.warn("{} No replication ongoing, waiting for new log", logPeerId()); } ReplicationStatus.ReplicationStatusBuilder statusBuilder = ReplicationStatus.newBuilder(); - statusBuilder.withPeerId(this.getPeerId()) - .withQueueSize(queueSize) - .withWalGroup(walGroupId) - .withCurrentPath(currentPath) - .withCurrentPosition(shipper.getCurrentPosition()) - .withFileSize(fileSize) - .withAgeOfLastShippedOp(ageOfLastShippedOp) - .withReplicationDelay(replicationDelay); + statusBuilder.withPeerId(this.getPeerId()).withQueueSize(queueSize).withWalGroup(walGroupId) + .withCurrentPath(currentPath).withCurrentPosition(shipper.getCurrentPosition()) + .withFileSize(fileSize).withAgeOfLastShippedOp(ageOfLastShippedOp) + .withReplicationDelay(replicationDelay); sourceReplicationStatus.put(this.getPeerId() + "=>" + walGroupId, statusBuilder.build()); } return sourceReplicationStatus; @@ -417,9 +412,9 @@ public class ReplicationSource implements ReplicationSourceInterface { private ReplicationSourceWALReader createNewWALReader(String walGroupId, long startPosition) { return replicationPeer.getPeerConfig().isSerial() ? 
new SerialReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, - this, walGroupId) - : new ReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, - this, walGroupId); + this, walGroupId) + : new ReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, this, + walGroupId); } /** @@ -430,15 +425,14 @@ public class ReplicationSource implements ReplicationSourceInterface { return walEntryFilter; } - protected final void uncaughtException(Thread t, Throwable e, - ReplicationSourceManager manager, String peerId) { + protected final void uncaughtException(Thread t, Throwable e, ReplicationSourceManager manager, + String peerId) { RSRpcServices.exitIfOOME(e); - LOG.error("Unexpected exception in {} currentPath={}", - t.getName(), getCurrentPath(), e); - if(abortOnError){ + LOG.error("Unexpected exception in {} currentPath={}", t.getName(), getCurrentPath(), e); + if (abortOnError) { server.abort("Unexpected exception in " + t.getName(), e); } - if(manager != null){ + if (manager != null) { while (true) { try { LOG.info("Refreshing replication sources now due to previous error on thread: {}", @@ -447,8 +441,7 @@ public class ReplicationSource implements ReplicationSourceInterface { break; } catch (IOException e1) { LOG.error("Replication sources refresh failed.", e1); - sleepForRetries("Sleeping before try refreshing sources again", - maxRetriesMultiplier); + sleepForRetries("Sleeping before try refreshing sources again", maxRetriesMultiplier); } } } @@ -485,8 +478,8 @@ public class ReplicationSource implements ReplicationSourceInterface { if (peerBandwidth != currentBandwidth) { currentBandwidth = peerBandwidth; throttler.setBandwidth((double) currentBandwidth / 10.0); - LOG.info("ReplicationSource : " + peerId - + " bandwidth throttling changed, currentBandWidth=" + currentBandwidth); + LOG.info("ReplicationSource : " + peerId + " bandwidth throttling changed, currentBandWidth=" + + currentBandwidth); } } @@ -498,19 +491,19 @@ public class ReplicationSource implements ReplicationSourceInterface { /** * Do the sleeping logic - * @param msg Why we sleep + * @param msg Why we sleep * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ protected boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { - LOG.trace("{} {}, sleeping {} times {}", - logPeerId(), msg, sleepForRetries, sleepMultiplier); + LOG.trace("{} {}, sleeping {} times {}", logPeerId(), msg, sleepForRetries, + sleepMultiplier); } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("{} Interrupted while sleeping between retries", logPeerId()); } Thread.currentThread().interrupt(); @@ -570,7 +563,7 @@ public class ReplicationSource implements ReplicationSourceInterface { for (;;) { peerClusterId = replicationEndpoint.getPeerUUID(); if (this.isSourceActive() && peerClusterId == null) { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("{} Could not connect to Peer ZK. 
Sleeping for {} millis", logPeerId(), (this.sleepForRetries * sleepMultiplier)); } @@ -582,17 +575,16 @@ public class ReplicationSource implements ReplicationSourceInterface { } } - if(!this.isSourceActive()) { + if (!this.isSourceActive()) { retryStartup.set(!this.abortOnError); setSourceStartupStatus(false); throw new IllegalStateException("Source should be active."); } - LOG.info("{} queueId={} (queues={}) is replicating from cluster={} to cluster={}", - logPeerId(), this.replicationQueueInfo.getQueueId(), logQueue.getNumQueues(), clusterId, - peerClusterId); + LOG.info("{} queueId={} (queues={}) is replicating from cluster={} to cluster={}", logPeerId(), + this.replicationQueueInfo.getQueueId(), logQueue.getNumQueues(), clusterId, peerClusterId); initializeWALEntryFilter(peerClusterId); // Start workers - for (String walGroupId: logQueue.getQueues().keySet()) { + for (String walGroupId : logQueue.getQueues().keySet()) { tryStartNewShipper(walGroupId); } setSourceStartupStatus(false); @@ -616,22 +608,21 @@ public class ReplicationSource implements ReplicationSourceInterface { setSourceStartupStatus(true); initThread = new Thread(this::initialize); Threads.setDaemonThreadRunning(initThread, - Thread.currentThread().getName() + ".replicationSource," + this.queueId, - (t,e) -> { - //if first initialization attempt failed, and abortOnError is false, we will - //keep looping in this thread until initialize eventually succeeds, - //while the server main startup one can go on with its work. + Thread.currentThread().getName() + ".replicationSource," + this.queueId, (t, e) -> { + // if first initialization attempt failed, and abortOnError is false, we will + // keep looping in this thread until initialize eventually succeeds, + // while the server main startup one can go on with its work. 
sourceRunning = false; uncaughtException(t, e, null, null); retryStartup.set(!this.abortOnError); do { - if(retryStartup.get()) { + if (retryStartup.get()) { this.sourceRunning = true; setSourceStartupStatus(true); retryStartup.set(false); try { initialize(); - } catch(Throwable error){ + } catch (Throwable error) { setSourceStartupStatus(false); uncaughtException(t, error, null, null); retryStartup.set(!this.abortOnError); @@ -657,13 +648,12 @@ public class ReplicationSource implements ReplicationSourceInterface { terminate(reason, cause, clearMetrics, true); } - public void terminate(String reason, Exception cause, boolean clearMetrics, - boolean join) { + public void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { if (cause == null) { LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason); } else { - LOG.error(String.format("%s Closing source %s because an error occurred: %s", - logPeerId(), this.queueId, reason), cause); + LOG.error(String.format("%s Closing source %s because an error occurred: %s", logPeerId(), + this.queueId, reason), cause); } this.sourceRunning = false; if (initThread != null && Thread.currentThread() != initThread) { @@ -677,7 +667,7 @@ public class ReplicationSource implements ReplicationSourceInterface { for (ReplicationSourceShipper worker : workers) { worker.stopWorker(); - if(worker.entryReader != null) { + if (worker.entryReader != null) { worker.entryReader.setReaderRunning(false); } } @@ -705,8 +695,8 @@ public class ReplicationSource implements ReplicationSourceInterface { } } if (!server.isAborted() && !server.isStopped()) { - //If server is running and worker is already stopped but there was still entries batched, - //we need to clear buffer used for non processed entries + // If server is running and worker is already stopped but there was still entries batched, + // we need to clear buffer used for non processed entries worker.clearWALEntryBatch(); } } @@ -764,9 +754,9 @@ public class ReplicationSource implements ReplicationSourceInterface { return replicationQueueInfo; } - public boolean isWorkerRunning(){ - for(ReplicationSourceShipper worker : this.workerThreads.values()){ - if(worker.isActive()){ + public boolean isWorkerRunning() { + for (ReplicationSourceShipper worker : this.workerThreads.values()) { + if (worker.isActive()) { return worker.isActive(); } } @@ -777,7 +767,7 @@ public class ReplicationSource implements ReplicationSourceInterface { public String getStats() { StringBuilder sb = new StringBuilder(); sb.append("Total replicated edits: ").append(totalReplicatedEdits) - .append(", current progress: \n"); + .append(", current progress: \n"); for (Map.Entry entry : workerThreads.entrySet()) { String walGroupId = entry.getKey(); ReplicationSourceShipper worker = entry.getValue(); @@ -786,7 +776,7 @@ public class ReplicationSource implements ReplicationSourceInterface { sb.append("walGroup [").append(walGroupId).append("]: "); if (currentPath != null) { sb.append("currently replicating from: ").append(currentPath).append(" at position: ") - .append(position).append("\n"); + .append(position).append("\n"); } else { sb.append("no replication ongoing, waiting for new log"); } @@ -800,7 +790,7 @@ public class ReplicationSource implements ReplicationSourceInterface { } @Override - //offsets totalBufferUsed by deducting shipped batchSize. + // offsets totalBufferUsed by deducting shipped batchSize. 
public void postShipEdits(List entries, int batchSize) { if (throttler.isEnabled()) { throttler.addPushSize(batchSize); @@ -833,7 +823,7 @@ public class ReplicationSource implements ReplicationSourceInterface { /** * @return String to use as a log prefix that contains current peerId. */ - public String logPeerId(){ + public String logPeerId() { return "peerId=" + this.getPeerId() + ","; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java index 8863f141f1a..c9731447a69 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,31 +24,31 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Constructs a {@link ReplicationSourceInterface} - * Note, not used to create specialized ReplicationSources + * Constructs a {@link ReplicationSourceInterface} Note, not used to create specialized + * ReplicationSources * @see CatalogReplicationSource */ @InterfaceAudience.Private public final class ReplicationSourceFactory { private static final Logger LOG = LoggerFactory.getLogger(ReplicationSourceFactory.class); - private ReplicationSourceFactory() {} + private ReplicationSourceFactory() { + } static ReplicationSourceInterface create(Configuration conf, String queueId) { ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(queueId); boolean isQueueRecovered = replicationQueueInfo.isQueueRecovered(); ReplicationSourceInterface src; try { - String defaultReplicationSourceImpl = - isQueueRecovered ? RecoveredReplicationSource.class.getCanonicalName() - : ReplicationSource.class.getCanonicalName(); + String defaultReplicationSourceImpl = isQueueRecovered + ? RecoveredReplicationSource.class.getCanonicalName() + : ReplicationSource.class.getCanonicalName(); Class c = Class.forName( conf.get("replication.replicationsource.implementation", defaultReplicationSourceImpl)); src = c.asSubclass(ReplicationSourceInterface.class).getDeclaredConstructor().newInstance(); } catch (Exception e) { LOG.warn("Passed replication source implementation throws errors, " - + "defaulting to ReplicationSource", - e); + + "defaulting to ReplicationSource", e); src = isQueueRecovered ? new RecoveredReplicationSource() : new ReplicationSource(); } return src; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index ca68f9a305e..9e83a3f6c1e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,15 +43,15 @@ import org.apache.yetus.audience.InterfaceAudience; public interface ReplicationSourceInterface { /** * Initializer for the source - * @param conf the configuration to use - * @param fs the file system to use + * @param conf the configuration to use + * @param fs the file system to use * @param manager the manager to use - * @param server the server for this region server + * @param server the server for this region server */ void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, - String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, - MetricsSource metrics) throws IOException; + ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, + String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, + MetricsSource metrics) throws IOException; /** * Add a log to the list of logs to replicate @@ -63,13 +62,13 @@ public interface ReplicationSourceInterface { /** * Add hfile names to the queue to be replicated. * @param tableName Name of the table these files belongs to - * @param family Name of the family these files belong to - * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which - * will be added in the queue for replication} + * @param family Name of the family these files belong to + * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir + * which will be added in the queue for replication} * @throws ReplicationException If failed to add hfile references */ void addHFileRefs(TableName tableName, byte[] family, List> pairs) - throws ReplicationException; + throws ReplicationException; /** * Start the replication @@ -85,14 +84,14 @@ public interface ReplicationSourceInterface { /** * End the replication * @param reason why it's terminating - * @param cause the error that's causing it + * @param cause the error that's causing it */ void terminate(String reason, Exception cause); /** * End the replication - * @param reason why it's terminating - * @param cause the error that's causing it + * @param reason why it's terminating + * @param cause the error that's causing it * @param clearMetrics removes all metrics about this Source */ void terminate(String reason, Exception cause, boolean clearMetrics); @@ -105,21 +104,18 @@ public interface ReplicationSourceInterface { /** * Get the queue id that the source is replicating to - * * @return queue id */ String getQueueId(); /** * Get the id that the source is replicating to. - * * @return peer id */ String getPeerId(); /** - * Get a string representation of the current statistics - * for this source + * Get a string representation of the current statistics for this source * @return printable stats */ String getStats(); @@ -162,7 +158,7 @@ public interface ReplicationSourceInterface { /** * Call this after the shipper thread ship some entries to peer cluster. - * @param entries pushed + * @param entries pushed * @param batchSize entries size pushed */ void postShipEdits(List entries, int batchSize); @@ -195,10 +191,10 @@ public interface ReplicationSourceInterface { ReplicationQueueStorage getReplicationQueueStorage(); /** - * Log the current position to storage. Also clean old logs from the replication queue. 
- * Use to bypass the default call to - * {@link ReplicationSourceManager#logPositionAndCleanOldLogs(ReplicationSourceInterface, - * WALEntryBatch)} whem implementation does not need to persist state to backing storage. + * Log the current position to storage. Also clean old logs from the replication queue. Use to + * bypass the default call to + * {@link ReplicationSourceManager#logPositionAndCleanOldLogs(ReplicationSourceInterface, WALEntryBatch)} + * whem implementation does not need to persist state to backing storage. * @param entryBatch the wal entry batch we just shipped * @return The instance of queueStorage used by this ReplicationSource. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java index 4d89edef5fd..a0e6f1b8d1f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java @@ -50,7 +50,7 @@ public class ReplicationSourceLogQueue { private ReplicationSource source; public ReplicationSourceLogQueue(Configuration conf, MetricsSource metrics, - ReplicationSource source) { + ReplicationSource source) { this.conf = conf; this.metrics = metrics; this.source = source; @@ -60,7 +60,7 @@ public class ReplicationSourceLogQueue { /** * Enqueue the wal - * @param wal wal to be enqueued + * @param wal wal to be enqueued * @param walGroupId Key for the wal in @queues map * @return boolean whether this is the first time we are seeing this walGroupId. */ @@ -85,9 +85,9 @@ public class ReplicationSourceLogQueue { // This will wal a warning for each new wal that gets created above the warn threshold int queueSize = queue.size(); if (queueSize > this.logQueueWarnThreshold) { - LOG.warn("{} WAL group {} queue size: {} exceeds value of " + - "replication.source.log.queue.warn {}", source.logPeerId(), walGroupId, queueSize, - logQueueWarnThreshold); + LOG.warn( + "{} WAL group {} queue size: {} exceeds value of " + "replication.source.log.queue.warn {}", + source.logPeerId(), walGroupId, queueSize, logQueueWarnThreshold); } return exists; } @@ -116,9 +116,8 @@ public class ReplicationSourceLogQueue { } /** - * Return queue for the given walGroupId - * Please don't add or remove elements from the returned queue. - * Use @enqueueLog and @remove methods respectively. + * Return queue for the given walGroupId Please don't add or remove elements from the returned + * queue. Use @enqueueLog and @remove methods respectively. * @param walGroupId walGroupId */ public PriorityBlockingQueue getQueue(String walGroupId) { @@ -156,7 +155,7 @@ public class ReplicationSourceLogQueue { } /* - Returns the age of oldest wal. + * Returns the age of oldest wal. */ long getOldestWalAge() { long now = EnvironmentEdgeManager.currentTime(); @@ -171,8 +170,8 @@ public class ReplicationSourceLogQueue { } /* - Get the oldest wal timestamp from all the queues. - */ + * Get the oldest wal timestamp from all the queues. + */ private long getOldestWalTimestamp() { long oldestWalTimestamp = Long.MAX_VALUE; for (Map.Entry> entry : queues.entrySet()) { @@ -180,8 +179,8 @@ public class ReplicationSourceLogQueue { Path path = queue.peek(); // Can path ever be null ? 
if (path != null) { - oldestWalTimestamp = Math.min(oldestWalTimestamp, - AbstractFSWALProvider.WALStartTimeComparator.getTS(path)); + oldestWalTimestamp = + Math.min(oldestWalTimestamp, AbstractFSWALProvider.WALStartTimeComparator.getTS(path)); } } return oldestWalTimestamp; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index fa445307c3c..36ec13ec636 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -97,17 +97,15 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto * case need synchronized is {@link #cleanOldLogs(NavigableSet, String, boolean, String)} and * {@link #preLogRoll(Path)}. *
    • No need synchronized on {@link #walsByIdRecoveredQueues}. There are three methods which - * modify it, {@link #removePeer(String)} , -<<<<<<< HEAD + * modify it, {@link #removePeer(String)} , <<<<<<< HEAD * {@link #cleanOldLogs(NavigableSet, String, boolean, String)} and * {@link ReplicationSourceManager.NodeFailoverWorker#run()}. - * {@link #cleanOldLogs(NavigableSet, String, boolean, String)} is called by -======= + * {@link #cleanOldLogs(NavigableSet, String, boolean, String)} is called by ======= * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and * {@link ReplicationSourceManager#claimQueue(ServerName, String)}. - * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} is called by ->>>>>>> 51893b9ba3... HBASE-26029 It is not reliable to use nodeDeleted event to track region server's death (#3430) - * {@link ReplicationSourceInterface}. {@link #removePeer(String)} will terminate the + * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} is called by >>>>>>> + * 51893b9ba3... HBASE-26029 It is not reliable to use nodeDeleted event to track region server's + * death (#3430) {@link ReplicationSourceInterface}. {@link #removePeer(String)} will terminate the * {@link ReplicationSourceInterface} firstly, then remove the wals from * {@link #walsByIdRecoveredQueues}. And * {@link ReplicationSourceManager#claimQueue(ServerName, String)} will add the wals to @@ -129,10 +127,9 @@ public class ReplicationSourceManager { private final List oldsources; /** - * Storage for queues that need persistance; e.g. Replication state so can be recovered - * after a crash. queueStorage upkeep is spread about this class and passed - * to ReplicationSource instances for these to do updates themselves. Not all ReplicationSource - * instances keep state. + * Storage for queues that need persistance; e.g. Replication state so can be recovered after a + * crash. queueStorage upkeep is spread about this class and passed to ReplicationSource instances + * for these to do updates themselves. Not all ReplicationSource instances keep state. */ private final ReplicationQueueStorage queueStorage; @@ -167,36 +164,34 @@ public class ReplicationSourceManager { private final boolean replicationForBulkLoadDataEnabled; - private AtomicLong totalBufferUsed = new AtomicLong(); // Total buffer size on this RegionServer for holding batched edits to be shipped. private final long totalBufferLimit; private final MetricsReplicationGlobalSourceSource globalMetrics; /** - * A special ReplicationSource for hbase:meta Region Read Replicas. - * Usually this reference remains empty. If an hbase:meta Region is opened on this server, we - * will create an instance of a hbase:meta CatalogReplicationSource and it will live the life of - * the Server thereafter; i.e. we will not shut it down even if the hbase:meta moves away from - * this server (in case it later gets moved back). We synchronize on this instance testing for - * presence and if absent, while creating so only created and started once. + * A special ReplicationSource for hbase:meta Region Read Replicas. Usually this reference remains + * empty. If an hbase:meta Region is opened on this server, we will create an instance of a + * hbase:meta CatalogReplicationSource and it will live the life of the Server thereafter; i.e. we + * will not shut it down even if the hbase:meta moves away from this server (in case it later gets + * moved back). 
We synchronize on this instance testing for presence and if absent, while creating + * so only created and started once. */ AtomicReference catalogReplicationSource = new AtomicReference<>(); /** * Creates a replication manager and sets the watch on all the other registered region servers * @param queueStorage the interface for manipulating replication queues - * @param conf the configuration to use - * @param server the server for this region server - * @param fs the file system to use - * @param logDir the directory that contains all wal directories of live RSs - * @param oldLogDir the directory where old logs are archived + * @param conf the configuration to use + * @param server the server for this region server + * @param fs the file system to use + * @param logDir the directory that contains all wal directories of live RSs + * @param oldLogDir the directory where old logs are archived */ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, - ReplicationPeers replicationPeers, Configuration conf, - Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID clusterId, - WALFactory walFactory, - MetricsReplicationGlobalSourceSource globalMetrics) throws IOException { + ReplicationPeers replicationPeers, Configuration conf, Server server, FileSystem fs, + Path logDir, Path oldLogDir, UUID clusterId, WALFactory walFactory, + MetricsReplicationGlobalSourceSource globalMetrics) throws IOException { // CopyOnWriteArrayList is thread-safe. // Generally, reading is more than modifying. this.sources = new ConcurrentHashMap<>(); @@ -219,8 +214,8 @@ public class ReplicationSourceManager { int nbWorkers = conf.getInt("replication.executor.workers", 1); // use a short 100ms sleep since this could be done inline with a RS startup // even if we fail, other region servers can take care of it - this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, - TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); + this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>()); ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); tfb.setNameFormat("ReplicationExecutor-%d"); tfb.setDaemon(true); @@ -229,7 +224,7 @@ public class ReplicationSourceManager { replicationForBulkLoadDataEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); this.totalBufferLimit = conf.getLong(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, - HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT); + HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT); this.globalMetrics = globalMetrics; } @@ -291,8 +286,8 @@ public class ReplicationSourceManager { removeRecoveredSource(src); } } - LOG.info( - "Number of deleted recovered sources for " + peerId + ": " + oldSourcesToDelete.size()); + LOG + .info("Number of deleted recovered sources for " + peerId + ": " + oldSourcesToDelete.size()); // Now close the normal source for this peer ReplicationSourceInterface srcToRemove = this.sources.get(peerId); if (srcToRemove != null) { @@ -316,14 +311,14 @@ public class ReplicationSourceManager { * @see #createCatalogReplicationSource(RegionInfo) for creating a ReplicationSource for meta. */ private ReplicationSourceInterface createSource(String queueId, ReplicationPeer replicationPeer) - throws IOException { + throws IOException { ReplicationSourceInterface src = ReplicationSourceFactory.create(conf, queueId); // Init the just created replication source. 
Pass the default walProvider's wal file length // provider. Presumption is we replicate user-space Tables only. For hbase:meta region replica // replication, see #createCatalogReplicationSource(). - WALFileLengthProvider walFileLengthProvider = - this.walFactory.getWALProvider() != null? - this.walFactory.getWALProvider().getWALFileLengthProvider() : p -> OptionalLong.empty(); + WALFileLengthProvider walFileLengthProvider = this.walFactory.getWALProvider() != null + ? this.walFactory.getWALProvider().getWALFileLengthProvider() + : p -> OptionalLong.empty(); src.init(conf, fs, this, queueStorage, replicationPeer, server, queueId, clusterId, walFileLengthProvider, new MetricsSource(queueId)); return src; @@ -367,12 +362,11 @@ public class ReplicationSourceManager { * Close the previous replication sources of this peer id and open new sources to trigger the new * replication state changes or new replication config changes. Here we don't need to change * replication queue storage and only to enqueue all logs to the new replication source - * @param peerId the id of the replication peer - * @throws IOException + * @param peerId the id of the replication peer n */ public void refreshSources(String peerId) throws IOException { - String terminateMessage = "Peer " + peerId + - " state or config changed. Will close the previous replication source and open a new one"; + String terminateMessage = "Peer " + peerId + + " state or config changed. Will close the previous replication source and open a new one"; ReplicationPeer peer = replicationPeers.getPeer(peerId); ReplicationSourceInterface src = createSource(peerId, peer); // synchronized on latestPaths to avoid missing the new log @@ -387,8 +381,7 @@ public class ReplicationSourceManager { walsByGroup.forEach(wal -> { Path walPath = new Path(this.logDir, wal); src.enqueueLog(walPath); - LOG.trace("Enqueued {} to source {} during source creation.", - walPath, src.getQueueId()); + LOG.trace("Enqueued {} to source {} during source creation.", walPath, src.getQueueId()); }); } @@ -401,7 +394,7 @@ public class ReplicationSourceManager { synchronized (this.oldsources) { List previousQueueIds = new ArrayList<>(); for (Iterator iter = this.oldsources.iterator(); iter - .hasNext();) { + .hasNext();) { ReplicationSourceInterface oldSource = iter.next(); if (oldSource.getPeerId().equals(peerId)) { previousQueueIds.add(oldSource.getQueueId()); @@ -468,9 +461,11 @@ public class ReplicationSourceManager { try { op.exec(); } catch (ReplicationException e) { - if (e.getCause() != null && e.getCause() instanceof KeeperException.SystemErrorException - && e.getCause().getCause() != null && e.getCause() - .getCause() instanceof InterruptedException) { + if ( + e.getCause() != null && e.getCause() instanceof KeeperException.SystemErrorException + && e.getCause().getCause() != null + && e.getCause().getCause() instanceof InterruptedException + ) { // ReplicationRuntimeException(a RuntimeException) is thrown out here. 
The reason is // that thread is interrupted deep down in the stack, it should pass the following // processing logic and propagate to the most top layer which can handle this exception @@ -514,21 +509,20 @@ public class ReplicationSourceManager { * @param entryBatch the wal entry batch we just shipped */ public void logPositionAndCleanOldLogs(ReplicationSourceInterface source, - WALEntryBatch entryBatch) { + WALEntryBatch entryBatch) { String fileName = entryBatch.getLastWalPath().getName(); String queueId = source.getQueueId(); - interruptOrAbortWhenFail(() -> this.queueStorage - .setWALPosition(server.getServerName(), queueId, fileName, entryBatch.getLastWalPosition(), - entryBatch.getLastSeqIds())); + interruptOrAbortWhenFail(() -> this.queueStorage.setWALPosition(server.getServerName(), queueId, + fileName, entryBatch.getLastWalPosition(), entryBatch.getLastSeqIds())); cleanOldLogs(fileName, entryBatch.isEndOfFile(), queueId, source.isRecovered()); } /** * Cleans a log file and all older logs from replication queue. Called when we are sure that a log * file is closed and has no more entries. - * @param log Path to the log - * @param inclusive whether we should also remove the given log file - * @param queueId id of the replication queue + * @param log Path to the log + * @param inclusive whether we should also remove the given log file + * @param queueId id of the replication queue * @param queueRecovered Whether this is a recovered queue */ void cleanOldLogs(String log, boolean inclusive, String queueId, boolean queueRecovered) { @@ -615,8 +609,8 @@ public class ReplicationSourceManager { // This only updates the sources we own, not the recovered ones for (ReplicationSourceInterface source : this.sources.values()) { source.enqueueLog(newLog); - LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", - newLog, source.getQueueId()); + LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", newLog, + source.getQueueId()); } } @@ -624,8 +618,8 @@ public class ReplicationSourceManager { // Wait a bit before transferring the queues, we may be shutting down. // This sleep may not be enough in some cases. try { - Thread.sleep(sleepBeforeFailover + - (long) (ThreadLocalRandom.current().nextFloat() * sleepBeforeFailover)); + Thread.sleep(sleepBeforeFailover + + (long) (ThreadLocalRandom.current().nextFloat() * sleepBeforeFailover)); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting before transferring a queue."); Thread.currentThread().interrupt(); @@ -653,10 +647,9 @@ public class ReplicationSourceManager { claimedQueue = queueStorage.claimQueue(deadRS, queue, server.getServerName()); } catch (ReplicationException e) { LOG.error( - "ReplicationException: cannot claim dead region ({})'s " + - "replication queue. Znode : ({})" + - " Possible solution: check if znode size exceeds jute.maxBuffer value. " + - " If so, increase it for both client and server side.", + "ReplicationException: cannot claim dead region ({})'s " + "replication queue. Znode : ({})" + + " Possible solution: check if znode size exceeds jute.maxBuffer value. 
" + + " If so, increase it for both client and server side.", deadRS, queueStorage.getRsNode(deadRS), e); server.abort("Failed to claim queue from dead regionserver.", e); return; @@ -672,8 +665,10 @@ public class ReplicationSourceManager { abortWhenFail(() -> queueStorage.removeQueue(server.getServerName(), queueId)); return; } - if (server instanceof ReplicationSyncUp.DummyServer && - peer.getPeerState().equals(PeerState.DISABLED)) { + if ( + server instanceof ReplicationSyncUp.DummyServer + && peer.getPeerState().equals(PeerState.DISABLED) + ) { LOG.warn( "Peer {} is disabled. ReplicationSyncUp tool will skip " + "replicating data to this peer.", peerId); @@ -798,8 +793,8 @@ public class ReplicationSourceManager { } /** - * Returns the maximum size in bytes of edits held in memory which are pending replication - * across all sources inside this RegionServer. + * Returns the maximum size in bytes of edits held in memory which are pending replication across + * all sources inside this RegionServer. */ public long getTotalBufferLimit() { return totalBufferLimit; @@ -845,7 +840,7 @@ public class ReplicationSourceManager { // Print stats that apply across all Replication Sources stats.append("Global stats: "); stats.append("WAL Edits Buffer Used=").append(getTotalBufferUsed().get()).append("B, Limit=") - .append(getTotalBufferLimit()).append("B\n"); + .append(getTotalBufferLimit()).append("B\n"); for (ReplicationSourceInterface source : this.sources.values()) { stats.append("Normal source for cluster " + source.getPeerId() + ": "); stats.append(source.getStats() + "\n"); @@ -858,7 +853,7 @@ public class ReplicationSourceManager { } public void addHFileRefs(TableName tableName, byte[] family, List> pairs) - throws IOException { + throws IOException { for (ReplicationSourceInterface source : this.sources.values()) { throwIOExceptionWhenFail(() -> source.addHFileRefs(tableName, family, pairs)); } @@ -877,24 +872,24 @@ public class ReplicationSourceManager { } /** - * Add an hbase:meta Catalog replication source. Called on open of an hbase:meta Region. - * Create it once only. If exists already, use the existing one. + * Add an hbase:meta Catalog replication source. Called on open of an hbase:meta Region. Create it + * once only. If exists already, use the existing one. * @see #removeCatalogReplicationSource(RegionInfo) * @see #addSource(String) This is specialization on the addSource method. */ public ReplicationSourceInterface addCatalogReplicationSource(RegionInfo regionInfo) - throws IOException { + throws IOException { // Poor-man's putIfAbsent synchronized (this.catalogReplicationSource) { ReplicationSourceInterface rs = this.catalogReplicationSource.get(); - return rs != null ? rs : - this.catalogReplicationSource.getAndSet(createCatalogReplicationSource(regionInfo)); + return rs != null + ? rs + : this.catalogReplicationSource.getAndSet(createCatalogReplicationSource(regionInfo)); } } /** - * Remove the hbase:meta Catalog replication source. - * Called when we close hbase:meta. + * Remove the hbase:meta Catalog replication source. Called when we close hbase:meta. * @see #addCatalogReplicationSource(RegionInfo regionInfo) */ public void removeCatalogReplicationSource(RegionInfo regionInfo) { @@ -903,15 +898,15 @@ public class ReplicationSourceManager { } /** - * Create, initialize, and start the Catalog ReplicationSource. - * Presumes called one-time only (caller must ensure one-time only call). - * This ReplicationSource is NOT created via {@link ReplicationSourceFactory}. 
+ * Create, initialize, and start the Catalog ReplicationSource. Presumes called one-time only + * (caller must ensure one-time only call). This ReplicationSource is NOT created via + * {@link ReplicationSourceFactory}. * @see #addSource(String) This is a specialization of the addSource call. * @see #catalogReplicationSource for a note on this ReplicationSource's lifecycle (and more on - * why the special handling). + * why the special handling). */ private ReplicationSourceInterface createCatalogReplicationSource(RegionInfo regionInfo) - throws IOException { + throws IOException { // Instantiate meta walProvider. Instantiated here or over in the #warmupRegion call made by the // Master on a 'move' operation. Need to do extra work if we did NOT instantiate the provider. WALProvider walProvider = this.walFactory.getMetaWALProvider(); @@ -924,14 +919,15 @@ public class ReplicationSourceManager { // read replicas feature that makes use of the source does a reset on a crash of the WAL // source process. See "4.1 Skip maintaining zookeeper replication queue (offsets/WALs)" in the // design doc attached to HBASE-18070 'Enable memstore replication for meta replica' for detail. - CatalogReplicationSourcePeer peer = new CatalogReplicationSourcePeer(this.conf, - this.clusterId.toString()); + CatalogReplicationSourcePeer peer = + new CatalogReplicationSourcePeer(this.conf, this.clusterId.toString()); final ReplicationSourceInterface crs = new CatalogReplicationSource(); crs.init(conf, fs, this, new NoopReplicationQueueStorage(), peer, server, peer.getId(), clusterId, walProvider.getWALFileLengthProvider(), new MetricsSource(peer.getId())); // Add listener on the provider so we can pick up the WAL to replicate on roll. WALActionsListener listener = new WALActionsListener() { - @Override public void postLogRoll(Path oldPath, Path newPath) throws IOException { + @Override + public void postLogRoll(Path oldPath, Path newPath) throws IOException { crs.enqueueLog(newPath); } }; @@ -943,7 +939,7 @@ public class ReplicationSourceManager { // Replication Source so it can start replicating it. WAL wal = walProvider.getWAL(regionInfo); wal.registerWALActionsListener(listener); - crs.enqueueLog(((AbstractFSWAL)wal).getCurrentFileName()); + crs.enqueueLog(((AbstractFSWAL) wal).getCurrentFileName()); } return crs.startup(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index 95719273828..8e5b2e5a1d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,10 @@ package org.apache.hadoop.hbase.replication.regionserver; import static org.apache.hadoop.hbase.replication.ReplicationUtils.getAdaptiveTimeout; + import java.io.IOException; import java.util.List; import java.util.concurrent.atomic.LongAccumulator; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; @@ -50,7 +51,7 @@ public class ReplicationSourceShipper extends Thread { public enum WorkerState { RUNNING, STOPPED, - FINISHED, // The worker is done processing a recovered queue + FINISHED, // The worker is done processing a recovered queue } private final Configuration conf; @@ -76,7 +77,7 @@ public class ReplicationSourceShipper extends Thread { private final int shipEditsTimeout; public ReplicationSourceShipper(Configuration conf, String walGroupId, - ReplicationSourceLogQueue logQueue, ReplicationSource source) { + ReplicationSourceLogQueue logQueue, ReplicationSource source) { this.conf = conf; this.walGroupId = walGroupId; this.logQueue = logQueue; @@ -107,8 +108,8 @@ public class ReplicationSourceShipper extends Thread { } try { WALEntryBatch entryBatch = entryReader.poll(getEntriesTimeout); - LOG.debug("Shipper from source {} got entry batch from reader: {}", - source.getQueueId(), entryBatch); + LOG.debug("Shipper from source {} got entry batch from reader: {}", source.getQueueId(), + entryBatch); if (entryBatch == null) { continue; } @@ -142,15 +143,15 @@ public class ReplicationSourceShipper extends Thread { } /** - * get batchEntry size excludes bulk load file sizes. - * Uses ReplicationSourceWALReader's static method. + * get batchEntry size excludes bulk load file sizes. Uses ReplicationSourceWALReader's static + * method. 
*/ private int getBatchEntrySizeExcludeBulkLoad(WALEntryBatch entryBatch) { int totalSize = 0; - for(Entry entry : entryBatch.getWalEntries()) { + for (Entry entry : entryBatch.getWalEntries()) { totalSize += ReplicationSourceWALReader.getEntrySizeExcludeBulkLoad(entry); } - return totalSize; + return totalSize; } /** @@ -165,8 +166,8 @@ public class ReplicationSourceShipper extends Thread { } int currentSize = (int) entryBatch.getHeapSize(); int sizeExcludeBulkLoad = getBatchEntrySizeExcludeBulkLoad(entryBatch); - source.getSourceMetrics().setTimeStampNextToReplicate(entries.get(entries.size() - 1) - .getKey().getWriteTime()); + source.getSourceMetrics() + .setTimeStampNextToReplicate(entries.get(entries.size() - 1).getKey().getWriteTime()); while (isActive()) { try { try { @@ -181,7 +182,7 @@ public class ReplicationSourceShipper extends Thread { // create replicateContext here, so the entries can be GC'd upon return from this call // stack ReplicationEndpoint.ReplicateContext replicateContext = - new ReplicationEndpoint.ReplicateContext(); + new ReplicationEndpoint.ReplicateContext(); replicateContext.setEntries(entries).setSize(currentSize); replicateContext.setWalGroupId(walGroupId); replicateContext.setTimeout(getAdaptiveTimeout(this.shipEditsTimeout, sleepMultiplier)); @@ -204,10 +205,10 @@ public class ReplicationSourceShipper extends Thread { // Log and clean up WAL logs updateLogPosition(entryBatch); - //offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size) - //this sizeExcludeBulkLoad has to use same calculation that when calling - //acquireBufferQuota() in ReplicationSourceWALReader because they maintain - //same variable: totalBufferUsed + // offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size) + // this sizeExcludeBulkLoad has to use same calculation that when calling + // acquireBufferQuota() in ReplicationSourceWALReader because they maintain + // same variable: totalBufferUsed source.postShipEdits(entries, sizeExcludeBulkLoad); // FIXME check relationship between wal group and overall source.getSourceMetrics().shipBatch(entryBatch.getNbOperations(), currentSize, @@ -217,8 +218,8 @@ public class ReplicationSourceShipper extends Thread { source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWalEntriesWithSize()); if (LOG.isTraceEnabled()) { - LOG.debug("Replicated {} entries or {} operations in {} ms", - entries.size(), entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000); + LOG.debug("Replicated {} entries or {} operations in {} ms", entries.size(), + entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000); } break; } catch (Exception ex) { @@ -262,8 +263,10 @@ public class ReplicationSourceShipper extends Thread { // record on zk, so let's call it. The last wal position maybe zero if end of file is true and // there is no entry in the batch. It is OK because that the queue storage will ignore the zero // position and the file will be removed soon in cleanOldLogs. 
- if (batch.isEndOfFile() || !batch.getLastWalPath().equals(currentPath) || - batch.getLastWalPosition() != currentPosition) { + if ( + batch.isEndOfFile() || !batch.getLastWalPath().equals(currentPath) + || batch.getLastWalPosition() != currentPosition + ) { source.logPositionAndCleanOldLogs(batch); updated = true; } @@ -321,7 +324,7 @@ public class ReplicationSourceShipper extends Thread { /** * Do the sleeping logic - * @param msg Why we sleep + * @param msg Why we sleep * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ @@ -337,28 +340,26 @@ public class ReplicationSourceShipper extends Thread { } /** - * Attempts to properly update ReplicationSourceManager.totalBufferUser, - * in case there were unprocessed entries batched by the reader to the shipper, - * but the shipper didn't manage to ship those because the replication source is being terminated. - * In that case, it iterates through the batched entries and decrease the pending - * entries size from ReplicationSourceManager.totalBufferUser + * Attempts to properly update ReplicationSourceManager.totalBufferUser, in case + * there were unprocessed entries batched by the reader to the shipper, but the shipper didn't + * manage to ship those because the replication source is being terminated. In that case, it + * iterates through the batched entries and decrease the pending entries size from + * ReplicationSourceManager.totalBufferUser *

      - * NOTES - * 1) This method should only be called upon replication source termination. - * It blocks waiting for both shipper and reader threads termination, - * to make sure no race conditions - * when updating ReplicationSourceManager.totalBufferUser. - * - * 2) It does not attempt to terminate reader and shipper threads. Those must - * have been triggered interruption/termination prior to calling this method. + * NOTES 1) This method should only be called upon replication source termination. It + * blocks waiting for both shipper and reader threads termination, to make sure no race conditions + * when updating ReplicationSourceManager.totalBufferUser. 2) It does not + * attempt to terminate reader and shipper threads. Those must have been triggered + * interruption/termination prior to calling this method. */ void clearWALEntryBatch() { long timeout = EnvironmentEdgeManager.currentTime() + this.shipEditsTimeout; - while(this.isAlive() || this.entryReader.isAlive()){ + while (this.isAlive() || this.entryReader.isAlive()) { try { if (EnvironmentEdgeManager.currentTime() >= timeout) { - LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " - + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", + LOG.warn( + "Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " + + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive()); return; } else { @@ -367,11 +368,11 @@ public class ReplicationSourceShipper extends Thread { } } catch (InterruptedException e) { LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. " - + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e); + + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e); return; } } - LongAccumulator totalToDecrement = new LongAccumulator((a,b) -> a + b, 0); + LongAccumulator totalToDecrement = new LongAccumulator((a, b) -> a + b, 0); entryReader.entryBatchQueue.forEach(w -> { entryReader.entryBatchQueue.remove(w); w.getWalEntries().forEach(e -> { @@ -379,12 +380,12 @@ public class ReplicationSourceShipper extends Thread { totalToDecrement.accumulate(entrySizeExcludeBulkLoad); }); }); - if( LOG.isTraceEnabled()) { + if (LOG.isTraceEnabled()) { LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.", totalToDecrement.longValue()); } - long newBufferUsed = source.getSourceManager().getTotalBufferUsed() - .addAndGet(-totalToDecrement.longValue()); + long newBufferUsed = + source.getSourceManager().getTotalBufferUsed().addAndGet(-totalToDecrement.longValue()); source.getSourceManager().getGlobalMetrics().setWALReaderEditsBufferBytes(newBufferUsed); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java index f3311eedbe0..6e5da0feffb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ class ReplicationSourceWALActionListener implements WALActionsListener { /** * Utility method used to set the correct scopes on each log key. Doesn't set a scope on keys from * compaction WAL edits and if the scope is local. - * @param logKey Key that may get scoped according to its edits + * @param logKey Key that may get scoped according to its edits * @param logEdit Edits used to lookup the scopes */ static void scopeWALEdits(WALKey logKey, WALEdit logEdit, Configuration conf) { @@ -70,8 +70,9 @@ class ReplicationSourceWALActionListener implements WALActionsListener { return; } // For replay, or if all the cells are markers, do not need to store replication scope. - if (logEdit.isReplay() || - logEdit.getCells().stream().allMatch(c -> WALEdit.isMetaEditFamily(c))) { + if ( + logEdit.isReplay() || logEdit.getCells().stream().allMatch(c -> WALEdit.isMetaEditFamily(c)) + ) { ((WALKeyImpl) logKey).clearReplicationScope(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index c61494e12c6..d95e21241e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,6 +41,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; @@ -72,7 +72,7 @@ class ReplicationSourceWALReader extends Thread { private final int maxRetriesMultiplier; private final boolean eofAutoRecovery; - //Indicates whether this particular worker is running + // Indicates whether this particular worker is running private boolean isReaderRunning = true; private AtomicLong totalBufferUsed; @@ -82,16 +82,16 @@ class ReplicationSourceWALReader extends Thread { /** * Creates a reader worker for a given WAL queue. Reads WAL entries off a given queue, batches the * entries, and puts them on a batch queue. 
- * @param fs the files system to use - * @param conf configuration to use - * @param logQueue The WAL queue to read off of + * @param fs the files system to use + * @param conf configuration to use + * @param logQueue The WAL queue to read off of * @param startPosition position in the first WAL to start reading from - * @param filter The filter to use while reading - * @param source replication source + * @param filter The filter to use while reading + * @param source replication source */ public ReplicationSourceWALReader(FileSystem fs, Configuration conf, - ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, - ReplicationSource source, String walGroupId) { + ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, + ReplicationSource source, String walGroupId) { this.logQueue = logQueue; this.currentPosition = startPosition; this.fs = fs; @@ -99,7 +99,7 @@ class ReplicationSourceWALReader extends Thread { this.filter = filter; this.source = source; this.replicationBatchSizeCapacity = - this.conf.getLong("replication.source.size.capacity", 1024 * 1024 * 64); + this.conf.getLong("replication.source.size.capacity", 1024 * 1024 * 64); this.replicationBatchCountCapacity = this.conf.getInt("replication.source.nb.capacity", 25000); // memory used will be batchSizeCapacity * (nb.batches + 1) // the +1 is for the current thread reading before placing onto the queue @@ -113,11 +113,10 @@ class ReplicationSourceWALReader extends Thread { this.eofAutoRecovery = conf.getBoolean("replication.source.eof.autorecovery", false); this.entryBatchQueue = new LinkedBlockingQueue<>(batchCount); this.walGroupId = walGroupId; - LOG.info("peerClusterZnode=" + source.getQueueId() - + ", ReplicationSourceWALReaderThread : " + source.getPeerId() - + " inited, replicationBatchSizeCapacity=" + replicationBatchSizeCapacity - + ", replicationBatchCountCapacity=" + replicationBatchCountCapacity - + ", replicationBatchQueueCapacity=" + batchCount); + LOG.info("peerClusterZnode=" + source.getQueueId() + ", ReplicationSourceWALReaderThread : " + + source.getPeerId() + " inited, replicationBatchSizeCapacity=" + replicationBatchSizeCapacity + + ", replicationBatchCountCapacity=" + replicationBatchCountCapacity + + ", replicationBatchQueueCapacity=" + batchCount); } @Override @@ -126,9 +125,8 @@ class ReplicationSourceWALReader extends Thread { while (isReaderRunning()) { // we only loop back here if something fatal happened to our stream WALEntryBatch batch = null; try (WALEntryStream entryStream = - new WALEntryStream(logQueue, conf, currentPosition, - source.getWALFileLengthProvider(), source.getServerWALsBelongTo(), - source.getSourceMetrics(), walGroupId)) { + new WALEntryStream(logQueue, conf, currentPosition, source.getWALFileLengthProvider(), + source.getServerWALsBelongTo(), source.getSourceMetrics(), walGroupId)) { while (isReaderRunning()) { // loop here to keep reusing stream while we can batch = null; if (!source.isPeerEnabled()) { @@ -179,7 +177,7 @@ class ReplicationSourceWALReader extends Thread { return false; } LOG.debug("updating TimeStampOfLastAttempted to {}, from entry {}, for source queue: {}", - entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId()); + entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId()); long entrySize = getEntrySizeIncludeBulkLoad(entry); long entrySizeExcludeBulkLoad = getEntrySizeExcludeBulkLoad(entry); batch.addEntry(entry, entrySize); @@ -187,8 +185,8 @@ class ReplicationSourceWALReader extends 
Thread { boolean totalBufferTooLarge = acquireBufferQuota(entrySizeExcludeBulkLoad); // Stop if too many entries or too big - return totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity || - batch.getNbEntries() >= replicationBatchCountCapacity; + return totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity + || batch.getNbEntries() >= replicationBatchCountCapacity; } protected static final boolean switched(WALEntryStream entryStream, Path path) { @@ -257,24 +255,25 @@ class ReplicationSourceWALReader extends Thread { } /** - * This is to handle the EOFException from the WAL entry stream. EOFException should - * be handled carefully because there are chances of data loss because of never replicating - * the data. Thus we should always try to ship existing batch of entries here. - * If there was only one log in the queue before EOF, we ship the empty batch here - * and since reader is still active, in the next iteration of reader we will - * stop the reader. + * This is to handle the EOFException from the WAL entry stream. EOFException should be handled + * carefully because there are chances of data loss because of never replicating the data. Thus we + * should always try to ship existing batch of entries here. If there was only one log in the + * queue before EOF, we ship the empty batch here and since reader is still active, in the next + * iteration of reader we will stop the reader. *

      - * If there was more than one log in the queue before EOF, we ship the existing batch - * and reset the wal patch and position to the log with EOF, so shipper can remove - * logs from replication queue + * If there was more than one log in the queue before EOF, we ship the existing batch and reset + * the wal patch and position to the log with EOF, so shipper can remove logs from replication + * queue * @return true only the IOE can be handled */ private boolean handleEofException(Exception e, WALEntryBatch batch) { PriorityBlockingQueue queue = logQueue.getQueue(walGroupId); // Dump the log even if logQueue size is 1 if the source is from recovered Source // since we don't add current log to recovered source queue so it is safe to remove. - if ((e instanceof EOFException || e.getCause() instanceof EOFException) && - (source.isRecovered() || queue.size() > 1) && this.eofAutoRecovery) { + if ( + (e instanceof EOFException || e.getCause() instanceof EOFException) + && (source.isRecovered() || queue.size() > 1) && this.eofAutoRecovery + ) { Path path = queue.peek(); try { if (!fs.exists(path)) { @@ -325,12 +324,12 @@ class ReplicationSourceWALReader extends Thread { return logQueue.getQueue(walGroupId).peek(); } - //returns false if we've already exceeded the global quota + // returns false if we've already exceeded the global quota private boolean checkQuota() { // try not to go over total quota if (totalBufferUsed.get() > totalBufferQuota) { LOG.warn("peer={}, can't read more edits from WAL as buffer usage {}B exceeds limit {}B", - this.source.getPeerId(), totalBufferUsed.get(), totalBufferQuota); + this.source.getPeerId(), totalBufferUsed.get(), totalBufferQuota); Threads.sleep(sleepForRetries); return false; } @@ -366,7 +365,7 @@ class ReplicationSourceWALReader extends Thread { private long getEntrySizeIncludeBulkLoad(Entry entry) { WALEdit edit = entry.getEdit(); - return getEntrySizeExcludeBulkLoad(entry) + sizeOfStoreFilesIncludeBulkLoad(edit); + return getEntrySizeExcludeBulkLoad(entry) + sizeOfStoreFilesIncludeBulkLoad(edit); } public static long getEntrySizeExcludeBulkLoad(Entry entry) { @@ -375,7 +374,6 @@ class ReplicationSourceWALReader extends Thread { return edit.heapSize() + key.estimatedSerializedSizeOf(); } - private void updateBatchStats(WALEntryBatch batch, Entry entry, long entrySize) { WALEdit edit = entry.getEdit(); batch.incrementHeapSize(entrySize); @@ -409,7 +407,7 @@ class ReplicationSourceWALReader extends Thread { } } catch (IOException e) { LOG.error("Failed to deserialize bulk load entry from wal edit. " - + "Then its hfiles count will not be added into metric.", e); + + "Then its hfiles count will not be added into metric.", e); } } @@ -441,12 +439,12 @@ class ReplicationSourceWALReader extends Thread { int totalStores = stores.size(); for (int j = 0; j < totalStores; j++) { totalStoreFilesSize = - (int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes()); + (int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes()); } } catch (IOException e) { LOG.error("Failed to deserialize bulk load entry from wal edit. 
" - + "Size of HFiles part of cell will not be considered in replication " - + "request size calculation.", e); + + "Size of HFiles part of cell will not be considered in replication " + + "request size calculation.", e); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java index 10d6cd59d4a..2161cc35ed9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index a2ce6e69d50..dbdcc140a45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -71,8 +71,9 @@ public class ReplicationSyncUp extends Configured implements Tool { private Set getLiveRegionServers(ZKWatcher zkw) throws KeeperException { List rsZNodes = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode); - return rsZNodes == null ? Collections.emptySet() : - rsZNodes.stream().map(ServerName::parseServerName).collect(Collectors.toSet()); + return rsZNodes == null + ? Collections.emptySet() + : rsZNodes.stream().map(ServerName::parseServerName).collect(Collectors.toSet()); } // When using this tool, usually the source cluster is unhealthy, so we should try to claim the @@ -106,8 +107,7 @@ public class ReplicationSyncUp extends Configured implements Tool { }; Configuration conf = getConf(); try (ZKWatcher zkw = new ZKWatcher(conf, - "syncupReplication" + EnvironmentEdgeManager.currentTime(), - abortable, true)) { + "syncupReplication" + EnvironmentEdgeManager.currentTime(), abortable, true)) { Path walRootDir = CommonFSUtils.getWALRootDir(conf); FileSystem fs = CommonFSUtils.getWALFileSystem(conf); Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java index 7f73030699e..3e4bb77b23f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** - * Per-peer per-node throttling controller for replication: enabled if - * bandwidth > 0, a cycle = 100ms, by throttling we guarantee data pushed - * to peer within each cycle won't exceed 'bandwidth' bytes + * Per-peer per-node throttling controller for replication: enabled if bandwidth > 0, a cycle = + * 100ms, by throttling we guarantee data pushed to peer within each cycle won't exceed 'bandwidth' + * bytes */ @InterfaceAudience.Private public class ReplicationThrottler { @@ -33,8 +33,7 @@ public class ReplicationThrottler { private long cycleStartTick; /** - * ReplicationThrottler constructor - * If bandwidth less than 1, throttling is disabled + * ReplicationThrottler constructor If bandwidth less than 1, throttling is disabled * @param bandwidth per cycle(100ms) */ public ReplicationThrottler(final double bandwidth) { @@ -55,9 +54,8 @@ public class ReplicationThrottler { } /** - * Get how long the caller should sleep according to the current size and - * current cycle's total push size and start tick, return the sleep interval - * for throttling control. + * Get how long the caller should sleep according to the current size and current cycle's total + * push size and start tick, return the sleep interval for throttling control. * @param size is the size of edits to be pushed * @return sleep interval for throttling control */ @@ -69,11 +67,11 @@ public class ReplicationThrottler { long sleepTicks = 0; long now = EnvironmentEdgeManager.currentTime(); // 1. if cyclePushSize exceeds bandwidth, we need to sleep some - // following cycles to amortize, this case can occur when a single push - // exceeds the bandwidth - if ((double)this.cyclePushSize > bandwidth) { - double cycles = Math.ceil((double)this.cyclePushSize / bandwidth); - long shouldTillTo = this.cycleStartTick + (long)(cycles * 100); + // following cycles to amortize, this case can occur when a single push + // exceeds the bandwidth + if ((double) this.cyclePushSize > bandwidth) { + double cycles = Math.ceil((double) this.cyclePushSize / bandwidth); + long shouldTillTo = this.cycleStartTick + (long) (cycles * 100); if (shouldTillTo > now) { sleepTicks = shouldTillTo - now; } else { @@ -82,16 +80,15 @@ public class ReplicationThrottler { } this.cyclePushSize = 0; } else { - long nextCycleTick = this.cycleStartTick + 100; //a cycle is 100ms + long nextCycleTick = this.cycleStartTick + 100; // a cycle is 100ms if (now >= nextCycleTick) { // 2. switch to next cycle if the current cycle has passed this.cycleStartTick = now; this.cyclePushSize = 0; - } else if (this.cyclePushSize > 0 && - (double)(this.cyclePushSize + size) >= bandwidth) { + } else if (this.cyclePushSize > 0 && (double) (this.cyclePushSize + size) >= bandwidth) { // 3. delay the push to next cycle if exceeds throttling bandwidth. 
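For orientation while reading the ReplicationThrottler hunks above: callers are expected to drive the throttler in a check-sleep-push-account loop around each batch they ship. The sketch below only strings together the three members visible in this diff (the bandwidth constructor, getNextSleepInterval and addPushSize) and assumes, as the surrounding comments indicate, that the returned interval is in milliseconds; ThrottledShipSketch, shipOnce and the push Runnable are invented placeholders, not HBase code.

    import org.apache.hadoop.hbase.replication.regionserver.ReplicationThrottler;

    final class ThrottledShipSketch {
      // Sketch only: drive ReplicationThrottler around one push of 'sizeInBytes' bytes.
      // 'push' stands in for the real shipping call; it is not an HBase API.
      static void shipOnce(ReplicationThrottler throttler, int sizeInBytes, Runnable push)
          throws InterruptedException {
        long sleepMs = throttler.getNextSleepInterval(sizeInBytes);
        if (sleepMs > 0) {
          // stay within the current 100 ms cycle's bandwidth budget before pushing
          Thread.sleep(sleepMs);
        }
        push.run();
        // charge what was just pushed against the current cycle
        throttler.addPushSize(sizeInBytes);
      }
    }

If bandwidth is configured as 0 or negative, the constructor disables throttling and getNextSleepInterval simply returns 0, so the same loop degenerates to an unthrottled push.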
- // enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case - // where a cycle's first push size(currentSize) > bandwidth + // enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case + // where a cycle's first push size(currentSize) > bandwidth sleepTicks = nextCycleTick - now; this.cyclePushSize = 0; } @@ -101,8 +98,7 @@ public class ReplicationThrottler { /** * Add current size to the current cycle's total push size - * @param size is the current size added to the current cycle's - * total push size + * @param size is the current size added to the current cycle's total push size */ public void addPushSize(final int size) { if (this.enabled) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java index 4dd6611763e..40ecf2f2f01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; @@ -50,12 +51,11 @@ import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; *

      * We record all the open sequence number for a region in a special family in meta, which is called * 'rep_barrier', so there will be a sequence of open sequence number (b1, b2, b3, ...). We call - * [bn, bn+1) a range, and it is obvious that a region will always be on the same RS within a - * range. + * [bn, bn+1) a range, and it is obvious that a region will always be on the same RS within a range. *

      * When split and merge, we will also record the parent for the generated region(s) in the special - * family in meta. And also, we will write an extra 'open sequence number' for the parent - * region(s), which is the max sequence id of the region plus one. + * family in meta. And also, we will write an extra 'open sequence number' for the parent region(s), + * which is the max sequence id of the region plus one. *

      *

      *
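The "range" notion in the SerialReplicationChecker javadoc above ([bn, bn+1) between recorded open sequence numbers) reduces to locating a sequence id inside a sorted barrier array. The sketch below illustrates only that arithmetic, assuming a sorted long[] of barriers; BarrierRangeSketch and rangeIndex are made-up names, and the real checker additionally consults region state and parent regions before deciding whether an entry can be pushed.

    import java.util.Arrays;

    final class BarrierRangeSketch {
      // Returns which [barriers[i], barriers[i+1]) range 'seqId' falls into (0-based),
      // or -1 if it is before the first recorded open sequence number.
      static int rangeIndex(long[] barriers, long seqId) {
        int pos = Arrays.binarySearch(barriers, seqId);
        // binarySearch returns (-(insertion point) - 1) when the key is absent
        int insertionPoint = pos >= 0 ? pos + 1 : -pos - 1;
        return insertionPoint - 1;
      }
    }

For example, with barriers (10, 25, 40), sequence id 24 lands in range 0, 25 starts range 1, and 45 is in the last, still-open range 2.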

      @@ -170,8 +170,8 @@ class SerialReplicationChecker { // if a region is in OPENING state and we are in the last range, it is not safe to say we can push // even if the previous range is finished. private boolean isLastRangeAndOpening(ReplicationBarrierResult barrierResult, int index) { - return index == barrierResult.getBarriers().length && - barrierResult.getState() == RegionState.State.OPENING; + return index == barrierResult.getBarriers().length + && barrierResult.getState() == RegionState.State.OPENING; } private void recordCanPush(String encodedNameAsString, long seqId, long[] barriers, int index) { @@ -263,7 +263,7 @@ class SerialReplicationChecker { } public void waitUntilCanPush(Entry entry, Cell firstCellInEdit) - throws IOException, InterruptedException { + throws IOException, InterruptedException { byte[] row = CellUtil.cloneRow(firstCellInEdit); while (!canPush(entry, row)) { LOG.debug("Can not push {}, wait", entry); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java index 1de4c998546..1a8bbf74a2c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,8 +43,8 @@ public class SerialReplicationSourceWALReader extends ReplicationSourceWALReader private final SerialReplicationChecker checker; public SerialReplicationSourceWALReader(FileSystem fs, Configuration conf, - ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, - ReplicationSource source, String walGroupId) { + ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, + ReplicationSource source, String walGroupId) { super(fs, conf, logQueue, startPosition, filter, source, walGroupId); checker = new SerialReplicationChecker(conf, source); } @@ -108,7 +108,7 @@ public class SerialReplicationSourceWALReader extends ReplicationSourceWALReader } private void removeEntryFromStream(WALEntryStream entryStream, WALEntryBatch batch) - throws IOException { + throws IOException { entryStream.next(); firstCellInEntryBeforeFiltering = null; batch.setLastWalPosition(entryStream.getPosition()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java index b578587193d..88d86c3217d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java @@ -1,17 +1,23 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; @@ -29,12 +35,12 @@ public interface SourceFSConfigurationProvider { /** * Returns the source cluster file system configuration for the given source cluster replication * ID. - * @param sinkConf sink cluster configuration + * @param sinkConf sink cluster configuration * @param replicationClusterId unique ID which identifies the source cluster * @return source cluster file system configuration * @throws IOException for invalid directory or for a bad disk. */ public Configuration getConf(Configuration sinkConf, String replicationClusterId) - throws IOException; + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java index c78fe40b028..d09c821b9ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java index 8301dff26d6..65575aba5a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,6 @@ class WALEntryBatch { this.lastWalPath = lastWalPath; } - static WALEntryBatch endOfFile(Path lastWalPath) { WALEntryBatch batch = new WALEntryBatch(0, lastWalPath); batch.setLastWalPosition(-1L); @@ -174,9 +173,9 @@ class WALEntryBatch { @Override public String toString() { - return "WALEntryBatch [walEntries=" + walEntriesWithSize + ", lastWalPath=" + lastWalPath + - ", lastWalPosition=" + lastWalPosition + ", nbRowKeys=" + nbRowKeys + ", nbHFiles=" + - nbHFiles + ", heapSize=" + heapSize + ", lastSeqIds=" + lastSeqIds + ", endOfFile=" + - endOfFile + "]"; + return "WALEntryBatch [walEntries=" + walEntriesWithSize + ", lastWalPath=" + lastWalPath + + ", lastWalPosition=" + lastWalPosition + ", nbRowKeys=" + nbRowKeys + ", nbHFiles=" + + nbHFiles + ", heapSize=" + heapSize + ", lastSeqIds=" + lastSeqIds + ", endOfFile=" + + endOfFile + "]"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java index f93f8b058b2..861f2d72007 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; /** - * This exception should be thrown from any wal filter when the filter is expected - * to recover from the failures and it wants the replication to backup till it fails. - * There is special handling in replication wal reader to catch this exception and - * retry. + * This exception should be thrown from any wal filter when the filter is expected to recover from + * the failures and it wants the replication to backup till it fails. There is special handling in + * replication wal reader to catch this exception and retry. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public class WALEntryFilterRetryableException extends RuntimeException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java index f0b13e1bc2f..f7c5758708a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java @@ -18,22 +18,23 @@ package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.yetus.audience.InterfaceAudience; /** * Implementations are installed on a Replication Sink called from inside * ReplicationSink#replicateEntries to filter replicated WALEntries based off WALEntry attributes. 
- * Currently only table name and replication write time are exposed (WALEntry is a private, - * internal class so we cannot pass it here). To install, set - * hbase.replication.sink.walentryfilter to the name of the implementing - * class. Implementing class must have a no-param Constructor. - *

      This filter is of limited use. It is better to filter on the replication source rather than - * here after the edits have been shipped on the replication sink. That said, applications such - * as the hbase-indexer want to filter out any edits that were made before replication was enabled. + * Currently only table name and replication write time are exposed (WALEntry is a private, internal + * class so we cannot pass it here). To install, set + * hbase.replication.sink.walentryfilter to the name of the implementing class. + * Implementing class must have a no-param Constructor. + *

      + * This filter is of limited use. It is better to filter on the replication source rather than here + * after the edits have been shipped on the replication sink. That said, applications such as the + * hbase-indexer want to filter out any edits that were made before replication was enabled. * @see org.apache.hadoop.hbase.replication.WALEntryFilter for filtering on the replication - * source-side. + * source-side. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface WALEntrySinkFilter { @@ -43,13 +44,12 @@ public interface WALEntrySinkFilter { public static final String WAL_ENTRY_FILTER_KEY = "hbase.replication.sink.walentrysinkfilter"; /** - * Called after Construction. - * Use passed Connection to keep any context the filter might need. + * Called after Construction. Use passed Connection to keep any context the filter might need. */ void init(Connection connection); /** - * @param table Table edit is destined for. + * @param table Table edit is destined for. * @param writeTime Time at which the edit was created on the source. * @return True if we are to filter out the edit. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 441bc1beaee..70f2ce965a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,17 +73,18 @@ class WALEntryStream implements Closeable { /** * Create an entry stream over the given queue at the given start position - * @param logQueue the queue of WAL paths - * @param conf the {@link Configuration} to use to create {@link Reader} for this stream - * @param startPosition the position in the first WAL to start reading at + * @param logQueue the queue of WAL paths + * @param conf the {@link Configuration} to use to create {@link Reader} for this + * stream + * @param startPosition the position in the first WAL to start reading at * @param walFileLengthProvider provides the length of the WAL file - * @param serverName the server name which all WALs belong to - * @param metrics the replication metrics + * @param serverName the server name which all WALs belong to + * @param metrics the replication metrics * @throws IOException throw IO exception from stream */ - public WALEntryStream(ReplicationSourceLogQueue logQueue, Configuration conf, - long startPosition, WALFileLengthProvider walFileLengthProvider, ServerName serverName, - MetricsSource metrics, String walGroupId) throws IOException { + public WALEntryStream(ReplicationSourceLogQueue logQueue, Configuration conf, long startPosition, + WALFileLengthProvider walFileLengthProvider, ServerName serverName, MetricsSource metrics, + String walGroupId) throws IOException { this.logQueue = logQueue; this.fs = CommonFSUtils.getWALFileSystem(conf); this.conf = conf; @@ -109,7 +109,7 @@ class WALEntryStream implements Closeable { * Returns the next WAL entry in this stream but does not advance. */ public Entry peek() throws IOException { - return hasNext() ? currentEntry: null; + return hasNext() ? 
currentEntry : null; } /** @@ -148,7 +148,7 @@ class WALEntryStream implements Closeable { StringBuilder sb = new StringBuilder(); if (currentPath != null) { sb.append("currently replicating from: ").append(currentPath).append(" at position: ") - .append(currentPositionOfEntry).append("\n"); + .append(currentPositionOfEntry).append("\n"); } else { sb.append("no replication ongoing, waiting for new log"); } @@ -224,15 +224,15 @@ class WALEntryStream implements Closeable { if (currentPositionOfReader < stat.getLen()) { final long skippedBytes = stat.getLen() - currentPositionOfReader; // See the commits in HBASE-25924/HBASE-25932 for context. - LOG.warn("Reached the end of WAL {}. It was not closed cleanly," + - " so we did not parse {} bytes of data.", currentPath, skippedBytes); + LOG.warn("Reached the end of WAL {}. It was not closed cleanly," + + " so we did not parse {} bytes of data.", currentPath, skippedBytes); metrics.incrUncleanlyClosedWALs(); metrics.incrBytesSkippedInUncleanlyClosedWALs(skippedBytes); } } else if (currentPositionOfReader + trailerSize < stat.getLen()) { LOG.warn( - "Processing end of WAL {} at position {}, which is too far away from" + - " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", + "Processing end of WAL {} at position {}, which is too far away from" + + " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", currentPath, currentPositionOfReader, stat.getLen(), getCurrentPathStat()); setPosition(0); resetReader(); @@ -242,8 +242,8 @@ class WALEntryStream implements Closeable { } } if (LOG.isTraceEnabled()) { - LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + - (stat == null ? "N/A" : stat.getLen())); + LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + + (stat == null ? "N/A" : stat.getLen())); } metrics.incrCompletedWAL(); return true; @@ -268,8 +268,8 @@ class WALEntryStream implements Closeable { // See HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted // data, so we need to make sure that we do not read beyond the committed file length. if (LOG.isDebugEnabled()) { - LOG.debug("The provider tells us the valid length for " + currentPath + " is " + - fileLength.getAsLong() + ", but we have advanced to " + readerPos); + LOG.debug("The provider tells us the valid length for " + currentPath + " is " + + fileLength.getAsLong() + ", but we have advanced to " + readerPos); } resetReader(); return true; @@ -340,12 +340,12 @@ class WALEntryStream implements Closeable { } } catch (FileNotFoundException fnfe) { handleFileNotFound(path, fnfe); - } catch (RemoteException re) { + } catch (RemoteException re) { IOException ioe = re.unwrapRemoteException(FileNotFoundException.class); if (!(ioe instanceof FileNotFoundException)) { throw ioe; } - handleFileNotFound(path, (FileNotFoundException)ioe); + handleFileNotFound(path, (FileNotFoundException) ioe); } catch (LeaseNotRecoveredException lnre) { // HBASE-15019 the WAL was not closed due to some hiccup. 
LOG.warn("Try to recover the WAL lease " + path, lnre); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java index c60faa9e5db..b0550cc37cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.util.OptionalLong; - import org.apache.hadoop.fs.Path; import org.apache.yetus.audience.InterfaceAudience; /** * Used by replication to prevent replicating unacked log entries. See - * https://issues.apache.org/jira/browse/HBASE-14004 for more details. - * WALFileLengthProvider exists because we do not want to reference WALFactory and WALProvider - * directly in the replication code so in the future it will be easier to decouple them. - * Each walProvider will have its own implementation. + * https://issues.apache.org/jira/browse/HBASE-14004 for more details. WALFileLengthProvider exists + * because we do not want to reference WALFactory and WALProvider directly in the replication code + * so in the future it will be easier to decouple them. Each walProvider will have its own + * implementation. */ @InterfaceAudience.Private @FunctionalInterface diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java index 8fbe6ac418d..91323a72215 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java @@ -32,22 +32,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos; /** - * Implementation of secure Hadoop policy provider for mapping - * protocol interfaces to hbase-policy.xml entries. + * Implementation of secure Hadoop policy provider for mapping protocol interfaces to + * hbase-policy.xml entries. 
*/ @InterfaceAudience.Private public class HBasePolicyProvider extends PolicyProvider { - protected final static Service[] services = { - new Service("security.client.protocol.acl", ClientService.BlockingInterface.class), - new Service("security.client.protocol.acl", AdminService.BlockingInterface.class), - new Service("security.client.protocol.acl", - MasterProtos.HbckService.BlockingInterface.class), - new Service("security.client.protocol.acl", - RegistryProtos.ClientMetaService.BlockingInterface.class), - new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class), - new Service("security.masterregion.protocol.acl", - RegionServerStatusService.BlockingInterface.class) - }; + protected final static Service[] services = + { new Service("security.client.protocol.acl", ClientService.BlockingInterface.class), + new Service("security.client.protocol.acl", AdminService.BlockingInterface.class), + new Service("security.client.protocol.acl", MasterProtos.HbckService.BlockingInterface.class), + new Service("security.client.protocol.acl", + RegistryProtos.ClientMetaService.BlockingInterface.class), + new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class), + new Service("security.masterregion.protocol.acl", + RegionServerStatusService.BlockingInterface.class) }; @Override public Service[] getServices() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java index 3074fcea1dc..eb9913174e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java @@ -42,8 +42,8 @@ public class HBaseSaslRpcServer { private final SaslServer saslServer; public HBaseSaslRpcServer(SaslServerAuthenticationProvider provider, - Map saslProps, SecretManager secretManager) - throws IOException { + Map saslProps, SecretManager secretManager) + throws IOException { serverWithProvider = provider.createServer(secretManager, saslProps); saslServer = serverWithProvider.getServer(); } @@ -62,9 +62,7 @@ public class HBaseSaslRpcServer { } public String getAttemptingUser() { - return serverWithProvider.getAttemptingUser() - .map(Object::toString) - .orElse("Unknown"); + return serverWithProvider.getAttemptingUser().map(Object::toString).orElse("Unknown"); } public byte[] wrap(byte[] buf, int off, int len) throws SaslException { @@ -84,7 +82,7 @@ public class HBaseSaslRpcServer { } public static T getIdentifier(String id, - SecretManager secretManager) throws InvalidToken { + SecretManager secretManager) throws InvalidToken { byte[] tokenId = SaslUtil.decodeIdentifier(id); T tokenIdentifier = secretManager.createIdentifier(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java index 2bf351b6325..5f9433a3f14 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java index 15e5e09e9c4..8912c34c51b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.security.access; @@ -28,7 +27,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; @@ -52,6 +50,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -59,7 +58,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; public class AccessChecker { private static final Logger LOG = LoggerFactory.getLogger(AccessChecker.class); private static final Logger AUDITLOG = - LoggerFactory.getLogger("SecurityLogger." + AccessChecker.class.getName()); + LoggerFactory.getLogger("SecurityLogger." + AccessChecker.class.getName()); private final AuthManager authManager; /** Group service to retrieve the user group information */ @@ -71,7 +70,6 @@ public class AccessChecker { /** * Constructor with existing configuration - * * @param conf Existing configuration to use */ public AccessChecker(final Configuration conf) { @@ -85,27 +83,26 @@ public class AccessChecker { /** * Authorizes that the current user has any of the given permissions to access the table. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type. + * @param user Active user to which authorization checks should be applied + * @param request Request type. * @param tableName Table requested * @param permissions Actions being requested - * @throws IOException if obtaining the current user fails + * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - public void requireAccess(User user, String request, TableName tableName, - Action... permissions) throws IOException { + public void requireAccess(User user, String request, TableName tableName, Action... 
permissions) + throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.accessUserTable(user, tableName, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, null, null); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + null, null); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, null, null); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + null, null); } } logResult(result); @@ -116,33 +113,30 @@ public class AccessChecker { /** * Authorizes that the current user has global privileges for the given action. - * @param user Active user to which authorization checks should be applied - * @param request Request type + * @param user Active user to which authorization checks should be applied + * @param request Request type * @param filterUser User name to be filtered from permission as requested - * @param perm The action being requested - * @throws IOException if obtaining the current user fails + * @param perm The action being requested + * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if authorization is denied */ public void requirePermission(User user, String request, String filterUser, Action perm) - throws IOException { + throws IOException { requireGlobalPermission(user, request, perm, null, null, filterUser); } /** - * Checks that the user has the given global permission. The generated - * audit log message will contain context information for the operation - * being authorized, based on the given parameters. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param perm Action being requested - * @param tableName Affected table name. - * @param familyMap Affected column families. + * Checks that the user has the given global permission. The generated audit log message will + * contain context information for the operation being authorized, based on the given parameters. + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param perm Action being requested + * @param tableName Affected table name. + * @param familyMap Affected column families. * @param filterUser User name to be filtered from permission as requested */ - public void requireGlobalPermission(User user, String request, - Action perm, TableName tableName, - Map> familyMap, String filterUser) throws IOException { + public void requireGlobalPermission(User user, String request, Action perm, TableName tableName, + Map> familyMap, String filterUser) throws IOException { AuthResult result; if (authManager.authorizeUserGlobal(user, perm)) { result = AuthResult.allow(request, "Global check allowed", user, perm, tableName, familyMap); @@ -154,23 +148,21 @@ public class AccessChecker { logResult(result); if (!result.isAllowed()) { throw new AccessDeniedException( - "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") - + "' (global, action=" + perm.toString() + ")"); + "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") + + "' (global, action=" + perm.toString() + ")"); } } /** - * Checks that the user has the given global permission. 
The generated - * audit log message will contain context information for the operation - * being authorized, based on the given parameters. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type + * Checks that the user has the given global permission. The generated audit log message will + * contain context information for the operation being authorized, based on the given parameters. + * @param user Active user to which authorization checks should be applied + * @param request Request type * @param perm Action being requested * @param namespace The given namespace */ - public void requireGlobalPermission(User user, String request, Action perm, - String namespace) throws IOException { + public void requireGlobalPermission(User user, String request, Action perm, String namespace) + throws IOException { AuthResult authResult; if (authManager.authorizeUserGlobal(user, perm)) { authResult = AuthResult.allow(request, "Global check allowed", user, perm, null); @@ -181,27 +173,27 @@ public class AccessChecker { authResult.getParams().setNamespace(namespace); logResult(authResult); throw new AccessDeniedException( - "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") - + "' (global, action=" + perm.toString() + ")"); + "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") + + "' (global, action=" + perm.toString() + ")"); } } /** * Checks that the user has the given global or namespace permission. - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param namespace Name space as requested - * @param filterUser User name to be filtered from permission as requested + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param namespace Name space as requested + * @param filterUser User name to be filtered from permission as requested * @param permissions Actions being requested */ public void requireNamespacePermission(User user, String request, String namespace, - String filterUser, Action... permissions) throws IOException { + String filterUser, Action... permissions) throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserNamespace(user, namespace, permission)) { result = - AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); + AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); break; } else { // rest of the world @@ -217,23 +209,22 @@ public class AccessChecker { /** * Checks that the user has the given global or namespace permission. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param namespace The given namespace - * @param tableName Table requested - * @param familyMap Column family map requested + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param namespace The given namespace + * @param tableName Table requested + * @param familyMap Column family map requested * @param permissions Actions being requested */ public void requireNamespacePermission(User user, String request, String namespace, - TableName tableName, Map> familyMap, - Action... permissions) throws IOException { + TableName tableName, Map> familyMap, Action... 
permissions) + throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserNamespace(user, namespace, permission)) { result = - AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); + AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); result.getParams().setTableName(tableName).setFamilies(familyMap); break; } else { @@ -249,32 +240,31 @@ public class AccessChecker { } /** - * Authorizes that the current user has any of the given permissions for the - * given table, column family and column qualifier. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param tableName Table requested - * @param family Column family requested - * @param qualifier Column qualifier requested - * @param filterUser User name to be filtered from permission as requested + * Authorizes that the current user has any of the given permissions for the given table, column + * family and column qualifier. + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param tableName Table requested + * @param family Column family requested + * @param qualifier Column qualifier requested + * @param filterUser User name to be filtered from permission as requested * @param permissions Actions being requested - * @throws IOException if obtaining the current user fails + * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ public void requirePermission(User user, String request, TableName tableName, byte[] family, - byte[] qualifier, String filterUser, Action... permissions) throws IOException { + byte[] qualifier, String filterUser, Action... permissions) throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserTable(user, tableName, family, qualifier, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, family, qualifier); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + family, qualifier); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, family, qualifier); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + family, qualifier); } } result.getParams().addExtraParam("filterUser", filterUser); @@ -285,32 +275,30 @@ public class AccessChecker { } /** - * Authorizes that the current user has any of the given permissions for the - * given table, column family and column qualifier. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type + * Authorizes that the current user has any of the given permissions for the given table, column + * family and column qualifier. + * @param user Active user to which authorization checks should be applied + * @param request Request type * @param tableName Table requested * @param family Column family param * @param qualifier Column qualifier param * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - public void requireTablePermission(User user, String request, - TableName tableName,byte[] family, byte[] qualifier, - Action... 
permissions) throws IOException { + public void requireTablePermission(User user, String request, TableName tableName, byte[] family, + byte[] qualifier, Action... permissions) throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserTable(user, tableName, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, null, null); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + null, null); result.getParams().setFamily(family).setQualifier(qualifier); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, family, qualifier); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + family, qualifier); result.getParams().setFamily(family).setQualifier(qualifier); } } @@ -322,13 +310,13 @@ public class AccessChecker { /** * Check if caller is granting or revoking superusers's or supergroups's permissions. - * @param request request name - * @param caller caller + * @param request request name + * @param caller caller * @param userToBeChecked target user or group * @throws IOException AccessDeniedException if target user is superuser */ public void performOnSuperuser(String request, User caller, String userToBeChecked) - throws IOException { + throws IOException { List userGroups = new ArrayList<>(); userGroups.add(userToBeChecked); if (!AuthUtil.isGroupPrincipal(userToBeChecked)) { @@ -338,28 +326,23 @@ public class AccessChecker { } for (String name : userGroups) { if (Superusers.isSuperUser(name)) { - AuthResult result = AuthResult.deny( - request, - "Granting or revoking superusers's or supergroups's permissions is not allowed", - caller, - Action.ADMIN, - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + AuthResult result = AuthResult.deny(request, + "Granting or revoking superusers's or supergroups's permissions is not allowed", caller, + Action.ADMIN, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); logResult(result); throw new AccessDeniedException(result.getReason()); } } } - public void checkLockPermissions(User user, String namespace, - TableName tableName, RegionInfo[] regionInfos, String reason) - throws IOException { + public void checkLockPermissions(User user, String namespace, TableName tableName, + RegionInfo[] regionInfos, String reason) throws IOException { if (namespace != null && !namespace.isEmpty()) { requireNamespacePermission(user, reason, namespace, null, Action.ADMIN, Action.CREATE); } else if (tableName != null || (regionInfos != null && regionInfos.length > 0)) { // So, either a table or regions op. If latter, check perms ons table. - TableName tn = tableName != null? tableName: regionInfos[0].getTable(); - requireTablePermission(user, reason, tn, null, null, - Action.ADMIN, Action.CREATE); + TableName tn = tableName != null ? tableName : regionInfos[0].getTable(); + requireTablePermission(user, reason, tn, null, null, Action.ADMIN, Action.CREATE); } else { throw new DoNotRetryIOException("Invalid lock level when requesting permissions."); } @@ -370,13 +353,12 @@ public class AccessChecker { User user = result.getUser(); UserGroupInformation ugi = user != null ? 
user.getUGI() : null; AUDITLOG.trace( - "Access {} for user {}; reason: {}; remote address: {}; request: {}; context: {};" + - "auth method: {}", + "Access {} for user {}; reason: {}; remote address: {}; request: {}; context: {};" + + "auth method: {}", (result.isAllowed() ? "allowed" : "denied"), - (user != null ? user.getShortName() : "UNKNOWN"), - result.getReason(), RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""), - result.getRequest(), result.toContextString(), - ugi != null ? ugi.getAuthenticationMethod() : "UNKNOWN"); + (user != null ? user.getShortName() : "UNKNOWN"), result.getReason(), + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""), result.getRequest(), + result.toContextString(), ugi != null ? ugi.getAuthenticationMethod() : "UNKNOWN"); } } @@ -385,7 +367,7 @@ public class AccessChecker { * any privilege but for others caller must have ADMIN privilege. */ public User validateCallerWithFilterUser(User caller, TablePermission tPerm, String inputUserName) - throws IOException { + throws IOException { User filterUser = null; if (!caller.getShortName().equals(inputUserName)) { // User should have admin privilege if checking permission for other users @@ -425,7 +407,7 @@ public class AccessChecker { this.shortName = new HadoopKerberosName(this.name).getShortName(); } catch (IOException ioe) { throw new IllegalArgumentException( - "Illegal principal name " + this.name + ": " + ioe.toString(), ioe); + "Illegal principal name " + this.name + ": " + ioe.toString(), ioe); } } return shortName; @@ -444,14 +426,14 @@ public class AccessChecker { @Override public T runAs(PrivilegedAction action) { throw new UnsupportedOperationException( - "Method not supported, this class has limited implementation"); + "Method not supported, this class has limited implementation"); } @Override public T runAs(PrivilegedExceptionAction action) - throws IOException, InterruptedException { + throws IOException, InterruptedException { throw new UnsupportedOperationException( - "Method not supported, this class has limited implementation"); + "Method not supported, this class has limited implementation"); } @Override @@ -476,8 +458,7 @@ public class AccessChecker { /** * Retrieve the groups of the given user. - * @param user User name - * @return Groups + * @param user User name n */ public static List getUserGroups(String user) { try { @@ -490,8 +471,8 @@ public class AccessChecker { /** * Authorizes that if the current user has the given permissions. 
- * @param user Active user to which authorization checks should be applied - * @param request Request type + * @param user Active user to which authorization checks should be applied + * @param request Request type * @param permission Actions being requested * @return True if the user has the specific permission */ @@ -512,10 +493,10 @@ public class AccessChecker { for (Action action : nsPerm.getActions()) { if (getAuthManager().authorizeUserNamespace(user, nsPerm.getNamespace(), action)) { authResult = - AuthResult.allow(request, "Namespace action allowed", user, action, null, null); + AuthResult.allow(request, "Namespace action allowed", user, action, null, null); } else { authResult = - AuthResult.deny(request, "Namespace action denied", user, action, null, null); + AuthResult.deny(request, "Namespace action denied", user, action, null, null); } AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { @@ -540,7 +521,7 @@ public class AccessChecker { } private AuthResult permissionGranted(String request, User user, Action permRequest, - TableName tableName, byte[] family, byte[] qualifier) { + TableName tableName, byte[] family, byte[] qualifier) { Map> map = makeFamilyMap(family, qualifier); return permissionGranted(request, user, permRequest, tableName, map); } @@ -552,15 +533,15 @@ public class AccessChecker { * Note: Ordering of the authorization checks has been carefully optimized to short-circuit the * most common requests and minimize the amount of processing required. *

      - * @param request User request - * @param user User name + * @param request User request + * @param user User name * @param permRequest the action being requested - * @param tableName Table name - * @param families the map of column families to qualifiers present in the request + * @param tableName Table name + * @param families the map of column families to qualifiers present in the request * @return an authorization result */ public AuthResult permissionGranted(String request, User user, Action permRequest, - TableName tableName, Map> families) { + TableName tableName, Map> families) { // 1. All users need read access to hbase:meta table. // this is a very common operation, so deal with it quickly. if (TableName.META_TABLE_NAME.equals(tableName)) { @@ -596,8 +577,10 @@ public class AccessChecker { // for each qualifier of the family Set familySet = (Set) family.getValue(); for (byte[] qualifier : familySet) { - if (!getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, - permRequest)) { + if ( + !getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, + permRequest) + ) { return AuthResult.deny(request, "Failed qualifier check", user, permRequest, tableName, makeFamilyMap(family.getKey(), qualifier)); } @@ -605,8 +588,10 @@ public class AccessChecker { } else if (family.getValue() instanceof List) { // List List cellList = (List) family.getValue(); for (Cell cell : cellList) { - if (!getAuthManager().authorizeUserTable(user, tableName, family.getKey(), - CellUtil.cloneQualifier(cell), permRequest)) { + if ( + !getAuthManager().authorizeUserTable(user, tableName, family.getKey(), + CellUtil.cloneQualifier(cell), permRequest) + ) { return AuthResult.deny(request, "Failed qualifier check", user, permRequest, tableName, makeFamilyMap(family.getKey(), CellUtil.cloneQualifier(cell))); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index 907d550f02b..1f6dd9c6e74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.util.Map; import java.util.Objects; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -32,20 +29,19 @@ import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.yetus.audience.InterfaceAudience; /** * NOTE: for internal use only by AccessController implementation - * *

      - * TODO: There is room for further performance optimization here. - * Calling AuthManager.authorize() per KeyValue imposes a fair amount of - * overhead. A more optimized solution might look at the qualifiers where - * permissions are actually granted and explicitly limit the scan to those. + * TODO: There is room for further performance optimization here. Calling AuthManager.authorize() + * per KeyValue imposes a fair amount of overhead. A more optimized solution might look at the + * qualifiers where permissions are actually granted and explicitly limit the scan to those. *

 * <p>

      - * We should aim to use this _only_ when access to the requested column families - * is not granted at the column family levels. If table or column family - * access succeeds, then there is no need to impose the overhead of this filter. + * We should aim to use this _only_ when access to the requested column families is not granted at + * the column family levels. If table or column family access succeeds, then there is no need to + * impose the overhead of this filter. *

      */ @InterfaceAudience.Private @@ -75,8 +71,8 @@ class AccessControlFilter extends FilterBase { AccessControlFilter() { } - AccessControlFilter(AuthManager mgr, User ugi, TableName tableName, - Strategy strategy, Map cfVsMaxVersions) { + AccessControlFilter(AuthManager mgr, User ugi, TableName tableName, Strategy strategy, + Map cfVsMaxVersions) { authManager = mgr; table = tableName; user = ugi; @@ -98,20 +94,21 @@ class AccessControlFilter extends FilterBase { if (isSystemTable) { return ReturnCode.INCLUDE; } - if (prevFam.getBytes() == null - || !(PrivateCellUtil.matchingFamily(cell, prevFam.getBytes(), prevFam.getOffset(), - prevFam.getLength()))) { + if ( + prevFam.getBytes() == null || !(PrivateCellUtil.matchingFamily(cell, prevFam.getBytes(), + prevFam.getOffset(), prevFam.getLength())) + ) { prevFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); // Similar to VisibilityLabelFilter familyMaxVersions = cfVsMaxVersions.get(prevFam); // Family is changed. Just unset curQualifier. prevQual.unset(); } - if (prevQual.getBytes() == null - || !(PrivateCellUtil.matchingQualifier(cell, prevQual.getBytes(), prevQual.getOffset(), - prevQual.getLength()))) { - prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + if ( + prevQual.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, prevQual.getBytes(), + prevQual.getOffset(), prevQual.getLength())) + ) { + prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); currentVersions = 0; } currentVersions++; @@ -128,15 +125,17 @@ class AccessControlFilter extends FilterBase { return ReturnCode.INCLUDE; } } - break; + break; // Cell permissions can override table or CF permissions case CHECK_CELL_DEFAULT: { - if (authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) || - authManager.authorizeCell(user, table, cell, Permission.Action.READ)) { + if ( + authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) + || authManager.authorizeCell(user, table, cell, Permission.Action.READ) + ) { return ReturnCode.INCLUDE; } } - break; + break; default: throw new RuntimeException("Unhandled strategy " + strategy); } @@ -156,7 +155,7 @@ class AccessControlFilter extends FilterBase { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { // no implementation, server-side use only throw new UnsupportedOperationException( "Serialization not supported. Intended for server-side use only."); @@ -168,8 +167,8 @@ class AccessControlFilter extends FilterBase { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray() */ - public static AccessControlFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static AccessControlFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { // no implementation, server-side use only throw new UnsupportedOperationException( "Serialization not supported. 
Intended for server-side use only."); @@ -180,15 +179,13 @@ class AccessControlFilter extends FilterBase { if (!(obj instanceof AccessControlFilter)) { return false; } - if (this == obj){ + if (this == obj) { return true; } - AccessControlFilter f=(AccessControlFilter)obj; - return this.authManager.equals(f.authManager) && - this.table.equals(f.table) && - this.user.equals(f.user) && - this.strategy.equals(f.strategy) && - this.cfVsMaxVersions.equals(f.cfVsMaxVersions); + AccessControlFilter f = (AccessControlFilter) obj; + return this.authManager.equals(f.authManager) && this.table.equals(f.table) + && this.user.equals(f.user) && this.strategy.equals(f.strategy) + && this.cfVsMaxVersions.equals(f.cfVsMaxVersions); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 82eb317ec48..46ff234890c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.security.access; @@ -143,49 +142,41 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; /** - * Provides basic authorization checks for data access and administrative - * operations. - * + * Provides basic authorization checks for data access and administrative operations. *

      - * {@code AccessController} performs authorization checks for HBase operations - * based on: + * {@code AccessController} performs authorization checks for HBase operations based on: *

 * <ul>
- * <li>the identity of the user performing the operation</li>
- * <li>the scope over which the operation is performed, in increasing
- * specificity: global, table, column family, or qualifier</li>
- * <li>the type of action being performed (as mapped to
- * {@link Permission.Action} values)</li>
+ * <li>the identity of the user performing the operation</li>
+ * <li>the scope over which the operation is performed, in increasing specificity: global, table,
+ * column family, or qualifier</li>
+ * <li>the type of action being performed (as mapped to {@link Permission.Action} values)</li>
 * </ul>
 * <p>

      - * If the authorization check fails, an {@link AccessDeniedException} - * will be thrown for the operation. + * If the authorization check fails, an {@link AccessDeniedException} will be thrown for the + * operation. *

- *
 * <p>

      - * To perform authorization checks, {@code AccessController} relies on the - * RpcServerEngine being loaded to provide - * the user identities for remote requests. + * To perform authorization checks, {@code AccessController} relies on the RpcServerEngine being + * loaded to provide the user identities for remote requests. *

- *
 * <p>

      - * The access control lists used for authorization can be manipulated via the - * exposed {@link AccessControlService} Interface implementation, and the associated - * {@code grant}, {@code revoke}, and {@code user_permission} HBase shell - * commands. + * The access control lists used for authorization can be manipulated via the exposed + * {@link AccessControlService} Interface implementation, and the associated {@code grant}, + * {@code revoke}, and {@code user_permission} HBase shell commands. *

      */ @CoreCoprocessor @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class AccessController implements MasterCoprocessor, RegionCoprocessor, - RegionServerCoprocessor, AccessControlService.Interface, - MasterObserver, RegionObserver, RegionServerObserver, EndpointObserver, BulkLoadObserver { + RegionServerCoprocessor, AccessControlService.Interface, MasterObserver, RegionObserver, + RegionServerObserver, EndpointObserver, BulkLoadObserver { // TODO: encapsulate observer functions into separate class/sub-class. private static final Logger LOG = LoggerFactory.getLogger(AccessController.class); private static final Logger AUDITLOG = - LoggerFactory.getLogger("SecurityLogger."+AccessController.class.getName()); + LoggerFactory.getLogger("SecurityLogger." + AccessController.class.getName()); private static final String CHECK_COVERING_PERM = "check_covering_perm"; private static final String TAG_CHECK_PASSED = "tag_check_passed"; private static final byte[] TRUE = Bytes.toBytes(true); @@ -196,21 +187,23 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** flags if we are running on a region of the _acl_ table */ private boolean aclRegion = false; - /** defined only for Endpoint implementation, so it can have way to - access region services */ + /** + * defined only for Endpoint implementation, so it can have way to access region services + */ private RegionCoprocessorEnvironment regionEnv; /** Mapping of scanner instances to the user who created them */ - private Map scannerOwners = - new MapMaker().weakKeys().makeMap(); + private Map scannerOwners = new MapMaker().weakKeys().makeMap(); private Map> tableAcls; /** Provider for mapping principal names to Users */ private UserProvider userProvider; - /** if we are active, usually false, only true if "hbase.security.authorization" - has been set to true in site configuration */ + /** + * if we are active, usually false, only true if "hbase.security.authorization" has been set to + * true in site configuration + */ private boolean authorizationEnabled; /** if we are able to support cell ACLs */ @@ -219,8 +212,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** if we should check EXEC permissions */ private boolean shouldCheckExecPermission; - /** if we should terminate access checks early as soon as table or CF grants - allow access; pre-0.98 compatible behavior */ + /** + * if we should terminate access checks early as soon as table or CF grants allow access; pre-0.98 + * compatible behavior + */ private boolean compatibleEarlyTermination; /** if we have been successfully initialized */ @@ -230,8 +225,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, private volatile boolean aclTabAvailable = false; public static boolean isCellAuthorizationSupported(Configuration conf) { - return AccessChecker.isAuthorizationSupported(conf) && - (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); + return AccessChecker.isAuthorizationSupported(conf) + && (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); } public Region getRegion() { @@ -248,8 +243,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, Map> tables = PermissionStorage.loadAll(region); // For each table, write out the table's permissions to the respective // znode for that table. 
- for (Map.Entry> t: - tables.entrySet()) { + for (Map.Entry> t : tables.entrySet()) { byte[] entry = t.getKey(); ListMultimap perms = t.getValue(); byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf); @@ -259,27 +253,25 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } /** - * Writes all table ACLs for the tables in the given Map up into ZooKeeper - * znodes. This is called to synchronize ACL changes following {@code _acl_} - * table updates. + * Writes all table ACLs for the tables in the given Map up into ZooKeeper znodes. This is called + * to synchronize ACL changes following {@code _acl_} table updates. */ - private void updateACL(RegionCoprocessorEnvironment e, - final Map> familyMap) { + private void updateACL(RegionCoprocessorEnvironment e, final Map> familyMap) { Set entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR); for (Map.Entry> f : familyMap.entrySet()) { List cells = f.getValue(); - for (Cell cell: cells) { + for (Cell cell : cells) { if (CellUtil.matchingFamily(cell, PermissionStorage.ACL_LIST_FAMILY)) { entries.add(CellUtil.cloneRow(cell)); } } } Configuration conf = regionEnv.getConfiguration(); - byte [] currentEntry = null; + byte[] currentEntry = null; // TODO: Here we are already on the ACL region. (And it is single // region) We can even just get the region from the env and do get // directly. The short circuit connection would avoid the RPC overhead - // so no socket communication, req write/read .. But we have the PB + // so no socket communication, req write/read .. But we have the PB // to and fro conversion overhead. get req is converted to PB req // and results are converted to PB results 1st and then to POJOs // again. We could have avoided such at least in ACL table context.. @@ -287,31 +279,30 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, for (byte[] entry : entries) { currentEntry = entry; ListMultimap perms = - PermissionStorage.getPermissions(conf, entry, t, null, null, null, false); + PermissionStorage.getPermissions(conf, entry, t, null, null, null, false); byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf); zkPermissionWatcher.writeToZookeeper(entry, serialized); } - } catch(IOException ex) { - LOG.error("Failed updating permissions mirror for '" + - (currentEntry == null? "null": Bytes.toString(currentEntry)) + "'", ex); + } catch (IOException ex) { + LOG.error("Failed updating permissions mirror for '" + + (currentEntry == null ? "null" : Bytes.toString(currentEntry)) + "'", ex); } } /** - * Check the current user for authorization to perform a specific action - * against the given set of row data. - * @param opType the operation type - * @param user the user - * @param e the coprocessor environment - * @param families the map of column families to qualifiers present in - * the request - * @param actions the desired actions + * Check the current user for authorization to perform a specific action against the given set of + * row data. + * @param opType the operation type + * @param user the user + * @param e the coprocessor environment + * @param families the map of column families to qualifiers present in the request + * @param actions the desired actions * @return an authorization result */ private AuthResult permissionGranted(OpType opType, User user, RegionCoprocessorEnvironment e, - Map> families, Action... actions) { + Map> families, Action... 
actions) { AuthResult result = null; - for (Action action: actions) { + for (Action action : actions) { result = accessChecker.permissionGranted(opType.toString(), user, action, e.getRegion().getRegionInfo().getTable(), families); if (!result.isAllowed()) { @@ -322,70 +313,63 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } public void requireAccess(ObserverContext ctx, String request, TableName tableName, - Action... permissions) throws IOException { + Action... permissions) throws IOException { accessChecker.requireAccess(getActiveUser(ctx), request, tableName, permissions); } - public void requirePermission(ObserverContext ctx, String request, - Action perm) throws IOException { + public void requirePermission(ObserverContext ctx, String request, Action perm) + throws IOException { accessChecker.requirePermission(getActiveUser(ctx), request, null, perm); } - public void requireGlobalPermission(ObserverContext ctx, String request, - Action perm, TableName tableName, - Map> familyMap) throws IOException { + public void requireGlobalPermission(ObserverContext ctx, String request, Action perm, + TableName tableName, Map> familyMap) throws IOException { accessChecker.requireGlobalPermission(getActiveUser(ctx), request, perm, tableName, familyMap, null); } - public void requireGlobalPermission(ObserverContext ctx, String request, - Action perm, String namespace) throws IOException { - accessChecker.requireGlobalPermission(getActiveUser(ctx), - request, perm, namespace); + public void requireGlobalPermission(ObserverContext ctx, String request, Action perm, + String namespace) throws IOException { + accessChecker.requireGlobalPermission(getActiveUser(ctx), request, perm, namespace); } public void requireNamespacePermission(ObserverContext ctx, String request, String namespace, - Action... permissions) throws IOException { - accessChecker.requireNamespacePermission(getActiveUser(ctx), - request, namespace, null, permissions); + Action... permissions) throws IOException { + accessChecker.requireNamespacePermission(getActiveUser(ctx), request, namespace, null, + permissions); } public void requireNamespacePermission(ObserverContext ctx, String request, String namespace, - TableName tableName, Map> familyMap, - Action... permissions) throws IOException { - accessChecker.requireNamespacePermission(getActiveUser(ctx), - request, namespace, tableName, familyMap, - permissions); + TableName tableName, Map> familyMap, Action... permissions) + throws IOException { + accessChecker.requireNamespacePermission(getActiveUser(ctx), request, namespace, tableName, + familyMap, permissions); } public void requirePermission(ObserverContext ctx, String request, TableName tableName, - byte[] family, byte[] qualifier, Action... permissions) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), request, - tableName, family, qualifier, null, permissions); + byte[] family, byte[] qualifier, Action... permissions) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), request, tableName, family, qualifier, null, + permissions); } - public void requireTablePermission(ObserverContext ctx, String request, - TableName tableName,byte[] family, byte[] qualifier, - Action... 
permissions) throws IOException { - accessChecker.requireTablePermission(getActiveUser(ctx), - request, tableName, family, qualifier, permissions); + public void requireTablePermission(ObserverContext ctx, String request, TableName tableName, + byte[] family, byte[] qualifier, Action... permissions) throws IOException { + accessChecker.requireTablePermission(getActiveUser(ctx), request, tableName, family, qualifier, + permissions); } - public void checkLockPermissions(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String reason) - throws IOException { - accessChecker.checkLockPermissions(getActiveUser(ctx), - namespace, tableName, regionInfos, reason); + public void checkLockPermissions(ObserverContext ctx, String namespace, TableName tableName, + RegionInfo[] regionInfos, String reason) throws IOException { + accessChecker.checkLockPermissions(getActiveUser(ctx), namespace, tableName, regionInfos, + reason); } /** - * Returns true if the current user is allowed the given action - * over at least one of the column qualifiers in the given column families. + * Returns true if the current user is allowed the given action over at least one of + * the column qualifiers in the given column families. */ - private boolean hasFamilyQualifierPermission(User user, - Action perm, - RegionCoprocessorEnvironment env, - Map> familyMap) + private boolean hasFamilyQualifierPermission(User user, Action perm, + RegionCoprocessorEnvironment env, Map> familyMap) throws IOException { RegionInfo hri = env.getRegion().getRegionInfo(); TableName tableName = hri.getTable(); @@ -396,12 +380,12 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, if (familyMap != null && familyMap.size() > 0) { // at least one family must be allowed - for (Map.Entry> family : - familyMap.entrySet()) { + for (Map.Entry> family : familyMap.entrySet()) { if (family.getValue() != null && !family.getValue().isEmpty()) { for (byte[] qualifier : family.getValue()) { - if (getAuthManager().authorizeUserTable(user, tableName, - family.getKey(), qualifier, perm)) { + if ( + getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, perm) + ) { return true; } } @@ -443,12 +427,11 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** * Determine if cell ACLs covered by the operation grant access. This is expensive. - * @return false if cell ACLs failed to grant access, true otherwise - * @throws IOException + * @return false if cell ACLs failed to grant access, true otherwise n */ private boolean checkCoveringPermission(User user, OpType request, RegionCoprocessorEnvironment e, - byte[] row, Map> familyMap, long opTs, Action... actions) - throws IOException { + byte[] row, Map> familyMap, long opTs, Action... actions) + throws IOException { if (!cellFeaturesEnabled) { return false; } @@ -462,36 +445,37 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // consider only one such passing cell. In case of Delete we have to consider all the cell // versions under this passing version. When Delete Mutation contains columns which are a // version delete just consider only one version for those column cells. 
- boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE); + boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE); if (considerCellTs) { get.setMaxVersions(); } else { get.setMaxVersions(1); } boolean diffCellTsFromOpTs = false; - for (Map.Entry> entry: familyMap.entrySet()) { + for (Map.Entry> entry : familyMap.entrySet()) { byte[] col = entry.getKey(); // TODO: HBASE-7114 could possibly unify the collection type in family // maps so we would not need to do this if (entry.getValue() instanceof Set) { - Set set = (Set)entry.getValue(); + Set set = (Set) entry.getValue(); if (set == null || set.isEmpty()) { get.addFamily(col); } else { - for (byte[] qual: set) { + for (byte[] qual : set) { get.addColumn(col, qual); } } } else if (entry.getValue() instanceof List) { - List list = (List)entry.getValue(); + List list = (List) entry.getValue(); if (list == null || list.isEmpty()) { get.addFamily(col); } else { // In case of family delete, a Cell will be added into the list with Qualifier as null. for (Cell cell : list) { - if (cell.getQualifierLength() == 0 - && (cell.getTypeByte() == Type.DeleteFamily.getCode() - || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode())) { + if ( + cell.getQualifierLength() == 0 && (cell.getTypeByte() == Type.DeleteFamily.getCode() + || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode()) + ) { get.addFamily(col); } else { get.addColumn(col, CellUtil.cloneQualifier(cell)); @@ -506,8 +490,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } else if (entry.getValue() == null) { get.addFamily(col); } else { - throw new RuntimeException("Unhandled collection type " + - entry.getValue().getClass().getName()); + throw new RuntimeException( + "Unhandled collection type " + entry.getValue().getClass().getName()); } } // We want to avoid looking into the future. So, if the cells of the @@ -554,7 +538,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, cells.clear(); // scan with limit as 1 to hold down memory use on wide rows more = scanner.next(cells, scannerContext); - for (Cell cell: cells) { + for (Cell cell : cells) { if (LOG.isTraceEnabled()) { LOG.trace("Found cell " + cell); } @@ -571,8 +555,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // null/empty qualifier is used to denote a Family delete. The TS and delete type // associated with this is applicable for all columns within the family. That is // why the below (col.getQualifierLength() == 0) check. - if ((col.getQualifierLength() == 0 && request == OpType.DELETE) - || CellUtil.matchingQualifier(cell, col)) { + if ( + (col.getQualifierLength() == 0 && request == OpType.DELETE) + || CellUtil.matchingQualifier(cell, col) + ) { byte type = col.getTypeByte(); if (considerCellTs) { curColCheckTs = col.getTimestamp(); @@ -582,7 +568,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // that column. 
Check all versions when Type is DeleteColumn or DeleteFamily // One version delete types are Delete/DeleteFamilyVersion curColAllVersions = (KeyValue.Type.DeleteColumn.getCode() == type) - || (KeyValue.Type.DeleteFamily.getCode() == type); + || (KeyValue.Type.DeleteFamily.getCode() == type); break; } } @@ -592,7 +578,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, continue; } foundColumn = true; - for (Action action: actions) { + for (Action action : actions) { // Are there permissions for this user for the cell? if (!getAuthManager().authorizeCell(user, getTableName(e), cell, action)) { // We can stop if the cell ACL denies access @@ -618,9 +604,9 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, private static void addCellPermissions(final byte[] perms, Map> familyMap) { // Iterate over the entries in the familyMap, replacing the cells therein // with new cells including the ACL data - for (Map.Entry> e: familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { List newCells = Lists.newArrayList(); - for (Cell cell: e.getValue()) { + for (Cell cell : e.getValue()) { // Prepend the supplied perms in a new ACL tag to an update list of tags for the cell List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, perms)); @@ -680,8 +666,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); if (!cellFeaturesEnabled) { LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS - + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY - + " accordingly."); + + " is required to persist cell ACLs. 
Consider setting " + HFile.FORMAT_VERSION_KEY + + " accordingly."); } if (env instanceof MasterCoprocessorEnvironment) { @@ -696,7 +682,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env; if (rsEnv instanceof HasRegionServerServices) { RegionServerServices rsServices = - ((HasRegionServerServices) rsEnv).getRegionServerServices(); + ((HasRegionServerServices) rsEnv).getRegionServerServices(); zkPermissionWatcher = rsServices.getZKPermissionWatcher(); accessChecker = rsServices.getAccessChecker(); } @@ -708,7 +694,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT); if (regionEnv instanceof HasRegionServerServices) { RegionServerServices rsServices = - ((HasRegionServerServices) regionEnv).getRegionServerServices(); + ((HasRegionServerServices) regionEnv).getRegionServerServices(); zkPermissionWatcher = rsServices.getZKPermissionWatcher(); accessChecker = rsServices.getAccessChecker(); } @@ -756,40 +742,37 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public Iterable getServices() { - return Collections.singleton( - AccessControlProtos.AccessControlService.newReflectiveService(this)); + return Collections + .singleton(AccessControlProtos.AccessControlService.newReflectiveService(this)); } /*********************************** Observer implementations ***********************************/ @Override - public void preCreateTable(ObserverContext c, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + public void preCreateTable(ObserverContext c, TableDescriptor desc, + RegionInfo[] regions) throws IOException { Set families = desc.getColumnFamilyNames(); Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (byte[] family: families) { + for (byte[] family : families) { familyMap.put(family, null); } - requireNamespacePermission(c, "createTable", - desc.getTableName().getNamespaceAsString(), desc.getTableName(), familyMap, Action.ADMIN, - Action.CREATE); + requireNamespacePermission(c, "createTable", desc.getTableName().getNamespaceAsString(), + desc.getTableName(), familyMap, Action.ADMIN, Action.CREATE); } @Override - public void postCompletedCreateTableAction( - final ObserverContext c, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException { + public void postCompletedCreateTableAction(final ObserverContext c, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { // When AC is used, it should be configured as the 1st CP. // In Master, the table operations like create, are handled by a Thread pool but the max size // for this pool is 1. So if multiple CPs create tables on startup, these creations will happen // sequentially only. // Related code in HMaster#startServiceThreads // {code} - // // We depend on there being only one instance of this executor running - // // at a time. To do concurrency, would need fencing of enable/disable of - // // tables. - // this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1); + // // We depend on there being only one instance of this executor running + // // at a time. To do concurrency, would need fencing of enable/disable of + // // tables. 
+ // this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1); // {code} // In future if we change this pool to have more threads, then there is a chance for thread, // creating acl table, getting delayed and by that time another table creation got over and @@ -800,21 +783,20 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } else if (!(TableName.NAMESPACE_TABLE_NAME.equals(desc.getTableName()))) { if (!aclTabAvailable) { LOG.warn("Not adding owner permission for table " + desc.getTableName() + ". " - + PermissionStorage.ACL_TABLE_NAME + " is not yet created. " - + getClass().getSimpleName() + " should be configured as the first Coprocessor"); + + PermissionStorage.ACL_TABLE_NAME + " is not yet created. " + getClass().getSimpleName() + + " should be configured as the first Coprocessor"); } else { String owner = desc.getOwnerString(); // default the table owner to current user, if not specified. - if (owner == null) - owner = getActiveUser(c).getShortName(); + if (owner == null) owner = getActiveUser(c).getShortName(); final UserPermission userPermission = new UserPermission(owner, - Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build()); + Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build()); // switch to the real hbase master user for doing the RPC on the ACL table User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Table table = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.addUserPermission(c.getEnvironment().getConfiguration(), userPermission, table); } @@ -827,20 +809,19 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preDeleteTable(ObserverContext c, TableName tableName) - throws IOException { - requirePermission(c, "deleteTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + throws IOException { + requirePermission(c, "deleteTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void postDeleteTable(ObserverContext c, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { final Configuration conf = c.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Table table = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.removeTablePermissions(conf, tableName, table); } return null; @@ -851,16 +832,15 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preTruncateTable(ObserverContext c, - final TableName tableName) throws IOException { - requirePermission(c, "truncateTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + final TableName tableName) throws IOException { + requirePermission(c, "truncateTable", tableName, null, null, Action.ADMIN, Action.CREATE); final Configuration conf = c.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { List acls = - PermissionStorage.getUserTablePermissions(conf, tableName, null, null, null, false); + PermissionStorage.getUserTablePermissions(conf, tableName, 
null, null, null, false); if (acls != null) { tableAcls.put(tableName, acls); } @@ -871,7 +851,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void postTruncateTable(ObserverContext ctx, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { final Configuration conf = ctx.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override @@ -880,7 +860,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, if (perms != null) { for (UserPermission perm : perms) { try (Table table = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.addUserPermission(conf, perm, table); } } @@ -893,27 +873,26 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public TableDescriptor preModifyTable(ObserverContext c, - TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) - throws IOException { + TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) throws IOException { // TODO: potentially check if this is a add/modify/delete column operation requirePermission(c, "modifyTable", tableName, null, null, Action.ADMIN, Action.CREATE); return newDesc; } @Override - public void postModifyTable(ObserverContext c, - TableName tableName, final TableDescriptor htd) throws IOException { + public void postModifyTable(ObserverContext c, TableName tableName, + final TableDescriptor htd) throws IOException { final Configuration conf = c.getEnvironment().getConfiguration(); // default the table owner to current user, if not specified. - final String owner = (htd.getOwnerString() != null) ? htd.getOwnerString() : - getActiveUser(c).getShortName(); + final String owner = + (htd.getOwnerString() != null) ? htd.getOwnerString() : getActiveUser(c).getShortName(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { UserPermission userperm = new UserPermission(owner, - Permission.newBuilder(htd.getTableName()).withActions(Action.values()).build()); + Permission.newBuilder(htd.getTableName()).withActions(Action.values()).build()); try (Table table = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.addUserPermission(conf, userperm, table); } return null; @@ -939,113 +918,103 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preEnableTable(ObserverContext c, TableName tableName) - throws IOException { - requirePermission(c, "enableTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + throws IOException { + requirePermission(c, "enableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preDisableTable(ObserverContext c, TableName tableName) - throws IOException { + throws IOException { if (Bytes.equals(tableName.getName(), PermissionStorage.ACL_GLOBAL_NAME)) { // We have to unconditionally disallow disable of the ACL table when we are installed, // even if not enforcing authorizations. We are still allowing grants and revocations, // checking permissions and logging audit messages, etc. If the ACL table is not // available we will fail random actions all over the place. 
throw new AccessDeniedException("Not allowed to disable " + PermissionStorage.ACL_TABLE_NAME - + " table with AccessController installed"); + + " table with AccessController installed"); } - requirePermission(c, "disableTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "disableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preAbortProcedure(ObserverContext ctx, - final long procId) throws IOException { + final long procId) throws IOException { requirePermission(ctx, "abortProcedure", Action.ADMIN); } @Override public void postAbortProcedure(ObserverContext ctx) - throws IOException { + throws IOException { // There is nothing to do at this time after the procedure abort request was sent. } @Override public void preGetProcedures(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "getProcedure", Action.ADMIN); } @Override - public void preGetLocks(ObserverContext ctx) - throws IOException { + public void preGetLocks(ObserverContext ctx) throws IOException { User user = getActiveUser(ctx); accessChecker.requirePermission(user, "getLocks", null, Action.ADMIN); } @Override public void preMove(ObserverContext c, RegionInfo region, - ServerName srcServer, ServerName destServer) throws IOException { - requirePermission(c, "move", - region.getTable(), null, null, Action.ADMIN); + ServerName srcServer, ServerName destServer) throws IOException { + requirePermission(c, "move", region.getTable(), null, null, Action.ADMIN); } @Override public void preAssign(ObserverContext c, RegionInfo regionInfo) - throws IOException { - requirePermission(c, "assign", - regionInfo.getTable(), null, null, Action.ADMIN); + throws IOException { + requirePermission(c, "assign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preUnassign(ObserverContext c, RegionInfo regionInfo) - throws IOException { - requirePermission(c, "unassign", - regionInfo.getTable(), null, null, Action.ADMIN); + throws IOException { + requirePermission(c, "unassign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preRegionOffline(ObserverContext c, - RegionInfo regionInfo) throws IOException { - requirePermission(c, "regionOffline", - regionInfo.getTable(), null, null, Action.ADMIN); + RegionInfo regionInfo) throws IOException { + requirePermission(c, "regionOffline", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException { - requirePermission(ctx, "setSplitOrMergeEnabled", - Action.ADMIN); + final boolean newValue, final MasterSwitchType switchType) throws IOException { + requirePermission(ctx, "setSplitOrMergeEnabled", Action.ADMIN); } @Override public void preBalance(ObserverContext c, BalanceRequest request) - throws IOException { + throws IOException { requirePermission(c, "balance", Action.ADMIN); } @Override - public void preBalanceSwitch(ObserverContext c, - boolean newValue) throws IOException { + public void preBalanceSwitch(ObserverContext c, boolean newValue) + throws IOException { requirePermission(c, "balanceSwitch", Action.ADMIN); } @Override - public void preShutdown(ObserverContext c) - throws IOException { + public void preShutdown(ObserverContext c) throws IOException { requirePermission(c, "shutdown", Action.ADMIN); } @Override - public void preStopMaster(ObserverContext c) - throws IOException { + public void 
preStopMaster(ObserverContext c) throws IOException { requirePermission(c, "stopMaster", Action.ADMIN); } @Override public void postStartMaster(ObserverContext ctx) - throws IOException { + throws IOException { try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { if (!admin.tableExists(PermissionStorage.ACL_TABLE_NAME)) { createACLTable(admin); @@ -1054,43 +1023,37 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } } } + /** - * Create the ACL table - * @throws IOException + * Create the ACL table n */ private static void createACLTable(Admin admin) throws IOException { /** Table descriptor for ACL table */ ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(PermissionStorage.ACL_LIST_FAMILY). - setMaxVersions(1). - setInMemory(true). - setBlockCacheEnabled(true). - setBlocksize(8 * 1024). - setBloomFilterType(BloomType.NONE). - setScope(HConstants.REPLICATION_SCOPE_LOCAL).build(); - TableDescriptor td = - TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME). - setColumnFamily(cfd).build(); + ColumnFamilyDescriptorBuilder.newBuilder(PermissionStorage.ACL_LIST_FAMILY).setMaxVersions(1) + .setInMemory(true).setBlockCacheEnabled(true).setBlocksize(8 * 1024) + .setBloomFilterType(BloomType.NONE).setScope(HConstants.REPLICATION_SCOPE_LOCAL).build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME) + .setColumnFamily(cfd).build(); admin.createTable(td); } @Override public void preSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) - throws IOException { + final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { // Move this ACL check to SnapshotManager#checkPermissions as part of AC deprecation. 
- requirePermission(ctx, "snapshot " + snapshot.getName(), - hTableDescriptor.getTableName(), null, null, Permission.Action.ADMIN); + requirePermission(ctx, "snapshot " + snapshot.getName(), hTableDescriptor.getTableName(), null, + null, Permission.Action.ADMIN); } @Override public void preListSnapshot(ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException { + final SnapshotDescription snapshot) throws IOException { User user = getActiveUser(ctx); if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) { // list it, if user is the owner of snapshot AuthResult result = AuthResult.allow("listSnapshot " + snapshot.getName(), - "Snapshot owner check allowed", user, null, null, null); + "Snapshot owner check allowed", user, null, null, null); AccessChecker.logResult(result); } else { accessChecker.requirePermission(user, "listSnapshot " + snapshot.getName(), null, @@ -1100,11 +1063,12 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preCloneSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) - throws IOException { + final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { User user = getActiveUser(ctx); - if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) - && hTableDescriptor.getTableName().getNameAsString().equals(snapshot.getTable())) { + if ( + SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) + && hTableDescriptor.getTableName().getNameAsString().equals(snapshot.getTable()) + ) { // Snapshot owner is allowed to create a table with the same name as the snapshot he took AuthResult result = AuthResult.allow("cloneSnapshot " + snapshot.getName(), "Snapshot owner check allowed", user, null, hTableDescriptor.getTableName(), null); @@ -1117,8 +1081,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preRestoreSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) - throws IOException { + final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { User user = getActiveUser(ctx); if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) { accessChecker.requirePermission(user, "restoreSnapshot " + snapshot.getName(), @@ -1131,12 +1094,12 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preDeleteSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException { + final SnapshotDescription snapshot) throws IOException { User user = getActiveUser(ctx); if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) { // Snapshot owner is allowed to delete the snapshot AuthResult result = AuthResult.allow("deleteSnapshot " + snapshot.getName(), - "Snapshot owner check allowed", user, null, null, null); + "Snapshot owner check allowed", user, null, null, null); AccessChecker.logResult(result); } else { accessChecker.requirePermission(user, "deleteSnapshot " + snapshot.getName(), null, @@ -1146,27 +1109,25 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preCreateNamespace(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException { - requireGlobalPermission(ctx, "createNamespace", - Action.ADMIN, ns.getName()); + NamespaceDescriptor ns) throws IOException { + requireGlobalPermission(ctx, "createNamespace", 
Action.ADMIN, ns.getName()); } @Override - public void preDeleteNamespace(ObserverContext ctx, String namespace) - throws IOException { - requireGlobalPermission(ctx, "deleteNamespace", - Action.ADMIN, namespace); + public void preDeleteNamespace(ObserverContext ctx, + String namespace) throws IOException { + requireGlobalPermission(ctx, "deleteNamespace", Action.ADMIN, namespace); } @Override public void postDeleteNamespace(ObserverContext ctx, - final String namespace) throws IOException { + final String namespace) throws IOException { final Configuration conf = ctx.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Table table = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.removeNamespacePermissions(conf, namespace, table); } return null; @@ -1178,29 +1139,27 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preModifyNamespace(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException { + NamespaceDescriptor ns) throws IOException { // We require only global permission so that // a user with NS admin cannot altering namespace configurations. i.e. namespace quota - requireGlobalPermission(ctx, "modifyNamespace", - Action.ADMIN, ns.getName()); + requireGlobalPermission(ctx, "modifyNamespace", Action.ADMIN, ns.getName()); } @Override - public void preGetNamespaceDescriptor(ObserverContext ctx, String namespace) - throws IOException { - requireNamespacePermission(ctx, "getNamespaceDescriptor", - namespace, Action.ADMIN); + public void preGetNamespaceDescriptor(ObserverContext ctx, + String namespace) throws IOException { + requireNamespacePermission(ctx, "getNamespaceDescriptor", namespace, Action.ADMIN); } @Override public void postListNamespaces(ObserverContext ctx, - List namespaces) throws IOException { + List namespaces) throws IOException { /* always allow namespace listing */ } @Override public void postListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException { + List descriptors) throws IOException { // Retains only those which passes authorization checks, as the checks weren't done as part // of preGetTableDescriptors. Iterator itr = descriptors.iterator(); @@ -1218,52 +1177,46 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preTableFlush(final ObserverContext ctx, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { // Move this ACL check to MasterFlushTableProcedureManager#checkPermissions as part of AC // deprecation. 
- requirePermission(ctx, "flushTable", tableName, - null, null, Action.ADMIN, Action.CREATE); + requirePermission(ctx, "flushTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override - public void preSplitRegion( - final ObserverContext ctx, - final TableName tableName, - final byte[] splitRow) throws IOException { - requirePermission(ctx, "split", tableName, - null, null, Action.ADMIN); + public void preSplitRegion(final ObserverContext ctx, + final TableName tableName, final byte[] splitRow) throws IOException { + requirePermission(ctx, "split", tableName, null, null, Action.ADMIN); } @Override public void preClearDeadServers(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "clearDeadServers", Action.ADMIN); } @Override public void preDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException { + List servers, boolean offload) throws IOException { requirePermission(ctx, "decommissionRegionServers", Action.ADMIN); } @Override public void preListDecommissionedRegionServers(ObserverContext ctx) - throws IOException { - requirePermission(ctx, "listDecommissionedRegionServers", - Action.ADMIN); + throws IOException { + requirePermission(ctx, "listDecommissionedRegionServers", Action.ADMIN); } @Override public void preRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException { + ServerName server, List encodedRegionNames) throws IOException { requirePermission(ctx, "recommissionRegionServers", Action.ADMIN); } /* ---- RegionObserver implementation ---- */ @Override - public void preOpen(ObserverContext c) - throws IOException { + public void preOpen(ObserverContext c) throws IOException { RegionCoprocessorEnvironment env = c.getEnvironment(); final Region region = env.getRegion(); if (region == null) { @@ -1302,22 +1255,22 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preFlush(ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException { - requirePermission(c, "flush", getTableName(c.getEnvironment()), - null, null, Action.ADMIN, Action.CREATE); + FlushLifeCycleTracker tracker) throws IOException { + requirePermission(c, "flush", getTableName(c.getEnvironment()), null, null, Action.ADMIN, + Action.CREATE); } @Override public InternalScanner preCompact(ObserverContext c, Store store, - InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { - requirePermission(c, "compact", getTableName(c.getEnvironment()), - null, null, Action.ADMIN, Action.CREATE); + InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { + requirePermission(c, "compact", getTableName(c.getEnvironment()), null, null, Action.ADMIN, + Action.CREATE); return scanner; } private void internalPreRead(final ObserverContext c, - final Query query, OpType opType) throws IOException { + final Query query, OpType opType) throws IOException { Filter filter = query.getFilter(); // Don't wrap an AccessControlFilter if (filter != null && filter instanceof AccessControlFilter) { @@ -1325,17 +1278,17 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } User user = getActiveUser(c); RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = null; + Map> families = null; switch (opType) { - case GET: - case EXISTS: - families = 
((Get)query).getFamilyMap(); - break; - case SCAN: - families = ((Scan)query).getFamilyMap(); - break; - default: - throw new RuntimeException("Unhandled operation " + opType); + case GET: + case EXISTS: + families = ((Get) query).getFamilyMap(); + break; + case SCAN: + families = ((Scan) query).getFamilyMap(); + break; + default: + throw new RuntimeException("Unhandled operation " + opType); } AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ); Region region = getRegion(env); @@ -1360,8 +1313,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // Only wrap the filter if we are enforcing authorizations if (authorizationEnabled) { Filter ourFilter = new AccessControlFilter(getAuthManager(), user, table, - AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, - cfVsMaxVersions); + AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, cfVsMaxVersions); // wrap any existing filter if (filter != null) { ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, @@ -1370,10 +1322,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, switch (opType) { case GET: case EXISTS: - ((Get)query).setFilter(ourFilter); + ((Get) query).setFilter(ourFilter); break; case SCAN: - ((Scan)query).setFilter(ourFilter); + ((Scan) query).setFilter(ourFilter); break; default: throw new RuntimeException("Unhandled operation " + opType); @@ -1399,10 +1351,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, switch (opType) { case GET: case EXISTS: - ((Get)query).setFilter(ourFilter); + ((Get) query).setFilter(ourFilter); break; case SCAN: - ((Scan)query).setFilter(ourFilter); + ((Scan) query).setFilter(ourFilter); break; default: throw new RuntimeException("Unhandled operation " + opType); @@ -1414,28 +1366,26 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions for user '" - + (user != null ? user.getShortName() : "null") - + "' (table=" + table + ", action=READ)"); + + (user != null ? user.getShortName() : "null") + "' (table=" + table + ", action=READ)"); } } @Override - public void preGetOp(final ObserverContext c, - final Get get, final List result) throws IOException { + public void preGetOp(final ObserverContext c, final Get get, + final List result) throws IOException { internalPreRead(c, get, OpType.GET); } @Override - public boolean preExists(final ObserverContext c, - final Get get, final boolean exists) throws IOException { + public boolean preExists(final ObserverContext c, final Get get, + final boolean exists) throws IOException { internalPreRead(c, get, OpType.EXISTS); return exists; } @Override - public void prePut(final ObserverContext c, - final Put put, final WALEdit edit, final Durability durability) - throws IOException { + public void prePut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); @@ -1446,9 +1396,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // change the ACL of any previous Put. This allows simple evolution of // security policy over time without requiring expensive updates. 
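// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: how a client could attach a
// cell-level ACL to a Put, which the prePut/preBatchMutate hooks shown in this
// hunk then validate via the covering-permission check. The table name
// "demo_table", user "report_user", and class name are hypothetical; the
// Put#setACL and Permission.newBuilder(...) calls mirror APIs already used
// elsewhere in this class, assuming HFile v3 so that ACL tags are persisted.
// ---------------------------------------------------------------------------
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class CellAclPutExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
        Table table = conn.getTable(TableName.valueOf("demo_table"))) { // hypothetical table
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Grant READ on just this cell to a hypothetical user; a later Put can
      // carry a different ACL without rewriting the ACLs of older cells,
      // which is the "simple evolution of security policy" described above.
      put.setACL("report_user",
        Permission.newBuilder().withActions(Permission.Action.READ).build());
      table.put(put);
    }
  }
}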
RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = put.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.PUT, - user, env, families, Action.WRITE); + Map> families = put.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { @@ -1470,17 +1419,16 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } @Override - public void postPut(final ObserverContext c, - final Put put, final WALEdit edit, final Durability durability) { + public void postPut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) { if (aclRegion) { updateACL(c.getEnvironment(), put.getFamilyCellMap()); } } @Override - public void preDelete(final ObserverContext c, - final Delete delete, final WALEdit edit, final Durability durability) - throws IOException { + public void preDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { // An ACL on a delete is useless, we shouldn't allow it if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) { throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString()); @@ -1491,24 +1439,22 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // overwrite any of the visible versions ('visible' defined as not covered // by a tombstone already) then we have to disallow this operation. RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = delete.getFamilyCellMap(); + Map> families = delete.getFamilyCellMap(); User user = getActiveUser(c); - AuthResult authResult = permissionGranted(OpType.DELETE, - user, env, families, Action.WRITE); + AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } } @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (cellFeaturesEnabled && !compatibleEarlyTermination) { TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); User user = getActiveUser(c); @@ -1537,18 +1483,20 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, continue; } AuthResult authResult = null; - if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), - m.getFamilyCellMap(), timestamp, Action.WRITE)) { - authResult = AuthResult.allow(opType.toString(), "Covering cell set", - user, Action.WRITE, table, m.getFamilyCellMap()); + if ( + checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), + m.getFamilyCellMap(), timestamp, Action.WRITE) + ) { + authResult = AuthResult.allow(opType.toString(), "Covering cell set", user, + Action.WRITE, table, m.getFamilyCellMap()); } else { - authResult = AuthResult.deny(opType.toString(), "Covering cell set", - user, Action.WRITE, table, m.getFamilyCellMap()); + 
authResult = AuthResult.deny(opType.toString(), "Covering cell set", user, Action.WRITE, + table, m.getFamilyCellMap()); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " - + authResult.toContextString()); + throw new AccessDeniedException( + "Insufficient permissions " + authResult.toContextString()); } } } @@ -1556,9 +1504,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } @Override - public void postDelete(final ObserverContext c, - final Delete delete, final WALEdit edit, final Durability durability) - throws IOException { + public void postDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { if (aclRegion) { updateACL(c.getEnvironment(), delete.getFamilyCellMap()); } @@ -1566,25 +1513,22 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public boolean preCheckAndPut(final ObserverContext c, - final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator, final Put put, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); // Require READ and WRITE permissions on the table, CF, and KV to update RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, qualifier); - AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, - user, env, families, Action.READ, Action.WRITE); + Map> families = makeFamilyMap(family, qualifier); + AuthResult authResult = + permissionGranted(OpType.CHECK_AND_PUT, user, env, families, Action.READ, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { put.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1601,9 +1545,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public boolean preCheckAndPutAfterRowLock(final ObserverContext c, - final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator opp, final ByteArrayComparable comparator, final Put put, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator opp, + final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException { if (put.getAttribute(CHECK_COVERING_PERM) != null) { // We had failure with table, cf and q perm checks and now giving a chance for cell // perm check @@ -1611,13 +1554,15 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, Map> families = makeFamilyMap(family, qualifier); AuthResult authResult = null; User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.CHECK_AND_PUT, c.getEnvironment(), row, families, - HConstants.LATEST_TIMESTAMP, Action.READ)) { - authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), - "Covering cell set", user, 
Action.READ, table, families); + if ( + checkCoveringPermission(user, OpType.CHECK_AND_PUT, c.getEnvironment(), row, families, + HConstants.LATEST_TIMESTAMP, Action.READ) + ) { + authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, + Action.READ, table, families); } else { - authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), - "Covering cell set", user, Action.READ, table, families); + authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, + Action.READ, table, families); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { @@ -1629,29 +1574,26 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public boolean preCheckAndDelete(final ObserverContext c, - final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator, final Delete delete, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator, final Delete delete, final boolean result) + throws IOException { // An ACL on a delete is useless, we shouldn't allow it if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) { - throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " + - delete.toString()); + throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " + delete.toString()); } // Require READ and WRITE permissions on the table, CF, and the KV covered // by the delete RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, qualifier); + Map> families = makeFamilyMap(family, qualifier); User user = getActiveUser(c); - AuthResult authResult = permissionGranted( - OpType.CHECK_AND_DELETE, user, env, families, Action.READ, Action.WRITE); + AuthResult authResult = + permissionGranted(OpType.CHECK_AND_DELETE, user, env, families, Action.READ, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } return result; @@ -1659,10 +1601,9 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public boolean preCheckAndDeleteAfterRowLock( - final ObserverContext c, final byte[] row, - final byte[] family, final byte[] qualifier, final CompareOperator op, - final ByteArrayComparable comparator, final Delete delete, final boolean result) - throws IOException { + final ObserverContext c, final byte[] row, final byte[] family, + final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator, + final Delete delete, final boolean result) throws IOException { if (delete.getAttribute(CHECK_COVERING_PERM) != null) { // We had failure with table, cf and q perm checks and now giving a chance for cell // perm check @@ -1670,13 +1611,15 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, Map> families = makeFamilyMap(family, qualifier); AuthResult authResult = null; User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.CHECK_AND_DELETE, c.getEnvironment(), - row, 
families, HConstants.LATEST_TIMESTAMP, Action.READ)) { - authResult = AuthResult.allow(OpType.CHECK_AND_DELETE.toString(), - "Covering cell set", user, Action.READ, table, families); + if ( + checkCoveringPermission(user, OpType.CHECK_AND_DELETE, c.getEnvironment(), row, families, + HConstants.LATEST_TIMESTAMP, Action.READ) + ) { + authResult = AuthResult.allow(OpType.CHECK_AND_DELETE.toString(), "Covering cell set", user, + Action.READ, table, families); } else { - authResult = AuthResult.deny(OpType.CHECK_AND_DELETE.toString(), - "Covering cell set", user, Action.READ, table, families); + authResult = AuthResult.deny(OpType.CHECK_AND_DELETE.toString(), "Covering cell set", user, + Action.READ, table, families); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { @@ -1688,22 +1631,20 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public Result preAppend(ObserverContext c, Append append) - throws IOException { + throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, append); // Require WRITE permission to the table, CF, and the KV to be appended RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = append.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.APPEND, user, - env, families, Action.WRITE); + Map> families = append.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.APPEND, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { append.setAttribute(CHECK_COVERING_PERM, TRUE); - } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1721,24 +1662,21 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public Result preIncrement(final ObserverContext c, - final Increment increment) - throws IOException { + final Increment increment) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, increment); // Require WRITE permission to the table, CF, and the KV to be replaced by // the incremented value RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = increment.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.INCREMENT, - user, env, families, Action.WRITE); + Map> families = increment.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.INCREMENT, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { increment.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1756,30 +1694,32 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public List> postIncrementBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { // If the HFile version is insufficient to persist tags, we won't have any // 
work to do here if (!cellFeaturesEnabled || mutation.getACL() == null) { return cellPairs; } - return cellPairs.stream().map(pair -> new Pair<>(pair.getFirst(), + return cellPairs.stream() + .map(pair -> new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } @Override public List> postAppendBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { // If the HFile version is insufficient to persist tags, we won't have any // work to do here if (!cellFeaturesEnabled || mutation.getACL() == null) { return cellPairs; } - return cellPairs.stream().map(pair -> new Pair<>(pair.getFirst(), + return cellPairs.stream() + .map(pair -> new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell) { @@ -1795,7 +1735,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // Not an ACL tag, just carry it through if (LOG.isTraceEnabled()) { LOG.trace("Carrying forward tag from " + newCell + ": type " + tag.getType() - + " length " + tag.getValueLength()); + + " length " + tag.getValueLength()); } tags.add(tag); } @@ -1810,13 +1750,13 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preScannerOpen(final ObserverContext c, final Scan scan) - throws IOException { + throws IOException { internalPreRead(c, scan, OpType.SCAN); } @Override public RegionScanner postScannerOpen(final ObserverContext c, - final Scan scan, final RegionScanner s) throws IOException { + final Scan scan, final RegionScanner s) throws IOException { User user = getActiveUser(c); if (user != null && user.getShortName() != null) { // store reference to scanner owner for later checks @@ -1827,29 +1767,28 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public boolean preScannerNext(final ObserverContext c, - final InternalScanner s, final List result, - final int limit, final boolean hasNext) throws IOException { + final InternalScanner s, final List result, final int limit, final boolean hasNext) + throws IOException { requireScannerOwner(s); return hasNext; } @Override public void preScannerClose(final ObserverContext c, - final InternalScanner s) throws IOException { + final InternalScanner s) throws IOException { requireScannerOwner(s); } @Override public void postScannerClose(final ObserverContext c, - final InternalScanner s) throws IOException { + final InternalScanner s) throws IOException { // clean up any associated owner mapping scannerOwners.remove(s); } /** - * Verify, when servicing an RPC, that the caller is the scanner owner. - * If so, we assume that access control is correctly enforced based on - * the checks performed in preScannerOpen() + * Verify, when servicing an RPC, that the caller is the scanner owner. 
If so, we assume that + * access control is correctly enforced based on the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { if (!RpcServer.isInRpcCallContext()) { @@ -1858,21 +1797,19 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, String requestUserName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUserName)) { - throw new AccessDeniedException("User '"+ requestUserName +"' is not the scanner owner!"); + throw new AccessDeniedException("User '" + requestUserName + "' is not the scanner owner!"); } } /** - * Verifies user has CREATE or ADMIN privileges on - * the Column Families involved in the bulkLoadHFile - * request. Specific Column Write privileges are presently - * ignored. + * Verifies user has CREATE or ADMIN privileges on the Column Families involved in the + * bulkLoadHFile request. Specific Column Write privileges are presently ignored. */ @Override public void preBulkLoadHFile(ObserverContext ctx, - List> familyPaths) throws IOException { + List> familyPaths) throws IOException { User user = getActiveUser(ctx); - for(Pair el : familyPaths) { + for (Pair el : familyPaths) { accessChecker.requirePermission(user, "preBulkLoadHFile", ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), el.getFirst(), null, null, Action.ADMIN, Action.CREATE); @@ -1880,67 +1817,62 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } /** - * Authorization check for - * SecureBulkLoadProtocol.prepareBulkLoad() - * @param ctx the context - * @throws IOException + * Authorization check for SecureBulkLoadProtocol.prepareBulkLoad() + * @param ctx the context n */ @Override public void prePrepareBulkLoad(ObserverContext ctx) - throws IOException { + throws IOException { requireAccess(ctx, "prePrepareBulkLoad", - ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, - Action.CREATE); + ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, + Action.CREATE); } /** - * Authorization security check for - * SecureBulkLoadProtocol.cleanupBulkLoad() - * @param ctx the context - * @throws IOException + * Authorization security check for SecureBulkLoadProtocol.cleanupBulkLoad() + * @param ctx the context n */ @Override public void preCleanupBulkLoad(ObserverContext ctx) - throws IOException { + throws IOException { requireAccess(ctx, "preCleanupBulkLoad", - ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, - Action.CREATE); + ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, + Action.CREATE); } /* ---- EndpointObserver implementation ---- */ @Override public Message preEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request) throws IOException { + Service service, String methodName, Message request) throws IOException { // Don't intercept calls to our own AccessControlService, we check for // appropriate permissions in the service handlers if (shouldCheckExecPermission && !(service instanceof AccessControlService)) { requirePermission(ctx, - "invoke(" + service.getDescriptorForType().getName() + "." + methodName + ")", - getTableName(ctx.getEnvironment()), null, null, - Action.EXEC); + "invoke(" + service.getDescriptorForType().getName() + "." 
+ methodName + ")", + getTableName(ctx.getEnvironment()), null, null, Action.EXEC); } return request; } @Override public void postEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request, Message.Builder responseBuilder) - throws IOException { } + Service service, String methodName, Message request, Message.Builder responseBuilder) + throws IOException { + } /* ---- Protobuf AccessControlService implementation ---- */ /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#grant(UserPermission, boolean)} instead. + * {@link Admin#grant(UserPermission, boolean)} instead. * @see Admin#grant(UserPermission, boolean) * @see HBASE-21739 */ @Deprecated @Override - public void grant(RpcController controller, - AccessControlProtos.GrantRequest request, - RpcCallback done) { + public void grant(RpcController controller, AccessControlProtos.GrantRequest request, + RpcCallback done) { final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission()); AccessControlProtos.GrantResponse response = null; try { @@ -1951,8 +1883,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } User caller = RpcServer.getRequestUser().orElse(null); if (LOG.isDebugEnabled()) { - LOG.debug("Received request from {} to grant access permission {}", - caller.getName(), perm.toString()); + LOG.debug("Received request from {} to grant access permission {}", caller.getName(), + perm.toString()); } preGrantOrRevoke(caller, "grant", perm); @@ -1965,8 +1897,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, AUDITLOG.trace("Granted permission " + perm.toString()); } } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } response = AccessControlProtos.GrantResponse.getDefaultInstance(); } catch (IOException ioe) { @@ -1978,14 +1910,14 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link Admin#revoke(UserPermission)} - * instead. + * instead. * @see Admin#revoke(UserPermission) * @see HBASE-21739 */ @Deprecated @Override public void revoke(RpcController controller, AccessControlProtos.RevokeRequest request, - RpcCallback done) { + RpcCallback done) { final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission()); AccessControlProtos.RevokeResponse response = null; try { @@ -2002,14 +1934,14 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, preGrantOrRevoke(caller, "revoke", perm); // regionEnv is set at #start. Hopefully not null here. 
regionEnv.getConnection().getAdmin() - .revoke(new UserPermission(perm.getUser(), perm.getPermission())); + .revoke(new UserPermission(perm.getUser(), perm.getPermission())); if (AUDITLOG.isTraceEnabled()) { // audit log should record all permission changes AUDITLOG.trace("Revoked permission " + perm.toString()); } } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } response = AccessControlProtos.RevokeResponse.getDefaultInstance(); } catch (IOException ioe) { @@ -2021,15 +1953,15 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. * @see Admin#getUserPermissions(GetUserPermissionsRequest) * @see HBASE-21911 */ @Deprecated @Override public void getUserPermissions(RpcController controller, - AccessControlProtos.GetUserPermissionsRequest request, - RpcCallback done) { + AccessControlProtos.GetUserPermissionsRequest request, + RpcCallback done) { AccessControlProtos.GetUserPermissionsResponse response = null; try { // only allowed to be called on _acl_ region @@ -2040,31 +1972,31 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, User caller = RpcServer.getRequestUser().orElse(null); final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null; final String namespace = - request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null; + request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null; final TableName table = - request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null; + request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null; final byte[] cf = - request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null; + request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null; final byte[] cq = - request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null; + request.hasColumnQualifier() ? 
request.getColumnQualifier().toByteArray() : null; preGetUserPermissions(caller, userName, namespace, table, cf, cq); GetUserPermissionsRequest getUserPermissionsRequest = null; if (request.getType() == AccessControlProtos.Permission.Type.Table) { getUserPermissionsRequest = GetUserPermissionsRequest.newBuilder(table).withFamily(cf) - .withQualifier(cq).withUserName(userName).build(); + .withQualifier(cq).withUserName(userName).build(); } else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) { getUserPermissionsRequest = - GetUserPermissionsRequest.newBuilder(namespace).withUserName(userName).build(); + GetUserPermissionsRequest.newBuilder(namespace).withUserName(userName).build(); } else { getUserPermissionsRequest = - GetUserPermissionsRequest.newBuilder().withUserName(userName).build(); + GetUserPermissionsRequest.newBuilder().withUserName(userName).build(); } List perms = - regionEnv.getConnection().getAdmin().getUserPermissions(getUserPermissionsRequest); + regionEnv.getConnection().getAdmin().getUserPermissions(getUserPermissionsRequest); response = AccessControlUtil.buildGetUserPermissionsResponse(perms); } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } } catch (IOException ioe) { // pass exception back up @@ -2075,15 +2007,15 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** * @deprecated since 2.2.0 and will be removed 4.0.0. Use {@link Admin#hasUserPermissions(List)} - * instead. + * instead. * @see Admin#hasUserPermissions(List) * @see HBASE-22117 */ @Deprecated @Override public void checkPermissions(RpcController controller, - AccessControlProtos.CheckPermissionsRequest request, - RpcCallback done) { + AccessControlProtos.CheckPermissionsRequest request, + RpcCallback done) { AccessControlProtos.CheckPermissionsResponse response = null; try { User user = RpcServer.getRequestUser().orElse(null); @@ -2096,16 +2028,16 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, TablePermission tperm = (TablePermission) permission; if (!tperm.getTableName().equals(tableName)) { throw new CoprocessorException(AccessController.class, - String.format( - "This method can only execute at the table specified in " - + "TablePermission. Table of the region:%s , requested table:%s", - tableName, tperm.getTableName())); + String.format( + "This method can only execute at the table specified in " + + "TablePermission. 
Table of the region:%s , requested table:%s", + tableName, tperm.getTableName())); } } } for (Permission permission : permissions) { boolean hasPermission = - accessChecker.hasUserPermission(user, "checkPermissions", permission); + accessChecker.hasUserPermission(user, "checkPermissions", permission); if (!hasPermission) { throw new AccessDeniedException("Insufficient permissions " + permission.toString()); } @@ -2139,7 +2071,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preClose(ObserverContext c, boolean abortRequested) - throws IOException { + throws IOException { requirePermission(c, "preClose", Action.ADMIN); } @@ -2149,20 +2081,19 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, return; } if (!Superusers.isSuperUser(activeUser)) { - throw new AccessDeniedException("User '" + (activeUser != null ? - activeUser.getShortName() : "null") + "' is not system or super user."); + throw new AccessDeniedException( + "User '" + (activeUser != null ? activeUser.getShortName() : "null") + + "' is not system or super user."); } } @Override - public void preStopRegionServer( - ObserverContext ctx) - throws IOException { + public void preStopRegionServer(ObserverContext ctx) + throws IOException { requirePermission(ctx, "preStopRegionServer", Action.ADMIN); } - private Map> makeFamilyMap(byte[] family, - byte[] qualifier) { + private Map> makeFamilyMap(byte[] family, byte[] qualifier) { if (family == null) { return null; } @@ -2174,22 +2105,22 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException { + List tableNamesList, List descriptors, String regex) + throws IOException { // We are delegating the authorization check to postGetTableDescriptors as we don't have // any concrete set of table names when a regex is present or the full list is requested. if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { // Otherwise, if the requestor has ADMIN or CREATE privs for all listed tables, the // request can be granted. - TableName [] sns = null; + TableName[] sns = null; try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { sns = admin.listTableNames(); if (sns == null) return; - for (TableName tableName: tableNamesList) { + for (TableName tableName : tableNamesList) { // Skip checks for a table that does not exist if (!admin.tableExists(tableName)) continue; - requirePermission(ctx, "getTableDescriptors", tableName, null, null, - Action.ADMIN, Action.CREATE); + requirePermission(ctx, "getTableDescriptors", tableName, null, null, Action.ADMIN, + Action.CREATE); } } } @@ -2197,8 +2128,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void postGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException { + List tableNamesList, List descriptors, String regex) + throws IOException { // Skipping as checks in this case are already done by preGetTableDescriptors. 
if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { return; @@ -2210,8 +2141,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, while (itr.hasNext()) { TableDescriptor htd = itr.next(); try { - requirePermission(ctx, "getTableDescriptors", htd.getTableName(), null, null, - Action.ADMIN, Action.CREATE); + requirePermission(ctx, "getTableDescriptors", htd.getTableName(), null, null, Action.ADMIN, + Action.CREATE); } catch (AccessDeniedException e) { itr.remove(); } @@ -2220,7 +2151,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void postGetTableNames(ObserverContext ctx, - List descriptors, String regex) throws IOException { + List descriptors, String regex) throws IOException { // Retains only those which passes authorization checks. Iterator itr = descriptors.iterator(); while (itr.hasNext()) { @@ -2235,123 +2166,123 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preMergeRegions(final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException { - requirePermission(ctx, "mergeRegions", regionsToMerge[0].getTable(), null, null, - Action.ADMIN); + final RegionInfo[] regionsToMerge) throws IOException { + requirePermission(ctx, "mergeRegions", regionsToMerge[0].getTable(), null, null, Action.ADMIN); } @Override public void preRollWALWriterRequest(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "preRollLogWriterRequest", Permission.Action.ADMIN); } @Override public void postRollWALWriterRequest(ObserverContext ctx) - throws IOException { } + throws IOException { + } @Override public void preSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException { + final String userName, final GlobalQuotaSettings quotas) throws IOException { requirePermission(ctx, "setUserQuota", Action.ADMIN); } @Override public void preSetUserQuota(final ObserverContext ctx, - final String userName, final TableName tableName, final GlobalQuotaSettings quotas) - throws IOException { + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { requirePermission(ctx, "setUserTableQuota", tableName, null, null, Action.ADMIN); } @Override public void preSetUserQuota(final ObserverContext ctx, - final String userName, final String namespace, final GlobalQuotaSettings quotas) - throws IOException { + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { requirePermission(ctx, "setUserNamespaceQuota", Action.ADMIN); } @Override public void preSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { requirePermission(ctx, "setTableQuota", tableName, null, null, Action.ADMIN); } @Override public void preSetNamespaceQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException { + final String namespace, final GlobalQuotaSettings quotas) throws IOException { requirePermission(ctx, "setNamespaceQuota", Action.ADMIN); } @Override public void preSetRegionServerQuota(ObserverContext ctx, - final String regionServer, GlobalQuotaSettings quotas) throws IOException { + final String regionServer, GlobalQuotaSettings quotas) throws IOException { 
requirePermission(ctx, "setRegionServerQuota", Action.ADMIN); } @Override public ReplicationEndpoint postCreateReplicationEndPoint( - ObserverContext ctx, ReplicationEndpoint endpoint) { + ObserverContext ctx, ReplicationEndpoint endpoint) { return endpoint; } @Override public void preReplicateLogEntries(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "replicateLogEntries", Action.WRITE); } - + @Override - public void preClearCompactionQueues(ObserverContext ctx) - throws IOException { + public void preClearCompactionQueues(ObserverContext ctx) + throws IOException { requirePermission(ctx, "preClearCompactionQueues", Permission.Action.ADMIN); } @Override public void preAddReplicationPeer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException { + String peerId, ReplicationPeerConfig peerConfig) throws IOException { requirePermission(ctx, "addReplicationPeer", Action.ADMIN); } @Override public void preRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "removeReplicationPeer", Action.ADMIN); } @Override public void preEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "enableReplicationPeer", Action.ADMIN); } @Override public void preDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "disableReplicationPeer", Action.ADMIN); } @Override public void preGetReplicationPeerConfig(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "getReplicationPeerConfig", Action.ADMIN); } @Override public void preUpdateReplicationPeerConfig( - final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException { + final ObserverContext ctx, String peerId, + ReplicationPeerConfig peerConfig) throws IOException { requirePermission(ctx, "updateReplicationPeerConfig", Action.ADMIN); } @Override public void preListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException { + String regex) throws IOException { requirePermission(ctx, "listReplicationPeers", Action.ADMIN); } @Override public void preRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { // There are operations in the CREATE and ADMIN domain which may require lock, READ // or WRITE. So for any lock request, we check for these two perms irrespective of lock type. 
String reason = String.format("Description=%s", description); @@ -2360,38 +2291,38 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preLockHeartbeat(ObserverContext ctx, - TableName tableName, String description) throws IOException { + TableName tableName, String description) throws IOException { checkLockPermissions(ctx, null, tableName, null, description); } @Override public void preExecuteProcedures(ObserverContext ctx) - throws IOException { + throws IOException { checkSystemOrSuperUser(getActiveUser(ctx)); } @Override public void preSwitchRpcThrottle(ObserverContext ctx, - boolean enable) throws IOException { + boolean enable) throws IOException { requirePermission(ctx, "switchRpcThrottle", Action.ADMIN); } @Override public void preIsRpcThrottleEnabled(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "isRpcThrottleEnabled", Action.ADMIN); } @Override public void preSwitchExceedThrottleQuota(ObserverContext ctx, - boolean enable) throws IOException { + boolean enable) throws IOException { requirePermission(ctx, "switchExceedThrottleQuota", Action.ADMIN); } /** - * Returns the active user to which authorization checks should be applied. - * If we are in the context of an RPC call, the remote user is used, - * otherwise the currently logged in user is used. + * Returns the active user to which authorization checks should be applied. If we are in the + * context of an RPC call, the remote user is used, otherwise the currently logged in user is + * used. */ private User getActiveUser(ObserverContext ctx) throws IOException { // for non-rpc handling, fallback to system user @@ -2404,14 +2335,14 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#hasUserPermissions(String, List)} instead. + * {@link Admin#hasUserPermissions(String, List)} instead. * @see Admin#hasUserPermissions(String, List) * @see HBASE-22117 */ @Deprecated @Override public void hasPermission(RpcController controller, HasPermissionRequest request, - RpcCallback done) { + RpcCallback done) { // Converts proto to a TablePermission object. 
TablePermission tPerm = AccessControlUtil.toTablePermission(request.getTablePermission()); // Check input user name @@ -2424,8 +2355,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, User caller = RpcServer.getRequestUser().orElse(null); List permissions = Lists.newArrayList(tPerm); preHasUserPermissions(caller, inputUserName, permissions); - boolean hasPermission = regionEnv.getConnection().getAdmin() - .hasUserPermissions(inputUserName, permissions).get(0); + boolean hasPermission = + regionEnv.getConnection().getAdmin().hasUserPermissions(inputUserName, permissions).get(0); response = ResponseConverter.buildHasPermissionResponse(hasPermission); } catch (IOException ioe) { ResponseConverter.setControllerException(controller, ioe); @@ -2435,18 +2366,18 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preGrant(ObserverContext ctx, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { preGrantOrRevoke(getActiveUser(ctx), "grant", userPermission); } @Override public void preRevoke(ObserverContext ctx, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { preGrantOrRevoke(getActiveUser(ctx), "revoke", userPermission); } private void preGrantOrRevoke(User caller, String request, UserPermission userPermission) - throws IOException { + throws IOException { switch (userPermission.getPermission().scope) { case GLOBAL: accessChecker.requireGlobalPermission(caller, request, Action.ADMIN, ""); @@ -2470,13 +2401,13 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preGetUserPermissions(ObserverContext ctx, - String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) - throws IOException { + String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) + throws IOException { preGetUserPermissions(getActiveUser(ctx), userName, namespace, tableName, family, qualifier); } private void preGetUserPermissions(User caller, String userName, String namespace, - TableName tableName, byte[] family, byte[] qualifier) throws IOException { + TableName tableName, byte[] family, byte[] qualifier) throws IOException { if (tableName != null) { accessChecker.requirePermission(caller, "getUserPermissions", tableName, family, qualifier, userName, Action.ADMIN); @@ -2490,12 +2421,12 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, @Override public void preHasUserPermissions(ObserverContext ctx, - String userName, List permissions) throws IOException { + String userName, List permissions) throws IOException { preHasUserPermissions(getActiveUser(ctx), userName, permissions); } private void preHasUserPermissions(User caller, String userName, List permissions) - throws IOException { + throws IOException { String request = "hasUserPermissions"; for (Permission permission : permissions) { if (!caller.getShortName().equals(userName)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java index 3ced725e0ad..7f9853d8939 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java 
@@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; @@ -26,7 +25,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; @@ -44,17 +42,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; /** * Performs authorization checks for a given user's assigned permissions. *

      - * There're following scopes: Global, Namespace, Table, Family, - * Qualifier, Cell. - * Generally speaking, higher scopes can overrides lower scopes, - * except for Cell permission can be granted even a user has not permission on specified table, - * which means the user can get/scan only those granted cells parts. + * There're following scopes: Global, Namespace, Table, Family, + * Qualifier, Cell. Generally speaking, higher scopes can overrides lower scopes, + * except for Cell permission can be granted even a user has not permission on specified table, + * which means the user can get/scan only those granted cells parts. *

      - * e.g, if user A has global permission R(ead), he can - * read table T without checking table scope permission, so authorization checks alway starts from - * Global scope. + * e.g, if user A has global permission R(ead), he can read table T without checking table scope + * permission, so authorization checks alway starts from Global scope. *

      - * For each scope, not only user but also groups he belongs to will be checked. + * For each scope, not only user but also groups he belongs to will be checked. *

      */ @InterfaceAudience.Private @@ -91,12 +87,13 @@ public final class AuthManager { } } } + PermissionCache NS_NO_PERMISSION = new PermissionCache<>(); PermissionCache TBL_NO_PERMISSION = new PermissionCache<>(); /** - * Cache for global permission excluding superuser and supergroup. - * Since every user/group can only have one global permission, no need to use PermissionCache. + * Cache for global permission excluding superuser and supergroup. Since every user/group can only + * have one global permission, no need to use PermissionCache. */ private Map globalCache = new ConcurrentHashMap<>(); /** Cache for namespace permission. */ @@ -118,7 +115,7 @@ public final class AuthManager { /** * Update acl info for table. * @param table name of table - * @param data updated acl data + * @param data updated acl data * @throws IOException exception when deserialize data */ public void refreshTableCacheFromWritable(TableName table, byte[] data) throws IOException { @@ -143,7 +140,7 @@ public final class AuthManager { /** * Update acl info for namespace. * @param namespace namespace - * @param data updated acl data + * @param data updated acl data * @throws IOException exception when deserialize data */ public void refreshNamespaceCacheFromWritable(String namespace, byte[] data) throws IOException { @@ -183,7 +180,7 @@ public final class AuthManager { /** * Updates the internal table permissions cache for specified table. - * @param table updated table name + * @param table updated table name * @param tablePerms new table permissions */ private void updateTableCache(TableName table, ListMultimap tablePerms) { @@ -198,10 +195,9 @@ public final class AuthManager { /** * Updates the internal namespace permissions cache for specified namespace. * @param namespace updated namespace - * @param nsPerms new namespace permissions + * @param nsPerms new namespace permissions */ - private void updateNamespaceCache(String namespace, - ListMultimap nsPerms) { + private void updateNamespaceCache(String namespace, ListMultimap nsPerms) { PermissionCache cacheToUpdate = namespaceCache.getOrDefault(namespace, new PermissionCache<>()); clearCache(cacheToUpdate); @@ -216,7 +212,7 @@ public final class AuthManager { @SuppressWarnings("unchecked") private void updateCache(ListMultimap newPermissions, - PermissionCache cacheToUpdate) { + PermissionCache cacheToUpdate) { for (String name : newPermissions.keySet()) { for (Permission permission : newPermissions.get(name)) { cacheToUpdate.put(name, permission); @@ -226,7 +222,7 @@ public final class AuthManager { /** * Check if user has given action privilige in global scope. - * @param user user name + * @param user user name * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ @@ -254,9 +250,9 @@ public final class AuthManager { /** * Check if user has given action privilige in namespace scope. 
- * @param user user name + * @param user user name * @param namespace namespace - * @param action one of action in [Read, Write, Create, Exec, Admin] + * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ public boolean authorizeUserNamespace(User user, String namespace, Permission.Action action) { @@ -266,8 +262,8 @@ public final class AuthManager { if (authorizeUserGlobal(user, action)) { return true; } - PermissionCache nsPermissions = namespaceCache.getOrDefault(namespace, - NS_NO_PERMISSION); + PermissionCache nsPermissions = + namespaceCache.getOrDefault(namespace, NS_NO_PERMISSION); if (authorizeNamespace(nsPermissions.get(user.getShortName()), namespace, action)) { return true; } @@ -279,8 +275,8 @@ public final class AuthManager { return false; } - private boolean authorizeNamespace(Set permissions, - String namespace, Permission.Action action) { + private boolean authorizeNamespace(Set permissions, String namespace, + Permission.Action action) { if (permissions == null) { return false; } @@ -293,10 +289,10 @@ public final class AuthManager { } /** - * Checks if the user has access to the full table or at least a family/qualifier - * for the specified action. - * @param user user name - * @param table table name + * Checks if the user has access to the full table or at least a family/qualifier for the + * specified action. + * @param user user name + * @param table table name * @param action action in one of [Read, Write, Create, Exec, Admin] * @return true if the user has access to the table, false otherwise */ @@ -310,8 +306,8 @@ public final class AuthManager { if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) { return true; } - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (hasAccessTable(tblPermissions.get(user.getShortName()), action)) { return true; } @@ -337,8 +333,8 @@ public final class AuthManager { /** * Check if user has given action privilige in table scope. - * @param user user name - * @param table table name + * @param user user name + * @param table table name * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ @@ -348,28 +344,28 @@ public final class AuthManager { /** * Check if user has given action privilige in table:family scope. - * @param user user name - * @param table table name + * @param user user name + * @param table table name * @param family family name * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ public boolean authorizeUserTable(User user, TableName table, byte[] family, - Permission.Action action) { + Permission.Action action) { return authorizeUserTable(user, table, family, null, action); } /** * Check if user has given action privilige in table:family:qualifier scope. 
- * @param user user name - * @param table table name - * @param family family name + * @param user user name + * @param table table name + * @param family family name * @param qualifier qualifier name - * @param action one of action in [Read, Write, Create, Exec, Admin] + * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ - public boolean authorizeUserTable(User user, TableName table, byte[] family, - byte[] qualifier, Permission.Action action) { + public boolean authorizeUserTable(User user, TableName table, byte[] family, byte[] qualifier, + Permission.Action action) { if (user == null) { return false; } @@ -379,22 +375,24 @@ public final class AuthManager { if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) { return true; } - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (authorizeTable(tblPermissions.get(user.getShortName()), table, family, qualifier, action)) { return true; } for (String group : user.getGroupNames()) { - if (authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), - table, family, qualifier, action)) { + if ( + authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, qualifier, + action) + ) { return true; } } return false; } - private boolean authorizeTable(Set permissions, - TableName table, byte[] family, byte[] qualifier, Permission.Action action) { + private boolean authorizeTable(Set permissions, TableName table, byte[] family, + byte[] qualifier, Permission.Action action) { if (permissions == null) { return false; } @@ -407,32 +405,33 @@ public final class AuthManager { } /** - * Check if user has given action privilige in table:family scope. - * This method is for backward compatibility. - * @param user user name - * @param table table name + * Check if user has given action privilige in table:family scope. This method is for backward + * compatibility. + * @param user user name + * @param table table name * @param family family names * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ - public boolean authorizeUserFamily(User user, TableName table, - byte[] family, Permission.Action action) { - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + public boolean authorizeUserFamily(User user, TableName table, byte[] family, + Permission.Action action) { + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (authorizeFamily(tblPermissions.get(user.getShortName()), table, family, action)) { return true; } for (String group : user.getGroupNames()) { - if (authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)), - table, family, action)) { + if ( + authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, action) + ) { return true; } } return false; } - private boolean authorizeFamily(Set permissions, - TableName table, byte[] family, Permission.Action action) { + private boolean authorizeFamily(Set permissions, TableName table, byte[] family, + Permission.Action action) { if (permissions == null) { return false; } @@ -446,9 +445,9 @@ public final class AuthManager { /** * Check if user has given action privilige in cell scope. 
- * @param user user name - * @param table table name - * @param cell cell to be checked + * @param user user name + * @param table table name + * @param cell cell to be checked * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ @@ -456,11 +455,11 @@ public final class AuthManager { try { List perms = PermissionStorage.getCellPermissionsForUser(user, cell); if (LOG.isTraceEnabled()) { - LOG.trace("Perms for user {} in table {} in cell {}: {}", - user.getShortName(), table, cell, (perms != null ? perms : "")); + LOG.trace("Perms for user {} in table {} in cell {}: {}", user.getShortName(), table, cell, + (perms != null ? perms : "")); } if (perms != null) { - for (Permission p: perms) { + for (Permission p : perms) { if (p.implies(action)) { return true; } @@ -492,8 +491,7 @@ public final class AuthManager { } /** - * Last modification logical time - * @return time + * Last modification logical time n */ public long getMTime() { return mtime.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java index 64a8c4cfeae..5842d583437 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; /** - * Represents the result of an authorization check for logging and error - * reporting. + * Represents the result of an authorization check for logging and error reporting. 
*/ @InterfaceAudience.Private public class AuthResult { @@ -52,7 +49,7 @@ public class AuthResult { private final Map> families; public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + Permission.Action action, TableName table, byte[] family, byte[] qualifier) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -67,8 +64,7 @@ public class AuthResult { } public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + Permission.Action action, TableName table, Map> families) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -83,7 +79,7 @@ public class AuthResult { } public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, String namespace) { + Permission.Action action, String namespace) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -129,7 +125,9 @@ public class AuthResult { return request; } - public Params getParams() { return this.params;} + public Params getParams() { + return this.params; + } public void setAllowed(boolean allowed) { this.allowed = allowed; @@ -140,7 +138,7 @@ public class AuthResult { } private static String toFamiliesString(Map> families, - byte[] family, byte[] qual) { + byte[] family, byte[] qual) { StringBuilder sb = new StringBuilder(); if (families != null) { boolean first = true; @@ -150,11 +148,11 @@ public class AuthResult { for (Object o : entry.getValue()) { String qualifier; if (o instanceof byte[]) { - qualifier = Bytes.toString((byte[])o); + qualifier = Bytes.toString((byte[]) o); } else if (o instanceof Cell) { Cell c = (Cell) o; qualifier = Bytes.toString(c.getQualifierArray(), c.getQualifierOffset(), - c.getQualifierLength()); + c.getQualifierLength()); } else { // Shouldn't really reach this? qualifier = o.toString(); @@ -185,27 +183,20 @@ public class AuthResult { public String toContextString() { StringBuilder sb = new StringBuilder(); String familiesString = toFamiliesString(families, family, qualifier); - sb.append("(user=") - .append(user != null ? user.getName() : "UNKNOWN") - .append(", "); + sb.append("(user=").append(user != null ? user.getName() : "UNKNOWN").append(", "); sb.append("scope=") - .append(namespace != null ? namespace : - table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()) - .append(", "); - if(namespace == null && familiesString.length() > 0) { - sb.append("family=") - .append(familiesString) - .append(", "); + .append(namespace != null ? namespace + : table == null ? "GLOBAL" + : table.getNameWithNamespaceInclAsString()) + .append(", "); + if (namespace == null && familiesString.length() > 0) { + sb.append("family=").append(familiesString).append(", "); } String paramsString = params.toString(); - if(paramsString.length() > 0) { - sb.append("params=[") - .append(paramsString) - .append("],"); + if (paramsString.length() > 0) { + sb.append("params=[").append(paramsString).append("],"); } - sb.append("action=") - .append(action != null ? action.toString() : "") - .append(")"); + sb.append("action=").append(action != null ? 
action.toString() : "").append(")"); return sb.toString(); } @@ -214,35 +205,33 @@ public class AuthResult { return "AuthResult" + toContextString(); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, String namespace) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + String namespace) { return new AuthResult(true, request, reason, user, action, namespace); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + TableName table, byte[] family, byte[] qualifier) { return new AuthResult(true, request, reason, user, action, table, family, qualifier); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + TableName table, Map> families) { return new AuthResult(true, request, reason, user, action, table, families); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, String namespace) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + String namespace) { return new AuthResult(false, request, reason, user, action, namespace); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + TableName table, byte[] family, byte[] qualifier) { return new AuthResult(false, request, reason, user, action, table, family, qualifier); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + TableName table, Map> families) { return new AuthResult(false, request, reason, user, action, table, families); } @@ -292,12 +281,10 @@ public class AuthResult { @Override public String toString() { String familiesString = toFamiliesString(families, family, qualifier); - String[] params = new String[] { - namespace != null ? "namespace=" + namespace : null, - tableName != null ? "table=" + tableName.getNameWithNamespaceInclAsString() : null, - familiesString.length() > 0 ? "family=" + familiesString : null, - extraParams.isEmpty() ? null : concatenateExtraParams() - }; + String[] params = new String[] { namespace != null ? "namespace=" + namespace : null, + tableName != null ? "table=" + tableName.getNameWithNamespaceInclAsString() : null, + familiesString.length() > 0 ? "family=" + familiesString : null, + extraParams.isEmpty() ? 
null : concatenateExtraParams() }; return Joiner.on(",").skipNulls().join(params); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java index 1e83e966102..231fd8bcaef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; @@ -43,10 +42,10 @@ import org.slf4j.LoggerFactory; public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, MasterObserver { public static final String CP_COPROCESSOR_WHITELIST_PATHS_KEY = - "hbase.coprocessor.region.whitelist.paths"; + "hbase.coprocessor.region.whitelist.paths"; - private static final Logger LOG = LoggerFactory - .getLogger(CoprocessorWhitelistMasterObserver.class); + private static final Logger LOG = + LoggerFactory.getLogger(CoprocessorWhitelistMasterObserver.class); @Override public Optional getMasterObserver() { @@ -55,35 +54,31 @@ public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, Ma @Override public TableDescriptor preModifyTable(ObserverContext ctx, - TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) - throws IOException { + TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) throws IOException { verifyCoprocessors(ctx, newDesc); return newDesc; } @Override - public void preCreateTable(ObserverContext ctx, - TableDescriptor htd, RegionInfo[] regions) throws IOException { + public void preCreateTable(ObserverContext ctx, TableDescriptor htd, + RegionInfo[] regions) throws IOException { verifyCoprocessors(ctx, htd); } /** * Validates a single whitelist path against the coprocessor path - * @param coprocPath the path to the coprocessor including scheme - * @param wlPath can be: - * 1) a "*" to wildcard all coprocessor paths - * 2) a specific filesystem (e.g. hdfs://my-cluster/) - * 3) a wildcard path to be evaluated by - * {@link FilenameUtils#wildcardMatch(String, String)} - * path can specify scheme or not (e.g. - * "file:///usr/hbase/coprocessors" or for all - * filesystems "/usr/hbase/coprocessors") - * @return if the path was found under the wlPath + * @param coprocPath the path to the coprocessor including scheme + * @param wlPath can be: 1) a "*" to wildcard all coprocessor paths 2) a specific filesystem + * (e.g. hdfs://my-cluster/) 3) a wildcard path to be evaluated by + * {@link FilenameUtils#wildcardMatch(String, String)} path can specify scheme + * or not (e.g. 
"file:///usr/hbase/coprocessors" or for all filesystems + * "/usr/hbase/coprocessors") + * @return if the path was found under the wlPath */ private static boolean validatePath(Path coprocPath, Path wlPath) { // verify if all are allowed if (wlPath.toString().equals("*")) { - return(true); + return (true); } // verify we are on the same filesystem if wlPath has a scheme @@ -113,50 +108,48 @@ public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, Ma coprocPathHost = ""; } if (!wlPathScheme.equals(coprocPathScheme) || !wlPathHost.equals(coprocPathHost)) { - return(false); + return (false); } } // allow any on this file-system (file systems were verified to be the same above) if (wlPath.isRoot()) { - return(true); + return (true); } // allow "loose" matches stripping scheme - if (FilenameUtils.wildcardMatch( - Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(), - Path.getPathWithoutSchemeAndAuthority(wlPath).toString())) { - return(true); + if ( + FilenameUtils.wildcardMatch(Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(), + Path.getPathWithoutSchemeAndAuthority(wlPath).toString()) + ) { + return (true); } - return(false); + return (false); } /** - * Perform the validation checks for a coprocessor to determine if the path - * is white listed or not. - * @throws IOException if path is not included in whitelist or a failure - * occurs in processing - * @param ctx as passed in from the coprocessor - * @param htd as passed in from the coprocessor + * Perform the validation checks for a coprocessor to determine if the path is white listed or + * not. + * @throws IOException if path is not included in whitelist or a failure occurs in processing + * @param ctx as passed in from the coprocessor + * @param htd as passed in from the coprocessor */ private static void verifyCoprocessors(ObserverContext ctx, - TableDescriptor htd) throws IOException { - Collection paths = - ctx.getEnvironment().getConfiguration().getStringCollection( - CP_COPROCESSOR_WHITELIST_PATHS_KEY); + TableDescriptor htd) throws IOException { + Collection paths = ctx.getEnvironment().getConfiguration() + .getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY); for (CoprocessorDescriptor cp : htd.getCoprocessorDescriptors()) { if (cp.getJarPath().isPresent()) { if (paths.stream().noneMatch(p -> { Path wlPath = new Path(p); if (validatePath(new Path(cp.getJarPath().get()), wlPath)) { - LOG.debug(String.format("Coprocessor %s found in directory %s", - cp.getClassName(), p)); + LOG.debug(String.format("Coprocessor %s found in directory %s", cp.getClassName(), p)); return true; } return false; })) { - throw new IOException(String.format("Loading %s DENIED in %s", - cp.getClassName(), CP_COPROCESSOR_WHITELIST_PATHS_KEY)); + throw new IOException(String.format("Loading %s DENIED in %s", cp.getClassName(), + CP_COPROCESSOR_WHITELIST_PATHS_KEY)); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java index 95927c0b164..1a5bb63cbd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.security.access; import java.util.Collection; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.RegionInfo; @@ -28,8 +27,8 @@ import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.yetus.audience.InterfaceAudience; /** - * NoopAccessChecker is returned when hbase.security.authorization is not enabled. - * Always allow authorization if any user require any permission. + * NoopAccessChecker is returned when hbase.security.authorization is not enabled. Always allow + * authorization if any user require any permission. */ @InterfaceAudience.Private public final class NoopAccessChecker extends AccessChecker { @@ -49,7 +48,7 @@ public final class NoopAccessChecker extends AccessChecker { @Override public void requireGlobalPermission(User user, String request, Action perm, TableName tableName, - Map> familyMap, String filterUser) { + Map> familyMap, String filterUser) { } @Override @@ -58,23 +57,23 @@ public final class NoopAccessChecker extends AccessChecker { @Override public void requireNamespacePermission(User user, String request, String namespace, - String filterUser, Action... permissions) { + String filterUser, Action... permissions) { } @Override public void requireNamespacePermission(User user, String request, String namespace, - TableName tableName, Map> familyMap, - Action... permissions) { + TableName tableName, Map> familyMap, + Action... permissions) { } @Override public void requirePermission(User user, String request, TableName tableName, byte[] family, - byte[] qualifier, String filterUser, Action... permissions) { + byte[] qualifier, String filterUser, Action... permissions) { } @Override public void requireTablePermission(User user, String request, TableName tableName, byte[] family, - byte[] qualifier, Action... permissions) { + byte[] qualifier, Action... permissions) { } @Override @@ -83,7 +82,7 @@ public final class NoopAccessChecker extends AccessChecker { @Override public void checkLockPermissions(User user, String namespace, TableName tableName, - RegionInfo[] regionInfos, String reason) { + RegionInfo[] regionInfos, String reason) { } @Override @@ -93,7 +92,7 @@ public final class NoopAccessChecker extends AccessChecker { @Override public AuthResult permissionGranted(String request, User user, Action permRequest, - TableName tableName, Map> families) { + TableName tableName, Map> families) { return AuthResult.allow(request, "All users allowed because authorization is disabled", user, permRequest, tableName, families); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java index eab3861d7ae..d9fdd0273e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.ByteArrayInputStream; @@ -77,14 +76,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Maintains lists of permission grants to users and groups to allow for - * authorization checks by {@link AccessController}. - * + * Maintains lists of permission grants to users and groups to allow for authorization checks by + * {@link AccessController}. *

      - * Access control lists are stored in an "internal" metadata table named - * {@code _acl_}. Each table's permission grants are stored as a separate row, - * keyed by the table name. KeyValues for permissions assignments are stored - * in one of the formats: + * Access control lists are stored in an "internal" metadata table named {@code _acl_}. Each table's + * permission grants are stored as a separate row, keyed by the table name. KeyValues for + * permissions assignments are stored in one of the formats: + * *

        * Key                      Desc
        * --------                 --------
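For readers who want to see the storage layout described in the class Javadoc above, here is a minimal sketch, not part of the patch, that dumps the raw grant cells with the ordinary client API. It assumes a running cluster reachable through HBaseConfiguration.create(); the class name AclTableDump is hypothetical, and ACL_TABLE_NAME / the "l" family come from the constants shown in the hunks just below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.util.Bytes;

public class AclTableDump {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table acl = conn.getTable(PermissionStorage.ACL_TABLE_NAME)) {
      // One row per table (or @namespace); family "l" holds one cell per grant:
      // qualifier = user[,family[,qualifier]], value = the encoded action codes.
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes(PermissionStorage.ACL_LIST_FAMILY_STR));
      try (ResultScanner scanner = acl.getScanner(scan)) {
        for (Result row : scanner) {
          for (Cell cell : row.rawCells()) {
            System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " / "
              + Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
              + Bytes.toStringBinary(CellUtil.cloneValue(cell)));
          }
        }
      }
    }
  }
}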
      @@ -104,7 +102,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
       public final class PermissionStorage {
         /** Internal storage table for access control lists */
         public static final TableName ACL_TABLE_NAME =
      -      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl");
      +    TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl");
         public static final byte[] ACL_GLOBAL_NAME = ACL_TABLE_NAME.getName();
         /** Column family used to store ACL grants */
         public static final String ACL_LIST_FAMILY_STR = "l";
      @@ -115,8 +113,8 @@ public final class PermissionStorage {
         public static final char NAMESPACE_PREFIX = '@';
       
         /**
      -   * Delimiter to separate user, column family, and qualifier in
      -   * _acl_ table info: column keys */
      +   * Delimiter to separate user, column family, and qualifier in _acl_ table info: column keys
      +   */
         public static final char ACL_KEY_DELIMITER = ',';
       
         private static final Logger LOG = LoggerFactory.getLogger(PermissionStorage.class);
      @@ -126,13 +124,13 @@ public final class PermissionStorage {
       
         /**
          * Stores a new user permission grant in the access control lists table.
      -   * @param conf the configuration
      +   * @param conf     the configuration
          * @param userPerm the details of the permission to be granted
      -   * @param t acl table instance. It is closed upon method return.
      +   * @param t        acl table instance. It is closed upon method return.
          * @throws IOException in the case of an error accessing the metadata table
          */
         public static void addUserPermission(Configuration conf, UserPermission userPerm, Table t,
      -      boolean mergeExistingPermissions) throws IOException {
      +    boolean mergeExistingPermissions) throws IOException {
           Permission permission = userPerm.getPermission();
           Permission.Action[] actions = permission.getActions();
           byte[] rowKey = userPermissionRowKey(permission);
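As a usage illustration for the grant path documented above (not part of the patch; PermissionStorage is an internal, IA.Private class), the sketch below stores a READ grant for a hypothetical user "bob" on table data, family info, qualifier colA. The UserPermission(String, Permission) constructor is an assumption taken from the surrounding code base; the other calls use signatures visible in this file.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

public class GrantSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table acl = conn.getTable(PermissionStorage.ACL_TABLE_NAME)) {
      Permission perm = Permission.newBuilder(TableName.valueOf("data"))
        .withFamily(Bytes.toBytes("info"))
        .withQualifier(Bytes.toBytes("colA"))
        // Encode the action the same way this class does: one code byte per action.
        .withActionCodes(new byte[] { Permission.Action.READ.code() })
        .build();
      // mergeExistingPermissions = true: union with whatever is already stored for this key.
      // Note: per the Javadoc above, the method closes the table handle it is given.
      PermissionStorage.addUserPermission(conf, new UserPermission("bob", perm), acl, true);
    }
  }
}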
      @@ -146,7 +144,7 @@ public final class PermissionStorage {
           }
       
           Set actionSet = new TreeSet();
      -    if(mergeExistingPermissions){
      +    if (mergeExistingPermissions) {
             List perms = getUserPermissions(conf, rowKey, null, null, null, false);
             UserPermission currentPerm = null;
             for (UserPermission perm : perms) {
      @@ -156,7 +154,7 @@ public final class PermissionStorage {
               }
             }
       
      -      if (currentPerm != null && currentPerm.getPermission().getActions() != null){
      +      if (currentPerm != null && currentPerm.getPermission().getActions() != null) {
               actionSet.addAll(Arrays.asList(currentPerm.getPermission().getActions()));
             }
           }
      @@ -170,17 +168,12 @@ public final class PermissionStorage {
           for (Permission.Action action : actionSet) {
             value[index++] = action.code();
           }
      -    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      -        .setRow(p.getRow())
      -        .setFamily(ACL_LIST_FAMILY)
      -        .setQualifier(key)
      -        .setTimestamp(p.getTimestamp())
      -        .setType(Type.Put)
      -        .setValue(value)
      -        .build());
      +    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow())
      +      .setFamily(ACL_LIST_FAMILY).setQualifier(key).setTimestamp(p.getTimestamp()).setType(Type.Put)
      +      .setValue(value).build());
           if (LOG.isDebugEnabled()) {
             LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " "
      -          + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
      +        + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
           }
           try {
             t.put(p);
      @@ -190,34 +183,33 @@ public final class PermissionStorage {
         }
       
         static void addUserPermission(Configuration conf, UserPermission userPerm, Table t)
      -          throws IOException{
      +    throws IOException {
           addUserPermission(conf, userPerm, t, false);
         }
       
         /**
      -   * Removes a previously granted permission from the stored access control
      -   * lists.  The {@link TablePermission} being removed must exactly match what
      -   * is stored -- no wildcard matching is attempted.  Ie, if user "bob" has
      -   * been granted "READ" access to the "data" table, but only to column family
      -   * plus qualifier "info:colA", then trying to call this method with only
      -   * user "bob" and the table name "data" (but without specifying the
      -   * column qualifier "info:colA") will have no effect.
      -   *
      -   * @param conf the configuration
      +   * Removes a previously granted permission from the stored access control lists. The
      +   * {@link TablePermission} being removed must exactly match what is stored -- no wildcard matching
      +   * is attempted. Ie, if user "bob" has been granted "READ" access to the "data" table, but only to
      +   * column family plus qualifier "info:colA", then trying to call this method with only user "bob"
      +   * and the table name "data" (but without specifying the column qualifier "info:colA") will have
      +   * no effect.
      +   * @param conf     the configuration
          * @param userPerm the details of the permission to be revoked
      -   * @param t acl table
      +   * @param t        acl table
          * @throws IOException if there is an error accessing the metadata table
          */
         public static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
      -      throws IOException {
      -    if (null == userPerm.getPermission().getActions() ||
      -        userPerm.getPermission().getActions().length == 0) {
      +    throws IOException {
      +    if (
      +      null == userPerm.getPermission().getActions()
      +        || userPerm.getPermission().getActions().length == 0
      +    ) {
             removePermissionRecord(conf, userPerm, t);
           } else {
             // Get all the global user permissions from the acl table
      -      List permsList =
      -        getUserPermissions(conf, userPermissionRowKey(userPerm.getPermission()),
      -          null, null, null, false);
      +      List permsList = getUserPermissions(conf,
      +        userPermissionRowKey(userPerm.getPermission()), null, null, null, false);
             List remainingActions = new ArrayList<>();
             List dropActions = Arrays.asList(userPerm.getPermission().getActions());
             for (UserPermission perm : permsList) {
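To make the exact-match rule in the removeUserPermission Javadoc concrete, the hedged fragment below revokes against a grant stored under the key bob,info,colA. It reuses the imports and the conf/acl handles from the grant sketch earlier, plus java.io.IOException; the UserPermission(String, Permission) constructor is again an assumption not shown in this patch.

// Assume "bob" currently holds READ on table "data", restricted to "info:colA".
static void revokeSketch(Configuration conf, Table acl) throws IOException {
  // Table-level revoke: no stored record is keyed as just "bob" for this table, so,
  // as the Javadoc above states, this call has no effect on the "info:colA" grant.
  Permission tableOnly = Permission.newBuilder(TableName.valueOf("data"))
    .withActionCodes(new byte[] { Permission.Action.READ.code() })
    .build();
  PermissionStorage.removeUserPermission(conf, new UserPermission("bob", tableOnly), acl);

  // Revoke naming the same family and qualifier as the grant: the stored record is removed,
  // or trimmed to the remaining actions if more than READ had been granted.
  Permission exact = Permission.newBuilder(TableName.valueOf("data"))
    .withFamily(Bytes.toBytes("info"))
    .withQualifier(Bytes.toBytes("colA"))
    .withActionCodes(new byte[] { Permission.Action.READ.code() })
    .build();
  PermissionStorage.removeUserPermission(conf, new UserPermission("bob", exact), acl);
}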
      @@ -229,8 +221,8 @@ public final class PermissionStorage {
                   }
                 }
                 if (!remainingActions.isEmpty()) {
      -            perm.getPermission().setActions(
      -              remainingActions.toArray(new Permission.Action[remainingActions.size()]));
      +            perm.getPermission()
      +              .setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
                   addUserPermission(conf, perm, t);
                 } else {
                   removePermissionRecord(conf, userPerm, t);
      @@ -240,12 +232,12 @@ public final class PermissionStorage {
             }
           }
           if (LOG.isDebugEnabled()) {
      -      LOG.debug("Removed permission "+ userPerm.toString());
      +      LOG.debug("Removed permission " + userPerm.toString());
           }
         }
       
         private static void removePermissionRecord(Configuration conf, UserPermission userPerm, Table t)
      -      throws IOException {
      +    throws IOException {
           Delete d = new Delete(userPermissionRowKey(userPerm.getPermission()));
           d.addColumns(ACL_LIST_FAMILY, userPermissionKey(userPerm));
           try {
      @@ -259,12 +251,12 @@ public final class PermissionStorage {
          * Remove specified table from the _acl_ table.
          */
         static void removeTablePermissions(Configuration conf, TableName tableName, Table t)
      -      throws IOException{
      +    throws IOException {
           Delete d = new Delete(tableName.getName());
           d.addFamily(ACL_LIST_FAMILY);
       
           if (LOG.isDebugEnabled()) {
      -      LOG.debug("Removing permissions of removed table "+ tableName);
      +      LOG.debug("Removing permissions of removed table " + tableName);
           }
           try {
             t.delete(d);
      @@ -277,11 +269,11 @@ public final class PermissionStorage {
          * Remove specified namespace from the acl table.
          */
         static void removeNamespacePermissions(Configuration conf, String namespace, Table t)
      -      throws IOException{
      +    throws IOException {
           Delete d = new Delete(Bytes.toBytes(toNamespaceEntry(namespace)));
           d.addFamily(ACL_LIST_FAMILY);
           if (LOG.isDebugEnabled()) {
      -      LOG.debug("Removing permissions of removed namespace "+ namespace);
      +      LOG.debug("Removing permissions of removed namespace " + namespace);
           }
       
           try {
      @@ -292,15 +284,14 @@ public final class PermissionStorage {
         }
       
         static private void removeTablePermissions(TableName tableName, byte[] column, Table table,
      -      boolean closeTable) throws IOException {
      +    boolean closeTable) throws IOException {
           Scan scan = new Scan();
           scan.addFamily(ACL_LIST_FAMILY);
       
           String columnName = Bytes.toString(column);
      -    scan.setFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(
      -        String.format("(%s%s%s)|(%s%s)$",
      -            ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER,
      -            ACL_KEY_DELIMITER, columnName))));
      +    scan.setFilter(new QualifierFilter(CompareOperator.EQUAL,
      +      new RegexStringComparator(String.format("(%s%s%s)|(%s%s)$", ACL_KEY_DELIMITER, columnName,
      +        ACL_KEY_DELIMITER, ACL_KEY_DELIMITER, columnName))));
       
           Set qualifierSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
           ResultScanner scanner = null;
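The qualifier regex assembled above selects every grant key that is scoped to the column being dropped. For a family named info it expands to (,info,)|(,info)$; the small standalone check below (class name hypothetical) approximates the comparator with java.util.regex find() semantics, which is an assumption of this sketch.

import java.util.regex.Pattern;

public class AclKeyRegexSketch {
  public static void main(String[] args) {
    char delimiter = ','; // ACL_KEY_DELIMITER
    String columnName = "info";
    Pattern pattern = Pattern.compile(
      String.format("(%s%s%s)|(%s%s)$", delimiter, columnName, delimiter, delimiter, columnName));
    for (String key : new String[] { "bob", "bob,info", "bob,info,colA", "bob,infox,colA" }) {
      System.out.println(key + " -> " + pattern.matcher(key).find());
    }
    // Expected: false, true, true, false -- only keys scoped to family "info" are selected
    // (and their cells subsequently deleted by the surrounding code).
  }
}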
      @@ -333,10 +324,10 @@ public final class PermissionStorage {
          * Remove specified table column from the acl table.
          */
         static void removeTablePermissions(Configuration conf, TableName tableName, byte[] column,
      -      Table t) throws IOException {
      +    Table t) throws IOException {
           if (LOG.isDebugEnabled()) {
      -      LOG.debug("Removing permissions of removed column " + Bytes.toString(column) +
      -          " from table "+ tableName);
      +      LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + " from table "
      +        + tableName);
           }
           removeTablePermissions(tableName, column, t, true);
         }
      @@ -357,10 +348,7 @@ public final class PermissionStorage {
         }
       
         /**
      -   * Build qualifier key from user permission:
      -   *  username
      -   *  username,family
      -   *  username,family,qualifier
      +   * Build qualifier key from user permission: username username,family username,family,qualifier
          */
         static byte[] userPermissionKey(UserPermission permission) {
           byte[] key = Bytes.toBytes(permission.getUser());
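As the Javadoc above notes, the qualifier key is just the user name, optionally followed by ,family and ,family,qualifier; the next hunk builds it with Bytes.add. A standalone sketch of the same concatenation (class name hypothetical):

import org.apache.hadoop.hbase.util.Bytes;

public class UserPermissionKeySketch {
  public static void main(String[] args) {
    final char delimiter = ','; // PermissionStorage.ACL_KEY_DELIMITER
    byte[] key = Bytes.toBytes("bob");       // a user-only grant would stop here: key is just "bob"
    byte[] family = Bytes.toBytes("info");
    byte[] qualifier = Bytes.toBytes("colA");
    key = Bytes.add(key, Bytes.add(new byte[] { (byte) delimiter }, family));
    key = Bytes.add(key, Bytes.add(new byte[] { (byte) delimiter }, qualifier));
    System.out.println(Bytes.toString(key)); // bob,info,colA
  }
}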
      @@ -373,9 +361,9 @@ public final class PermissionStorage {
           }
       
           if (family != null && family.length > 0) {
      -      key = Bytes.add(key, Bytes.add(new byte[]{ACL_KEY_DELIMITER}, family));
      +      key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, family));
             if (qualifier != null && qualifier.length > 0) {
      -        key = Bytes.add(key, Bytes.add(new byte[]{ACL_KEY_DELIMITER}, qualifier));
      +        key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, qualifier));
             }
           }
       
      @@ -383,8 +371,7 @@ public final class PermissionStorage {
         }
       
         /**
      -   * Returns {@code true} if the given region is part of the {@code _acl_}
      -   * metadata table.
      +   * Returns {@code true} if the given region is part of the {@code _acl_} metadata table.
          */
         static boolean isAclRegion(Region region) {
           return ACL_TABLE_NAME.equals(region.getTableDescriptor().getTableName());
      @@ -398,17 +385,15 @@ public final class PermissionStorage {
         }
       
         /**
      -   * Loads all of the permission grants stored in a region of the {@code _acl_}
      -   * table.
      -   *
      +   * Loads all of the permission grants stored in a region of the {@code _acl_} table.
          * @param aclRegion the acl region
          * @return a map of the permissions for this table.
          * @throws IOException if an error occurs
          */
         static Map> loadAll(Region aclRegion)
      -      throws IOException {
      +    throws IOException {
           if (!isAclRegion(aclRegion)) {
      -      throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
      +      throw new IOException("Can only load permissions from " + ACL_TABLE_NAME);
           }
       
           Map> allPerms =
      @@ -434,7 +419,7 @@ public final class PermissionStorage {
                   entry = CellUtil.cloneRow(kv);
                 }
                 Pair permissionsOfUserOnTable =
      -              parsePermissionRecord(entry, kv, null, null, false, null);
      +            parsePermissionRecord(entry, kv, null, null, false, null);
                 if (permissionsOfUserOnTable != null) {
                   String username = permissionsOfUserOnTable.getFirst();
                   Permission permission = permissionsOfUserOnTable.getSecond();
      @@ -458,11 +443,11 @@ public final class PermissionStorage {
         }
       
         /**
      -   * Load all permissions from the region server holding {@code _acl_},
      -   * primarily intended for testing purposes.
      +   * Load all permissions from the region server holding {@code _acl_}, primarily intended for
      +   * testing purposes.
          */
      -  static Map> loadAll(
      -      Configuration conf) throws IOException {
      +  static Map> loadAll(Configuration conf)
      +    throws IOException {
           Map> allPerms =
             new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
       
      @@ -479,7 +464,7 @@ public final class PermissionStorage {
               try {
                 for (Result row : scanner) {
                   ListMultimap resultPerms =
      -                parsePermissions(row.getRow(), row, null, null, null, false);
      +              parsePermissions(row.getRow(), row, null, null, null, false);
                   allPerms.put(row.getRow(), resultPerms);
                 }
               } finally {
      @@ -494,19 +479,19 @@ public final class PermissionStorage {
         }
       
         public static ListMultimap getTablePermissions(Configuration conf,
      -      TableName tableName) throws IOException {
      +    TableName tableName) throws IOException {
           return getPermissions(conf, tableName != null ? tableName.getName() : null, null, null, null,
             null, false);
         }
       
         public static ListMultimap getNamespacePermissions(Configuration conf,
      -      String namespace) throws IOException {
      +    String namespace) throws IOException {
           return getPermissions(conf, Bytes.toBytes(toNamespaceEntry(namespace)), null, null, null, null,
             false);
         }
       
         public static ListMultimap getGlobalPermissions(Configuration conf)
      -      throws IOException {
      +    throws IOException {
           return getPermissions(conf, null, null, null, null, null, false);
         }
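Rounding out the read side, a hedged sketch of listing every grant stored for one table through the getUserTablePermissions helper that follows; its generic return type, a List of UserPermission, is assumed here because type parameters do not survive in this rendering of the patch.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ListGrantsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // No family, qualifier or user filter: return every grant stored for table "data".
    List<UserPermission> perms = PermissionStorage.getUserTablePermissions(conf,
      TableName.valueOf("data"), null, null, null, false);
    for (UserPermission perm : perms) {
      System.out.println(perm);
    }
  }
}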
       
      @@ -518,7 +503,7 @@ public final class PermissionStorage {
          * 

      */ static ListMultimap getPermissions(Configuration conf, byte[] entryName, - Table t, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { + Table t, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { if (entryName == null) { entryName = ACL_GLOBAL_NAME; } @@ -540,7 +525,7 @@ public final class PermissionStorage { perms = parsePermissions(entryName, row, cf, cq, user, hasFilterUser); } else { LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " - + Bytes.toString(entryName)); + + Bytes.toString(entryName)); } return perms; @@ -551,8 +536,8 @@ public final class PermissionStorage { * associated permissions. */ public static List getUserTablePermissions(Configuration conf, - TableName tableName, byte[] cf, byte[] cq, String userName, boolean hasFilterUser) - throws IOException { + TableName tableName, byte[] cf, byte[] cq, String userName, boolean hasFilterUser) + throws IOException { return getUserPermissions(conf, tableName == null ? null : tableName.getName(), cf, cq, userName, hasFilterUser); } @@ -562,7 +547,7 @@ public final class PermissionStorage { * associated permissions. */ public static List getUserNamespacePermissions(Configuration conf, - String namespace, String user, boolean hasFilterUser) throws IOException { + String namespace, String user, boolean hasFilterUser) throws IOException { return getUserPermissions(conf, Bytes.toBytes(toNamespaceEntry(namespace)), null, null, user, hasFilterUser); } @@ -570,19 +555,19 @@ public final class PermissionStorage { /** * Returns the currently granted permissions for a given table/namespace with associated * permissions based on the specified column family, column qualifier and user name. - * @param conf the configuration - * @param entryName Table name or the namespace - * @param cf Column family - * @param cq Column qualifier - * @param user User name to be filtered from permission as requested + * @param conf the configuration + * @param entryName Table name or the namespace + * @param cf Column family + * @param cq Column qualifier + * @param user User name to be filtered from permission as requested * @param hasFilterUser true if filter user is provided, otherwise false. * @return List of UserPermissions * @throws IOException on failure */ public static List getUserPermissions(Configuration conf, byte[] entryName, - byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { + byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { ListMultimap allPerms = - getPermissions(conf, entryName, null, cf, cq, user, hasFilterUser); + getPermissions(conf, entryName, null, cf, cq, user, hasFilterUser); List perms = new ArrayList<>(); for (Map.Entry entry : allPerms.entries()) { perms.add(entry.getValue()); @@ -595,12 +580,12 @@ public final class PermissionStorage { * name. 
*/ private static ListMultimap parsePermissions(byte[] entryName, - Result result, byte[] cf, byte[] cq, String user, boolean hasFilterUser) { + Result result, byte[] cf, byte[] cq, String user, boolean hasFilterUser) { ListMultimap perms = ArrayListMultimap.create(); if (result != null && result.size() > 0) { for (Cell kv : result.rawCells()) { Pair permissionsOfUserOnTable = - parsePermissionRecord(entryName, kv, cf, cq, hasFilterUser, user); + parsePermissionRecord(entryName, kv, cf, cq, hasFilterUser, user); if (permissionsOfUserOnTable != null) { String username = permissionsOfUserOnTable.getFirst(); @@ -613,7 +598,7 @@ public final class PermissionStorage { } private static Pair parsePermissionRecord(byte[] entryName, Cell kv, - byte[] cf, byte[] cq, boolean filterPerms, String filterUser) { + byte[] cf, byte[] cq, boolean filterPerms, String filterUser) { // return X given a set of permissions encoded in the permissionRecord kv. byte[] family = CellUtil.cloneFamily(kv); if (!Bytes.equals(family, ACL_LIST_FAMILY)) { @@ -623,10 +608,8 @@ public final class PermissionStorage { byte[] key = CellUtil.cloneQualifier(kv); byte[] value = CellUtil.cloneValue(kv); if (LOG.isDebugEnabled()) { - LOG.debug("Read acl: entry[" + - Bytes.toStringBinary(entryName) + "], kv [" + - Bytes.toStringBinary(key) + ": " + - Bytes.toStringBinary(value)+"]"); + LOG.debug("Read acl: entry[" + Bytes.toStringBinary(entryName) + "], kv [" + + Bytes.toStringBinary(key) + ": " + Bytes.toStringBinary(value) + "]"); } // check for a column family appended to the key @@ -637,8 +620,9 @@ public final class PermissionStorage { // Group list is not required when filterUser itself a group List filterUserGroups = null; if (filterPerms) { - if (username.charAt(0) == '@' && !StringUtils.isEmpty(filterUser) - && filterUser.charAt(0) != '@') { + if ( + username.charAt(0) == '@' && !StringUtils.isEmpty(filterUser) && filterUser.charAt(0) != '@' + ) { filterUserGroups = AccessChecker.getUserGroups(filterUser); } } @@ -650,9 +634,8 @@ public final class PermissionStorage { return null; } - return new Pair<>(username, - Permission.newBuilder(Bytes.toString(fromNamespaceEntry(entryName))) - .withActionCodes(value).build()); + return new Pair<>(username, Permission + .newBuilder(Bytes.toString(fromNamespaceEntry(entryName))).withActionCodes(value).build()); } // Handle global entry @@ -669,13 +652,13 @@ public final class PermissionStorage { int idx = username.indexOf(ACL_KEY_DELIMITER); byte[] permFamily = null; byte[] permQualifier = null; - if (idx > 0 && idx < username.length()-1) { - String remainder = username.substring(idx+1); + if (idx > 0 && idx < username.length() - 1) { + String remainder = username.substring(idx + 1); username = username.substring(0, idx); idx = remainder.indexOf(ACL_KEY_DELIMITER); - if (idx > 0 && idx < remainder.length()-1) { + if (idx > 0 && idx < remainder.length() - 1) { permFamily = Bytes.toBytes(remainder.substring(0, idx)); - permQualifier = Bytes.toBytes(remainder.substring(idx+1)); + permQualifier = Bytes.toBytes(remainder.substring(idx + 1)); } else { permFamily = Bytes.toBytes(remainder); } @@ -696,7 +679,7 @@ public final class PermissionStorage { } return new Pair<>(username, Permission.newBuilder(TableName.valueOf(entryName)) - .withFamily(permFamily).withQualifier(permQualifier).withActionCodes(value).build()); + .withFamily(permFamily).withQualifier(permQualifier).withActionCodes(value).build()); } /* @@ -707,7 +690,7 @@ public final class PermissionStorage { * filtered if not equal. 
*/ private static boolean validateFilterUser(String username, String filterUser, - List filterUserGroups) { + List filterUserGroups) { if (filterUserGroups == null) { // Validate user name or group names whether equal if (filterUser.equals(username)) { @@ -727,7 +710,7 @@ public final class PermissionStorage { * all CQ records. */ private static boolean validateCFAndCQ(byte[] permFamily, byte[] cf, byte[] permQualifier, - byte[] cq) { + byte[] cq) { boolean include = true; if (cf != null) { if (Bytes.equals(cf, permFamily)) { @@ -748,9 +731,9 @@ public final class PermissionStorage { * resulting byte array. Writes a set of permission [user: table permission] */ public static byte[] writePermissionsAsBytes(ListMultimap perms, - Configuration conf) { + Configuration conf) { return ProtobufUtil - .prependPBMagic(AccessControlUtil.toUserTablePermissions(perms).toByteArray()); + .prependPBMagic(AccessControlUtil.toUserTablePermissions(perms).toByteArray()); } // This is part of the old HbaseObjectWritableFor96Migration. @@ -760,8 +743,8 @@ public final class PermissionStorage { private static final int WRITABLE_NOT_ENCODED = 0; - private static List readWritableUserPermission(DataInput in, - Configuration conf) throws IOException, ClassNotFoundException { + private static List readWritableUserPermission(DataInput in, Configuration conf) + throws IOException, ClassNotFoundException { assert WritableUtils.readVInt(in) == LIST_CODE; int length = in.readInt(); List list = new ArrayList<>(length); @@ -778,7 +761,7 @@ public final class PermissionStorage { } public static ListMultimap readUserPermission(byte[] data, - Configuration conf) throws DeserializationException { + Configuration conf) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -810,8 +793,8 @@ public final class PermissionStorage { } } - public static ListMultimap readPermissions(byte[] data, - Configuration conf) throws DeserializationException { + public static ListMultimap readPermissions(byte[] data, Configuration conf) + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -849,7 +832,7 @@ public final class PermissionStorage { } public static boolean isNamespaceEntry(byte[] entryName) { - return entryName != null && entryName.length !=0 && entryName[0] == NAMESPACE_PREFIX; + return entryName != null && entryName.length != 0 && entryName[0] == NAMESPACE_PREFIX; } public static boolean isTableEntry(byte[] entryName) { @@ -868,22 +851,22 @@ public final class PermissionStorage { } public static byte[] toNamespaceEntry(byte[] namespace) { - byte[] ret = new byte[namespace.length+1]; + byte[] ret = new byte[namespace.length + 1]; ret[0] = NAMESPACE_PREFIX; System.arraycopy(namespace, 0, ret, 1, namespace.length); return ret; } public static byte[] fromNamespaceEntry(byte[] namespace) { - if(namespace[0] != NAMESPACE_PREFIX) { - throw new IllegalArgumentException("Argument is not a valid namespace entry: " + - Bytes.toString(namespace)); + if (namespace[0] != NAMESPACE_PREFIX) { + throw new IllegalArgumentException( + "Argument is not a valid namespace entry: " + Bytes.toString(namespace)); } return Arrays.copyOfRange(namespace, 1, namespace.length); } public static List getCellPermissionsForUser(User user, Cell cell) - throws IOException { + throws IOException { // Save an object allocation where we can if (cell.getTagsLength() == 0) { return null; @@ -897,15 +880,15 @@ public final 
class PermissionStorage { // TODO: This can be improved. Don't build UsersAndPermissions just to unpack it again, // use the builder AccessControlProtos.UsersAndPermissions.Builder builder = - AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); if (tag.hasArray()) { ProtobufUtil.mergeFrom(builder, tag.getValueArray(), tag.getValueOffset(), tag.getValueLength()); } else { ProtobufUtil.mergeFrom(builder, Tag.cloneValue(tag)); } - ListMultimap kvPerms = - AccessControlUtil.toUsersAndPermissions(builder.build()); + ListMultimap kvPerms = + AccessControlUtil.toUsersAndPermissions(builder.build()); // Are there permissions for this user? List userPerms = kvPerms.get(user.getShortName()); if (userPerms != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java index 72da07cee5e..656f59576fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; @@ -84,8 +83,10 @@ public class SnapshotScannerHDFSAclCleaner extends BaseHFileCleanerDelegate { return false; } else if (isArchiveNamespaceDir(dir) && namespaceExists(dir.getName())) { return false; - } else if (isArchiveTableDir(dir) - && tableExists(TableName.valueOf(dir.getParent().getName(), dir.getName()))) { + } else if ( + isArchiveTableDir(dir) + && tableExists(TableName.valueOf(dir.getParent().getName(), dir.getName())) + ) { return false; } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java index 4bcf8c608ba..f4fcfc41df0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; @@ -115,24 +114,26 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void preMasterInitialization(ObserverContext c) - throws IOException { - if (c.getEnvironment().getConfiguration() - .getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) { + throws IOException { + if ( + c.getEnvironment().getConfiguration() + .getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false) + ) { MasterCoprocessorEnvironment mEnv = c.getEnvironment(); if (!(mEnv instanceof HasMasterServices)) { throw new IOException("Does not implement HMasterServices"); } masterServices = ((HasMasterServices) mEnv).getMasterServices(); hdfsAclHelper = new SnapshotScannerHDFSAclHelper(masterServices.getConfiguration(), - masterServices.getConnection()); + masterServices.getConnection()); pathHelper = hdfsAclHelper.getPathHelper(); hdfsAclHelper.setCommonDirectoryPermission(); initialized = true; userProvider = UserProvider.instantiate(c.getEnvironment().getConfiguration()); } else { LOG.warn("Try to initialize the coprocessor SnapshotScannerHDFSAclController but failure " - + "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE - + " is false."); + + "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE + + " is false."); } } @@ -149,15 +150,15 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast family -> Bytes.equals(family.getName(), SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY)); if (!containHdfsAclFamily) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build()); admin.modifyTable(builder.build()); } aclTableInitialized = true; } else { - throw new TableNotFoundException("Table " + PermissionStorage.ACL_TABLE_NAME - + " is not created yet. Please check if " + getClass().getName() - + " is configured after " + AccessController.class.getName()); + throw new TableNotFoundException( + "Table " + PermissionStorage.ACL_TABLE_NAME + " is not created yet. Please check if " + + getClass().getName() + " is configured after " + AccessController.class.getName()); } } } @@ -171,14 +172,14 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postCompletedCreateTableAction(ObserverContext c, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + TableDescriptor desc, RegionInfo[] regions) throws IOException { if (needHandleTableHdfsAcl(desc, "createTable " + desc.getTableName())) { TableName tableName = desc.getTableName(); // 1. Create table directories to make HDFS acls can be inherited hdfsAclHelper.createTableDirectories(tableName); // 2. Add table owner HDFS acls String owner = - desc.getOwnerString() == null ? getActiveUser(c).getShortName() : desc.getOwnerString(); + desc.getOwnerString() == null ? getActiveUser(c).getShortName() : desc.getOwnerString(); hdfsAclHelper.addTableAcl(tableName, Sets.newHashSet(owner), "create"); // 3. 
Record table owner permission is synced to HDFS in acl table SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(c.getEnvironment().getConnection(), owner, @@ -188,7 +189,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postCreateNamespace(ObserverContext c, - NamespaceDescriptor ns) throws IOException { + NamespaceDescriptor ns) throws IOException { if (checkInitialized("createNamespace " + ns.getName())) { // Create namespace directories to make HDFS acls can be inherited List paths = hdfsAclHelper.getNamespaceRootPaths(ns.getName()); @@ -200,7 +201,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postCompletedSnapshotAction(ObserverContext c, - SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { + SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { if (needHandleTableHdfsAcl(tableDescriptor, "snapshot " + snapshot.getName())) { // Add HDFS acls of users with table read permission to snapshot files hdfsAclHelper.snapshotAcl(snapshot); @@ -209,7 +210,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postCompletedTruncateTableAction(ObserverContext c, - TableName tableName) throws IOException { + TableName tableName) throws IOException { if (needHandleTableHdfsAcl(tableName, "truncateTable " + tableName)) { // 1. create tmp table directories hdfsAclHelper.createTableDirectories(tableName); @@ -221,7 +222,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postCompletedDeleteTableAction(ObserverContext ctx, - TableName tableName) throws IOException { + TableName tableName) throws IOException { if (!tableName.isSystemTable() && checkInitialized("deleteTable " + tableName)) { /* * Remove table user access HDFS acl from namespace directory if the user has no permissions @@ -231,7 +232,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast * remove Bob access acl. */ try (Table aclTable = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { Set users = SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName); if (users.size() > 0) { // 1. Remove table archive directory default ACLs @@ -250,25 +251,29 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postModifyTable(ObserverContext ctx, - TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) - throws IOException { + TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) + throws IOException { try (Table aclTable = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { - if (needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName) - && !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + if ( + needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName) + && !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor) + ) { // 1. Create table directories used for acl inherited hdfsAclHelper.createTableDirectories(tableName); // 2. 
Add table users HDFS acls Set tableUsers = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false); Set users = - hdfsAclHelper.getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); + hdfsAclHelper.getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); users.addAll(tableUsers); hdfsAclHelper.addTableAcl(tableName, users, "modify"); // 3. Record table user acls are synced to HDFS in acl table SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(ctx.getEnvironment().getConnection(), tableUsers, tableName); - } else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName) - && !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) { + } else if ( + needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName) + && !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor) + ) { // 1. Remove empty table directories List tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false); for (Path path : tableRootPaths) { @@ -276,8 +281,8 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } // 2. Remove all table HDFS acls Set tableUsers = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false); - Set users = hdfsAclHelper - .getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); + Set users = + hdfsAclHelper.getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); users.addAll(tableUsers); hdfsAclHelper.removeTableAcl(tableName, users); // 3. Remove namespace access HDFS acls for users who only own permission for this table @@ -292,10 +297,10 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postDeleteNamespace(ObserverContext ctx, - String namespace) throws IOException { + String namespace) throws IOException { if (checkInitialized("deleteNamespace " + namespace)) { try (Table aclTable = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { // 1. Delete namespace archive dir default ACLs Set users = SnapshotScannerHDFSAclStorage.getEntryUsers(aclTable, PermissionStorage.toNamespaceEntry(Bytes.toBytes(namespace))); @@ -317,13 +322,15 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast @Override public void postGrant(ObserverContext c, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { - if (!checkInitialized( - "grant " + userPermission + ", merge existing permissions " + mergeExistingPermissions)) { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + if ( + !checkInitialized( + "grant " + userPermission + ", merge existing permissions " + mergeExistingPermissions) + ) { return; } try (Table aclTable = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { Configuration conf = c.getEnvironment().getConfiguration(); String userName = userPermission.getUser(); switch (userPermission.getAccessScope()) { @@ -333,11 +340,11 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast if (!isHdfsAclSet(aclTable, userName)) { // 1. 
Get namespaces and tables which global user acls are already synced Pair, Set> skipNamespaceAndTables = - SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); + SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); Set skipNamespaces = skipNamespaceAndTables.getFirst(); Set skipTables = skipNamespaceAndTables.getSecond().stream() - .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) - .collect(Collectors.toSet()); + .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) + .collect(Collectors.toSet()); // 2. Add HDFS acl(skip namespaces and tables directories whose acl is set) hdfsAclHelper.grantAcl(userPermission, skipNamespaces, skipTables); // 3. Record global acl is sync to HDFS @@ -356,7 +363,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast if (!isHdfsAclSet(aclTable, userName, namespace)) { // 1. Get tables which namespace user acls are already synced Set skipTables = SnapshotScannerHDFSAclStorage - .getUserNamespaceAndTable(aclTable, userName).getSecond(); + .getUserNamespaceAndTable(aclTable, userName).getSecond(); // 2. Add HDFS acl(skip tables directories whose acl is set) hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), skipTables); } @@ -391,17 +398,17 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast break; default: throw new IllegalArgumentException( - "Illegal user permission scope " + userPermission.getAccessScope()); + "Illegal user permission scope " + userPermission.getAccessScope()); } } } @Override public void postRevoke(ObserverContext c, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (checkInitialized("revoke " + userPermission)) { try (Table aclTable = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { String userName = userPermission.getUser(); Configuration conf = c.getEnvironment().getConfiguration(); switch (userPermission.getAccessScope()) { @@ -414,7 +421,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast case NAMESPACE: NamespacePermission nsPerm = (NamespacePermission) userPermission.getPermission(); UserPermission userNsPerm = - getUserNamespacePermission(conf, userName, nsPerm.getNamespace()); + getUserNamespacePermission(conf, userName, nsPerm.getNamespace()); if (userNsPerm == null || !hdfsAclHelper.containReadAction(userNsPerm)) { removeUserNamespaceHdfsAcl(aclTable, userName, nsPerm.getNamespace(), userPermission); } @@ -431,22 +438,22 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast break; default: throw new IllegalArgumentException( - "Illegal user permission scope " + userPermission.getAccessScope()); + "Illegal user permission scope " + userPermission.getAccessScope()); } } } } private void removeUserGlobalHdfsAcl(Table aclTable, String userName, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName)) { // 1. 
Get namespaces and tables which global user acls are already synced Pair, Set> namespaceAndTable = - SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); + SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); Set skipNamespaces = namespaceAndTable.getFirst(); Set skipTables = namespaceAndTable.getSecond().stream() - .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) - .collect(Collectors.toSet()); + .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) + .collect(Collectors.toSet()); // 2. Remove user HDFS acls(skip namespaces and tables directories // whose acl must be reversed) hdfsAclHelper.revokeAcl(userPermission, skipNamespaces, skipTables); @@ -456,12 +463,12 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } private void removeUserNamespaceHdfsAcl(Table aclTable, String userName, String namespace, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace)) { if (!SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName)) { // 1. Get tables whose namespace user acls are already synced Set skipTables = - SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName).getSecond(); + SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName).getSecond(); // 2. Remove user HDFS acls(skip tables directories whose acl must be reversed) hdfsAclHelper.revokeAcl(userPermission, new HashSet<>(), skipTables); } @@ -471,11 +478,13 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } private void removeUserTableHdfsAcl(Table aclTable, String userName, TableName tableName, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName)) { - if (!SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName) + if ( + !SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName) && !SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, - tableName.getNamespaceAsString())) { + tableName.getNamespaceAsString()) + ) { // 1. Remove table acls hdfsAclHelper.revokeAcl(userPermission, new HashSet<>(0), new HashSet<>(0)); } @@ -485,26 +494,26 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } private UserPermission getUserGlobalPermission(Configuration conf, String userName) - throws IOException { + throws IOException { List permissions = PermissionStorage.getUserPermissions(conf, PermissionStorage.ACL_GLOBAL_NAME, null, null, userName, true); return permissions.size() > 0 ? permissions.get(0) : null; } private UserPermission getUserNamespacePermission(Configuration conf, String userName, - String namespace) throws IOException { + String namespace) throws IOException { List permissions = - PermissionStorage.getUserNamespacePermissions(conf, namespace, userName, true); + PermissionStorage.getUserNamespacePermissions(conf, namespace, userName, true); return permissions.size() > 0 ? 
permissions.get(0) : null; } private UserPermission getUserTablePermission(Configuration conf, String userName, - TableName tableName) throws IOException { + TableName tableName) throws IOException { List permissions = PermissionStorage - .getUserTablePermissions(conf, tableName, null, null, userName, true).stream() - .filter(userPermission -> hdfsAclHelper - .isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission())) - .collect(Collectors.toList()); + .getUserTablePermissions(conf, tableName, null, null, userName, true).stream() + .filter(userPermission -> hdfsAclHelper + .isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission())) + .collect(Collectors.toList()); return permissions.size() > 0 ? permissions.get(0) : null; } @@ -513,12 +522,12 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } private boolean isHdfsAclSet(Table aclTable, String userName, String namespace) - throws IOException { + throws IOException { return isHdfsAclSet(aclTable, userName, namespace, null); } private boolean isHdfsAclSet(Table aclTable, String userName, TableName tableName) - throws IOException { + throws IOException { return isHdfsAclSet(aclTable, userName, null, tableName); } @@ -526,17 +535,17 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast * Check if user global/namespace/table HDFS acls is already set */ private boolean isHdfsAclSet(Table aclTable, String userName, String namespace, - TableName tableName) throws IOException { + TableName tableName) throws IOException { boolean isSet = SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName); if (namespace != null) { isSet = isSet - || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace); + || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace); } if (tableName != null) { isSet = isSet - || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, - tableName.getNamespaceAsString()) - || SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName); + || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, + tableName.getNamespaceAsString()) + || SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName); } return isSet; } @@ -555,18 +564,18 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast private boolean needHandleTableHdfsAcl(TablePermission tablePermission) throws IOException { return needHandleTableHdfsAcl(tablePermission.getTableName(), "") - && hdfsAclHelper.isNotFamilyOrQualifierPermission(tablePermission); + && hdfsAclHelper.isNotFamilyOrQualifierPermission(tablePermission); } private boolean needHandleTableHdfsAcl(TableName tableName, String operation) throws IOException { - return !tableName.isSystemTable() && checkInitialized(operation) && hdfsAclHelper - .isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName)); + return !tableName.isSystemTable() && checkInitialized(operation) + && hdfsAclHelper.isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName)); } private boolean needHandleTableHdfsAcl(TableDescriptor tableDescriptor, String operation) { TableName tableName = tableDescriptor.getTableName(); return !tableName.isSystemTable() && checkInitialized(operation) - && hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor); + && hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor); } private User 
getActiveUser(ObserverContext ctx) throws IOException { @@ -584,25 +593,27 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast * delete 'ns1:t1', if Bob has global read permission, '@ns1' read permission or * 'ns1:other_tables' read permission, then skip remove Bob access acl in ns1Dirs, otherwise, * remove Bob access acl. - * @param aclTable acl table - * @param tableName the name of the table + * @param aclTable acl table + * @param tableName the name of the table * @param tablesUsers table users set * @return users whose access acl will be removed from the namespace of the table * @throws IOException if an error occurred */ private Set filterUsersToRemoveNsAccessAcl(Table aclTable, TableName tableName, - Set tablesUsers) throws IOException { + Set tablesUsers) throws IOException { Set removeUsers = new HashSet<>(); byte[] namespace = tableName.getNamespace(); for (String user : tablesUsers) { List userEntries = SnapshotScannerHDFSAclStorage.getUserEntries(aclTable, user); boolean remove = true; for (byte[] entry : userEntries) { - if (PermissionStorage.isGlobalEntry(entry) + if ( + PermissionStorage.isGlobalEntry(entry) || (PermissionStorage.isNamespaceEntry(entry) - && Bytes.equals(PermissionStorage.fromNamespaceEntry(entry), namespace)) + && Bytes.equals(PermissionStorage.fromNamespaceEntry(entry), namespace)) || (!Bytes.equals(tableName.getName(), entry) - && Bytes.equals(TableName.valueOf(entry).getNamespace(), namespace))) { + && Bytes.equals(TableName.valueOf(entry).getNamespace(), namespace)) + ) { remove = false; break; } @@ -634,12 +645,12 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } static void addUserNamespaceHdfsAcl(Table aclTable, String user, String namespace) - throws IOException { + throws IOException { addUserEntry(aclTable, user, Bytes.toBytes(PermissionStorage.toNamespaceEntry(namespace))); } static void addUserTableHdfsAcl(Connection connection, Set users, TableName tableName) - throws IOException { + throws IOException { try (Table aclTable = connection.getTable(PermissionStorage.ACL_TABLE_NAME)) { for (String user : users) { addUserTableHdfsAcl(aclTable, user, tableName); @@ -648,14 +659,14 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } static void addUserTableHdfsAcl(Connection connection, String user, TableName tableName) - throws IOException { + throws IOException { try (Table aclTable = connection.getTable(PermissionStorage.ACL_TABLE_NAME)) { addUserTableHdfsAcl(aclTable, user, tableName); } } static void addUserTableHdfsAcl(Table aclTable, String user, TableName tableName) - throws IOException { + throws IOException { addUserEntry(aclTable, user, tableName.getName()); } @@ -670,17 +681,17 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } static void deleteUserNamespaceHdfsAcl(Table aclTable, String user, String namespace) - throws IOException { + throws IOException { deleteUserEntry(aclTable, user, Bytes.toBytes(PermissionStorage.toNamespaceEntry(namespace))); } static void deleteUserTableHdfsAcl(Table aclTable, String user, TableName tableName) - throws IOException { + throws IOException { deleteUserEntry(aclTable, user, tableName.getName()); } static void deleteUserTableHdfsAcl(Connection connection, Set users, - TableName tableName) throws IOException { + TableName tableName) throws IOException { try (Table aclTable = connection.getTable(PermissionStorage.ACL_TABLE_NAME)) { for (String user : users) { 
deleteUserTableHdfsAcl(aclTable, user, tableName); @@ -689,7 +700,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } private static void deleteUserEntry(Table aclTable, String user, byte[] entry) - throws IOException { + throws IOException { Delete delete = new Delete(entry); delete.addColumns(HDFS_ACL_FAMILY, Bytes.toBytes(user)); aclTable.delete(delete); @@ -732,7 +743,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } static Pair, Set> getUserNamespaceAndTable(Table aclTable, - String userName) throws IOException { + String userName) throws IOException { Set namespaces = new HashSet<>(); Set tables = new HashSet<>(); List userEntries = getUserEntries(aclTable, userName); @@ -764,18 +775,18 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast } static boolean hasUserNamespaceHdfsAcl(Table aclTable, String user, String namespace) - throws IOException { + throws IOException { return hasUserEntry(aclTable, user, Bytes.toBytes(PermissionStorage.toNamespaceEntry(namespace))); } static boolean hasUserTableHdfsAcl(Table aclTable, String user, TableName tableName) - throws IOException { + throws IOException { return hasUserEntry(aclTable, user, tableName.getName()); } private static boolean hasUserEntry(Table aclTable, String userName, byte[] entry) - throws IOException { + throws IOException { Get get = new Get(entry); get.addColumn(HDFS_ACL_FAMILY, Bytes.toBytes(userName)); return aclTable.exists(get); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java index fbdc6381d7a..f5af1254a71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.access; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; @@ -38,7 +37,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -76,21 +74,21 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable"; public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER = - "hbase.acl.sync.to.hdfs.thread.number"; + "hbase.acl.sync.to.hdfs.thread.number"; // The tmp directory to restore snapshot, it can not be a sub directory of HBase root dir public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir"; public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT = - "/hbase/.tmpdir-to-restore-snapshot"; + "/hbase/.tmpdir-to-restore-snapshot"; // The default permission of the common directories if the feature is enabled. public static final String COMMON_DIRECTORY_PERMISSION = - "hbase.acl.sync.to.hdfs.common.directory.permission"; + "hbase.acl.sync.to.hdfs.common.directory.permission"; // The secure HBase permission is 700, 751 means all others have execute access and the mask is // set to read-execute to make the extended access ACL entries can work. Be cautious to set // this value. public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751"; // The default permission of the snapshot restore directories if the feature is enabled. public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION = - "hbase.acl.sync.to.hdfs.restore.directory.permission"; + "hbase.acl.sync.to.hdfs.restore.directory.permission"; // 753 means all others have write-execute access. 
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753"; @@ -101,7 +99,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { private ExecutorService pool; public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection) - throws IOException { + throws IOException { this.conf = configuration; this.pathHelper = new PathHelper(conf); this.fs = pathHelper.getFileSystem(); @@ -135,11 +133,11 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { for (Path path : paths) { createDirIfNotExist(path); fs.setPermission(path, new FsPermission( - conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT))); + conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT))); } // create snapshot restore directory Path restoreDir = - new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT)); + new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT)); createDirIfNotExist(restoreDir); fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION, SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT))); @@ -149,11 +147,11 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { * Set acl when grant user permission * @param userPermission the user and permission * @param skipNamespaces the namespace set to skip set acl because already set - * @param skipTables the table set to skip set acl because already set + * @param skipTables the table set to skip set acl because already set * @return false if an error occurred, otherwise true */ public boolean grantAcl(UserPermission userPermission, Set skipNamespaces, - Set skipTables) { + Set skipTables) { try { long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces, @@ -171,11 +169,11 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { * Remove acl when grant or revoke user permission * @param userPermission the user and permission * @param skipNamespaces the namespace set to skip remove acl - * @param skipTables the table set to skip remove acl + * @param skipTables the table set to skip remove acl * @return false if an error occurred, otherwise true */ public boolean revokeAcl(UserPermission userPermission, Set skipNamespaces, - Set skipTables) { + Set skipTables) { try { long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces, @@ -203,7 +201,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { if (userSet.size() > 0) { Path path = pathHelper.getSnapshotDir(snapshot.getName()); handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY, - true, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).get(); + true, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).get(); } LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(), EnvironmentEdgeManager.currentTime() - start); @@ -216,12 +214,12 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * Remove table access acl from namespace dir when delete table - * @param tableName the table + * @param tableName the table * @param removeUsers the users whose access acl will be removed * @return false if an error occurred, otherwise true */ public boolean removeNamespaceAccessAcl(TableName tableName, Set removeUsers, - String operation) { + String operation) { try { long start = 
EnvironmentEdgeManager.currentTime(); if (removeUsers.size() > 0) { @@ -239,7 +237,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * Remove default acl from namespace archive dir when delete namespace - * @param namespace the namespace + * @param namespace the namespace * @param removeUsers the users whose default acl will be removed * @return false if an error occurred, otherwise true */ @@ -248,7 +246,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { long start = EnvironmentEdgeManager.currentTime(); Path archiveNsDir = pathHelper.getArchiveNsDir(namespace); HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers, - HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); + HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); operation.handleAcl(); LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace, EnvironmentEdgeManager.currentTime() - start); @@ -261,7 +259,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * Remove default acl from table archive dir when delete table - * @param tableName the table name + * @param tableName the table name * @param removeUsers the users whose default acl will be removed * @return false if an error occurred, otherwise true */ @@ -270,7 +268,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { long start = EnvironmentEdgeManager.currentTime(); Path archiveTableDir = pathHelper.getArchiveTableDir(tableName); HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers, - HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); + HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); operation.handleAcl(); LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName, EnvironmentEdgeManager.currentTime() - start); @@ -284,7 +282,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * Add table user acls * @param tableName the table - * @param users the table users with READ permission + * @param users the table users with READ permission * @return false if an error occurred, otherwise true */ public boolean addTableAcl(TableName tableName, Set users, String operation) { @@ -308,7 +306,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * Remove table acls when modify table * @param tableName the table - * @param users the table users with READ permission + * @param users the table users with READ permission * @return false if an error occurred, otherwise true */ public boolean removeTableAcl(TableName tableName, Set users) { @@ -328,8 +326,8 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { } private void handleGrantOrRevokeAcl(UserPermission userPermission, - HDFSAclOperation.OperationType operationType, Set skipNamespaces, - Set skipTables) throws ExecutionException, InterruptedException, IOException { + HDFSAclOperation.OperationType operationType, Set skipNamespaces, + Set skipTables) throws ExecutionException, InterruptedException, IOException { Set users = Sets.newHashSet(userPermission.getUser()); switch (userPermission.getAccessScope()) { case GLOBAL: @@ -337,7 +335,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { break; case NAMESPACE: NamespacePermission namespacePermission = - (NamespacePermission) userPermission.getPermission(); + (NamespacePermission) userPermission.getPermission(); 
handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users, skipNamespaces, skipTables, operationType); break; @@ -349,18 +347,17 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { break; default: throw new IllegalArgumentException( - "Illegal user permission scope " + userPermission.getAccessScope()); + "Illegal user permission scope " + userPermission.getAccessScope()); } } private void handleGlobalAcl(Set users, Set skipNamespaces, - Set skipTables, HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException, IOException { + Set skipTables, HDFSAclOperation.OperationType operationType) + throws ExecutionException, InterruptedException, IOException { // handle global root directories HDFS acls - List hdfsAclOperations = getGlobalRootPaths().stream() - .map(path -> new HDFSAclOperation(fs, path, users, operationType, false, - HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)) - .collect(Collectors.toList()); + List hdfsAclOperations = + getGlobalRootPaths().stream().map(path -> new HDFSAclOperation(fs, path, users, operationType, + false, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).collect(Collectors.toList()); handleHDFSAclParallel(hdfsAclOperations).get(); // handle namespace HDFS acls handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables, @@ -368,15 +365,15 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { } private void handleNamespaceAcl(Set namespaces, Set users, - Set skipNamespaces, Set skipTables, - HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException, IOException { + Set skipNamespaces, Set skipTables, + HDFSAclOperation.OperationType operationType) + throws ExecutionException, InterruptedException, IOException { namespaces.removeAll(skipNamespaces); namespaces.remove(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); // handle namespace root directories HDFS acls List hdfsAclOperations = new ArrayList<>(); Set skipTableNamespaces = - skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet()); + skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet()); for (String ns : namespaces) { /** * When op is REMOVE, remove the DEFAULT namespace ACL while keep the ACCESS for skipTables, @@ -385,8 +382,9 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { */ HDFSAclOperation.OperationType op = operationType; HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS; - if (operationType == HDFSAclOperation.OperationType.REMOVE - && skipTableNamespaces.contains(ns)) { + if ( + operationType == HDFSAclOperation.OperationType.REMOVE && skipTableNamespaces.contains(ns) + ) { // remove namespace directories default HDFS acls for skip tables op = HDFSAclOperation.OperationType.REMOVE; aclType = HDFSAclOperation.AclType.DEFAULT; @@ -400,20 +398,22 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { Set tables = new HashSet<>(); for (String namespace : namespaces) { tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream() - .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName) - .collect(Collectors.toSet())); + .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName) + .collect(Collectors.toSet())); } handleTableAcl(tables, users, skipNamespaces, skipTables, operationType); } private void handleTableAcl(Set tableNames, Set users, - Set skipNamespaces, Set 
skipTables, - HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException, IOException { + Set skipNamespaces, Set skipTables, + HDFSAclOperation.OperationType operationType) + throws ExecutionException, InterruptedException, IOException { Set filterTableNames = new HashSet<>(); for (TableName tableName : tableNames) { - if (!skipTables.contains(tableName) - && !skipNamespaces.contains(tableName.getNamespaceAsString())) { + if ( + !skipTables.contains(tableName) + && !skipNamespaces.contains(tableName.getNamespaceAsString()) + ) { filterTableNames.add(tableName); } } @@ -421,24 +421,23 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { // handle table HDFS acls for (TableName tableName : filterTableNames) { List hdfsAclOperations = getTableRootPaths(tableName, true).stream() - .map(path -> new HDFSAclOperation(fs, path, users, operationType, true, - HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)) - .collect(Collectors.toList()); + .map(path -> new HDFSAclOperation(fs, path, users, operationType, true, + HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)) + .collect(Collectors.toList()); CompletableFuture future = handleHDFSAclSequential(hdfsAclOperations); futures.add(future); } CompletableFuture future = - CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])); + CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])); future.get(); } private void handleNamespaceAccessAcl(String namespace, Set users, - HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException { + HDFSAclOperation.OperationType operationType) throws ExecutionException, InterruptedException { // handle namespace access HDFS acls List hdfsAclOperations = - getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users, - operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList()); + getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users, + operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList()); CompletableFuture future = handleHDFSAclParallel(hdfsAclOperations); future.get(); } @@ -471,16 +470,15 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * return paths that user will table permission will visit - * @param tableName the table + * @param tableName the table * @param includeSnapshotPath true if return table snapshots paths, otherwise false * @return the path list * @throws IOException if an error occurred */ List getTableRootPaths(TableName tableName, boolean includeSnapshotPath) - throws IOException { + throws IOException { List paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName), - pathHelper.getMobTableDir(tableName), - pathHelper.getArchiveTableDir(tableName)); + pathHelper.getMobTableDir(tableName), pathHelper.getArchiveTableDir(tableName)); if (includeSnapshotPath) { paths.addAll(getTableSnapshotPaths(tableName)); } @@ -489,9 +487,9 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { private List getTableSnapshotPaths(TableName tableName) throws IOException { return admin.listSnapshots().stream() - .filter(snapDesc -> snapDesc.getTableName().equals(tableName)) - .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName())) - .collect(Collectors.toList()); + .filter(snapDesc -> snapDesc.getTableName().equals(tableName)) + .map(snapshotDescription -> 
pathHelper.getSnapshotDir(snapshotDescription.getName())) + .collect(Collectors.toList()); } /** @@ -505,15 +503,15 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * Return users with namespace read permission - * @param namespace the namespace + * @param namespace the namespace * @param includeGlobal true if include users with global read action * @return users with namespace read permission * @throws IOException if an error occurred */ Set getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal) - throws IOException { + throws IOException { Set users = - getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace)); + getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace)); if (includeGlobal) { users.addAll(getUsersWithGlobalReadAction()); } @@ -522,28 +520,28 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { /** * Return users with table read permission - * @param tableName the table + * @param tableName the table * @param includeNamespace true if include users with namespace read action - * @param includeGlobal true if include users with global read action + * @param includeGlobal true if include users with global read action * @return users with table read permission * @throws IOException if an error occurred */ Set getUsersWithTableReadAction(TableName tableName, boolean includeNamespace, - boolean includeGlobal) throws IOException { + boolean includeGlobal) throws IOException { Set users = - getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName)); + getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName)); if (includeNamespace) { users - .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal)); + .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal)); } return users; } private Set - getUsersWithReadAction(ListMultimap permissionMultimap) { + getUsersWithReadAction(ListMultimap permissionMultimap) { return permissionMultimap.entries().stream() - .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey) - .collect(Collectors.toSet()); + .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey) + .collect(Collectors.toSet()); } private boolean checkUserPermission(UserPermission userPermission) { @@ -569,13 +567,14 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { Collections.addAll(masterCoprocessorSet, masterCoprocessors); } return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false) - && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName()) - && masterCoprocessorSet.contains(AccessController.class.getName()); + && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName()) + && masterCoprocessorSet.contains(AccessController.class.getName()); } boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) { - return tableDescriptor == null ? false - : Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE)); + return tableDescriptor == null + ? 
false + : Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE)); } PathHelper getPathHelper() { @@ -612,14 +611,14 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { private CompletableFuture handleHDFSAclParallel(List operations) { List> futures = - operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList()); + operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList()); return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])); } private static AclEntry aclEntry(AclEntryScope scope, String name) { return new AclEntry.Builder().setScope(scope) - .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name) - .setPermission(READ_EXECUTE).build(); + .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name) + .setPermission(READ_EXECUTE).build(); } void createDirIfNotExist(Path path) throws IOException { @@ -640,11 +639,14 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { */ private static class HDFSAclOperation { enum OperationType { - MODIFY, REMOVE + MODIFY, + REMOVE } enum AclType { - ACCESS, DEFAULT, DEFAULT_ADN_ACCESS + ACCESS, + DEFAULT, + DEFAULT_ADN_ACCESS } private interface Operation { @@ -661,7 +663,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { private List defaultAclEntries; HDFSAclOperation(FileSystem fs, Path path, Set users, OperationType operationType, - boolean recursive, AclType aclType) { + boolean recursive, AclType aclType) { this.fs = fs; this.path = path; this.defaultAndAccessAclEntries = getAclEntries(AclType.DEFAULT_ADN_ACCESS, users); @@ -760,12 +762,12 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { PathHelper(Configuration conf) { this.conf = conf; rootDir = new Path(conf.get(HConstants.HBASE_DIR)); - tmpDataDir = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), - HConstants.BASE_NAMESPACE_DIR); + tmpDataDir = + new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.BASE_NAMESPACE_DIR); dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR); mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR); archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY), - HConstants.BASE_NAMESPACE_DIR); + HConstants.BASE_NAMESPACE_DIR); snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME); } @@ -811,12 +813,12 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { Path getDataTableDir(TableName tableName) { return new Path(getDataNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getMobTableDir(TableName tableName) { return new Path(getMobDataNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getArchiveNsDir(String namespace) { @@ -825,7 +827,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { Path getArchiveTableDir(TableName tableName) { return new Path(getArchiveNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getTmpNsDir(String namespace) { @@ -834,7 +836,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { Path getTmpTableDir(TableName tableName) { return new Path(getTmpNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getSnapshotRootDir() { diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java index 8ff238cc38f..5d674c78cdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.Closeable; @@ -44,13 +43,12 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * Handles synchronization of access control list entries and updates - * throughout all nodes in the cluster. The {@link AccessController} instance - * on the {@code _acl_} table regions, creates a znode for each table as - * {@code /hbase/acl/tablename}, with the znode data containing a serialized - * list of the permissions granted for the table. The {@code AccessController} - * instances on all other cluster hosts watch the znodes for updates, which - * trigger updates in the {@link AuthManager} permission cache. + * Handles synchronization of access control list entries and updates throughout all nodes in the + * cluster. The {@link AccessController} instance on the {@code _acl_} table regions, creates a + * znode for each table as {@code /hbase/acl/tablename}, with the znode data containing a serialized + * list of the permissions granted for the table. The {@code AccessController} instances on all + * other cluster hosts watch the znodes for updates, which trigger updates in the + * {@link AuthManager} permission cache. 
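For context, the write side of the synchronization described above reduces to a create-then-update against the table's znode. A minimal sketch, assuming an already-connected ZKWatcher and pre-serialized permission bytes (the class and method names here are hypothetical; the ZKUtil calls are the ones used in the hunk below):

    import org.apache.hadoop.hbase.zookeeper.ZKUtil;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
    import org.apache.zookeeper.KeeperException;

    final class AclMirrorSketch {
      // Mirrors serialized table permissions under <aclZNode>/<entryName>,
      // creating the znode on first write and overwriting its data afterwards.
      static void mirror(ZKWatcher watcher, String aclZNode, String entryName, byte[] permsData)
          throws KeeperException {
        String zkNode = ZNodePaths.joinZNode(aclZNode, entryName);
        ZKUtil.createWithParents(watcher, zkNode);
        ZKUtil.updateExistingNodeData(watcher, zkNode, permsData, -1);
      }
    }

Watchers on the other hosts then pick the change up through the child/data change callbacks of the class below and refresh the AuthManager cache.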
*/ @InterfaceAudience.Private public class ZKPermissionWatcher extends ZKListener implements Closeable { @@ -63,8 +61,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { private final ExecutorService executor; private Future childrenChangedFuture; - public ZKPermissionWatcher(ZKWatcher watcher, - AuthManager authManager, Configuration conf) { + public ZKPermissionWatcher(ZKWatcher watcher, AuthManager authManager, Configuration conf) { super(watcher); this.authManager = authManager; String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE); @@ -83,7 +80,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { @Override public Void call() throws KeeperException { List existing = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); if (existing != null) { refreshNodes(existing); } @@ -92,7 +89,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { }).get(); } catch (ExecutionException ex) { if (ex.getCause() instanceof KeeperException) { - throw (KeeperException)ex.getCause(); + throw (KeeperException) ex.getCause(); } else { throw new RuntimeException(ex.getCause()); } @@ -128,7 +125,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { public void run() { try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error("Error reading data from zookeeper", ke); @@ -182,7 +179,6 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { } } - @Override public void nodeChildrenChanged(final String path) { waitUntilStarted(); @@ -239,8 +235,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { try { refreshAuthManager(entry, n.getData()); } catch (IOException ioe) { - LOG.error("Failed parsing permissions for table '" + entry + - "' from zk", ioe); + LOG.error("Failed parsing permissions for table '" + entry + "' from zk", ioe); } } } @@ -248,7 +243,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { private void refreshAuthManager(String entry, byte[] nodeData) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Updating permissions cache from {} with data {}", entry, - Bytes.toStringBinary(nodeData)); + Bytes.toStringBinary(nodeData)); } if (PermissionStorage.isNamespaceEntry(entry)) { authManager.refreshNamespaceCacheFromWritable(PermissionStorage.fromNamespaceEntry(entry), @@ -259,9 +254,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { } /*** - * Write a table's access controls to the permissions mirror in zookeeper - * @param entry - * @param permsData + * Write a table's access controls to the permissions mirror in zookeeper nn */ public void writeToZookeeper(byte[] entry, byte[] permsData) { String entryName = Bytes.toString(entry); @@ -272,15 +265,13 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { ZKUtil.createWithParents(watcher, zkNode); ZKUtil.updateExistingNodeData(watcher, zkNode, permsData, -1); } catch (KeeperException e) { - LOG.error("Failed updating permissions for entry '" + - entryName + "'", e); - watcher.abort("Failed writing node "+zkNode+" to zookeeper", e); + LOG.error("Failed updating permissions for entry '" + entryName + "'", e); + watcher.abort("Failed writing node " + zkNode + " to zookeeper", 
e); } } /*** - * Delete the acl notify node of table - * @param tableName + * Delete the acl notify node of table n */ public void deleteTableACLNode(final TableName tableName) { String zkNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, ACL_NODE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java index e6dc3574726..ce0dc7ce255 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java @@ -19,17 +19,15 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Optional; import java.util.function.Supplier; - import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Wrapper around a SaslServer which provides the last user attempting to authenticate via SASL, - * if the server/mechanism allow figuring that out. + * Wrapper around a SaslServer which provides the last user attempting to authenticate via SASL, if + * the server/mechanism allow figuring that out. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -37,8 +35,8 @@ public class AttemptingUserProvidingSaslServer { private final Supplier producer; private final SaslServer saslServer; - public AttemptingUserProvidingSaslServer( - SaslServer saslServer, Supplier producer) { + public AttemptingUserProvidingSaslServer(SaslServer saslServer, + Supplier producer) { this.saslServer = saslServer; this.producer = producer; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java index b3236d65376..03610014d5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; @@ -30,7 +29,6 @@ import javax.security.sasl.AuthorizeCallback; import javax.security.sasl.RealmCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; import org.apache.hadoop.hbase.security.SaslUtil; @@ -44,16 +42,16 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class DigestSaslServerAuthenticationProvider extends DigestSaslAuthenticationProvider - implements SaslServerAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - DigestSaslServerAuthenticationProvider.class); + implements SaslServerAuthenticationProvider { + private static final Logger LOG = + 
LoggerFactory.getLogger(DigestSaslServerAuthenticationProvider.class); private AtomicReference attemptingUser = new AtomicReference<>(null); @Override - public AttemptingUserProvidingSaslServer createServer( - SecretManager secretManager, - Map saslProps) throws IOException { + public AttemptingUserProvidingSaslServer + createServer(SecretManager secretManager, Map saslProps) + throws IOException { if (secretManager == null) { throw new AccessDeniedException("Server is not configured to do DIGEST authentication."); } @@ -70,7 +68,7 @@ public class DigestSaslServerAuthenticationProvider extends DigestSaslAuthentica private final AtomicReference attemptingUser; public SaslDigestCallbackHandler(SecretManager secretManager, - AtomicReference attemptingUser) { + AtomicReference attemptingUser) { this.secretManager = secretManager; this.attemptingUser = attemptingUser; } @@ -99,13 +97,13 @@ public class DigestSaslServerAuthenticationProvider extends DigestSaslAuthentica } } if (pc != null) { - TokenIdentifier tokenIdentifier = HBaseSaslRpcServer.getIdentifier( - nc.getDefaultName(), secretManager); + TokenIdentifier tokenIdentifier = + HBaseSaslRpcServer.getIdentifier(nc.getDefaultName(), secretManager); attemptingUser.set(tokenIdentifier.getUser()); char[] password = getPassword(tokenIdentifier); if (LOG.isTraceEnabled()) { LOG.trace("SASL server DIGEST-MD5 callback: setting password for client: {}", - tokenIdentifier.getUser()); + tokenIdentifier.getUser()); } pc.setPassword(password); } @@ -123,8 +121,8 @@ public class DigestSaslServerAuthenticationProvider extends DigestSaslAuthentica if (authenticatedUserId.equals(userRequestedToExecuteAs)) { ac.setAuthorized(true); if (LOG.isTraceEnabled()) { - String username = HBaseSaslRpcServer.getIdentifier( - userRequestedToExecuteAs, secretManager).getUser().getUserName(); + String username = HBaseSaslRpcServer + .getIdentifier(userRequestedToExecuteAs, secretManager).getUser().getUserName(); LOG.trace( "SASL server DIGEST-MD5 callback: setting " + "canonicalized client ID: " + username); } @@ -143,13 +141,12 @@ public class DigestSaslServerAuthenticationProvider extends DigestSaslAuthentica @Override public UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException { + SecretManager secretManager) throws IOException { UserGroupInformation authorizedUgi; TokenIdentifier tokenId = HBaseSaslRpcServer.getIdentifier(authzId, secretManager); authorizedUgi = tokenId.getUser(); if (authorizedUgi == null) { - throw new AccessDeniedException( - "Can't retrieve username from tokenIdentifier."); + throw new AccessDeniedException("Can't retrieve username from tokenIdentifier."); } authorizedUgi.addTokenIdentifier(tokenId); authorizedUgi.setAuthenticationMethod(getSaslAuthMethod().getAuthMethod()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java index 8a542c69c0d..1d9b1bd1aa9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java @@ -1,5 +1,5 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file @@ -20,14 +20,12 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.Map; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.sasl.AuthorizeCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; - import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -39,29 +37,30 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class GssSaslServerAuthenticationProvider extends GssSaslAuthenticationProvider - implements SaslServerAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - GssSaslServerAuthenticationProvider.class); + implements SaslServerAuthenticationProvider { + private static final Logger LOG = + LoggerFactory.getLogger(GssSaslServerAuthenticationProvider.class); @Override - public AttemptingUserProvidingSaslServer createServer( - SecretManager secretManager, - Map saslProps) throws IOException { + public AttemptingUserProvidingSaslServer + createServer(SecretManager secretManager, Map saslProps) + throws IOException { UserGroupInformation current = UserGroupInformation.getCurrentUser(); String fullName = current.getUserName(); LOG.debug("Server's Kerberos principal name is {}", fullName); String[] names = SaslUtil.splitKerberosName(fullName); if (names.length != 3) { throw new AccessDeniedException( - "Kerberos principal does NOT contain an instance (hostname): " + fullName); + "Kerberos principal does NOT contain an instance (hostname): " + fullName); } try { return current.doAs(new PrivilegedExceptionAction() { @Override public AttemptingUserProvidingSaslServer run() throws SaslException { - return new AttemptingUserProvidingSaslServer(Sasl.createSaslServer( - getSaslAuthMethod().getSaslMechanism(), names[0], names[1], saslProps, - new SaslGssCallbackHandler()), () -> null); + return new AttemptingUserProvidingSaslServer( + Sasl.createSaslServer(getSaslAuthMethod().getSaslMechanism(), names[0], names[1], + saslProps, new SaslGssCallbackHandler()), + () -> null); } }); } catch (InterruptedException e) { @@ -107,7 +106,7 @@ public class GssSaslServerAuthenticationProvider extends GssSaslAuthenticationPr @Override public UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException { + SecretManager secretManager) throws IOException { UserGroupInformation ugi = UserGroupInformation.createRemoteUser(authzId); ugi.setAuthenticationMethod(getSaslAuthMethod().getAuthMethod()); return ugi; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java index 3487cfcd586..866b00d6e34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.util.Map; - 
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; @@ -29,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulates the server-side logic to authenticate a client over SASL. Tied one-to-one to - * a single client authentication implementation. + * Encapsulates the server-side logic to authenticate a client over SASL. Tied one-to-one to a + * single client authentication implementation. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -39,16 +38,17 @@ public interface SaslServerAuthenticationProvider extends SaslAuthenticationProv /** * Allows implementations to initialize themselves, prior to creating a server. */ - default void init(Configuration conf) throws IOException {} + default void init(Configuration conf) throws IOException { + } /** * Creates the SaslServer to accept incoming SASL authentication requests. */ AttemptingUserProvidingSaslServer createServer(SecretManager secretManager, - Map saslProps) throws IOException; + Map saslProps) throws IOException; boolean supportsProtocolAuthentication(); UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException; + SecretManager secretManager) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java index 829498dfd9f..17480cb4659 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Optional; import java.util.ServiceLoader; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -32,17 +31,17 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public final class SaslServerAuthenticationProviders { - private static final Logger LOG = LoggerFactory.getLogger( - SaslClientAuthenticationProviders.class); + private static final Logger LOG = + LoggerFactory.getLogger(SaslClientAuthenticationProviders.class); public static final String EXTRA_PROVIDERS_KEY = "hbase.server.sasl.provider.extras"; private static final AtomicReference holder = - new AtomicReference<>(); + new AtomicReference<>(); private final HashMap providers; private SaslServerAuthenticationProviders(Configuration conf, - HashMap providers) { + HashMap providers) { this.providers = providers; } @@ -87,14 +86,14 @@ public final class SaslServerAuthenticationProviders { * already exist in the map. 
*/ static void addProviderIfNotExists(SaslServerAuthenticationProvider provider, - HashMap providers) { + HashMap providers) { final byte newProviderAuthCode = provider.getSaslAuthMethod().getCode(); - final SaslServerAuthenticationProvider alreadyRegisteredProvider = providers.get( - newProviderAuthCode); + final SaslServerAuthenticationProvider alreadyRegisteredProvider = + providers.get(newProviderAuthCode); if (alreadyRegisteredProvider != null) { throw new RuntimeException("Trying to load SaslServerAuthenticationProvider " - + provider.getClass() + ", but "+ alreadyRegisteredProvider.getClass() - + " is already registered with the same auth code"); + + provider.getClass() + ", but " + alreadyRegisteredProvider.getClass() + + " is already registered with the same auth code"); } providers.put(newProviderAuthCode, provider); } @@ -103,7 +102,7 @@ public final class SaslServerAuthenticationProviders { * Adds any providers defined in the configuration. */ static void addExtraProviders(Configuration conf, - HashMap providers) { + HashMap providers) { for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { Class clz; try { @@ -115,16 +114,16 @@ public final class SaslServerAuthenticationProviders { if (!SaslServerAuthenticationProvider.class.isAssignableFrom(clz)) { LOG.warn("Server authentication class {} is not an instance of " - + "SaslServerAuthenticationProvider", clz); + + "SaslServerAuthenticationProvider", clz); continue; } try { SaslServerAuthenticationProvider provider = - (SaslServerAuthenticationProvider) clz.getConstructor().newInstance(); + (SaslServerAuthenticationProvider) clz.getConstructor().newInstance(); addProviderIfNotExists(provider, providers); } catch (InstantiationException | IllegalAccessException | NoSuchMethodException - | InvocationTargetException e) { + | InvocationTargetException e) { LOG.warn("Failed to instantiate {}", clz, e); } } @@ -136,8 +135,8 @@ public final class SaslServerAuthenticationProviders { */ static SaslServerAuthenticationProviders createProviders(Configuration conf) { ServiceLoader loader = - ServiceLoader.load(SaslServerAuthenticationProvider.class); - HashMap providers = new HashMap<>(); + ServiceLoader.load(SaslServerAuthenticationProvider.class); + HashMap providers = new HashMap<>(); for (SaslServerAuthenticationProvider provider : loader) { addProviderIfNotExists(provider, providers); } @@ -146,8 +145,7 @@ public final class SaslServerAuthenticationProviders { if (LOG.isTraceEnabled()) { String loadedProviders = providers.values().stream() - .map((provider) -> provider.getClass().getName()) - .collect(Collectors.joining(", ")); + .map((provider) -> provider.getClass().getName()).collect(Collectors.joining(", ")); if (loadedProviders.isEmpty()) { loadedProviders = "None!"; } @@ -155,14 +153,13 @@ public final class SaslServerAuthenticationProviders { } // Initialize the providers once, before we get into the RPC path. - providers.forEach((b,provider) -> { + providers.forEach((b, provider) -> { try { // Give them a copy, just to make sure there is no funny-business going on. provider.init(new Configuration(conf)); } catch (IOException e) { LOG.error("Failed to initialize {}", provider.getClass(), e); - throw new RuntimeException( - "Failed to initialize " + provider.getClass().getName(), e); + throw new RuntimeException("Failed to initialize " + provider.getClass().getName(), e); } }); @@ -181,10 +178,8 @@ public final class SaslServerAuthenticationProviders { * Extracts the SIMPLE authentication provider. 
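The pluggability shown here is driven by ServiceLoader plus the hbase.server.sasl.provider.extras key. A minimal sketch of how a deployment might add its own provider through that key (the provider class name is a placeholder, not something this patch introduces):

    import org.apache.hadoop.conf.Configuration;

    final class ExtraSaslProviderConfigSketch {
      // Registers an additional server-side SASL provider by fully-qualified class
      // name; the class must implement SaslServerAuthenticationProvider and expose
      // a no-arg constructor, matching the reflective loading in addExtraProviders().
      static void register(Configuration conf) {
        conf.set("hbase.server.sasl.provider.extras",
          "com.example.security.MySaslServerAuthenticationProvider");
      }
    }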
*/ public SaslServerAuthenticationProvider getSimpleProvider() { - Optional opt = providers.values() - .stream() - .filter((p) -> p instanceof SimpleSaslServerAuthenticationProvider) - .findFirst(); + Optional opt = providers.values().stream() + .filter((p) -> p instanceof SimpleSaslServerAuthenticationProvider).findFirst(); if (!opt.isPresent()) { throw new RuntimeException("SIMPLE authentication provider not available when it should be"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java index ed7bf4ce9e7..27154174469 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.security.provider; import java.io.IOException; import java.util.Map; - import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; @@ -27,12 +26,12 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class SimpleSaslServerAuthenticationProvider extends SimpleSaslAuthenticationProvider - implements SaslServerAuthenticationProvider { + implements SaslServerAuthenticationProvider { @Override - public AttemptingUserProvidingSaslServer createServer( - SecretManager secretManager, - Map saslProps) throws IOException { + public AttemptingUserProvidingSaslServer + createServer(SecretManager secretManager, Map saslProps) + throws IOException { throw new RuntimeException("HBase SIMPLE authentication doesn't use SASL"); } @@ -43,7 +42,7 @@ public class SimpleSaslServerAuthenticationProvider extends SimpleSaslAuthentica @Override public UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException { + SecretManager secretManager) throws IOException { UserGroupInformation ugi = UserGroupInformation.createRemoteUser(authzId); ugi.setAuthenticationMethod(getSaslAuthMethod().getAuthMethod()); return ugi; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java index 9e124a54111..b250e98f572 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java @@ -15,25 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; -import javax.crypto.SecretKey; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.time.Instant; import java.util.Arrays; - -import org.apache.yetus.audience.InterfaceAudience; +import javax.crypto.SecretKey; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * Represents a secret key used for signing and verifying authentication tokens - * by {@link AuthenticationTokenSecretManager}. 
+ * Represents a secret key used for signing and verifying authentication tokens by + * {@link AuthenticationTokenSecretManager}. */ @InterfaceAudience.Private public class AuthenticationKey implements Writable { @@ -80,21 +77,18 @@ public class AuthenticationKey implements Writable { if (obj == null || !(obj instanceof AuthenticationKey)) { return false; } - AuthenticationKey other = (AuthenticationKey)obj; - return id == other.getKeyId() && - expirationDate == other.getExpiration() && - (secret == null ? other.getKey() == null : - other.getKey() != null && - Bytes.equals(secret.getEncoded(), other.getKey().getEncoded())); + AuthenticationKey other = (AuthenticationKey) obj; + return id == other.getKeyId() && expirationDate == other.getExpiration() + && (secret == null + ? other.getKey() == null + : other.getKey() != null && Bytes.equals(secret.getEncoded(), other.getKey().getEncoded())); } @Override public String toString() { StringBuilder buf = new StringBuilder(); - buf.append("AuthenticationKey[") - .append("id=").append(id) - .append(", expiration=").append(Instant.ofEpochMilli(this.expirationDate)) - .append(", obj=").append(super.toString()) + buf.append("AuthenticationKey[").append("id=").append(id).append(", expiration=") + .append(Instant.ofEpochMilli(this.expirationDate)).append(", obj=").append(super.toString()) .append("]"); return buf.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 641288c0383..5070697439f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -15,53 +15,46 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; -import javax.crypto.SecretKey; import java.io.IOException; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; +import javax.crypto.SecretKey; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Manages an internal list of secret keys used to sign new authentication - * tokens as they are generated, and to valid existing tokens used for - * authentication. - * + * Manages an internal list of secret keys used to sign new authentication tokens as they are + * generated, and to valid existing tokens used for authentication. *
<p>
      - * A single instance of {@code AuthenticationTokenSecretManager} will be - * running as the "leader" in a given HBase cluster. The leader is responsible - * for periodically generating new secret keys, which are then distributed to - * followers via ZooKeeper, and for expiring previously used secret keys that - * are no longer needed (as any tokens using them have expired). + * A single instance of {@code AuthenticationTokenSecretManager} will be running as the "leader" in + * a given HBase cluster. The leader is responsible for periodically generating new secret keys, + * which are then distributed to followers via ZooKeeper, and for expiring previously used secret + * keys that are no longer needed (as any tokens using them have expired). *
</p>
      */ @InterfaceAudience.Private -public class AuthenticationTokenSecretManager - extends SecretManager { +public class AuthenticationTokenSecretManager extends SecretManager { static final String NAME_PREFIX = "SecretManager-"; - private static final Logger LOG = LoggerFactory.getLogger( - AuthenticationTokenSecretManager.class); + private static final Logger LOG = LoggerFactory.getLogger(AuthenticationTokenSecretManager.class); private long lastKeyUpdate; private long keyUpdateInterval; @@ -70,7 +63,7 @@ public class AuthenticationTokenSecretManager private LeaderElector leaderElector; private ZKClusterId clusterId; - private Map allKeys = new ConcurrentHashMap<>(); + private Map allKeys = new ConcurrentHashMap<>(); private AuthenticationKey currentKey; private int idSeq; @@ -79,23 +72,25 @@ public class AuthenticationTokenSecretManager /** * Create a new secret manager instance for generating keys. - * @param conf Configuration to use - * @param zk Connection to zookeeper for handling leader elections - * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token signing - * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer valid + * @param conf Configuration to use + * @param zk Connection to zookeeper for handling leader elections + * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token + * signing + * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer + * valid */ - /* TODO: Restrict access to this constructor to make rogues instances more difficult. - * For the moment this class is instantiated from - * org.apache.hadoop.hbase.ipc.SecureServer so public access is needed. + /* + * TODO: Restrict access to this constructor to make rogues instances more difficult. For the + * moment this class is instantiated from org.apache.hadoop.hbase.ipc.SecureServer so public + * access is needed. 
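Putting the constructor documented above together with the key-rolling description, a minimal usage sketch (the ZKWatcher, server name, and interval values are placeholders; in the real server this wiring happens in the RPC layer, as the TODO above notes):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    import org.apache.hadoop.security.token.Token;

    final class SecretManagerSketch {
      static Token<AuthenticationTokenIdentifier> mint(Configuration conf, ZKWatcher zk,
          String serverName) {
        AuthenticationTokenSecretManager mgr = new AuthenticationTokenSecretManager(conf, zk,
          serverName, 3600000L /* roll a new key hourly */,
          604800000L /* tokens live up to 7 days */);
        mgr.start(); // joins the leader election; the leader rolls and expires keys
        return mgr.generateToken("exampleUser");
      }
    }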
*/ - public AuthenticationTokenSecretManager(Configuration conf, - ZKWatcher zk, String serverName, - long keyUpdateInterval, long tokenMaxLifetime) { + public AuthenticationTokenSecretManager(Configuration conf, ZKWatcher zk, String serverName, + long keyUpdateInterval, long tokenMaxLifetime) { this.zkWatcher = new ZKSecretWatcher(conf, zk, this); this.keyUpdateInterval = keyUpdateInterval; this.tokenMaxLifetime = tokenMaxLifetime; this.leaderElector = new LeaderElector(zk, serverName); - this.name = NAME_PREFIX+serverName; + this.name = NAME_PREFIX + serverName; this.clusterId = new ZKClusterId(zk, zk); } @@ -130,31 +125,29 @@ public class AuthenticationTokenSecretManager identifier.setIssueDate(now); identifier.setExpirationDate(now + tokenMaxLifetime); identifier.setSequenceNumber(tokenSeq.getAndIncrement()); - return createPassword(identifier.getBytes(), - secretKey.getKey()); + return createPassword(identifier.getBytes(), secretKey.getKey()); } @Override - public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) - throws InvalidToken { + public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) throws InvalidToken { long now = EnvironmentEdgeManager.currentTime(); if (identifier.getExpirationDate() < now) { throw new InvalidToken("Token has expired"); } AuthenticationKey masterKey = allKeys.get(identifier.getKeyId()); - if(masterKey == null) { - if(zkWatcher.getWatcher().isAborted()) { + if (masterKey == null) { + if (zkWatcher.getWatcher().isAborted()) { LOG.error("ZKWatcher is abort"); - throw new InvalidToken("Token keys could not be sync from zookeeper" - + " because of ZKWatcher abort"); + throw new InvalidToken( + "Token keys could not be sync from zookeeper" + " because of ZKWatcher abort"); } synchronized (this) { if (!leaderElector.isAlive() || leaderElector.isStopped()) { - LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":" - + leaderElector.getId() + "] is stopped or not alive"); + LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":" + leaderElector.getId() + + "] is stopped or not alive"); leaderElector.start(); - LOG.info("Thread leaderElector [" + leaderElector.getName() + ":" - + leaderElector.getId() + "] is started"); + LOG.info("Thread leaderElector [" + leaderElector.getName() + ":" + leaderElector.getId() + + "] is started"); } } zkWatcher.refreshKeys(); @@ -164,12 +157,10 @@ public class AuthenticationTokenSecretManager masterKey = allKeys.get(identifier.getKeyId()); } if (masterKey == null) { - throw new InvalidToken("Unknown master key for token (id="+ - identifier.getKeyId()+")"); + throw new InvalidToken("Unknown master key for token (id=" + identifier.getKeyId() + ")"); } // regenerate the password - return createPassword(identifier.getBytes(), - masterKey.getKey()); + return createPassword(identifier.getBytes(), masterKey.getKey()); } @Override @@ -178,8 +169,7 @@ public class AuthenticationTokenSecretManager } public Token generateToken(String username) { - AuthenticationTokenIdentifier ident = - new AuthenticationTokenIdentifier(username); + AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier(username); Token token = new Token<>(ident, this); if (clusterId.hasId()) { token.setService(new Text(clusterId.getId())); @@ -259,9 +249,11 @@ public class AuthenticationTokenSecretManager long now = EnvironmentEdgeManager.currentTime(); AuthenticationKey prev = currentKey; - AuthenticationKey newKey = new AuthenticationKey(++idSeq, - Long.MAX_VALUE, // don't allow to expire until it's 
replaced by a new key - generateSecret()); + AuthenticationKey newKey = new AuthenticationKey(++idSeq, Long.MAX_VALUE, // don't allow to + // expire until it's + // replaced by a new + // key + generateSecret()); allKeys.put(newKey.getKeyId(), newKey); currentKey = newKey; zkWatcher.addKeyToZK(newKey); @@ -292,8 +284,8 @@ public class AuthenticationTokenSecretManager public LeaderElector(ZKWatcher watcher, String serverName) { setDaemon(true); setName("ZKSecretWatcher-leaderElector"); - zkLeader = new ZKLeaderManager(watcher, - ZNodePaths.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"), + zkLeader = + new ZKLeaderManager(watcher, ZNodePaths.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"), Bytes.toBytes(serverName), this); } @@ -318,7 +310,7 @@ public class AuthenticationTokenSecretManager zkLeader.stepDownAsLeader(); } isMaster = false; - LOG.info("Stopping leader election, because: "+reason); + LOG.info("Stopping leader election, because: " + reason); interrupt(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java index 9a58006343e..64b889cd066 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java @@ -35,9 +35,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Helper class to obtain a filesystem delegation token. - * Mainly used by Map-Reduce jobs that requires to read/write data to - * a remote file-system (e.g. BulkLoad, ExportSnapshot). + * Helper class to obtain a filesystem delegation token. Mainly used by Map-Reduce jobs that + * requires to read/write data to a remote file-system (e.g. BulkLoad, ExportSnapshot). */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -60,15 +59,13 @@ public class FsDelegationToken { } /** - * Acquire the delegation token for the specified filesystem. - * Before requesting a new delegation token, tries to find one already available. - * Currently supports checking existing delegation tokens for swebhdfs, webhdfs and hdfs. - * + * Acquire the delegation token for the specified filesystem. Before requesting a new delegation + * token, tries to find one already available. Currently supports checking existing delegation + * tokens for swebhdfs, webhdfs and hdfs. * @param fs the filesystem that requires the delegation token * @throws IOException on fs.getDelegationToken() failure */ - public void acquireDelegationToken(final FileSystem fs) - throws IOException { + public void acquireDelegationToken(final FileSystem fs) throws IOException { String tokenKind; String scheme = fs.getUri().getScheme(); if (SWEBHDFS_SCHEME.equalsIgnoreCase(scheme)) { @@ -87,15 +84,14 @@ public class FsDelegationToken { } /** - * Acquire the delegation token for the specified filesystem and token kind. - * Before requesting a new delegation token, tries to find one already available. - * + * Acquire the delegation token for the specified filesystem and token kind. Before requesting a + * new delegation token, tries to find one already available. 
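As a usage illustration for the acquire methods, a short sketch of the typical bulk-load-style caller (UserProvider.instantiate and the "renewer" string are assumptions of this sketch, not part of the hunk):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.security.UserProvider;
    import org.apache.hadoop.hbase.security.token.FsDelegationToken;

    final class FsTokenSketch {
      // Obtains (or reuses) a delegation token for the filesystem behind the given
      // configuration; callers would later copy it into the job's credentials.
      static FsDelegationToken acquire(Configuration conf) throws IOException {
        UserProvider userProvider = UserProvider.instantiate(conf);
        FsDelegationToken fsToken = new FsDelegationToken(userProvider, "renewer");
        fsToken.acquireDelegationToken(FileSystem.get(conf));
        return fsToken;
      }
    }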
* @param tokenKind non-null token kind to get delegation token from the {@link UserProvider} - * @param fs the filesystem that requires the delegation token + * @param fs the filesystem that requires the delegation token * @throws IOException on fs.getDelegationToken() failure */ public void acquireDelegationToken(final String tokenKind, final FileSystem fs) - throws IOException { + throws IOException { Objects.requireNonNull(tokenKind, "tokenKind:null"); if (userProvider.isHadoopSecurityEnabled()) { this.fs = fs; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java index 28fef37f5b0..4549c85be43 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java @@ -20,10 +20,8 @@ package org.apache.hadoop.hbase.security.token; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.Collections; - import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices; @@ -45,33 +43,33 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Provides a service for obtaining authentication tokens via the - * {@link AuthenticationProtos} AuthenticationService coprocessor service. + * Provides a service for obtaining authentication tokens via the {@link AuthenticationProtos} + * AuthenticationService coprocessor service. */ @CoreCoprocessor @InterfaceAudience.Private -public class TokenProvider implements AuthenticationProtos.AuthenticationService.Interface, - RegionCoprocessor { +public class TokenProvider + implements AuthenticationProtos.AuthenticationService.Interface, RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(TokenProvider.class); private AuthenticationTokenSecretManager secretManager; - @Override public void start(CoprocessorEnvironment env) { // if running at region if (env instanceof RegionCoprocessorEnvironment) { - RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment)env; - /* Getting the RpcServer from a RegionCE is wrong. There cannot be an expectation that Region - is hosted inside a RegionServer. If you need RpcServer, then pass in a RegionServerCE. - TODO: FIX. + RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env; + /* + * Getting the RpcServer from a RegionCE is wrong. There cannot be an expectation that Region + * is hosted inside a RegionServer. If you need RpcServer, then pass in a RegionServerCE. + * TODO: FIX. 
*/ - RegionServerServices rss = ((HasRegionServerServices)regionEnv).getRegionServerServices(); + RegionServerServices rss = ((HasRegionServerServices) regionEnv).getRegionServerServices(); RpcServerInterface server = rss.getRpcServer(); - SecretManager mgr = ((RpcServer)server).getSecretManager(); + SecretManager mgr = ((RpcServer) server).getSecretManager(); if (mgr instanceof AuthenticationTokenSecretManager) { - secretManager = (AuthenticationTokenSecretManager)mgr; + secretManager = (AuthenticationTokenSecretManager) mgr; } } } @@ -89,9 +87,10 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService if (authMethod == AuthenticationMethod.PROXY) { authMethod = ugi.getRealUser().getAuthenticationMethod(); } - if (authMethod != AuthenticationMethod.KERBEROS - && authMethod != AuthenticationMethod.KERBEROS_SSL - && authMethod != AuthenticationMethod.CERTIFICATE) { + if ( + authMethod != AuthenticationMethod.KERBEROS && authMethod != AuthenticationMethod.KERBEROS_SSL + && authMethod != AuthenticationMethod.CERTIFICATE + ) { return false; } return true; @@ -101,34 +100,33 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService @Override public Iterable getServices() { - return Collections.singleton( - AuthenticationProtos.AuthenticationService.newReflectiveService(this)); + return Collections + .singleton(AuthenticationProtos.AuthenticationService.newReflectiveService(this)); } @Override public void getAuthenticationToken(RpcController controller, - AuthenticationProtos.GetAuthenticationTokenRequest request, - RpcCallback done) { + AuthenticationProtos.GetAuthenticationTokenRequest request, + RpcCallback done) { AuthenticationProtos.GetAuthenticationTokenResponse.Builder response = - AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder(); + AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder(); try { if (secretManager == null) { - throw new IOException( - "No secret manager configured for token authentication"); + throw new IOException("No secret manager configured for token authentication"); } User currentUser = RpcServer.getRequestUser() - .orElseThrow(() -> new AccessDeniedException("No authenticated user for request!")); + .orElseThrow(() -> new AccessDeniedException("No authenticated user for request!")); UserGroupInformation ugi = currentUser.getUGI(); if (!isAllowedDelegationTokenOp(ugi)) { - LOG.warn("Token generation denied for user=" + currentUser.getName() + ", authMethod=" + - ugi.getAuthenticationMethod()); + LOG.warn("Token generation denied for user=" + currentUser.getName() + ", authMethod=" + + ugi.getAuthenticationMethod()); throw new AccessDeniedException( - "Token generation only allowed for Kerberos authenticated clients"); + "Token generation only allowed for Kerberos authenticated clients"); } Token token = - secretManager.generateToken(currentUser.getName()); + secretManager.generateToken(currentUser.getName()); response.setToken(ClientTokenUtil.toToken(token)); } catch (IOException ioe) { CoprocessorRpcUtils.setControllerException(controller, ioe); @@ -138,9 +136,9 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService @Override public void whoAmI(RpcController controller, AuthenticationProtos.WhoAmIRequest request, - RpcCallback done) { + RpcCallback done) { AuthenticationProtos.WhoAmIResponse.Builder response = - AuthenticationProtos.WhoAmIResponse.newBuilder(); + AuthenticationProtos.WhoAmIResponse.newBuilder(); RpcServer.getRequestUser().ifPresent(requestUser 
-> { response.setUsername(requestUser.getShortName()); AuthenticationMethod method = requestUser.getUGI().getAuthenticationMethod(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index 3f47a3cba5f..cbd51f4de9d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; import java.io.IOException; @@ -36,7 +35,6 @@ import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Utility methods for obtaining authentication tokens. */ @@ -52,7 +50,7 @@ public class TokenUtil { */ @Deprecated public static Token obtainToken(Configuration conf) - throws IOException { + throws IOException { try (Connection connection = ConnectionFactory.createConnection(conf)) { return obtainToken(connection); } @@ -60,22 +58,21 @@ public class TokenUtil { /** * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.Connection)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. */ @Deprecated public static Token obtainToken(Connection conn) - throws IOException { + throws IOException { return ClientTokenUtil.obtainToken(conn); } - /** * See {@link ClientTokenUtil#toToken(Token)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. */ @Deprecated public static AuthenticationProtos.Token toToken(Token token) { @@ -84,54 +81,51 @@ public class TokenUtil { /** * See {@link ClientTokenUtil#obtainToken(Connection, User)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. */ @Deprecated - public static Token obtainToken( - final Connection conn, User user) throws IOException, InterruptedException { + public static Token obtainToken(final Connection conn, User user) + throws IOException, InterruptedException { return ClientTokenUtil.obtainToken(conn, user); } /** * See {@link ClientTokenUtil#obtainAndCacheToken(Connection, User)}. 
*/ - public static void obtainAndCacheToken(final Connection conn, - User user) - throws IOException, InterruptedException { + public static void obtainAndCacheToken(final Connection conn, User user) + throws IOException, InterruptedException { ClientTokenUtil.obtainAndCacheToken(conn, user); } /** - * See {@link ClientTokenUtil#toToken(org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.Token)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. + * See + * {@link ClientTokenUtil#toToken(org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.Token)}. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. */ @Deprecated public static Token toToken(AuthenticationProtos.Token proto) { return ClientTokenUtil.toToken(proto); } - private static Text getClusterId(Token token) - throws IOException { - return token.getService() != null - ? token.getService() : new Text("default"); + private static Text getClusterId(Token token) throws IOException { + return token.getService() != null ? token.getService() : new Text("default"); } /** - * Obtain an authentication token on behalf of the given user and add it to - * the credentials for the given map reduce job. + * Obtain an authentication token on behalf of the given user and add it to the credentials for + * the given map reduce job. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job instance in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job instance in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ - public static void obtainTokenForJob(final Connection conn, - User user, Job job) - throws IOException, InterruptedException { + public static void obtainTokenForJob(final Connection conn, User user, Job job) + throws IOException, InterruptedException { try { Token token = ClientTokenUtil.obtainToken(conn, user); @@ -140,8 +134,8 @@ public class TokenUtil { } Text clusterId = getClusterId(token); if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName() + " on cluster " + clusterId.toString()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName() + + " on cluster " + clusterId.toString()); } job.getCredentials().addToken(clusterId, token); } catch (IOException ioe) { @@ -152,21 +146,21 @@ public class TokenUtil { throw re; } catch (Exception e) { throw new UndeclaredThrowableException(e, - "Unexpected exception obtaining token for user " + user.getName()); + "Unexpected exception obtaining token for user " + user.getName()); } } /** - * Obtain an authentication token on behalf of the given user and add it to - * the credentials for the given map reduce job. + * Obtain an authentication token on behalf of the given user and add it to the credentials for + * the given map reduce job. 
* @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job configuration in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job configuration in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ public static void obtainTokenForJob(final Connection conn, final JobConf job, User user) - throws IOException, InterruptedException { + throws IOException, InterruptedException { try { Token token = ClientTokenUtil.obtainToken(conn, user); @@ -175,8 +169,8 @@ public class TokenUtil { } Text clusterId = getClusterId(token); if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName() + " on cluster " + clusterId.toString()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName() + + " on cluster " + clusterId.toString()); } job.getCredentials().addToken(clusterId, token); } catch (IOException ioe) { @@ -187,22 +181,21 @@ public class TokenUtil { throw re; } catch (Exception e) { throw new UndeclaredThrowableException(e, - "Unexpected exception obtaining token for user "+user.getName()); + "Unexpected exception obtaining token for user " + user.getName()); } } /** - * Checks for an authentication token for the given user, obtaining a new token if necessary, - * and adds it to the credentials for the given map reduce job. - * + * Checks for an authentication token for the given user, obtaining a new token if necessary, and + * adds it to the credentials for the given map reduce job. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job configuration in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job configuration in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ public static void addTokenForJob(final Connection conn, final JobConf job, User user) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Token token = getAuthToken(conn.getConfiguration(), user); if (token == null) { @@ -212,17 +205,16 @@ public class TokenUtil { } /** - * Checks for an authentication token for the given user, obtaining a new token if necessary, - * and adds it to the credentials for the given map reduce job. - * + * Checks for an authentication token for the given user, obtaining a new token if necessary, and + * adds it to the credentials for the given map reduce job. 
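The Job-based overloads above share one caller pattern; a minimal sketch, assuming the job carries the HBase configuration and the token should be obtained as the current user:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.token.TokenUtil;
    import org.apache.hadoop.mapreduce.Job;

    final class JobTokenSketch {
      // Obtains an HBase authentication token and stores it in the job's
      // credentials, keyed by cluster id, via obtainTokenForJob().
      static void attachToken(Job job) throws Exception {
        Configuration conf = HBaseConfiguration.create(job.getConfiguration());
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          TokenUtil.obtainTokenForJob(conn, User.getCurrent(), job);
        }
      }
    }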
* @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job instance in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job instance in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ public static void addTokenForJob(final Connection conn, User user, Job job) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Token token = getAuthToken(conn.getConfiguration(), user); if (token == null) { token = ClientTokenUtil.obtainToken(conn, user); @@ -231,17 +223,16 @@ public class TokenUtil { } /** - * Checks if an authentication tokens exists for the connected cluster, - * obtaining one if needed and adding it to the user's credentials. - * + * Checks if an authentication tokens exists for the connected cluster, obtaining one if needed + * and adding it to the user's credentials. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @throws IOException If making a remote call to the authentication service fails + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted * @return true if the token was added, false if it already existed */ public static boolean addTokenIfMissing(Connection conn, User user) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Token token = getAuthToken(conn.getConfiguration(), user); if (token == null) { token = ClientTokenUtil.obtainToken(conn, user); @@ -256,7 +247,7 @@ public class TokenUtil { * @return null if the user does not have the token, otherwise the auth token for the cluster. */ private static Token getAuthToken(Configuration conf, User user) - throws IOException, InterruptedException { + throws IOException, InterruptedException { ZKWatcher zkw = new ZKWatcher(conf, "TokenUtil-getAuthToken", null); try { String clusterId = ZKClusterId.readClusterIdZNode(zkw); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java index 0fd2205d5a0..b4cf115e432 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.token; import java.io.IOException; @@ -45,9 +44,8 @@ public class ZKSecretWatcher extends ZKListener { private String baseKeyZNode; private String keysParentZNode; - public ZKSecretWatcher(Configuration conf, - ZKWatcher watcher, - AuthenticationTokenSecretManager secretManager) { + public ZKSecretWatcher(Configuration conf, ZKWatcher watcher, + AuthenticationTokenSecretManager secretManager) { super(watcher); this.secretManager = secretManager; String keyZNodeParent = conf.get("zookeeper.znode.tokenauth.parent", DEFAULT_ROOT_NODE); @@ -62,7 +60,7 @@ public class ZKSecretWatcher extends ZKListener { if (ZKUtil.watchAndCheckExists(watcher, keysParentZNode)) { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } } @@ -72,11 +70,11 @@ public class ZKSecretWatcher extends ZKListener { if (path.equals(keysParentZNode)) { try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); - watcher.abort("Error reading new key znode "+path, ke); + watcher.abort("Error reading new key znode " + path, ke); } } } @@ -90,7 +88,7 @@ public class ZKSecretWatcher extends ZKListener { secretManager.removeKey(id); LOG.info("Node deleted id={}", id); } catch (NumberFormatException nfe) { - LOG.error("Invalid znode name for key ID '"+keyId+"'", nfe); + LOG.error("Invalid znode name for key ID '" + keyId + "'", nfe); } } } @@ -101,19 +99,19 @@ public class ZKSecretWatcher extends ZKListener { try { byte[] data = ZKUtil.getDataAndWatch(watcher, path); if (data == null || data.length == 0) { - LOG.debug("Ignoring empty node "+path); + LOG.debug("Ignoring empty node " + path); return; } - AuthenticationKey key = (AuthenticationKey)Writables.getWritable(data, - new AuthenticationKey()); + AuthenticationKey key = + (AuthenticationKey) Writables.getWritable(data, new AuthenticationKey()); secretManager.addKey(key); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); - watcher.abort("Error reading updated key znode "+path, ke); + watcher.abort("Error reading updated key znode " + path, ke); } catch (IOException ioe) { LOG.error(HBaseMarkers.FATAL, "Error reading key writables", ioe); - watcher.abort("Error reading key writables from znode "+path, ioe); + watcher.abort("Error reading key writables from znode " + path, ioe); } } } @@ -124,7 +122,7 @@ public class ZKSecretWatcher extends ZKListener { // keys changed try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); @@ -144,16 +142,16 @@ public class ZKSecretWatcher extends ZKListener { try { byte[] data = n.getData(); if (data == null || data.length == 0) { - LOG.debug("Ignoring empty node "+path); + LOG.debug("Ignoring empty node " + path); continue; } - AuthenticationKey key = (AuthenticationKey)Writables.getWritable( - data, new AuthenticationKey()); + AuthenticationKey key = + (AuthenticationKey) Writables.getWritable(data, new AuthenticationKey()); secretManager.addKey(key); } catch (IOException ioe) 
{ - LOG.error(HBaseMarkers.FATAL, "Failed reading new secret key for id '" + - keyId + "' from zk", ioe); - watcher.abort("Error deserializing key from znode "+path, ioe); + LOG.error(HBaseMarkers.FATAL, + "Failed reading new secret key for id '" + keyId + "' from zk", ioe); + watcher.abort("Error deserializing key from znode " + path, ioe); } } } @@ -167,12 +165,12 @@ public class ZKSecretWatcher extends ZKListener { try { ZKUtil.deleteNode(watcher, keyZNode); } catch (KeeperException.NoNodeException nne) { - LOG.error("Non-existent znode "+keyZNode+" for key "+key.getKeyId(), nne); + LOG.error("Non-existent znode " + keyZNode + " for key " + key.getKeyId(), nne); } catch (KeeperException ke) { - LOG.error(HBaseMarkers.FATAL, "Failed removing znode "+keyZNode+" for key "+ - key.getKeyId(), ke); - watcher.abort("Unhandled zookeeper error removing znode "+keyZNode+ - " for key "+key.getKeyId(), ke); + LOG.error(HBaseMarkers.FATAL, + "Failed removing znode " + keyZNode + " for key " + key.getKeyId(), ke); + watcher.abort( + "Unhandled zookeeper error removing znode " + keyZNode + " for key " + key.getKeyId(), ke); } } @@ -183,13 +181,12 @@ public class ZKSecretWatcher extends ZKListener { // TODO: is there any point in retrying beyond what ZK client does? ZKUtil.createSetData(watcher, keyZNode, keyData); } catch (KeeperException ke) { - LOG.error(HBaseMarkers.FATAL, "Unable to synchronize master key "+key.getKeyId()+ - " to znode "+keyZNode, ke); - watcher.abort("Unable to synchronize secret key "+ - key.getKeyId()+" in zookeeper", ke); + LOG.error(HBaseMarkers.FATAL, + "Unable to synchronize master key " + key.getKeyId() + " to znode " + keyZNode, ke); + watcher.abort("Unable to synchronize secret key " + key.getKeyId() + " in zookeeper", ke); } catch (IOException ioe) { // this can only happen from an error serializing the key - watcher.abort("Failed serializing key "+key.getKeyId(), ioe); + watcher.abort("Failed serializing key " + key.getKeyId(), ioe); } } @@ -204,13 +201,12 @@ public class ZKSecretWatcher extends ZKListener { ZKUtil.createSetData(watcher, keyZNode, keyData); } } catch (KeeperException ke) { - LOG.error(HBaseMarkers.FATAL, "Unable to update master key "+key.getKeyId()+ - " in znode "+keyZNode); - watcher.abort("Unable to synchronize secret key "+ - key.getKeyId()+" in zookeeper", ke); + LOG.error(HBaseMarkers.FATAL, + "Unable to update master key " + key.getKeyId() + " in znode " + keyZNode); + watcher.abort("Unable to synchronize secret key " + key.getKeyId() + " in zookeeper", ke); } catch (IOException ioe) { // this can only happen from an error serializing the key - watcher.abort("Failed serializing key "+key.getKeyId(), ioe); + watcher.abort("Failed serializing key " + key.getKeyId(), ioe); } } @@ -220,7 +216,7 @@ public class ZKSecretWatcher extends ZKListener { synchronized void refreshKeys() { try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index 519502e5aea..b6163123cbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -39,7 +39,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.AuthUtil; @@ -78,7 +77,7 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService { private static final Logger LOG = - LoggerFactory.getLogger(DefaultVisibilityLabelServiceImpl.class); + LoggerFactory.getLogger(DefaultVisibilityLabelServiceImpl.class); // "system" label is having an ordinal value 1. private static final int SYSTEM_LABEL_ORDINAL = 1; @@ -118,15 +117,15 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService @Override public void init(RegionCoprocessorEnvironment e) throws IOException { - /* So, presumption that the RegionCE has a ZK Connection is too much. Why would a RCE have - * a ZK instance? This is cheating presuming we have access to the RS ZKW. TODO: Fix. - * - * And what is going on here? This ain't even a Coprocessor? And its being passed a CP Env? + /* + * So, presumption that the RegionCE has a ZK Connection is too much. Why would a RCE have a ZK + * instance? This is cheating presuming we have access to the RS ZKW. TODO: Fix. And what is + * going on here? This ain't even a Coprocessor? And its being passed a CP Env? */ // This is a CoreCoprocessor. On creation, we should have gotten an environment that // implements HasRegionServerServices so we can get at RSS. FIX!!!! Integrate this CP as // native service. - ZKWatcher zk = ((HasRegionServerServices)e).getRegionServerServices().getZooKeeper(); + ZKWatcher zk = ((HasRegionServerServices) e).getRegionServerServices().getZooKeeper(); try { labelsCache = VisibilityLabelsCache.createAndGet(zk, this.conf); } catch (IOException ioe) { @@ -137,7 +136,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService if (e.getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) { this.labelsRegion = e.getRegion(); Pair, Map>> labelsAndUserAuths = - extractLabelsAndAuths(getExistingLabelsWithAuths()); + extractLabelsAndAuths(getExistingLabelsWithAuths()); Map labels = labelsAndUserAuths.getFirst(); Map> userAuths = labelsAndUserAuths.getSecond(); // Add the "system" label if it is not added into the system yet @@ -182,20 +181,20 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService return existingLabels; } - protected Pair, Map>> extractLabelsAndAuths( - List> labelDetails) { + protected Pair, Map>> + extractLabelsAndAuths(List> labelDetails) { Map labels = new HashMap<>(); Map> userAuths = new HashMap<>(); for (List cells : labelDetails) { for (Cell cell : cells) { if (CellUtil.matchingQualifier(cell, LABEL_QUALIFIER)) { labels.put( - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()), - PrivateCellUtil.getRowAsInt(cell)); + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()), + PrivateCellUtil.getRowAsInt(cell)); } else { // These are user cells who has authorization for this label String user = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + cell.getQualifierLength()); List auths = userAuths.get(user); if (auths == null) { auths = new ArrayList<>(); @@ -209,18 
+208,13 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } protected void addSystemLabel(Region region, Map labels, - Map> userAuths) throws IOException { + Map> userAuths) throws IOException { if (!labels.containsKey(SYSTEM_LABEL)) { byte[] row = Bytes.toBytes(SYSTEM_LABEL_ORDINAL); Put p = new Put(row); - p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(LABELS_TABLE_FAMILY) - .setQualifier(LABEL_QUALIFIER) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(SYSTEM_LABEL)) - .build()); + p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) + .setFamily(LABELS_TABLE_FAMILY).setQualifier(LABEL_QUALIFIER).setTimestamp(p.getTimestamp()) + .setType(Type.Put).setValue(Bytes.toBytes(SYSTEM_LABEL)).build()); region.put(p); labels.put(SYSTEM_LABEL, SYSTEM_LABEL_ORDINAL); } @@ -237,19 +231,13 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService String labelStr = Bytes.toString(label); if (this.labelsCache.getLabelOrdinal(labelStr) > 0) { finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, - new LabelAlreadyExistsException("Label '" + labelStr + "' already exists")); + new LabelAlreadyExistsException("Label '" + labelStr + "' already exists")); } else { byte[] row = Bytes.toBytes(ordinalCounter.get()); Put p = new Put(row); - p.add(builder.clear() - .setRow(row) - .setFamily(LABELS_TABLE_FAMILY) - .setQualifier(LABEL_QUALIFIER) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(label) - .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))) - .build()); + p.add(builder.clear().setRow(row).setFamily(LABELS_TABLE_FAMILY) + .setQualifier(LABEL_QUALIFIER).setTimestamp(p.getTimestamp()).setType(Type.Put) + .setValue(label).setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))).build()); if (LOG.isDebugEnabled()) { LOG.debug("Adding the label " + labelStr); } @@ -277,19 +265,13 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService if (labelOrdinal == 0) { // This label is not yet added. 
1st this should be added to the system finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, - new InvalidLabelException("Label '" + authStr + "' doesn't exists")); + new InvalidLabelException("Label '" + authStr + "' doesn't exists")); } else { byte[] row = Bytes.toBytes(labelOrdinal); Put p = new Put(row); - p.add(builder.clear() - .setRow(row) - .setFamily(LABELS_TABLE_FAMILY) - .setQualifier(user) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(DUMMY_VALUE) - .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))) - .build()); + p.add(builder.clear().setRow(row).setFamily(LABELS_TABLE_FAMILY).setQualifier(user) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(DUMMY_VALUE) + .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))).build()); puts.add(p); } i++; @@ -307,9 +289,8 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService List currentAuths; if (AuthUtil.isGroupPrincipal(Bytes.toString(user))) { String group = AuthUtil.getGroupName(Bytes.toString(user)); - currentAuths = this.getGroupAuths(new String[]{group}, true); - } - else { + currentAuths = this.getGroupAuths(new String[] { group }, true); + } else { currentAuths = this.getUserAuths(user, true); } List deletes = new ArrayList<>(authLabels.size()); @@ -324,9 +305,9 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService deletes.add(d); } else { // This label is not set for the user. - finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, - new InvalidLabelException("Label '" + authLabelStr + "' is not set for the user " - + Bytes.toString(user))); + finalOpStatus[i] = + new OperationStatus(OperationStatusCode.FAILURE, new InvalidLabelException( + "Label '" + authLabelStr + "' is not set for the user " + Bytes.toString(user))); } i++; } @@ -339,15 +320,12 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService /** * Adds the mutations to labels region and set the results to the finalOpStatus. finalOpStatus * might have some entries in it where the OpStatus is FAILURE. We will leave those and set in - * others in the order. - * @param mutations - * @param finalOpStatus - * @return whether we need a ZK update or not. + * others in the order. + * @param mutations the mutations to apply to the labels region + * @param finalOpStatus the per-mutation status array to fill in + * @return whether we need a ZK update or not.
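// A client-side sketch (not part of this diff) of how the label and auth mutation paths above
// are normally driven, via the VisibilityClient wrapper around the labels endpoint. Label names
// and the user are made up for illustration; the caller is assumed to have superuser rights.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

public class LabelAdminExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Each new label becomes a Put against the labels region, as in the addLabels() path above.
      VisibilityClient.addLabels(conn, new String[] { "secret", "confidential" });
      // Granting auths writes a user qualifier into the matching label rows (setAuths() above).
      VisibilityClient.setAuths(conn, new String[] { "secret", "confidential" }, "demo_user");
      // Revoking goes through the delete path shown above (clearAuths()).
      VisibilityClient.clearAuths(conn, new String[] { "confidential" }, "demo_user");
    }
  }
}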
*/ private boolean mutateLabelsRegion(List mutations, OperationStatus[] finalOpStatus) - throws IOException { - OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations - .toArray(new Mutation[mutations.size()])); + throws IOException { + OperationStatus[] opStatus = + this.labelsRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()])); int i = 0; boolean updateZk = false; for (OperationStatus status : opStatus) { @@ -364,8 +342,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } @Override - public List getUserAuths(byte[] user, boolean systemCall) - throws IOException { + public List getUserAuths(byte[] user, boolean systemCall) throws IOException { assert (labelsRegion != null || systemCall); if (systemCall || labelsRegion == null) { return this.labelsCache.getUserAuths(Bytes.toString(user)); @@ -375,7 +352,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService s.addColumn(LABELS_TABLE_FAMILY, user); } Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion, - new Authorizations(SYSTEM_LABEL)); + new Authorizations(SYSTEM_LABEL)); s.setFilter(filter); ArrayList auths = new ArrayList<>(); RegionScanner scanner = this.labelsRegion.getScanner(s); @@ -399,8 +376,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } @Override - public List getGroupAuths(String[] groups, boolean systemCall) - throws IOException { + public List getGroupAuths(String[] groups, boolean systemCall) throws IOException { assert (labelsRegion != null || systemCall); if (systemCall || labelsRegion == null) { return this.labelsCache.getGroupAuths(groups); @@ -412,7 +388,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } } Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion, - new Authorizations(SYSTEM_LABEL)); + new Authorizations(SYSTEM_LABEL)); s.setFilter(filter); Set auths = new HashSet<>(); RegionScanner scanner = this.labelsRegion.getScanner(s); @@ -439,7 +415,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService public List listLabels(String regex) throws IOException { assert (labelsRegion != null); Pair, Map>> labelsAndUserAuths = - extractLabelsAndAuths(getExistingLabelsWithAuths()); + extractLabelsAndAuths(getExistingLabelsWithAuths()); Map labels = labelsAndUserAuths.getFirst(); labels.remove(SYSTEM_LABEL); if (regex != null) { @@ -457,7 +433,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService @Override public List createVisibilityExpTags(String visExpression, boolean withSerializationFormat, - boolean checkAuths) throws IOException { + boolean checkAuths) throws IOException { Set auths = new HashSet<>(); if (checkAuths) { User user = VisibilityUtils.getActiveUser(); @@ -465,7 +441,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService auths.addAll(this.labelsCache.getGroupAuthsAsOrdinals(user.getGroupNames())); } return VisibilityUtils.createVisibilityExpTags(visExpression, withSerializationFormat, - checkAuths, auths, labelsCache); + checkAuths, auths, labelsCache); } protected void updateZk(boolean labelAddition) throws IOException { @@ -474,7 +450,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService // so many labels and auth in the system, we will end up adding lots of data to zk. Most // possibly we will exceed zk node data limit! 
Pair, Map>> labelsAndUserAuths = - extractLabelsAndAuths(getExistingLabelsWithAuths()); + extractLabelsAndAuths(getExistingLabelsWithAuths()); Map existingLabels = labelsAndUserAuths.getFirst(); Map> userAuths = labelsAndUserAuths.getSecond(); if (labelAddition) { @@ -488,7 +464,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService @Override public VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) - throws IOException { + throws IOException { // If a super user issues a get/scan, he should be able to scan the cells // irrespective of the Visibility labels if (isReadFromSystemAuthUser()) { @@ -593,7 +569,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService @Override public boolean matchVisibility(List putVisTags, Byte putTagsFormat, List deleteVisTags, - Byte deleteTagsFormat) throws IOException { + Byte deleteTagsFormat) throws IOException { // Early out if there are no tags in both of cell and delete if (putVisTags.isEmpty() && deleteVisTags.isEmpty()) { return true; @@ -602,8 +578,10 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService if (putVisTags.isEmpty() ^ deleteVisTags.isEmpty()) { return false; } - if ((deleteTagsFormat != null && deleteTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT) - && (putTagsFormat == null || putTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)) { + if ( + (deleteTagsFormat != null && deleteTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT) + && (putTagsFormat == null || putTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT) + ) { if (putTagsFormat == null) { return matchUnSortedVisibilityTags(putVisTags, deleteVisTags); } else { @@ -611,29 +589,29 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } } throw new IOException("Unexpected tag format passed for comparison, deleteTagsFormat : " - + deleteTagsFormat + ", putTagsFormat : " + putTagsFormat); + + deleteTagsFormat + ", putTagsFormat : " + putTagsFormat); } /** - * @param putVisTags Visibility tags in Put Mutation + * @param putVisTags Visibility tags in Put Mutation * @param deleteVisTags Visibility tags in Delete Mutation - * @return true when all the visibility tags in Put matches with visibility tags in Delete. - * This is used when, at least one set of tags are not sorted based on the label ordinal. + * @return true when all the visibility tags in Put matches with visibility tags in Delete. This + * is used when, at least one set of tags are not sorted based on the label ordinal. */ - private static boolean matchUnSortedVisibilityTags(List putVisTags, - List deleteVisTags) throws IOException { + private static boolean matchUnSortedVisibilityTags(List putVisTags, List deleteVisTags) + throws IOException { return compareTagsOrdinals(sortTagsBasedOnOrdinal(putVisTags), - sortTagsBasedOnOrdinal(deleteVisTags)); + sortTagsBasedOnOrdinal(deleteVisTags)); } /** - * @param putVisTags Visibility tags in Put Mutation + * @param putVisTags Visibility tags in Put Mutation * @param deleteVisTags Visibility tags in Delete Mutation - * @return true when all the visibility tags in Put matches with visibility tags in Delete. - * This is used when both the set of tags are sorted based on the label ordinal. + * @return true when all the visibility tags in Put matches with visibility tags in Delete. This + * is used when both the set of tags are sorted based on the label ordinal. 
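// A client-side sketch (not part of this diff) of the matching rule documented above: a Delete
// only masks a cell when its visibility expression resolves to the same label set as the Put,
// and reads only return cells whose expression is satisfied by the supplied Authorizations.
// Table, family, qualifier and label names are hypothetical.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class VisibilityMatchExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setCellVisibility(new CellVisibility("secret&!public"));
      table.put(put);

      // The delete must carry an expression matching the put's labels, otherwise the cell
      // remains visible to authorized readers.
      Delete delete = new Delete(Bytes.toBytes("row1"));
      delete.addColumns(Bytes.toBytes("f"), Bytes.toBytes("q"));
      delete.setCellVisibility(new CellVisibility("secret&!public"));
      table.delete(delete);

      // Scans only return cells whose expression is satisfied by these authorizations.
      Scan scan = new Scan();
      scan.setAuthorizations(new Authorizations("secret"));
      try (ResultScanner rs = table.getScanner(scan)) {
        for (Result r : rs) {
          System.out.println(r);
        }
      }
    }
  }
}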
*/ private static boolean matchOrdinalSortedVisibilityTags(List putVisTags, - List deleteVisTags) { + List deleteVisTags) { boolean matchFound = false; // If the size does not match. Definitely we are not comparing the equal tags. if ((deleteVisTags.size()) == putVisTags.size()) { @@ -662,7 +640,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } private static void getSortedTagOrdinals(List> fullTagsList, Tag tag) - throws IOException { + throws IOException { List tagsOrdinalInSortedOrder = new ArrayList<>(); int offset = tag.getValueOffset(); int endOffset = offset + tag.getValueLength(); @@ -679,7 +657,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService * @return true when all the visibility tags in Put matches with visibility tags in Delete. */ private static boolean compareTagsOrdinals(List> putVisTags, - List> deleteVisTags) { + List> deleteVisTags) { boolean matchFound = false; if (deleteVisTags.size() == putVisTags.size()) { for (List deleteTagOrdinals : deleteVisTags) { @@ -698,28 +676,27 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService @Override public byte[] encodeVisibilityForReplication(final List tags, final Byte serializationFormat) - throws IOException { - if (tags.size() > 0 - && (serializationFormat == null || - serializationFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)) { + throws IOException { + if ( + tags.size() > 0 && (serializationFormat == null + || serializationFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT) + ) { return createModifiedVisExpression(tags); } return null; } /** - * @param tags - * - all the visibility tags associated with the current Cell + * n * - all the visibility tags associated with the current Cell * @return - the modified visibility expression as byte[] */ - private byte[] createModifiedVisExpression(final List tags) - throws IOException { + private byte[] createModifiedVisExpression(final List tags) throws IOException { StringBuilder visibilityString = new StringBuilder(); for (Tag tag : tags) { if (tag.getType() == TagType.VISIBILITY_TAG_TYPE) { if (visibilityString.length() != 0) { - visibilityString.append(VisibilityConstants.CLOSED_PARAN).append( - VisibilityConstants.OR_OPERATOR); + visibilityString.append(VisibilityConstants.CLOSED_PARAN) + .append(VisibilityConstants.OR_OPERATOR); } int offset = tag.getValueOffset(); int endOffset = offset + tag.getValueLength(); @@ -733,19 +710,19 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService if (expressionStart) { // Quote every label in case of unicode characters if present visibilityString.append(VisibilityConstants.OPEN_PARAN) - .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); + .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); } else { visibilityString.append(VisibilityConstants.AND_OPERATOR) - .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); + .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); } } else { String label = this.labelsCache.getLabel(currLabelOrdinal); if (expressionStart) { - visibilityString.append(VisibilityConstants.OPEN_PARAN).append( - CellVisibility.quote(label)); + visibilityString.append(VisibilityConstants.OPEN_PARAN) + .append(CellVisibility.quote(label)); } else { - visibilityString.append(VisibilityConstants.AND_OPERATOR).append( - CellVisibility.quote(label)); + 
visibilityString.append(VisibilityConstants.AND_OPERATOR) + .append(CellVisibility.quote(label)); } } expressionStart = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java index 77bc2057cdc..ec03913d56f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,23 +21,21 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; /** - * This is an implementation for ScanLabelGenerator. - * It will extract labels from passed in authorizations and cross check - * against the set of predefined authorization labels for given user. + * This is an implementation for ScanLabelGenerator. It will extract labels from passed in + * authorizations and cross check against the set of predefined authorization labels for given user. * The labels for which the user is not authorized will be dropped. */ @InterfaceAudience.Private public class DefinedSetFilterScanLabelGenerator implements ScanLabelGenerator { private static final Logger LOG = - LoggerFactory.getLogger(DefinedSetFilterScanLabelGenerator.class); + LoggerFactory.getLogger(DefinedSetFilterScanLabelGenerator.class); private Configuration conf; @@ -71,7 +69,7 @@ public class DefinedSetFilterScanLabelGenerator implements ScanLabelGenerator { } private List dropLabelsNotInUserAuths(List labels, List auths, - String userName) { + String userName) { List droppedLabels = new ArrayList<>(); List passedLabels = new ArrayList<>(labels.size()); for (String label : labels) { @@ -86,7 +84,7 @@ public class DefinedSetFilterScanLabelGenerator implements ScanLabelGenerator { sb.append("Dropping invalid authorizations requested by user "); sb.append(userName); sb.append(": [ "); - for (String label: droppedLabels) { + for (String label : droppedLabels) { sb.append(label); sb.append(' '); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java index e2bc16b5f02..3be8ac1de97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,18 +21,16 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; /** - * This ScanLabelGenerator enforces a set of predefined authorizations for a - * given user, the set defined by the admin using the VisibilityClient admin - * interface or the set_auths shell command. Any authorizations requested with - * Scan#authorizations will be ignored. + * This ScanLabelGenerator enforces a set of predefined authorizations for a given user, the set + * defined by the admin using the VisibilityClient admin interface or the set_auths shell command. + * Any authorizations requested with Scan#authorizations will be ignored. */ @InterfaceAudience.Private public class EnforcingScanLabelGenerator implements ScanLabelGenerator { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java index 11842a2bd80..603cf12b678 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ExpressionExpander { @@ -47,8 +46,10 @@ public class ExpressionExpander { } return nlExp; } - if (src instanceof NonLeafExpressionNode - && ((NonLeafExpressionNode) src).getOperator() == Operator.NOT) { + if ( + src instanceof NonLeafExpressionNode + && ((NonLeafExpressionNode) src).getOperator() == Operator.NOT + ) { // Negate the exp return negate((NonLeafExpressionNode) src); } @@ -111,12 +112,14 @@ public class ExpressionExpander { // (a | b) & (c & d) ... 
if (outerOp == Operator.OR) { // (a | b) | (c & d) - if (leftChildNLE.getOperator() == Operator.OR - && rightChildNLE.getOperator() == Operator.AND) { + if ( + leftChildNLE.getOperator() == Operator.OR && rightChildNLE.getOperator() == Operator.AND + ) { leftChildNLE.addChildExp(rightChildNLE); newNode = leftChildNLE; - } else if (leftChildNLE.getOperator() == Operator.AND - && rightChildNLE.getOperator() == Operator.OR) { + } else if ( + leftChildNLE.getOperator() == Operator.AND && rightChildNLE.getOperator() == Operator.OR + ) { // (a & b) | (c | d) rightChildNLE.addChildExp(leftChildNLE); newNode = rightChildNLE; @@ -126,16 +129,18 @@ public class ExpressionExpander { } else { // outer op is & // (a | b) & (c & d) => (a & c & d) | (b & c & d) - if (leftChildNLE.getOperator() == Operator.OR - && rightChildNLE.getOperator() == Operator.AND) { + if ( + leftChildNLE.getOperator() == Operator.OR && rightChildNLE.getOperator() == Operator.AND + ) { newNode = new NonLeafExpressionNode(Operator.OR); for (ExpressionNode exp : leftChildNLE.getChildExps()) { NonLeafExpressionNode rightChildNLEClone = rightChildNLE.deepClone(); rightChildNLEClone.addChildExp(exp); newNode.addChildExp(rightChildNLEClone); } - } else if (leftChildNLE.getOperator() == Operator.AND - && rightChildNLE.getOperator() == Operator.OR) { + } else if ( + leftChildNLE.getOperator() == Operator.AND && rightChildNLE.getOperator() == Operator.OR + ) { // (a & b) & (c | d) => (a & b & c) | (a & b & d) newNode = new NonLeafExpressionNode(Operator.OR); for (ExpressionNode exp : rightChildNLE.getChildExps()) { @@ -162,7 +167,7 @@ public class ExpressionExpander { } private NonLeafExpressionNode mergeChildNodes(NonLeafExpressionNode newOuterNode, - Operator outerOp, ExpressionNode lChild, NonLeafExpressionNode nlChild) { + Operator outerOp, ExpressionNode lChild, NonLeafExpressionNode nlChild) { // Merge the single right/left node into the other side if (nlChild.getOperator() == outerOp) { NonLeafExpressionNode leftChildNLEClone = nlChild.deepClone(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java index 313e8801e3e..f56757abf1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,12 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Stack; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ExpressionParser { @@ -39,6 +38,7 @@ public class ExpressionParser { private static final char NOT = '!'; private static final char SPACE = ' '; private static final char DOUBLE_QUOTES = '"'; + public ExpressionNode parse(String expS) throws ParseException { expS = expS.trim(); Stack expStack = new Stack<>(); @@ -66,28 +66,28 @@ public class ExpressionParser { break; case DOUBLE_QUOTES: int labelOffset = ++index; - // We have to rewrite the expression within double quotes as incase of expressions + // We have to rewrite the expression within double quotes as incase of expressions // with escape characters we may have to avoid them as the original expression did // not have them List list = new ArrayList<>(); while (index < endPos && !endDoubleQuotesFound(exp[index])) { if (exp[index] == '\\') { index++; - if (exp[index] != '\\' && exp[index] != '"') - throw new ParseException("invalid escaping with quotes " + expS + " at column : " - + index); + if (exp[index] != '\\' && exp[index] != '"') throw new ParseException( + "invalid escaping with quotes " + expS + " at column : " + index); } list.add(exp[index]); index++; } - // The expression has come to the end. still no double quotes found - if(index == endPos) { + // The expression has come to the end. still no double quotes found + if (index == endPos) { throw new ParseException("No terminating quotes " + expS + " at column : " + index); } // This could be costly. but do we have any alternative? // If we don't do this way then we may have to handle while checking the authorizations. // Better to do it here. 
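// A usage sketch (not part of this diff) for the parser above; ExpressionParser is
// InterfaceAudience.Private, so this is illustration only. It shows a quoted label, plus an
// expression the parser rejects, per the validity comments in processOpenParan().
import org.apache.hadoop.hbase.security.visibility.ExpressionParser;
import org.apache.hadoop.hbase.security.visibility.ParseException;
import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode;

public class ExpressionParserExample {
  public static void main(String[] args) {
    ExpressionParser parser = new ExpressionParser();
    try {
      // Labels combined with &, | and !; the quoted label may contain spaces or unicode.
      ExpressionNode node = parser.parse("(secret&!public)|\"top secret\"");
      System.out.println(node);
    } catch (ParseException e) {
      throw new AssertionError("expression should parse", e);
    }
    try {
      // 'a&b(' is invalid: an opening paren cannot follow a completed binary expression.
      parser.parse("a&b(");
    } catch (ParseException expected) {
      System.out.println("rejected as expected: " + expected.getMessage());
    }
  }
}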
- byte[] array = org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.toArray(list); + byte[] array = + org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.toArray(list); String leafExp = Bytes.toString(array).trim(); if (leafExp.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); @@ -99,13 +99,13 @@ public class ExpressionParser { labelOffset = index; do { if (!VisibilityLabelsValidator.isValidAuthChar(exp[index])) { - throw new ParseException("Error parsing expression " - + expS + " at column : " + index); + throw new ParseException( + "Error parsing expression " + expS + " at column : " + index); } index++; } while (index < endPos && !isEndOfLabel(exp[index])); leafExp = - new String(exp, labelOffset, index - labelOffset, StandardCharsets.UTF_8).trim(); + new String(exp, labelOffset, index - labelOffset, StandardCharsets.UTF_8).trim(); if (leafExp.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } @@ -137,14 +137,14 @@ public class ExpressionParser { } private int skipSpaces(byte[] exp, int index) { - while (index < exp.length -1 && exp[index+1] == SPACE) { + while (index < exp.length - 1 && exp[index + 1] == SPACE) { index++; } return index; } private void processCloseParan(Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { if (expStack.size() < 2) { // When ) comes we expect atleast a ( node and another leaf/non leaf node // in stack. @@ -154,8 +154,9 @@ public class ExpressionParser { ExpressionNode secondTop = expStack.pop(); // The second top must be a ( node and top should not be a ). Top can be // any thing else - if (top == LeafExpressionNode.OPEN_PARAN_NODE - || secondTop != LeafExpressionNode.OPEN_PARAN_NODE) { + if ( + top == LeafExpressionNode.OPEN_PARAN_NODE || secondTop != LeafExpressionNode.OPEN_PARAN_NODE + ) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } // a&(b|) is not valid. @@ -164,8 +165,10 @@ public class ExpressionParser { // (a&) is not valid. if (top instanceof NonLeafExpressionNode) { NonLeafExpressionNode nlTop = (NonLeafExpressionNode) top; - if ((nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 1) - || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 2)) { + if ( + (nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 1) + || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 2) + ) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } } @@ -204,7 +207,7 @@ public class ExpressionParser { } private void processOpenParan(Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { if (!expStack.isEmpty()) { ExpressionNode top = expStack.peek(); // Top can not be a Label Node. a(.. is not valid. but ((a.. is fine. @@ -217,8 +220,10 @@ public class ExpressionParser { // a&b( is not valid. // a&( is valid though. 
Also !( is valid NonLeafExpressionNode nlTop = (NonLeafExpressionNode) top; - if ((nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 0) - || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 1)) { + if ( + (nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 0) + || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 1) + ) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } } @@ -227,7 +232,7 @@ public class ExpressionParser { } private void processLabelExpNode(LeafExpressionNode node, Stack expStack, - String expS, int index) throws ParseException { + String expS, int index) throws ParseException { if (expStack.isEmpty()) { expStack.push(node); } else { @@ -254,7 +259,7 @@ public class ExpressionParser { } private void processANDorOROp(Operator op, Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { if (expStack.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } @@ -274,7 +279,7 @@ public class ExpressionParser { } private void processNOTOp(Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { // When ! comes, the stack can be empty or top ( or top can be some exp like // a& // !!.., a!, a&b!, !a! are invalid @@ -293,21 +298,21 @@ public class ExpressionParser { private static boolean endDoubleQuotesFound(byte b) { return (b == DOUBLE_QUOTES); } + private static boolean isEndOfLabel(byte b) { - return (b == OPEN_PARAN || b == CLOSE_PARAN || b == OR || b == AND || - b == NOT || b == SPACE); + return (b == OPEN_PARAN || b == CLOSE_PARAN || b == OR || b == AND || b == NOT || b == SPACE); } private static Operator getOperator(byte op) { switch (op) { - case AND: - return Operator.AND; - case OR: - return Operator.OR; - case NOT: - return Operator.NOT; - default: - return null; + case AND: + return Operator.AND; + case OR: + return Operator.OR; + case NOT: + return Operator.NOT; + default: + return null; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java index 1c77a4d008d..038b15c39bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,22 +21,18 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; /** - * If the passed in authorization is null, then this ScanLabelGenerator - * feeds the set of predefined authorization labels for the given user. That is - * the set defined by the admin using the VisibilityClient admin interface - * or the set_auths shell command. - * Otherwise the passed in authorization labels are returned with no change. 
- * - * Note: This SLG should not be used alone because it does not check - * the passed in authorization labels against what the user is authorized for. + * If the passed in authorization is null, then this ScanLabelGenerator feeds the set of predefined + * authorization labels for the given user. That is the set defined by the admin using the + * VisibilityClient admin interface or the set_auths shell command. Otherwise the passed in + * authorization labels are returned with no change. Note: This SLG should not be used alone because + * it does not check the passed in authorization labels against what the user is authorized for. */ @InterfaceAudience.Private public class FeedUserAuthScanLabelGenerator implements ScanLabelGenerator { @@ -62,8 +58,10 @@ public class FeedUserAuthScanLabelGenerator implements ScanLabelGenerator { @Override public List getLabels(User user, Authorizations authorizations) { - if (authorizations == null || authorizations.getLabels() == null - || authorizations.getLabels().isEmpty()) { + if ( + authorizations == null || authorizations.getLabels() == null + || authorizations.getLabels().isEmpty() + ) { String userName = user.getShortName(); Set auths = new HashSet<>(); auths.addAll(this.labelsCache.getUserAuths(userName)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java index b6c11b80651..594e27b9f5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java index fbbf8f5a08e..3f969ef64f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,24 +18,19 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** - * This would be the interface which would be used add labels to the RPC context - * and this would be stored against the UGI. - * + * This would be the interface which would be used add labels to the RPC context and this would be + * stored against the UGI. 
*/ @InterfaceAudience.Public public interface ScanLabelGenerator extends Configurable { /** - * Helps to get a list of lables associated with an UGI - * @param user - * @param authorizations - * @return The labels + * Helps to get a list of lables associated with an UGI nn * @return The labels */ public List getLabels(User user, Authorizations authorizations); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java index 840ee32da4e..bbd49d3e371 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,9 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** * This is a simple implementation for ScanLabelGenerator. It will just extract labels passed via diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 6b39f49874b..edf7342bcae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.visibility; import static org.apache.hadoop.hbase.HConstants.OperationStatusCode.SANITY_CHECK_FAILURE; @@ -120,12 +119,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.MapMaker; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) // TODO: break out Observer functions into separate class/sub-class. public class VisibilityController implements MasterCoprocessor, RegionCoprocessor, - VisibilityLabelsService.Interface, MasterObserver, RegionObserver { - + VisibilityLabelsService.Interface, MasterObserver, RegionObserver { private static final Logger LOG = LoggerFactory.getLogger(VisibilityController.class); - private static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger." - + VisibilityController.class.getName()); + private static final Logger AUDITLOG = + LoggerFactory.getLogger("SecurityLogger." + VisibilityController.class.getName()); // flags if we are running on a region of the 'labels' table private boolean labelsRegion = false; // Flag denoting whether AcessController is available or not. 
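// A minimal custom implementation (not part of this diff) of the ScanLabelGenerator interface
// shown above: it hands back whatever labels the client requested, without cross-checking them
// against stored auths the way the generators in this patch do. Wiring it up through the
// "hbase.regionserver.scan.visibility.label.generator.class" property is an assumption about
// the usual configuration key, not something shown in this diff.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.ScanLabelGenerator;

public class PassThroughScanLabelGenerator implements ScanLabelGenerator {
  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return this.conf;
  }

  @Override
  public List<String> getLabels(User user, Authorizations authorizations) {
    if (authorizations == null || authorizations.getLabels() == null) {
      return Collections.emptyList();
    }
    // Pass the requested labels straight through; VisibilityController applies them to the scan.
    return new ArrayList<>(authorizations.getLabels());
  }
}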
@@ -134,13 +132,14 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso private volatile boolean initialized = false; private boolean checkAuths = false; /** Mapping of scanner instances to the user who created them */ - private Map scannerOwners = - new MapMaker().weakKeys().makeMap(); + private Map scannerOwners = new MapMaker().weakKeys().makeMap(); private VisibilityLabelService visibilityLabelService; - /** if we are active, usually false, only true if "hbase.security.authorization" - has been set to true in site configuration */ + /** + * if we are active, usually false, only true if "hbase.security.authorization" has been set to + * true in site configuration + */ boolean authorizationEnabled; // Add to this list if there are any reserved tag types @@ -172,8 +171,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso // Do not create for master CPs if (!(env instanceof MasterCoprocessorEnvironment)) { - visibilityLabelService = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(this.conf); + visibilityLabelService = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(this.conf); } } @@ -195,8 +194,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public Iterable getServices() { - return Collections.singleton( - VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this)); + return Collections + .singleton(VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this)); } /********************************* Master related hooks **********************************/ @@ -223,8 +222,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public TableDescriptor preModifyTable(ObserverContext ctx, - TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) - throws IOException { + TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) + throws IOException { if (authorizationEnabled) { if (LABELS_TABLE_NAME.equals(tableName)) { throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME); @@ -234,8 +233,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } @Override - public void preDisableTable(ObserverContext ctx, TableName tableName) - throws IOException { + public void preDisableTable(ObserverContext ctx, + TableName tableName) throws IOException { if (!authorizationEnabled) { return; } @@ -252,13 +251,13 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) { this.labelsRegion = true; synchronized (this) { - this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors() - .contains(AccessController.class.getName()); + this.accessControllerAvailable = + CoprocessorHost.getLoadedCoprocessors().contains(AccessController.class.getName()); } initVisibilityLabelService(e.getEnvironment()); } else { checkAuths = e.getEnvironment().getConfiguration() - .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false); + .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false); initVisibilityLabelService(e.getEnvironment()); } } @@ -275,12 +274,12 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType 
switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { } @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { return; } @@ -293,7 +292,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso cellVisibility = m.getCellVisibility(); } catch (DeserializationException de) { miniBatchOp.setOperationStatus(i, - new OperationStatus(SANITY_CHECK_FAILURE, de.getMessage())); + new OperationStatus(SANITY_CHECK_FAILURE, de.getMessage())); continue; } boolean sanityFailure = false; @@ -327,11 +326,11 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso // Don't check user auths for labels with Mutations when the user is super user boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); try { - visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, - authCheck); + visibilityTags = + this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, authCheck); } catch (InvalidLabelException e) { miniBatchOp.setOperationStatus(i, - new OperationStatus(SANITY_CHECK_FAILURE, e.getMessage())); + new OperationStatus(SANITY_CHECK_FAILURE, e.getMessage())); } if (visibilityTags != null) { labelCache.put(labelsExp, visibilityTags); @@ -368,9 +367,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } @Override - public void prePrepareTimeStampForDeleteVersion( - ObserverContext ctx, Mutation delete, Cell cell, - byte[] byteNow, Get get) throws IOException { + public void prePrepareTimeStampForDeleteVersion(ObserverContext ctx, + Mutation delete, Cell cell, byte[] byteNow, Get get) throws IOException { // Nothing to do if we are not filtering by visibility if (!authorizationEnabled) { return; @@ -388,14 +386,14 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso if (cellVisibility != null) { String labelsExp = cellVisibility.getExpression(); try { - visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, - false); + visibilityTags = + this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, false); } catch (InvalidLabelException e) { throw new IOException("Invalid cell visibility specified " + labelsExp, e); } } get.setFilter(new DeleteVersionVisibilityExpressionFilter(visibilityTags, - VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT)); + VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT)); try (RegionScanner scanner = ctx.getEnvironment().getRegion().getScanner(new Scan(get))) { // NOTE: Please don't use HRegion.get() instead, // because it will copy cells to heap. See HBASE-26036 @@ -408,8 +406,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso return; } if (result.size() > get.getMaxVersions()) { - throw new RuntimeException("Unexpected size: " + result.size() + - ". Results more than the max versions obtained."); + throw new RuntimeException( + "Unexpected size: " + result.size() + ". 
Results more than the max versions obtained."); } Cell getCell = result.get(get.getMaxVersions() - 1); PrivateCellUtil.setTimestamp(cell, getCell.getTimestamp()); @@ -423,17 +421,16 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } /** - * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This - * tag type is reserved and should not be explicitly set by user. - * + * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This tag type is + * reserved and should not be explicitly set by user. * @param cell The cell under consideration * @param pair An optional pair of type {@code } which would be reused if already - * set and new one will be created if NULL is passed + * set and new one will be created if NULL is passed * @return If the boolean is false then it indicates that the cell has a RESERVERD_VIS_TAG and - * with boolean as true, not null tag indicates that a string modified tag was found. + * with boolean as true, not null tag indicates that a string modified tag was found. */ private Pair checkForReservedVisibilityTagPresence(Cell cell, - Pair pair) throws IOException { + Pair pair) throws IOException { if (pair == null) { pair = new Pair<>(false, null); } else { @@ -483,7 +480,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public void preScannerOpen(ObserverContext e, Scan scan) - throws IOException { + throws IOException { if (!initialized) { throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"); } @@ -508,8 +505,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } } - Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region, - authorizations); + Filter visibilityLabelFilter = + VisibilityUtils.createVisibilityLabelFilter(region, authorizations); if (visibilityLabelFilter != null) { Filter filter = scan.getFilter(); if (filter != null) { @@ -522,8 +519,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public DeleteTracker postInstantiateDeleteTracker( - ObserverContext ctx, DeleteTracker delTracker) - throws IOException { + ObserverContext ctx, DeleteTracker delTracker) + throws IOException { // Nothing to do if we are not filtering by visibility if (!authorizationEnabled) { return delTracker; @@ -543,7 +540,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public RegionScanner postScannerOpen(final ObserverContext c, - final Scan scan, final RegionScanner s) throws IOException { + final Scan scan, final RegionScanner s) throws IOException { User user = VisibilityUtils.getActiveUser(); if (user != null && user.getShortName() != null) { scannerOwners.put(s, user.getShortName()); @@ -553,21 +550,21 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public boolean preScannerNext(final ObserverContext c, - final InternalScanner s, final List result, final int limit, final boolean hasNext) - throws IOException { + final InternalScanner s, final List result, final int limit, final boolean hasNext) + throws IOException { requireScannerOwner(s); return hasNext; } @Override public void preScannerClose(final ObserverContext c, - final InternalScanner s) throws IOException { + final InternalScanner s) throws IOException { requireScannerOwner(s); } @Override public void postScannerClose(final ObserverContext c, - final InternalScanner s) 
throws IOException { + final InternalScanner s) throws IOException { // clean up any associated owner mapping scannerOwners.remove(s); } @@ -577,8 +574,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso * access control is correctly enforced based on the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { - if (!RpcServer.isInRpcCallContext()) - return; + if (!RpcServer.isInRpcCallContext()) return; String requestUName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUName)) { @@ -587,8 +583,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } @Override - public void preGetOp(ObserverContext e, Get get, - List results) throws IOException { + public void preGetOp(ObserverContext e, Get get, List results) + throws IOException { if (!initialized) { throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized"); } @@ -612,8 +608,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso return; } } - Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment() - .getRegion(), authorizations); + Filter visibilityLabelFilter = + VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment().getRegion(), authorizations); if (visibilityLabelFilter != null) { Filter filter = get.getFilter(); if (filter != null) { @@ -630,24 +626,24 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public List> postIncrementBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); } return resultPairs; } @Override public List> postAppendBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); } return resultPairs; } @@ -667,13 +663,15 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso // Don't check user auths for labels with Mutations when the user is super user boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); tags.addAll(this.visibilityLabelService.createVisibilityExpTags(cellVisibility.getExpression(), - true, authCheck)); + true, authCheck)); // Carry forward all other tags Iterator tagsItr = PrivateCellUtil.tagsIterator(newCell); while (tagsItr.hasNext()) { Tag tag = tagsItr.next(); - if (tag.getType() != TagType.VISIBILITY_TAG_TYPE - && tag.getType() != TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) { + if ( + tag.getType() != TagType.VISIBILITY_TAG_TYPE + && tag.getType() != TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE + ) { tags.add(tag); } } @@ -681,10 +679,12 @@ public class 
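The scanner-owner checks above reduce to a map keyed by the scanner instance: postScannerOpen records the caller, preScannerNext/preScannerClose verify it via requireScannerOwner, and postScannerClose forgets it. A generic sketch of that bookkeeping, with the scanner type left as a type parameter and the RPC user lookup supplied by the caller:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hbase.security.AccessDeniedException;

final class ScannerOwnerRegistry<S> {
  private final Map<S, String> owners = new ConcurrentHashMap<>();

  void register(S scanner, String user) {          // postScannerOpen
    if (user != null) {
      owners.put(scanner, user);
    }
  }

  void requireOwner(S scanner, String requestUser) throws AccessDeniedException {
    String owner = owners.get(scanner);            // preScannerNext / preScannerClose
    if (owner != null && !owner.equals(requestUser)) {
      throw new AccessDeniedException("User '" + requestUser + "' is not the scanner owner!");
    }
  }

  void unregister(S scanner) {                     // postScannerClose
    owners.remove(scanner);
  }
}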
VisibilityController implements MasterCoprocessor, RegionCoprocesso return PrivateCellUtil.createCell(newCell, tags); } - /****************************** VisibilityEndpoint service related methods ******************************/ + /****************************** + * VisibilityEndpoint service related methods + ******************************/ @Override public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request, - RpcCallback done) { + RpcCallback done) { VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder(); List visLabels = request.getVisLabelList(); if (!initialized) { @@ -715,8 +715,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } if (status.getOperationStatusCode() != SUCCESS) { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.setResult(i, failureResultBuilder.build()); } i++; @@ -735,7 +735,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } private void setExceptionResults(int size, IOException e, - VisibilityLabelsResponse.Builder response) { + VisibilityLabelsResponse.Builder response) { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); failureResultBuilder.setException(buildException(e)); RegionActionResult failureResult = failureResultBuilder.build(); @@ -746,7 +746,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public synchronized void setAuths(RpcController controller, SetAuthsRequest request, - RpcCallback done) { + RpcCallback done) { VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder(); List auths = request.getAuthList(); if (!initialized) { @@ -772,8 +772,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso response.addResult(successResult); } else { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.addResult(failureResultBuilder.build()); } } @@ -790,7 +790,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } private void logResult(boolean isAllowed, String request, String reason, byte[] user, - List labelAuths, String regex) { + List labelAuths, String regex) { if (AUDITLOG.isTraceEnabled()) { // This is more duplicated code! List labelAuthsStr = new ArrayList<>(); @@ -809,18 +809,18 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso LOG.warn("Failed to get active system user."); LOG.debug("Details on failure to get active system user.", e); } - AUDITLOG.trace("Access " + (isAllowed ? "allowed" : "denied") + " for user " + - (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " + - reason + "; remote address: " + - RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + - request + "; user: " + (user != null ? Bytes.toShort(user) : "null") + "; labels: " + - labelAuthsStr + "; regex: " + regex); + AUDITLOG.trace("Access " + (isAllowed ? 
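The addLabels and setAuths endpoints implemented above are normally reached through the VisibilityClient helper rather than raw protobuf calls. A hedged sketch of a typical admin sequence; the Connection-based overloads shown here are the ones in recent HBase releases (exact signatures vary slightly across versions), and the label and user names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

public class LabelsAdminSketch {
  // VisibilityClient methods declare 'throws Throwable'.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // These calls land in the addLabels/setAuths/getAuths endpoints shown in this patch.
      VisibilityClient.addLabels(conn, new String[] { "secret", "internal" });
      VisibilityClient.setAuths(conn, new String[] { "secret" }, "alice");
      System.out.println(VisibilityClient.getAuths(conn, "alice"));
    }
  }
}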
"allowed" : "denied") + " for user " + + (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " + + reason + "; remote address: " + + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + + request + "; user: " + (user != null ? Bytes.toShort(user) : "null") + "; labels: " + + labelAuthsStr + "; regex: " + regex); } } @Override public synchronized void getAuths(RpcController controller, GetAuthsRequest request, - RpcCallback done) { + RpcCallback done) { GetAuthsResponse.Builder response = GetAuthsResponse.newBuilder(); if (!initialized) { controller.setFailed("VisibilityController not yet initialized"); @@ -832,15 +832,14 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso // AccessController CP methods. if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); - throw new AccessDeniedException("User '" - + (requestingUser != null ? requestingUser.getShortName() : "null") + throw new AccessDeniedException( + "User '" + (requestingUser != null ? requestingUser.getShortName() : "null") + "' is not authorized to perform this action."); } if (AuthUtil.isGroupPrincipal(Bytes.toString(user))) { String group = AuthUtil.getGroupName(Bytes.toString(user)); - labels = this.visibilityLabelService.getGroupAuths(new String[]{group}, false); - } - else { + labels = this.visibilityLabelService.getGroupAuths(new String[] { group }, false); + } else { labels = this.visibilityLabelService.getUserAuths(user, false); } logResult(true, "getAuths", "Get authorizations for user allowed", user, null, null); @@ -862,12 +861,12 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public synchronized void clearAuths(RpcController controller, SetAuthsRequest request, - RpcCallback done) { + RpcCallback done) { VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder(); List auths = request.getAuthList(); if (!initialized) { - setExceptionResults(auths.size(), new CoprocessorException( - "VisibilityController not yet initialized"), response); + setExceptionResults(auths.size(), + new CoprocessorException("VisibilityController not yet initialized"), response); } else { byte[] requestUser = request.getUser().toByteArray(); List labelAuths = new ArrayList<>(auths.size()); @@ -876,7 +875,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User user = VisibilityUtils.getActiveUser(); throw new AccessDeniedException("User '" + (user != null ? 
user.getShortName() : "null") - + " is not authorized to perform this action."); + + " is not authorized to perform this action."); } if (authorizationEnabled) { checkCallingUserAuth(); // When AC is not in place the calling user should have @@ -887,7 +886,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } OperationStatus[] opStatus = - this.visibilityLabelService.clearAuths(requestUser, labelAuths); + this.visibilityLabelService.clearAuths(requestUser, labelAuths); logResult(true, "clearAuths", "Removing authorization for labels allowed", requestUser, labelAuths, null); RegionActionResult successResult = RegionActionResult.newBuilder().build(); @@ -896,8 +895,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso response.addResult(successResult); } else { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.addResult(failureResultBuilder.build()); } } @@ -915,7 +914,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso @Override public synchronized void listLabels(RpcController controller, ListLabelsRequest request, - RpcCallback done) { + RpcCallback done) { ListLabelsResponse.Builder response = ListLabelsResponse.newBuilder(); if (!initialized) { controller.setFailed("VisibilityController not yet initialized"); @@ -927,8 +926,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso // AccessController CP methods. if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); - throw new AccessDeniedException("User '" - + (requestingUser != null ? requestingUser.getShortName() : "null") + throw new AccessDeniedException( + "User '" + (requestingUser != null ? 
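getAuths above special-cases group principals (names of the form "@group") and resolves them through getGroupAuths rather than getUserAuths. The same dispatch written against the VisibilityLabelService interface that appears later in this patch, as a rough sketch:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelService;
import org.apache.hadoop.hbase.util.Bytes;

final class AuthLookupSketch {
  // "@group" principals go through getGroupAuths, everything else through getUserAuths,
  // mirroring the branch in the getAuths endpoint above.
  static List<String> authsFor(VisibilityLabelService service, String principal)
      throws IOException {
    if (AuthUtil.isGroupPrincipal(principal)) {
      return service.getGroupAuths(new String[] { AuthUtil.getGroupName(principal) }, false);
    }
    return service.getUserAuths(Bytes.toBytes(principal), false);
  }
}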
requestingUser.getShortName() : "null") + "' is not authorized to perform this action."); } labels = this.visibilityLabelService.listLabels(regex); @@ -958,8 +957,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso throw new IOException("Unable to retrieve calling user"); } if (!(this.visibilityLabelService.havingSystemAuth(user))) { - throw new AccessDeniedException("User '" + user.getShortName() - + "' is not authorized to perform this action."); + throw new AccessDeniedException( + "User '" + user.getShortName() + "' is not authorized to perform this action."); } } } @@ -969,7 +968,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso private Byte deleteCellVisTagsFormat; public DeleteVersionVisibilityExpressionFilter(List deleteCellVisTags, - Byte deleteCellVisTagsFormat) { + Byte deleteCellVisTagsFormat) { this.deleteCellVisTags = deleteCellVisTags; this.deleteCellVisTagsFormat = deleteCellVisTagsFormat; } @@ -988,10 +987,9 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso // Early out if there are no tags in the cell return ReturnCode.INCLUDE; } - boolean matchFound = VisibilityLabelServiceManager - .getInstance().getVisibilityLabelService() - .matchVisibility(putVisTags, putCellVisTagsFormat, deleteCellVisTags, - deleteCellVisTagsFormat); + boolean matchFound = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService().matchVisibility( + putVisTags, putCellVisTagsFormat, deleteCellVisTags, deleteCellVisTagsFormat); return matchFound ? ReturnCode.INCLUDE : ReturnCode.SKIP; } @@ -1000,12 +998,12 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso if (!(obj instanceof DeleteVersionVisibilityExpressionFilter)) { return false; } - if (this == obj){ + if (this == obj) { return true; } - DeleteVersionVisibilityExpressionFilter f = (DeleteVersionVisibilityExpressionFilter)obj; - return this.deleteCellVisTags.equals(f.deleteCellVisTags) && - this.deleteCellVisTagsFormat.equals(f.deleteCellVisTagsFormat); + DeleteVersionVisibilityExpressionFilter f = (DeleteVersionVisibilityExpressionFilter) obj; + return this.deleteCellVisTags.equals(f.deleteCellVisTags) + && this.deleteCellVisTagsFormat.equals(f.deleteCellVisTagsFormat); } @Override @@ -1015,15 +1013,13 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso } /** - * @param t - * @return NameValuePair of the exception name to stringified version os exception. + * n * @return NameValuePair of the exception name to stringified version os exception. */ // Copied from ResponseConverter and made private. Only used in here. 
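filterCell above decides whether a delete marker masks a stored cell by comparing their visibility tags through the pluggable label service. The core of that check, pulled out into a standalone sketch built on the same VisibilityUtils and VisibilityLabelService calls used in this patch:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelService;
import org.apache.hadoop.hbase.security.visibility.VisibilityUtils;

final class DeleteMatchSketch {
  // Pull the visibility tags off the stored cell and ask the label service whether the
  // delete's visibility expression covers them, as filterCell does above.
  static boolean deleteCovers(VisibilityLabelService service, Cell storedCell,
      List<Tag> deleteVisTags, Byte deleteVisTagsFormat) throws IOException {
    List<Tag> putVisTags = new ArrayList<>();
    Byte putFormat = VisibilityUtils.extractVisibilityTags(storedCell, putVisTags);
    return service.matchVisibility(putVisTags, putFormat, deleteVisTags, deleteVisTagsFormat);
  }
}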
private static NameBytesPair buildException(final Throwable t) { NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); parameterBuilder.setName(t.getClass().getName()); - parameterBuilder.setValue( - ByteString.copyFromUtf8(StringUtils.stringifyException(t))); + parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t))); return parameterBuilder.build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java index 64058b71505..8f67afd3395 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,8 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.yetus.audience.InterfaceAudience; /** * During the read (ie. get/Scan) the VisibilityController calls this interface for each of the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java index 4c3f1414b86..e5b06eb23c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,13 +20,12 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; import java.util.Map; import java.util.Objects; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.yetus.audience.InterfaceAudience; /** * This Filter checks the visibility expression with each KV against visibility labels associated @@ -43,7 +42,7 @@ class VisibilityLabelFilter extends FilterBase { private int curQualMetVersions; public VisibilityLabelFilter(VisibilityExpEvaluator expEvaluator, - Map cfVsMaxVersions) { + Map cfVsMaxVersions) { this.expEvaluator = expEvaluator; this.cfVsMaxVersions = cfVsMaxVersions; this.curFamily = new SimpleMutableByteRange(); @@ -58,9 +57,10 @@ class VisibilityLabelFilter extends FilterBase { @Override public ReturnCode filterCell(final Cell cell) throws IOException { - if (curFamily.getBytes() == null - || !(PrivateCellUtil.matchingFamily(cell, curFamily.getBytes(), curFamily.getOffset(), - curFamily.getLength()))) { + if ( + curFamily.getBytes() == null || !(PrivateCellUtil.matchingFamily(cell, curFamily.getBytes(), + curFamily.getOffset(), curFamily.getLength())) + ) { curFamily.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); // For this family, all the columns can have max of curFamilyMaxVersions versions. No need to // consider the older versions for visibility label check. @@ -69,10 +69,12 @@ class VisibilityLabelFilter extends FilterBase { // Family is changed. Just unset curQualifier. curQualifier.unset(); } - if (curQualifier.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, - curQualifier.getBytes(), curQualifier.getOffset(), curQualifier.getLength()))) { + if ( + curQualifier.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, + curQualifier.getBytes(), curQualifier.getOffset(), curQualifier.getLength())) + ) { curQualifier.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + cell.getQualifierLength()); curQualMetVersions = 0; } curQualMetVersions++; @@ -96,12 +98,12 @@ class VisibilityLabelFilter extends FilterBase { if (!(obj instanceof VisibilityLabelFilter)) { return false; } - if(this == obj){ + if (this == obj) { return true; } - VisibilityLabelFilter f = (VisibilityLabelFilter)obj; - return this.expEvaluator.equals(f.expEvaluator) && - this.cfVsMaxVersions.equals(f.cfVsMaxVersions); + VisibilityLabelFilter f = (VisibilityLabelFilter) obj; + return this.expEvaluator.equals(f.expEvaluator) + && this.cfVsMaxVersions.equals(f.cfVsMaxVersions); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java index b1e4d8909c6..9e58cff23bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java index 55ba344670f..a55ab2aae22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,13 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.regionserver.OperationStatus; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** * The interface which deals with visibility labels and user auths admin service as well as the cell @@ -37,101 +36,82 @@ public interface VisibilityLabelService extends Configurable { /** * System calls this after opening of regions. Gives a chance for the VisibilityLabelService to so - * any initialization logic. - * @param e - * the region coprocessor env + * any initialization logic. n * the region coprocessor env */ void init(RegionCoprocessorEnvironment e) throws IOException; /** - * Adds the set of labels into the system. - * @param labels - * Labels to add to the system. + * Adds the set of labels into the system. n * Labels to add to the system. * @return OperationStatus for each of the label addition */ OperationStatus[] addLabels(List labels) throws IOException; /** - * Sets given labels globally authorized for the user. - * @param user - * The authorizing user - * @param authLabels - * Labels which are getting authorized for the user + * Sets given labels globally authorized for the user. n * The authorizing user n * Labels which + * are getting authorized for the user * @return OperationStatus for each of the label auth addition */ OperationStatus[] setAuths(byte[] user, List authLabels) throws IOException; /** - * Removes given labels from user's globally authorized list of labels. - * @param user - * The user whose authorization to be removed - * @param authLabels - * Labels which are getting removed from authorization set + * Removes given labels from user's globally authorized list of labels. n * The user whose + * authorization to be removed n * Labels which are getting removed from authorization set * @return OperationStatus for each of the label auth removal */ OperationStatus[] clearAuths(byte[] user, List authLabels) throws IOException; /** - * Retrieve the visibility labels for the user. - * @param user - * Name of the user whose authorization to be retrieved - * @param systemCall - * Whether a system or user originated call. + * Retrieve the visibility labels for the user. n * Name of the user whose authorization to be + * retrieved n * Whether a system or user originated call. * @return Visibility labels authorized for the given user. 
*/ List getUserAuths(byte[] user, boolean systemCall) throws IOException; /** - * Retrieve the visibility labels for the groups. - * @param groups - * Name of the groups whose authorization to be retrieved - * @param systemCall - * Whether a system or user originated call. + * Retrieve the visibility labels for the groups. n * Name of the groups whose authorization to be + * retrieved n * Whether a system or user originated call. * @return Visibility labels authorized for the given group. */ List getGroupAuths(String[] groups, boolean systemCall) throws IOException; /** * Retrieve the list of visibility labels defined in the system. - * @param regex The regular expression to filter which labels are returned. + * @param regex The regular expression to filter which labels are returned. * @return List of visibility labels */ List listLabels(String regex) throws IOException; /** - * Creates tags corresponding to given visibility expression. - *
      - * Note: This will be concurrently called from multiple threads and implementation should - * take care of thread safety. - * @param visExpression The Expression for which corresponding Tags to be created. - * @param withSerializationFormat specifies whether a tag, denoting the serialization version - * of the tags, to be added in the list. When this is true make sure to add the - * serialization format Tag also. The format tag value should be byte type. - * @param checkAuths denotes whether to check individual labels in visExpression against user's - * global auth label. + * Creates tags corresponding to given visibility expression.
      + * Note: This will be concurrently called from multiple threads and implementation should take + * care of thread safety. + * @param visExpression The Expression for which corresponding Tags to be created. + * @param withSerializationFormat specifies whether a tag, denoting the serialization version of + * the tags, to be added in the list. When this is true make sure + * to add the serialization format Tag also. The format tag value + * should be byte type. + * @param checkAuths denotes whether to check individual labels in visExpression + * against user's global auth label. * @return The list of tags corresponds to the visibility expression. These tags will be stored * along with the Cells. */ List createVisibilityExpTags(String visExpression, boolean withSerializationFormat, - boolean checkAuths) throws IOException; + boolean checkAuths) throws IOException; /** * Creates VisibilityExpEvaluator corresponding to given Authorizations.
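createVisibilityExpTags, declared just above, is what the controller uses to turn an expression string into tags that are then stamped onto the stored cell (see createNewCellWithTags earlier in this patch). A condensed sketch of that flow; the expression is illustrative and the per-label auth check is disabled:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelService;

final class ExpTagsSketch {
  // Build tags for the expression (including the serialization-format tag) and return
  // a copy of the cell carrying them, roughly what createNewCellWithTags does.
  static Cell tagWithVisibility(VisibilityLabelService service, Cell cell, String expression)
      throws IOException {
    List<Tag> tags = service.createVisibilityExpTags(expression, true, false);
    return PrivateCellUtil.createCell(cell, tags);
  }
}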
      * Note: This will be concurrently called from multiple threads and implementation should take - * care of thread safety. - * @param authorizations - * Authorizations for the read request + * care of thread safety. n * Authorizations for the read request * @return The VisibilityExpEvaluator corresponding to the given set of authorization labels. */ VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) - throws IOException; + throws IOException; /** * System checks for user auth during admin operations. (ie. Label add, set/clear auth). The * operation is allowed only for users having system auth. Also during read, if the requesting - * user has system auth, he can view all the data irrespective of its labels. - * @param user - * User for whom system auth check to be done. + * user has system auth, he can view all the data irrespective of its labels. n * User for whom + * system auth check to be done. * @return true if the given user is having system/super auth */ boolean havingSystemAuth(User user) throws IOException; @@ -141,41 +121,28 @@ public interface VisibilityLabelService extends Configurable { * in Delete mutation and the cell in consideration. Also system passes the serialization format * of visibility tags in Put and Delete.
      * Note: This will be concurrently called from multiple threads and implementation should take - * care of thread safety. - * @param putVisTags - * The visibility tags present in the Put mutation - * @param putVisTagFormat - * The serialization format for the Put visibility tags. A null value for - * this format means the tags are written with unsorted label ordinals - * @param deleteVisTags - * - The visibility tags in the delete mutation (the specified Cell Visibility) - * @param deleteVisTagFormat - * The serialization format for the Delete visibility tags. A null value for - * this format means the tags are written with unsorted label ordinals + * care of thread safety. n * The visibility tags present in the Put mutation n * The + * serialization format for the Put visibility tags. A null value for this format + * means the tags are written with unsorted label ordinals n * - The visibility tags in the delete + * mutation (the specified Cell Visibility) n * The serialization format for the Delete visibility + * tags. A null value for this format means the tags are written with unsorted label + * ordinals * @return true if matching tags are found * @see VisibilityConstants#SORTED_ORDINAL_SERIALIZATION_FORMAT */ boolean matchVisibility(List putVisTags, Byte putVisTagFormat, List deleteVisTags, - Byte deleteVisTagFormat) throws IOException; + Byte deleteVisTagFormat) throws IOException; /** - * Provides a way to modify the visibility tags of type {@link TagType} - * .VISIBILITY_TAG_TYPE, that are part of the cell created from the WALEdits - * that are prepared for replication while calling - * {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} - * .replicate(). - * {@link org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint} - * calls this API to provide an opportunity to modify the visibility tags - * before replicating. - * - * @param visTags - * the visibility tags associated with the cell - * @param serializationFormat - * the serialization format associated with the tag - * @return the modified visibility expression in the form of byte[] - * @throws IOException + * Provides a way to modify the visibility tags of type {@link TagType} .VISIBILITY_TAG_TYPE, that + * are part of the cell created from the WALEdits that are prepared for replication while calling + * {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} .replicate(). + * {@link org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint} calls this + * API to provide an opportunity to modify the visibility tags before replicating. 
n * the + * visibility tags associated with the cell n * the serialization format associated with the tag + * @return the modified visibility expression in the form of byte[] n */ - byte[] encodeVisibilityForReplication(final List visTags, - final Byte serializationFormat) throws IOException; + byte[] encodeVisibilityForReplication(final List visTags, final Byte serializationFormat) + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java index 74531b92ce7..ec009116a6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.ReflectionUtils; /** * Manages singleton instance of {@link VisibilityLabelService} @@ -34,7 +33,7 @@ public class VisibilityLabelServiceManager { private static final Logger LOG = LoggerFactory.getLogger(VisibilityLabelServiceManager.class); public static final String VISIBILITY_LABEL_SERVICE_CLASS = - "hbase.regionserver.visibility.label.service.class"; + "hbase.regionserver.visibility.label.service.class"; private static final VisibilityLabelServiceManager INSTANCE = new VisibilityLabelServiceManager(); private volatile VisibilityLabelService visibilityLabelService = null; @@ -49,14 +48,14 @@ public class VisibilityLabelServiceManager { } /** - * @param conf - * @return singleton instance of {@link VisibilityLabelService}. The FQCN of the implementation - * class can be specified using "hbase.regionserver.visibility.label.service.class". + * n * @return singleton instance of {@link VisibilityLabelService}. The FQCN of the + * implementation class can be specified using + * "hbase.regionserver.visibility.label.service.class". * @throws IOException When VLS implementation, as specified in conf, can not be loaded. */ public VisibilityLabelService getVisibilityLabelService(Configuration conf) throws IOException { String vlsClassName = conf.get(VISIBILITY_LABEL_SERVICE_CLASS, - DefaultVisibilityLabelServiceImpl.class.getCanonicalName()).trim(); + DefaultVisibilityLabelServiceImpl.class.getCanonicalName()).trim(); if (this.visibilityLabelService != null) { checkForClusterLevelSingleConf(vlsClassName); return this.visibilityLabelService; @@ -68,8 +67,8 @@ public class VisibilityLabelServiceManager { } this.vlsClazzName = vlsClassName; try { - this.visibilityLabelService = (VisibilityLabelService) ReflectionUtils.newInstance( - Class.forName(vlsClassName), conf); + this.visibilityLabelService = + (VisibilityLabelService) ReflectionUtils.newInstance(Class.forName(vlsClassName), conf); } catch (ClassNotFoundException e) { throw new IOException(e); } @@ -81,8 +80,8 @@ public class VisibilityLabelServiceManager { assert this.vlsClazzName != null; if (!this.vlsClazzName.equals(vlsClassName)) { LOG.warn("Trying to use table specific value for config " - + "'hbase.regionserver.visibility.label.service.class' which is not supported." 
- + " Will use the cluster level VisibilityLabelService class " + this.vlsClazzName); + + "'hbase.regionserver.visibility.label.service.class' which is not supported." + + " Will use the cluster level VisibilityLabelService class " + this.vlsClazzName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java index 438b6169478..cb9a148e05a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java @@ -26,16 +26,15 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.MultiUserAuthorizations; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabel; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,14 +74,11 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { } /** - * Creates the singleton instance, if not yet present, and returns the same. - * @param watcher - * @param conf - * @return Singleton instance of VisibilityLabelsCache - * @throws IOException + * Creates the singleton instance, if not yet present, and returns the same. nn * @return + * Singleton instance of VisibilityLabelsCache n */ public synchronized static VisibilityLabelsCache createAndGet(ZKWatcher watcher, - Configuration conf) throws IOException { + Configuration conf) throws IOException { // VisibilityLabelService#init() for different regions (in same RS) passes same instance of // watcher as all get the instance from RS. // watcher != instance.zkVisibilityWatcher.getWatcher() - This check is needed only in UTs with @@ -96,10 +92,8 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { } /** - * @return Singleton instance of VisibilityLabelsCache - * @throws IllegalStateException - * when this is called before calling - * {@link #createAndGet(ZKWatcher, Configuration)} + * @return Singleton instance of VisibilityLabelsCache n * when this is called before calling + * {@link #createAndGet(ZKWatcher, Configuration)} */ public static VisibilityLabelsCache get() { // By the time this method is called, the singleton instance of VisibilityLabelsCache should @@ -239,7 +233,6 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { /** * Returns the list of ordinals of labels associated with the user - * * @param user Not null value. 
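The manager above resolves the VisibilityLabelService implementation from a single cluster-wide key and, as the warning shows, ignores per-table overrides. Pointing it at an implementation is just a configuration entry; the class shown here is the default the code falls back to anyway:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class LabelServiceConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Cluster-level only; table-specific values trigger the warning logged above.
    conf.set("hbase.regionserver.visibility.label.service.class",
      "org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl");
    System.out.println(conf.get("hbase.regionserver.visibility.label.service.class"));
  }
}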
* @return the list of ordinals */ @@ -254,10 +247,8 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { } /** - * Returns the list of ordinals of labels associated with the groups - * - * @param groups - * @return the list of ordinals + * Returns the list of ordinals of labels associated with the groups n * @return the list of + * ordinals */ public Set getGroupAuthsAsOrdinals(String[] groups) { this.lock.readLock().lock(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java index b25b7e21c01..026a99796c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,17 +38,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Similar to MvccSensitiveTracker but tracks the visibility expression also before - * deciding if a Cell can be considered deleted + * Similar to MvccSensitiveTracker but tracks the visibility expression also before deciding if a + * Cell can be considered deleted */ @InterfaceAudience.Private public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTracker { private static final Logger LOG = - LoggerFactory.getLogger(VisibilityNewVersionBehaivorTracker.class); + LoggerFactory.getLogger(VisibilityNewVersionBehaivorTracker.class); public VisibilityNewVersionBehaivorTracker(NavigableSet columns, - CellComparator cellComparator, int minVersion, int maxVersion, int resultMaxVersions, - long oldestUnexpiredTS) { + CellComparator cellComparator, int minVersion, int maxVersion, int resultMaxVersions, + long oldestUnexpiredTS) { super(columns, cellComparator, minVersion, maxVersion, resultMaxVersions, oldestUnexpiredTS); } @@ -122,37 +121,35 @@ public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTrack prepare(cell); byte type = cell.getTypeByte(); switch (KeyValue.Type.codeToType(type)) { - // By the order of seen. We put null cq at first. - case DeleteFamily: // Delete all versions of all columns of the specified family - delFamMap.put(cell.getSequenceId(), - new VisibilityDeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId(), - new TagInfo(cell))); - break; - case DeleteFamilyVersion: // Delete all columns of the specified family and specified version - delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; + // By the order of seen. We put null cq at first. + case DeleteFamily: // Delete all versions of all columns of the specified family + delFamMap.put(cell.getSequenceId(), new VisibilityDeleteVersionsNode(cell.getTimestamp(), + cell.getSequenceId(), new TagInfo(cell))); + break; + case DeleteFamilyVersion: // Delete all columns of the specified family and specified version + delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; - // These two kinds of markers are mix with Puts. 
- case DeleteColumn: // Delete all versions of the specified column - delColMap.put(cell.getSequenceId(), - new VisibilityDeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId(), - new TagInfo(cell))); - break; - case Delete: // Delete the specified version of the specified column. - delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - default: - throw new AssertionError("Unknown delete marker type for " + cell); + // These two kinds of markers are mix with Puts. + case DeleteColumn: // Delete all versions of the specified column + delColMap.put(cell.getSequenceId(), new VisibilityDeleteVersionsNode(cell.getTimestamp(), + cell.getSequenceId(), new TagInfo(cell))); + break; + case Delete: // Delete the specified version of the specified column. + delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + default: + throw new AssertionError("Unknown delete marker type for " + cell); } } private boolean tagMatched(Cell put, TagInfo delInfo) throws IOException { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(put, putVisTags); - return putVisTags.isEmpty() == delInfo.tags.isEmpty() && ( - (putVisTags.isEmpty() && delInfo.tags.isEmpty()) || VisibilityLabelServiceManager - .getInstance().getVisibilityLabelService() - .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format)); + return putVisTags.isEmpty() == delInfo.tags.isEmpty() + && ((putVisTags.isEmpty() && delInfo.tags.isEmpty()) + || VisibilityLabelServiceManager.getInstance().getVisibilityLabelService() + .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format)); } @Override @@ -161,7 +158,7 @@ public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTrack long duplicateMvcc = prepare(cell); for (Map.Entry e : delColMap.tailMap(cell.getSequenceId()) - .entrySet()) { + .entrySet()) { VisibilityDeleteVersionsNode node = (VisibilityDeleteVersionsNode) e.getValue(); long deleteMvcc = Long.MAX_VALUE; SortedMap deleteVersionMvccs = node.deletesMap.get(cell.getTimestamp()); @@ -174,8 +171,8 @@ public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTrack } } } - SortedMap> subMap = node.mvccCountingMap - .subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); + SortedMap> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), + true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; @@ -202,6 +199,6 @@ public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTrack @Override protected void resetInternal() { delFamMap.put(Long.MAX_VALUE, - new VisibilityDeleteVersionsNode(Long.MIN_VALUE, Long.MAX_VALUE, new TagInfo())); + new VisibilityDeleteVersionsNode(Long.MIN_VALUE, Long.MAX_VALUE, new TagInfo())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java index e39d6016463..c5a3acaac11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
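The delete-marker switch above distinguishes the four HBase delete marker types; on the client they correspond to different Delete methods. A small reference sketch (row, family, qualifier and the timestamp are illustrative):

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

public final class DeleteMarkerSketch {
  public static void main(String[] args) {
    Delete d = new Delete(Bytes.toBytes("row1"));
    d.addFamily(Bytes.toBytes("f"));                            // DeleteFamily
    d.addFamilyVersion(Bytes.toBytes("f"), 100L);               // DeleteFamilyVersion
    d.addColumns(Bytes.toBytes("f"), Bytes.toBytes("q"));       // DeleteColumn (all versions)
    d.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 100L);  // Delete (single version)
    System.out.println(d);
  }
}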
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; @@ -31,10 +29,9 @@ import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.yetus.audience.InterfaceAudience; /** - * A RegionServerObserver impl that provides the custom - * VisibilityReplicationEndpoint. This class should be configured as the - * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be - * replicated as string. The value for the configuration should be + * A RegionServerObserver impl that provides the custom VisibilityReplicationEndpoint. This class + * should be configured as the 'hbase.coprocessor.regionserver.classes' for the visibility tags to + * be replicated as string. The value for the configuration should be * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'. */ @InterfaceAudience.Private @@ -45,21 +42,22 @@ public class VisibilityReplication implements RegionServerCoprocessor, RegionSer @Override public void start(CoprocessorEnvironment env) throws IOException { this.conf = env.getConfiguration(); - visibilityLabelService = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(this.conf); + visibilityLabelService = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(this.conf); } @Override public void stop(CoprocessorEnvironment env) throws IOException { } - @Override public Optional getRegionServerObserver() { + @Override + public Optional getRegionServerObserver() { return Optional.of(this); } @Override public ReplicationEndpoint postCreateReplicationEndPoint( - ObserverContext ctx, ReplicationEndpoint endpoint) { + ObserverContext ctx, ReplicationEndpoint endpoint) { return new VisibilityReplicationEndpoint(endpoint, visibilityLabelService); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java index cd495ce442a..5cffb51500a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
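As the VisibilityReplication javadoc above says, replicating visibility expressions in string form requires registering the observer as a region server coprocessor. In configuration terms, a sketch using the exact key and value named in that javadoc:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class VisibilityReplicationConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.coprocessor.regionserver.classes",
      "org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication");
    System.out.println(conf.get("hbase.coprocessor.regionserver.classes"));
  }
}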
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -47,7 +46,7 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint { private final VisibilityLabelService visibilityLabelsService; public VisibilityReplicationEndpoint(ReplicationEndpoint endpoint, - VisibilityLabelService visibilityLabelsService) { + VisibilityLabelService visibilityLabelsService) { this.delegator = endpoint; this.visibilityLabelsService = visibilityLabelsService; } @@ -58,7 +57,7 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint { } @Override - public void peerConfigUpdated(ReplicationPeerConfig rpc){ + public void peerConfigUpdated(ReplicationPeerConfig rpc) { delegator.peerConfigUpdated(rpc); } @@ -80,22 +79,22 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint { if (cell.getTagsLength() > 0) { visTags.clear(); nonVisTags.clear(); - Byte serializationFormat = VisibilityUtils.extractAndPartitionTags(cell, visTags, - nonVisTags); + Byte serializationFormat = + VisibilityUtils.extractAndPartitionTags(cell, visTags, nonVisTags); if (!visTags.isEmpty()) { try { byte[] modifiedVisExpression = visibilityLabelsService - .encodeVisibilityForReplication(visTags, serializationFormat); + .encodeVisibilityForReplication(visTags, serializationFormat); if (modifiedVisExpression != null) { nonVisTags - .add(new ArrayBackedTag(TagType.STRING_VIS_TAG_TYPE, modifiedVisExpression)); + .add(new ArrayBackedTag(TagType.STRING_VIS_TAG_TYPE, modifiedVisExpression)); } } catch (Exception ioe) { LOG.error( - "Exception while reading the visibility labels from the cell. The replication " - + "would happen as per the existing format and not as " + - "string type for the cell " - + cell + ".", ioe); + "Exception while reading the visibility labels from the cell. The replication " + + "would happen as per the existing format and not as " + + "string type for the cell " + cell + ".", + ioe); // just return the old entries as it is without applying the string type change newEdit.add(cell); continue; @@ -140,7 +139,9 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint { } @Override - public boolean isStarting() {return this.delegator.isStarting();} + public boolean isStarting() { + return this.delegator.isStarting(); + } @Override public void start() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java index 6b9ac7449a4..59623ece135 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,24 +21,23 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.querymatcher.ScanDeleteTracker; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.regionserver.querymatcher.ScanDeleteTracker; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Triple; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Similar to ScanDeletTracker but tracks the visibility expression also before - * deciding if a Cell can be considered deleted + * Similar to ScanDeletTracker but tracks the visibility expression also before deciding if a Cell + * can be considered deleted */ @InterfaceAudience.Private public class VisibilityScanDeleteTracker extends ScanDeleteTracker { @@ -50,10 +48,10 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { * This tag is used for the DELETE cell which has no visibility label. */ private static final List EMPTY_TAG = Collections.EMPTY_LIST; - // Its better to track the visibility tags in delete based on each type. Create individual - // data structures for tracking each of them. This would ensure that there is no tracking based + // Its better to track the visibility tags in delete based on each type. Create individual + // data structures for tracking each of them. This would ensure that there is no tracking based // on time and also would handle all cases where deletefamily or deletecolumns is specified with - // Latest_timestamp. In such cases the ts in the delete marker and the masking + // Latest_timestamp. In such cases the ts in the delete marker and the masking // put will not be same. 
So going with individual data structures for different delete // type would solve this problem and also ensure that the combination of different type // of deletes with diff ts would also work fine @@ -73,7 +71,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { @Override public void add(Cell delCell) { - //Cannot call super.add because need to find if the delete needs to be considered + // Cannot call super.add because need to find if the delete needs to be considered long timestamp = delCell.getTimestamp(); byte type = delCell.getTypeByte(); if (type == KeyValue.Type.DeleteFamily.getCode()) { @@ -124,23 +122,27 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { } deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamily.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamily + .add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } else { - visibilityTagsDeleteFamily.add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamily + .add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); } break; case DeleteFamilyVersion: - if(visibilityTagsDeleteFamilyVersion == null) { + if (visibilityTagsDeleteFamilyVersion == null) { visibilityTagsDeleteFamilyVersion = new ArrayList<>(); } delTags = new ArrayList<>(); deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamilyVersion.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamilyVersion + .add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } else { - visibilityTagsDeleteFamilyVersion.add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamilyVersion + .add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); } break; case DeleteColumn: @@ -189,8 +191,8 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - triple.getFirst(), triple.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + triple.getFirst(), triple.getSecond()); if (matchFound) { // A return type of FAMILY_DELETED will cause skip for all remaining cells from // this @@ -225,8 +227,8 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - triple.getFirst(), triple.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + triple.getFirst(), triple.getSecond()); if (matchFound) { return DeleteResult.FAMILY_VERSION_DELETED; } @@ -254,10 +256,10 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { for (Pair, Byte> tags : visibilityTagsDeleteColumns) { List putVisTags = new ArrayList<>(); Byte 
putCellVisTagsFormat = - VisibilityUtils.extractVisibilityTags(cell, putVisTags); + VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - tags.getFirst(), tags.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + tags.getFirst(), tags.getSecond()); if (matchFound) { return DeleteResult.VERSION_DELETED; } @@ -283,10 +285,10 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { for (Pair, Byte> tags : visiblityTagsDeleteColumnVersion) { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = - VisibilityUtils.extractVisibilityTags(cell, putVisTags); + VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - tags.getFirst(), tags.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + tags.getFirst(), tags.getSecond()); if (matchFound) { return DeleteResult.VERSION_DELETED; } @@ -312,12 +314,12 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { visiblityTagsDeleteColumnVersion = null; } else { throw new IllegalStateException("isDeleted failed: deleteBuffer=" - + Bytes.toStringBinary(deleteCell.getQualifierArray(), - deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) - + ", qualifier=" - + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) - + ", timestamp=" + timestamp + ", comparison result: " + ret); + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), + deleteCell.getQualifierLength()) + + ", qualifier=" + + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + + ", timestamp=" + timestamp + ", comparison result: " + ret); } } } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index c177c2b09e7..05e9f0f03ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.visibility; import static org.apache.hadoop.hbase.TagType.VISIBILITY_TAG_TYPE; import com.google.protobuf.ByteString; - import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -33,7 +32,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Optional; import java.util.Set; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; @@ -75,10 +73,10 @@ public class VisibilityUtils { private static final Logger LOG = LoggerFactory.getLogger(VisibilityUtils.class); public static final String VISIBILITY_LABEL_GENERATOR_CLASS = - "hbase.regionserver.scan.visibility.label.generator.class"; + "hbase.regionserver.scan.visibility.label.generator.class"; public static final String SYSTEM_LABEL = "system"; - public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = new ArrayBackedTag( - TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, + public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = + new ArrayBackedTag(TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL); private static final String COMMA = ","; @@ -86,9 +84,8 @@ public class VisibilityUtils { private static final ExpressionExpander EXP_EXPANDER = new ExpressionExpander(); /** - * Creates the labels data to be written to zookeeper. - * @param existingLabels - * @return Bytes form of labels and their ordinal details to be written to zookeeper. + * Creates the labels data to be written to zookeeper. n * @return Bytes form of labels and their + * ordinal details to be written to zookeeper. */ public static byte[] getDataToWriteToZooKeeper(Map existingLabels) { VisibilityLabelsRequest.Builder visReqBuilder = VisibilityLabelsRequest.newBuilder(); @@ -102,9 +99,8 @@ public class VisibilityUtils { } /** - * Creates the user auth data to be written to zookeeper. - * @param userAuths - * @return Bytes form of user auths details to be written to zookeeper. + * Creates the user auth data to be written to zookeeper. n * @return Bytes form of user auths + * details to be written to zookeeper. */ public static byte[] getUserAuthsDataToWriteToZooKeeper(Map> userAuths) { MultiUserAuthorizations.Builder builder = MultiUserAuthorizations.newBuilder(); @@ -121,14 +117,11 @@ public class VisibilityUtils { /** * Reads back from the zookeeper. The data read here is of the form written by - * writeToZooKeeper(Map<byte[], Integer> entries). - * - * @param data - * @return Labels and their ordinal details - * @throws DeserializationException + * writeToZooKeeper(Map<byte[], Integer> entries). n * @return Labels and their ordinal + * details n */ public static List readLabelsFromZKData(byte[] data) - throws DeserializationException { + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -143,13 +136,10 @@ public class VisibilityUtils { } /** - * Reads back User auth data written to zookeeper. - * @param data - * @return User auth details - * @throws DeserializationException + * Reads back User auth data written to zookeeper. 
n * @return User auth details n */ - public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) - throws DeserializationException { + public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -167,9 +157,8 @@ public class VisibilityUtils { * @param conf The configuration to use * @return Stack of ScanLabelGenerator instances. ScanLabelGenerator classes can be specified in * Configuration as comma separated list using key - * "hbase.regionserver.scan.visibility.label.generator.class" - * @throws IllegalArgumentException - * when any of the specified ScanLabelGenerator class can not be loaded. + * "hbase.regionserver.scan.visibility.label.generator.class" n * when any of the + * specified ScanLabelGenerator class can not be loaded. */ public static List getScanLabelGenerators(Configuration conf) { // There can be n SLG specified as comma separated in conf @@ -194,9 +183,9 @@ public class VisibilityUtils { // 2. DefinedSetFilterScanLabelGenerator // This stacking will achieve the following default behavior: // 1. If there is no Auths in the scan, we will obtain the global defined set for the user - // from the labels table. + // from the labels table. // 2. If there is Auths in the scan, we will examine the passed in Auths and filter out the - // labels that the user is not entitled to. Then use the resulting label set. + // labels that the user is not entitled to. Then use the resulting label set. if (slgs.isEmpty()) { slgs.add(ReflectionUtils.newInstance(FeedUserAuthScanLabelGenerator.class, conf)); slgs.add(ReflectionUtils.newInstance(DefinedSetFilterScanLabelGenerator.class, conf)); @@ -226,18 +215,14 @@ public class VisibilityUtils { /** * Extracts and partitions the visibility tags and nonVisibility Tags - * - * @param cell - the cell for which we would extract and partition the - * visibility and non visibility tags - * @param visTags - * - all the visibilty tags of type TagType.VISIBILITY_TAG_TYPE would - * be added to this list + * @param cell - the cell for which we would extract and partition the visibility and non + * visibility tags n * - all the visibilty tags of type + * TagType.VISIBILITY_TAG_TYPE would be added to this list * @param nonVisTags - all the non visibility tags would be added to this list - * @return - the serailization format of the tag. Can be null if no tags are found or - * if there is no visibility tag found + * @return - the serailization format of the tag. 
Can be null if no tags are found or if there is + * no visibility tag found */ - public static Byte extractAndPartitionTags(Cell cell, List visTags, - List nonVisTags) { + public static Byte extractAndPartitionTags(Cell cell, List visTags, List nonVisTags) { Byte serializationFormat = null; Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { @@ -266,15 +251,15 @@ public class VisibilityUtils { } public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations) - throws IOException { + throws IOException { Map cfVsMaxVersions = new HashMap<>(); for (ColumnFamilyDescriptor hcd : region.getTableDescriptor().getColumnFamilies()) { cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions()); } - VisibilityLabelService vls = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(); - Filter visibilityLabelFilter = new VisibilityLabelFilter( - vls.getVisibilityExpEvaluator(authorizations), cfVsMaxVersions); + VisibilityLabelService vls = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(); + Filter visibilityLabelFilter = + new VisibilityLabelFilter(vls.getVisibilityExpEvaluator(authorizations), cfVsMaxVersions); return visibilityLabelFilter; } @@ -297,8 +282,8 @@ public class VisibilityUtils { } public static List createVisibilityExpTags(String visExpression, - boolean withSerializationFormat, boolean checkAuths, Set auths, - VisibilityLabelOrdinalProvider ordinalProvider) throws IOException { + boolean withSerializationFormat, boolean checkAuths, Set auths, + VisibilityLabelOrdinalProvider ordinalProvider) throws IOException { ExpressionNode node = null; try { node = EXP_PARSER.parse(visExpression); @@ -341,8 +326,8 @@ public class VisibilityUtils { } private static void getLabelOrdinals(ExpressionNode node, List labelOrdinals, - Set auths, boolean checkAuths, VisibilityLabelOrdinalProvider ordinalProvider) - throws IOException, InvalidLabelException { + Set auths, boolean checkAuths, VisibilityLabelOrdinalProvider ordinalProvider) + throws IOException, InvalidLabelException { if (node.isSingleNode()) { String identifier = null; int labelOrdinal = 0; @@ -355,8 +340,8 @@ public class VisibilityUtils { checkAuths(auths, labelOrdinal, identifier, checkAuths); } else { // This is a NOT node. - LeafExpressionNode lNode = (LeafExpressionNode) ((NonLeafExpressionNode) node) - .getChildExps().get(0); + LeafExpressionNode lNode = + (LeafExpressionNode) ((NonLeafExpressionNode) node).getChildExps().get(0); identifier = lNode.getIdentifier(); labelOrdinal = ordinalProvider.getLabelOrdinal(identifier); checkAuths(auths, labelOrdinal, identifier, checkAuths); @@ -376,16 +361,11 @@ public class VisibilityUtils { /** * This will sort the passed labels in ascending oder and then will write one after the other to - * the passed stream. - * @param labelOrdinals - * Unsorted label ordinals - * @param dos - * Stream where to write the labels. - * @throws IOException - * When IOE during writes to Stream. + * the passed stream. n * Unsorted label ordinals n * Stream where to write the labels. n * When + * IOE during writes to Stream. 
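The VisibilityUtils and ScanLabelGenerator plumbing touched in this hunk is the server side of HBase's cell-visibility feature. For context, a minimal client-side sketch follows; it assumes the VisibilityController coprocessor is enabled, and the table `docs`, family `f`, qualifier `q`, user `alice`, and the label names are all hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;
import org.apache.hadoop.hbase.util.Bytes;

public class VisibilityLabelsExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Define labels and grant one to a user; both land in the ZooKeeper mirror that the
      // read/write helpers in this hunk serialize and deserialize.
      VisibilityClient.addLabels(conn, new String[] { "secret", "confidential" });
      VisibilityClient.setAuths(conn, new String[] { "secret" }, "alice");

      try (Table table = conn.getTable(TableName.valueOf("docs"))) {
        // Write a cell guarded by a visibility expression.
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        put.setCellVisibility(new CellVisibility("secret & !confidential"));
        table.put(put);

        // Scan with explicit authorizations; without them the ScanLabelGenerator stack
        // described above falls back to the user's defined set.
        Scan scan = new Scan().setAuthorizations(new Authorizations("secret"));
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            System.out.println(r);
          }
        }

        // Visibility-scoped delete: only cells whose expression matches are affected.
        Delete delete = new Delete(Bytes.toBytes("row1"));
        delete.addColumns(Bytes.toBytes("f"), Bytes.toBytes("q"));
        delete.setCellVisibility(new CellVisibility("secret & !confidential"));
        table.delete(delete);
      }
    }
  }
}

The `&` and `!` in the expressions correspond to the Operator enum (`AND('&')`, `OR('|')`, `NOT('!')`) reformatted later in this patch, and the visibility-scoped delete is what VisibilityScanDeleteTracker evaluates against puts at scan time.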
*/ private static void writeLabelOrdinalsToStream(List labelOrdinals, DataOutputStream dos) - throws IOException { + throws IOException { Collections.sort(labelOrdinals); for (Integer labelOrdinal : labelOrdinals) { StreamUtils.writeRawVInt32(dos, labelOrdinal); @@ -393,11 +373,11 @@ public class VisibilityUtils { } private static void checkAuths(Set auths, int labelOrdinal, String identifier, - boolean checkAuths) throws IOException { + boolean checkAuths) throws IOException { if (checkAuths) { if (auths == null || (!auths.contains(labelOrdinal))) { throw new AccessDeniedException("Visibility label " + identifier - + " not authorized for the user " + VisibilityUtils.getActiveUser().getShortName()); + + " not authorized for the user " + VisibilityUtils.getActiveUser().getShortName()); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java index bcb3b8ba4fb..3150dc448f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,12 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - -import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +39,7 @@ public class ZKVisibilityLabelWatcher extends ZKListener { private static final String VISIBILITY_LABEL_ZK_PATH = "zookeeper.znode.visibility.label.parent"; private static final String DEFAULT_VISIBILITY_LABEL_NODE = "visibility/labels"; private static final String VISIBILITY_USER_AUTHS_ZK_PATH = - "zookeeper.znode.visibility.user.auths.parent"; + "zookeeper.znode.visibility.user.auths.parent"; private static final String DEFAULT_VISIBILITY_USER_AUTHS_NODE = "visibility/user_auths"; private VisibilityLabelsCache labelsCache; @@ -48,15 +47,15 @@ public class ZKVisibilityLabelWatcher extends ZKListener { private String userAuthsZnode; public ZKVisibilityLabelWatcher(ZKWatcher watcher, VisibilityLabelsCache labelsCache, - Configuration conf) { + Configuration conf) { super(watcher); this.labelsCache = labelsCache; String labelZnodeParent = conf.get(VISIBILITY_LABEL_ZK_PATH, DEFAULT_VISIBILITY_LABEL_NODE); - String userAuthsZnodeParent = conf.get(VISIBILITY_USER_AUTHS_ZK_PATH, - DEFAULT_VISIBILITY_USER_AUTHS_NODE); + String userAuthsZnodeParent = + conf.get(VISIBILITY_USER_AUTHS_ZK_PATH, DEFAULT_VISIBILITY_USER_AUTHS_NODE); this.labelZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, labelZnodeParent); - this.userAuthsZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, - userAuthsZnodeParent); + this.userAuthsZnode = + 
ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, userAuthsZnodeParent); } public void start() throws KeeperException { @@ -132,10 +131,8 @@ public class ZKVisibilityLabelWatcher extends ZKListener { } /** - * Write a labels mirror or user auths mirror into zookeeper - * - * @param data - * @param labelsOrUserAuths true for writing labels and false for user auths. + * Write a labels mirror or user auths mirror into zookeeper n * @param labelsOrUserAuths true for + * writing labels and false for user auths. */ public void writeToZookeeper(byte[] data, boolean labelsOrUserAuths) { String znode = this.labelZnode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java index fcc66a8b5ea..4a3cbd358b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java index fd479b40594..4151ecff450 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java index 83610fadc8f..94bb99faa03 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.visibility.expression; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java index f7ffe464cb3..3492ab9d1c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,9 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum Operator { - AND('&'), OR('|'), NOT('!'); + AND('&'), + OR('|'), + NOT('!'); private final char rep; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java index 342aa87feda..3c73c3d5d04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.server.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_METHOD; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SERVICE; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SYSTEM; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -34,12 +34,14 @@ import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RpcSystem; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; /** * Construct {@link Span} instances originating from the server side of an IPC. - * - * @see Semantic conventions for RPC spans + * @see Semantic + * conventions for RPC spans */ @InterfaceAudience.Private public class IpcServerSpanBuilder implements Supplier { @@ -48,13 +50,11 @@ public class IpcServerSpanBuilder implements Supplier { private final Map, Object> attributes = new HashMap<>(); public IpcServerSpanBuilder(final RpcCall rpcCall) { - final String packageAndService = Optional.ofNullable(rpcCall.getService()) - .map(BlockingService::getDescriptorForType) - .map(IpcClientSpanBuilder::getRpcPackageAndService) - .orElse(""); - final String method = Optional.ofNullable(rpcCall.getMethod()) - .map(IpcClientSpanBuilder::getRpcName) - .orElse(""); + final String packageAndService = + Optional.ofNullable(rpcCall.getService()).map(BlockingService::getDescriptorForType) + .map(IpcClientSpanBuilder::getRpcPackageAndService).orElse(""); + final String method = + Optional.ofNullable(rpcCall.getMethod()).map(IpcClientSpanBuilder::getRpcName).orElse(""); setName(IpcClientSpanBuilder.buildSpanName(packageAndService, method)); addAttribute(RPC_SYSTEM, RpcSystem.HBASE_RPC.name()); addAttribute(RPC_SERVICE, packageAndService); @@ -78,9 +78,8 @@ public class IpcServerSpanBuilder implements Supplier { @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) - .setSpanKind(SpanKind.SERVER); + final SpanBuilder builder = + TraceUtil.getGlobalTracer().spanBuilder(name).setSpanKind(SpanKind.SERVER); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); return builder.startSpan(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java index f8e54c9c459..be86d6fc8a9 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase.snapshot; import java.util.Arrays; import java.util.Locale; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; @@ -37,43 +35,42 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; */ @InterfaceAudience.Private public class CreateSnapshot extends AbstractHBaseTool { - private SnapshotType snapshotType = SnapshotType.FLUSH; - private TableName tableName = null; - private String snapshotName = null; + private SnapshotType snapshotType = SnapshotType.FLUSH; + private TableName tableName = null; + private String snapshotName = null; - public static void main(String[] args) { - new CreateSnapshot().doStaticMain(args); - } + public static void main(String[] args) { + new CreateSnapshot().doStaticMain(args); + } - @Override - protected void addOptions() { - this.addRequiredOptWithArg("t", "table", "The name of the table"); - this.addRequiredOptWithArg("n", "name", "The name of the created snapshot"); - this.addOptWithArg("s", "snapshot_type", - "Snapshot Type. FLUSH is default. Posible values are " - + Arrays.toString(SnapshotType.values())); - } + @Override + protected void addOptions() { + this.addRequiredOptWithArg("t", "table", "The name of the table"); + this.addRequiredOptWithArg("n", "name", "The name of the created snapshot"); + this.addOptWithArg("s", "snapshot_type", "Snapshot Type. FLUSH is default. 
Posible values are " + + Arrays.toString(SnapshotType.values())); + } - @Override - protected void processOptions(CommandLine cmd) { - this.tableName = TableName.valueOf(cmd.getOptionValue('t')); - this.snapshotName = cmd.getOptionValue('n'); - String snapshotTypeName = cmd.getOptionValue('s'); - if (snapshotTypeName != null) { - snapshotTypeName = snapshotTypeName.toUpperCase(Locale.ROOT); - this.snapshotType = SnapshotType.valueOf(snapshotTypeName); - } + @Override + protected void processOptions(CommandLine cmd) { + this.tableName = TableName.valueOf(cmd.getOptionValue('t')); + this.snapshotName = cmd.getOptionValue('n'); + String snapshotTypeName = cmd.getOptionValue('s'); + if (snapshotTypeName != null) { + snapshotTypeName = snapshotTypeName.toUpperCase(Locale.ROOT); + this.snapshotType = SnapshotType.valueOf(snapshotTypeName); } + } - @Override - protected int doWork() throws Exception { - try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { - admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); - } catch (Exception e) { - System.err.println("failed to take the snapshot: " + e.getMessage()); - return -1; - } - return 0; + @Override + protected int doWork() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(getConf()); + Admin admin = connection.getAdmin()) { + admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); + } catch (Exception e) { + System.err.println("failed to take the snapshot: " + e.getMessage()); + return -1; } + return 0; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index e4928e8fa77..73408a7edb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; @@ -71,50 +70,52 @@ import org.apache.hadoop.io.IOUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** * Helper to Restore/Clone a Snapshot - * - *

- * <p>The helper assumes that a table is already created, and by calling restore()
- * the content present in the snapshot will be restored as the new content of the table.
- *
- * <p>Clone from Snapshot: If the target table is empty, the restore operation
- * is just a "clone operation", where the only operations are:
+ * <p>
+ * The helper assumes that a table is already created, and by calling restore() the content present
+ * in the snapshot will be restored as the new content of the table.
+ * <p>
+ * Clone from Snapshot: If the target table is empty, the restore operation is just a "clone
+ * operation", where the only operations are:
  * <ul>
- *  <li>for each region in the snapshot create a new region
- *    (note that the region will have a different name, since the encoding contains the table name)
- *  <li>for each file in the region create a new HFileLink to point to the original file.
- *  <li>restore the logs, if any
+ * <li>for each region in the snapshot create a new region (note that the region will have a
+ * different name, since the encoding contains the table name)
+ * <li>for each file in the region create a new HFileLink to point to the original file.
+ * <li>restore the logs, if any
  * </ul>
- *
- * <p>Restore from Snapshot:
+ * <p>
+ * Restore from Snapshot:
  * <ul>
- *  <li>for each region in the table verify which are available in the snapshot and which are not
- *    <ul>
- *    <li>if the region is not present in the snapshot, remove it.
- *    <li>if the region is present in the snapshot
- *      <ul>
- *      <li>for each file in the table region verify which are available in the snapshot
- *        <ul>
- *          <li>if the hfile is not present in the snapshot, remove it
- *          <li>if the hfile is present, keep it (nothing to do)
- *        </ul>
- *      <li>for each file in the snapshot region but not in the table
- *        <ul>
- *          <li>create a new HFileLink that point to the original file
- *        </ul>
- *      </ul>
- *    </ul>
- *  <li>for each region in the snapshot not present in the current table state
- *    <ul>
- *    <li>create a new region and for each file in the region create a new HFileLink
- *      (This is the same as the clone operation)
- *    </ul>
- *  <li>restore the logs, if any
+ * <li>for each region in the table verify which are available in the snapshot and which are not
+ * <ul>
+ * <li>if the region is not present in the snapshot, remove it.
+ * <li>if the region is present in the snapshot
+ * <ul>
+ * <li>for each file in the table region verify which are available in the snapshot
+ * <ul>
+ * <li>if the hfile is not present in the snapshot, remove it
+ * <li>if the hfile is present, keep it (nothing to do)
+ * </ul>
+ * <li>for each file in the snapshot region but not in the table
+ * <ul>
+ * <li>create a new HFileLink that point to the original file
+ * </ul>
+ * </ul>
+ * </ul>
+ * <li>for each region in the snapshot not present in the current table state
+ * <ul>
+ * <li>create a new region and for each file in the region create a new HFileLink (This is the same
+ * as the clone operation)
+ * </ul>
+ * <li>restore the logs, if any
  * </ul>
      */ @InterfaceAudience.Private @@ -123,7 +124,7 @@ public class RestoreSnapshotHelper { private final Map regionsMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - private final Map > parentsMap = new HashMap<>(); + private final Map> parentsMap = new HashMap<>(); private final ForeignExceptionDispatcher monitor; private final MonitoredTask status; @@ -140,13 +141,9 @@ public class RestoreSnapshotHelper { private final FileSystem fs; private final boolean createBackRefs; - public RestoreSnapshotHelper(final Configuration conf, - final FileSystem fs, - final SnapshotManifest manifest, - final TableDescriptor tableDescriptor, - final Path rootDir, - final ForeignExceptionDispatcher monitor, - final MonitoredTask status) { + public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs, + final SnapshotManifest manifest, final TableDescriptor tableDescriptor, final Path rootDir, + final ForeignExceptionDispatcher monitor, final MonitoredTask status) { this(conf, fs, manifest, tableDescriptor, rootDir, monitor, status, true); } @@ -224,13 +221,13 @@ public class RestoreSnapshotHelper { // NOTE: we rely upon the region name as: "table name, start key, end key" if (tableRegions != null) { monitor.rethrowException(); - for (RegionInfo regionInfo: tableRegions) { + for (RegionInfo regionInfo : tableRegions) { String regionName = regionInfo.getEncodedName(); if (regionNames.contains(regionName)) { LOG.info("region to restore: " + regionName); regionNames.remove(regionName); - metaChanges.addRegionToRestore(ProtobufUtil.toRegionInfo(regionManifests.get(regionName) - .getRegionInfo())); + metaChanges.addRegionToRestore( + ProtobufUtil.toRegionInfo(regionManifests.get(regionName).getRegionInfo())); } else { LOG.info("region to remove: " + regionName); metaChanges.addRegionToRemove(regionInfo); @@ -242,10 +239,10 @@ public class RestoreSnapshotHelper { List regionsToAdd = new ArrayList<>(regionNames.size()); if (regionNames.size() > 0) { monitor.rethrowException(); - for (String regionName: regionNames) { + for (String regionName : regionNames) { LOG.info("region to add: " + regionName); - regionsToAdd.add(ProtobufUtil.toRegionInfo(regionManifests.get(regionName) - .getRegionInfo())); + regionsToAdd + .add(ProtobufUtil.toRegionInfo(regionManifests.get(regionName).getRegionInfo())); } } @@ -279,14 +276,14 @@ public class RestoreSnapshotHelper { * Describe the set of operations needed to update hbase:meta after restore. */ public static class RestoreMetaChanges { - private final Map > parentsMap; + private final Map> parentsMap; private final TableDescriptor htd; private List regionsToRestore = null; private List regionsToRemove = null; private List regionsToAdd = null; - public RestoreMetaChanges(TableDescriptor htd, Map > parentsMap) { + public RestoreMetaChanges(TableDescriptor htd, Map> parentsMap) { this.parentsMap = parentsMap; this.htd = htd; } @@ -311,9 +308,8 @@ public class RestoreSnapshotHelper { } /** - * Returns the list of new regions added during the on-disk restore. - * The caller is responsible to add the regions to META. - * e.g MetaTableAccessor.addRegionsToMeta(...) + * Returns the list of new regions added during the on-disk restore. The caller is responsible + * to add the regions to META. e.g MetaTableAccessor.addRegionsToMeta(...) * @return the list of regions to add to META */ public List getRegionsToAdd() { @@ -328,8 +324,8 @@ public class RestoreSnapshotHelper { } /** - * Returns the list of 'restored regions' during the on-disk restore. 
- * The caller is responsible to add the regions to hbase:meta if not present. + * Returns the list of 'restored regions' during the on-disk restore. The caller is responsible + * to add the regions to hbase:meta if not present. * @return the list of regions restored */ public List getRegionsToRestore() { @@ -344,9 +340,8 @@ public class RestoreSnapshotHelper { } /** - * Returns the list of regions removed during the on-disk restore. - * The caller is responsible to remove the regions from META. - * e.g. MetaTableAccessor.deleteRegions(...) + * Returns the list of regions removed during the on-disk restore. The caller is responsible to + * remove the regions from META. e.g. MetaTableAccessor.deleteRegions(...) * @return the list of regions to remove from META */ public List getRegionsToRemove() { @@ -375,14 +370,14 @@ public class RestoreSnapshotHelper { regionsToRestore.add(hri); } - public void updateMetaParentRegions(Connection connection, - final List regionInfos) throws IOException { + public void updateMetaParentRegions(Connection connection, final List regionInfos) + throws IOException { if (regionInfos == null || parentsMap.isEmpty()) return; // Extract region names and offlined regions Map regionsByName = new HashMap<>(regionInfos.size()); List parentRegions = new LinkedList<>(); - for (RegionInfo regionInfo: regionInfos) { + for (RegionInfo regionInfo : regionInfos) { if (regionInfo.isSplitParent()) { parentRegions.add(regionInfo); } else { @@ -391,7 +386,7 @@ public class RestoreSnapshotHelper { } // Update Offline parents - for (RegionInfo regionInfo: parentRegions) { + for (RegionInfo regionInfo : parentRegions) { Pair daughters = parentsMap.get(regionInfo.getEncodedName()); if (daughters == null) { // The snapshot contains an unreferenced region. @@ -407,8 +402,7 @@ public class RestoreSnapshotHelper { LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> " + daughters); MetaTableAccessor.addSplitsToParent(connection, regionInfo, - regionsByName.get(daughters.getFirst()), - regionsByName.get(daughters.getSecond())); + regionsByName.get(daughters.getFirst()), regionsByName.get(daughters.getSecond())); } } } @@ -417,7 +411,7 @@ public class RestoreSnapshotHelper { * Remove specified regions from the file-system, using the archiver. */ private void removeHdfsRegions(final ThreadPoolExecutor exec, final List regions) - throws IOException { + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -431,8 +425,8 @@ public class RestoreSnapshotHelper { * Restore specified regions by restoring content to the snapshot state. */ private void restoreHdfsRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -446,8 +440,8 @@ public class RestoreSnapshotHelper { * Restore specified mob regions by restoring content to the snapshot state. 
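As the javadoc above notes, RestoreMetaChanges only reports the on-disk outcome; the caller is responsible for reconciling hbase:meta. A hedged sketch of consuming it, restricted to the accessors visible in this hunk (the connection handling and logging are illustrative, and the lists may be null or empty when nothing changed):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.RestoreMetaChanges;

public class RestoreMetaChangesExample {
  /** Consume the three change sets reported by RestoreSnapshotHelper.restoreHdfsRegions(). */
  static void reconcileMeta(Configuration conf, RestoreMetaChanges metaChanges)
    throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      List<RegionInfo> toAdd = metaChanges.getRegionsToAdd();         // register in hbase:meta
      List<RegionInfo> toRestore = metaChanges.getRegionsToRestore(); // already present, verify
      List<RegionInfo> toRemove = metaChanges.getRegionsToRemove();   // drop from hbase:meta
      System.out.println("add=" + toAdd + ", restore=" + toRestore + ", remove=" + toRemove);
      // The actual MetaTableAccessor calls are omitted here; after adding the new regions,
      // offline split parents also need their daughter references fixed up:
      if (toAdd != null) {
        metaChanges.updateMetaParentRegions(connection, toAdd);
      }
    }
  }
}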
*/ private void restoreHdfsMobRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -457,11 +451,11 @@ public class RestoreSnapshotHelper { }); } - private Map> getRegionHFileReferences( - final SnapshotRegionManifest manifest) { + private Map> + getRegionHFileReferences(final SnapshotRegionManifest manifest) { Map> familyMap = new HashMap<>(manifest.getFamilyFilesCount()); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { familyMap.put(familyFiles.getFamilyName().toStringUtf8(), new ArrayList<>(familyFiles.getStoreFilesList())); } @@ -469,20 +463,20 @@ public class RestoreSnapshotHelper { } /** - * Restore region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreRegion(final RegionInfo regionInfo, - final SnapshotRegionManifest regionManifest) throws IOException { + final SnapshotRegionManifest regionManifest) throws IOException { restoreRegion(regionInfo, regionManifest, new Path(tableDir, regionInfo.getEncodedName())); } /** - * Restore mob region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore mob region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreMobRegion(final RegionInfo regionInfo, - final SnapshotRegionManifest regionManifest) throws IOException { + final SnapshotRegionManifest regionManifest) throws IOException { if (regionManifest == null) { return; } @@ -491,39 +485,39 @@ public class RestoreSnapshotHelper { } /** - * Restore region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreRegion(final RegionInfo regionInfo, - final SnapshotRegionManifest regionManifest, Path regionDir) throws IOException { + final SnapshotRegionManifest regionManifest, Path regionDir) throws IOException { Map> snapshotFiles = - getRegionHFileReferences(regionManifest); + getRegionHFileReferences(regionManifest); String tableName = tableDesc.getTableName().getNameAsString(); final String snapshotName = snapshotDesc.getName(); Path regionPath = new Path(tableDir, regionInfo.getEncodedName()); - HRegionFileSystem regionFS = (fs.exists(regionPath)) ? - HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false) : - HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo); + HRegionFileSystem regionFS = (fs.exists(regionPath)) + ? 
HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false) + : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo); // Restore families present in the table - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { + for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { byte[] family = Bytes.toBytes(familyDir.getName()); Set familyFiles = getTableRegionFamilyFiles(familyDir); List snapshotFamilyFiles = - snapshotFiles.remove(familyDir.getName()); + snapshotFiles.remove(familyDir.getName()); List filesToTrack = new ArrayList<>(); if (snapshotFamilyFiles != null) { List hfilesToAdd = new ArrayList<>(); - for (SnapshotRegionManifest.StoreFile storeFile: snapshotFamilyFiles) { + for (SnapshotRegionManifest.StoreFile storeFile : snapshotFamilyFiles) { if (familyFiles.contains(storeFile.getName())) { // HFile already present familyFiles.remove(storeFile.getName()); - //no need to restore already present files, but we need to add those to tracker - filesToTrack.add(new StoreFileInfo(conf, fs, - new Path(familyDir, storeFile.getName()), true)); + // no need to restore already present files, but we need to add those to tracker + filesToTrack + .add(new StoreFileInfo(conf, fs, new Path(familyDir, storeFile.getName()), true)); } else { // HFile missing hfilesToAdd.add(storeFile); @@ -531,57 +525,55 @@ public class RestoreSnapshotHelper { } // Remove hfiles not present in the snapshot - for (String hfileName: familyFiles) { + for (String hfileName : familyFiles) { Path hfile = new Path(familyDir, hfileName); if (!fs.getFileStatus(hfile).isDirectory()) { - LOG.trace("Removing HFile=" + hfileName + " not present in snapshot=" + - snapshotName + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); + LOG.trace("Removing HFile=" + hfileName + " not present in snapshot=" + snapshotName + + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile); } } // Restore Missing files - for (SnapshotRegionManifest.StoreFile storeFile: hfilesToAdd) { - LOG.debug("Restoring missing HFileLink " + storeFile.getName() + - " of snapshot=" + snapshotName+ - " to region=" + regionInfo.getEncodedName() + " table=" + tableName); + for (SnapshotRegionManifest.StoreFile storeFile : hfilesToAdd) { + LOG.debug("Restoring missing HFileLink " + storeFile.getName() + " of snapshot=" + + snapshotName + " to region=" + regionInfo.getEncodedName() + " table=" + tableName); String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); // mark the reference file to be added to tracker - filesToTrack.add(new StoreFileInfo(conf, fs, - new Path(familyDir, fileName), true)); + filesToTrack.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); } } else { // Family doesn't exists in the snapshot - LOG.trace("Removing family=" + Bytes.toString(family) + " in snapshot=" + snapshotName + - " from region=" + regionInfo.getEncodedName() + " table=" + tableName); + LOG.trace("Removing family=" + Bytes.toString(family) + " in snapshot=" + snapshotName + + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); HFileArchiver.archiveFamilyByFamilyDir(fs, conf, regionInfo, familyDir, family); fs.delete(familyDir, true); } - StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). 
- withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); - //simply reset list of tracked files with the matching files - //and the extra one present in the snapshot + // simply reset list of tracked files with the matching files + // and the extra one present in the snapshot tracker.set(filesToTrack); } // Add families not present in the table - for (Map.Entry> familyEntry: - snapshotFiles.entrySet()) { + for (Map.Entry> familyEntry : snapshotFiles + .entrySet()) { Path familyDir = new Path(regionDir, familyEntry.getKey()); - StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). - withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); List files = new ArrayList<>(); if (!fs.mkdirs(familyDir)) { throw new IOException("Unable to create familyDir=" + familyDir); } - for (SnapshotRegionManifest.StoreFile storeFile: familyEntry.getValue()) { + for (SnapshotRegionManifest.StoreFile storeFile : familyEntry.getValue()) { LOG.trace("Adding HFileLink (Not present in the table) " + storeFile.getName() - + " of snapshot " + snapshotName + " to table=" + tableName); + + " of snapshot " + snapshotName + " to table=" + tableName); String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); files.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); } @@ -608,12 +600,12 @@ public class RestoreSnapshotHelper { } /** - * Clone specified regions. For each region create a new region - * and create a HFileLink for each hfile. + * Clone specified regions. For each region create a new region and create a HFileLink for each + * hfile. */ private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return null; final Map snapshotRegions = new HashMap<>(regions.size()); @@ -630,16 +622,16 @@ public class RestoreSnapshotHelper { String snapshotRegionName = snapshotRegionInfo.getEncodedName(); String clonedRegionName = clonedRegionsInfo[i].getEncodedName(); regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName)); - LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName + - " in snapshot " + snapshotName); + LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName + " in snapshot " + + snapshotName); // Add mapping between cloned region name and snapshot region info snapshotRegions.put(clonedRegionName, snapshotRegionInfo); } // create the regions on disk - ModifyRegionUtils.createRegions(exec, conf, rootDir, - tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() { + ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDesc, clonedRegionsInfo, + new ModifyRegionUtils.RegionFillTask() { @Override public void fillRegion(final HRegion region) throws IOException { RegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName()); @@ -651,42 +643,35 @@ public class RestoreSnapshotHelper { } /** - * Clone the mob region. 
For the region create a new region - * and create a HFileLink for each hfile. + * Clone the mob region. For the region create a new region and create a HFileLink for each hfile. */ private void cloneHdfsMobRegion(final Map regionManifests, - final RegionInfo region) throws IOException { + final RegionInfo region) throws IOException { // clone region info (change embedded tableName with the new one) Path clonedRegionPath = MobUtils.getMobRegionPath(rootDir, tableDesc.getTableName()); - cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), - clonedRegionPath, region, regionManifests.get(region.getEncodedName())); + cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), clonedRegionPath, region, + regionManifests.get(region.getEncodedName())); } /** - * Clone region directory content from the snapshot info. - * - * Each region is encoded with the table name, so the cloned region will have - * a different region name. - * - * Instead of copying the hfiles a HFileLink is created. - * - * @param regionDir {@link Path} cloned dir - * @param snapshotRegionInfo + * Clone region directory content from the snapshot info. Each region is encoded with the table + * name, so the cloned region will have a different region name. Instead of copying the hfiles a + * HFileLink is created. + * @param regionDir {@link Path} cloned dir n */ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, - final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) - throws IOException { + final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) throws IOException { final String tableName = tableDesc.getTableName().getNameAsString(); final String snapshotName = snapshotDesc.getName(); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8()); List clonedFiles = new ArrayList<>(); - for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { - LOG.info("Adding HFileLink " + storeFile.getName() +" from cloned region " - + "in snapshot " + snapshotName + " to table=" + tableName); + for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) { + LOG.info("Adding HFileLink " + storeFile.getName() + " from cloned region " + "in snapshot " + + snapshotName + " to table=" + tableName); if (MobUtils.isMobRegionInfo(newRegionInfo)) { - String mobFileName = HFileLink.createHFileLinkName(snapshotRegionInfo, - storeFile.getName()); + String mobFileName = + HFileLink.createHFileLinkName(snapshotRegionInfo, storeFile.getName()); Path mobPath = new Path(familyDir, mobFileName); if (fs.exists(mobPath)) { fs.delete(mobPath, true); @@ -697,18 +682,18 @@ public class RestoreSnapshotHelper { clonedFiles.add(new StoreFileInfo(conf, fs, new Path(familyDir, file), true)); } } - //we don't need to track files under mobdir + // we don't need to track files under mobdir if (!MobUtils.isMobRegionInfo(newRegionInfo)) { Path regionPath = new Path(tableDir, newRegionInfo.getEncodedName()); - HRegionFileSystem regionFS = (fs.exists(regionPath)) ? - HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) : - HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); + HRegionFileSystem regionFS = (fs.exists(regionPath)) + ? 
HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) + : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc, tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray())); - StoreFileTracker tracker = StoreFileTrackerFactory.create(sftConf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). - withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(sftConf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); tracker.set(clonedFiles); } } @@ -716,40 +701,34 @@ public class RestoreSnapshotHelper { } /** - * Clone region directory content from the snapshot info. - * - * Each region is encoded with the table name, so the cloned region will have - * a different region name. - * - * Instead of copying the hfiles a HFileLink is created. - * - * @param region {@link HRegion} cloned - * @param snapshotRegionInfo + * Clone region directory content from the snapshot info. Each region is encoded with the table + * name, so the cloned region will have a different region name. Instead of copying the hfiles a + * HFileLink is created. + * @param region {@link HRegion} cloned n */ private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionInfo, - final SnapshotRegionManifest manifest) throws IOException { - cloneRegion(region.getRegionInfo(), - new Path(tableDir, region.getRegionInfo().getEncodedName()), - snapshotRegionInfo, - manifest); + final SnapshotRegionManifest manifest) throws IOException { + cloneRegion(region.getRegionInfo(), new Path(tableDir, region.getRegionInfo().getEncodedName()), + snapshotRegionInfo, manifest); } /** * Create a new {@link HFileLink} to reference the store file. - *

- * <p>The store file in the snapshot can be a simple hfile, an HFileLink or a reference.
+ * <p>
+ * The store file in the snapshot can be a simple hfile, an HFileLink or a reference.
  * <ul>
- *   <li>hfile: abc -> table=region-abc
- *   <li>reference: abc.1234 -> table=region-abc.1234
- *   <li>hfilelink: table=region-hfile -> table=region-hfile
+ * <li>hfile: abc -> table=region-abc
+ * <li>reference: abc.1234 -> table=region-abc.1234
+ * <li>hfilelink: table=region-hfile -> table=region-hfile
  * </ul>
      - * @param familyDir destination directory for the store file - * @param regionInfo destination region info for the table + * @param familyDir destination directory for the store file + * @param regionInfo destination region info for the table * @param createBackRef - Whether back reference should be created. Defaults to true. - * @param storeFile store file name (can be a Reference, HFileLink or simple HFile) + * @param storeFile store file name (can be a Reference, HFileLink or simple HFile) */ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInfo, - final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) - throws IOException { + final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) + throws IOException { String hfileName = storeFile.getName(); if (HFileLink.isHFileLink(hfileName)) { return HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName, createBackRef); @@ -762,7 +741,10 @@ public class RestoreSnapshotHelper { /** * Create a new {@link Reference} as copy of the source one. - *

      +   * 

      + *

      + * + *
          * The source table looks like:
          *    1234/abc      (original file)
          *    5678/abc.1234 (reference file)
      @@ -773,20 +755,27 @@ public class RestoreSnapshotHelper {
          *
          * NOTE that the region name in the clone changes (md5 of regioninfo)
          * and the reference should reflect that change.
      -   * 
      - * @param familyDir destination directory for the store file + *
      + * + *
      + * @param familyDir destination directory for the store file * @param regionInfo destination region info for the table - * @param storeFile reference file name + * @param storeFile reference file name */ private String restoreReferenceFile(final Path familyDir, final RegionInfo regionInfo, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { String hfileName = storeFile.getName(); // Extract the referred information (hfile name and parent region) Path refPath = - StoreFileInfo.getReferredToFile(new Path(new Path(new Path(new Path(snapshotTable - .getNamespaceAsString(), snapshotTable.getQualifierAsString()), regionInfo - .getEncodedName()), familyDir.getName()), hfileName)); + StoreFileInfo + .getReferredToFile( + new Path( + new Path( + new Path(new Path(snapshotTable.getNamespaceAsString(), + snapshotTable.getQualifierAsString()), regionInfo.getEncodedName()), + familyDir.getName()), + hfileName)); String snapshotRegionName = refPath.getParent().getParent().getName(); String fileName = refPath.getName(); @@ -814,15 +803,15 @@ public class RestoreSnapshotHelper { if (linkPath != null) { in = HFileLink.buildFromHFileLinkPattern(conf, linkPath).open(fs); } else { - linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), - regionInfo.getEncodedName()), familyDir.getName()), hfileName); + linkPath = new Path(new Path( + HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), regionInfo.getEncodedName()), + familyDir.getName()), hfileName); in = fs.open(linkPath); } OutputStream out = fs.create(outPath); IOUtils.copyBytes(in, out, conf); } - // Add the daughter region to the map String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes())); if (regionName == null) { @@ -844,10 +833,8 @@ public class RestoreSnapshotHelper { } /** - * Create a new {@link RegionInfo} from the snapshot region info. - * Keep the same startKey, endKey, regionId and split information but change - * the table name. - * + * Create a new {@link RegionInfo} from the snapshot region info. Keep the same startKey, endKey, + * regionId and split information but change the table name. * @param snapshotRegionInfo Info for region to clone. 
* @return the new HRegion instance */ @@ -856,13 +843,10 @@ public class RestoreSnapshotHelper { } public static RegionInfo cloneRegionInfo(TableName tableName, RegionInfo snapshotRegionInfo) { - return RegionInfoBuilder.newBuilder(tableName) - .setStartKey(snapshotRegionInfo.getStartKey()) - .setEndKey(snapshotRegionInfo.getEndKey()) - .setSplit(snapshotRegionInfo.isSplit()) - .setRegionId(snapshotRegionInfo.getRegionId()) - .setOffline(snapshotRegionInfo.isOffline()) - .build(); + return RegionInfoBuilder.newBuilder(tableName).setStartKey(snapshotRegionInfo.getStartKey()) + .setEndKey(snapshotRegionInfo.getEndKey()).setSplit(snapshotRegionInfo.isSplit()) + .setRegionId(snapshotRegionInfo.getRegionId()).setOffline(snapshotRegionInfo.isOffline()) + .build(); } /** @@ -881,44 +865,38 @@ public class RestoreSnapshotHelper { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath()); regions.add(hri); } - LOG.debug("found " + regions.size() + " regions for table=" + - tableDesc.getTableName().getNameAsString()); + LOG.debug("found " + regions.size() + " regions for table=" + + tableDesc.getTableName().getNameAsString()); return regions; } /** - * Copy the snapshot files for a snapshot scanner, discards meta changes. - * @param conf - * @param fs - * @param rootDir - * @param restoreDir - * @param snapshotName - * @throws IOException + * Copy the snapshot files for a snapshot scanner, discards meta changes. nnnnnn */ public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs, - Path rootDir, Path restoreDir, String snapshotName) throws IOException { + Path rootDir, Path restoreDir, String snapshotName) throws IOException { // ensure that restore dir is not under root dir if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) { - throw new IllegalArgumentException("Filesystems for restore directory and HBase root " + - "directory should be the same"); + throw new IllegalArgumentException( + "Filesystems for restore directory and HBase root " + "directory should be the same"); } - if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath() +"/")) { - throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase " + - "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir); + if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath() + "/")) { + throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase " + + "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir); } Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); - MonitoredTask status = TaskMonitor.get().createStatus( - "Restoring snapshot '" + snapshotName + "' to directory " + restoreDir); + MonitoredTask status = TaskMonitor.get() + .createStatus("Restoring snapshot '" + snapshotName + "' to directory " + restoreDir); ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(); // we send createBackRefs=false so that restored hfiles do not create back reference links // in the base hbase root dir. 
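copySnapshotForScanner(), whose body is reformatted in this hunk, lays a snapshot out under a scratch directory without touching hbase:meta; snapshot-reading paths such as TableSnapshotScanner build on it. A hedged sketch of a direct call, where the restore directory and snapshot name are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

public class SnapshotScannerRestoreExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // hbase.rootdir must point at the cluster root directory.
    Path rootDir = new Path(conf.get("hbase.rootdir"));
    FileSystem fs = rootDir.getFileSystem(conf);
    // Hypothetical restore location: same filesystem as the root dir, but not under it
    // (both constraints are checked by copySnapshotForScanner above).
    Path restoreDir = new Path("/hbase-staging/restore-" + System.currentTimeMillis());
    // "my_snapshot" is a placeholder snapshot name.
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "my_snapshot");
    System.out.println("Snapshot laid out for scanning under " + restoreDir);
  }
}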
- RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, - manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false); + RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, manifest, + manifest.getTableDescriptor(), restoreDir, monitor, status, false); RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize. if (LOG.isDebugEnabled()) { @@ -929,11 +907,11 @@ public class RestoreSnapshotHelper { } public static void restoreSnapshotAcl(SnapshotDescription snapshot, TableName newTableName, - Configuration conf) throws IOException { + Configuration conf) throws IOException { if (snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null) { LOG.info("Restore snapshot acl to table. snapshot: " + snapshot + ", table: " + newTableName); ListMultimap perms = - ShadedAccessControlUtil.toUserTablePermissions(snapshot.getUsersAndPermissions()); + ShadedAccessControlUtil.toUserTablePermissions(snapshot.getUsersAndPermissions()); try (Connection conn = ConnectionFactory.createConnection(conf)) { for (Entry e : perms.entries()) { String user = e.getKey(); @@ -943,7 +921,7 @@ public class RestoreSnapshotHelper { } } catch (Throwable e) { throw new IOException("Grant acl into newly creatd table failed. snapshot: " + snapshot - + ", table: " + newTableName, e); + + ", table: " + newTableName, e); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index c66991f8564..fb54b57ba17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** @@ -82,9 +81,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot *
      * * Utility methods in this class are useful for getting the correct locations for different parts of - * the snapshot, as well as moving completed snapshots into place (see - * {@link #completeSnapshot}, and writing the - * {@link SnapshotDescription} to the working snapshot directory. + * the snapshot, as well as moving completed snapshots into place (see {@link #completeSnapshot}, + * and writing the {@link SnapshotDescription} to the working snapshot directory. */ @InterfaceAudience.Private public final class SnapshotDescriptionUtils { @@ -95,7 +93,7 @@ public final class SnapshotDescriptionUtils { public static class CompletedSnaphotDirectoriesFilter extends FSUtils.BlackListDirFilter { /** - * @param fs + * n */ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { super(fs, Collections.singletonList(SNAPSHOT_TMP_DIR_NAME)); @@ -119,8 +117,7 @@ public final class SnapshotDescriptionUtils { public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp"; /** - * The configuration property that determines the filepath of the snapshot - * base working directory + * The configuration property that determines the filepath of the snapshot base working directory */ public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir"; @@ -131,16 +128,16 @@ public final class SnapshotDescriptionUtils { // Default value if no ttl is specified for Snapshot private static final long NO_SNAPSHOT_TTL_SPECIFIED = 0; - public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis"; + public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = + "hbase.snapshot.master.timeout.millis"; /** By default, wait 300 seconds for a snapshot to complete */ - public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5 ; - + public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5; /** * By default, check to see if the snapshot is complete (ms) * @deprecated Use {@link #DEFAULT_MAX_WAIT_TIME} instead. - * */ + */ @Deprecated public static final int SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 60000 * 5; @@ -157,21 +154,21 @@ public final class SnapshotDescriptionUtils { } /** - * @param conf {@link Configuration} from which to check for the timeout - * @param type type of snapshot being taken + * @param conf {@link Configuration} from which to check for the timeout + * @param type type of snapshot being taken * @param defaultMaxWaitTime Default amount of time to wait, if none is in the configuration * @return the max amount of time the master should wait for a snapshot to complete */ public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type, - long defaultMaxWaitTime) { + long defaultMaxWaitTime) { String confKey; switch (type) { - case DISABLED: - default: - confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS; + case DISABLED: + default: + confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS; } return Math.max(conf.getLong(confKey, defaultMaxWaitTime), - conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, defaultMaxWaitTime)); + conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, defaultMaxWaitTime)); } /** @@ -188,10 +185,11 @@ public final class SnapshotDescriptionUtils { * Get the directory for a specified snapshot. This directory is a sub-directory of snapshot root * directory and all the data files for a snapshot are kept under this directory. 
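// Illustrative sketch, not part of the patch: resolving the effective master-side snapshot
// timeout. getMaxMasterTimeout() returns the larger of the value configured under
// MASTER_SNAPSHOT_TIMEOUT_MILLIS (or the legacy SNAPSHOT_TIMEOUT_MILLIS_KEY) and the
// supplied default; conf is assumed to be the cluster Configuration.
long timeoutMs = SnapshotDescriptionUtils.getMaxMasterTimeout(conf,
  SnapshotDescription.Type.DISABLED, SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);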
* @param snapshot snapshot being taken - * @param rootDir hbase root directory + * @param rootDir hbase root directory * @return the final directory for the completed snapshot */ - public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir) { + public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, + final Path rootDir) { return getCompletedSnapshotDir(snapshot.getName(), rootDir); } @@ -199,7 +197,7 @@ public final class SnapshotDescriptionUtils { * Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root * directory and all the data files for a snapshot are kept under this directory. * @param snapshotName name of the snapshot being taken - * @param rootDir hbase root directory + * @param rootDir hbase root directory * @return the final directory for the completed snapshot */ public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) { @@ -210,35 +208,35 @@ public final class SnapshotDescriptionUtils { * Get the general working directory for snapshots - where they are built, where they are * temporarily copied on export, etc. * @param rootDir root directory of the HBase installation - * @param conf Configuration of the HBase instance + * @param conf Configuration of the HBase instance * @return Path to the snapshot tmp directory, relative to the passed root directory */ public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) { - return new Path(conf.get(SNAPSHOT_WORKING_DIR, - getDefaultWorkingSnapshotDir(rootDir).toString())); + return new Path( + conf.get(SNAPSHOT_WORKING_DIR, getDefaultWorkingSnapshotDir(rootDir).toString())); } /** * Get the directory to build a snapshot, before it is finalized * @param snapshot snapshot that will be built - * @param rootDir root directory of the hbase installation - * @param conf Configuration of the HBase instance + * @param rootDir root directory of the hbase installation + * @param conf Configuration of the HBase instance * @return {@link Path} where one can build a snapshot */ public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir, - Configuration conf) { + Configuration conf) { return getWorkingSnapshotDir(snapshot.getName(), rootDir, conf); } /** * Get the directory to build a snapshot, before it is finalized * @param snapshotName name of the snapshot - * @param rootDir root directory of the hbase installation - * @param conf Configuration of the HBase instance + * @param rootDir root directory of the hbase installation + * @param conf Configuration of the HBase instance * @return {@link Path} where one can build a snapshot */ public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir, - Configuration conf) { + Configuration conf) { return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName); } @@ -263,9 +261,9 @@ public final class SnapshotDescriptionUtils { /** * Determines if the given workingDir is a subdirectory of the given "root directory" * @param workingDir a directory to check - * @param rootDir root directory of the HBase installation - * @return true if the given workingDir is a subdirectory of the given root directory, - * false otherwise + * @param rootDir root directory of the HBase installation + * @return true if the given workingDir is a subdirectory of the given root directory, false + * otherwise */ public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) { 
return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR); @@ -274,9 +272,9 @@ public final class SnapshotDescriptionUtils { /** * Determines if the given workingDir is a subdirectory of the default working snapshot directory * @param workingDir a directory to check - * @param conf configuration for the HBase cluster + * @param conf configuration for the HBase cluster * @return true if the given workingDir is a subdirectory of the default working directory for - * snapshots, false otherwise + * snapshots, false otherwise * @throws IOException if we can't get the root dir */ public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf) @@ -300,16 +298,16 @@ public final class SnapshotDescriptionUtils { * parameters, if none have been supplied. This resolves any 'optional' parameters that aren't * supplied to their default values. * @param snapshot general snapshot descriptor - * @param conf Configuration to read configured snapshot defaults if snapshot is not complete + * @param conf Configuration to read configured snapshot defaults if snapshot is not complete * @return a valid snapshot description * @throws IllegalArgumentException if the {@link SnapshotDescription} is not a complete - * {@link SnapshotDescription}. + * {@link SnapshotDescription}. */ public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf) - throws IllegalArgumentException, IOException { + throws IllegalArgumentException, IOException { if (!snapshot.hasTable()) { throw new IllegalArgumentException( - "Descriptor doesn't apply to a table, so we can't build it."); + "Descriptor doesn't apply to a table, so we can't build it."); } // set the creation time, if one hasn't been set @@ -317,7 +315,7 @@ public final class SnapshotDescriptionUtils { if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) { time = EnvironmentEdgeManager.currentTime(); LOG.debug("Creation time not specified, setting to:" + time + " (current time:" - + EnvironmentEdgeManager.currentTime() + ")."); + + EnvironmentEdgeManager.currentTime() + ")."); SnapshotDescription.Builder builder = snapshot.toBuilder(); builder.setCreationTime(time); snapshot = builder.build(); @@ -325,13 +323,15 @@ public final class SnapshotDescriptionUtils { long ttl = snapshot.getTtl(); // set default ttl(sec) if it is not set already or the value is out of the range - if (ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED || - ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { - final long defaultSnapshotTtl = conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, - HConstants.DEFAULT_SNAPSHOT_TTL); + if ( + ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED + || ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE) + ) { + final long defaultSnapshotTtl = + conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, HConstants.DEFAULT_SNAPSHOT_TTL); if (LOG.isDebugEnabled()) { LOG.debug("Snapshot current TTL value: {} resetting it to default value: {}", ttl, - defaultSnapshotTtl); + defaultSnapshotTtl); } ttl = defaultSnapshotTtl; } @@ -348,18 +348,18 @@ public final class SnapshotDescriptionUtils { /** * Write the snapshot description into the working directory of a snapshot - * @param snapshot description of the snapshot being taken + * @param snapshot description of the snapshot being taken * @param workingDir working directory of the snapshot - * @param fs {@link FileSystem} on which the snapshot should be taken + * @param fs {@link FileSystem} on which 
the snapshot should be taken * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on - * failure + * failure */ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs) - throws IOException { - FsPermission perms = CommonFSUtils.getFilePermissions(fs, fs.getConf(), - HConstants.DATA_FILE_UMASK_KEY); + throws IOException { + FsPermission perms = + CommonFSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY); Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)){ + try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)) { snapshot.writeTo(out); } catch (IOException e) { // if we get an exception, try to remove the snapshot info @@ -372,17 +372,17 @@ public final class SnapshotDescriptionUtils { } /** - * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory - * @param fs filesystem where the snapshot was taken + * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} + * stored for the snapshot in the passed directory + * @param fs filesystem where the snapshot was taken * @param snapshotDir directory where the snapshot was stored * @return the stored snapshot description - * @throws CorruptedSnapshotException if the - * snapshot cannot be read + * @throws CorruptedSnapshotException if the snapshot cannot be read */ public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) - throws CorruptedSnapshotException { + throws CorruptedSnapshotException { Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE); - try (FSDataInputStream in = fs.open(snapshotInfo)){ + try (FSDataInputStream in = fs.open(snapshotInfo)) { return SnapshotDescription.parseFrom(in); } catch (IOException e) { throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e); @@ -390,36 +390,34 @@ public final class SnapshotDescriptionUtils { } /** - * Commits the snapshot process by moving the working snapshot - * to the finalized filepath - * - * @param snapshotDir The file path of the completed snapshots - * @param workingDir The file path of the in progress snapshots - * @param fs The file system of the completed snapshots + * Commits the snapshot process by moving the working snapshot to the finalized filepath + * @param snapshotDir The file path of the completed snapshots + * @param workingDir The file path of the in progress snapshots + * @param fs The file system of the completed snapshots * @param workingDirFs The file system of the in progress snapshots - * @param conf Configuration - * + * @param conf Configuration * @throws SnapshotCreationException if the snapshot could not be moved - * @throws IOException the filesystem could not be reached + * @throws IOException the filesystem could not be reached */ public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs, FileSystem workingDirFs, final Configuration conf) throws SnapshotCreationException, IOException { - LOG.debug("Sentinel is done, just moving the snapshot from " + workingDir + " to " - + snapshotDir); + LOG.debug( + "Sentinel is done, just moving the snapshot from " + workingDir + " to " + snapshotDir); // If the working and completed snapshot directory are on the same file system, attempt // to rename 
the working snapshot directory to the completed location. If that fails, // or the file systems differ, attempt to copy the directory over, throwing an exception // if this fails URI workingURI = workingDirFs.getUri(); URI rootURI = fs.getUri(); - if ((!workingURI.getScheme().equals(rootURI.getScheme()) || - workingURI.getAuthority() == null || - !workingURI.getAuthority().equals(rootURI.getAuthority()) || - workingURI.getUserInfo() == null || - !workingURI.getUserInfo().equals(rootURI.getUserInfo()) || - !fs.rename(workingDir, snapshotDir)) && !FileUtil.copy(workingDirFs, workingDir, fs, - snapshotDir, true, true, conf)) { + if ( + (!workingURI.getScheme().equals(rootURI.getScheme()) || workingURI.getAuthority() == null + || !workingURI.getAuthority().equals(rootURI.getAuthority()) + || workingURI.getUserInfo() == null + || !workingURI.getUserInfo().equals(rootURI.getUserInfo()) + || !fs.rename(workingDir, snapshotDir)) + && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf) + ) { throw new SnapshotCreationException("Failed to copy working directory(" + workingDir + ") to completed directory(" + snapshotDir + ")."); } @@ -428,33 +426,34 @@ public final class SnapshotDescriptionUtils { /** * Check if the user is this table snapshot's owner * @param snapshot the table snapshot description - * @param user the user - * @return true if the user is the owner of the snapshot, - * false otherwise or the snapshot owner field is not present. + * @param user the user + * @return true if the user is the owner of the snapshot, false otherwise or the snapshot owner + * field is not present. */ public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDescription snapshot, - User user) { + User user) { if (user == null) return false; return user.getShortName().equals(snapshot.getOwner()); } public static boolean isSecurityAvailable(Configuration conf) throws IOException { - try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); } } private static SnapshotDescription writeAclToSnapshotDescription(SnapshotDescription snapshot, - Configuration conf) throws IOException { + Configuration conf) throws IOException { ListMultimap perms = - User.runAsLoginUser(new PrivilegedExceptionAction>() { - @Override - public ListMultimap run() throws Exception { - return PermissionStorage.getTablePermissions(conf, - TableName.valueOf(snapshot.getTable())); - } - }); + User.runAsLoginUser(new PrivilegedExceptionAction>() { + @Override + public ListMultimap run() throws Exception { + return PermissionStorage.getTablePermissions(conf, + TableName.valueOf(snapshot.getTable())); + } + }); return snapshot.toBuilder() - .setUsersAndPermissions(ShadedAccessControlUtil.toUserTablePermissions(perms)).build(); + .setUsersAndPermissions(ShadedAccessControlUtil.toUserTablePermissions(perms)).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 8b69675d9ef..29dbeacda8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license 
agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -44,18 +43,16 @@ import org.apache.hadoop.hbase.io.WALLink; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.org.apache.commons.cli.AlreadySelectedException; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; -import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; -import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; +import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -64,10 +61,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot /** * Tool for dumping snapshot information. *
 * <ol>
- *   <li> Table Descriptor
- *   <li> Snapshot creation time, type, format version, ...
- *   <li> List of hfiles and wals
- *   <li> Stats about hfiles and logs sizes, percentage of shared with the source table, ...
+ * <li>Table Descriptor
+ * <li>Snapshot creation time, type, format version, ...
+ * <li>List of hfiles and wals
+ * <li>Stats about hfiles and logs sizes, percentage of shared with the source table, ...
 * </ol>
      */ @InterfaceAudience.Public @@ -75,33 +72,33 @@ public final class SnapshotInfo extends AbstractHBaseTool { private static final Logger LOG = LoggerFactory.getLogger(SnapshotInfo.class); static final class Options { - static final Option SNAPSHOT = new Option(null, "snapshot", true, - "The name of the snapshot to be detailed."); - static final Option REMOTE_DIR = new Option(null, "remote-dir", true, - "A custom root directory where snapshots are stored. " - + "Use it together with the --snapshot option."); - static final Option LIST_SNAPSHOTS = new Option(null, "list-snapshots", false, - "List all the available snapshots and exit."); - static final Option FILES = new Option(null, "files", false, - "The list of files retained by the specified snapshot. " + static final Option SNAPSHOT = + new Option(null, "snapshot", true, "The name of the snapshot to be detailed."); + static final Option REMOTE_DIR = + new Option(null, "remote-dir", true, "A custom root directory where snapshots are stored. " + "Use it together with the --snapshot option."); - static final Option STATS = new Option(null, "stats", false, - "Additional information about the specified snapshot. " + static final Option LIST_SNAPSHOTS = + new Option(null, "list-snapshots", false, "List all the available snapshots and exit."); + static final Option FILES = + new Option(null, "files", false, "The list of files retained by the specified snapshot. " + + "Use it together with the --snapshot option."); + static final Option STATS = + new Option(null, "stats", false, "Additional information about the specified snapshot. " + "Use it together with the --snapshot option."); static final Option SCHEMA = new Option(null, "schema", false, - "Show the descriptor of the table for the specified snapshot. " - + "Use it together with the --snapshot option."); - static final Option SIZE_IN_BYTES = new Option(null, "size-in-bytes", false, - "Print the size of the files in bytes. " - + "Use it together with the --snapshot and --files options."); + "Show the descriptor of the table for the specified snapshot. " + + "Use it together with the --snapshot option."); + static final Option SIZE_IN_BYTES = + new Option(null, "size-in-bytes", false, "Print the size of the files in bytes. " + + "Use it together with the --snapshot and --files options."); } /** * Statistics about the snapshot *
 * <ol>
- *   <li> How many store files and logs are in the archive
- *   <li> How many store files and logs are shared with the table
- *   <li> Total store files and logs size and shared amount
+ * <li>How many store files and logs are in the archive
+ * <li>How many store files and logs are shared with the table
+ * <li>Total store files and logs size and shared amount
 * </ol>
      */ public static class SnapshotStats { @@ -164,8 +161,7 @@ public final class SnapshotInfo extends AbstractHBaseTool { private final FileSystem fs; SnapshotStats(final Configuration conf, final FileSystem fs, - final SnapshotDescription snapshot) - { + final SnapshotDescription snapshot) { this.snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); this.snapshotTable = snapshot.getTableName(); this.conf = conf; @@ -173,14 +169,13 @@ public final class SnapshotInfo extends AbstractHBaseTool { } SnapshotStats(final Configuration conf, final FileSystem fs, - final SnapshotProtos.SnapshotDescription snapshot) { + final SnapshotProtos.SnapshotDescription snapshot) { this.snapshot = snapshot; this.snapshotTable = TableName.valueOf(snapshot.getTable()); this.conf = conf; this.fs = fs; } - /** @return the snapshot descriptor */ public SnapshotDescription getSnapshotDescription() { return ProtobufUtil.createSnapshotDesc(this.snapshot); @@ -188,9 +183,7 @@ public final class SnapshotInfo extends AbstractHBaseTool { /** @return true if the snapshot is corrupted */ public boolean isSnapshotCorrupted() { - return hfilesMissing.get() > 0 || - logsMissing.get() > 0 || - hfilesCorrupted.get() > 0; + return hfilesMissing.get() > 0 || logsMissing.get() > 0 || hfilesCorrupted.get() > 0; } /** @return the number of available store files */ @@ -204,7 +197,9 @@ public final class SnapshotInfo extends AbstractHBaseTool { } /** @return the number of available store files in the mob dir */ - public int getMobStoreFilesCount() { return hfilesMobCount.get(); } + public int getMobStoreFilesCount() { + return hfilesMobCount.get(); + } /** @return the number of available log files */ public int getLogsCount() { @@ -241,15 +236,16 @@ public final class SnapshotInfo extends AbstractHBaseTool { return hfilesArchiveSize.get(); } - /** @return the total size of the store files in the mob store*/ - public long getMobStoreFilesSize() { return hfilesMobSize.get(); } + /** @return the total size of the store files in the mob store */ + public long getMobStoreFilesSize() { + return hfilesMobSize.get(); + } - /** @return the total size of the store files in the archive which is not shared - * with other snapshots and tables - * - * This is only calculated when - * {@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)} - * is called with a non-null Map + /** + * @return the total size of the store files in the archive which is not shared with other + * snapshots and tables This is only calculated when + * {@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)} is + * called with a non-null Map */ public long getNonSharedArchivedStoreFilesSize() { return nonSharedHfilesArchiveSize.get(); @@ -270,15 +266,15 @@ public final class SnapshotInfo extends AbstractHBaseTool { return logSize.get(); } - /** Check if for a give file in archive, if there are other snapshots/tables still - * reference it. - * @param filePath file path in archive - * @param snapshotFilesMap a map for store files in snapshots about how many snapshots refer - * to it. + /** + * Check if for a give file in archive, if there are other snapshots/tables still reference it. + * @param filePath file path in archive + * @param snapshotFilesMap a map for store files in snapshots about how many snapshots refer to + * it. 
* @return true or false */ private boolean isArchivedFileStillReferenced(final Path filePath, - final Map snapshotFilesMap) { + final Map snapshotFilesMap) { Integer c = snapshotFilesMap.get(filePath); @@ -301,17 +297,17 @@ public final class SnapshotInfo extends AbstractHBaseTool { /** * Add the specified store file to the stats - * @param region region encoded Name - * @param family family name + * @param region region encoded Name + * @param family family name * @param storeFile store file name - * @param filesMap store files map for all snapshots, it may be null + * @param filesMap store files map for all snapshots, it may be null * @return the store file information */ FileInfo addStoreFile(final RegionInfo region, final String family, - final SnapshotRegionManifest.StoreFile storeFile, - final Map filesMap) throws IOException { - HFileLink link = HFileLink.build(conf, snapshotTable, region.getEncodedName(), - family, storeFile.getName()); + final SnapshotRegionManifest.StoreFile storeFile, final Map filesMap) + throws IOException { + HFileLink link = + HFileLink.build(conf, snapshotTable, region.getEncodedName(), family, storeFile.getName()); boolean isCorrupted = false; boolean inArchive = false; long size = -1; @@ -324,8 +320,9 @@ public final class SnapshotInfo extends AbstractHBaseTool { // If store file is not shared with other snapshots and tables, // increase nonSharedHfilesArchiveSize - if ((filesMap != null) && - !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) { + if ( + (filesMap != null) && !isArchivedFileStillReferenced(link.getArchivePath(), filesMap) + ) { nonSharedHfilesArchiveSize.addAndGet(size); } } else if (fs.exists(link.getMobPath())) { @@ -348,7 +345,7 @@ public final class SnapshotInfo extends AbstractHBaseTool { /** * Add the specified log file to the stats - * @param server server name + * @param server server name * @param logfile log file name * @return the log information */ @@ -391,11 +388,10 @@ public final class SnapshotInfo extends AbstractHBaseTool { if (listSnapshots) { SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); System.out.printf("%-20s | %-20s | %-20s | %s%n", "SNAPSHOT", "CREATION TIME", "TTL IN SEC", - "TABLE NAME"); - for (SnapshotDescription desc: getSnapshotList(conf)) { + "TABLE NAME"); + for (SnapshotDescription desc : getSnapshotList(conf)) { System.out.printf("%-20s | %20s | %20s | %s%n", desc.getName(), - df.format(new Date(desc.getCreationTime())), desc.getTtl(), - desc.getTableNameAsString()); + df.format(new Date(desc.getCreationTime())), desc.getTtl(), desc.getTableNameAsString()); } return 0; } @@ -432,7 +428,7 @@ public final class SnapshotInfo extends AbstractHBaseTool { } SnapshotProtos.SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc); return true; } @@ -466,8 +462,8 @@ public final class SnapshotInfo extends AbstractHBaseTool { } /** - * Collect the hfiles and logs statistics of the snapshot and - * dump the file list if requested and the collected information. + * Collect the hfiles and logs statistics of the snapshot and dump the file list if requested and + * the collected information. 
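// Illustrative sketch, not part of the patch: the same statistics the tool prints can be
// aggregated programmatically through getSnapshotList() and getSnapshotStats(); all of the
// accessors used here appear in this class, and conf is assumed to be the cluster
// Configuration.
for (SnapshotDescription desc : SnapshotInfo.getSnapshotList(conf)) {
  SnapshotInfo.SnapshotStats stats = SnapshotInfo.getSnapshotStats(conf, desc);
  System.out.printf("%s: %d hfiles (%d archived, %d mob), %d logs, corrupted=%b%n",
    desc.getName(), stats.getStoreFilesCount(), stats.getArchivedStoreFilesCount(),
    stats.getMobStoreFilesCount(), stats.getLogsCount(), stats.isSnapshotCorrupted());
}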
*/ private void printFiles(final boolean showFiles, final boolean showStats) throws IOException { if (showFiles) { @@ -476,28 +472,28 @@ public final class SnapshotInfo extends AbstractHBaseTool { } // Collect information about hfiles and logs in the snapshot - final SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); + final SnapshotProtos.SnapshotDescription snapshotDesc = + snapshotManifest.getSnapshotDescription(); final String table = snapshotDesc.getTable(); final SnapshotDescription desc = ProtobufUtil.createSnapshotDesc(snapshotDesc); final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(getConf(), fs, snapshotManifest, - "SnapshotInfo", - new SnapshotReferenceUtil.SnapshotVisitor() { + "SnapshotInfo", new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { if (storeFile.hasReference()) return; SnapshotStats.FileInfo info = stats.addStoreFile(regionInfo, family, storeFile, null); if (showFiles) { String state = info.getStateToString(); System.out.printf("%8s %s/%s/%s/%s %s%n", - (info.isMissing() ? "-" : fileSizeToString(info.getSize())), - table, regionInfo.getEncodedName(), family, storeFile.getName(), + (info.isMissing() ? "-" : fileSizeToString(info.getSize())), table, + regionInfo.getEncodedName(), family, storeFile.getName(), state == null ? "" : "(" + state + ")"); } } - }); + }); // Dump the stats System.out.println(); @@ -511,18 +507,15 @@ public final class SnapshotInfo extends AbstractHBaseTool { } if (showStats) { - System.out.printf("%d HFiles (%d in archive, %d in mob storage), total size %s " + - "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", + System.out.printf( + "%d HFiles (%d in archive, %d in mob storage), total size %s " + + "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", stats.getStoreFilesCount(), stats.getArchivedStoreFilesCount(), - stats.getMobStoreFilesCount(), - fileSizeToString(stats.getStoreFilesSize()), - stats.getSharedStoreFilePercentage(), - fileSizeToString(stats.getSharedStoreFilesSize()), - stats.getMobStoreFilePercentage(), - fileSizeToString(stats.getMobStoreFilesSize()) - ); - System.out.printf("%d Logs, total size %s%n", - stats.getLogsCount(), fileSizeToString(stats.getLogsSize())); + stats.getMobStoreFilesCount(), fileSizeToString(stats.getStoreFilesSize()), + stats.getSharedStoreFilePercentage(), fileSizeToString(stats.getSharedStoreFilesSize()), + stats.getMobStoreFilePercentage(), fileSizeToString(stats.getMobStoreFilesSize())); + System.out.printf("%d Logs, total size %s%n", stats.getLogsCount(), + fileSizeToString(stats.getLogsSize())); System.out.println(); } } @@ -542,17 +535,16 @@ public final class SnapshotInfo extends AbstractHBaseTool { addOption(Options.SIZE_IN_BYTES); } - @Override protected CommandLineParser newParser() { // Commons-CLI lacks the capability to handle combinations of options, so we do it ourselves // Validate in parse() to get helpful error messages instead of exploding in processOptions() return new DefaultParser() { @Override - public CommandLine parse(org.apache.hbase.thirdparty.org.apache.commons.cli.Options opts, String[] args, Properties props, boolean stop) - throws ParseException { + public CommandLine 
parse(org.apache.hbase.thirdparty.org.apache.commons.cli.Options opts, + String[] args, Properties props, boolean stop) throws ParseException { CommandLine cl = super.parse(opts, args, props, stop); - if(!cmd.hasOption(Options.LIST_SNAPSHOTS) && !cmd.hasOption(Options.SNAPSHOT)) { + if (!cmd.hasOption(Options.LIST_SNAPSHOTS) && !cmd.hasOption(Options.SNAPSHOT)) { throw new ParseException("Missing required snapshot option!"); } return cl; @@ -564,8 +556,8 @@ public final class SnapshotInfo extends AbstractHBaseTool { protected void processOptions(CommandLine cmd) { snapshotName = cmd.getOptionValue(Options.SNAPSHOT.getLongOpt()); showFiles = cmd.hasOption(Options.FILES.getLongOpt()); - showStats = cmd.hasOption(Options.FILES.getLongOpt()) - || cmd.hasOption(Options.STATS.getLongOpt()); + showStats = + cmd.hasOption(Options.FILES.getLongOpt()) || cmd.hasOption(Options.STATS.getLongOpt()); showSchema = cmd.hasOption(Options.SCHEMA.getLongOpt()); listSnapshots = cmd.hasOption(Options.LIST_SNAPSHOTS.getLongOpt()); printSizeInBytes = cmd.hasOption(Options.SIZE_IN_BYTES.getLongOpt()); @@ -583,12 +575,12 @@ public final class SnapshotInfo extends AbstractHBaseTool { /** * Returns the snapshot stats - * @param conf the {@link Configuration} to use + * @param conf the {@link Configuration} to use * @param snapshot {@link SnapshotDescription} to get stats from * @return the snapshot stats */ public static SnapshotStats getSnapshotStats(final Configuration conf, - final SnapshotDescription snapshot) throws IOException { + final SnapshotDescription snapshot) throws IOException { SnapshotProtos.SnapshotDescription snapshotDesc = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); return getSnapshotStats(conf, snapshotDesc, null); @@ -596,28 +588,29 @@ public final class SnapshotInfo extends AbstractHBaseTool { /** * Returns the snapshot stats - * @param conf the {@link Configuration} to use - * @param snapshotDesc HBaseProtos.SnapshotDescription to get stats from - * @param filesMap {@link Map} store files map for all snapshots, it may be null + * @param conf the {@link Configuration} to use + * @param snapshotDesc HBaseProtos.SnapshotDescription to get stats from + * @param filesMap {@link Map} store files map for all snapshots, it may be null * @return the snapshot stats */ public static SnapshotStats getSnapshotStats(final Configuration conf, - final SnapshotProtos.SnapshotDescription snapshotDesc, - final Map filesMap) throws IOException { + final SnapshotProtos.SnapshotDescription snapshotDesc, final Map filesMap) + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); final SnapshotStats stats = new SnapshotStats(conf, fs, snapshotDesc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, - "SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() { - @Override - public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - if (!storeFile.hasReference()) { - stats.addStoreFile(regionInfo, family, storeFile, filesMap); - } - }}); + "SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile 
storeFile) throws IOException { + if (!storeFile.hasReference()) { + stats.addStoreFile(regionInfo, family, storeFile, filesMap); + } + } + }); return stats; } @@ -627,16 +620,16 @@ public final class SnapshotInfo extends AbstractHBaseTool { * @return the list of snapshots */ public static List getSnapshotList(final Configuration conf) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir); FileStatus[] snapshots = fs.listStatus(snapshotDir, - new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); + new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); List snapshotLists = new ArrayList<>(snapshots.length); - for (FileStatus snapshotDirStat: snapshots) { + for (FileStatus snapshotDirStat : snapshots) { SnapshotProtos.SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); snapshotLists.add(ProtobufUtil.createSnapshotDesc(snapshotDesc)); } return snapshotLists; @@ -644,81 +637,80 @@ public final class SnapshotInfo extends AbstractHBaseTool { /** * Gets the store files map for snapshot - * @param conf the {@link Configuration} to use - * @param snapshot {@link SnapshotDescription} to get stats from - * @param exec the {@link ExecutorService} to use - * @param filesMap {@link Map} the map to put the mapping entries + * @param conf the {@link Configuration} to use + * @param snapshot {@link SnapshotDescription} to get stats from + * @param exec the {@link ExecutorService} to use + * @param filesMap {@link Map} the map to put the mapping entries * @param uniqueHFilesArchiveSize {@link AtomicLong} the accumulated store file size in archive - * @param uniqueHFilesSize {@link AtomicLong} the accumulated store file size shared - * @param uniqueHFilesMobSize {@link AtomicLong} the accumulated mob store file size shared + * @param uniqueHFilesSize {@link AtomicLong} the accumulated store file size shared + * @param uniqueHFilesMobSize {@link AtomicLong} the accumulated mob store file size shared */ private static void getSnapshotFilesMap(final Configuration conf, - final SnapshotDescription snapshot, final ExecutorService exec, - final ConcurrentHashMap filesMap, - final AtomicLong uniqueHFilesArchiveSize, final AtomicLong uniqueHFilesSize, - final AtomicLong uniqueHFilesMobSize) throws IOException { + final SnapshotDescription snapshot, final ExecutorService exec, + final ConcurrentHashMap filesMap, final AtomicLong uniqueHFilesArchiveSize, + final AtomicLong uniqueHFilesSize, final AtomicLong uniqueHFilesMobSize) throws IOException { SnapshotProtos.SnapshotDescription snapshotDesc = - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); Path rootDir = CommonFSUtils.getRootDir(conf); final FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, exec, - new SnapshotReferenceUtil.SnapshotVisitor() { - @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - if 
(!storeFile.hasReference()) { - HFileLink link = HFileLink.build(conf, snapshot.getTableName(), - regionInfo.getEncodedName(), family, storeFile.getName()); - long size; - Integer count; - Path p; - AtomicLong al; - int c = 0; + new SnapshotReferenceUtil.SnapshotVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + if (!storeFile.hasReference()) { + HFileLink link = HFileLink.build(conf, snapshot.getTableName(), + regionInfo.getEncodedName(), family, storeFile.getName()); + long size; + Integer count; + Path p; + AtomicLong al; + int c = 0; - if (fs.exists(link.getArchivePath())) { - p = link.getArchivePath(); - al = uniqueHFilesArchiveSize; - size = fs.getFileStatus(p).getLen(); - } else if (fs.exists(link.getMobPath())) { - p = link.getMobPath(); - al = uniqueHFilesMobSize; - size = fs.getFileStatus(p).getLen(); - } else { - p = link.getOriginPath(); - al = uniqueHFilesSize; - size = link.getFileStatus(fs).getLen(); - } - - // If it has been counted, do not double count - count = filesMap.get(p); - if (count != null) { - c = count.intValue(); - } else { - al.addAndGet(size); - } - - filesMap.put(p, ++c); + if (fs.exists(link.getArchivePath())) { + p = link.getArchivePath(); + al = uniqueHFilesArchiveSize; + size = fs.getFileStatus(p).getLen(); + } else if (fs.exists(link.getMobPath())) { + p = link.getMobPath(); + al = uniqueHFilesMobSize; + size = fs.getFileStatus(p).getLen(); + } else { + p = link.getOriginPath(); + al = uniqueHFilesSize; + size = link.getFileStatus(fs).getLen(); } + + // If it has been counted, do not double count + count = filesMap.get(p); + if (count != null) { + c = count.intValue(); + } else { + al.addAndGet(size); + } + + filesMap.put(p, ++c); } - }); + } + }); } /** * Returns the map of store files based on path for all snapshots - * @param conf the {@link Configuration} to use + * @param conf the {@link Configuration} to use * @param uniqueHFilesArchiveSize pass out the size for store files in archive - * @param uniqueHFilesSize pass out the size for store files shared - * @param uniqueHFilesMobSize pass out the size for mob store files shared + * @param uniqueHFilesSize pass out the size for store files shared + * @param uniqueHFilesMobSize pass out the size for mob store files shared * @return the map of store files */ public static Map getSnapshotsFilesMap(final Configuration conf, - AtomicLong uniqueHFilesArchiveSize, AtomicLong uniqueHFilesSize, - AtomicLong uniqueHFilesMobSize) throws IOException { + AtomicLong uniqueHFilesArchiveSize, AtomicLong uniqueHFilesSize, AtomicLong uniqueHFilesMobSize) + throws IOException { List snapshotList = getSnapshotList(conf); - if (snapshotList.isEmpty()) { return Collections.emptyMap(); } @@ -730,7 +722,7 @@ public final class SnapshotInfo extends AbstractHBaseTool { try { for (final SnapshotDescription snapshot : snapshotList) { getSnapshotFilesMap(conf, snapshot, exec, fileMap, uniqueHFilesArchiveSize, - uniqueHFilesSize, uniqueHFilesMobSize); + uniqueHFilesSize, uniqueHFilesMobSize); } } finally { exec.shutdown(); @@ -739,7 +731,6 @@ public final class SnapshotInfo extends AbstractHBaseTool { return fileMap; } - public static void main(String[] args) { new SnapshotInfo().doStaticMain(args); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 368e72b3d02..8f5a4e32c6b 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -66,11 +65,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Utility class to help read/write the Snapshot Manifest. - * - * The snapshot format is transparent for the users of this class, - * once the snapshot is written, it will never be modified. - * On open() the snapshot will be loaded to the current in-memory format. + * Utility class to help read/write the Snapshot Manifest. The snapshot format is transparent for + * the users of this class, once the snapshot is written, it will never be modified. On open() the + * snapshot will be loaded to the current in-memory format. */ @InterfaceAudience.Private public final class SnapshotManifest { @@ -93,18 +90,17 @@ public final class SnapshotManifest { private final MonitoredTask statusTask; /** - * - * @param conf configuration file for HBase setup - * @param rootFs root filesystem containing HFiles + * @param conf configuration file for HBase setup + * @param rootFs root filesystem containing HFiles * @param workingDir file path of where the manifest should be located - * @param desc description of snapshot being taken - * @param monitor monitor of foreign exceptions - * @throws IOException if the working directory file system cannot be - * determined from the config file + * @param desc description of snapshot being taken + * @param monitor monitor of foreign exceptions + * @throws IOException if the working directory file system cannot be determined from the config + * file */ - private SnapshotManifest(final Configuration conf, final FileSystem rootFs, - final Path workingDir, final SnapshotDescription desc, - final ForeignExceptionSnare monitor, final MonitoredTask statusTask) throws IOException { + private SnapshotManifest(final Configuration conf, final FileSystem rootFs, final Path workingDir, + final SnapshotDescription desc, final ForeignExceptionSnare monitor, + final MonitoredTask statusTask) throws IOException { this.monitor = monitor; this.desc = desc; this.workingDir = workingDir; @@ -116,49 +112,40 @@ public final class SnapshotManifest { } /** - * Return a SnapshotManifest instance, used for writing a snapshot. - * - * There are two usage pattern: - * - The Master will create a manifest, add the descriptor, offline regions - * and consolidate the snapshot by writing all the pending stuff on-disk. - * manifest = SnapshotManifest.create(...) - * manifest.addRegion(tableDir, hri) - * manifest.consolidate() - * - The RegionServer will create a single region manifest - * manifest = SnapshotManifest.create(...) - * manifest.addRegion(region) + * Return a SnapshotManifest instance, used for writing a snapshot. There are two usage pattern: - + * The Master will create a manifest, add the descriptor, offline regions and consolidate the + * snapshot by writing all the pending stuff on-disk. 
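// Illustrative sketch, not part of the patch, of the master-side writing pattern the class
// javadoc below describes: create a manifest in the working directory, add the table
// descriptor and the offline regions from disk, then consolidate the pending state. The
// variables (conf, rootFs, workingDir, snapshotDesc, monitor, htd, tableDir, regions) are
// assumed to exist in the caller.
SnapshotManifest manifest =
  SnapshotManifest.create(conf, rootFs, workingDir, snapshotDesc, monitor);
manifest.addTableDescriptor(htd);
for (RegionInfo hri : regions) {
  manifest.addRegion(tableDir, hri); // offline snapshot: region layout is read from disk
}
manifest.consolidate();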
manifest = SnapshotManifest.create(...) + * manifest.addRegion(tableDir, hri) manifest.consolidate() - The RegionServer will create a + * single region manifest manifest = SnapshotManifest.create(...) manifest.addRegion(region) */ public static SnapshotManifest create(final Configuration conf, final FileSystem fs, - final Path workingDir, final SnapshotDescription desc, - final ForeignExceptionSnare monitor) throws IOException { + final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor) + throws IOException { return create(conf, fs, workingDir, desc, monitor, null); } public static SnapshotManifest create(final Configuration conf, final FileSystem fs, - final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor, - final MonitoredTask statusTask) throws IOException { + final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor, + final MonitoredTask statusTask) throws IOException { return new SnapshotManifest(conf, fs, workingDir, desc, monitor, statusTask); } /** * Return a SnapshotManifest instance with the information already loaded in-memory. - * SnapshotManifest manifest = SnapshotManifest.open(...) - * TableDescriptor htd = manifest.getTableDescriptor() - * for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests()) - * hri = regionManifest.getRegionInfo() - * for (regionManifest.getFamilyFiles()) - * ... + * SnapshotManifest manifest = SnapshotManifest.open(...) TableDescriptor htd = + * manifest.getTableDescriptor() for (SnapshotRegionManifest regionManifest: + * manifest.getRegionManifests()) hri = regionManifest.getRegionInfo() for + * (regionManifest.getFamilyFiles()) ... */ public static SnapshotManifest open(final Configuration conf, final FileSystem fs, - final Path workingDir, final SnapshotDescription desc) throws IOException { + final Path workingDir, final SnapshotDescription desc) throws IOException { SnapshotManifest manifest = new SnapshotManifest(conf, fs, workingDir, desc, null, null); manifest.load(); return manifest; } - /** * Add the table descriptor to the snapshot manifest */ @@ -168,9 +155,11 @@ public final class SnapshotManifest { interface RegionVisitor { TRegion regionOpen(final RegionInfo regionInfo) throws IOException; + void regionClose(final TRegion region) throws IOException; TFamily familyOpen(final TRegion region, final byte[] familyName) throws IOException; + void familyClose(final TRegion region, final TFamily family) throws IOException; void storeFile(final TRegion region, final TFamily family, final StoreFileInfo storeFile) @@ -184,8 +173,8 @@ public final class SnapshotManifest { case SnapshotManifestV2.DESCRIPTOR_VERSION: return new SnapshotManifestV2.ManifestBuilder(conf, rootFs, workingDir); default: - throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), - ProtobufUtil.createSnapshotDesc(desc)); + throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), + ProtobufUtil.createSnapshotDesc(desc)); } } @@ -256,13 +245,13 @@ public final class SnapshotManifest { for (HStore store : region.getStores()) { // 2.1. 
build the snapshot reference for the store - Object familyData = visitor.familyOpen(regionData, - store.getColumnFamilyDescriptor().getName()); + Object familyData = + visitor.familyOpen(regionData, store.getColumnFamilyDescriptor().getName()); monitor.rethrowException(); List storeFiles = new ArrayList<>(store.getStorefiles()); if (LOG.isDebugEnabled()) { - LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); } // 2.2. iterate through all the store's files and create "references". @@ -271,8 +260,8 @@ public final class SnapshotManifest { monitor.rethrowException(); // create "reference" to this store file. - LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath() + - " for snapshot=" + snapshotName); + LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath() + + " for snapshot=" + snapshotName); visitor.storeFile(regionData, familyData, storeFile.getFileInfo()); } visitor.familyClose(regionData, familyData); @@ -281,8 +270,8 @@ public final class SnapshotManifest { } /** - * Creates a 'manifest' for the specified region, by reading directly from the disk. - * This is used by the "offline snapshot" when the table is disabled. + * Creates a 'manifest' for the specified region, by reading directly from the disk. This is used + * by the "offline snapshot" when the table is disabled. */ public void addRegion(final Path tableDir, final RegionInfo regionInfo) throws IOException { // Get the ManifestBuilder/RegionVisitor @@ -353,7 +342,7 @@ public final class SnapshotManifest { } private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object familyData, - Collection storeFiles, boolean isMob) throws IOException { + Collection storeFiles, boolean isMob) throws IOException { final String fileType = isMob ? "mob file" : "hfile"; if (LOG.isDebugEnabled()) { @@ -362,11 +351,11 @@ public final class SnapshotManifest { int i = 0; int sz = storeFiles.size(); - for (StoreFileInfo storeFile: storeFiles) { + for (StoreFileInfo storeFile : storeFiles) { monitor.rethrowException(); - LOG.debug(String.format("Adding reference for %s (%d/%d): %s", - fileType, ++i, sz, storeFile.getPath())); + LOG.debug(String.format("Adding reference for %s (%d/%d): %s", fileType, ++i, sz, + storeFile.getPath())); // create "reference" to this store file. visitor.storeFile(regionData, familyData, storeFile); @@ -374,11 +363,9 @@ public final class SnapshotManifest { } /** - * Load the information in the SnapshotManifest. Called by SnapshotManifest.open() - * - * If the format is v2 and there is no data-manifest, means that we are loading an - * in-progress snapshot. Since we support rolling-upgrades, we loook for v1 and v2 - * regions format. + * Load the information in the SnapshotManifest. Called by SnapshotManifest.open() If the format + * is v2 and there is no data-manifest, means that we are loading an in-progress snapshot. Since + * we support rolling-upgrades, we loook for v1 and v2 regions format. 
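// Illustrative sketch, not part of the patch, of the read path described in the open()
// javadoc above: open() loads the manifest into the current in-memory format regardless of
// the on-disk version, after which the table descriptor and region manifests can be walked.
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
TableDescriptor htd = manifest.getTableDescriptor();
for (SnapshotRegionManifest regionManifest : manifest.getRegionManifests()) {
  for (SnapshotRegionManifest.FamilyFiles familyFiles : regionManifest.getFamilyFilesList()) {
    // familyFiles.getStoreFilesList() enumerates the store files referenced by this family
  }
}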
*/ private void load() throws IOException { switch (getSnapshotFormat(desc)) { @@ -404,13 +391,13 @@ public final class SnapshotManifest { List v1Regions, v2Regions; ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { - v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, - workingDir, desc); - v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, - workingDir, desc, manifestSizeLimit); + v1Regions = + SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); + v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, workingDir, + desc, manifestSizeLimit); } catch (InvalidProtocolBufferException e) { - throw new CorruptedSnapshotException("unable to parse region manifest " + - e.getMessage(), e); + throw new CorruptedSnapshotException( + "unable to parse region manifest " + e.getMessage(), e); } finally { tpool.shutdown(); } @@ -427,8 +414,8 @@ public final class SnapshotManifest { break; } default: - throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), - ProtobufUtil.createSnapshotDesc(desc)); + throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), + ProtobufUtil.createSnapshotDesc(desc)); } } @@ -467,14 +454,14 @@ public final class SnapshotManifest { } /** - * Get all the Region Manifest from the snapshot. - * This is an helper to get a map with the region encoded name + * Get all the Region Manifest from the snapshot. This is an helper to get a map with the region + * encoded name */ public Map getRegionManifestsMap() { if (regionManifests == null || regionManifests.isEmpty()) return null; HashMap regionsMap = new HashMap<>(regionManifests.size()); - for (SnapshotRegionManifest manifest: regionManifests) { + for (SnapshotRegionManifest manifest : regionManifests) { String regionName = getRegionNameFromManifest(manifest); regionsMap.put(regionName, manifest); } @@ -486,7 +473,7 @@ public final class SnapshotManifest { LOG.info("Using old Snapshot Format"); // write a copy of descriptor to the snapshot directory FSTableDescriptors.createTableDescriptorForTableDirectory(workingDirFs, workingDir, htd, - false); + false); } else { LOG.debug("Convert to Single Snapshot Manifest for {}", this.desc.getName()); convertToV2SingleManifest(); @@ -494,8 +481,8 @@ public final class SnapshotManifest { } /* - * In case of rolling-upgrade, we try to read all the formats and build - * the snapshot with the latest format. + * In case of rolling-upgrade, we try to read all the formats and build the snapshot with the + * latest format. 
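// Illustrative sketch, not part of the patch: getRegionManifestsMap() keys the region
// manifests by encoded region name, which makes single-region lookups straightforward
// (the map may be null when the snapshot has no regions).
Map<String, SnapshotRegionManifest> byRegion = manifest.getRegionManifestsMap();
if (byRegion != null) {
  SnapshotRegionManifest regionManifest = byRegion.get(regionInfo.getEncodedName());
}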
*/ private void convertToV2SingleManifest() throws IOException { // Try to load v1 and v2 regions @@ -503,10 +490,10 @@ public final class SnapshotManifest { ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); setStatusMsg("Loading Region manifests for " + this.desc.getName()); try { - v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, - workingDir, desc); - v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, - workingDir, desc, manifestSizeLimit); + v1Regions = + SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, workingDir, desc); + v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, workingDir, + desc, manifestSizeLimit); SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder(); dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd)); @@ -535,7 +522,7 @@ public final class SnapshotManifest { int totalDeletes = 0; ExecutorCompletionService completionService = new ExecutorCompletionService<>(tpool); if (v1Regions != null) { - for (SnapshotRegionManifest regionManifest: v1Regions) { + for (SnapshotRegionManifest regionManifest : v1Regions) { ++totalDeletes; completionService.submit(() -> { SnapshotManifestV1.deleteRegionManifest(workingDirFs, workingDir, regionManifest); @@ -544,7 +531,7 @@ public final class SnapshotManifest { } } if (v2Regions != null) { - for (SnapshotRegionManifest regionManifest: v2Regions) { + for (SnapshotRegionManifest regionManifest : v2Regions) { ++totalDeletes; completionService.submit(() -> { SnapshotManifestV2.deleteRegionManifest(workingDirFs, workingDir, regionManifest); @@ -570,9 +557,9 @@ public final class SnapshotManifest { /* * Write the SnapshotDataManifest file */ - private void writeDataManifest(final SnapshotDataManifest manifest) - throws IOException { - try (FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { + private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException { + try ( + FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { manifest.writeTo(stream); } } @@ -607,10 +594,10 @@ public final class SnapshotManifest { * Extract the region encoded name from the region manifest */ static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) { - byte[] regionName = RegionInfo.createRegionName( - ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), - manifest.getRegionInfo().getStartKey().toByteArray(), - manifest.getRegionInfo().getRegionId(), true); + byte[] regionName = + RegionInfo.createRegionName(ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), + manifest.getRegionInfo().getStartKey().toByteArray(), + manifest.getRegionInfo().getRegionId(), true); return RegionInfo.encodeRegionName(regionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java index b1eca35febf..61c366de971 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; @@ -48,13 +47,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. - * - * Snapshot v1 layout format - * - Each region in the table is represented by a directory with the .hregioninfo file - * /snapshotName/regionName/.hregioninfo - * - Each file present in the table is represented by an empty file - * /snapshotName/regionName/familyName/fileName + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. Snapshot v1 layout format - Each region in the + * table is represented by a directory with the .hregioninfo file + * /snapshotName/regionName/.hregioninfo - Each file present in the table is represented by an empty + * file /snapshotName/regionName/familyName/fileName */ @InterfaceAudience.Private public final class SnapshotManifestV1 { @@ -65,15 +61,14 @@ public final class SnapshotManifestV1 { private SnapshotManifestV1() { } - static class ManifestBuilder implements SnapshotManifest.RegionVisitor< - HRegionFileSystem, Path> { + static class ManifestBuilder implements SnapshotManifest.RegionVisitor { private final Configuration conf; private final Path snapshotDir; private final FileSystem rootFs; private final FileSystem workingDirFs; public ManifestBuilder(final Configuration conf, final FileSystem rootFs, - final Path snapshotDir) throws IOException { + final Path snapshotDir) throws IOException { this.snapshotDir = snapshotDir; this.conf = conf; this.rootFs = rootFs; @@ -82,8 +77,8 @@ public final class SnapshotManifestV1 { @Override public HRegionFileSystem regionOpen(final RegionInfo regionInfo) throws IOException { - HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf, - workingDirFs, snapshotDir, regionInfo); + HRegionFileSystem snapshotRegionFs = + HRegionFileSystem.createRegionOnFileSystem(conf, workingDirFs, snapshotDir, regionInfo); return snapshotRegionFs; } @@ -103,15 +98,15 @@ public final class SnapshotManifestV1 { @Override public void storeFile(final HRegionFileSystem region, final Path familyDir, - final StoreFileInfo storeFile) throws IOException { + final StoreFileInfo storeFile) throws IOException { Path referenceFile = new Path(familyDir, storeFile.getPath().getName()); boolean success = true; if (storeFile.isReference()) { // write the Reference object to the snapshot storeFile.getReference().write(workingDirFs, referenceFile); } else { - // create "reference" to this store file. It is intentionally an empty file -- all - // necessary information is captured by its fs location and filename. This allows us to + // create "reference" to this store file. It is intentionally an empty file -- all + // necessary information is captured by its fs location and filename. This allows us to // only figure out what needs to be done via a single nn operation (instead of having to // open and read the files as well). 
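      // Illustration of the resulting v1 layout (path components are the placeholders used in
      // the class javadoc above):
      //   /snapshotName/regionName/.hregioninfo           (the region descriptor)
      //   /snapshotName/regionName/familyName/fileName    (empty marker file, or a serialized
      //                                                    Reference when the source store file
      //                                                    is itself a reference)
      // so building the manifest never copies hfile data; only these marker entries are created.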
success = workingDirFs.createNewFile(referenceFile); @@ -123,8 +118,8 @@ public final class SnapshotManifestV1 { } static List loadRegionManifests(final Configuration conf, - final Executor executor,final FileSystem fs, final Path snapshotDir, - final SnapshotDescription desc) throws IOException { + final Executor executor, final FileSystem fs, final Path snapshotDir, + final SnapshotDescription desc) throws IOException { FileStatus[] regions = CommonFSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { @@ -134,7 +129,7 @@ public final class SnapshotManifestV1 { final ExecutorCompletionService completionService = new ExecutorCompletionService<>(executor); - for (final FileStatus region: regions) { + for (final FileStatus region : regions) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { @@ -158,15 +153,15 @@ public final class SnapshotManifestV1 { } static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, - final SnapshotRegionManifest manifest) throws IOException { + final SnapshotRegionManifest manifest) throws IOException { String regionName = SnapshotManifest.getRegionNameFromManifest(manifest); fs.delete(new Path(snapshotDir, regionName), true); } - static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, - final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException { - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, - tableDir, regionInfo, true); + static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, final FileSystem fs, + final Path tableDir, final RegionInfo regionInfo) throws IOException { + HRegionFileSystem regionFs = + HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, true); SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder(); // 1. dump region meta info into the snapshot directory @@ -183,7 +178,7 @@ public final class SnapshotManifestV1 { // files/batch, far more than the number of store files under a single column family. Collection familyNames = regionFs.getFamilies(); if (familyNames != null) { - for (String familyName: familyNames) { + for (String familyName : familyNames) { Collection storeFiles = regionFs.getStoreFiles(familyName, false); if (storeFiles == null) { LOG.debug("No files under family: " + familyName); @@ -192,21 +187,21 @@ public final class SnapshotManifestV1 { // 2.1. build the snapshot reference for the store SnapshotRegionManifest.FamilyFiles.Builder family = - SnapshotRegionManifest.FamilyFiles.newBuilder(); + SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(familyName))); if (LOG.isDebugEnabled()) { - LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); } // 2.2. iterate through all the store's files and create "references". int i = 0; int sz = storeFiles.size(); - for (StoreFileInfo storeFile: storeFiles) { + for (StoreFileInfo storeFile : storeFiles) { // create "reference" to this store file. 
- LOG.debug("Adding reference for file ("+ (++i) +"/" + sz + "): " + storeFile.getPath()); + LOG.debug("Adding reference for file (" + (++i) + "/" + sz + "): " + storeFile.getPath()); SnapshotRegionManifest.StoreFile.Builder sfManifest = - SnapshotRegionManifest.StoreFile.newBuilder(); + SnapshotRegionManifest.StoreFile.newBuilder(); sfManifest.setName(storeFile.getPath().getName()); family.addStoreFiles(sfManifest.build()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index ae914f69b5c..90b7f8a1c5e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; @@ -49,12 +48,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. - * - * Snapshot v2 layout format - * - Single Manifest file containing all the information of regions - * - In the online-snapshot case each region will write a "region manifest" - * /snapshotName/manifest.regionName + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. Snapshot v2 layout format - Single Manifest + * file containing all the information of regions - In the online-snapshot case each region will + * write a "region manifest" /snapshotName/manifest.regionName */ @InterfaceAudience.Private public final class SnapshotManifestV2 { @@ -64,16 +60,17 @@ public final class SnapshotManifestV2 { public static final String SNAPSHOT_MANIFEST_PREFIX = "region-manifest."; - private SnapshotManifestV2() {} + private SnapshotManifestV2() { + } static class ManifestBuilder implements SnapshotManifest.RegionVisitor< - SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> { + SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> { private final Configuration conf; private final Path snapshotDir; private final FileSystem rootFs; public ManifestBuilder(final Configuration conf, final FileSystem rootFs, - final Path snapshotDir) { + final Path snapshotDir) { this.snapshotDir = snapshotDir; this.conf = conf; this.rootFs = rootFs; @@ -93,8 +90,8 @@ public final class SnapshotManifestV2 { FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf); if (workingDirFs.exists(snapshotDir)) { SnapshotRegionManifest manifest = region.build(); - try (FSDataOutputStream stream = workingDirFs.create( - getRegionManifestPath(snapshotDir, manifest))) { + try (FSDataOutputStream stream = + workingDirFs.create(getRegionManifestPath(snapshotDir, manifest))) { manifest.writeTo(stream); } } else { @@ -103,26 +100,26 @@ public final class SnapshotManifestV2 { } @Override - public SnapshotRegionManifest.FamilyFiles.Builder familyOpen( - final SnapshotRegionManifest.Builder region, final byte[] familyName) { + public SnapshotRegionManifest.FamilyFiles.Builder + familyOpen(final SnapshotRegionManifest.Builder 
region, final byte[] familyName) { SnapshotRegionManifest.FamilyFiles.Builder family = - SnapshotRegionManifest.FamilyFiles.newBuilder(); + SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(familyName)); return family; } @Override public void familyClose(final SnapshotRegionManifest.Builder region, - final SnapshotRegionManifest.FamilyFiles.Builder family) { + final SnapshotRegionManifest.FamilyFiles.Builder family) { region.addFamilyFiles(family.build()); } @Override public void storeFile(final SnapshotRegionManifest.Builder region, - final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) - throws IOException { + final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) + throws IOException { SnapshotRegionManifest.StoreFile.Builder sfManifest = - SnapshotRegionManifest.StoreFile.newBuilder(); + SnapshotRegionManifest.StoreFile.newBuilder(); sfManifest.setName(storeFile.getPath().getName()); if (storeFile.isReference()) { sfManifest.setReference(storeFile.getReference().convert()); @@ -137,8 +134,8 @@ public final class SnapshotManifestV2 { } static List loadRegionManifests(final Configuration conf, - final Executor executor, final FileSystem fs, final Path snapshotDir, - final SnapshotDescription desc, final int manifestSizeLimit) throws IOException { + final Executor executor, final FileSystem fs, final Path snapshotDir, + final SnapshotDescription desc, final int manifestSizeLimit) throws IOException { FileStatus[] manifestFiles = CommonFSUtils.listStatus(fs, snapshotDir, new PathFilter() { @Override public boolean accept(Path path) { @@ -150,7 +147,7 @@ public final class SnapshotManifestV2 { final ExecutorCompletionService completionService = new ExecutorCompletionService<>(executor); - for (final FileStatus st: manifestFiles) { + for (final FileStatus st : manifestFiles) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { @@ -173,8 +170,8 @@ public final class SnapshotManifestV2 { } catch (ExecutionException e) { Throwable t = e.getCause(); - if(t instanceof InvalidProtocolBufferException) { - throw (InvalidProtocolBufferException)t; + if (t instanceof InvalidProtocolBufferException) { + throw (InvalidProtocolBufferException) t; } else { throw new IOException("ExecutionException", e.getCause()); } @@ -183,12 +180,12 @@ public final class SnapshotManifestV2 { } static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, - final SnapshotRegionManifest manifest) throws IOException { + final SnapshotRegionManifest manifest) throws IOException { fs.delete(getRegionManifestPath(snapshotDir, manifest), true); } private static Path getRegionManifestPath(final Path snapshotDir, - final SnapshotRegionManifest manifest) { + final SnapshotRegionManifest manifest) { String regionName = SnapshotManifest.getRegionNameFromManifest(manifest); return new Path(snapshotDir, SNAPSHOT_MANIFEST_PREFIX + regionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java index b6d3c489366..1c3acdbd205 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -28,7 +27,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -42,6 +40,7 @@ import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; @@ -55,7 +54,7 @@ public final class SnapshotReferenceUtil { public interface StoreFileVisitor { void storeFile(final RegionInfo regionInfo, final String familyName, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException; + final SnapshotRegionManifest.StoreFile storeFile) throws IOException; } public interface SnapshotVisitor extends StoreFileVisitor { @@ -67,49 +66,45 @@ public final class SnapshotReferenceUtil { /** * Iterate over the snapshot store files - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory - * @param visitor callback object to get the referenced files + * @param visitor callback object to get the referenced files * @throws IOException if an error occurred while scanning the directory */ public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotVisitor visitor) - throws IOException { + final Path snapshotDir, final SnapshotVisitor visitor) throws IOException { SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); visitReferencedFiles(conf, fs, snapshotDir, desc, visitor); } /** * Iterate over the snapshot store files, restored.edits and logs - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. 
+ * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory - * @param desc the {@link SnapshotDescription} of the snapshot to verify - * @param visitor callback object to get the referenced files + * @param desc the {@link SnapshotDescription} of the snapshot to verify + * @param visitor callback object to get the referenced files * @throws IOException if an error occurred while scanning the directory */ public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription desc, final SnapshotVisitor visitor) - throws IOException { + final Path snapshotDir, final SnapshotDescription desc, final SnapshotVisitor visitor) + throws IOException { visitTableStoreFiles(conf, fs, snapshotDir, desc, visitor); } - /**© - * Iterate over the snapshot store files - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + /** + * © Iterate over the snapshot store files + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory - * @param desc the {@link SnapshotDescription} of the snapshot to verify - * @param visitor callback object to get the store files + * @param desc the {@link SnapshotDescription} of the snapshot to verify + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ static void visitTableStoreFiles(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor) - throws IOException { + final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor) + throws IOException { SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc); List regionManifests = manifest.getRegionManifests(); if (regionManifests == null || regionManifests.isEmpty()) { @@ -117,24 +112,23 @@ public final class SnapshotReferenceUtil { return; } - for (SnapshotRegionManifest regionManifest: regionManifests) { + for (SnapshotRegionManifest regionManifest : regionManifests) { visitRegionStoreFiles(regionManifest, visitor); } } /** * Iterate over the snapshot store files in the specified region - * * @param manifest snapshot manifest to inspect - * @param visitor callback object to get the store files + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ static void visitRegionStoreFiles(final SnapshotRegionManifest manifest, - final StoreFileVisitor visitor) throws IOException { + final StoreFileVisitor visitor) throws IOException { RegionInfo regionInfo = ProtobufUtil.toRegionInfo(manifest.getRegionInfo()); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { String familyName = familyFiles.getFamilyName().toStringUtf8(); - for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { + for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) { visitor.storeFile(regionInfo, familyName, storeFile); } } @@ -142,45 +136,43 @@ public final class SnapshotReferenceUtil { /** * Verify the validity of the snapshot - * - * @param conf The current {@link Configuration} instance. 
- * @param fs {@link FileSystem} - * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} + * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to verify * @throws CorruptedSnapshotException if the snapshot is corrupted - * @throws IOException if an error occurred while scanning the directory + * @throws IOException if an error occurred while scanning the directory */ public static void verifySnapshot(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { + final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); verifySnapshot(conf, fs, manifest); } /** * Verify the validity of the snapshot - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param manifest snapshot manifest to inspect * @throws CorruptedSnapshotException if the snapshot is corrupted - * @throws IOException if an error occurred while scanning the directory + * @throws IOException if an error occurred while scanning the directory */ public static void verifySnapshot(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest) throws IOException { + final SnapshotManifest manifest) throws IOException { final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription(); final Path snapshotDir = manifest.getSnapshotDir(); concurrentVisitReferencedFiles(conf, fs, manifest, "VerifySnapshot", new StoreFileVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { verifyStoreFile(conf, fs, snapshotDir, snapshotDesc, regionInfo, family, storeFile); } }); } public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest, final String desc, final StoreFileVisitor visitor) - throws IOException { + final SnapshotManifest manifest, final String desc, final StoreFileVisitor visitor) + throws IOException { final Path snapshotDir = manifest.getSnapshotDir(); List regionManifests = manifest.getRegionManifests(); @@ -199,8 +191,8 @@ public final class SnapshotReferenceUtil { } public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest, final ExecutorService exec, final StoreFileVisitor visitor) - throws IOException { + final SnapshotManifest manifest, final ExecutorService exec, final StoreFileVisitor visitor) + throws IOException { final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription(); final Path snapshotDir = manifest.getSnapshotDir(); @@ -214,7 +206,8 @@ public final class SnapshotReferenceUtil { for (final SnapshotRegionManifest regionManifest : regionManifests) { completionService.submit(new Callable() { - @Override public Void call() throws IOException { + @Override + public Void call() throws IOException { visitRegionStoreFiles(regionManifest, visitor); return null; } @@ -229,7 +222,7 @@ public final class 
SnapshotReferenceUtil { } catch (ExecutionException e) { if (e.getCause() instanceof CorruptedSnapshotException) { throw new CorruptedSnapshotException(e.getCause().getMessage(), - ProtobufUtil.createSnapshotDesc(snapshotDesc)); + ProtobufUtil.createSnapshotDesc(snapshotDesc)); } else { throw new IOException(e.getCause()); } @@ -238,20 +231,19 @@ public final class SnapshotReferenceUtil { /** * Verify the validity of the snapshot store file - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify - * @param snapshot the {@link SnapshotDescription} of the snapshot to verify - * @param regionInfo {@link RegionInfo} of the region that contains the store file - * @param family family that contains the store file - * @param storeFile the store file to verify + * @param snapshot the {@link SnapshotDescription} of the snapshot to verify + * @param regionInfo {@link RegionInfo} of the region that contains the store file + * @param family family that contains the store file + * @param storeFile the store file to verify * @throws CorruptedSnapshotException if the snapshot is corrupted - * @throws IOException if an error occurred while scanning the directory + * @throws IOException if an error occurred while scanning the directory */ private static void verifyStoreFile(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription snapshot, final RegionInfo regionInfo, - final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final Path snapshotDir, final SnapshotDescription snapshot, final RegionInfo regionInfo, + final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { TableName table = TableName.valueOf(snapshot.getTable()); String fileName = storeFile.getName(); @@ -264,8 +256,8 @@ public final class SnapshotReferenceUtil { refPath = HFileLink.createPath(table, refRegion, family, refPath.getName()); if (!HFileLink.buildFromHFileLinkPattern(conf, refPath).exists(fs)) { throw new CorruptedSnapshotException( - "Missing parent hfile for: " + fileName + " path=" + refPath, - ProtobufUtil.createSnapshotDesc(snapshot)); + "Missing parent hfile for: " + fileName + " path=" + refPath, + ProtobufUtil.createSnapshotDesc(snapshot)); } if (storeFile.hasReference()) { @@ -281,8 +273,8 @@ public final class SnapshotReferenceUtil { } else if (HFileLink.isHFileLink(fileName)) { linkPath = new Path(family, fileName); } else { - linkPath = new Path(family, HFileLink.createHFileLinkName( - table, regionInfo.getEncodedName(), fileName)); + linkPath = new Path(family, + HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName)); } // check if the linked file exists (in the archive, or in the table dir) @@ -290,7 +282,7 @@ public final class SnapshotReferenceUtil { if (MobUtils.isMobRegionInfo(regionInfo)) { // for mob region link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), linkPath); + HFileArchiveUtil.getArchivePath(conf), linkPath); } else { // not mob region link = HFileLink.buildFromHFileLinkPattern(conf, linkPath); @@ -298,62 +290,57 @@ public final class SnapshotReferenceUtil { try { FileStatus fstat = link.getFileStatus(fs); if (storeFile.hasFileSize() && storeFile.getFileSize() != fstat.getLen()) { 
- String msg = "hfile: " + fileName + " size does not match with the expected one. " + - " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize(); + String msg = "hfile: " + fileName + " size does not match with the expected one. " + + " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize(); LOG.error(msg); - throw new CorruptedSnapshotException(msg, - ProtobufUtil.createSnapshotDesc(snapshot)); + throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot)); } } catch (FileNotFoundException e) { - String msg = "Can't find hfile: " + fileName + " in the real (" + - link.getOriginPath() + ") or archive (" + link.getArchivePath() - + ") directory for the primary table."; + String msg = "Can't find hfile: " + fileName + " in the real (" + link.getOriginPath() + + ") or archive (" + link.getArchivePath() + ") directory for the primary table."; LOG.error(msg); - throw new CorruptedSnapshotException(msg, - ProtobufUtil.createSnapshotDesc(snapshot)); + throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot)); } } /** * Returns the store file names in the snapshot. - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory * @throws IOException if an error occurred while scanning the directory * @return the names of hfiles in the specified snaphot */ public static Set getHFileNames(final Configuration conf, final FileSystem fs, - final Path snapshotDir) throws IOException { + final Path snapshotDir) throws IOException { SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); return getHFileNames(conf, fs, snapshotDir, desc); } /** * Returns the store file names in the snapshot. - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} - * @param snapshotDir {@link Path} to the Snapshot directory + * @param conf The current {@link Configuration} instance. 
+ * @param fs {@link FileSystem} + * @param snapshotDir {@link Path} to the Snapshot directory * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to inspect * @throws IOException if an error occurred while scanning the directory * @return the names of hfiles in the specified snaphot */ private static Set getHFileNames(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription snapshotDesc) - throws IOException { + final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { final Set names = new HashSet<>(); visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { String hfile = storeFile.getName(); if (HFileLink.isHFileLink(hfile)) { names.add(HFileLink.getReferencedHFileName(hfile)); } else if (StoreFileInfo.isReference(hfile)) { - Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path( + Path refPath = + StoreFileInfo.getReferredToFile(new Path(new Path( new Path(new Path(regionInfo.getTable().getNamespaceAsString(), - regionInfo.getTable().getQualifierAsString()), regionInfo.getEncodedName()), + regionInfo.getTable().getQualifierAsString()), regionInfo.getEncodedName()), family), hfile)); names.add(hfile); names.add(refPath.getName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java index c7fe1e24a1a..1bee3243284 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public interface BulkLoadHFiles { static final String RETRY_ON_IO_EXCEPTION = "hbase.bulkload.retries.retryOnIOException"; static final String MAX_FILES_PER_REGION_PER_FAMILY = - "hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily"; + "hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily"; static final String ASSIGN_SEQ_IDS = "hbase.mapreduce.bulkload.assign.sequenceNumbers"; static final String CREATE_TABLE_CONF_KEY = "create.table"; static final String IGNORE_UNMATCHED_CF_CONF_KEY = "ignore.unmatched.families"; @@ -82,22 +82,22 @@ public interface BulkLoadHFiles { /** * Perform a bulk load of the given directory into the given pre-existing table. - * @param tableName the table to load into + * @param tableName the table to load into * @param family2Files map of family to List of hfiles * @throws TableNotFoundException if table does not yet exist */ Map bulkLoad(TableName tableName, Map> family2Files) - throws TableNotFoundException, IOException; + throws TableNotFoundException, IOException; /** * Perform a bulk load of the given directory into the given pre-existing table. 
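// A hedged usage sketch of this interface (the table name and directory below are invented
// for illustration); the create() factory further down returns the BulkLoadHFilesTool
// implementation.
Configuration conf = HBaseConfiguration.create();
BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
TableName tableName = TableName.valueOf("demo_table");       // placeholder table
Path hfileDir = new Path("/tmp/hfileoutputformat-output");    // placeholder job output dir
loader.bulkLoad(tableName, hfileDir);  // throws TableNotFoundException if the table is missing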
* @param tableName the table to load into - * @param dir the directory that was provided as the output path of a job using - * {@code HFileOutputFormat} + * @param dir the directory that was provided as the output path of a job using + * {@code HFileOutputFormat} * @throws TableNotFoundException if table does not yet exist */ Map bulkLoad(TableName tableName, Path dir) - throws TableNotFoundException, IOException; + throws TableNotFoundException, IOException; static BulkLoadHFiles create(Configuration conf) { return new BulkLoadHFilesTool(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 795bd66986a..6450bb7e55a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,20 +44,20 @@ public class BulkLoadHFilesTool extends LoadIncrementalHFiles implements BulkLoa super(conf); } - private Map convert( - Map map) { + private Map + convert(Map map) { return map.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())); } @Override public Map bulkLoad(TableName tableName, - Map> family2Files) throws TableNotFoundException, IOException { + Map> family2Files) throws TableNotFoundException, IOException { return convert(run(family2Files, tableName)); } @Override public Map bulkLoad(TableName tableName, Path dir) - throws TableNotFoundException, IOException { + throws TableNotFoundException, IOException { return convert(run(dir, tableName)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index be67d43acfd..bd97f8466d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool; - import java.util.Map; import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; @@ -39,7 +36,6 @@ public interface Canary { /** * Run Canary in Region mode. - * * @param targets -- list of monitor tables. * @return the exit code of the Canary tool. */ @@ -47,7 +43,6 @@ public interface Canary { /** * Runs Canary in Region server mode. - * * @param targets -- list of monitor tables. * @return the exit code of the Canary tool. */ @@ -55,7 +50,6 @@ public interface Canary { /** * Runs Canary in Zookeeper mode. - * * @return the exit code of the Canary tool. 
*/ public int checkZooKeeper() throws Exception; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java index ce214a7a297..42e749f8364 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,15 +25,13 @@ import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.hbase.tmpl.tool.CanaryStatusTmpl; import org.apache.yetus.audience.InterfaceAudience; - @InterfaceAudience.Private public class CanaryStatusServlet extends HttpServlet { @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { CanaryTool.RegionStdOutSink sink = - (CanaryTool.RegionStdOutSink) getServletContext().getAttribute( - "sink"); + (CanaryTool.RegionStdOutSink) getServletContext().getAttribute("sink"); if (sink == null) { throw new ServletException( "RegionStdOutSink is null! The CanaryTool's InfoServer is not initialized correctly"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index 1f791b25b27..f1aba97cc05 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool; import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.util.Addressing.inetSocketAddress2String; + import java.io.Closeable; import java.io.IOException; import java.net.BindException; @@ -99,24 +99,18 @@ import org.apache.zookeeper.client.ConnectStringParser; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * HBase Canary Tool for "canary monitoring" of a running HBase cluster. - * - * There are three modes: + * HBase Canary Tool for "canary monitoring" of a running HBase cluster. There are three modes: *
 * <ol>
 * <li>region mode (Default): For each region, try to get one row per column family outputting
- * information on failure (ERROR) or else the latency.
- * </li>
- *
- * <li>regionserver mode: For each regionserver try to get one row from one table selected
- * randomly outputting information on failure (ERROR) or else the latency.
- * </li>
- *
+ * information on failure (ERROR) or else the latency.</li>
+ * <li>regionserver mode: For each regionserver try to get one row from one table selected randomly
+ * outputting information on failure (ERROR) or else the latency.</li>
 * <li>zookeeper mode: for each zookeeper instance, selects a znode outputting information on
- * failure (ERROR) or else the latency.
- * </li>
+ * failure (ERROR) or else the latency.</li>
 * </ol>
      */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -179,16 +173,27 @@ public class CanaryTool implements Tool, Canary { */ public interface Sink { long getReadFailureCount(); + long incReadFailureCount(); - Map getReadFailures(); + + Map getReadFailures(); + void updateReadFailures(String regionName, String serverName); + long getWriteFailureCount(); + long incWriteFailureCount(); - Map getWriteFailures(); + + Map getWriteFailures(); + void updateWriteFailures(String regionName, String serverName); + long getReadSuccessCount(); + long incReadSuccessCount(); + long getWriteSuccessCount(); + long incWriteSuccessCount(); } @@ -196,10 +201,8 @@ public class CanaryTool implements Tool, Canary { * Simple implementation of canary sink that allows plotting to a file or standard output. */ public static class StdOutSink implements Sink { - private AtomicLong readFailureCount = new AtomicLong(0), - writeFailureCount = new AtomicLong(0), - readSuccessCount = new AtomicLong(0), - writeSuccessCount = new AtomicLong(0); + private AtomicLong readFailureCount = new AtomicLong(0), writeFailureCount = new AtomicLong(0), + readSuccessCount = new AtomicLong(0), writeSuccessCount = new AtomicLong(0); private Map readFailures = new ConcurrentHashMap<>(); private Map writeFailures = new ConcurrentHashMap<>(); @@ -293,15 +296,14 @@ public class CanaryTool implements Tool, Canary { } /** - * By Region, for 'region' mode. + * By Region, for 'region' mode. */ public static class RegionStdOutSink extends StdOutSink { private Map perTableReadLatency = new HashMap<>(); private LongAdder writeLatency = new LongAdder(); private final ConcurrentMap> regionMap = new ConcurrentHashMap<>(); - private ConcurrentMap perServerFailuresCount = - new ConcurrentHashMap<>(); + private ConcurrentMap perServerFailuresCount = new ConcurrentHashMap<>(); private ConcurrentMap perTableFailuresCount = new ConcurrentHashMap<>(); public ConcurrentMap getPerServerFailuresCount() { @@ -337,21 +339,20 @@ public class CanaryTool implements Tool, Canary { public void publishReadFailure(ServerName serverName, RegionInfo region, Exception e) { incReadFailureCount(); incFailuresCountDetails(serverName, region); - LOG.error("Read from {} on serverName={} failed", - region.getRegionNameAsString(), serverName, e); + LOG.error("Read from {} on serverName={} failed", region.getRegionNameAsString(), serverName, + e); } public void publishReadFailure(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, Exception e) { + ColumnFamilyDescriptor column, Exception e) { incReadFailureCount(); incFailuresCountDetails(serverName, region); LOG.error("Read from {} on serverName={}, columnFamily={} failed", - region.getRegionNameAsString(), serverName, - column.getNameAsString(), e); + region.getRegionNameAsString(), serverName, column.getNameAsString(), e); } public void publishReadTiming(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, long msTime) { + ColumnFamilyDescriptor column, long msTime) { RegionTaskResult rtr = new RegionTaskResult(region, region.getTable(), serverName, column); rtr.setReadSuccess(); rtr.setReadLatency(msTime); @@ -360,7 +361,7 @@ public class CanaryTool implements Tool, Canary { // Note that read success count will be equal to total column family read successes. 
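      // For example (illustrative numbers): a fully successful read probe of a region with
      // three column families calls incReadSuccessCount() three times, so a single region can
      // contribute more than one to getReadSuccessCount().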
incReadSuccessCount(); LOG.info("Read from {} on {} {} in {}ms", region.getRegionNameAsString(), serverName, - column.getNameAsString(), msTime); + column.getNameAsString(), msTime); } public void publishWriteFailure(ServerName serverName, RegionInfo region, Exception e) { @@ -370,15 +371,15 @@ public class CanaryTool implements Tool, Canary { } public void publishWriteFailure(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, Exception e) { + ColumnFamilyDescriptor column, Exception e) { incWriteFailureCount(); incFailuresCountDetails(serverName, region); LOG.error("Write to {} on {} {} failed", region.getRegionNameAsString(), serverName, - column.getNameAsString(), e); + column.getNameAsString(), e); } public void publishWriteTiming(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, long msTime) { + ColumnFamilyDescriptor column, long msTime) { RegionTaskResult rtr = new RegionTaskResult(region, region.getTable(), serverName, column); rtr.setWriteSuccess(); rtr.setWriteLatency(msTime); @@ -386,8 +387,8 @@ public class CanaryTool implements Tool, Canary { rtrs.add(rtr); // Note that write success count will be equal to total column family write successes. incWriteSuccessCount(); - LOG.info("Write to {} on {} {} in {}ms", - region.getRegionNameAsString(), serverName, column.getNameAsString(), msTime); + LOG.info("Write to {} on {} {} in {}ms", region.getRegionNameAsString(), serverName, + column.getNameAsString(), msTime); } public Map getReadLatencyMap() { @@ -428,7 +429,7 @@ public class CanaryTool implements Tool, Canary { private ZookeeperStdOutSink sink; public ZookeeperTask(Connection connection, String host, String znode, int timeout, - ZookeeperStdOutSink sink) { + ZookeeperStdOutSink sink) { this.connection = connection; this.host = host; this.znode = znode; @@ -436,7 +437,8 @@ public class CanaryTool implements Tool, Canary { this.sink = sink; } - @Override public Void call() throws Exception { + @Override + public Void call() throws Exception { ZooKeeper zooKeeper = null; try { zooKeeper = new ZooKeeper(host, timeout, EmptyWatcher.instance); @@ -462,9 +464,11 @@ public class CanaryTool implements Tool, Canary { * output latency or failure. 
*/ static class RegionTask implements Callable { - public enum TaskType{ - READ, WRITE + public enum TaskType { + READ, + WRITE } + private Connection connection; private RegionInfo region; private RegionStdOutSink sink; @@ -475,8 +479,8 @@ public class CanaryTool implements Tool, Canary { private boolean readAllCF; RegionTask(Connection connection, RegionInfo region, ServerName serverName, - RegionStdOutSink sink, TaskType taskType, boolean rawScanEnabled, LongAdder rwLatency, - boolean readAllCF) { + RegionStdOutSink sink, TaskType taskType, boolean rawScanEnabled, LongAdder rwLatency, + boolean readAllCF) { this.connection = connection; this.region = region; this.serverName = serverName; @@ -600,17 +604,17 @@ public class CanaryTool implements Tool, Canary { tableDesc = table.getDescriptor(); byte[] rowToCheck = region.getStartKey(); if (rowToCheck.length == 0) { - rowToCheck = new byte[]{0x0}; + rowToCheck = new byte[] { 0x0 }; } - int writeValueSize = connection.getConfiguration() - .getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10); + int writeValueSize = + connection.getConfiguration().getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10); for (ColumnFamilyDescriptor column : tableDesc.getColumnFamilies()) { Put put = new Put(rowToCheck); byte[] value = new byte[writeValueSize]; Bytes.random(value); put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value); - LOG.debug("Writing to {} {} {} {}", - tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(), + LOG.debug("Writing to {} {} {} {}", tableDesc.getTableName(), + region.getRegionNameAsString(), column.getNameAsString(), Bytes.toStringBinary(rowToCheck)); try { long startTime = EnvironmentEdgeManager.currentTime(); @@ -632,8 +636,8 @@ public class CanaryTool implements Tool, Canary { } /** - * Run a single RegionServer Task and then exit. - * Get one row from a region on the regionserver and output latency or the failure. + * Run a single RegionServer Task and then exit. Get one row from a region on the regionserver and + * output latency or the failure. */ static class RegionServerTask implements Callable { private Connection connection; @@ -643,7 +647,7 @@ public class CanaryTool implements Tool, Canary { private AtomicLong successes; RegionServerTask(Connection connection, String serverName, RegionInfo region, - RegionServerStdOutSink sink, AtomicLong successes) { + RegionServerStdOutSink sink, AtomicLong successes) { this.connection = connection; this.serverName = serverName; this.region = region; @@ -666,9 +670,8 @@ public class CanaryTool implements Tool, Canary { table = connection.getTable(tableName); startKey = region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. 
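        // Sketch of the probe shape that follows (the rest of this method is cut off by the
        // hunk boundary below; the FirstKeyOnlyFilter and limit settings are assumptions about
        // how the probe is kept cheap):
        if (startKey.length > 0) {
          Get probe = new Get(startKey);
          probe.setCacheBlocks(false);
          probe.setFilter(new FirstKeyOnlyFilter());
          table.get(probe);
        } else {
          // empty start row: cannot Get, so Scan the first row instead
          Scan probe = new Scan();
          probe.setCaching(1);
          probe.setCacheBlocks(false);
          probe.setFilter(new FirstKeyOnlyFilter());
          probe.setLimit(1);
          try (ResultScanner scanner = table.getScanner(probe)) {
            scanner.next();
          }
        }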
- LOG.debug("Reading from {} {} {} {}", - serverName, region.getTable(), region.getRegionNameAsString(), - Bytes.toStringBinary(startKey)); + LOG.debug("Reading from {} {} {} {}", serverName, region.getTable(), + region.getRegionNameAsString(), Bytes.toStringBinary(startKey)); if (startKey.length > 0) { get = new Get(startKey); get.setCacheBlocks(false); @@ -733,8 +736,8 @@ public class CanaryTool implements Tool, Canary { private static final Logger LOG = LoggerFactory.getLogger(Canary.class); - public static final TableName DEFAULT_WRITE_TABLE_NAME = TableName.valueOf( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "canary"); + public static final TableName DEFAULT_WRITE_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "canary"); private static final String CANARY_TABLE_FAMILY_NAME = "Test"; @@ -753,32 +756,31 @@ public class CanaryTool implements Tool, Canary { private boolean zookeeperMode = false; /** - * This is a Map of table to timeout. The timeout is for reading all regions in the table; i.e. - * we aggregate time to fetch each region and it needs to be less than this value else we - * log an ERROR. + * This is a Map of table to timeout. The timeout is for reading all regions in the table; i.e. we + * aggregate time to fetch each region and it needs to be less than this value else we log an + * ERROR. */ private HashMap configuredReadTableTimeouts = new HashMap<>(); - public static final String HBASE_CANARY_REGIONSERVER_ALL_REGIONS - = "hbase.canary.regionserver_all_regions"; + public static final String HBASE_CANARY_REGIONSERVER_ALL_REGIONS = + "hbase.canary.regionserver_all_regions"; - public static final String HBASE_CANARY_REGION_WRITE_SNIFFING - = "hbase.canary.region.write.sniffing"; - public static final String HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT - = "hbase.canary.region.write.table.timeout"; - public static final String HBASE_CANARY_REGION_WRITE_TABLE_NAME - = "hbase.canary.region.write.table.name"; - public static final String HBASE_CANARY_REGION_READ_TABLE_TIMEOUT - = "hbase.canary.region.read.table.timeout"; + public static final String HBASE_CANARY_REGION_WRITE_SNIFFING = + "hbase.canary.region.write.sniffing"; + public static final String HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT = + "hbase.canary.region.write.table.timeout"; + public static final String HBASE_CANARY_REGION_WRITE_TABLE_NAME = + "hbase.canary.region.write.table.name"; + public static final String HBASE_CANARY_REGION_READ_TABLE_TIMEOUT = + "hbase.canary.region.read.table.timeout"; - public static final String HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES - = "hbase.canary.zookeeper.permitted.failures"; + public static final String HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES = + "hbase.canary.zookeeper.permitted.failures"; public static final String HBASE_CANARY_USE_REGEX = "hbase.canary.use.regex"; public static final String HBASE_CANARY_TIMEOUT = "hbase.canary.timeout"; public static final String HBASE_CANARY_FAIL_ON_ERROR = "hbase.canary.fail.on.error"; - private ExecutorService executor; // threads to retrieve data from regionservers public CanaryTool() { @@ -853,15 +855,15 @@ public class CanaryTool implements Tool, Canary { } } else if (cmd.equals("-zookeeper")) { this.zookeeperMode = true; - } else if(cmd.equals("-regionserver")) { + } else if (cmd.equals("-regionserver")) { this.regionServerMode = true; - } else if(cmd.equals("-allRegions")) { + } else if (cmd.equals("-allRegions")) { conf.setBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, true); regionServerAllRegions 
= true; - } else if(cmd.equals("-writeSniffing")) { + } else if (cmd.equals("-writeSniffing")) { writeSniffing = true; conf.setBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, true); - } else if(cmd.equals("-treatFailureAsError") || cmd.equals("-failureAsError")) { + } else if (cmd.equals("-treatFailureAsError") || cmd.equals("-failureAsError")) { conf.setBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); } else if (cmd.equals("-e")) { conf.setBoolean(HBASE_CANARY_USE_REGEX, true); @@ -880,7 +882,7 @@ public class CanaryTool implements Tool, Canary { printUsageAndExit(); } conf.setLong(HBASE_CANARY_TIMEOUT, timeout); - } else if(cmd.equals("-writeTableTimeout")) { + } else if (cmd.equals("-writeTableTimeout")) { i++; if (i == args.length) { @@ -906,8 +908,7 @@ public class CanaryTool implements Tool, Canary { } else if (cmd.equals("-f")) { i++; if (i == args.length) { - System.err - .println("-f needs a boolean value argument (true|false)."); + System.err.println("-f needs a boolean value argument (true|false)."); printUsageAndExit(); } @@ -915,8 +916,8 @@ public class CanaryTool implements Tool, Canary { } else if (cmd.equals("-readTableTimeouts")) { i++; if (i == args.length) { - System.err.println("-readTableTimeouts needs a comma-separated list of read " + - "millisecond timeouts per table (without spaces)."); + System.err.println("-readTableTimeouts needs a comma-separated list of read " + + "millisecond timeouts per table (without spaces)."); printUsageAndExit(); } readTableTimeoutsStr = args[i]; @@ -951,8 +952,7 @@ public class CanaryTool implements Tool, Canary { } if (this.zookeeperMode) { if (this.regionServerMode || regionServerAllRegions || writeSniffing) { - System.err.println("-zookeeper is exclusive and cannot be combined with " - + "other modes."); + System.err.println("-zookeeper is exclusive and cannot be combined with " + "other modes."); printUsageAndExit(); } } @@ -978,7 +978,7 @@ public class CanaryTool implements Tool, Canary { System.arraycopy(args, index, monitorTargets, 0, length); } if (interval > 0) { - //Only show the web page in daemon mode + // Only show the web page in daemon mode putUpWebUI(); } if (zookeeperMode) { @@ -1033,8 +1033,7 @@ public class CanaryTool implements Tool, Canary { currentTimeLength = EnvironmentEdgeManager.currentTime() - startTime; if (currentTimeLength > timeout) { LOG.error("The monitor is running too long (" + currentTimeLength - + ") after timeout limit:" + timeout - + " will be killed itself !!"); + + ") after timeout limit:" + timeout + " will be killed itself !!"); if (monitor.initialized) { return TIMEOUT_ERROR_EXIT_CODE; } else { @@ -1064,12 +1063,12 @@ public class CanaryTool implements Tool, Canary { } @Override - public Map getReadFailures() { + public Map getReadFailures() { return sink.getReadFailures(); } @Override - public Map getWriteFailures() { + public Map getWriteFailures() { return sink.getWriteFailures(); } @@ -1078,38 +1077,38 @@ public class CanaryTool implements Tool, Canary { "Usage: canary [OPTIONS] [ [ [ interval between checks in seconds"); - System.err.println(" -e consider table/regionserver argument as regular " + - "expression"); + System.err + .println(" -e consider table/regionserver argument as regular " + "expression"); System.err.println(" -f exit on first error; default=true"); System.err.println(" -failureAsError treat read/write failure as error"); System.err.println(" -t timeout for canary-test run; default=600000ms"); System.err.println(" -writeSniffing enable write sniffing"); System.err.println(" 
-writeTable the table used for write sniffing; default=hbase:canary"); System.err.println(" -writeTableTimeout timeout for writeTable; default=600000ms"); - System.err.println(" -readTableTimeouts =," + - "=,..."); - System.err.println(" comma-separated list of table read timeouts " + - "(no spaces);"); + System.err.println( + " -readTableTimeouts =," + "=,..."); + System.err + .println(" comma-separated list of table read timeouts " + "(no spaces);"); System.err.println(" logs 'ERROR' if takes longer. default=600000ms"); System.err.println(" -permittedZookeeperFailures Ignore first N failures attempting to "); System.err.println(" connect to individual zookeeper nodes in ensemble"); System.err.println(""); System.err.println(" -D= to assign or override configuration params"); - System.err.println(" -Dhbase.canary.read.raw.enabled= Set to enable/disable " + - "raw scan; default=false"); - System.err.println(" -Dhbase.canary.info.port=PORT_NUMBER Set for a Canary UI; " + - "default=-1 (None)"); + System.err.println(" -Dhbase.canary.read.raw.enabled= Set to enable/disable " + + "raw scan; default=false"); + System.err.println( + " -Dhbase.canary.info.port=PORT_NUMBER Set for a Canary UI; " + "default=-1 (None)"); System.err.println(""); - System.err.println("Canary runs in one of three modes: region (default), regionserver, or " + - "zookeeper."); + System.err.println( + "Canary runs in one of three modes: region (default), regionserver, or " + "zookeeper."); System.err.println("To sniff/probe all regions, pass no arguments."); System.err.println("To sniff/probe all regions of a table, pass tablename."); System.err.println("To sniff/probe regionservers, pass -regionserver, etc."); @@ -1119,14 +1118,15 @@ public class CanaryTool implements Tool, Canary { Sink getSink(Configuration configuration, Class clazz) { // In test context, this.sink might be set. Use it if non-null. For testing. - return this.sink != null? this.sink: - (Sink)ReflectionUtils.newInstance(configuration.getClass("hbase.canary.sink.class", - clazz, Sink.class)); + return this.sink != null + ? this.sink + : (Sink) ReflectionUtils + .newInstance(configuration.getClass("hbase.canary.sink.class", clazz, Sink.class)); } /** - * Canary region mode-specific data structure which stores information about each region - * to be scanned + * Canary region mode-specific data structure which stores information about each region to be + * scanned */ public static class RegionTaskResult { private RegionInfo region; @@ -1139,7 +1139,7 @@ public class CanaryTool implements Tool, Canary { private boolean writeSuccess = false; public RegionTaskResult(RegionInfo region, TableName tableName, ServerName serverName, - ColumnFamilyDescriptor column) { + ColumnFamilyDescriptor column) { this.region = region; this.tableName = tableName; this.serverName = serverName; @@ -1226,45 +1226,36 @@ public class CanaryTool implements Tool, Canary { } /** - * A Factory method for {@link Monitor}. - * Makes a RegionServerMonitor, or a ZooKeeperMonitor, or a RegionMonitor. + * A Factory method for {@link Monitor}. Makes a RegionServerMonitor, or a ZooKeeperMonitor, or a + * RegionMonitor. 
* @return a Monitor instance */ private Monitor newMonitor(final Connection connection, String[] monitorTargets) { Monitor monitor; boolean useRegExp = conf.getBoolean(HBASE_CANARY_USE_REGEX, false); - boolean regionServerAllRegions - = conf.getBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, false); - boolean failOnError - = conf.getBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); - int permittedFailures - = conf.getInt(HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES, 0); - boolean writeSniffing - = conf.getBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, false); - String writeTableName = conf.get(HBASE_CANARY_REGION_WRITE_TABLE_NAME, - DEFAULT_WRITE_TABLE_NAME.getNameAsString()); - long configuredWriteTableTimeout - = conf.getLong(HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT, DEFAULT_TIMEOUT); + boolean regionServerAllRegions = conf.getBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, false); + boolean failOnError = conf.getBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); + int permittedFailures = conf.getInt(HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES, 0); + boolean writeSniffing = conf.getBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, false); + String writeTableName = + conf.get(HBASE_CANARY_REGION_WRITE_TABLE_NAME, DEFAULT_WRITE_TABLE_NAME.getNameAsString()); + long configuredWriteTableTimeout = + conf.getLong(HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT, DEFAULT_TIMEOUT); if (this.regionServerMode) { - monitor = - new RegionServerMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), RegionServerStdOutSink.class), - this.executor, regionServerAllRegions, - failOnError, permittedFailures); + monitor = new RegionServerMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), RegionServerStdOutSink.class), this.executor, + regionServerAllRegions, failOnError, permittedFailures); } else if (this.zookeeperMode) { - monitor = - new ZookeeperMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), - this.executor, failOnError, permittedFailures); + monitor = new ZookeeperMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), this.executor, + failOnError, permittedFailures); } else { - monitor = - new RegionMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), RegionStdOutSink.class), - this.executor, writeSniffing, - TableName.valueOf(writeTableName), failOnError, configuredReadTableTimeouts, - configuredWriteTableTimeout, permittedFailures); + monitor = new RegionMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), RegionStdOutSink.class), this.executor, + writeSniffing, TableName.valueOf(writeTableName), failOnError, configuredReadTableTimeouts, + configuredWriteTableTimeout, permittedFailures); } return monitor; } @@ -1274,19 +1265,20 @@ public class CanaryTool implements Tool, Canary { for (String tT : tableTimeouts) { String[] nameTimeout = tT.split("="); if (nameTimeout.length < 2) { - throw new IllegalArgumentException("Each -readTableTimeouts argument must be of the form " + - "= (without spaces)."); + throw new IllegalArgumentException("Each -readTableTimeouts argument must be of the form " + + "= (without spaces)."); } long timeoutVal; try { timeoutVal = Long.parseLong(nameTimeout[1]); } catch (NumberFormatException e) { - throw new IllegalArgumentException("-readTableTimeouts read timeout for each table" + - " must be a numeric value argument."); + throw new 
IllegalArgumentException( + "-readTableTimeouts read timeout for each table" + " must be a numeric value argument."); } configuredReadTableTimeouts.put(nameTimeout[0], timeoutVal); } } + /** * A Monitor super-class can be extended by users */ @@ -1294,8 +1286,8 @@ public class CanaryTool implements Tool, Canary { protected Connection connection; protected Admin admin; /** - * 'Target' dependent on 'mode'. Could be Tables or RegionServers or ZNodes. - * Passed on the command-line as arguments. + * 'Target' dependent on 'mode'. Could be Tables or RegionServers or ZNodes. Passed on the + * command-line as arguments. */ protected String[] targets; protected boolean useRegExp; @@ -1320,8 +1312,10 @@ public class CanaryTool implements Tool, Canary { if (errorCode != 0) { return true; } - if (treatFailureAsError && (sink.getReadFailureCount() > allowedFailures - || sink.getWriteFailureCount() > allowedFailures)) { + if ( + treatFailureAsError && (sink.getReadFailureCount() > allowedFailures + || sink.getWriteFailureCount() > allowedFailures) + ) { LOG.error("Too many failures detected, treating failure as error, failing the Canary."); errorCode = FAILURE_EXIT_CODE; return true; @@ -1337,7 +1331,7 @@ public class CanaryTool implements Tool, Canary { } protected Monitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink, - ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { + ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { if (null == connection) { throw new IllegalArgumentException("connection shall not be null"); } @@ -1390,32 +1384,30 @@ public class CanaryTool implements Tool, Canary { private boolean readAllCF; /** - * This is a timeout per table. If read of each region in the table aggregated takes longer - * than what is configured here, we log an ERROR rather than just an INFO. + * This is a timeout per table. If read of each region in the table aggregated takes longer than + * what is configured here, we log an ERROR rather than just an INFO. 
*/ private HashMap configuredReadTableTimeouts; private long configuredWriteTableTimeout; public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName, - boolean treatFailureAsError, HashMap configuredReadTableTimeouts, - long configuredWriteTableTimeout, - long allowedFailures) { + Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName, + boolean treatFailureAsError, HashMap configuredReadTableTimeouts, + long configuredWriteTableTimeout, long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, - allowedFailures); + allowedFailures); Configuration conf = connection.getConfiguration(); this.writeSniffing = writeSniffing; this.writeTableName = writeTableName; this.writeDataTTL = - conf.getInt(HConstants.HBASE_CANARY_WRITE_DATA_TTL_KEY, DEFAULT_WRITE_DATA_TTL); + conf.getInt(HConstants.HBASE_CANARY_WRITE_DATA_TTL_KEY, DEFAULT_WRITE_DATA_TTL); this.regionsLowerLimit = - conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f); + conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f); this.regionsUpperLimit = - conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f); - this.checkPeriod = - conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY, - DEFAULT_WRITE_TABLE_CHECK_PERIOD); + conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f); + this.checkPeriod = conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY, + DEFAULT_WRITE_TABLE_CHECK_PERIOD); this.rawScanEnabled = conf.getBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, false); this.configuredReadTableTimeouts = new HashMap<>(configuredReadTableTimeouts); this.configuredWriteTableTimeout = configuredWriteTableTimeout; @@ -1440,10 +1432,12 @@ public class CanaryTool implements Tool, Canary { String[] tables = generateMonitorTables(this.targets); // Check to see that each table name passed in the -readTableTimeouts argument is also // passed as a monitor target. - if (!new HashSet<>(Arrays.asList(tables)). - containsAll(this.configuredReadTableTimeouts.keySet())) { - LOG.error("-readTableTimeouts can only specify read timeouts for monitor targets " + - "passed via command line."); + if ( + !new HashSet<>(Arrays.asList(tables)) + .containsAll(this.configuredReadTableTimeouts.keySet()) + ) { + LOG.error("-readTableTimeouts can only specify read timeouts for monitor targets " + + "passed via command line."); this.errorCode = USAGE_EXIT_CODE; return; } @@ -1470,8 +1464,8 @@ public class CanaryTool implements Tool, Canary { regionSink.initializeWriteLatency(); LongAdder writeTableLatency = regionSink.getWriteLatency(); taskFutures - .addAll(CanaryTool.sniff(admin, regionSink, admin.getDescriptor(writeTableName), - executor, TaskType.WRITE, this.rawScanEnabled, writeTableLatency, readAllCF)); + .addAll(CanaryTool.sniff(admin, regionSink, admin.getDescriptor(writeTableName), + executor, TaskType.WRITE, this.rawScanEnabled, writeTableLatency, readAllCF)); } for (Future future : taskFutures) { @@ -1488,11 +1482,11 @@ public class CanaryTool implements Tool, Canary { Long actual = actualReadTableLatency.get(tableName).longValue(); Long configured = entry.getValue(); if (actual > configured) { - LOG.error("Read operation for {} took {}ms exceeded the configured read timeout." 
+ - "(Configured read timeout {}ms.", tableName, actual, configured); + LOG.error("Read operation for {} took {}ms exceeded the configured read timeout." + + "(Configured read timeout {}ms.", tableName, actual, configured); } else { LOG.info("Read operation for {} took {}ms (Configured read timeout {}ms.", - tableName, actual, configured); + tableName, actual, configured); } } else { LOG.error("Read operation for {} failed!", tableName); @@ -1502,12 +1496,12 @@ public class CanaryTool implements Tool, Canary { String writeTableStringName = this.writeTableName.getNameAsString(); long actualWriteLatency = regionSink.getWriteLatency().longValue(); LOG.info("Write operation for {} took {}ms. Configured write timeout {}ms.", - writeTableStringName, actualWriteLatency, this.configuredWriteTableTimeout); + writeTableStringName, actualWriteLatency, this.configuredWriteTableTimeout); // Check that the writeTable write operation latency does not exceed the configured // timeout. if (actualWriteLatency > this.configuredWriteTableTimeout) { LOG.error("Write operation for {} exceeded the configured write timeout.", - writeTableStringName); + writeTableStringName); } } } catch (Exception e) { @@ -1568,14 +1562,16 @@ public class CanaryTool implements Tool, Canary { * Canary entry point to monitor all the tables. */ private List> sniff(TaskType taskType, RegionStdOutSink regionSink) - throws Exception { + throws Exception { LOG.debug("Reading list of tables"); List> taskFutures = new LinkedList<>(); - for (TableDescriptor td: admin.listTableDescriptors()) { - if (admin.tableExists(td.getTableName()) && admin.isTableEnabled(td.getTableName()) && - (!td.getTableName().equals(writeTableName))) { + for (TableDescriptor td : admin.listTableDescriptors()) { + if ( + admin.tableExists(td.getTableName()) && admin.isTableEnabled(td.getTableName()) + && (!td.getTableName().equals(writeTableName)) + ) { LongAdder readLatency = - regionSink.initializeAndGetReadLatencyForTable(td.getTableName().getNameAsString()); + regionSink.initializeAndGetReadLatencyForTable(td.getTableName().getNameAsString()); taskFutures.addAll(CanaryTool.sniff(admin, sink, td, executor, taskType, this.rawScanEnabled, readLatency, readAllCF)); } @@ -1597,17 +1593,19 @@ public class CanaryTool implements Tool, Canary { } ClusterMetrics status = - admin.getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.MASTER)); + admin.getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.MASTER)); int numberOfServers = status.getServersName().size(); if (status.getServersName().contains(status.getMasterName())) { numberOfServers -= 1; } List> pairs = - MetaTableAccessor.getTableRegionsAndLocations(connection, writeTableName); + MetaTableAccessor.getTableRegionsAndLocations(connection, writeTableName); int numberOfRegions = pairs.size(); - if (numberOfRegions < numberOfServers * regionsLowerLimit - || numberOfRegions > numberOfServers * regionsUpperLimit) { + if ( + numberOfRegions < numberOfServers * regionsLowerLimit + || numberOfRegions > numberOfServers * regionsUpperLimit + ) { admin.disableTable(writeTableName); admin.deleteTable(writeTableName); createWriteTable(numberOfServers); @@ -1623,11 +1621,11 @@ public class CanaryTool implements Tool, Canary { } private void createWriteTable(int numberOfServers) throws IOException { - int numberOfRegions = (int)(numberOfServers * regionsLowerLimit); - LOG.info("Number of live regionservers {}, pre-splitting the canary table into {} regions " + - "(current lower limit of regions per server is {} and you 
can change it with config {}).", - numberOfServers, numberOfRegions, regionsLowerLimit, - HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY); + int numberOfRegions = (int) (numberOfServers * regionsLowerLimit); + LOG.info("Number of live regionservers {}, pre-splitting the canary table into {} regions " + + "(current lower limit of regions per server is {} and you can change it with config {}).", + numberOfServers, numberOfRegions, regionsLowerLimit, + HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY); HTableDescriptor desc = new HTableDescriptor(writeTableName); HColumnDescriptor family = new HColumnDescriptor(CANARY_TABLE_FAMILY_NAME); family.setMaxVersions(1); @@ -1644,8 +1642,8 @@ public class CanaryTool implements Tool, Canary { * @throws Exception exception */ private static List> sniff(final Admin admin, final Sink sink, String tableName, - ExecutorService executor, TaskType taskType, boolean rawScanEnabled, LongAdder readLatency, - boolean readAllCF) throws Exception { + ExecutorService executor, TaskType taskType, boolean rawScanEnabled, LongAdder readLatency, + boolean readAllCF) throws Exception { LOG.debug("Checking table is enabled and getting table descriptor for table {}", tableName); if (admin.isTableEnabled(TableName.valueOf(tableName))) { return CanaryTool.sniff(admin, sink, admin.getDescriptor(TableName.valueOf(tableName)), @@ -1660,22 +1658,22 @@ public class CanaryTool implements Tool, Canary { * Loops over regions of this table, and outputs information about the state. */ private static List> sniff(final Admin admin, final Sink sink, - TableDescriptor tableDesc, ExecutorService executor, TaskType taskType, - boolean rawScanEnabled, LongAdder rwLatency, boolean readAllCF) throws Exception { + TableDescriptor tableDesc, ExecutorService executor, TaskType taskType, boolean rawScanEnabled, + LongAdder rwLatency, boolean readAllCF) throws Exception { LOG.debug("Reading list of regions for table {}", tableDesc.getTableName()); try (Table table = admin.getConnection().getTable(tableDesc.getTableName())) { List tasks = new ArrayList<>(); try (RegionLocator regionLocator = - admin.getConnection().getRegionLocator(tableDesc.getTableName())) { - for (HRegionLocation location: regionLocator.getAllRegionLocations()) { + admin.getConnection().getRegionLocator(tableDesc.getTableName())) { + for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { LOG.warn("Null location"); continue; } ServerName rs = location.getServerName(); RegionInfo region = location.getRegion(); - tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink)sink, - taskType, rawScanEnabled, rwLatency, readAllCF)); + tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, + taskType, rawScanEnabled, rwLatency, readAllCF)); Map> regionMap = ((RegionStdOutSink) sink).getRegionMap(); regionMap.put(region.getRegionNameAsString(), new ArrayList()); } @@ -1686,24 +1684,22 @@ public class CanaryTool implements Tool, Canary { } } - // monitor for zookeeper mode + // monitor for zookeeper mode private static class ZookeeperMonitor extends Monitor { private List hosts; private final String znode; private final int timeout; protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { - super(connection, monitorTargets, useRegExp, - sink, executor, treatFailureAsError, allowedFailures); + 
Sink sink, ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { + super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, + allowedFailures); Configuration configuration = connection.getConfiguration(); - znode = - configuration.get(ZOOKEEPER_ZNODE_PARENT, - DEFAULT_ZOOKEEPER_ZNODE_PARENT); - timeout = configuration - .getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + znode = configuration.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT); + timeout = + configuration.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); ConnectStringParser parser = - new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration)); + new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration)); hosts = Lists.newArrayList(); for (InetSocketAddress server : parser.getServerAddresses()) { hosts.add(inetSocketAddress2String(server)); @@ -1711,12 +1707,13 @@ public class CanaryTool implements Tool, Canary { if (allowedFailures > (hosts.size() - 1) / 2) { LOG.warn( "Confirm allowable number of failed ZooKeeper nodes, as quorum will " - + "already be lost. Setting of {} failures is unexpected for {} ensemble size.", + + "already be lost. Setting of {} failures is unexpected for {} ensemble size.", allowedFailures, hosts.size()); } } - @Override public void run() { + @Override + public void run() { List tasks = Lists.newArrayList(); ZookeeperStdOutSink zkSink = null; try { @@ -1754,7 +1751,6 @@ public class CanaryTool implements Tool, Canary { } } - /** * A monitor for regionserver mode */ @@ -1762,10 +1758,10 @@ public class CanaryTool implements Tool, Canary { private boolean allRegions; public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean allRegions, - boolean treatFailureAsError, long allowedFailures) { + Sink sink, ExecutorService executor, boolean allRegions, boolean treatFailureAsError, + long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, - allowedFailures); + allowedFailures); this.allRegions = allRegions; } @@ -1818,15 +1814,15 @@ public class CanaryTool implements Tool, Canary { } if (foundTableNames.size() > 0) { - System.err.println("Cannot pass a tablename when using the -regionserver " + - "option, tablenames:" + foundTableNames.toString()); + System.err.println("Cannot pass a tablename when using the -regionserver " + + "option, tablenames:" + foundTableNames.toString()); this.errorCode = USAGE_EXIT_CODE; } return foundTableNames.isEmpty(); } private void monitorRegionServers(Map> rsAndRMap, - RegionServerStdOutSink regionServerSink) { + RegionServerStdOutSink regionServerSink) { List tasks = new ArrayList<>(); Map successMap = new HashMap<>(); for (Map.Entry> entry : rsAndRMap.entrySet()) { @@ -1837,21 +1833,15 @@ public class CanaryTool implements Tool, Canary { LOG.error("Regionserver not serving any regions - {}", serverName); } else if (this.allRegions) { for (RegionInfo region : entry.getValue()) { - tasks.add(new RegionServerTask(this.connection, - serverName, - region, - regionServerSink, - successes)); + tasks.add(new RegionServerTask(this.connection, serverName, region, regionServerSink, + successes)); } } else { // random select a region if flag not set - RegionInfo region = entry.getValue() - .get(ThreadLocalRandom.current().nextInt(entry.getValue().size())); - tasks.add(new RegionServerTask(this.connection, - 
serverName, - region, - regionServerSink, - successes)); + RegionInfo region = + entry.getValue().get(ThreadLocalRandom.current().nextInt(entry.getValue().size())); + tasks.add( + new RegionServerTask(this.connection, serverName, region, regionServerSink, successes)); } } try { @@ -1867,7 +1857,7 @@ public class CanaryTool implements Tool, Canary { for (Map.Entry> entry : rsAndRMap.entrySet()) { String serverName = entry.getKey(); LOG.info("Successfully read {} regions out of {} on regionserver {}", - successMap.get(serverName), entry.getValue().size(), serverName); + successMap.get(serverName), entry.getValue().size(), serverName); } } } catch (InterruptedException e) { @@ -1888,9 +1878,9 @@ public class CanaryTool implements Tool, Canary { LOG.debug("Reading list of tables and locations"); List tableDescs = this.admin.listTableDescriptors(); List regions = null; - for (TableDescriptor tableDesc: tableDescs) { + for (TableDescriptor tableDesc : tableDescs) { try (RegionLocator regionLocator = - this.admin.getConnection().getRegionLocator(tableDesc.getTableName())) { + this.admin.getConnection().getRegionLocator(tableDesc.getTableName())) { for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { LOG.warn("Null location"); @@ -1911,7 +1901,7 @@ public class CanaryTool implements Tool, Canary { } // get any live regionservers not serving any regions - for (ServerName rs: this.admin.getRegionServers()) { + for (ServerName rs : this.admin.getRegionServers()) { String rsName = rs.getHostname(); if (!rsAndRMap.containsKey(rsName)) { rsAndRMap.put(rsName, Collections. emptyList()); @@ -1924,8 +1914,8 @@ public class CanaryTool implements Tool, Canary { return rsAndRMap; } - private Map> doFilterRegionServerByName( - Map> fullRsAndRMap) { + private Map> + doFilterRegionServerByName(Map> fullRsAndRMap) { Map> filteredRsAndRMap = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java index c909725a616..e55c7c5737f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase.tool; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -44,7 +42,6 @@ public class DataBlockEncodingValidator extends AbstractHBaseTool { /** * Check DataBlockEncodings of column families are compatible. 
- * * @return number of column families with incompatible DataBlockEncoding * @throws IOException if a remote or network exception occurs */ @@ -54,7 +51,7 @@ public class DataBlockEncodingValidator extends AbstractHBaseTool { LOG.info("Validating Data Block Encodings"); try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { List tableDescriptors = admin.listTableDescriptors(); String encoding = ""; @@ -68,7 +65,7 @@ public class DataBlockEncodingValidator extends AbstractHBaseTool { } catch (IllegalArgumentException e) { incompatibilities++; LOG.warn("Incompatible DataBlockEncoding for table: {}, cf: {}, encoding: {}", - td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding); + td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding); } } } @@ -76,8 +73,8 @@ public class DataBlockEncodingValidator extends AbstractHBaseTool { if (incompatibilities > 0) { LOG.warn("There are {} column families with incompatible Data Block Encodings. Do not " - + "upgrade until these encodings are converted to a supported one. " - + "Check https://s.apache.org/prefixtree for instructions.", incompatibilities); + + "upgrade until these encodings are converted to a supported one. " + + "Check https://s.apache.org/prefixtree for instructions.", incompatibilities); } else { LOG.info("The used Data Block Encodings are compatible with HBase 2.0."); } @@ -87,8 +84,8 @@ public class DataBlockEncodingValidator extends AbstractHBaseTool { @Override protected void printUsage() { - String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + - PreUpgradeValidator.VALIDATE_DBE_NAME; + String header = + "hbase " + PreUpgradeValidator.TOOL_NAME + " " + PreUpgradeValidator.VALIDATE_DBE_NAME; printUsage(header, null, ""); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java index 2f648975724..475a2808827 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,11 +33,11 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -48,7 +47,6 @@ public class HFileContentValidator extends AbstractHBaseTool { /** * Check HFile contents are readable by HBase 2. - * * @param conf used configuration * @return number of HFiles corrupted HBase * @throws IOException if a remote or network exception occurs @@ -99,7 +97,7 @@ public class HFileContentValidator extends AbstractHBaseTool { } LOG.info("Change data block encodings before upgrading. 
" - + "Check https://s.apache.org/prefixtree for instructions."); + + "Check https://s.apache.org/prefixtree for instructions."); return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java index 915e02fd1c9..7842e21d076 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -86,7 +86,6 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreUtils; @@ -98,15 +97,16 @@ import org.apache.hadoop.hbase.util.FSVisitor; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; import org.apache.hbase.thirdparty.com.google.common.collect.Multimaps; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Tool to load the output of HFileOutputFormat into an existing table. 
@@ -195,40 +195,38 @@ public class LoadIncrementalHFiles extends Configured implements Tool { assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true); maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32); bulkLoadByFamily = conf.getBoolean(BulkLoadHFiles.BULK_LOAD_HFILES_BY_FAMILY, false); - nrThreads = conf.getInt("hbase.loadincremental.threads.max", - Runtime.getRuntime().availableProcessors()); + nrThreads = + conf.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors()); numRetries = new AtomicInteger(0); rpcControllerFactory = new RpcControllerFactory(conf); } private void usage() { System.err.println("Usage: " + "bin/hbase completebulkload [OPTIONS] " - + " \n" - + "Loads directory of hfiles -- a region dir or product of HFileOutputFormat -- " - + "into an hbase table.\n" - + "OPTIONS (for other -D options, see source code):\n" - + " -D" + CREATE_TABLE_CONF_KEY + "=no whether to create table; when 'no', target " - + "table must exist.\n" - + " -D" + IGNORE_UNMATCHED_CF_CONF_KEY + "=yes to ignore unmatched column families.\n" - + " -loadTable for when directory of files to load has a depth of 3; target table must " - + "exist;\n" - + " must be last of the options on command line.\n" - + "See http://hbase.apache.org/book.html#arch.bulk.load.complete.strays for " - + "documentation.\n"); + + " \n" + + "Loads directory of hfiles -- a region dir or product of HFileOutputFormat -- " + + "into an hbase table.\n" + "OPTIONS (for other -D options, see source code):\n" + " -D" + + CREATE_TABLE_CONF_KEY + "=no whether to create table; when 'no', target " + + "table must exist.\n" + " -D" + IGNORE_UNMATCHED_CF_CONF_KEY + + "=yes to ignore unmatched column families.\n" + + " -loadTable for when directory of files to load has a depth of 3; target table must " + + "exist;\n" + " must be last of the options on command line.\n" + + "See http://hbase.apache.org/book.html#arch.bulk.load.complete.strays for " + + "documentation.\n"); } /** * Prepare a collection of {@link LoadQueueItem} from list of source hfiles contained in the * passed directory and validates whether the prepared queue has all the valid table column * families in it. - * @param hfilesDir directory containing list of hfiles to be loaded into the table - * @param table table to which hfiles should be loaded - * @param queue queue which needs to be loaded into the table + * @param hfilesDir directory containing list of hfiles to be loaded into the table + * @param table table to which hfiles should be loaded + * @param queue queue which needs to be loaded into the table * @param validateHFile if true hfiles will be validated for its format * @throws IOException If any I/O or network error occurred */ public void prepareHFileQueue(Path hfilesDir, Table table, Deque queue, - boolean validateHFile) throws IOException { + boolean validateHFile) throws IOException { prepareHFileQueue(hfilesDir, table, queue, validateHFile, false); } @@ -236,15 +234,15 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * Prepare a collection of {@link LoadQueueItem} from list of source hfiles contained in the * passed directory and validates whether the prepared queue has all the valid table column * families in it. 
- * @param hfilesDir directory containing list of hfiles to be loaded into the table - * @param table table to which hfiles should be loaded - * @param queue queue which needs to be loaded into the table + * @param hfilesDir directory containing list of hfiles to be loaded into the table + * @param table table to which hfiles should be loaded + * @param queue queue which needs to be loaded into the table * @param validateHFile if true hfiles will be validated for its format - * @param silence true to ignore unmatched column families + * @param silence true to ignore unmatched column families * @throws IOException If any I/O or network error occurred */ public void prepareHFileQueue(Path hfilesDir, Table table, Deque queue, - boolean validateHFile, boolean silence) throws IOException { + boolean validateHFile, boolean silence) throws IOException { discoverLoadQueue(queue, hfilesDir, validateHFile); validateFamiliesInHFiles(table, queue, silence); } @@ -253,14 +251,14 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * Prepare a collection of {@link LoadQueueItem} from list of source hfiles contained in the * passed directory and validates whether the prepared queue has all the valid table column * families in it. - * @param map map of family to List of hfiles - * @param table table to which hfiles should be loaded - * @param queue queue which needs to be loaded into the table + * @param map map of family to List of hfiles + * @param table table to which hfiles should be loaded + * @param queue queue which needs to be loaded into the table * @param silence true to ignore unmatched column families * @throws IOException If any I/O or network error occurred */ public void prepareHFileQueue(Map> map, Table table, - Deque queue, boolean silence) throws IOException { + Deque queue, boolean silence) throws IOException { populateLoadQueue(queue, map); validateFamiliesInHFiles(table, queue, silence); } @@ -268,32 +266,32 @@ public class LoadIncrementalHFiles extends Configured implements Tool { /** * Perform a bulk load of the given directory into the given pre-existing table. This method is * not threadsafe. - * @param hfofDir the directory that was provided as the output path of a job using - * HFileOutputFormat - * @param admin the Admin - * @param table the table to load into + * @param hfofDir the directory that was provided as the output path of a job using + * HFileOutputFormat + * @param admin the Admin + * @param table the table to load into * @param regionLocator region locator * @throws TableNotFoundException if table does not yet exist */ public Map doBulkLoad(Path hfofDir, final Admin admin, Table table, - RegionLocator regionLocator) throws TableNotFoundException, IOException { + RegionLocator regionLocator) throws TableNotFoundException, IOException { return doBulkLoad(hfofDir, admin, table, regionLocator, false, false); } /** * Perform a bulk load of the given directory into the given pre-existing table. This method is * not threadsafe. 
- * @param map map of family to List of hfiles - * @param admin the Admin - * @param table the table to load into + * @param map map of family to List of hfiles + * @param admin the Admin + * @param table the table to load into * @param regionLocator region locator - * @param silence true to ignore unmatched column families - * @param copyFile always copy hfiles if true + * @param silence true to ignore unmatched column families + * @param copyFile always copy hfiles if true * @throws TableNotFoundException if table does not yet exist */ public Map doBulkLoad(Map> map, final Admin admin, - Table table, RegionLocator regionLocator, boolean silence, boolean copyFile) - throws TableNotFoundException, IOException { + Table table, RegionLocator regionLocator, boolean silence, boolean copyFile) + throws TableNotFoundException, IOException { if (!admin.isTableAvailable(regionLocator.getName())) { throw new TableNotFoundException("Table " + table.getName() + " is not currently available."); } @@ -319,18 +317,18 @@ public class LoadIncrementalHFiles extends Configured implements Tool { /** * Perform a bulk load of the given directory into the given pre-existing table. This method is * not threadsafe. - * @param hfofDir the directory that was provided as the output path of a job using - * HFileOutputFormat - * @param admin the Admin - * @param table the table to load into + * @param hfofDir the directory that was provided as the output path of a job using + * HFileOutputFormat + * @param admin the Admin + * @param table the table to load into * @param regionLocator region locator - * @param silence true to ignore unmatched column families - * @param copyFile always copy hfiles if true + * @param silence true to ignore unmatched column families + * @param copyFile always copy hfiles if true * @throws TableNotFoundException if table does not yet exist */ public Map doBulkLoad(Path hfofDir, final Admin admin, Table table, - RegionLocator regionLocator, boolean silence, boolean copyFile) - throws TableNotFoundException, IOException { + RegionLocator regionLocator, boolean silence, boolean copyFile) + throws TableNotFoundException, IOException { if (!admin.isTableAvailable(regionLocator.getName())) { throw new TableNotFoundException("Table " + table.getName() + " is not currently available."); } @@ -341,10 +339,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { */ boolean validateHFile = getConf().getBoolean("hbase.loadincremental.validate.hfile", true); if (!validateHFile) { - LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " + - "are not correct. If you fail to read data from your table after using this " + - "option, consider removing the files and bulkload again without this option. " + - "See HBASE-13985"); + LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " + + "are not correct. If you fail to read data from your table after using this " + + "option, consider removing the files and bulkload again without this option. " + + "See HBASE-13985"); } // LQI queue does not need to be threadsafe -- all operations on this queue // happen in this thread @@ -356,9 +354,9 @@ public class LoadIncrementalHFiles extends Configured implements Tool { if (queue.isEmpty()) { LOG.warn( - "Bulk load operation did not find any files to load in directory {}. " + - "Does it contain files in subdirectories that correspond to column family names?", - (hfofDir != null ? 
hfofDir.toUri().toString() : "")); + "Bulk load operation did not find any files to load in directory {}. " + + "Does it contain files in subdirectories that correspond to column family names?", + (hfofDir != null ? hfofDir.toUri().toString() : "")); return Collections.emptyMap(); } pool = createExecutorService(); @@ -376,13 +374,13 @@ public class LoadIncrementalHFiles extends Configured implements Tool { *
 * <li>LoadIncrementalHFiles#bulkLoadPhase(Table, Connection, ExecutorService, Deque, Multimap)</li>
 *
    • * - * @param table Table to which these hfiles should be loaded to - * @param conn Connection to use - * @param queue {@link LoadQueueItem} has hfiles yet to be loaded + * @param table Table to which these hfiles should be loaded to + * @param conn Connection to use + * @param queue {@link LoadQueueItem} has hfiles yet to be loaded * @param startEndKeys starting and ending row keys of the region */ public void loadHFileQueue(Table table, Connection conn, Deque queue, - Pair startEndKeys) throws IOException { + Pair startEndKeys) throws IOException { loadHFileQueue(table, conn, queue, startEndKeys, false); } @@ -393,18 +391,18 @@ public class LoadIncrementalHFiles extends Configured implements Tool { *
 * <li>LoadIncrementalHFiles#bulkLoadPhase(Table, Connection, ExecutorService, Deque, Multimap)</li>
 *
    • * - * @param table Table to which these hfiles should be loaded to - * @param conn Connection to use - * @param queue {@link LoadQueueItem} has hfiles yet to be loaded + * @param table Table to which these hfiles should be loaded to + * @param conn Connection to use + * @param queue {@link LoadQueueItem} has hfiles yet to be loaded * @param startEndKeys starting and ending row keys of the region */ public void loadHFileQueue(Table table, Connection conn, Deque queue, - Pair startEndKeys, boolean copyFile) throws IOException { + Pair startEndKeys, boolean copyFile) throws IOException { ExecutorService pool = null; try { pool = createExecutorService(); Multimap regionGroups = - groupOrSplitPhase(table, pool, queue, startEndKeys).getFirst(); + groupOrSplitPhase(table, pool, queue, startEndKeys).getFirst(); bulkLoadPhase(table, conn, pool, queue, regionGroups, copyFile, null); } finally { if (pool != null) { @@ -414,8 +412,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } private Map performBulkLoad(Admin admin, Table table, - RegionLocator regionLocator, Deque queue, ExecutorService pool, - SecureBulkLoadClient secureClient, boolean copyFile) throws IOException { + RegionLocator regionLocator, Deque queue, ExecutorService pool, + SecureBulkLoadClient secureClient, boolean copyFile) throws IOException { int count = 0; if (isSecureBulkLoadEndpointAvailable()) { @@ -433,15 +431,15 @@ public class LoadIncrementalHFiles extends Configured implements Tool { // need to reload split keys each iteration. final Pair startEndKeys = regionLocator.getStartEndKeys(); if (count != 0) { - LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + - queue.size() + " files remaining to group or split"); + LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + + queue.size() + " files remaining to group or split"); } int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10); maxRetries = Math.max(maxRetries, startEndKeys.getFirst().length + 1); if (maxRetries != 0 && count >= maxRetries) { throw new IOException( - "Retry attempted " + count + " times without completing, bailing out"); + "Retry attempted " + count + " times without completing, bailing out"); } count++; @@ -451,8 +449,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { if (!checkHFilesCountPerRegionPerFamily(regionGroups)) { // Error is logged inside checkHFilesCountPerRegionPerFamily. - throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily + - " hfiles to one family of one region"); + throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily + + " hfiles to one family of one region"); } bulkLoadPhase(table, admin.getConnection(), pool, queue, regionGroups, copyFile, @@ -464,17 +462,17 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } if (!queue.isEmpty()) { - throw new RuntimeException("Bulk load aborted with some files not yet loaded." + - "Please check log for more details."); + throw new RuntimeException( + "Bulk load aborted with some files not yet loaded." 
+ "Please check log for more details."); } return item2RegionMap; } private Map> - groupByFamilies(Collection itemsInRegion) { + groupByFamilies(Collection itemsInRegion) { Map> families2Queue = new TreeMap<>(Bytes.BYTES_COMPARATOR); itemsInRegion.forEach(item -> families2Queue - .computeIfAbsent(item.getFamily(), queue -> new ArrayList<>()).add(item)); + .computeIfAbsent(item.getFamily(), queue -> new ArrayList<>()).add(item)); return families2Queue; } @@ -486,12 +484,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { */ @InterfaceAudience.Private protected void bulkLoadPhase(Table table, Connection conn, ExecutorService pool, - Deque queue, Multimap regionGroups, - boolean copyFile, Map item2RegionMap) throws IOException { + Deque queue, Multimap regionGroups, boolean copyFile, + Map item2RegionMap) throws IOException { // atomically bulk load the groups. Set>> loadingFutures = new HashSet<>(); for (Entry> e : regionGroups.asMap() - .entrySet()) { + .entrySet()) { byte[] first = e.getKey().array(); Collection lqis = e.getValue(); if (item2RegionMap != null) { @@ -500,8 +498,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } } if (bulkLoadByFamily) { - groupByFamilies(lqis).values().forEach(familyQueue -> loadingFutures.add(pool.submit( - () -> tryAtomicRegionLoad(conn, table.getName(), first, familyQueue, copyFile)))); + groupByFamilies(lqis).values().forEach(familyQueue -> loadingFutures.add(pool + .submit(() -> tryAtomicRegionLoad(conn, table.getName(), first, familyQueue, copyFile)))); } else { loadingFutures.add( pool.submit(() -> tryAtomicRegionLoad(conn, table.getName(), first, lqis, copyFile))); @@ -539,28 +537,28 @@ public class LoadIncrementalHFiles extends Configured implements Tool { @InterfaceAudience.Private protected ClientServiceCallable buildClientServiceCallable(Connection conn, - TableName tableName, byte[] first, Collection lqis, boolean copyFile) { + TableName tableName, byte[] first, Collection lqis, boolean copyFile) { List> famPaths = - lqis.stream().map(lqi -> Pair.newPair(lqi.getFamily(), lqi.getFilePath().toString())) - .collect(Collectors.toList()); + lqis.stream().map(lqi -> Pair.newPair(lqi.getFamily(), lqi.getFilePath().toString())) + .collect(Collectors.toList()); return new ClientServiceCallable(conn, tableName, first, - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { + rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { @Override protected byte[] rpcCall() throws Exception { SecureBulkLoadClient secureClient = null; boolean success = false; try { if (LOG.isDebugEnabled()) { - LOG.debug("Going to connect to server " + getLocation() + " for row " + - Bytes.toStringBinary(getRow()) + " with hfile group " + - LoadIncrementalHFiles.this.toString(famPaths)); + LOG.debug("Going to connect to server " + getLocation() + " for row " + + Bytes.toStringBinary(getRow()) + " with hfile group " + + LoadIncrementalHFiles.this.toString(famPaths)); } byte[] regionName = getLocation().getRegionInfo().getRegionName(); try (Table table = conn.getTable(getTableName())) { secureClient = new SecureBulkLoadClient(getConf(), table); - success = secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName, - assignSeqIds, fsDelegationToken.getUserToken(), - bulkToken, copyFile, clusterIds, replicate); + success = + secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName, assignSeqIds, + fsDelegationToken.getUserToken(), bulkToken, copyFile, clusterIds, replicate); } return 
success ? regionName : null; } finally { @@ -579,7 +577,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { Path hfileOrigPath = new Path(el.getSecond()); try { hfileStagingPath = new Path(new Path(bulkToken, Bytes.toString(el.getFirst())), - hfileOrigPath.getName()); + hfileOrigPath.getName()); if (targetFs.rename(hfileStagingPath, hfileOrigPath)) { LOG.debug("Moved back file " + hfileOrigPath + " from " + hfileStagingPath); } else if (targetFs.exists(hfileStagingPath)) { @@ -598,17 +596,17 @@ public class LoadIncrementalHFiles extends Configured implements Tool { }; } - private boolean checkHFilesCountPerRegionPerFamily( - final Multimap regionGroups) { + private boolean + checkHFilesCountPerRegionPerFamily(final Multimap regionGroups) { for (Map.Entry> e : regionGroups.asMap().entrySet()) { Map filesMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (LoadQueueItem lqi : e.getValue()) { MutableInt count = filesMap.computeIfAbsent(lqi.getFamily(), k -> new MutableInt()); count.increment(); if (count.intValue() > maxFilesPerRegionPerFamily) { - LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily + - " hfiles to family " + Bytes.toStringBinary(lqi.getFamily()) + - " of region with start key " + Bytes.toStringBinary(e.getKey())); + LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily + " hfiles to family " + + Bytes.toStringBinary(lqi.getFamily()) + " of region with start key " + + Bytes.toStringBinary(e.getKey())); return false; } } @@ -617,22 +615,22 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } /** - * @param table the table to load into - * @param pool the ExecutorService - * @param queue the queue for LoadQueueItem + * @param table the table to load into + * @param pool the ExecutorService + * @param queue the queue for LoadQueueItem * @param startEndKeys start and end keys * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles. */ private Pair, Set> groupOrSplitPhase( - final Table table, ExecutorService pool, Deque queue, - final Pair startEndKeys) throws IOException { + final Table table, ExecutorService pool, Deque queue, + final Pair startEndKeys) throws IOException { // need synchronized only within this scope of this // phase because of the puts that happen in futures. Multimap rgs = HashMultimap.create(); final Multimap regionGroups = Multimaps.synchronizedMultimap(rgs); Set missingHFiles = new HashSet<>(); Pair, Set> pair = - new Pair<>(regionGroups, missingHFiles); + new Pair<>(regionGroups, missingHFiles); // drain LQIs and figure out bulk load groups Set, String>>> splittingFutures = new HashSet<>(); @@ -640,14 +638,14 @@ public class LoadIncrementalHFiles extends Configured implements Tool { final LoadQueueItem item = queue.remove(); final Callable, String>> call = - new Callable, String>>() { - @Override - public Pair, String> call() throws Exception { - Pair, String> splits = - groupOrSplit(regionGroups, item, table, startEndKeys); - return splits; - } - }; + new Callable, String>>() { + @Override + public Pair, String> call() throws Exception { + Pair, String> splits = + groupOrSplit(regionGroups, item, table, startEndKeys); + return splits; + } + }; splittingFutures.add(pool.submit(call)); } // get all the results. 
All grouping and splitting must finish before @@ -679,7 +677,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } private List splitStoreFile(final LoadQueueItem item, final Table table, - byte[] startKey, byte[] splitKey) throws IOException { + byte[] startKey, byte[] splitKey) throws IOException { Path hfilePath = item.getFilePath(); byte[] family = item.getFamily(); Path tmpDir = hfilePath.getParent(); @@ -723,8 +721,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { /** * @param startEndKeys the start/end keys of regions belong to this table, the list in ascending - * order by start key - * @param key the key need to find which region belong to + * order by start key + * @param key the key need to find which region belong to * @return region index */ private int getRegionIndex(final Pair startEndKeys, byte[] key) { @@ -738,25 +736,28 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } /** - * we can consider there is a region hole in following conditions. 1) if idx < 0,then first - * region info is lost. 2) if the endkey of a region is not equal to the startkey of the next - * region. 3) if the endkey of the last region is not empty. + * we can consider there is a region hole in following conditions. 1) if idx < 0,then first region + * info is lost. 2) if the endkey of a region is not equal to the startkey of the next region. 3) + * if the endkey of the last region is not empty. */ private void checkRegionIndexValid(int idx, final Pair startEndKeys, TableName tableName) throws IOException { if (idx < 0) { - throw new IOException("The first region info for table " + tableName + - " can't be found in hbase:meta.Please use hbck tool to fix it first."); - } else if ((idx == startEndKeys.getFirst().length - 1) && - !Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY)) { - throw new IOException("The last region info for table " + tableName + - " can't be found in hbase:meta.Please use hbck tool to fix it first."); - } else if (idx + 1 < startEndKeys.getFirst().length && - !(Bytes.compareTo(startEndKeys.getSecond()[idx], - startEndKeys.getFirst()[idx + 1]) == 0)) { - throw new IOException("The endkey of one region for table " + tableName + - " is not equal to the startkey of the next region in hbase:meta." + - "Please use hbck tool to fix it first."); + throw new IOException("The first region info for table " + tableName + + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + } else if ( + (idx == startEndKeys.getFirst().length - 1) + && !Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY) + ) { + throw new IOException("The last region info for table " + tableName + + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + } else if ( + idx + 1 < startEndKeys.getFirst().length + && !(Bytes.compareTo(startEndKeys.getSecond()[idx], startEndKeys.getFirst()[idx + 1]) == 0) + ) { + throw new IOException("The endkey of one region for table " + tableName + + " is not equal to the startkey of the next region in hbase:meta." 
+ + "Please use hbck tool to fix it first."); } } @@ -770,8 +771,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { */ @InterfaceAudience.Private protected Pair, String> groupOrSplit( - Multimap regionGroups, final LoadQueueItem item, final Table table, - final Pair startEndKeys) throws IOException { + Multimap regionGroups, final LoadQueueItem item, final Table table, + final Pair startEndKeys) throws IOException { Path hfilePath = item.getFilePath(); Optional first, last; try (HFile.Reader hfr = HFile.createReader(hfilePath.getFileSystem(getConf()), hfilePath, @@ -784,7 +785,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) - + " last=" + last.map(Bytes::toStringBinary)); + + " last=" + last.map(Bytes::toStringBinary)); if (!first.isPresent() || !last.isPresent()) { assert !first.isPresent() && !last.isPresent(); // TODO what if this is due to a bad HFile? @@ -793,14 +794,14 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } if (Bytes.compareTo(first.get(), last.get()) > 0) { throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(first.get()) - + " > " + Bytes.toStringBinary(last.get())); + + " > " + Bytes.toStringBinary(last.get())); } int firstKeyRegionIdx = getRegionIndex(startEndKeys, first.get()); checkRegionIndexValid(firstKeyRegionIdx, startEndKeys, table.getName()); boolean lastKeyInRange = - Bytes.compareTo(last.get(), startEndKeys.getSecond()[firstKeyRegionIdx]) < 0 || Bytes - .equals(startEndKeys.getSecond()[firstKeyRegionIdx], HConstants.EMPTY_BYTE_ARRAY); + Bytes.compareTo(last.get(), startEndKeys.getSecond()[firstKeyRegionIdx]) < 0 + || Bytes.equals(startEndKeys.getSecond()[firstKeyRegionIdx], HConstants.EMPTY_BYTE_ARRAY); if (!lastKeyInRange) { int lastKeyRegionIdx = getRegionIndex(startEndKeys, last.get()); int splitIdx = (firstKeyRegionIdx + lastKeyRegionIdx) >>> 1; @@ -860,29 +861,27 @@ public class LoadIncrementalHFiles extends Configured implements Tool { try { Configuration conf = getConf(); byte[] region = RpcRetryingCallerFactory.instantiate(conf, null). newCaller() - .callWithRetries(serviceCallable, Integer.MAX_VALUE); + .callWithRetries(serviceCallable, Integer.MAX_VALUE); if (region == null) { - LOG.warn("Attempt to bulk load region containing " + Bytes.toStringBinary(first) + - " into table " + tableName + " with files " + lqis + - " failed. This is recoverable and they will be retried."); + LOG.warn("Attempt to bulk load region containing " + Bytes.toStringBinary(first) + + " into table " + tableName + " with files " + lqis + + " failed. This is recoverable and they will be retried."); toRetry.addAll(lqis); // return lqi's to retry } // success return toRetry; } catch (IOException e) { - LOG.error("Encountered unrecoverable error from region server, additional details: " + - serviceCallable.getExceptionMessageAdditionalDetail(), - e); - LOG.warn( - "Received a " + e.getClass().getSimpleName() - + " from region server: " - + serviceCallable.getExceptionMessageAdditionalDetail(), e); - if (getConf().getBoolean(RETRY_ON_IO_EXCEPTION, false) - && numRetries.get() < getConf().getInt( - HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)) { - LOG.warn("Will attempt to retry loading failed HFiles. 
Retry #" - + numRetries.incrementAndGet()); + LOG.error("Encountered unrecoverable error from region server, additional details: " + + serviceCallable.getExceptionMessageAdditionalDetail(), e); + LOG.warn("Received a " + e.getClass().getSimpleName() + " from region server: " + + serviceCallable.getExceptionMessageAdditionalDetail(), e); + if ( + getConf().getBoolean(RETRY_ON_IO_EXCEPTION, false) + && numRetries.get() < getConf().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + ) { + LOG.warn( + "Will attempt to retry loading failed HFiles. Retry #" + numRetries.incrementAndGet()); toRetry.addAll(lqis); return toRetry; } @@ -906,27 +905,27 @@ public class LoadIncrementalHFiles extends Configured implements Tool { @Override public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(familyName); + ColumnFamilyDescriptorBuilder.newBuilder(familyName); familyBuilders.add(builder); return builder; } @Override public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus) - throws IOException { + throws IOException { Path hfile = hfileStatus.getPath(); try (HFile.Reader reader = - HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) { + HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) { if (builder.getCompressionType() != reader.getFileContext().getCompression()) { builder.setCompressionType(reader.getFileContext().getCompression()); - LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + - " for family " + builder.getNameAsString()); + LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + + " for family " + builder.getNameAsString()); } byte[] first = reader.getFirstRowKey().get(); byte[] last = reader.getLastRowKey().get(); - LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + - Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = map.containsKey(first) ? map.get(first) : 0; @@ -941,14 +940,14 @@ public class LoadIncrementalHFiles extends Configured implements Tool { byte[][] keys = inferBoundaries(map); TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName); familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build) - .forEachOrdered(tdBuilder::setColumnFamily); + .forEachOrdered(tdBuilder::setColumnFamily); admin.createTable(tdBuilder.build(), keys); LOG.info("Table " + tableName + " is available!!"); } private void cleanup(Admin admin, Deque queue, ExecutorService pool, - SecureBulkLoadClient secureClient) throws IOException { + SecureBulkLoadClient secureClient) throws IOException { fsDelegationToken.releaseDelegationToken(); if (bulkToken != null && secureClient != null) { secureClient.cleanupBulkLoad(admin.getConnection(), bulkToken); @@ -977,16 +976,16 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * Checks whether there is any invalid family name in HFiles to be bulk loaded. 
*/ private void validateFamiliesInHFiles(Table table, Deque queue, boolean silence) - throws IOException { + throws IOException { Set familyNames = Arrays.asList(table.getDescriptor().getColumnFamilies()).stream() - .map(f -> f.getNameAsString()).collect(Collectors.toSet()); + .map(f -> f.getNameAsString()).collect(Collectors.toSet()); List unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily())) - .filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList()); + .filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList()); if (unmatchedFamilies.size() > 0) { String msg = - "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + - unmatchedFamilies + "; valid family names of table " + table.getName() + " are: " + - familyNames; + "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + + unmatchedFamilies + "; valid family names of table " + table.getName() + " are: " + + familyNames; LOG.error(msg); if (!silence) { throw new IOException(msg); @@ -1005,7 +1004,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * Walk the given directory for all HFiles, and return a Queue containing all such files. */ private void discoverLoadQueue(final Deque ret, final Path hfofDir, - final boolean validateHFile) throws IOException { + final boolean validateHFile) throws IOException { visitBulkHFiles(hfofDir.getFileSystem(getConf()), hfofDir, new BulkHFileVisitor() { @Override public byte[] bulkFamily(final byte[] familyName) { @@ -1015,10 +1014,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { @Override public void bulkHFile(final byte[] family, final FileStatus hfile) throws IOException { long length = hfile.getLen(); - if (length > getConf().getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE)) { - LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length + - " bytes can be problematic as it may lead to oversplitting."); + if ( + length + > getConf().getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE) + ) { + LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length + + " bytes can be problematic as it may lead to oversplitting."); } ret.add(new LoadQueueItem(family, hfile.getPath())); } @@ -1037,7 +1038,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * non-valid hfiles. */ private static void visitBulkHFiles(final FileSystem fs, final Path bulkDir, - final BulkHFileVisitor visitor) throws IOException { + final BulkHFileVisitor visitor) throws IOException { visitBulkHFiles(fs, bulkDir, visitor, true); } @@ -1047,7 +1048,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * 'hbase.loadincremental.validate.hfile' to false. 
*/ private static void visitBulkHFiles(FileSystem fs, Path bulkDir, - BulkHFileVisitor visitor, boolean validateHFile) throws IOException { + BulkHFileVisitor visitor, boolean validateHFile) throws IOException { FileStatus[] familyDirStatuses = fs.listStatus(bulkDir); for (FileStatus familyStat : familyDirStatuses) { if (!familyStat.isDirectory()) { @@ -1108,8 +1109,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { // Initialize a thread pool private ExecutorService createExecutorService() { ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(), - new ThreadFactoryBuilder().setNameFormat("LoadIncrementalHFiles-%1$d").build()); + new LinkedBlockingQueue<>(), + new ThreadFactoryBuilder().setNameFormat("LoadIncrementalHFiles-%1$d").build()); pool.allowCoreThreadTimeOut(true); return pool; } @@ -1119,7 +1120,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { sb.append('['); list.forEach(p -> { sb.append('{').append(Bytes.toStringBinary(p.getFirst())).append(',').append(p.getSecond()) - .append('}'); + .append('}'); }); sb.append(']'); return sb.toString(); @@ -1136,7 +1137,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { */ @InterfaceAudience.Private static void splitStoreFile(Configuration conf, Path inFile, ColumnFamilyDescriptor familyDesc, - byte[] splitKey, Path bottomOut, Path topOut) throws IOException { + byte[] splitKey, Path bottomOut, Path topOut) throws IOException { // Open reader with no block cache, and not in-memory Reference topReference = Reference.createTopReference(splitKey); Reference bottomReference = Reference.createBottomReference(splitKey); @@ -1149,7 +1150,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * Copy half of an HFile into a new HFile. 
*/ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, - Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException { + Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException { FileSystem fs = inFile.getFileSystem(conf); CacheConfig cacheConf = CacheConfig.DISABLED; HalfStoreFileReader halfReader = null; @@ -1166,12 +1167,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { Algorithm compression = familyDescriptor.getCompressionType(); BloomType bloomFilterType = familyDescriptor.getBloomFilterType(); HFileContext hFileContext = new HFileContextBuilder().withCompression(compression) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) - .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) - .build(); + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) + .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) + .build(); halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) - .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); + .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); HFileScanner scanner = halfReader.getScanner(false, false, false); scanner.seekTo(); do { @@ -1220,9 +1221,9 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } protected final Map run(Path hfofDir, TableName tableName) - throws IOException { + throws IOException { try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { if (!admin.tableExists(tableName)) { if (isCreateTable()) { createTable(tableName, hfofDir, admin); @@ -1233,39 +1234,39 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } } try (Table table = connection.getTable(tableName); - RegionLocator locator = connection.getRegionLocator(tableName)) { - return doBulkLoad(hfofDir, admin, table, locator, isSilence(), - isAlwaysCopyFiles()); + RegionLocator locator = connection.getRegionLocator(tableName)) { + return doBulkLoad(hfofDir, admin, table, locator, isSilence(), isAlwaysCopyFiles()); } } } + /** * Perform bulk load on the given table. - * @param hfofDir the directory that was provided as the output path of a job using - * HFileOutputFormat + * @param hfofDir the directory that was provided as the output path of a job using + * HFileOutputFormat * @param tableName the table to load into */ public Map run(String hfofDir, TableName tableName) - throws IOException { + throws IOException { return run(new Path(hfofDir), tableName); } /** * Perform bulk load on the given table. 
* @param family2Files map of family to List of hfiles - * @param tableName the table to load into + * @param tableName the table to load into */ public Map run(Map> family2Files, - TableName tableName) throws IOException { + TableName tableName) throws IOException { try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { if (!admin.tableExists(tableName)) { String errorMsg = format("Table '%s' does not exist.", tableName); LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } try (Table table = connection.getTable(tableName); - RegionLocator locator = connection.getRegionLocator(tableName)) { + RegionLocator locator = connection.getRegionLocator(tableName)) { return doBulkLoad(family2Files, admin, table, locator, isSilence(), isAlwaysCopyFiles()); } } @@ -1328,9 +1329,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { /** * Disables replication for these bulkloaded files. */ - public void disableReplication(){ + public void disableReplication() { this.replicate = false; } + /** * Infers region boundaries for a new table. *
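For reference, the three region-hole conditions spelled out in the checkRegionIndexValid javadoc above can be restated as a standalone check over the table's sorted (startKey, endKey) arrays. The sketch below is illustrative only and is not the method from this patch; startKeys/endKeys and the exception type are assumptions made for the example.

    import java.util.Arrays;

    // Illustrative restatement of the region-hole conditions described above:
    // startKeys/endKeys are assumed to hold the table's region boundaries in ascending
    // start-key order; idx is the region index found for an HFile's first key.
    public class RegionHoleCheckSketch {
      static void checkRegionIndexValid(int idx, byte[][] startKeys, byte[][] endKeys) {
        byte[] empty = new byte[0];
        if (idx < 0) {
          // 1) the key sorts before every region: the first region info is lost
          throw new IllegalStateException("first region info is lost");
        } else if (idx == startKeys.length - 1 && !Arrays.equals(endKeys[idx], empty)) {
          // 3) the last region's end key is not empty: the last region info is lost
          throw new IllegalStateException("last region info is lost");
        } else if (idx + 1 < startKeys.length && !Arrays.equals(endKeys[idx], startKeys[idx + 1])) {
          // 2) a region's end key does not meet the next region's start key
          throw new IllegalStateException("hole between regions " + idx + " and " + (idx + 1));
        }
      }
    }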

      diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java index 818004c272e..dcfb3878c50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.tool; import java.util.Arrays; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -44,8 +42,7 @@ import org.slf4j.LoggerFactory; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PreUpgradeValidator implements Tool { - private static final Logger LOG = LoggerFactory - .getLogger(PreUpgradeValidator.class); + private static final Logger LOG = LoggerFactory.getLogger(PreUpgradeValidator.class); public static final String TOOL_NAME = "pre-upgrade"; public static final String VALIDATE_CP_NAME = "validate-cp"; @@ -68,11 +65,10 @@ public class PreUpgradeValidator implements Tool { System.out.println("usage: hbase " + TOOL_NAME + " command ..."); System.out.println("Available commands:"); System.out.printf(" %-15s Validate co-processors are compatible with HBase%n", - VALIDATE_CP_NAME); + VALIDATE_CP_NAME); System.out.printf(" %-15s Validate DataBlockEncodings are compatible with HBase%n", - VALIDATE_DBE_NAME); - System.out.printf(" %-15s Validate HFile contents are readable%n", - VALIDATE_HFILE); + VALIDATE_DBE_NAME); + System.out.printf(" %-15s Validate HFile contents are readable%n", VALIDATE_HFILE); System.out.println("For further information, please use command -h"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java index 9311200ac93..e3df46cc9cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,29 +34,23 @@ import org.slf4j.LoggerFactory; /** *

      - * This coprocessor 'shallows' all the writes. It allows to test a pure - * write workload, going through all the communication layers. - * The reads will work as well, but they as we never write, they will always always - * return an empty structure. The WAL is also skipped. - * Obviously, the region will never be split automatically. It's up to the user - * to split and move it. + * This coprocessor 'shallows' all the writes. It allows to test a pure write workload, going + * through all the communication layers. The reads will work as well, but they as we never write, + * they will always always return an empty structure. The WAL is also skipped. Obviously, the region + * will never be split automatically. It's up to the user to split and move it. *
      *
      - * For a table created like this: - * create 'usertable', {NAME => 'f1', VERSIONS => 1} + * For a table created like this: create 'usertable', {NAME => 'f1', VERSIONS => 1} *
      *
      - * You can then add the coprocessor with this command: - * alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|' + * You can then add the coprocessor with this command: alter 'usertable', 'coprocessor' => + * '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|' *
      *
      - * And then - * put 'usertable', 'f1', 'f1', 'f1' + * And then put 'usertable', 'f1', 'f1', 'f1' *
      *
      - * scan 'usertable' - * Will return: - * 0 row(s) in 0.0050 seconds + * scan 'usertable' Will return: 0 row(s) in 0.0050 seconds *
      * TODO: It needs tests */ @@ -80,15 +73,14 @@ public class WriteSinkCoprocessor implements RegionCoprocessor, RegionObserver { @Override public void preBatchMutate(final ObserverContext c, - final MiniBatchOperationInProgress miniBatchOp) - throws IOException { + final MiniBatchOperationInProgress miniBatchOp) throws IOException { if (ops.incrementAndGet() % 20000 == 0) { LOG.info("Wrote " + ops.get() + " times in region " + regionName); } for (int i = 0; i < miniBatchOp.size(); i++) { miniBatchOp.setOperationStatus(i, - new OperationStatus(HConstants.OperationStatusCode.SUCCESS)); + new OperationStatus(HConstants.OperationStatusCode.SUCCESS)); } c.bypass(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java index 0f5d829de6b..92f419e543a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import org.apache.yetus.audience.InterfaceAudience; @@ -32,1106 +31,721 @@ public class Branch1CoprocessorMethods extends CoprocessorMethods { private void addMethods() { /* BulkLoadObserver */ - addMethod("prePrepareBulkLoad", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest"); + addMethod("prePrepareBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest"); - addMethod("preCleanupBulkLoad", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest"); + addMethod("preCleanupBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest"); /* EndpointObserver */ - addMethod("postEndpointInvocation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "com.google.protobuf.Service", - "java.lang.String", - "com.google.protobuf.Message", - "com.google.protobuf.Message.Builder"); + addMethod("postEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message", + "com.google.protobuf.Message.Builder"); - addMethod("preEndpointInvocation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "com.google.protobuf.Service", - "java.lang.String", - "com.google.protobuf.Message"); + addMethod("preEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message"); /* MasterObserver */ - addMethod("preCreateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postCreateTable", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("preDeleteTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDeleteTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDeleteTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preMove", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.ServerName", - "org.apache.hadoop.hbase.ServerName"); - - addMethod("preCreateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postCreateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postMove", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.ServerName", - "org.apache.hadoop.hbase.ServerName"); - - addMethod("postDeleteTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preTruncateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTruncateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preTruncateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTruncateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preModifyTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postModifyTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preModifyTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postModifyTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preAddColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postAddColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preAddColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postAddColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preModifyColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - 
"org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postModifyColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preModifyColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postModifyColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preDeleteColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("postDeleteColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("preDeleteColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("postDeleteColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("preEnableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postEnableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preEnableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postEnableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDisableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDisableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDisableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDisableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preAbortProcedure", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", - "long"); - - addMethod("postAbortProcedure", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preListProcedures", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postListProcedures", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preAssign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postAssign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preUnassign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "boolean"); - - addMethod("postUnassign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "boolean"); - - addMethod("preRegionOffline", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postRegionOffline", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - 
addMethod("preBalance", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postBalance", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preSetSplitOrMergeEnabled", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); - - addMethod("postSetSplitOrMergeEnabled", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); - - addMethod("preBalanceSwitch", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); - - addMethod("postBalanceSwitch", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "boolean"); - - addMethod("preShutdown", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preStopMaster", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postStartMaster", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preMasterInitialization", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preListSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("postListSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("preCloneSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postCloneSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preRestoreSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postRestoreSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preDeleteSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("postDeleteSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("preGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List"); - - addMethod("preGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - 
"java.util.List", - "java.lang.String"); - - addMethod("postGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preGetTableNames", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableNames", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.lang.String"); - - addMethod("preCreateNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("postCreateNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preDeleteNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postDeleteNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preModifyNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("postModifyNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preGetNamespaceDescriptor", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postGetNamespaceDescriptor", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preListNamespaceDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("postListNamespaceDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preTableFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTableFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetTableQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetTableQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - 
"org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetNamespaceQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetNamespaceQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preDispatchMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postDispatchMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preGetClusterStatus", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postGetClusterStatus", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.ClusterStatus"); - - addMethod("preClearDeadServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postClearDeadServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List"); - - addMethod("preMoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("preMoveTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("preMoveServersAndTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveServersAndTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.util.Set", - "java.lang.String"); - - addMethod("preAddRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postAddRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preRemoveRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postRemoveRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preRemoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set"); - - addMethod("postRemoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set"); - - addMethod("preBalanceRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postBalanceRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "boolean"); + addMethod("preCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("preDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDeleteTable", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", + "org.apache.hadoop.hbase.ServerName"); + + addMethod("preCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", + "org.apache.hadoop.hbase.ServerName"); + + addMethod("postDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", 
"org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("postDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("preDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("postDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("preEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", "long"); + + addMethod("postAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "boolean"); + + addMethod("postUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "boolean"); + + addMethod("preRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); + + addMethod("postSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + 
"boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); + + addMethod("preBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); + + addMethod("postBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", + "boolean"); + + addMethod("preShutdown", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preStopMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postStartMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preMasterInitialization", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("postListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("preCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("postDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List"); + + addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List", "java.lang.String"); + + addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List", "java.lang.String"); + + addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.lang.String"); + + addMethod("postGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.lang.String"); + + addMethod("preCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + 
"org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("postCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("postModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("postListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "java.lang.String", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "java.lang.String", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", 
"org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.ClusterStatus"); + + addMethod("preClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List"); + + addMethod("preMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("postMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("preMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("postMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("preMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.util.Set", "java.lang.String"); + + addMethod("postMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.util.Set", "java.lang.String"); + + addMethod("preAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set"); + + addMethod("postRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set"); + + addMethod("preBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "boolean"); /* RegionObserver */ - addMethod("preOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postLogReplay", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postLogReplay", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("preFlushScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "long"); + addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.KeyValueScanner", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "long"); - addMethod("preFlushScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - 
"org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); + addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.KeyValueScanner", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); - addMethod("preFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); + addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); - addMethod("preFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.StoreFile"); + addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile"); - addMethod("postFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("preCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List"); + addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List"); - addMethod("preCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - addMethod("postCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "com.google.common.collect.ImmutableList"); + addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList"); - addMethod("postCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "com.google.common.collect.ImmutableList", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - addMethod("preCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.ScanType"); + addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + 
"org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.ScanType"); - addMethod("preCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.ScanType", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.ScanType", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - addMethod("preClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); + addMethod("preClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", - "long"); + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", "long"); - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - addMethod("postCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.StoreFile"); + addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile"); - addMethod("postCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.StoreFile", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + addMethod("postCompact", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - addMethod("preSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]"); + addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]"); - addMethod("preSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preSplitBeforePONR", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "java.util.List"); + addMethod("preSplitBeforePONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "java.util.List"); - addMethod("preSplitAfterPONR", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preSplitAfterPONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("preRollBackSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postRollBackSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postCompleteSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postCompleteSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); + addMethod("postClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); - addMethod("preGetClosestRowBefore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.client.Result"); + addMethod("preGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); - addMethod("postGetClosestRowBefore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.client.Result"); + addMethod("postGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); - addMethod("preGetOp", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "java.util.List"); + addMethod("preGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "java.util.List"); - addMethod("postGetOp", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "java.util.List"); + addMethod("postGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "java.util.List"); - addMethod("preExists", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "boolean"); + addMethod("preExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "boolean"); 
- addMethod("postExists", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "boolean"); + addMethod("postExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "boolean"); - addMethod("prePut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Put", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); + addMethod("prePut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); - addMethod("postPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Put", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); + addMethod("postPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); - addMethod("preDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Delete", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); + addMethod("preDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); addMethod("prePrepareTimeStampForDeleteVersion", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Mutation", - "org.apache.hadoop.hbase.Cell", - "byte[]", - "org.apache.hadoop.hbase.client.Get"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", "byte[]", + "org.apache.hadoop.hbase.client.Get"); - addMethod("postDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Delete", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); + addMethod("postDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); - addMethod("preBatchMutate", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); + addMethod("preBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); - addMethod("postBatchMutate", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); + addMethod("postBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); - addMethod("postStartRegionOperation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region.Operation"); + addMethod("postStartRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region.Operation"); - addMethod("postCloseRegionOperation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - 
"org.apache.hadoop.hbase.regionserver.Region.Operation"); + addMethod("postCloseRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region.Operation"); - addMethod("postBatchMutateIndispensably", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", - "boolean"); + addMethod("postBatchMutateIndispensably", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", "boolean"); - addMethod("preCheckAndPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Put", - "boolean"); + addMethod("preCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); - addMethod("preCheckAndPutAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Put", - "boolean"); + addMethod("preCheckAndPutAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); - addMethod("postCheckAndPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Put", - "boolean"); + addMethod("postCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); - addMethod("preCheckAndDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); + addMethod("preCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); addMethod("preCheckAndDeleteAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", + "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); - addMethod("postCheckAndDelete", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); + addMethod("postCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); - addMethod("preIncrementColumnValue", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "long", - "boolean"); + addMethod("preIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "long", "boolean"); - addMethod("postIncrementColumnValue", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "long", - "boolean", - "long"); + addMethod("postIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "long", "boolean", "long"); - addMethod("preAppend", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append"); + addMethod("preAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append"); - addMethod("preAppendAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append"); + addMethod("preAppendAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append"); - addMethod("postAppend", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append", - "org.apache.hadoop.hbase.client.Result"); + addMethod("postAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append", "org.apache.hadoop.hbase.client.Result"); - addMethod("preIncrement", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment"); + addMethod("preIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Increment"); - addMethod("preIncrementAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment"); + addMethod("preIncrementAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Increment"); - addMethod("postIncrement", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment", - "org.apache.hadoop.hbase.client.Result"); + addMethod("postIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Increment", "org.apache.hadoop.hbase.client.Result"); - addMethod("preScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Scan", - "org.apache.hadoop.hbase.regionserver.RegionScanner"); + addMethod("preScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner"); - addMethod("preStoreScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.client.Scan", - "java.util.NavigableSet", - 
"org.apache.hadoop.hbase.regionserver.KeyValueScanner"); + addMethod("preStoreScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.client.Scan", + "java.util.NavigableSet", "org.apache.hadoop.hbase.regionserver.KeyValueScanner"); - addMethod("postScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Scan", - "org.apache.hadoop.hbase.regionserver.RegionScanner"); + addMethod("postScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner"); - addMethod("preScannerNext", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "java.util.List", - "int", - "boolean"); + addMethod("preScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); - addMethod("postScannerNext", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "java.util.List", - "int", - "boolean"); + addMethod("postScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); - addMethod("postScannerFilterRow", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "byte[]", - "int", - "short", - "boolean"); + addMethod("postScannerFilterRow", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "byte[]", "int", "short", "boolean"); - addMethod("preScannerClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); + addMethod("preScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); - addMethod("postScannerClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); + addMethod("postScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); - addMethod("preWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("preWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("postWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + 
addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("postWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("preBulkLoadHFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); + addMethod("preBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); - addMethod("preCommitStoreFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "java.util.List"); + addMethod("preCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "java.util.List"); - addMethod("postCommitStoreFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); + addMethod("postCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); - addMethod("postBulkLoadHFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "boolean"); + addMethod("postBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "boolean"); - addMethod("preStoreFileReaderOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.FileSystem", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", - "long", - "org.apache.hadoop.hbase.io.hfile.CacheConfig", - "org.apache.hadoop.hbase.io.Reference", - "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); + addMethod("preStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", + "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", + "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", + "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); - addMethod("postStoreFileReaderOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.FileSystem", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", - "long", - "org.apache.hadoop.hbase.io.hfile.CacheConfig", - "org.apache.hadoop.hbase.io.Reference", - "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); + addMethod("postStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", + "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", + "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", + "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); - addMethod("postMutationBeforeWAL", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", - "org.apache.hadoop.hbase.client.Mutation", - "org.apache.hadoop.hbase.Cell", - "org.apache.hadoop.hbase.Cell"); + addMethod("postMutationBeforeWAL", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", + "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", + "org.apache.hadoop.hbase.Cell"); - addMethod("postInstantiateDeleteTracker", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.DeleteTracker"); + addMethod("postInstantiateDeleteTracker", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.DeleteTracker"); /* RegionServerObserver */ - addMethod("preMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("preMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preStopRegionServer", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preStopRegionServer", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preMergeCommit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "java.util.List"); + addMethod("preMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "java.util.List"); - addMethod("postMergeCommit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preRollBackMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("preRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("postRollBackMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preRollWALWriterRequest", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preRollWALWriterRequest", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postRollWALWriterRequest", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postRollWALWriterRequest", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postCreateReplicationEndPoint", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.replication.ReplicationEndpoint"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.replication.ReplicationEndpoint"); - addMethod("preReplicateLogEntries", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "org.apache.hadoop.hbase.CellScanner"); + addMethod("preReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "org.apache.hadoop.hbase.CellScanner"); - addMethod("postReplicateLogEntries", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "org.apache.hadoop.hbase.CellScanner"); + addMethod("postReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "org.apache.hadoop.hbase.CellScanner"); /* WALObserver */ - addMethod("preWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("preWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("postWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("postWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - addMethod("preWALRoll", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); + addMethod("preWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); - addMethod("postWALRoll", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); + addMethod("postWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java index 60e38417135..36b98f5bcfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.util.ArrayList; import java.util.List; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -36,7 +33,7 @@ public class CoprocessorMethod { parameters = new ArrayList<>(); } - public CoprocessorMethod withParameters(String ... parameters) { + public CoprocessorMethod withParameters(String... parameters) { for (String parameter : parameters) { this.parameters.add(parameter); } @@ -44,7 +41,7 @@ public class CoprocessorMethod { return this; } - public CoprocessorMethod withParameters(Class ... parameters) { + public CoprocessorMethod withParameters(Class... parameters) { for (Class parameter : parameters) { this.parameters.add(parameter.getCanonicalName()); } @@ -60,10 +57,9 @@ public class CoprocessorMethod { return false; } - CoprocessorMethod other = (CoprocessorMethod)obj; + CoprocessorMethod other = (CoprocessorMethod) obj; - return Objects.equals(name, other.name) && - Objects.equals(parameters, other.parameters); + return Objects.equals(name, other.name) && Objects.equals(parameters, other.parameters); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java index 2e0c801b8aa..3766d901b03 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.lang.reflect.Method; import java.util.HashSet; import java.util.Set; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -32,35 +30,35 @@ public class CoprocessorMethods { methods = new HashSet<>(); } - public void addMethod(String name, String ... parameters) { + public void addMethod(String name, String... parameters) { CoprocessorMethod cpMethod = new CoprocessorMethod(name).withParameters(parameters); methods.add(cpMethod); } - public void addMethod(String name, Class ... parameters) { + public void addMethod(String name, Class... 
parameters) { CoprocessorMethod cpMethod = new CoprocessorMethod(name).withParameters(parameters); methods.add(cpMethod); } public void addMethod(Method method) { - CoprocessorMethod cpMethod = new CoprocessorMethod(method.getName()) - .withParameters(method.getParameterTypes()); + CoprocessorMethod cpMethod = + new CoprocessorMethod(method.getName()).withParameters(method.getParameterTypes()); methods.add(cpMethod); } - public boolean hasMethod(String name, String ... parameters) { + public boolean hasMethod(String name, String... parameters) { CoprocessorMethod method = new CoprocessorMethod(name).withParameters(parameters); return methods.contains(method); } - public boolean hasMethod(String name, Class ... parameters) { + public boolean hasMethod(String name, Class... parameters) { CoprocessorMethod method = new CoprocessorMethod(name).withParameters(parameters); return methods.contains(method); } public boolean hasMethod(Method method) { - CoprocessorMethod cpMethod = new CoprocessorMethod(method.getName()) - .withParameters(method.getParameterTypes()); + CoprocessorMethod cpMethod = + new CoprocessorMethod(method.getName()).withParameters(method.getParameterTypes()); return methods.contains(cpMethod); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java index 766224e5d38..45cbe8eab73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.io.IOException; @@ -56,8 +54,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class CoprocessorValidator extends AbstractHBaseTool { - private static final Logger LOG = LoggerFactory - .getLogger(CoprocessorValidator.class); + private static final Logger LOG = LoggerFactory.getLogger(CoprocessorValidator.class); private CoprocessorMethods branch1; private CoprocessorMethods current; @@ -79,11 +76,10 @@ public class CoprocessorValidator extends AbstractHBaseTool { } /** - * This classloader implementation calls {@link #resolveClass(Class)} - * method for every loaded class. It means that some extra validation will - * take place - * according to JLS. + * This classloader implementation calls {@link #resolveClass(Class)} method for every loaded + * class. It means that some extra validation will take place + * according to + * JLS. 
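The classloader described in the javadoc above can be illustrated with a short sketch (an assumption-laden stand-in, not the class this patch touches): overriding loadClass to call resolveClass forces linking, so missing or incompatible dependencies surface during validation rather than at first use.

import java.net.URL;
import java.net.URLClassLoader;

final class ResolvingClassLoaderSketch extends URLClassLoader {
  ResolvingClassLoaderSketch(URL[] urls, ClassLoader parent) {
    super(urls, parent);
  }

  @Override
  public Class<?> loadClass(String name) throws ClassNotFoundException {
    Class<?> clazz = super.loadClass(name);
    // Linking step; may surface NoClassDefFoundError and similar errors early.
    resolveClass(clazz);
    return clazz;
  }
}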
*/ private static final class ResolverUrlClassLoader extends URLClassLoader { private ResolverUrlClassLoader(URL[] urls, ClassLoader parent) { @@ -110,7 +106,7 @@ public class CoprocessorValidator extends AbstractHBaseTool { } private ResolverUrlClassLoader createClassLoader(ClassLoader parent, - org.apache.hadoop.fs.Path path) throws IOException { + org.apache.hadoop.fs.Path path) throws IOException { Path tempPath = Files.createTempFile("hbase-coprocessor-", ".jar"); org.apache.hadoop.fs.Path destination = new org.apache.hadoop.fs.Path(tempPath.toString()); @@ -125,7 +121,7 @@ public class CoprocessorValidator extends AbstractHBaseTool { } private void validate(ClassLoader classLoader, String className, - List violations) { + List violations) { LOG.debug("Validating class '{}'.", className); try { @@ -135,45 +131,45 @@ public class CoprocessorValidator extends AbstractHBaseTool { LOG.trace("Validating method '{}'.", method); if (branch1.hasMethod(method) && !current.hasMethod(method)) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.WARNING, "method '" + method + - "' was removed from new coprocessor API, so it won't be called by HBase"); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.WARNING, "method '" + method + + "' was removed from new coprocessor API, so it won't be called by HBase"); violations.add(violation); } } } catch (ClassNotFoundException e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, "no such class", e); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.ERROR, "no such class", e); violations.add(violation); } catch (RuntimeException | Error e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, "could not validate class", e); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.ERROR, "could not validate class", e); violations.add(violation); } } public void validateClasses(ClassLoader classLoader, List classNames, - List violations) { + List violations) { for (String className : classNames) { validate(classLoader, className, violations); } } public void validateClasses(ClassLoader classLoader, String[] classNames, - List violations) { + List violations) { validateClasses(classLoader, Arrays.asList(classNames), violations); } @InterfaceAudience.Private - protected void validateTables(ClassLoader classLoader, Admin admin, - Pattern pattern, List violations) throws IOException { + protected void validateTables(ClassLoader classLoader, Admin admin, Pattern pattern, + List violations) throws IOException { List tableDescriptors = admin.listTableDescriptors(pattern); for (TableDescriptor tableDescriptor : tableDescriptors) { LOG.debug("Validating table {}", tableDescriptor.getTableName()); Collection coprocessorDescriptors = - tableDescriptor.getCoprocessorDescriptors(); + tableDescriptor.getCoprocessorDescriptors(); for (CoprocessorDescriptor coprocessorDescriptor : coprocessorDescriptors) { String className = coprocessorDescriptor.getClassName(); @@ -184,9 +180,8 @@ public class CoprocessorValidator extends AbstractHBaseTool { try (ResolverUrlClassLoader cpClassLoader = createClassLoader(classLoader, path)) { validate(cpClassLoader, className, violations); } catch (IOException e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, - "could not validate jar file '" + path + "'", e); + CoprocessorViolation violation = new 
CoprocessorViolation(className, Severity.ERROR, + "could not validate jar file '" + path + "'", e); violations.add(violation); } } else { @@ -197,18 +192,17 @@ public class CoprocessorValidator extends AbstractHBaseTool { } private void validateTables(ClassLoader classLoader, Pattern pattern, - List violations) throws IOException { + List violations) throws IOException { try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { validateTables(classLoader, admin, pattern, violations); } } @Override protected void printUsage() { - String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + - PreUpgradeValidator.VALIDATE_CP_NAME + - " [-jar ...] [-class ... | -table ... | -config]"; + String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + + PreUpgradeValidator.VALIDATE_CP_NAME + " [-jar ...] [-class ... | -table ... | -config]"; printUsage(header, "Options:", ""); } @@ -249,9 +243,8 @@ public class CoprocessorValidator extends AbstractHBaseTool { Path jarPath = Paths.get(jar); if (Files.isDirectory(jarPath)) { try (Stream stream = Files.list(jarPath)) { - List files = stream - .filter((path) -> Files.isRegularFile(path)) - .collect(Collectors.toList()); + List files = + stream.filter((path) -> Files.isRegularFile(path)).collect(Collectors.toList()); for (Path file : files) { URL url = file.toUri().toURL(); @@ -291,13 +284,13 @@ public class CoprocessorValidator extends AbstractHBaseTool { if (config) { String[] masterCoprocessors = - getConf().getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); + getConf().getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); if (masterCoprocessors != null) { validateClasses(classLoader, masterCoprocessors, violations); } String[] regionCoprocessors = - getConf().getStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); + getConf().getStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); if (regionCoprocessors != null) { validateClasses(classLoader, regionCoprocessors, violations); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java index d00398ecc27..a57fed964c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.tool.coprocessor; import org.apache.yetus.audience.InterfaceAudience; @@ -26,7 +24,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; @InterfaceAudience.Private public class CoprocessorViolation { public enum Severity { - WARNING, ERROR + WARNING, + ERROR } private final String className; @@ -38,8 +37,7 @@ public class CoprocessorViolation { this(className, severity, message, null); } - public CoprocessorViolation(String className, Severity severity, String message, - Throwable t) { + public CoprocessorViolation(String className, Severity severity, String message, Throwable t) { this.className = className; this.severity = severity; this.message = message; @@ -64,11 +62,7 @@ public class CoprocessorViolation { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("className", className) - .add("severity", severity) - .add("message", message) - .add("throwable", throwable) - .toString(); + return MoreObjects.toStringHelper(this).add("className", className).add("severity", severity) + .add("message", message).add("throwable", throwable).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java index 265cf5158ee..8dca18aa7ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.lang.reflect.Method; - import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; import org.apache.hadoop.hbase.coprocessor.EndpointObserver; import org.apache.hadoop.hbase.coprocessor.MasterObserver; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java index 6825e426c7d..e43aef667ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,33 +17,28 @@ */ package org.apache.hadoop.hbase.util; +import edu.umd.cs.findbugs.annotations.CheckForNull; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; - -import edu.umd.cs.findbugs.annotations.CheckForNull; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * Typical base class for file status filter. 
Works more efficiently when - * filtering file statuses, otherwise implementation will need to lookup filestatus - * for the path which will be expensive. + * Typical base class for file status filter. Works more efficiently when filtering file statuses, + * otherwise implementation will need to lookup filestatus for the path which will be expensive. */ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class AbstractFileStatusFilter implements PathFilter, FileStatusFilter { /** - * Filters out a path. Can be given an optional directory hint to avoid - * filestatus lookup. - * - * @param p A filesystem path - * @param isDir An optional boolean indicating whether the path is a directory or not - * @return true if the path is accepted, false if the path is filtered out + * Filters out a path. Can be given an optional directory hint to avoid filestatus lookup. + * @param p A filesystem path + * @param isDir An optional boolean indicating whether the path is a directory or not + * @return true if the path is accepted, false if the path is filtered out */ protected abstract boolean accept(Path p, @CheckForNull Boolean isDir); @@ -61,7 +56,8 @@ public abstract class AbstractFileStatusFilter implements PathFilter, FileStatus return !isDirectory(fs, isDir, p); } - protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p) throws IOException { + protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p) + throws IOException { return isDir != null ? isDir : fs.isDirectory(p); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java index 31394e8a97b..f0453b01112 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,13 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.yetus.audience.InterfaceAudience; /** - * The bloom context that is used by the StorefileWriter to add the bloom details - * per cell + * The bloom context that is used by the StorefileWriter to add the bloom details per cell */ @InterfaceAudience.Private public abstract class BloomContext { @@ -44,9 +42,7 @@ public abstract class BloomContext { } /** - * Bloom information from the cell is retrieved - * @param cell - * @throws IOException + * Bloom information from the cell is retrieved nn */ public void writeBloom(Cell cell) throws IOException { // only add to the bloom filter on a new, unique key @@ -60,15 +56,13 @@ public abstract class BloomContext { if (this.getLastCell() != null) { if (comparator.compare(cell, this.getLastCell()) <= 0) { throw new IOException("Added a key not lexically larger than" + " previous. Current cell = " - + cell + ", prevCell = " + this.getLastCell()); + + cell + ", prevCell = " + this.getLastCell()); } } } /** - * Adds the last bloom key to the HFile Writer as part of StorefileWriter close. 
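As a usage sketch for the AbstractFileStatusFilter contract shown a little earlier (a hypothetical subclass, not part of this patch): the Boolean hint lets callers that already hold a FileStatus avoid another FileSystem#isDirectory round trip, while a null hint means the answer is unknown.

import edu.umd.cs.findbugs.annotations.CheckForNull;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.AbstractFileStatusFilter;

public class DirectoriesOnlyFilterSketch extends AbstractFileStatusFilter {
  @Override
  protected boolean accept(Path p, @CheckForNull Boolean isDir) {
    // Keep only directories. For brevity an unknown hint is treated as a non-match here;
    // a production filter would resolve it with a FileSystem lookup instead.
    return Boolean.TRUE.equals(isDir);
  }
}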
- * @param writer - * @throws IOException + * Adds the last bloom key to the HFile Writer as part of StorefileWriter close. nn */ public abstract void addLastBloomKey(HFile.Writer writer) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java index 0d99d30da45..9478a99c9b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,59 +17,40 @@ */ package org.apache.hadoop.hbase.util; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.yetus.audience.InterfaceAudience; /** - * * Implements a Bloom filter, as defined by Bloom in 1970. *

      - * The Bloom filter is a data structure that was introduced in 1970 and that has - * been adopted by the networking research community in the past decade thanks - * to the bandwidth efficiencies that it offers for the transmission of set - * membership information between networked hosts. A sender encodes the - * information into a bit vector, the Bloom filter, that is more compact than a - * conventional representation. Computation and space costs for construction are - * linear in the number of elements. The receiver uses the filter to test - * whether various elements are members of the set. Though the filter will - * occasionally return a false positive, it will never return a false negative. - * When creating the filter, the sender can choose its desired point in a - * trade-off between the false positive rate and the size. - * + * The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by the + * networking research community in the past decade thanks to the bandwidth efficiencies that it + * offers for the transmission of set membership information between networked hosts. A sender + * encodes the information into a bit vector, the Bloom filter, that is more compact than a + * conventional representation. Computation and space costs for construction are linear in the + * number of elements. The receiver uses the filter to test whether various elements are members of + * the set. Though the filter will occasionally return a false positive, it will never return a + * false negative. When creating the filter, the sender can choose its desired point in a trade-off + * between the false positive rate and the size. *

      - * Originally inspired by European Commission - * One-Lab Project 034819. - * - * Bloom filters are very sensitive to the number of elements inserted into - * them. For HBase, the number of entries depends on the size of the data stored - * in the column. Currently the default region size is 256MB, so entry count ~= - * 256MB / (average value size for column). Despite this rule of thumb, there is - * no efficient way to calculate the entry count after compactions. Therefore, - * it is often easier to use a dynamic bloom filter that will add extra space - * instead of allowing the error rate to grow. - * - * ( http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey - * .pdf ) - * - * m denotes the number of bits in the Bloom filter (bitSize) n denotes the - * number of elements inserted into the Bloom filter (maxKeys) k represents the - * number of hash functions used (nbHash) e represents the desired false - * positive rate for the bloom (err) - * - * If we fix the error rate (e) and know the number of entries, then the optimal - * bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185) - * + * Originally inspired by European Commission One-Lab Project + * 034819. Bloom filters are very sensitive to the number of elements inserted into them. For + * HBase, the number of entries depends on the size of the data stored in the column. Currently the + * default region size is 256MB, so entry count ~= 256MB / (average value size for column). Despite + * this rule of thumb, there is no efficient way to calculate the entry count after compactions. + * Therefore, it is often easier to use a dynamic bloom filter that will add extra space instead of + * allowing the error rate to grow. ( + * http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey .pdf ) m denotes the + * number of bits in the Bloom filter (bitSize) n denotes the number of elements inserted into the + * Bloom filter (maxKeys) k represents the number of hash functions used (nbHash) e represents the + * desired false positive rate for the bloom (err) If we fix the error rate (e) and know the number + * of entries, then the optimal bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185) * The probability of false positives is minimized when k = m/n ln(2). - * * @see BloomFilter The general behavior of a filter - * - * @see - * Space/Time Trade-Offs in Hash Coding with Allowable Errors - * + * @see Space/Time + * Trade-Offs in Hash Coding with Allowable Errors * @see BloomFilterWriter for the ability to add elements to a Bloom filter */ @InterfaceAudience.Private @@ -79,27 +59,25 @@ public interface BloomFilter extends BloomFilterBase { /** * Check if the specified key is contained in the bloom filter. * @param keyCell the key to check for the existence of - * @param bloom bloom filter data to search. This can be null if auto-loading - * is supported. - * @param type The type of Bloom ROW/ ROW_COL + * @param bloom bloom filter data to search. This can be null if auto-loading is supported. + * @param type The type of Bloom ROW/ ROW_COL * @return true if matched by bloom, false if not */ boolean contains(Cell keyCell, ByteBuff bloom, BloomType type); /** * Check if the specified key is contained in the bloom filter. - * @param buf data to check for existence of + * @param buf data to check for existence of * @param offset offset into the data * @param length length of the data - * @param bloom bloom filter data to search. 
This can be null if auto-loading - * is supported. + * @param bloom bloom filter data to search. This can be null if auto-loading is supported. * @return true if matched by bloom, false if not */ boolean contains(byte[] buf, int offset, int length, ByteBuff bloom); /** - * @return true if this Bloom filter can automatically load its data - * and thus allows a null byte buffer to be passed to contains() + * @return true if this Bloom filter can automatically load its data and thus allows a null byte + * buffer to be passed to contains() */ boolean supportsAutoLoading(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java index 131552560e5..142a36c35f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,8 +31,7 @@ public interface BloomFilterBase { long getKeyCount(); /** - * @return The max number of keys that can be inserted - * to maintain the desired error rate + * @return The max number of keys that can be inserted to maintain the desired error rate */ long getMaxKeys(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java index 06cf699e34f..e09420cf805 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.yetus.audience.InterfaceAudience; /** * The basic building block for the {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter} @@ -56,8 +53,7 @@ public class BloomFilterChunk implements BloomFilterBase { * @param meta stored bloom meta data * @throws IllegalArgumentException meta data is invalid */ - public BloomFilterChunk(DataInput meta) - throws IOException, IllegalArgumentException { + public BloomFilterChunk(DataInput meta) throws IOException, IllegalArgumentException { this.byteSize = meta.readInt(); this.hashCount = meta.readInt(); this.hashType = meta.readInt(); @@ -72,12 +68,10 @@ public class BloomFilterChunk implements BloomFilterBase { } /** - * Computes the error rate for this Bloom filter, taking into account the - * actual number of hash functions and keys inserted. The return value of - * this function changes as a Bloom filter is being populated. Used for - * reporting the actual error rate of compound Bloom filters when writing - * them out. 
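The sizing formulas quoted in the BloomFilter javadoc above, m = -n * ln(err) / (ln 2)^2 and k = (m / n) * ln 2, are easy to sanity-check. A standalone sketch with illustrative numbers, independent of the classes touched by this patch:

    public class BloomSizingMath {
      public static void main(String[] args) {
        long n = 1_000_000L; // expected number of keys (maxKeys)
        double err = 0.01;   // desired false positive rate

        // m = -n * ln(err) / (ln 2)^2, the optimal bit count from the javadoc above
        double log2Squared = Math.log(2) * Math.log(2); // ~0.480453, same value as BloomFilterUtil.LOG2_SQUARED
        long m = (long) Math.ceil(n * (-Math.log(err) / log2Squared)); // ~9.6 million bits, roughly 1.2 MB

        // k = (m / n) * ln 2 minimizes the false positive rate
        int k = (int) Math.ceil((double) m / n * Math.log(2)); // 7

        // Resulting false positive rate for these m, n, k: (1 - e^(-k*n/m))^k
        double actual = Math.pow(1 - Math.exp(-k * (double) n / m), k); // comes out close to 0.01
        System.out.println("m=" + m + " bits, k=" + k + ", actual error ~= " + actual);
      }
    }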
- * + * Computes the error rate for this Bloom filter, taking into account the actual number of hash + * functions and keys inserted. The return value of this function changes as a Bloom filter is + * being populated. Used for reporting the actual error rate of compound Bloom filters when + * writing them out. * @return error rate for this particular Bloom filter */ public double actualErrorRate() { @@ -93,21 +87,16 @@ public class BloomFilterChunk implements BloomFilterBase { /** * Determines & initializes bloom filter meta data from user config. Call * {@link #allocBloom()} to allocate bloom filter data. - * - * @param maxKeys Maximum expected number of keys that will be stored in this - * bloom - * @param errorRate Desired false positive error rate. Lower rate = more - * storage required - * @param hashType Type of hash function to use - * @param foldFactor When finished adding entries, you may be able to 'fold' - * this bloom to save space. Tradeoff potentially excess bytes in - * bloom for ability to fold if keyCount is exponentially greater - * than maxKeys. - * @throws IllegalArgumentException + * @param maxKeys Maximum expected number of keys that will be stored in this bloom + * @param errorRate Desired false positive error rate. Lower rate = more storage required + * @param hashType Type of hash function to use + * @param foldFactor When finished adding entries, you may be able to 'fold' this bloom to save + * space. Tradeoff potentially excess bytes in bloom for ability to fold if + * keyCount is exponentially greater than maxKeys. n */ // Used only in testcases - public BloomFilterChunk(int maxKeys, double errorRate, int hashType, - int foldFactor) throws IllegalArgumentException { + public BloomFilterChunk(int maxKeys, double errorRate, int hashType, int foldFactor) + throws IllegalArgumentException { this(hashType, BloomType.ROW); long bitSize = BloomFilterUtil.computeBitSize(maxKeys, errorRate); @@ -121,9 +110,8 @@ public class BloomFilterChunk implements BloomFilterBase { } /** - * Creates another similar Bloom filter. Does not copy the actual bits, and - * sets the new filter's key count to zero. - * + * Creates another similar Bloom filter. Does not copy the actual bits, and sets the new filter's + * key count to zero. 
* @return a Bloom filter with the same configuration as this */ public BloomFilterChunk createAnother() { @@ -138,16 +126,16 @@ public class BloomFilterChunk implements BloomFilterBase { if (this.bloom != null) { throw new IllegalArgumentException("can only create bloom once."); } - this.bloom = ByteBuffer.allocate((int)this.byteSize); + this.bloom = ByteBuffer.allocate((int) this.byteSize); assert this.bloom.hasArray(); } void sanityCheck() throws IllegalArgumentException { - if(0 >= this.byteSize || this.byteSize > Integer.MAX_VALUE) { + if (0 >= this.byteSize || this.byteSize > Integer.MAX_VALUE) { throw new IllegalArgumentException("Invalid byteSize: " + this.byteSize); } - if(this.hashCount <= 0) { + if (this.hashCount <= 0) { throw new IllegalArgumentException("Hash function count must be > 0"); } @@ -160,15 +148,14 @@ public class BloomFilterChunk implements BloomFilterBase { } } - void bloomCheck(ByteBuffer bloom) throws IllegalArgumentException { + void bloomCheck(ByteBuffer bloom) throws IllegalArgumentException { if (this.byteSize != bloom.limit()) { - throw new IllegalArgumentException( - "Configured bloom length should match actual length"); + throw new IllegalArgumentException("Configured bloom length should match actual length"); } } // Used only by tests - void add(byte [] buf, int offset, int len) { + void add(byte[] buf, int offset, int len) { /* * For faster hashing, use combinatorial generation * http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf @@ -208,17 +195,16 @@ public class BloomFilterChunk implements BloomFilterBase { ++this.keyCount; } - //--------------------------------------------------------------------------- + // --------------------------------------------------------------------------- /** Private helpers */ /** * Set the bit at the specified index to 1. - * * @param pos index of bit */ void set(long pos) { - int bytePos = (int)(pos / 8); - int bitPos = (int)(pos % 8); + int bytePos = (int) (pos / 8); + int bitPos = (int) (pos % 8); byte curByte = bloom.get(bytePos); curByte |= BloomFilterUtil.bitvals[bitPos]; bloom.put(bytePos, curByte); @@ -226,13 +212,12 @@ public class BloomFilterChunk implements BloomFilterBase { /** * Check if bit at specified index is 1. - * * @param pos index of bit * @return true if bit at specified index is 1, false if 0. */ static boolean get(int pos, ByteBuffer bloomBuf, int bloomOffset) { - int bytePos = pos >> 3; //pos / 8 - int bitPos = pos & 0x7; //pos % 8 + int bytePos = pos >> 3; // pos / 8 + int bitPos = pos & 0x7; // pos % 8 // TODO access this via Util API which can do Unsafe access if possible(?) byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= BloomFilterUtil.bitvals[bitPos]; @@ -262,11 +247,11 @@ public class BloomFilterChunk implements BloomFilterBase { // see if the actual size is exponentially smaller than expected. 
if (this.keyCount > 0 && this.bloom.hasArray()) { int pieces = 1; - int newByteSize = (int)this.byteSize; + int newByteSize = (int) this.byteSize; int newMaxKeys = this.maxKeys; // while exponentially smaller & folding is lossless - while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount<<1)) { + while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount << 1)) { pieces <<= 1; newByteSize >>= 1; newMaxKeys >>= 1; @@ -278,8 +263,8 @@ public class BloomFilterChunk implements BloomFilterBase { int start = this.bloom.arrayOffset(); int end = start + newByteSize; int off = end; - for(int p = 1; p < pieces; ++p) { - for(int pos = start; pos < end; ++pos) { + for (int p = 1; p < pieces; ++p) { + for (int pos = start; pos < end; ++pos) { array[pos] |= array[off++]; } } @@ -298,8 +283,7 @@ public class BloomFilterChunk implements BloomFilterBase { * @param out OutputStream to place bloom * @throws IOException Error writing bloom array */ - public void writeBloom(final DataOutput out) - throws IOException { + public void writeBloom(final DataOutput out) throws IOException { if (!this.bloom.hasArray()) { throw new IOException("Only writes ByteBuffer with underlying array."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java index 89ff5b7222e..08571570a89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util; import java.io.DataInput; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -39,80 +38,65 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public final class BloomFilterFactory { - private static final Logger LOG = - LoggerFactory.getLogger(BloomFilterFactory.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(BloomFilterFactory.class.getName()); /** This class should not be instantiated. */ - private BloomFilterFactory() {} + private BloomFilterFactory() { + } /** - * Specifies the target error rate to use when selecting the number of keys - * per Bloom filter. + * Specifies the target error rate to use when selecting the number of keys per Bloom filter. */ - public static final String IO_STOREFILE_BLOOM_ERROR_RATE = - "io.storefile.bloom.error.rate"; + public static final String IO_STOREFILE_BLOOM_ERROR_RATE = "io.storefile.bloom.error.rate"; /** - * Maximum folding factor allowed. The Bloom filter will be shrunk by - * the factor of up to 2 ** this times if we oversize it initially. + * Maximum folding factor allowed. The Bloom filter will be shrunk by the factor of up to 2 ** + * this times if we oversize it initially. */ - public static final String IO_STOREFILE_BLOOM_MAX_FOLD = - "io.storefile.bloom.max.fold"; + public static final String IO_STOREFILE_BLOOM_MAX_FOLD = "io.storefile.bloom.max.fold"; /** - * For default (single-block) Bloom filters this specifies the maximum number - * of keys. + * For default (single-block) Bloom filters this specifies the maximum number of keys. 
*/ - public static final String IO_STOREFILE_BLOOM_MAX_KEYS = - "io.storefile.bloom.max.keys"; + public static final String IO_STOREFILE_BLOOM_MAX_KEYS = "io.storefile.bloom.max.keys"; /** Master switch to enable Bloom filters */ - public static final String IO_STOREFILE_BLOOM_ENABLED = - "io.storefile.bloom.enabled"; + public static final String IO_STOREFILE_BLOOM_ENABLED = "io.storefile.bloom.enabled"; /** Master switch to enable Delete Family Bloom filters */ public static final String IO_STOREFILE_DELETEFAMILY_BLOOM_ENABLED = - "io.storefile.delete.family.bloom.enabled"; + "io.storefile.delete.family.bloom.enabled"; /** - * Target Bloom block size. Bloom filter blocks of approximately this size - * are interleaved with data blocks. + * Target Bloom block size. Bloom filter blocks of approximately this size are interleaved with + * data blocks. */ - public static final String IO_STOREFILE_BLOOM_BLOCK_SIZE = - "io.storefile.bloom.block.size"; + public static final String IO_STOREFILE_BLOOM_BLOCK_SIZE = "io.storefile.bloom.block.size"; /** Maximum number of times a Bloom filter can be "folded" if oversized */ private static final int MAX_ALLOWED_FOLD_FACTOR = 7; /** - * Instantiates the correct Bloom filter class based on the version provided - * in the meta block data. - * - * @param meta the byte array holding the Bloom filter's metadata, including - * version information - * @param reader the {@link HFile} reader to use to lazily load Bloom filter - * blocks - * @return an instance of the correct type of Bloom filter - * @throws IllegalArgumentException + * Instantiates the correct Bloom filter class based on the version provided in the meta block + * data. + * @param meta the byte array holding the Bloom filter's metadata, including version information + * @param reader the {@link HFile} reader to use to lazily load Bloom filter blocks + * @return an instance of the correct type of Bloom filter n */ - public static BloomFilter - createFromMeta(DataInput meta, HFile.Reader reader) - throws IllegalArgumentException, IOException { + public static BloomFilter createFromMeta(DataInput meta, HFile.Reader reader) + throws IllegalArgumentException, IOException { int version = meta.readInt(); switch (version) { case CompoundBloomFilterBase.VERSION: return new CompoundBloomFilter(meta, reader); default: - throw new IllegalArgumentException( - "Bad bloom filter format version " + version - ); + throw new IllegalArgumentException("Bad bloom filter format version " + version); } } /** - * @return true if general Bloom (Row or RowCol) filters are enabled in the - * given configuration + * @return true if general Bloom (Row or RowCol) filters are enabled in the given configuration */ public static boolean isGeneralBloomEnabled(Configuration conf) { return conf.getBoolean(IO_STOREFILE_BLOOM_ENABLED, true); @@ -145,32 +129,26 @@ public final class BloomFilterFactory { } /** - * @return max key for the Bloom filter from the configuration - */ + * @return max key for the Bloom filter from the configuration + */ public static int getMaxKeys(Configuration conf) { return conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS, 128 * 1000 * 1000); } /** * Creates a new general (Row or RowCol) Bloom filter at the time of - * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. - * - * @param conf - * @param cacheConf - * @param bloomType - * @param maxKeys an estimate of the number of keys we expect to insert. - * Irrelevant if compound Bloom filters are enabled. 
+ * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. nnn * @param maxKeys an + * estimate of the number of keys we expect to insert. Irrelevant if compound Bloom filters are + * enabled. * @param writer the HFile writer - * @return the new Bloom filter, or null in case Bloom filters are disabled - * or when failed to create one. + * @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to + * create one. */ public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf, - CacheConfig cacheConf, BloomType bloomType, int maxKeys, - HFile.Writer writer) { + CacheConfig cacheConf, BloomType bloomType, int maxKeys, HFile.Writer writer) { if (!isGeneralBloomEnabled(conf)) { - LOG.trace("Bloom filters are disabled by configuration for " - + writer.getPath() - + (conf == null ? " (configuration is null)" : "")); + LOG.trace("Bloom filters are disabled by configuration for " + writer.getPath() + + (conf == null ? " (configuration is null)" : "")); return null; } else if (bloomType == BloomType.NONE) { LOG.trace("Bloom filter is turned off for the column family"); @@ -187,35 +165,31 @@ public final class BloomFilterFactory { err = (float) (1 - Math.sqrt(1 - err)); } - int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, - MAX_ALLOWED_FOLD_FACTOR); + int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, MAX_ALLOWED_FOLD_FACTOR); // Do we support compound bloom filters? // In case of compound Bloom filters we ignore the maxKeys hint. CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf), - err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(), - bloomType == BloomType.ROWCOL ? CellComparatorImpl.COMPARATOR : null, bloomType); + err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(), + bloomType == BloomType.ROWCOL ? CellComparatorImpl.COMPARATOR : null, bloomType); writer.addInlineBlockWriter(bloomWriter); return bloomWriter; } /** * Creates a new Delete Family Bloom filter at the time of - * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. - * @param conf - * @param cacheConf - * @param maxKeys an estimate of the number of keys we expect to insert. - * Irrelevant if compound Bloom filters are enabled. + * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. nn * @param maxKeys an + * estimate of the number of keys we expect to insert. Irrelevant if compound Bloom filters are + * enabled. * @param writer the HFile writer - * @return the new Bloom filter, or null in case Bloom filters are disabled - * or when failed to create one. + * @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to + * create one. */ public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf, - CacheConfig cacheConf, int maxKeys, HFile.Writer writer) { + CacheConfig cacheConf, int maxKeys, HFile.Writer writer) { if (!isDeleteFamilyBloomEnabled(conf)) { - LOG.info("Delete Bloom filters are disabled by configuration for " - + writer.getPath() - + (conf == null ? " (configuration is null)" : "")); + LOG.info("Delete Bloom filters are disabled by configuration for " + writer.getPath() + + (conf == null ? " (configuration is null)" : "")); return null; } @@ -223,9 +197,9 @@ public final class BloomFilterFactory { int maxFold = getMaxFold(conf); // In case of compound Bloom filters we ignore the maxKeys hint. 
- CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf), - err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(), - null, BloomType.ROW); + CompoundBloomFilterWriter bloomWriter = + new CompoundBloomFilterWriter(getBloomBlockSize(conf), err, Hash.getHashType(conf), maxFold, + cacheConf.shouldCacheBloomsOnWrite(), null, BloomType.ROW); writer.addInlineBlockWriter(bloomWriter); return bloomWriter; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java index c7afb0e5f91..b35e8258ddf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.regionserver.BloomType.ROWPREFIX_FIXED_LEN import java.text.NumberFormat; import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -38,30 +37,21 @@ public final class BloomFilterUtil { /** Record separator for the Bloom filter statistics human-readable string */ public static final String STATS_RECORD_SEP = "; "; /** - * Used in computing the optimal Bloom filter size. This approximately equals - * 0.480453. + * Used in computing the optimal Bloom filter size. This approximately equals 0.480453. */ public static final double LOG2_SQUARED = Math.log(2) * Math.log(2); - + /** - * A random number generator to use for "fake lookups" when testing to - * estimate the ideal false positive rate. + * A random number generator to use for "fake lookups" when testing to estimate the ideal false + * positive rate. */ private static Random randomGeneratorForTest; public static final String PREFIX_LENGTH_KEY = "RowPrefixBloomFilter.prefix_length"; - + /** Bit-value lookup array to prevent doing the same work over and over */ - public static final byte [] bitvals = { - (byte) 0x01, - (byte) 0x02, - (byte) 0x04, - (byte) 0x08, - (byte) 0x10, - (byte) 0x20, - (byte) 0x40, - (byte) 0x80 - }; + public static final byte[] bitvals = { (byte) 0x01, (byte) 0x02, (byte) 0x04, (byte) 0x08, + (byte) 0x10, (byte) 0x20, (byte) 0x40, (byte) 0x80 }; /** * Private constructor to keep this class from being instantiated. @@ -70,12 +60,9 @@ public final class BloomFilterUtil { } /** - * @param maxKeys - * @param errorRate - * @return the number of bits for a Bloom filter than can hold the given - * number of keys and provide the given error rate, assuming that the - * optimal number of hash functions is used and it does not have to - * be an integer. + * nn * @return the number of bits for a Bloom filter than can hold the given number of keys and + * provide the given error rate, assuming that the optimal number of hash functions is used and it + * does not have to be an integer. */ public static long computeBitSize(long maxKeys, double errorRate) { return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED)); @@ -86,7 +73,7 @@ public final class BloomFilterUtil { * simulate uniformity of accesses better in a test environment. Should not be set in a real * environment where correctness matters! *

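Stepping back to the BloomFilterFactory hunks above: they declare the bloom-related configuration keys, and createGeneralBloomAtWrite tightens the rate for ROWCOL blooms to 1 - sqrt(1 - err), so that an OR of two probes still compounds to the configured error rate. A hedged sketch of wiring those keys into a Hadoop Configuration; the values are examples, not defaults asserted by this patch:

    import org.apache.hadoop.conf.Configuration;

    public class BloomConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Keys declared in BloomFilterFactory; the values below are illustrative.
        conf.setBoolean("io.storefile.bloom.enabled", true);
        conf.setFloat("io.storefile.bloom.error.rate", 0.01f);
        conf.setInt("io.storefile.bloom.max.fold", 7);           // same value as MAX_ALLOWED_FOLD_FACTOR
        conf.setInt("io.storefile.bloom.block.size", 128 * 1024);

        // Per-probe rate used for ROWCOL blooms: err' = 1 - sqrt(1 - err),
        // so 1 - (1 - err')^2 equals the configured err again.
        float err = conf.getFloat("io.storefile.bloom.error.rate", 0.01f);
        float perProbe = (float) (1 - Math.sqrt(1 - err));
        System.out.println("per-probe ROWCOL error rate ~= " + perProbe); // ~0.005
      }
    }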
      - * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)} + * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)} * @param random The random number source to use, or null to compute actual hashes */ public static void setRandomGeneratorForTest(Random random) { @@ -94,14 +81,10 @@ public final class BloomFilterUtil { } /** - * The maximum number of keys we can put into a Bloom filter of a certain - * size to maintain the given error rate, assuming the number of hash - * functions is chosen optimally and does not even have to be an integer - * (hence the "ideal" in the function name). - * - * @param bitSize - * @param errorRate - * @return maximum number of keys that can be inserted into the Bloom filter + * The maximum number of keys we can put into a Bloom filter of a certain size to maintain the + * given error rate, assuming the number of hash functions is chosen optimally and does not even + * have to be an integer (hence the "ideal" in the function name). nn * @return maximum number of + * keys that can be inserted into the Bloom filter * @see #computeMaxKeys(long, double, int) for a more precise estimate */ public static long idealMaxKeys(long bitSize, double errorRate) { @@ -111,47 +94,30 @@ public final class BloomFilterUtil { } /** - * The maximum number of keys we can put into a Bloom filter of a certain - * size to get the given error rate, with the given number of hash functions. - * - * @param bitSize - * @param errorRate - * @param hashCount - * @return the maximum number of keys that can be inserted in a Bloom filter - * to maintain the target error rate, if the number of hash functions - * is provided. + * The maximum number of keys we can put into a Bloom filter of a certain size to get the given + * error rate, with the given number of hash functions. nnn * @return the maximum number of keys + * that can be inserted in a Bloom filter to maintain the target error rate, if the number of hash + * functions is provided. */ - public static long computeMaxKeys(long bitSize, double errorRate, - int hashCount) { - return (long) (-bitSize * 1.0 / hashCount * - Math.log(1 - Math.exp(Math.log(errorRate) / hashCount))); + public static long computeMaxKeys(long bitSize, double errorRate, int hashCount) { + return (long) (-bitSize * 1.0 / hashCount + * Math.log(1 - Math.exp(Math.log(errorRate) / hashCount))); } /** - * Computes the actual error rate for the given number of elements, number - * of bits, and number of hash functions. Taken directly from the - * Wikipedia Bloom filter article. - * - * @param maxKeys - * @param bitSize - * @param functionCount - * @return the actual error rate + * Computes the actual error rate for the given number of elements, number of bits, and number of + * hash functions. Taken directly from the + * Wikipedia + * Bloom filter article. nnn * @return the actual error rate */ - public static double actualErrorRate(long maxKeys, long bitSize, - int functionCount) { - return Math.exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 - / bitSize)) * functionCount); + public static double actualErrorRate(long maxKeys, long bitSize, int functionCount) { + return Math + .exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 / bitSize)) * functionCount); } /** - * Increases the given byte size of a Bloom filter until it can be folded by - * the given factor. 
- * - * @param bitSize - * @param foldFactor - * @return Foldable byte size + * Increases the given byte size of a Bloom filter until it can be folded by the given factor. nn + * * @return Foldable byte size */ public static int computeFoldableByteSize(long bitSize, int foldFactor) { long byteSizeLong = (bitSize + 7) / 8; @@ -162,8 +128,8 @@ public final class BloomFilterUtil { byteSizeLong <<= foldFactor; } if (byteSizeLong > Integer.MAX_VALUE) { - throw new IllegalArgumentException("byteSize=" + byteSizeLong + " too " - + "large for bitSize=" + bitSize + ", foldFactor=" + foldFactor); + throw new IllegalArgumentException("byteSize=" + byteSizeLong + " too " + "large for bitSize=" + + bitSize + ", foldFactor=" + foldFactor); } return (int) byteSizeLong; } @@ -171,25 +137,22 @@ public final class BloomFilterUtil { public static int optimalFunctionCount(int maxKeys, long bitSize) { long i = bitSize / maxKeys; double result = Math.ceil(Math.log(2) * i); - if (result > Integer.MAX_VALUE){ + if (result > Integer.MAX_VALUE) { throw new IllegalArgumentException("result too large for integer value."); } - return (int)result; + return (int) result; } - + /** * Creates a Bloom filter chunk of the given size. - * - * @param byteSizeHint the desired number of bytes for the Bloom filter bit - * array. Will be increased so that folding is possible. - * @param errorRate target false positive rate of the Bloom filter - * @param hashType Bloom filter hash function type - * @param foldFactor - * @param bloomType - * @return the new Bloom filter of the desired size + * @param byteSizeHint the desired number of bytes for the Bloom filter bit array. Will be + * increased so that folding is possible. + * @param errorRate target false positive rate of the Bloom filter + * @param hashType Bloom filter hash function type nn * @return the new Bloom filter of the + * desired size */ - public static BloomFilterChunk createBySize(int byteSizeHint, - double errorRate, int hashType, int foldFactor, BloomType bloomType) { + public static BloomFilterChunk createBySize(int byteSizeHint, double errorRate, int hashType, + int foldFactor, BloomType bloomType) { BloomFilterChunk bbf = new BloomFilterChunk(hashType, bloomType); bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor); @@ -205,15 +168,14 @@ public final class BloomFilterUtil { return bbf; } - public static boolean contains(byte[] buf, int offset, int length, - ByteBuff bloomBuf, int bloomOffset, int bloomSize, Hash hash, - int hashCount) { + public static boolean contains(byte[] buf, int offset, int length, ByteBuff bloomBuf, + int bloomOffset, int bloomSize, Hash hash, int hashCount) { HashKey hashKey = new ByteArrayHashKey(buf, offset, length); return contains(bloomBuf, bloomOffset, bloomSize, hash, hashCount, hashKey); } private static boolean contains(ByteBuff bloomBuf, int bloomOffset, int bloomSize, Hash hash, - int hashCount, HashKey hashKey) { + int hashCount, HashKey hashKey) { int hash1 = hash.hash(hashKey, 0); int bloomBitSize = bloomSize << 3; @@ -228,10 +190,10 @@ public final class BloomFilterUtil { for (int i = 0; i < hashCount; i++) { int hashLoc = (randomGeneratorForTest == null - // Production mode - ? Math.abs(compositeHash % bloomBitSize) - // Test mode with "fake look-ups" to estimate "ideal false positive rate" - : randomGeneratorForTest.nextInt(bloomBitSize)); + // Production mode + ? 
Math.abs(compositeHash % bloomBitSize) + // Test mode with "fake look-ups" to estimate "ideal false positive rate" + : randomGeneratorForTest.nextInt(bloomBitSize)); compositeHash += hash2; if (!checkBit(hashLoc, bloomBuf, bloomOffset)) { return false; @@ -241,21 +203,20 @@ public final class BloomFilterUtil { } public static boolean contains(Cell cell, ByteBuff bloomBuf, int bloomOffset, int bloomSize, - Hash hash, int hashCount, BloomType type) { - HashKey hashKey = type == BloomType.ROWCOL ? new RowColBloomHashKey(cell) - : new RowBloomHashKey(cell); + Hash hash, int hashCount, BloomType type) { + HashKey hashKey = + type == BloomType.ROWCOL ? new RowColBloomHashKey(cell) : new RowBloomHashKey(cell); return contains(bloomBuf, bloomOffset, bloomSize, hash, hashCount, hashKey); } /** * Check if bit at specified index is 1. - * * @param pos index of bit * @return true if bit at specified index is 1, false if 0. */ - static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { - int bytePos = pos >> 3; //pos / 8 - int bitPos = pos & 0x7; //pos % 8 + static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { + int bytePos = pos >> 3; // pos / 8 + int bitPos = pos & 0x7; // pos % 8 byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= bitvals[bitPos]; return (curByte != 0); @@ -263,10 +224,9 @@ public final class BloomFilterUtil { /** * A human-readable string with statistics for the given Bloom filter. - * * @param bloomFilter the Bloom filter to output statistics for; - * @return a string consisting of "<key>: <value>" parts - * separated by {@link #STATS_RECORD_SEP}. + * @return a string consisting of "<key>: <value>" parts separated by + * {@link #STATS_RECORD_SEP}. */ public static String formatStats(BloomFilterBase bloomFilter) { StringBuilder sb = new StringBuilder(); @@ -278,18 +238,18 @@ public final class BloomFilterUtil { sb.append("Max Keys for bloom: " + m); if (m > 0) { sb.append(STATS_RECORD_SEP + "Percentage filled: " - + NumberFormat.getPercentInstance().format(k * 1.0 / m)); + + NumberFormat.getPercentInstance().format(k * 1.0 / m)); } return sb.toString(); } public static String toString(BloomFilterChunk bloomFilter) { return formatStats(bloomFilter) + STATS_RECORD_SEP + "Actual error rate: " - + String.format("%.8f", bloomFilter.actualErrorRate()); + + String.format("%.8f", bloomFilter.actualErrorRate()); } public static byte[] getBloomFilterParam(BloomType bloomFilterType, Configuration conf) - throws IllegalArgumentException { + throws IllegalArgumentException { byte[] bloomParam = null; String message = "Bloom filter type is " + bloomFilterType + ", "; if (bloomFilterType.equals(ROWPREFIX_FIXED_LENGTH)) { @@ -303,12 +263,12 @@ public final class BloomFilterUtil { prefixLength = Integer.parseInt(prefixLengthString); if (prefixLength <= 0 || prefixLength > HConstants.MAX_ROW_LENGTH) { message += - "the value of " + PREFIX_LENGTH_KEY + " must >=0 and < " + HConstants.MAX_ROW_LENGTH; + "the value of " + PREFIX_LENGTH_KEY + " must >=0 and < " + HConstants.MAX_ROW_LENGTH; throw new IllegalArgumentException(message); } } catch (NumberFormatException nfe) { - message = "Number format exception when parsing " + PREFIX_LENGTH_KEY + " for BloomType " + - bloomFilterType.toString() + ":" + prefixLengthString; + message = "Number format exception when parsing " + PREFIX_LENGTH_KEY + " for BloomType " + + bloomFilterType.toString() + ":" + prefixLengthString; throw new IllegalArgumentException(message, nfe); } bloomParam = Bytes.toBytes(prefixLength); 
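BloomFilterUtil.contains() above probes hashCount positions derived as hash1, hash1 + hash2, hash1 + 2*hash2, and so on, while checkBit() addresses single bits with pos >> 3 and pos & 0x7 against the bitvals table. A standalone sketch of that arithmetic over a plain byte[]; the two hash values are hard-coded stand-ins rather than HBase's Hash implementation:

    public class BitProbeSketch {
      // Same lookup table as BloomFilterUtil.bitvals: bit i within a byte.
      private static final byte[] BITVALS =
        { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, (byte) 0x80 };

      /** Returns true if bit 'pos' of the bloom bit array is set. */
      static boolean checkBit(int pos, byte[] bloom) {
        int bytePos = pos >> 3; // pos / 8
        int bitPos = pos & 0x7; // pos % 8
        return (bloom[bytePos] & BITVALS[bitPos]) != 0;
      }

      /** Sets bit 'pos' of the bloom bit array. */
      static void setBit(int pos, byte[] bloom) {
        bloom[pos >> 3] |= BITVALS[pos & 0x7];
      }

      public static void main(String[] args) {
        int bloomByteSize = 1024;
        int bloomBitSize = bloomByteSize << 3;
        byte[] bloom = new byte[bloomByteSize];

        // Stand-ins for the two base hashes the real code derives from the key.
        int hash1 = 0x1234_5678;
        int hash2 = 0x0BCD_EF01;

        // Probe hashCount positions with the combinatorial scheme used in contains() above.
        int hashCount = 4;
        int compositeHash = hash1;
        for (int i = 0; i < hashCount; i++) {
          int hashLoc = Math.abs(compositeHash % bloomBitSize);
          setBit(hashLoc, bloom);
          compositeHash += hash2;
        }
        System.out.println(checkBit(Math.abs(hash1 % bloomBitSize), bloom)); // true
      }
    }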
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java index a6889780158..ec8390697ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,35 +15,33 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; /** - * Specifies methods needed to add elements to a Bloom filter and serialize the - * resulting Bloom filter as a sequence of bytes. + * Specifies methods needed to add elements to a Bloom filter and serialize the resulting Bloom + * filter as a sequence of bytes. */ @InterfaceAudience.Private public interface BloomFilterWriter extends BloomFilterBase, CellSink, ShipperListener { /** Compact the Bloom filter before writing metadata & data to disk. */ void compactBloom(); + /** * Get a writable interface into bloom filter meta data. - * * @return a writable instance that can be later written to a stream */ Writable getMetaWriter(); /** - * Get a writable interface into bloom filter data (the actual Bloom bits). - * Not used for compound Bloom filters. - * + * Get a writable interface into bloom filter data (the actual Bloom bits). Not used for compound + * Bloom filters. * @return a writable instance that can be later written to a stream */ Writable getDataWriter(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java index 3828606bfbb..a0f5bdd32a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java @@ -15,35 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.TimeUnit; +import java.util.AbstractQueue; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; -import java.util.AbstractQueue; - +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; - /** - * A generic bounded blocking Priority-Queue. - * - * The elements of the priority queue are ordered according to the Comparator - * provided at queue construction time. - * - * If multiple elements have the same priority this queue orders them in - * FIFO (first-in-first-out) manner. 
- * The head of this queue is the least element with respect to the specified - * ordering. If multiple elements are tied for least value, the head is the - * first one inserted. - * The queue retrieval operations poll, remove, peek, and element access the - * element at the head of the queue. + * A generic bounded blocking Priority-Queue. The elements of the priority queue are ordered + * according to the Comparator provided at queue construction time. If multiple elements have the + * same priority this queue orders them in FIFO (first-in-first-out) manner. The head of this queue + * is the least element with respect to the specified ordering. If multiple elements are tied for + * least value, the head is the first one inserted. The queue retrieval operations poll, remove, + * peek, and element access the element at the head of the queue. */ @InterfaceAudience.Private @InterfaceStability.Stable @@ -57,7 +48,7 @@ public class BoundedPriorityBlockingQueue extends AbstractQueue implements @SuppressWarnings("unchecked") public PriorityQueue(int capacity, Comparator comparator) { - this.objects = (E[])new Object[capacity]; + this.objects = (E[]) new Object[capacity]; this.comparator = comparator; } @@ -132,7 +123,6 @@ public class BoundedPriorityBlockingQueue extends AbstractQueue implements } } - // Lock used for all operations private final ReentrantLock lock = new ReentrantLock(); @@ -145,13 +135,12 @@ public class BoundedPriorityBlockingQueue extends AbstractQueue implements private final PriorityQueue queue; /** - * Creates a PriorityQueue with the specified capacity that orders its - * elements according to the specified comparator. - * @param capacity the capacity of this queue + * Creates a PriorityQueue with the specified capacity that orders its elements according to the + * specified comparator. 
+ * @param capacity the capacity of this queue * @param comparator the comparator that will be used to order this priority queue */ - public BoundedPriorityBlockingQueue(int capacity, - Comparator comparator) { + public BoundedPriorityBlockingQueue(int capacity, Comparator comparator) { this.queue = new PriorityQueue<>(capacity, comparator); } @@ -189,16 +178,14 @@ public class BoundedPriorityBlockingQueue extends AbstractQueue implements } @Override - public boolean offer(E e, long timeout, TimeUnit unit) - throws InterruptedException { + public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException { if (e == null) throw new NullPointerException(); long nanos = unit.toNanos(timeout); lock.lockInterruptibly(); try { while (queue.remainingCapacity() == 0) { - if (nanos <= 0) - return false; + if (nanos <= 0) return false; nanos = notFull.awaitNanos(nanos); } this.queue.add(e); @@ -241,8 +228,7 @@ public class BoundedPriorityBlockingQueue extends AbstractQueue implements } @Override - public E poll(long timeout, TimeUnit unit) - throws InterruptedException { + public E poll(long timeout, TimeUnit unit) throws InterruptedException { long nanos = unit.toNanos(timeout); lock.lockInterruptibly(); E result = null; @@ -321,12 +307,9 @@ public class BoundedPriorityBlockingQueue extends AbstractQueue implements @Override public int drainTo(Collection c, int maxElements) { - if (c == null) - throw new NullPointerException(); - if (c == this) - throw new IllegalArgumentException(); - if (maxElements <= 0) - return 0; + if (c == null) throw new NullPointerException(); + if (c == this) throw new IllegalArgumentException(); + if (maxElements <= 0) return 0; lock.lock(); try { int n = Math.min(queue.size(), maxElements); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java index d69a8c7483c..5465c24540a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,11 +22,10 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.SortedSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility scanner that wraps a sortable collection and serves as a KeyValueScanner. 
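The BoundedPriorityBlockingQueue changes above are formatting-only, but the class's contract (comparator ordering, FIFO among equal priorities, bounded capacity with a non-blocking offer) is easiest to see in use. A hypothetical sketch of an internal-only class; the Task type and priority values are invented for illustration:

    import java.util.Comparator;
    import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue;

    public class BoundedQueueSketch {
      // Illustrative element type: a named task with a numeric priority.
      static final class Task {
        final String name;
        final int priority;
        Task(String name, int priority) {
          this.name = name;
          this.priority = priority;
        }
      }

      public static void main(String[] args) throws InterruptedException {
        // Capacity 3; the head is the task with the smallest priority value.
        BoundedPriorityBlockingQueue<Task> queue =
          new BoundedPriorityBlockingQueue<>(3, Comparator.comparingInt((Task t) -> t.priority));

        queue.offer(new Task("compaction", 5));
        queue.offer(new Task("flush-a", 1));
        queue.offer(new Task("flush-b", 1));                    // same priority as flush-a
        System.out.println(queue.offer(new Task("extra", 9)));  // false: the queue is full

        System.out.println(queue.poll().name); // flush-a (lowest priority value)
        System.out.println(queue.poll().name); // flush-b (FIFO among equal priorities)
        System.out.println(queue.take().name); // compaction
      }
    }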
@@ -43,8 +41,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { this(set, CellComparator.getInstance()); } - public CollectionBackedScanner(SortedSet set, - CellComparator comparator) { + public CollectionBackedScanner(SortedSet set, CellComparator comparator) { this.comparator = comparator; data = set; init(); @@ -54,16 +51,14 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { this(list, CellComparator.getInstance()); } - public CollectionBackedScanner(List list, - CellComparator comparator) { + public CollectionBackedScanner(List list, CellComparator comparator) { Collections.sort(list, comparator); this.comparator = comparator; data = list; init(); } - public CollectionBackedScanner(CellComparator comparator, - Cell... array) { + public CollectionBackedScanner(CellComparator comparator, Cell... array) { this.comparator = comparator; List tmp = new ArrayList<>(array.length); @@ -75,7 +70,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { private void init() { iter = data.iterator(); - if(iter.hasNext()){ + if (iter.hasNext()) { current = iter.next(); } } @@ -88,7 +83,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { @Override public Cell next() { Cell oldCurrent = current; - if(iter.hasNext()){ + if (iter.hasNext()) { current = iter.next(); } else { current = null; @@ -105,10 +100,10 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { @Override public boolean reseek(Cell seekCell) { - while(iter.hasNext()){ + while (iter.hasNext()) { Cell next = iter.next(); int ret = comparator.compare(next, seekCell); - if(ret >= 0){ + if (ret >= 0) { current = next; return true; } @@ -116,7 +111,6 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { return false; } - @Override public void close() { // do nothing diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index 7b897e7e020..1ed7db1b90f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,33 +19,32 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.util.Locale; - import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.io.compress.Compressor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Compression validation test. Checks compression is working. Be sure to run - * on every node in your cluster. + * Compression validation test. Checks compression is working. Be sure to run on every node in your + * cluster. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -74,23 +72,22 @@ public class CompressionTest { } } - private final static Boolean[] compressionTestResults - = new Boolean[Compression.Algorithm.values().length]; + private final static Boolean[] compressionTestResults = + new Boolean[Compression.Algorithm.values().length]; static { - for (int i = 0 ; i < compressionTestResults.length ; ++i) { + for (int i = 0; i < compressionTestResults.length; ++i) { compressionTestResults[i] = null; } } - public static void testCompression(Compression.Algorithm algo) - throws IOException { + public static void testCompression(Compression.Algorithm algo) throws IOException { if (compressionTestResults[algo.ordinal()] != null) { if (compressionTestResults[algo.ordinal()]) { - return ; // already passed test, dont do it again. + return; // already passed test, dont do it again. } else { // failed. 
- throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" + - " previously failed test."); + throw new DoNotRetryIOException( + "Compression algorithm '" + algo.getName() + "'" + " previously failed test."); } } @@ -108,26 +105,20 @@ public class CompressionTest { public static void usage() { - System.err.println( - "Usage: CompressionTest " + - StringUtils.join( Compression.Algorithm.values(), "|").toLowerCase(Locale.ROOT) + - "\n" + - "For example:\n" + - " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); + System.err.println("Usage: CompressionTest " + + StringUtils.join(Compression.Algorithm.values(), "|").toLowerCase(Locale.ROOT) + "\n" + + "For example:\n" + " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); System.exit(1); } - public static void doSmokeTest(FileSystem fs, Path path, String codec) - throws Exception { + public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Exception { Configuration conf = HBaseConfiguration.create(); - HFileContext context = new HFileContextBuilder() - .withCompression(HFileWriterImpl.compressionByName(codec)).build(); - HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) - .withPath(fs, path) - .withFileContext(context) - .create(); + HFileContext context = + new HFileContextBuilder().withCompression(HFileWriterImpl.compressionByName(codec)).build(); + HFile.Writer writer = + HFile.getWriterFactoryNoCache(conf).withPath(fs, path).withFileContext(context).create(); // Write any-old Cell... - final byte [] rowKey = Bytes.toBytes("compressiontestkey"); + final byte[] rowKey = Bytes.toBytes("compressiontestkey"); Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval")); writer.append(c); writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval")); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java index cdc926fa709..555d1fa5a8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.util.StringUtils; - import java.util.AbstractMap; import java.util.Collection; import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Utilities for storing more complex collection types in @@ -34,7 +33,7 @@ import java.util.Map; */ @InterfaceAudience.Public public final class ConfigurationUtil { - // TODO: hopefully this is a good delimiter; it's not in the base64 alphabet, + // TODO: hopefully this is a good delimiter; it's not in the base64 alphabet, // nor is it valid for paths public static final char KVP_DELIMITER = '^'; @@ -44,29 +43,27 @@ public final class ConfigurationUtil { } /** - * Store a collection of Map.Entry's in conf, with each entry separated by ',' - * and key values delimited by {@link #KVP_DELIMITER} - * + * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values + * delimited by {@link #KVP_DELIMITER} * @param conf configuration to store the collection in * @param key overall key to store keyValues under * @param keyValues kvps to be stored under key in conf */ public static void setKeyValues(Configuration conf, String key, - Collection> keyValues) { + Collection> keyValues) { setKeyValues(conf, key, keyValues, KVP_DELIMITER); } /** - * Store a collection of Map.Entry's in conf, with each entry separated by ',' - * and key values delimited by delimiter. - * + * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values + * delimited by delimiter. * @param conf configuration to store the collection in * @param key overall key to store keyValues under * @param keyValues kvps to be stored under key in conf * @param delimiter character used to separate each kvp */ public static void setKeyValues(Configuration conf, String key, - Collection> keyValues, char delimiter) { + Collection> keyValues, char delimiter) { List serializedKvps = Lists.newArrayList(); for (Map.Entry kvp : keyValues) { @@ -78,7 +75,6 @@ public final class ConfigurationUtil { /** * Retrieve a list of key value pairs from configuration, stored under the provided key - * * @param conf configuration to retrieve kvps from * @param key key under which the key values are stored * @return the list of kvps stored under key in conf, or null if the key isn't present. 
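ConfigurationUtil above round-trips a collection of Map.Entry pairs through a single configuration property, separating key from value with KVP_DELIMITER ('^'). A short usage sketch; the property name and the pairs are made up:

    import java.util.AbstractMap;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.util.ConfigurationUtil;

    public class ConfigurationUtilSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Each pair is serialized as "key^value"; all pairs live under one configuration key.
        List<Map.Entry<String, String>> pairs = new ArrayList<>();
        pairs.add(new AbstractMap.SimpleImmutableEntry<>("scan.column", "cf:qual"));
        pairs.add(new AbstractMap.SimpleImmutableEntry<>("scan.limit", "100"));
        ConfigurationUtil.setKeyValues(conf, "example.scan.attributes", pairs);

        // getKeyValues returns null if the key is absent.
        List<Map.Entry<String, String>> roundTripped =
          ConfigurationUtil.getKeyValues(conf, "example.scan.attributes");
        for (Map.Entry<String, String> e : roundTripped) {
          System.out.println(e.getKey() + " = " + e.getValue());
        }
      }
    }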
@@ -90,7 +86,6 @@ public final class ConfigurationUtil { /** * Retrieve a list of key value pairs from configuration, stored under the provided key - * * @param conf configuration to retrieve kvps from * @param key key under which the key values are stored * @param delimiter character used to separate each kvp @@ -98,7 +93,7 @@ public final class ConfigurationUtil { * @see #setKeyValues(Configuration, String, Collection, char) */ public static List> getKeyValues(Configuration conf, String key, - char delimiter) { + char delimiter) { String[] kvps = conf.getStrings(key); if (kvps == null) { @@ -111,9 +106,8 @@ public final class ConfigurationUtil { String[] splitKvp = StringUtils.split(kvp, delimiter); if (splitKvp.length != 2) { - throw new IllegalArgumentException( - "Expected key value pair for configuration key '" + key + "'" + " to be of form '" - + delimiter + "; was " + kvp + " instead"); + throw new IllegalArgumentException("Expected key value pair for configuration key '" + key + + "'" + " to be of form '" + delimiter + "; was " + kvp + " instead"); } rtn.add(new AbstractMap.SimpleImmutableEntry<>(splitKvp[0], splitKvp[1])); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 7b9f021313a..63e51569b4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,15 +21,11 @@ import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -39,12 +34,14 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A utility to store user specific HConnections in memory. - * There is a chore to clean up connections idle for too long. - * This class is used by REST server and Thrift server to - * support authentication and impersonation. + * A utility to store user specific HConnections in memory. There is a chore to clean up connections + * idle for too long. This class is used by REST server and Thrift server to support authentication + * and impersonation. 
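ConnectionCache, whose javadoc is reflowed above, keeps one connection per effective user and expires idle ones with a chore; it is constructed a few lines below with a cleanup interval and a maximum idle time. A hedged construction sketch; the interval values are illustrative milliseconds and a reachable cluster (hbase-site.xml on the classpath) is assumed:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.security.UserProvider;
    import org.apache.hadoop.hbase.util.ConnectionCache;

    public class ConnectionCacheSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        UserProvider userProvider = UserProvider.instantiate(conf);

        int cleanIntervalMs = 10 * 1000;    // how often the cleaner chore runs
        int maxIdleTimeMs = 10 * 60 * 1000; // connections idle longer than this get closed
        ConnectionCache cache =
          new ConnectionCache(conf, userProvider, cleanIntervalMs, maxIdleTimeMs);

        // The cache manages and closes the Admin itself, so the caller does not close it here.
        Admin admin = cache.getAdmin();
        System.out.println("tables: " + admin.listTableNames().length);
      }
    }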
*/ @InterfaceAudience.Private public class ConnectionCache { @@ -58,27 +55,33 @@ public class ConnectionCache { private final Configuration conf; private final ChoreService choreService; - private final ThreadLocal effectiveUserNames = - new ThreadLocal() { + private final ThreadLocal effectiveUserNames = new ThreadLocal() { @Override protected String initialValue() { return realUserName; } }; - public ConnectionCache(final Configuration conf, - final UserProvider userProvider, - final int cleanInterval, final int maxIdleTime) throws IOException { + public ConnectionCache(final Configuration conf, final UserProvider userProvider, + final int cleanInterval, final int maxIdleTime) throws IOException { Stoppable stoppable = new Stoppable() { private volatile boolean isStopped = false; - @Override public void stop(String why) { isStopped = true;} - @Override public boolean isStopped() {return isStopped;} + + @Override + public void stop(String why) { + isStopped = true; + } + + @Override + public boolean isStopped() { + return isStopped; + } }; this.choreService = new ChoreService("ConnectionCache"); ScheduledChore cleaner = new ScheduledChore("ConnectionCleaner", stoppable, cleanInterval) { @Override protected void chore() { - for (Map.Entry entry: connections.entrySet()) { + for (Map.Entry entry : connections.entrySet()) { ConnectionInfo connInfo = entry.getValue(); if (connInfo.timedOut(maxIdleTime)) { if (connInfo.admin != null) { @@ -127,8 +130,7 @@ public class ConnectionCache { } /** - * Caller doesn't close the admin afterwards. - * We need to manage it and close it properly. + * Caller doesn't close the admin afterwards. We need to manage it and close it properly. */ public Admin getAdmin() throws IOException { ConnectionInfo connInfo = getCurrentConnection(); @@ -161,8 +163,7 @@ public class ConnectionCache { } /** - * Get the cached connection for the current user. - * If none or timed out, create a new one. + * Get the cached connection for the current user. If none or timed out, create a new one. */ ConnectionInfo getCurrentConnection() throws IOException { String userName = getEffectiveUser(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java index aba421d8078..66e3c332657 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; import java.lang.management.ManagementFactory; @@ -26,22 +24,20 @@ import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.util.List; import java.util.Locale; - import javax.management.JMException; import javax.management.MBeanServer; import javax.management.MalformedObjectNameException; import javax.management.ObjectName; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocatorMetric; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocatorMetricProvider; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; - /** * Utilities for interacting with and monitoring DirectByteBuffer allocations. */ @@ -82,17 +78,16 @@ public class DirectMemoryUtils { } /** - * @return the setting of -XX:MaxDirectMemorySize as a long. Returns 0 if - * -XX:MaxDirectMemorySize is not set. + * @return the setting of -XX:MaxDirectMemorySize as a long. Returns 0 if -XX:MaxDirectMemorySize + * is not set. */ public static long getDirectMemorySize() { RuntimeMXBean runtimemxBean = ManagementFactory.getRuntimeMXBean(); List arguments = runtimemxBean.getInputArguments(); - long multiplier = 1; //for the byte case. + long multiplier = 1; // for the byte case. for (String s : arguments) { if (s.contains("-XX:MaxDirectMemorySize=")) { - String memSize = s.toLowerCase(Locale.ROOT) - .replace("-xx:maxdirectmemorysize=", "").trim(); + String memSize = s.toLowerCase(Locale.ROOT).replace("-xx:maxdirectmemorysize=", "").trim(); if (memSize.contains("k")) { multiplier = 1024; @@ -133,29 +128,24 @@ public class DirectMemoryUtils { */ public static long getNettyDirectMemoryUsage() { - ByteBufAllocatorMetric metric = ((ByteBufAllocatorMetricProvider) - PooledByteBufAllocator.DEFAULT).metric(); + ByteBufAllocatorMetric metric = + ((ByteBufAllocatorMetricProvider) PooledByteBufAllocator.DEFAULT).metric(); return metric.usedDirectMemory(); } /** - * DirectByteBuffers are garbage collected by using a phantom reference and a - * reference queue. Every once a while, the JVM checks the reference queue and - * cleans the DirectByteBuffers. However, as this doesn't happen - * immediately after discarding all references to a DirectByteBuffer, it's - * easy to OutOfMemoryError yourself using DirectByteBuffers. This function - * explicitly calls the Cleaner method of a DirectByteBuffer. - * - * @param toBeDestroyed - * The DirectByteBuffer that will be "cleaned". Utilizes reflection. - * + * DirectByteBuffers are garbage collected by using a phantom reference and a reference queue. + * Every once a while, the JVM checks the reference queue and cleans the DirectByteBuffers. + * However, as this doesn't happen immediately after discarding all references to a + * DirectByteBuffer, it's easy to OutOfMemoryError yourself using DirectByteBuffers. This function + * explicitly calls the Cleaner method of a DirectByteBuffer. n * The DirectByteBuffer that will + * be "cleaned". Utilizes reflection. 
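The destroyDirectByteBuffer() javadoc above explains why a caller may want to free a direct buffer eagerly rather than wait for the phantom-reference cleanup. A minimal caller sketch, illustrative only; on newer JDKs the reflective cleaner() access may additionally require --add-opens:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.util.DirectMemoryUtils;

    public class DestroyDirectBufferSketch {
      public static void main(String[] args) throws Exception {
        // How much direct memory the JVM allows (0 if -XX:MaxDirectMemorySize is unset).
        System.out.println("max direct memory: " + DirectMemoryUtils.getDirectMemorySize());
        ByteBuffer buf = ByteBuffer.allocateDirect(4 * 1024);
        buf.putLong(42L);
        // ... use the buffer ...
        // Reflectively invokes the buffer's Cleaner instead of waiting for GC.
        DirectMemoryUtils.destroyDirectByteBuffer(buf);
      }
    }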
*/ public static void destroyDirectByteBuffer(ByteBuffer toBeDestroyed) - throws IllegalArgumentException, IllegalAccessException, - InvocationTargetException, SecurityException, NoSuchMethodException { + throws IllegalArgumentException, IllegalAccessException, InvocationTargetException, + SecurityException, NoSuchMethodException { - Preconditions.checkArgument(toBeDestroyed.isDirect(), - "toBeDestroyed isn't direct!"); + Preconditions.checkArgument(toBeDestroyed.isDirect(), "toBeDestroyed isn't direct!"); Method cleanerMethod = toBeDestroyed.getClass().getMethod("cleaner"); cleanerMethod.setAccessible(true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java index cee3b56d6f6..faacab9cb92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,17 +22,16 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider; import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class EncryptionTest { @@ -47,15 +45,11 @@ public class EncryptionTest { } /** - * Check that the configured key provider can be loaded and initialized, or - * throw an exception. - * - * @param conf - * @throws IOException + * Check that the configured key provider can be loaded and initialized, or throw an exception. nn */ public static void testKeyProvider(final Configuration conf) throws IOException { - String providerClassName = conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, - KeyStoreKeyProvider.class.getName()); + String providerClassName = + conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName()); Boolean result = keyProviderResults.get(providerClassName); if (result == null) { try { @@ -63,8 +57,8 @@ public class EncryptionTest { keyProviderResults.put(providerClassName, true); } catch (Exception e) { // most likely a RuntimeException keyProviderResults.put(providerClassName, false); - throw new IOException("Key provider " + providerClassName + " failed test: " + - e.getMessage(), e); + throw new IOException( + "Key provider " + providerClassName + " failed test: " + e.getMessage(), e); } } else if (!result) { throw new IOException("Key provider " + providerClassName + " previously failed test"); @@ -72,15 +66,12 @@ public class EncryptionTest { } /** - * Check that the configured cipher provider can be loaded and initialized, or - * throw an exception. 
- * - * @param conf - * @throws IOException + * Check that the configured cipher provider can be loaded and initialized, or throw an exception. + * nn */ public static void testCipherProvider(final Configuration conf) throws IOException { - String providerClassName = conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, - DefaultCipherProvider.class.getName()); + String providerClassName = + conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, DefaultCipherProvider.class.getName()); Boolean result = cipherProviderResults.get(providerClassName); if (result == null) { try { @@ -88,8 +79,8 @@ public class EncryptionTest { cipherProviderResults.put(providerClassName, true); } catch (Exception e) { // most likely a RuntimeException cipherProviderResults.put(providerClassName, false); - throw new IOException("Cipher provider " + providerClassName + " failed test: " + - e.getMessage(), e); + throw new IOException( + "Cipher provider " + providerClassName + " failed test: " + e.getMessage(), e); } } else if (!result) { throw new IOException("Cipher provider " + providerClassName + " previously failed test"); @@ -97,23 +88,22 @@ public class EncryptionTest { } /** - * Check that the specified cipher can be loaded and initialized, or throw - * an exception. Verifies key and cipher provider configuration as a - * prerequisite for cipher verification. Also verifies if encryption is enabled globally. - * - * @param conf HBase configuration + * Check that the specified cipher can be loaded and initialized, or throw an exception. Verifies + * key and cipher provider configuration as a prerequisite for cipher verification. Also verifies + * if encryption is enabled globally. + * @param conf HBase configuration * @param cipher chiper algorith to use for the column family - * @param key encryption key + * @param key encryption key * @throws IOException in case of encryption configuration error */ - public static void testEncryption(final Configuration conf, final String cipher, - byte[] key) throws IOException { + public static void testEncryption(final Configuration conf, final String cipher, byte[] key) + throws IOException { if (cipher == null) { return; } - if(!Encryption.isEncryptionEnabled(conf)) { - String message = String.format("Cipher %s failed test: encryption is disabled on the cluster", - cipher); + if (!Encryption.isEncryptionEnabled(conf)) { + String message = + String.format("Cipher %s failed test: encryption is disabled on the cluster", cipher); throw new IOException(message); } testKeyProvider(conf); @@ -129,8 +119,7 @@ public class EncryptionTest { } else { // This will be a wrapped key from schema context.setKey(EncryptionUtil.unwrapKey(conf, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), - key)); + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), key)); } byte[] iv = null; if (context.getCipher().getIvLength() > 0) { @@ -143,8 +132,8 @@ public class EncryptionTest { Encryption.encrypt(out, new ByteArrayInputStream(plaintext), context, iv); byte[] ciphertext = out.toByteArray(); out.reset(); - Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, - context, iv); + Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, context, + iv); byte[] test = out.toByteArray(); if (!Bytes.equals(plaintext, test)) { throw new IOException("Did not pass encrypt/decrypt test"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java index 04a33846871..0cbb33e8c36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,22 +20,18 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Thread that walks over the filesystem, and computes the mappings - * Region -> BestHost and Region -> {@code Map} - * + * Thread that walks over the filesystem, and computes the mappings Region -> BestHost and Region -> + * {@code Map} */ @InterfaceAudience.Private class FSRegionScanner implements Runnable { @@ -52,17 +47,16 @@ class FSRegionScanner implements Runnable { /** * Maps each region to the RS with highest locality for that region. */ - private final Map regionToBestLocalityRSMapping; + private final Map regionToBestLocalityRSMapping; /** - * Maps region encoded names to maps of hostnames to fractional locality of - * that region on that host. + * Maps region encoded names to maps of hostnames to fractional locality of that region on that + * host. 
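The two maps above are filled by counting, per host, how many of a region's HDFS blocks that host serves. A standalone sketch of the fraction-of-blocks arithmetic using only the Hadoop FileSystem API; the class and method names here are illustrative, not part of FSRegionScanner:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LocalitySketch {
      /** Fraction of the file's blocks that each host serves, e.g. {host1=0.75, host2=0.25}. */
      public static Map<String, Float> hostLocality(FileSystem fs, Path file) throws IOException {
        FileStatus status = fs.getFileStatus(file);
        BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
        Map<String, Float> locality = new HashMap<>();
        if (blocks == null) { // mirrors the null check in FSRegionScanner.run()
          return locality;
        }
        Map<String, Integer> perHost = new HashMap<>();
        for (BlockLocation blk : blocks) {
          for (String host : blk.getHosts()) {
            perHost.merge(host, 1, Integer::sum);
          }
        }
        for (Map.Entry<String, Integer> e : perHost.entrySet()) {
          locality.put(e.getKey(), (float) e.getValue() / blocks.length);
        }
        return locality;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        System.out.println(hostLocality(fs, new Path(args[0])));
      }
    }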
*/ private Map> regionDegreeLocalityMapping; - FSRegionScanner(FileSystem fs, Path regionPath, - Map regionToBestLocalityRSMapping, - Map> regionDegreeLocalityMapping) { + FSRegionScanner(FileSystem fs, Path regionPath, Map regionToBestLocalityRSMapping, + Map> regionDegreeLocalityMapping) { this.fs = fs; this.regionPath = regionPath; this.regionToBestLocalityRSMapping = regionToBestLocalityRSMapping; @@ -75,7 +69,7 @@ class FSRegionScanner implements Runnable { // empty the map for each region Map blockCountMap = new HashMap<>(); - //get table name + // get table name String tableName = regionPath.getParent().getName(); int totalBlkCount = 0; @@ -98,15 +92,14 @@ class FSRegionScanner implements Runnable { } for (FileStatus storeFile : storeFileLists) { - BlockLocation[] blkLocations = - fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); + BlockLocation[] blkLocations = fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); if (null == blkLocations) { continue; } totalBlkCount += blkLocations.length; - for(BlockLocation blk: blkLocations) { - for (String host: blk.getHosts()) { + for (BlockLocation blk : blkLocations) { + for (String host : blk.getHosts()) { AtomicInteger count = blockCountMap.get(host); if (count == null) { count = new AtomicInteger(0); @@ -137,11 +130,11 @@ class FSRegionScanner implements Runnable { } if (hostToRun.endsWith(".")) { - hostToRun = hostToRun.substring(0, hostToRun.length()-1); + hostToRun = hostToRun.substring(0, hostToRun.length() - 1); } String name = tableName + ":" + regionPath.getName(); synchronized (regionToBestLocalityRSMapping) { - regionToBestLocalityRSMapping.put(name, hostToRun); + regionToBestLocalityRSMapping.put(name, hostToRun); } } @@ -153,7 +146,7 @@ class FSRegionScanner implements Runnable { host = host.substring(0, host.length() - 1); } // Locality is fraction of blocks local to this host. - float locality = ((float)entry.getValue().get()) / totalBlkCount; + float locality = ((float) entry.getValue().get()) / totalBlkCount; hostLocalityMap.put(host, locality); } // Put the locality map into the result map, keyed by the encoded name diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index ff555a8c40d..38300df5568 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,22 +57,19 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; /** - * Implementation of {@link TableDescriptors} that reads descriptors from the - * passed filesystem. It expects descriptors to be in a file in the - * {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only - * -- i.e. does not modify the filesystem or can be read and write. - * - *

      Also has utility for keeping up the table descriptors tableinfo file. - * The table schema file is kept in the {@link #TABLEINFO_DIR} subdir - * of the table directory in the filesystem. - * It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the - * edit sequenceid: e.g. .tableinfo.0000000003. This sequenceid - * is always increasing. It starts at zero. The table schema file with the - * highest sequenceid has the most recent schema edit. Usually there is one file - * only, the most recent but there may be short periods where there are more - * than one file. Old files are eventually cleaned. Presumption is that there - * will not be lots of concurrent clients making table schema edits. If so, - * the below needs a bit of a reworking and perhaps some supporting api in hdfs. + * Implementation of {@link TableDescriptors} that reads descriptors from the passed filesystem. It + * expects descriptors to be in a file in the {@link #TABLEINFO_DIR} subdir of the table's directory + * in FS. Can be read-only -- i.e. does not modify the filesystem or can be read and write. + *

      + * Also has utility for keeping up the table descriptors tableinfo file. The table schema file is + * kept in the {@link #TABLEINFO_DIR} subdir of the table directory in the filesystem. It has a + * {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the edit sequenceid: e.g. + * .tableinfo.0000000003. This sequenceid is always increasing. It starts at zero. The + * table schema file with the highest sequenceid has the most recent schema edit. Usually there is + * one file only, the most recent but there may be short periods where there are more than one file. + * Old files are eventually cleaned. Presumption is that there will not be lots of concurrent + * clients making table schema edits. If so, the below needs a bit of a reworking and perhaps some + * supporting api in hdfs. */ @InterfaceAudience.Private public class FSTableDescriptors implements TableDescriptors { @@ -93,7 +90,7 @@ public class FSTableDescriptors implements TableDescriptors { public static final String TABLEINFO_DIR = ".tabledesc"; - // This cache does not age out the old stuff. Thinking is that the amount + // This cache does not age out the old stuff. Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. private final Map cache = new ConcurrentHashMap<>(); @@ -111,7 +108,7 @@ public class FSTableDescriptors implements TableDescriptors { } public FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean fsreadonly, - final boolean usecache) { + final boolean usecache) { this.fs = fs; this.rootdir = rootdir; this.fsreadonly = fsreadonly; @@ -144,29 +141,21 @@ public class FSTableDescriptors implements TableDescriptors { return td; } - public static ColumnFamilyDescriptor getTableFamilyDescForMeta( - final Configuration conf) { - return ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.TABLE_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build(); + public static ColumnFamilyDescriptor getTableFamilyDescForMeta(final Configuration conf) { + return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setInMemory(true).setBlocksize(8 * 1024).setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) + .setBloomFilterType(BloomType.ROWCOL).build(); } public static ColumnFamilyDescriptor getReplBarrierFamilyDescForMeta() { - return ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) - .setMaxVersions(HConstants.ALL_VERSIONS) - .setInMemory(true) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build(); + return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) + .setMaxVersions(HConstants.ALL_VERSIONS).setInMemory(true) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) + .setBloomFilterType(BloomType.ROWCOL).build(); } public static TableDescriptorBuilder 
createMetaTableDescriptorBuilder(final Configuration conf) @@ -176,21 +165,18 @@ public class FSTableDescriptors implements TableDescriptors { // we have to rethink about adding back the setCacheDataInL1 for META table CFs. return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true) - .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, - HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) + .setBlocksize( + conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) .setScope(HConstants.REPLICATION_SCOPE_LOCAL) .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build()) + .setBloomFilterType(BloomType.ROWCOL).build()) .setColumnFamily(getTableFamilyDescForMeta(conf)) - .setColumnFamily(getReplBarrierFamilyDescForMeta()) - .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder( - MultiRowMutationEndpoint.class.getName()) - .setPriority(Coprocessor.PRIORITY_SYSTEM) - .build()); + .setColumnFamily(getReplBarrierFamilyDescForMeta()).setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName()) + .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); } protected boolean isUsecache() { @@ -242,7 +228,7 @@ public class FSTableDescriptors implements TableDescriptors { public Map getAll() throws IOException { Map tds = new TreeMap<>(); if (fsvisited) { - for (Map.Entry entry: this.cache.entrySet()) { + for (Map.Entry entry : this.cache.entrySet()) { tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue()); } } else { @@ -262,9 +248,9 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Find descriptors by namespace. - * @see #get(org.apache.hadoop.hbase.TableName) - */ + * Find descriptors by namespace. + * @see #get(org.apache.hadoop.hbase.TableName) + */ @Override public Map getByNamespace(String name) throws IOException { Map htds = new TreeMap<>(); @@ -299,7 +285,7 @@ public class FSTableDescriptors implements TableDescriptors { } @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") Path updateTableDescriptor(TableDescriptor td) throws IOException { TableName tableName = td.getTableName(); Path tableDir = getTableDir(tableName); @@ -313,9 +299,8 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Removes the table descriptor from the local cache and returns it. - * If not in read only mode, it also deletes the entire table directory(!) - * from the FileSystem. + * Removes the table descriptor from the local cache and returns it. If not in read only mode, it + * also deletes the entire table directory(!) from the FileSystem. */ @Override public TableDescriptor remove(final TableName tablename) throws IOException { @@ -362,7 +347,8 @@ public class FSTableDescriptors implements TableDescriptors { public boolean accept(Path p) { // Accept any file that starts with TABLEINFO_NAME return p.getName().startsWith(TABLEINFO_FILE_PREFIX); - }}; + } + }; /** * Width of the sequenceid that is a suffix on a tableinfo file. 
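The tableinfo naming convention described in the class comment above, .tableinfo.<zero-padded sequence id>.<descriptor length>, can be restated as a small standalone sketch; the constants mirror TABLEINFO_FILE_PREFIX and the sequence-id width used by formatTableInfoSequenceId()/getTableInfoFileName() in the hunks below:

    import java.nio.charset.StandardCharsets;

    public class TableInfoNameSketch {
      private static final String PREFIX = ".tableinfo"; // TABLEINFO_FILE_PREFIX
      private static final int WIDTH = 10;               // width of the zero-padded sequence id

      /** Zero-pads the edit sequence id, e.g. 3 -> "0000000003". */
      static String format(int sequenceId) {
        byte[] b = new byte[WIDTH];
        int d = Math.abs(sequenceId);
        for (int i = b.length - 1; i >= 0; i--) {
          b[i] = (byte) ((d % 10) + '0');
          d /= 10;
        }
        return new String(b, StandardCharsets.US_ASCII);
      }

      /** Builds the file name: prefix, padded sequence id, then the descriptor's byte length. */
      static String fileName(int sequenceId, byte[] descriptorBytes) {
        return PREFIX + "." + format(sequenceId) + "." + descriptorBytes.length;
      }

      public static void main(String[] args) {
        System.out.println(fileName(3, new byte[1234])); // .tableinfo.0000000003.1234
      }
    }

Running it prints .tableinfo.0000000003.1234, the shape shown in the javadoc example.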
@@ -375,10 +361,10 @@ public class FSTableDescriptors implements TableDescriptors { * negative). */ private static String formatTableInfoSequenceId(final int number) { - byte [] b = new byte[WIDTH_OF_SEQUENCE_ID]; + byte[] b = new byte[WIDTH_OF_SEQUENCE_ID]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return Bytes.toString(b); @@ -401,7 +387,7 @@ public class FSTableDescriptors implements TableDescriptors { * @param p Path to a .tableinfo file. */ @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") static SequenceIdAndFileLength getTableInfoSequenceIdAndFileLength(Path p) { String name = p.getName(); if (!name.startsWith(TABLEINFO_FILE_PREFIX)) { @@ -427,19 +413,18 @@ public class FSTableDescriptors implements TableDescriptors { * Returns Name of tableinfo file. */ @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") static String getTableInfoFileName(int sequenceId, byte[] content) { - return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceId) + "." + - content.length; + return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceId) + "." + + content.length; } /** - * Returns the latest table descriptor for the given table directly from the file system - * if it exists, bypassing the local cache. - * Returns null if it's not found. + * Returns the latest table descriptor for the given table directly from the file system if it + * exists, bypassing the local cache. Returns null if it's not found. */ - public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, - Path hbaseRootDir, TableName tableName) throws IOException { + public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, + TableName tableName) throws IOException { Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); return getTableDescriptorFromFs(fs, tableDir); } @@ -512,7 +497,7 @@ public class FSTableDescriptors implements TableDescriptors { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public static void deleteTableDescriptors(FileSystem fs, Path tableDir) throws IOException { Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE); @@ -557,15 +542,16 @@ public class FSTableDescriptors implements TableDescriptors { // In proc v2 we have table lock so typically, there will be no concurrent writes. Keep the // retry logic here since we may still want to write the table descriptor from for example, // HBCK2? - int currentSequenceId = currentDescriptorFile == null ? 0 : - getTableInfoSequenceIdAndFileLength(currentDescriptorFile.getPath()).sequenceId; + int currentSequenceId = currentDescriptorFile == null + ? 
0 + : getTableInfoSequenceIdAndFileLength(currentDescriptorFile.getPath()).sequenceId; // Put arbitrary upperbound on how often we retry int maxAttempts = 10; int maxSequenceId = currentSequenceId + maxAttempts; byte[] bytes = TableDescriptorBuilder.toByteArray(td); - for (int newSequenceId = - currentSequenceId + 1; newSequenceId <= maxSequenceId; newSequenceId++) { + for (int newSequenceId = currentSequenceId + 1; newSequenceId + <= maxSequenceId; newSequenceId++) { String fileName = getTableInfoFileName(newSequenceId, bytes); Path filePath = new Path(tableInfoDir, fileName); try (FSDataOutputStream out = fs.create(filePath, false)) { @@ -584,8 +570,7 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Create new TableDescriptor in HDFS. Happens when we are creating table. - * Used by tests. + * Create new TableDescriptor in HDFS. Happens when we are creating table. Used by tests. * @return True if we successfully created file. */ public boolean createTableDescriptor(TableDescriptor htd) throws IOException { @@ -593,32 +578,30 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Create new TableDescriptor in HDFS. Happens when we are creating table. If - * forceCreation is true then even if previous table descriptor is present it - * will be overwritten - * + * Create new TableDescriptor in HDFS. Happens when we are creating table. If forceCreation is + * true then even if previous table descriptor is present it will be overwritten * @return True if we successfully created file. */ public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation) - throws IOException { + throws IOException { Path tableDir = getTableDir(htd.getTableName()); return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation); } /** - * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create - * a new table during cluster start or in Clone and Create Table Procedures. Checks readOnly flag + * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create a + * new table during cluster start or in Clone and Create Table Procedures. Checks readOnly flag * passed on construction. - * @param tableDir table directory under which we should write the file - * @param htd description of the table to write + * @param tableDir table directory under which we should write the file + * @param htd description of the table to write * @param forceCreation if true,then even if previous table descriptor is present it will - * be overwritten + * be overwritten * @return true if the we successfully created the file, false if the file * already exists and we weren't forcing the descriptor creation. * @throws IOException if a filesystem error occurs */ public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescriptor htd, - boolean forceCreation) throws IOException { + boolean forceCreation) throws IOException { if (this.fsreadonly) { throw new NotImplementedException("Cannot create a table descriptor - in read only mode"); } @@ -626,13 +609,13 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create - * a new table snapshoting. Does not enforce read-only. That is for caller to determine. - * @param fs Filesystem to use. 
- * @param tableDir table directory under which we should write the file - * @param htd description of the table to write + * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create a + * new table snapshoting. Does not enforce read-only. That is for caller to determine. + * @param fs Filesystem to use. + * @param tableDir table directory under which we should write the file + * @param htd description of the table to write * @param forceCreation if true,then even if previous table descriptor is present it will - * be overwritten + * be overwritten * @return true if the we successfully created the file, false if the file * already exists and we weren't forcing the descriptor creation. * @throws IOException if a filesystem error occurs @@ -652,4 +635,3 @@ public class FSTableDescriptors implements TableDescriptors { return writeTableDescriptor(fs, htd, tableDir, opt.map(Pair::getFirst).orElse(null)) != null; } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 90a969389fd..e03de07dc75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -92,11 +91,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos; @@ -119,26 +118,24 @@ public final class FSUtils { } /** - * @return True is fs is instance of DistributedFileSystem - * @throws IOException + * @return True is fs is instance of DistributedFileSystem n */ public static boolean isDistributedFileSystem(final FileSystem fs) throws IOException { FileSystem fileSystem = fs; // If passed an instance of HFileSystem, it fails instanceof DistributedFileSystem. // Check its backing fs for dfs-ness. if (fs instanceof HFileSystem) { - fileSystem = ((HFileSystem)fs).getBackingFs(); + fileSystem = ((HFileSystem) fs).getBackingFs(); } return fileSystem instanceof DistributedFileSystem; } /** * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the - * '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider + * '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider * schema; i.e. if schemas different but path or subpath matches, the two will equate. - * @param pathToSearch Path we will be trying to match. - * @param pathTail - * @return True if pathTail is tail on the path of pathToSearch + * @param pathToSearch Path we will be trying to match. 
n * @return True if pathTail + * is tail on the path of pathToSearch */ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) { Path tailPath = pathTail; @@ -161,17 +158,16 @@ public final class FSUtils { if (toSearchName == null || toSearchName.isEmpty()) { break; } - // Move up a parent on each path for next go around. Path doesn't let us go off the end. + // Move up a parent on each path for next go around. Path doesn't let us go off the end. tailPath = tailPath.getParent(); toSearch = toSearch.getParent(); - } while(tailName.equals(toSearchName)); + } while (tailName.equals(toSearchName)); return result; } /** * Delete the region directory if exists. - * @return True if deleted the region directory. - * @throws IOException + * @return True if deleted the region directory. n */ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo hri) throws IOException { @@ -181,7 +177,7 @@ public final class FSUtils { new Path(CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri.getEncodedName())); } - /** + /** * Create the specified file on the filesystem. By default, this will: *

*
* overwrite the file if it exists
@@ -192,10 +188,10 @@ *
* use the default block size
*
* not track progress
*
      - * @param conf configurations - * @param fs {@link FileSystem} on which to write the file - * @param path {@link Path} to the file to write - * @param perm permissions + * @param conf configurations + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param perm permissions * @param favoredNodes favored data nodes * @return output stream to the created file * @throws IOException if the file cannot be created @@ -232,12 +228,10 @@ public final class FSUtils { /** * Checks to see if the specified file system is available - * * @param fs filesystem * @throws IOException e */ - public static void checkFileSystemAvailable(final FileSystem fs) - throws IOException { + public static void checkFileSystemAvailable(final FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { return; } @@ -248,8 +242,7 @@ public final class FSUtils { return; } } catch (IOException e) { - exception = e instanceof RemoteException ? - ((RemoteException)e).unwrapRemoteException() : e; + exception = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e; } try { fs.close(); @@ -261,26 +254,21 @@ public final class FSUtils { /** * Inquire the Active NameNode's safe mode status. - * * @param dfs A DistributedFileSystem object representing the underlying HDFS. - * @return whether we're in safe mode - * @throws IOException + * @return whether we're in safe mode n */ private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException { return dfs.setSafeMode(SAFEMODE_GET, true); } /** - * Check whether dfs is in safemode. - * @param conf - * @throws IOException + * Check whether dfs is in safemode. nn */ - public static void checkDfsSafeMode(final Configuration conf) - throws IOException { + public static void checkDfsSafeMode(final Configuration conf) throws IOException { boolean isInSafeMode = false; FileSystem fs = FileSystem.get(conf); if (fs instanceof DistributedFileSystem) { - DistributedFileSystem dfs = (DistributedFileSystem)fs; + DistributedFileSystem dfs = (DistributedFileSystem) fs; isInSafeMode = isInSafeMode(dfs); } if (isInSafeMode) { @@ -290,15 +278,14 @@ public final class FSUtils { /** * Verifies current version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir root hbase directory * @return null if no version file exists, version string otherwise - * @throws IOException if the version file fails to open + * @throws IOException if the version file fails to open * @throws DeserializationException if the version data cannot be translated into a version */ public static String getVersion(FileSystem fs, Path rootdir) - throws IOException, DeserializationException { + throws IOException, DeserializationException { final Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); FileStatus[] status = null; try { @@ -312,7 +299,7 @@ public final class FSUtils { return null; } String version = null; - byte [] content = new byte [(int)status[0].getLen()]; + byte[] content = new byte[(int) status[0].getLen()]; FSDataInputStream s = fs.open(versionFile); try { IOUtils.readFully(s, content, 0, content.length); @@ -338,8 +325,7 @@ public final class FSUtils { * @return The version found in the file as a String * @throws DeserializationException if the version data cannot be translated into a version */ - static String parseVersionFrom(final byte [] bytes) - throws DeserializationException { + static String parseVersionFrom(final 
byte[] bytes) throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(bytes); int pblen = ProtobufUtil.lengthOfPBMagic(); FSProtos.HBaseVersionFileContent.Builder builder = @@ -356,9 +342,10 @@ public final class FSUtils { /** * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file. * @param version Version to persist - * @return Serialized protobuf with version content and a bit of pb magic for a prefix. + * @return Serialized protobuf with version content and a bit of pb magic for a + * prefix. */ - static byte [] toVersionByteArray(final String version) { + static byte[] toVersionByteArray(final String version) { FSProtos.HBaseVersionFileContent.Builder builder = FSProtos.HBaseVersionFileContent.newBuilder(); return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray()); @@ -366,33 +353,29 @@ public final class FSUtils { /** * Verifies current version of file system - * - * @param fs file system + * @param fs file system * @param rootdir root directory of HBase installation * @param message if true, issues a message on System.out - * @throws IOException if the version file cannot be opened + * @throws IOException if the version file cannot be opened * @throws DeserializationException if the contents of the version file cannot be parsed */ public static void checkVersion(FileSystem fs, Path rootdir, boolean message) - throws IOException, DeserializationException { + throws IOException, DeserializationException { checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); } /** * Verifies current version of file system - * - * @param fs file system + * @param fs file system * @param rootdir root directory of HBase installation * @param message if true, issues a message on System.out - * @param wait wait interval + * @param wait wait interval * @param retries number of times to retry - * - * @throws IOException if the version file cannot be opened + * @throws IOException if the version file cannot be opened * @throws DeserializationException if the contents of the version file cannot be parsed */ - public static void checkVersion(FileSystem fs, Path rootdir, - boolean message, int wait, int retries) - throws IOException, DeserializationException { + public static void checkVersion(FileSystem fs, Path rootdir, boolean message, int wait, + int retries) throws IOException, DeserializationException { String version = getVersion(fs, rootdir); String msg; if (version == null) { @@ -402,17 +385,17 @@ public final class FSUtils { setVersion(fs, rootdir, wait, retries); return; } else { - msg = "hbase.version file is missing. Is your hbase.rootdir valid? " + - "You can restore hbase.version file by running 'HBCK2 filesystem -fix'. " + - "See https://github.com/apache/hbase-operator-tools/tree/master/hbase-hbck2"; + msg = "hbase.version file is missing. Is your hbase.rootdir valid? " + + "You can restore hbase.version file by running 'HBCK2 filesystem -fix'. " + + "See https://github.com/apache/hbase-operator-tools/tree/master/hbase-hbck2"; } } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) { return; } else { - msg = "HBase file layout needs to be upgraded. Current filesystem version is " + version + - " but software requires version " + HConstants.FILE_SYSTEM_VERSION + - ". Consult http://hbase.apache.org/book.html for further information about " + - "upgrading HBase."; + msg = "HBase file layout needs to be upgraded. 
Current filesystem version is " + version + + " but software requires version " + HConstants.FILE_SYSTEM_VERSION + + ". Consult http://hbase.apache.org/book.html for further information about " + + "upgrading HBase."; } // version is deprecated require migration @@ -425,47 +408,42 @@ public final class FSUtils { /** * Sets version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir hbase root * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir) - throws IOException { + public static void setVersion(FileSystem fs, Path rootdir) throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); } /** * Sets version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir hbase root - * @param wait time to wait for retry + * @param wait time to wait for retry * @param retries number of times to retry before failing * @throws IOException e */ public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries) - throws IOException { + throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries); } - /** * Sets version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir hbase root directory * @param version version to set - * @param wait time to wait for retry + * @param wait time to wait for retry * @param retries number of times to retry before throwing an IOException * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir, String version, - int wait, int retries) throws IOException { + public static void setVersion(FileSystem fs, Path rootdir, String version, int wait, int retries) + throws IOException { Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); - Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR + - HConstants.VERSION_FILE_NAME); + Path tempVersionFile = new Path(rootdir, + HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR + HConstants.VERSION_FILE_NAME); while (true) { try { // Write the version to a temporary file @@ -487,7 +465,8 @@ public final class FSUtils { // Attempt to close the stream on the way out if it is still open. 
try { if (s != null) s.close(); - } catch (IOException ignore) { } + } catch (IOException ignore) { + } } LOG.info("Created version file at " + rootdir.toString() + " with version=" + version); return; @@ -500,7 +479,7 @@ public final class FSUtils { Thread.sleep(wait); } } catch (InterruptedException ie) { - throw (InterruptedIOException)new InterruptedIOException().initCause(ie); + throw (InterruptedIOException) new InterruptedIOException().initCause(ie); } retries--; } else { @@ -512,14 +491,14 @@ public final class FSUtils { /** * Checks that a cluster ID file exists in the HBase root directory - * @param fs the root directory FileSystem + * @param fs the root directory FileSystem * @param rootdir the HBase root directory in HDFS - * @param wait how long to wait between retries + * @param wait how long to wait between retries * @return true if the file exists, otherwise false * @throws IOException if checking the FileSystem fails */ - public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, - long wait) throws IOException { + public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, long wait) + throws IOException { while (true) { try { Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); @@ -542,25 +521,24 @@ public final class FSUtils { /** * Returns the value of the unique cluster ID stored for this HBase instance. - * @param fs the root directory FileSystem + * @param fs the root directory FileSystem * @param rootdir the path to the HBase root directory * @return the unique cluster identifier * @throws IOException if reading the cluster ID file fails */ - public static ClusterId getClusterId(FileSystem fs, Path rootdir) - throws IOException { + public static ClusterId getClusterId(FileSystem fs, Path rootdir) throws IOException { Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); ClusterId clusterId = null; - FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null; + FileStatus status = fs.exists(idPath) ? fs.getFileStatus(idPath) : null; if (status != null) { int len = Ints.checkedCast(status.getLen()); - byte [] content = new byte[len]; + byte[] content = new byte[len]; FSDataInputStream in = fs.open(idPath); try { in.readFully(content); } catch (EOFException eof) { LOG.warn("Cluster ID file {} is empty", idPath); - } finally{ + } finally { in.close(); } try { @@ -590,13 +568,11 @@ public final class FSUtils { } /** - * @param cid - * @throws IOException + * nn */ private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p, - final ClusterId cid) - throws IOException { - // Rewrite the file as pb. Move aside the old one first, write new + final ClusterId cid) throws IOException { + // Rewrite the file as pb. Move aside the old one first, write new // then delete the moved-aside file. Path movedAsideName = new Path(p + "." 
+ EnvironmentEdgeManager.currentTime()); if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p); @@ -608,21 +584,21 @@ public final class FSUtils { } /** - * Writes a new unique identifier for this cluster to the "hbase.id" file - * in the HBase root directory - * @param fs the root directory FileSystem - * @param rootdir the path to the HBase root directory + * Writes a new unique identifier for this cluster to the "hbase.id" file in the HBase root + * directory + * @param fs the root directory FileSystem + * @param rootdir the path to the HBase root directory * @param clusterId the unique identifier to store - * @param wait how long (in milliseconds) to wait between retries + * @param wait how long (in milliseconds) to wait between retries * @throws IOException if writing to the FileSystem fails and no wait value */ - public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId, - int wait) throws IOException { + public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId, int wait) + throws IOException { while (true) { try { Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); - Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + - Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME); + Path tempIdFile = new Path(rootdir, + HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME); // Write the id file to a temporary location FSDataOutputStream s = fs.create(tempIdFile); try { @@ -638,7 +614,8 @@ public final class FSUtils { // Attempt to close the stream if still open on the way out try { if (s != null) s.close(); - } catch (IOException ignore) { } + } catch (IOException ignore) { + } } if (LOG.isDebugEnabled()) { LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId); @@ -646,12 +623,12 @@ public final class FSUtils { return; } catch (IOException ioe) { if (wait > 0) { - LOG.warn("Unable to create cluster ID file in " + rootdir.toString() + - ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe)); + LOG.warn("Unable to create cluster ID file in " + rootdir.toString() + ", retrying in " + + wait + "msec: " + StringUtils.stringifyException(ioe)); try { Thread.sleep(wait); } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } } else { throw ioe; @@ -666,12 +643,10 @@ public final class FSUtils { * @param wait Sleep between retries * @throws IOException e */ - public static void waitOnSafeMode(final Configuration conf, - final long wait) - throws IOException { + public static void waitOnSafeMode(final Configuration conf, final long wait) throws IOException { FileSystem fs = FileSystem.get(conf); if (!(fs instanceof DistributedFileSystem)) return; - DistributedFileSystem dfs = (DistributedFileSystem)fs; + DistributedFileSystem dfs = (DistributedFileSystem) fs; // Make sure dfs is not in safe mode while (isInSafeMode(dfs)) { LOG.info("Waiting for dfs to exit safe mode..."); @@ -686,7 +661,7 @@ public final class FSUtils { /** * Checks if meta region exists - * @param fs file system + * @param fs file system * @param rootDir root directory of HBase installation * @return true if exists */ @@ -696,17 +671,16 @@ public final class FSUtils { } /** - * Compute HDFS block distribution of a given HdfsDataInputStream. 
All HdfsDataInputStreams - * are backed by a series of LocatedBlocks, which are fetched periodically from the namenode. - * This method retrieves those blocks from the input stream and uses them to calculate - * HDFSBlockDistribution. - * - * The underlying method in DFSInputStream does attempt to use locally cached blocks, but - * may hit the namenode if the cache is determined to be incomplete. The method also involves - * making copies of all LocatedBlocks rather than return the underlying blocks themselves. + * Compute HDFS block distribution of a given HdfsDataInputStream. All HdfsDataInputStreams are + * backed by a series of LocatedBlocks, which are fetched periodically from the namenode. This + * method retrieves those blocks from the input stream and uses them to calculate + * HDFSBlockDistribution. The underlying method in DFSInputStream does attempt to use locally + * cached blocks, but may hit the namenode if the cache is determined to be incomplete. The method + * also involves making copies of all LocatedBlocks rather than return the underlying blocks + * themselves. */ - public static HDFSBlocksDistribution computeHDFSBlocksDistribution( - HdfsDataInputStream inputStream) throws IOException { + public static HDFSBlocksDistribution + computeHDFSBlocksDistribution(HdfsDataInputStream inputStream) throws IOException { List blocks = inputStream.getAllBlocks(); HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); for (LocatedBlock block : blocks) { @@ -729,18 +703,16 @@ public final class FSUtils { /** * Compute HDFS blocks distribution of a given file, or a portion of the file - * @param fs file system + * @param fs file system * @param status file status of the file - * @param start start position of the portion + * @param start start position of the portion * @param length length of the portion * @return The HDFS blocks distribution */ - static public HDFSBlocksDistribution computeHDFSBlocksDistribution( - final FileSystem fs, FileStatus status, long start, long length) - throws IOException { + static public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs, + FileStatus status, long start, long length) throws IOException { HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); - BlockLocation [] blockLocations = - fs.getFileBlockLocations(status, start, length); + BlockLocation[] blockLocations = fs.getFileBlockLocations(status, start, length); addToHDFSBlocksDistribution(blocksDistribution, blockLocations); return blocksDistribution; } @@ -748,11 +720,10 @@ public final class FSUtils { /** * Update blocksDistribution with blockLocations * @param blocksDistribution the hdfs blocks distribution - * @param blockLocations an array containing block location + * @param blockLocations an array containing block location */ - static public void addToHDFSBlocksDistribution( - HDFSBlocksDistribution blocksDistribution, BlockLocation[] blockLocations) - throws IOException { + static public void addToHDFSBlocksDistribution(HDFSBlocksDistribution blocksDistribution, + BlockLocation[] blockLocations) throws IOException { for (BlockLocation bl : blockLocations) { String[] hosts = bl.getHosts(); long len = bl.getLength(); @@ -763,27 +734,22 @@ public final class FSUtils { // TODO move this method OUT of FSUtils. No dependencies to HMaster /** - * Returns the total overall fragmentation percentage. Includes hbase:meta and - * -ROOT- as well. 
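getTableFragmentation(), which continues in the hunk below, treats a column family as fragmented when its store holds more than one file, and reports a rounded percentage per table plus a "-TOTAL-" entry. A standalone sketch of just that arithmetic; the Map<String, int[]> input shape is a stand-in for the per-store file counts the real code gathers from the filesystem:

    import java.util.HashMap;
    import java.util.Map;

    public class FragmentationSketch {
      /** Input: table name -> store file count per column family directory. */
      public static Map<String, Integer> fragmentation(Map<String, int[]> storeFileCounts) {
        Map<String, Integer> frags = new HashMap<>();
        int cfCountTotal = 0;
        int cfFragTotal = 0;
        for (Map.Entry<String, int[]> e : storeFileCounts.entrySet()) {
          int cfCount = 0;
          int cfFrag = 0;
          for (int files : e.getValue()) {
            cfCount++;
            cfCountTotal++;
            if (files > 1) { // more than one store file means the store is "fragmented"
              cfFrag++;
              cfFragTotal++;
            }
          }
          frags.put(e.getKey(), cfCount == 0 ? 0 : Math.round((float) cfFrag / cfCount * 100));
        }
        frags.put("-TOTAL-",
          cfCountTotal == 0 ? 0 : Math.round((float) cfFragTotal / cfCountTotal * 100));
        return frags;
      }

      public static void main(String[] args) {
        Map<String, int[]> counts = new HashMap<>();
        counts.put("t1", new int[] { 1, 3 }); // one of two stores fragmented -> 50
        counts.put("t2", new int[] { 1, 1 }); // nothing fragmented -> 0
        System.out.println(fragmentation(counts)); // also prints -TOTAL-=25
      }
    }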
- * - * @param master The master defining the HBase root and file system + * Returns the total overall fragmentation percentage. Includes hbase:meta and -ROOT- as well. + * @param master The master defining the HBase root and file system * @return A map for each table and its percentage (never null) * @throws IOException When scanning the directory fails */ - public static int getTotalTableFragmentation(final HMaster master) - throws IOException { + public static int getTotalTableFragmentation(final HMaster master) throws IOException { Map map = getTableFragmentation(master); - return map.isEmpty() ? -1 : map.get("-TOTAL-"); + return map.isEmpty() ? -1 : map.get("-TOTAL-"); } /** - * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and hbase:meta too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param master The master defining the HBase root and file system. + * Runs through the HBase rootdir and checks how many stores for each table have more than one + * file in them. Checks -ROOT- and hbase:meta too. The total percentage across all tables is + * stored under the special key "-TOTAL-". + * @param master The master defining the HBase root and file system. * @return A map for each table and its percentage (never null). - * * @throws IOException When scanning the directory fails. */ public static Map getTableFragmentation(final HMaster master) @@ -795,18 +761,16 @@ public final class FSUtils { } /** - * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and hbase:meta too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param fs The file system to use - * @param hbaseRootDir The root directory to scan + * Runs through the HBase rootdir and checks how many stores for each table have more than one + * file in them. Checks -ROOT- and hbase:meta too. The total percentage across all tables is + * stored under the special key "-TOTAL-". + * @param fs The file system to use + * @param hbaseRootDir The root directory to scan * @return A map for each table and its percentage (never null) * @throws IOException When scanning the directory fails */ - public static Map getTableFragmentation( - final FileSystem fs, final Path hbaseRootDir) - throws IOException { + public static Map getTableFragmentation(final FileSystem fs, + final Path hbaseRootDir) throws IOException { Map frags = new HashMap<>(); int cfCountTotal = 0; int cfFragTotal = 0; @@ -835,11 +799,11 @@ public final class FSUtils { } // compute percentage per table and store in result list frags.put(CommonFSUtils.getTableName(d).getNameAsString(), - cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100)); + cfCount == 0 ? 0 : Math.round((float) cfFrag / cfCount * 100)); } // set overall percentage for all tables frags.put("-TOTAL-", - cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100)); + cfCountTotal == 0 ? 0 : Math.round((float) cfFragTotal / cfCountTotal * 100)); return frags; } @@ -873,16 +837,16 @@ public final class FSUtils { /** * Create a filter on the givem filesystem with the specified blacklist - * @param fs filesystem to filter + * @param fs filesystem to filter * @param directoryNameBlackList list of the names of the directories to filter. 
If - * null, all directories are returned + * null, all directories are returned */ @SuppressWarnings("unchecked") public BlackListDirFilter(final FileSystem fs, final List directoryNameBlackList) { this.fs = fs; - blacklist = - (List) (directoryNameBlackList == null ? Collections.emptyList() - : directoryNameBlackList); + blacklist = (List) (directoryNameBlackList == null + ? Collections.emptyList() + : directoryNameBlackList); } @Override @@ -895,7 +859,7 @@ public final class FSUtils { return isDirectory(fs, isDir, p); } catch (IOException e) { LOG.warn("An error occurred while verifying if [{}] is a valid directory." - + " Returning 'not valid' and continuing.", p, e); + + " Returning 'not valid' and continuing.", p, e); return false; } } @@ -926,8 +890,7 @@ public final class FSUtils { @Override protected boolean isValidName(final String name) { - if (!super.isValidName(name)) - return false; + if (!super.isValidName(name)) return false; try { TableName.isLegalTableQualifierName(Bytes.toBytes(name)); @@ -940,7 +903,7 @@ public final class FSUtils { } public static List getTableDirs(final FileSystem fs, final Path rootdir) - throws IOException { + throws IOException { List tableDirs = new ArrayList<>(); Path baseNamespaceDir = new Path(rootdir, HConstants.BASE_NAMESPACE_DIR); if (fs.exists(baseNamespaceDir)) { @@ -952,18 +915,15 @@ public final class FSUtils { } /** - * @param fs - * @param rootdir - * @return All the table directories under rootdir. Ignore non table hbase folders such as - * .logs, .oldlogs, .corrupt folders. - * @throws IOException + * nn * @return All the table directories under rootdir. Ignore non table hbase + * folders such as .logs, .oldlogs, .corrupt folders. n */ public static List getLocalTableDirs(final FileSystem fs, final Path rootdir) - throws IOException { + throws IOException { // presumes any directory under hbase.rootdir is a table FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs)); List tabledirs = new ArrayList<>(dirs.length); - for (FileStatus dir: dirs) { + for (FileStatus dir : dirs) { tabledirs.add(dir.getPath()); } return tabledirs; @@ -1000,19 +960,19 @@ public final class FSUtils { /** * Given a particular table dir, return all the regiondirs inside it, excluding files such as * .tableinfo - * @param fs A file system for the Path + * @param fs A file system for the Path * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir> - * @return List of paths to valid region directories in table dir. - * @throws IOException + * @return List of paths to valid region directories in table dir. n */ - public static List getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException { + public static List getRegionDirs(final FileSystem fs, final Path tableDir) + throws IOException { // assumes we are in a table dir. 
List rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (rds == null) { return Collections.emptyList(); } List regionDirs = new ArrayList<>(rds.size()); - for (FileStatus rdfs: rds) { + for (FileStatus rdfs : rds) { Path rdPath = rdfs.getPath(); regionDirs.add(rdPath); } @@ -1025,7 +985,7 @@ public final class FSUtils { public static Path getRegionDirFromTableDir(Path tableDir, RegionInfo region) { return getRegionDirFromTableDir(tableDir, - ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName()); + ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName()); } public static Path getRegionDirFromTableDir(Path tableDir, String encodedRegionName) { @@ -1033,8 +993,8 @@ public final class FSUtils { } /** - * Filter for all dirs that are legal column family names. This is generally used for colfam - * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>. + * Filter for all dirs that are legal column family names. This is generally used for colfam dirs + * <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>. */ public static class FamilyDirFilter extends AbstractFileStatusFilter { final FileSystem fs; @@ -1065,33 +1025,31 @@ public final class FSUtils { /** * Given a particular region dir, return all the familydirs inside it - * - * @param fs A file system for the Path + * @param fs A file system for the Path * @param regionDir Path to a specific region directory - * @return List of paths to valid family directories in region dir. - * @throws IOException + * @return List of paths to valid family directories in region dir. n */ public static List getFamilyDirs(final FileSystem fs, final Path regionDir) - throws IOException { + throws IOException { // assumes we are in a region dir. return getFilePaths(fs, regionDir, new FamilyDirFilter(fs)); } public static List getReferenceFilePaths(final FileSystem fs, final Path familyDir) - throws IOException { + throws IOException { return getFilePaths(fs, familyDir, new ReferenceFileFilter(fs)); } public static List getReferenceAndLinkFilePaths(final FileSystem fs, final Path familyDir) - throws IOException { + throws IOException { return getFilePaths(fs, familyDir, new ReferenceAndLinkFileFilter(fs)); } private static List getFilePaths(final FileSystem fs, final Path dir, - final PathFilter pathFilter) throws IOException { + final PathFilter pathFilter) throws IOException { FileStatus[] fds = fs.listStatus(dir, pathFilter); List files = new ArrayList<>(fds.length); - for (FileStatus fdfs: fds) { + for (FileStatus fdfs : fds) { Path fdPath = fdfs.getPath(); files.add(fdPath); } @@ -1122,11 +1080,11 @@ public final class FSUtils { public boolean accept(Path rd) { try { // only files can be references. - return !fs.getFileStatus(rd).isDirectory() && (StoreFileInfo.isReference(rd) || - HFileLink.isHFileLink(rd)); + return !fs.getFileStatus(rd).isDirectory() + && (StoreFileInfo.isReference(rd) || HFileLink.isHFileLink(rd)); } catch (IOException ioe) { // Maybe the file was moved or the fs was disconnected. - LOG.warn("Skipping file " + rd +" due to IOException", ioe); + LOG.warn("Skipping file " + rd + " due to IOException", ioe); return false; } } @@ -1159,8 +1117,8 @@ public final class FSUtils { } /** - * Filter for HFileLinks (StoreFiles and HFiles not included). - * the filter itself does not consider if a link is file or not. + * Filter for HFileLinks (StoreFiles and HFiles not included). the filter itself does not consider + * if a link is file or not. 
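
To show how the region, family and reference-file helpers touched above fit together, here is a small sketch, not part of this patch, that counts reference files per region of one table. The generic return types (List<Path>) are elided in the flattened diff and assumed here, as is CommonFSUtils.getRootDir(conf); the table name argument is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.CommonFSUtils;
    import org.apache.hadoop.hbase.util.FSUtils;

    public final class ReferenceFileCountExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        Path rootDir = CommonFSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        // Resolve the table directory under hbase.rootdir for a hypothetical table name argument.
        Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.valueOf(args[0]));
        for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
          int refs = 0;
          for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
            refs += FSUtils.getReferenceFilePaths(fs, familyDir).size();
          }
          System.out.println(regionDir.getName() + " reference files: " + refs);
        }
      }
    }
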
*/ public static class HFileLinkFilter implements PathFilter { @@ -1196,8 +1154,7 @@ public final class FSUtils { } /** - * Called every so-often by storefile map builder getTableStoreFilePathMap to - * report progress. + * Called every so-often by storefile map builder getTableStoreFilePathMap to report progress. */ interface ProgressReporter { /** @@ -1207,107 +1164,103 @@ public final class FSUtils { } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
      + * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile + * names to the full Path.
      * Example...
      - * Key = 3944417774205889744
      + * Key = 3944417774205889744
      * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param map map to add values. If null, this method will create and populate one to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param tableName name of the table to scan. + * @param map map to add values. If null, this method will create and populate one to + * return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param tableName name of the table to scan. * @return Map keyed by StoreFile name with a value of the full Path. - * @throws IOException When scanning the directory fails. - * @throws InterruptedException + * @throws IOException When scanning the directory fails. n */ public static Map getTableStoreFilePathMap(Map map, - final FileSystem fs, final Path hbaseRootDir, TableName tableName) - throws IOException, InterruptedException { + final FileSystem fs, final Path hbaseRootDir, TableName tableName) + throws IOException, InterruptedException { return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null, null, - (ProgressReporter)null); + (ProgressReporter) null); } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. Note that because this method can be called - * on a 'live' HBase system that we will skip files that no longer exist by the time - * we traverse them and similarly the user of the result needs to consider that some - * entries in this map may not exist by the time this call completes. - *
      + * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile + * names to the full Path. Note that because this method can be called on a 'live' HBase system, + * we will skip files that no longer exist by the time we traverse them; similarly, the user of + * the result needs to consider that some entries in this map may not exist by the time this call + * completes.

      * Example...
      - * Key = 3944417774205889744
      + * Key = 3944417774205889744
      * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param resultMap map to add values. If null, this method will create and populate one to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param tableName name of the table to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param resultMap map to add values. If null, this method will create and populate one to + * return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param tableName name of the table to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. * @deprecated Since 2.3.0. For removal in hbase4. Use ProgressReporter override instead. */ @Deprecated public static Map getTableStoreFilePathMap(Map resultMap, - final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, - ExecutorService executor, final HbckErrorReporter progressReporter) - throws IOException, InterruptedException { + final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, + ExecutorService executor, final HbckErrorReporter progressReporter) + throws IOException, InterruptedException { return getTableStoreFilePathMap(resultMap, fs, hbaseRootDir, tableName, sfFilter, executor, - new ProgressReporter() { - @Override - public void progress(FileStatus status) { - // status is not used in this implementation. - progressReporter.progress(); - } - }); + new ProgressReporter() { + @Override + public void progress(FileStatus status) { + // status is not used in this implementation. + progressReporter.progress(); + } + }); } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. Note that because this method can be called - * on a 'live' HBase system that we will skip files that no longer exist by the time - * we traverse them and similarly the user of the result needs to consider that some - * entries in this map may not exist by the time this call completes. - *
      + * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile + * names to the full Path. Note that because this method can be called on a 'live' HBase system, + * we will skip files that no longer exist by the time we traverse them; similarly, the user of + * the result needs to consider that some entries in this map may not exist by the time this call + * completes.

      * Example...
      - * Key = 3944417774205889744
      + * Key = 3944417774205889744
      * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param resultMap map to add values. If null, this method will create and populate one - * to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param tableName name of the table to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param resultMap map to add values. If null, this method will create and populate one to + * return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param tableName name of the table to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. - * @throws IOException When scanning the directory fails. + * @throws IOException When scanning the directory fails. * @throws InterruptedException the thread is interrupted, either before or during the activity. */ public static Map getTableStoreFilePathMap(Map resultMap, - final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, - ExecutorService executor, final ProgressReporter progressReporter) + final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, + ExecutorService executor, final ProgressReporter progressReporter) throws IOException, InterruptedException { final Map finalResultMap = - resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap; + resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap; // only include the directory paths to tables Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); - // Inside a table, there are compaction.dir directories to skip. Otherwise, all else + // Inside a table, there are compaction.dir directories to skip. Otherwise, all else // should be regions. 
final FamilyDirFilter familyFilter = new FamilyDirFilter(fs); final Vector exceptions = new Vector<>(); try { - List regionDirs = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); + List regionDirs = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (regionDirs == null) { return finalResultMap; } @@ -1328,8 +1281,9 @@ public final class FSUtils { @Override public void run() { try { - HashMap regionStoreFileMap = new HashMap<>(); - List familyDirs = FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter); + HashMap regionStoreFileMap = new HashMap<>(); + List familyDirs = + FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter); if (familyDirs == null) { if (!fs.exists(dd)) { LOG.warn("Skipping region because it no longer exists: " + dd); @@ -1355,7 +1309,7 @@ public final class FSUtils { } Path sf = sfStatus.getPath(); if (sfFilter == null || sfFilter.accept(sf)) { - regionStoreFileMap.put( sf.getName(), sf); + regionStoreFileMap.put(sf.getName(), sf); } } } @@ -1409,7 +1363,7 @@ public final class FSUtils { public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) { int result = 0; try { - for (Path familyDir:getFamilyDirs(fs, p)){ + for (Path familyDir : getFamilyDirs(fs, p)) { result += getReferenceFilePaths(fs, familyDir).size(); } } catch (IOException e) { @@ -1419,80 +1373,69 @@ public final class FSUtils { } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
      + * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to + * the full Path.
      * Example...
      - * Key = 3944417774205889744
      + * Key = 3944417774205889744
      * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. */ public static Map getTableStoreFilePathMap(final FileSystem fs, - final Path hbaseRootDir) - throws IOException, InterruptedException { - return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, (ProgressReporter)null); + final Path hbaseRootDir) throws IOException, InterruptedException { + return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, (ProgressReporter) null); } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
      + * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to + * the full Path.
      * Example...
      - * Key = 3944417774205889744
      + * Key = 3944417774205889744
      * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. - * @deprecated Since 2.3.0. Will be removed in hbase4. Used {@link - * #getTableStoreFilePathMap(FileSystem, Path, PathFilter, ExecutorService, ProgressReporter)} + * @deprecated Since 2.3.0. Will be removed in hbase4. Used + * {@link #getTableStoreFilePathMap(FileSystem, Path, PathFilter, ExecutorService, ProgressReporter)} */ @Deprecated public static Map getTableStoreFilePathMap(final FileSystem fs, - final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, - HbckErrorReporter progressReporter) - throws IOException, InterruptedException { - return getTableStoreFilePathMap(fs, hbaseRootDir, sfFilter, executor, - new ProgressReporter() { - @Override - public void progress(FileStatus status) { - // status is not used in this implementation. - progressReporter.progress(); - } - }); + final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, + HbckErrorReporter progressReporter) throws IOException, InterruptedException { + return getTableStoreFilePathMap(fs, hbaseRootDir, sfFilter, executor, new ProgressReporter() { + @Override + public void progress(FileStatus status) { + // status is not used in this implementation. + progressReporter.progress(); + } + }); } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
      + * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to + * the full Path.
      * Example...
      - * Key = 3944417774205889744
      + * Key = 3944417774205889744
      * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. - * @throws IOException When scanning the directory fails. - * @throws InterruptedException + * @throws IOException When scanning the directory fails. n */ - public static Map getTableStoreFilePathMap( - final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter, - ExecutorService executor, ProgressReporter progressReporter) - throws IOException, InterruptedException { + public static Map getTableStoreFilePathMap(final FileSystem fs, + final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, + ProgressReporter progressReporter) throws IOException, InterruptedException { ConcurrentHashMap map = new ConcurrentHashMap<>(1024, 0.75f, 32); // if this method looks similar to 'getTableFragmentation' that is because @@ -1508,26 +1451,23 @@ public final class FSUtils { /** * Filters FileStatuses in an array and returns a list - * - * @param input An array of FileStatuses - * @param filter A required filter to filter the array - * @return A list of FileStatuses + * @param input An array of FileStatuses + * @param filter A required filter to filter the array + * @return A list of FileStatuses */ - public static List filterFileStatuses(FileStatus[] input, - FileStatusFilter filter) { + public static List filterFileStatuses(FileStatus[] input, FileStatusFilter filter) { if (input == null) return null; return filterFileStatuses(Iterators.forArray(input), filter); } /** * Filters FileStatuses in an iterator and returns a list - * - * @param input An iterator of FileStatuses - * @param filter A required filter to filter the array - * @return A list of FileStatuses + * @param input An iterator of FileStatuses + * @param filter A required filter to filter the array + * @return A list of FileStatuses */ public static List filterFileStatuses(Iterator input, - FileStatusFilter filter) { + FileStatusFilter filter) { if (input == null) return null; ArrayList results = new ArrayList<>(); while (input.hasNext()) { @@ -1540,19 +1480,17 @@ public final class FSUtils { } /** - * Calls fs.listStatus() and treats FileNotFoundException as non-fatal - * This accommodates differences between hadoop versions, where hadoop 1 - * does not throw a FileNotFoundException, and return an empty FileStatus[] - * while Hadoop 2 will throw FileNotFoundException. - * - * @param fs file system - * @param dir directory + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates + * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and + * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException. 
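
A short usage sketch for the reverse lookup described above, not part of this patch: it builds the store-file-name to Path map for one table with the simple overload shown earlier in these hunks. The Map<String, Path> typing is an assumption (generics are elided in the flattened diff), and the table name argument is hypothetical.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.CommonFSUtils;
    import org.apache.hadoop.hbase.util.FSUtils;

    public final class StoreFileMapExample {
      public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        Path rootDir = CommonFSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        // Passing null lets the utility create and return the map, per the javadoc above;
        // the table name argument is hypothetical.
        Map<String, Path> storeFiles =
          FSUtils.getTableStoreFilePathMap(null, fs, rootDir, TableName.valueOf(args[0]));
        storeFiles.forEach((name, path) -> System.out.println(name + " -> " + path));
      }
    }
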
+ * @param fs file system + * @param dir directory * @param filter file status filter * @return null if dir is empty or doesn't exist, otherwise FileStatus list */ - public static List listStatusWithStatusFilter(final FileSystem fs, - final Path dir, final FileStatusFilter filter) throws IOException { - FileStatus [] status = null; + public static List listStatusWithStatusFilter(final FileSystem fs, final Path dir, + final FileStatusFilter filter) throws IOException { + FileStatus[] status = null; try { status = fs.listStatus(dir); } catch (FileNotFoundException fnfe) { @@ -1560,7 +1498,7 @@ public final class FSUtils { return null; } - if (ArrayUtils.getLength(status) == 0) { + if (ArrayUtils.getLength(status) == 0) { return null; } @@ -1577,67 +1515,42 @@ public final class FSUtils { } /** - * This function is to scan the root path of the file system to get the - * degree of locality for each region on each of the servers having at least - * one block of that region. - * This is used by the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer} - * - * @param conf - * the configuration to use - * @return the mapping from region encoded name to a map of server names to - * locality fraction - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get the degree of locality for + * each region on each of the servers having at least one block of that region. This is used by + * the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer} n * the configuration + * to use + * @return the mapping from region encoded name to a map of server names to locality fraction n * + * in case of file system errors or interrupts */ - public static Map> getRegionDegreeLocalityMappingFromFS( - final Configuration conf) throws IOException { - return getRegionDegreeLocalityMappingFromFS( - conf, null, - conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE)); + public static Map> + getRegionDegreeLocalityMappingFromFS(final Configuration conf) throws IOException { + return getRegionDegreeLocalityMappingFromFS(conf, null, + conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE)); } /** - * This function is to scan the root path of the file system to get the - * degree of locality for each region on each of the servers having at least - * one block of that region. - * - * @param conf - * the configuration to use - * @param desiredTable - * the table you wish to scan locality for - * @param threadPoolSize - * the thread pool size to use - * @return the mapping from region encoded name to a map of server names to - * locality fraction - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get the degree of locality for + * each region on each of the servers having at least one block of that region. 
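
A minimal sketch of the locality scan documented above, not part of this patch. The generic types are elided in the flattened diff; Map<String, Map<String, Float>>, encoded region name to per-server locality fraction, is an assumption that follows the javadoc wording.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.FSUtils;

    public final class RegionLocalityExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Scans hbase.rootdir with the default thread pool size (the THREAD_POOLSIZE setting
        // referenced above).
        Map<String, Map<String, Float>> locality =
          FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
        locality.forEach((encodedRegion, perServer) ->
          System.out.println(encodedRegion + " -> " + perServer));
      }
    }
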
n * the + * configuration to use n * the table you wish to scan locality for n * the thread pool size to + * use + * @return the mapping from region encoded name to a map of server names to locality fraction n * + * in case of file system errors or interrupts */ public static Map> getRegionDegreeLocalityMappingFromFS( - final Configuration conf, final String desiredTable, int threadPoolSize) - throws IOException { + final Configuration conf, final String desiredTable, int threadPoolSize) throws IOException { Map> regionDegreeLocalityMapping = new ConcurrentHashMap<>(); getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, regionDegreeLocalityMapping); return regionDegreeLocalityMapping; } /** - * This function is to scan the root path of the file system to get either the - * mapping between the region name and its best locality region server or the - * degree of locality of each region on each of the servers having at least - * one block of that region. The output map parameters are both optional. - * - * @param conf - * the configuration to use - * @param desiredTable - * the table you wish to scan locality for - * @param threadPoolSize - * the thread pool size to use - * @param regionDegreeLocalityMapping - * the map into which to put the locality degree mapping or null, - * must be a thread-safe implementation - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get either the mapping between the + * region name and its best locality region server or the degree of locality of each region on + * each of the servers having at least one block of that region. The output map parameters are + * both optional. n * the configuration to use n * the table you wish to scan locality for n * the + * thread pool size to use n * the map into which to put the locality degree mapping or null, must + * be a thread-safe implementation n * in case of file system errors or interrupts */ private static void getRegionLocalityMappingFromFS(final Configuration conf, final String desiredTable, int threadPoolSize, @@ -1718,13 +1631,12 @@ public final class FSUtils { try { // here we wait until TPE terminates, which is either naturally or by // exceptions in the execution of the threads - while (!tpe.awaitTermination(threadWakeFrequency, - TimeUnit.MILLISECONDS)) { + while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) { // printing out rough estimate, so as to not introduce // AtomicInteger LOG.info("Locality checking is underway: { Scanned Regions : " - + ((ThreadPoolExecutor) tpe).getCompletedTaskCount() + "/" - + ((ThreadPoolExecutor) tpe).getTaskCount() + " }"); + + ((ThreadPoolExecutor) tpe).getCompletedTaskCount() + "/" + + ((ThreadPoolExecutor) tpe).getTaskCount() + " }"); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -1737,9 +1649,8 @@ public final class FSUtils { } /** - * Do our short circuit read setup. - * Checks buffer size to use and whether to do checksumming in hbase or hdfs. - * @param conf + * Do our short circuit read setup. Checks buffer size to use and whether to do checksumming in + * hbase or hdfs. n */ public static void setupShortCircuitRead(final Configuration conf) { // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property. 
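
To make the short-circuit read setup reformatted in the surrounding hunks concrete, here is a small sketch, not from this patch, of the configuration keys involved. It assumes the DFS buffer-size key is dfs.client.read.shortcircuit.buffer.size; the "hbase."-prefixed override and the checksum-skip warning follow the code in these hunks.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.FSUtils;

    public final class ShortCircuitReadSetupExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keep checksum skipping off; setupShortCircuitRead() warns (and asserts) if it is true
        // while HBase-side checksumming is enabled.
        conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
        // Optional HBase-side default copied into the DFS buffer-size key when that key is unset
        // (the "hbase." + dfsKey lookup in checkShortCircuitReadBufferSize); the DFS key name is
        // assumed to be dfs.client.read.shortcircuit.buffer.size.
        conf.setInt("hbase.dfs.client.read.shortcircuit.buffer.size", 131072);
        FSUtils.setupShortCircuitRead(conf);
        System.out.println("short-circuit read buffer size = "
          + conf.get("dfs.client.read.shortcircuit.buffer.size"));
      }
    }
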
@@ -1747,17 +1658,19 @@ public final class FSUtils { conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false); boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); if (shortCircuitSkipChecksum) { - LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " + - "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " + - "it, see https://issues.apache.org/jira/browse/HBASE-6868." : "")); - assert !shortCircuitSkipChecksum; //this will fail if assertions are on + LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " + + "be set to true." + + (useHBaseChecksum + ? " HBase checksum doesn't require " + + "it, see https://issues.apache.org/jira/browse/HBASE-6868." + : "")); + assert !shortCircuitSkipChecksum; // this will fail if assertions are on } checkShortCircuitReadBufferSize(conf); } /** - * Check if short circuit read buffer size is set and if not, set it to hbase value. - * @param conf + * Check if short circuit read buffer size is set and if not, set it to hbase value. n */ public static void checkShortCircuitReadBufferSize(final Configuration conf) { final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2; @@ -1767,18 +1680,17 @@ public final class FSUtils { int size = conf.getInt(dfsKey, notSet); // If a size is set, return -- we will use it. if (size != notSet) return; - // But short circuit buffer size is normally not set. Put in place the hbase wanted size. + // But short circuit buffer size is normally not set. Put in place the hbase wanted size. int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize); conf.setIfUnset(dfsKey, Integer.toString(hbaseSize)); } /** - * @param c - * @return The DFSClient DFSHedgedReadMetrics instance or null if can't be found or not on hdfs. - * @throws IOException + * n * @return The DFSClient DFSHedgedReadMetrics instance or null if can't be found or not on + * hdfs. n */ public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c) - throws IOException { + throws IOException { if (!CommonFSUtils.isHDFS(c)) { return null; } @@ -1786,31 +1698,31 @@ public final class FSUtils { // to the DFS FS instance and make the method getHedgedReadMetrics accessible, then invoke it // to get the singleton instance of DFSHedgedReadMetrics shared by DFSClients. 
final String name = "getHedgedReadMetrics"; - DFSClient dfsclient = ((DistributedFileSystem)FileSystem.get(c)).getClient(); + DFSClient dfsclient = ((DistributedFileSystem) FileSystem.get(c)).getClient(); Method m; try { m = dfsclient.getClass().getDeclaredMethod(name); } catch (NoSuchMethodException e) { - LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn( + "Failed find method " + name + " in dfsclient; no hedged read metrics: " + e.getMessage()); return null; } catch (SecurityException e) { - LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn( + "Failed find method " + name + " in dfsclient; no hedged read metrics: " + e.getMessage()); return null; } m.setAccessible(true); try { - return (DFSHedgedReadMetrics)m.invoke(dfsclient); + return (DFSHedgedReadMetrics) m.invoke(dfsclient); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " + + e.getMessage()); return null; } } public static List copyFilesParallel(FileSystem srcFS, Path src, FileSystem dstFS, Path dst, - Configuration conf, int threads) throws IOException { + Configuration conf, int threads) throws IOException { ExecutorService pool = Executors.newFixedThreadPool(threads); List> futures = new ArrayList<>(); List traversedPaths; @@ -1828,7 +1740,7 @@ public final class FSUtils { } private static List copyFiles(FileSystem srcFS, Path src, FileSystem dstFS, Path dst, - Configuration conf, ExecutorService pool, List> futures) throws IOException { + Configuration conf, ExecutorService pool, List> futures) throws IOException { List traversedPaths = new ArrayList<>(); traversedPaths.add(dst); FileStatus currentFileStatus = srcFS.getFileStatus(src); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java index c3858aeccf0..edf7f5fed2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +36,7 @@ public final class FSVisitor { public interface StoreFileVisitor { void storeFile(final String region, final String family, final String hfileName) - throws IOException; + throws IOException; } private FSVisitor() { @@ -46,15 +45,15 @@ public final class FSVisitor { /** * Iterate over the table store files - * - * @param fs {@link FileSystem} + * @param fs {@link FileSystem} * @param tableDir {@link Path} to the table directory - * @param visitor callback object to get the store files + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, - final StoreFileVisitor visitor) throws IOException { - List regions = FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + final StoreFileVisitor visitor) throws IOException { + List regions = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { if (LOG.isTraceEnabled()) { LOG.trace("No regions under directory:" + tableDir); @@ -62,22 +61,22 @@ public final class FSVisitor { return; } - for (FileStatus region: regions) { + for (FileStatus region : regions) { visitRegionStoreFiles(fs, region.getPath(), visitor); } } /** * Iterate over the region store files - * - * @param fs {@link FileSystem} + * @param fs {@link FileSystem} * @param regionDir {@link Path} to the region directory - * @param visitor callback object to get the store files + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, - final StoreFileVisitor visitor) throws IOException { - List families = FSUtils.listStatusWithStatusFilter(fs, regionDir, new FSUtils.FamilyDirFilter(fs)); + final StoreFileVisitor visitor) throws IOException { + List families = + FSUtils.listStatusWithStatusFilter(fs, regionDir, new FSUtils.FamilyDirFilter(fs)); if (families == null) { if (LOG.isTraceEnabled()) { LOG.trace("No families under region directory:" + regionDir); @@ -86,7 +85,7 @@ public final class FSVisitor { } PathFilter fileFilter = new FSUtils.FileFilter(fs); - for (FileStatus family: families) { + for (FileStatus family : families) { Path familyDir = family.getPath(); String familyName = familyDir.getName(); @@ -99,7 +98,7 @@ public final class FSVisitor { continue; } - for (FileStatus hfile: storeFiles) { + for (FileStatus hfile : storeFiles) { Path hfilePath = hfile.getPath(); visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java index e757fca8e5b..e57d0c1814b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,20 +17,17 @@ */ package org.apache.hadoop.hbase.util; +import org.apache.hadoop.fs.FileStatus; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.fs.FileStatus; @InterfaceAudience.Private @InterfaceStability.Evolving public interface FileStatusFilter { /** - * Tests whether or not the specified filestatus should be - * included in a filestatus list. - * - * @param f The filestatus to be tested - * @return true if and only if the filestatus - * should be included + * Tests whether or not the specified filestatus should be included in a filestatus list. + * @param f The filestatus to be tested + * @return true if and only if the filestatus should be included */ boolean accept(FileStatus f); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java index 2d4de3b4d52..552ac6c9f87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,15 +26,16 @@ import org.apache.yetus.audience.InterfaceAudience; public final class GetJavaProperty { public static void main(String args[]) { if (args.length == 0) { - for (Object prop: System.getProperties().keySet()) { - System.out.println(prop + "=" + System.getProperty((String)prop, "")); + for (Object prop : System.getProperties().keySet()) { + System.out.println(prop + "=" + System.getProperty((String) prop, "")); } } else { - for (String prop: args) { + for (String prop : args) { System.out.println(System.getProperty(prop, "")); } } } - private GetJavaProperty() {} + private GetJavaProperty() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java index 44dd9776d3e..91cdff76b3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.yetus.audience.InterfaceAudience; /** - * Tool that prints out a configuration. - * Pass the configuration key on the command-line. + * Tool that prints out a configuration. Pass the configuration key on the command-line. 
*/ @InterfaceAudience.Private public class HBaseConfTool { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 4fcdd376e9d..f6cf34823ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -56,7 +56,6 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; - import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -117,8 +116,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; import org.apache.hadoop.hbase.util.HbckErrorReporter.ERROR_CODE; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; @@ -135,67 +132,62 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Joiner; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; + /** - * HBaseFsck (hbck) is a tool for checking and repairing region consistency and - * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not - * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'. - * Even though it can 'read' state, given how so much has changed in how hbase1 and hbase2 operate, - * it will often misread. See hbck2 (HBASE-19121) for a hbck tool for hbase2. This class is - * deprecated. - * + * HBaseFsck (hbck) is a tool for checking and repairing region consistency and table integrity + * problems in a corrupted HBase. This tool was written for hbase-1.x. 
It does not work with + * hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'. Even + * though it can 'read' state, given how so much has changed in how hbase1 and hbase2 operate, it + * will often misread. See hbck2 (HBASE-19121) for a hbck tool for hbase2. This class is deprecated. *

      - * Region consistency checks verify that hbase:meta, region deployment on region - * servers and the state of data in HDFS (.regioninfo files) all are in - * accordance. + * Region consistency checks verify that hbase:meta, region deployment on region servers and the + * state of data in HDFS (.regioninfo files) all are in accordance. *

      - * Table integrity checks verify that all possible row keys resolve to exactly - * one region of a table. This means there are no individual degenerate - * or backwards regions; no holes between regions; and that there are no - * overlapping regions. + * Table integrity checks verify that all possible row keys resolve to exactly one region of a + * table. This means there are no individual degenerate or backwards regions; no holes between + * regions; and that there are no overlapping regions. *

      * The general repair strategy works in two phases: *

        - *
      1. Repair Table Integrity on HDFS. (merge or fabricate regions) - *
      2. Repair Region Consistency with hbase:meta and assignments + *
      1. Repair Table Integrity on HDFS. (merge or fabricate regions) + *

      2. Repair Region Consistency with hbase:meta and assignments *

      *

      - * For table integrity repairs, the tables' region directories are scanned - * for .regioninfo files. Each table's integrity is then verified. If there - * are any orphan regions (regions with no .regioninfo files) or holes, new - * regions are fabricated. Backwards regions are sidelined as well as empty - * degenerate (endkey==startkey) regions. If there are any overlapping regions, - * a new region is created and all data is merged into the new region. + * For table integrity repairs, the tables' region directories are scanned for .regioninfo files. + * Each table's integrity is then verified. If there are any orphan regions (regions with no + * .regioninfo files) or holes, new regions are fabricated. Backwards regions are sidelined as well + * as empty degenerate (endkey==startkey) regions. If there are any overlapping regions, a new + * region is created and all data is merged into the new region. *

      - * Table integrity repairs deal solely with HDFS and could potentially be done - * offline -- the hbase region servers or master do not need to be running. - * This phase can eventually be used to completely reconstruct the hbase:meta table in - * an offline fashion. + * Table integrity repairs deal solely with HDFS and could potentially be done offline -- the hbase + * region servers or master do not need to be running. This phase can eventually be used to + * completely reconstruct the hbase:meta table in an offline fashion. *

      - * Region consistency requires three conditions -- 1) valid .regioninfo file - * present in an HDFS region dir, 2) valid row with .regioninfo data in META, - * and 3) a region is deployed only at the regionserver that was assigned to - * with proper state in the master. + * Region consistency requires three conditions -- 1) valid .regioninfo file present in an HDFS + * region dir, 2) valid row with .regioninfo data in META, and 3) a region is deployed only at the + * regionserver that was assigned to with proper state in the master. *

      - * Region consistency repairs require hbase to be online so that hbck can - * contact the HBase master and region servers. The hbck#connect() method must - * first be called successfully. Much of the region consistency information - * is transient and less risky to repair. + * Region consistency repairs require hbase to be online so that hbck can contact the HBase master + * and region servers. The hbck#connect() method must first be called successfully. Much of the + * region consistency information is transient and less risky to repair. *

      - * If hbck is run from the command line, there are a handful of arguments that - * can be used to limit the kinds of repairs hbck will do. See the code in - * {@link #printUsageAndExit()} for more details. + * If hbck is run from the command line, there are a handful of arguments that can be used to limit + * the kinds of repairs hbck will do. See the code in {@link #printUsageAndExit()} for more details. * @deprecated For removal in hbase-4.0.0. Use HBCK2 instead. */ @Deprecated @@ -210,8 +202,8 @@ public class HBaseFsck extends Configured implements Closeable { private static final int DEFAULT_MAX_MERGE = 5; /** - * Here is where hbase-1.x used to default the lock for hbck1. - * It puts in place a lock when it goes to write/make changes. + * Here is where hbase-1.x used to default the lock for hbck1. It puts in place a lock when it + * goes to write/make changes. */ @InterfaceAudience.Private public static final String HBCK_LOCK_FILE = "hbase-hbck.lock"; @@ -250,9 +242,9 @@ public class HBaseFsck extends Configured implements Closeable { // Unsupported options in HBase 2.0+ private static final Set unsupportedOptionsInV2 = Sets.newHashSet("-fix", - "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans", - "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents", - "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge"); + "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans", + "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents", + "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge"); /*********** * Options @@ -300,23 +292,20 @@ public class HBaseFsck extends Configured implements Closeable { int fixes = 0; /** - * This map contains the state of all hbck items. It maps from encoded region - * name to HbckRegionInfo structure. The information contained in HbckRegionInfo is used - * to detect and correct consistency (hdfs/meta/deployment) problems. + * This map contains the state of all hbck items. It maps from encoded region name to + * HbckRegionInfo structure. The information contained in HbckRegionInfo is used to detect and + * correct consistency (hdfs/meta/deployment) problems. */ private TreeMap regionInfoMap = new TreeMap<>(); // Empty regioninfo qualifiers in hbase:meta private Set emptyRegionInfoQualifiers = new HashSet<>(); /** - * This map from Tablename -> TableInfo contains the structures necessary to - * detect table consistency problems (holes, dupes, overlaps). It is sorted - * to prevent dupes. - * - * If tablesIncluded is empty, this map contains all tables. - * Otherwise, it contains only meta tables and tables in tablesIncluded, - * unless checkMetaOnly is specified, in which case, it contains only - * the meta table + * This map from Tablename -> TableInfo contains the structures necessary to detect table + * consistency problems (holes, dupes, overlaps). It is sorted to prevent dupes. If tablesIncluded + * is empty, this map contains all tables. 
Otherwise, it contains only meta tables and tables in + * tablesIncluded, unless checkMetaOnly is specified, in which case, it contains only the meta + * table */ private SortedMap tablesInfo = new ConcurrentSkipListMap<>(); @@ -338,9 +327,8 @@ public class HBaseFsck extends Configured implements Closeable { /** * Constructor - * * @param conf Configuration object - * @throws MasterNotRunningException if the master is not running + * @throws MasterNotRunningException if the master is not running * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper */ public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException { @@ -355,17 +343,11 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Constructor - * - * @param conf - * Configuration object - * @throws MasterNotRunningException - * if the master is not running - * @throws ZooKeeperConnectionException - * if unable to connect to ZooKeeper + * Constructor n * Configuration object n * if the master is not running n * if unable to connect + * to ZooKeeper */ public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException, - ZooKeeperConnectionException, IOException, ClassNotFoundException { + ZooKeeperConnectionException, IOException, ClassNotFoundException { super(conf); errors = getErrorReporter(getConf()); this.executor = exec; @@ -379,11 +361,11 @@ public class HBaseFsck extends Configured implements Closeable { */ public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) { return new RetryCounterFactory( - conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS), - conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval", - DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL), - conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime", - DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME)); + conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS), + conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval", + DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL), + conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime", + DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME)); } /** @@ -391,11 +373,11 @@ public class HBaseFsck extends Configured implements Closeable { */ private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) { return new RetryCounterFactory( - conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS), - conf.getInt("hbase.hbck.createznode.attempt.sleep.interval", - DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL), - conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime", - DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME)); + conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS), + conf.getInt("hbase.hbck.createznode.attempt.sleep.interval", + DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL), + conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime", + DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME)); } /** @@ -435,13 +417,13 @@ public class HBaseFsck extends Configured implements Closeable { final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms); out.writeBytes(InetAddress.getLocalHost().toString()); // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file. - out.writeBytes(" Written by an hbase-2.x Master to block an " + - "attempt by an hbase-1.x HBCK tool making modification to state. 
" + - "See 'HBCK must match HBase server version' in the hbase refguide."); + out.writeBytes(" Written by an hbase-2.x Master to block an " + + "attempt by an hbase-1.x HBCK tool making modification to state. " + + "See 'HBCK must match HBase server version' in the hbase refguide."); out.flush(); return out; - } catch(RemoteException e) { - if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){ + } catch (RemoteException e) { + if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) { return null; } else { throw e; @@ -450,25 +432,21 @@ public class HBaseFsck extends Configured implements Closeable { } private FSDataOutputStream createFileWithRetries(final FileSystem fs, - final Path hbckLockFilePath, final FsPermission defaultPerms) - throws IOException { + final Path hbckLockFilePath, final FsPermission defaultPerms) throws IOException { IOException exception = null; do { try { return CommonFSUtils.create(fs, hbckLockFilePath, defaultPerms, false); } catch (IOException ioe) { - LOG.info("Failed to create lock file " + hbckLockFilePath.getName() - + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of " - + retryCounter.getMaxAttempts()); - LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), - ioe); + LOG.info("Failed to create lock file " + hbckLockFilePath.getName() + ", try=" + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); + LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), ioe); try { exception = ioe; retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { throw (InterruptedIOException) new InterruptedIOException( - "Can't create lock file " + hbckLockFilePath.getName()) - .initCause(ie); + "Can't create lock file " + hbckLockFilePath.getName()).initCause(ie); } } } while (retryCounter.shouldRetry()); @@ -479,18 +457,17 @@ public class HBaseFsck extends Configured implements Closeable { /** * This method maintains a lock using a file. 
If the creation fails we return null - * * @return FSDataOutputStream object corresponding to the newly opened lock file * @throws IOException if IO failure occurs */ public static Pair checkAndMarkRunningHbck(Configuration conf, - RetryCounter retryCounter) throws IOException { + RetryCounter retryCounter) throws IOException { FileLockCallable callable = new FileLockCallable(conf, retryCounter); ExecutorService executor = Executors.newFixedThreadPool(1); FutureTask futureTask = new FutureTask<>(callable); executor.execute(futureTask); - final int timeoutInSeconds = conf.getInt( - "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT); + final int timeoutInSeconds = + conf.getInt("hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT); FSDataOutputStream stream = null; try { stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS); @@ -520,15 +497,13 @@ public class HBaseFsck extends Configured implements Closeable { return; } catch (IOException ioe) { LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try=" - + (retryCounter.getAttemptTimes() + 1) + " of " - + retryCounter.getMaxAttempts()); + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe); try { retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); - LOG.warn("Interrupted while deleting lock file" + - HBCK_LOCK_PATH); + LOG.warn("Interrupted while deleting lock file" + HBCK_LOCK_PATH); return; } } @@ -537,22 +512,21 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * To repair region consistency, one must call connect() in order to repair - * online state. + * To repair region consistency, one must call connect() in order to repair online state. */ public void connect() throws IOException { if (isExclusive()) { // Grab the lock Pair pair = - checkAndMarkRunningHbck(getConf(), this.lockFileRetryCounterFactory.create()); + checkAndMarkRunningHbck(getConf(), this.lockFileRetryCounterFactory.create()); HBCK_LOCK_PATH = pair.getFirst(); this.hbckOutFd = pair.getSecond(); if (hbckOutFd == null) { setRetCode(-1); - LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " + - "[If you are sure no other instance is running, delete the lock file " + - HBCK_LOCK_PATH + " and rerun the tool]"); + LOG.error("Another instance of hbck is fixing HBase, exiting this instance. 
" + + "[If you are sure no other instance is running, delete the lock file " + HBCK_LOCK_PATH + + " and rerun the tool]"); throw new IOException("Duplicate hbck - Abort"); } @@ -560,7 +534,6 @@ public class HBaseFsck extends Configured implements Closeable { hbckLockCleanup.set(true); } - // Add a shutdown hook to this thread, in case user tries to // kill the hbck with a ctrl-c, we want to cleanup the lock so that // it is available for further calls @@ -575,12 +548,11 @@ public class HBaseFsck extends Configured implements Closeable { LOG.info("Launching hbck"); - connection = (ClusterConnection)ConnectionFactory.createConnection(getConf()); + connection = (ClusterConnection) ConnectionFactory.createConnection(getConf()); admin = connection.getAdmin(); meta = connection.getTable(TableName.META_TABLE_NAME); - status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, - Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS, - Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); + status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS, + Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); } /** @@ -591,7 +563,7 @@ public class HBaseFsck extends Configured implements Closeable { Collection regionServers = status.getLiveServerMetrics().keySet(); errors.print("Number of live region servers: " + regionServers.size()); if (details) { - for (ServerName rsinfo: regionServers) { + for (ServerName rsinfo : regionServers) { errors.print(" " + rsinfo.getServerName()); } } @@ -600,7 +572,7 @@ public class HBaseFsck extends Configured implements Closeable { Collection deadRegionServers = status.getDeadServerNames(); errors.print("Number of dead region servers: " + deadRegionServers.size()); if (details) { - for (ServerName name: deadRegionServers) { + for (ServerName name : deadRegionServers) { errors.print(" " + name); } } @@ -612,7 +584,7 @@ public class HBaseFsck extends Configured implements Closeable { Collection backupMasters = status.getBackupMasterNames(); errors.print("Number of backup masters: " + backupMasters.size()); if (details) { - for (ServerName name: backupMasters) { + for (ServerName name : backupMasters) { errors.print(" " + name); } } @@ -624,7 +596,7 @@ public class HBaseFsck extends Configured implements Closeable { List rits = status.getRegionStatesInTransition(); errors.print("Number of regions in transition: " + rits.size()); if (details) { - for (RegionState state: rits) { + for (RegionState state : rits) { errors.print(" " + state.toDescriptiveString()); } } @@ -649,14 +621,15 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * This repair method analyzes hbase data in hdfs and repairs it to satisfy - * the table integrity rules. HBase doesn't need to be online for this - * operation to work. + * This repair method analyzes hbase data in hdfs and repairs it to satisfy the table integrity + * rules. HBase doesn't need to be online for this operation to work. */ public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException { // Initial pass to fix orphans. - if (shouldCheckHdfs() && (shouldFixHdfsOrphans() || shouldFixHdfsHoles() - || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) { + if ( + shouldCheckHdfs() && (shouldFixHdfsOrphans() || shouldFixHdfsHoles() + || shouldFixHdfsOverlaps() || shouldFixTableOrphans()) + ) { LOG.info("Loading regioninfos HDFS"); // if nothing is happening this should always complete in two iterations. 
int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3); @@ -673,7 +646,7 @@ public class HBaseFsck extends Configured implements Closeable { if (curIter > 2) { if (curIter == maxIterations) { LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. " - + "Tables integrity may not be fully repaired!"); + + "Tables integrity may not be fully repaired!"); } else { LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations"); } @@ -682,15 +655,12 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * This repair method requires the cluster to be online since it contacts - * region servers and the masters. It makes each region's state in HDFS, in - * hbase:meta, and deployments consistent. - * - * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable - * error. If 0, we have a clean hbase. + * This repair method requires the cluster to be online since it contacts region servers and the + * masters. It makes each region's state in HDFS, in hbase:meta, and deployments consistent. + * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable error. If + * 0, we have a clean hbase. */ - public int onlineConsistencyRepair() throws IOException, KeeperException, - InterruptedException { + public int onlineConsistencyRepair() throws IOException, KeeperException, InterruptedException { // get regions according to what is online on each RegionServer loadDeployedRegions(); @@ -751,14 +721,12 @@ public class HBaseFsck extends Configured implements Closeable { /** * This method maintains an ephemeral znode. If the creation fails we return false or throw * exception - * * @return true if creating znode succeeds; false otherwise * @throws IOException if IO failure occurs */ private boolean setMasterInMaintenanceMode() throws IOException { RetryCounter retryCounter = createZNodeRetryCounterFactory.create(); - hbckEphemeralNodePath = ZNodePaths.joinZNode( - zkw.getZNodePaths().masterMaintZNode, + hbckEphemeralNodePath = ZNodePaths.joinZNode(zkw.getZNodePaths().masterMaintZNode, "hbck-" + Long.toString(EnvironmentEdgeManager.currentTime())); do { try { @@ -768,19 +736,19 @@ public class HBaseFsck extends Configured implements Closeable { } } catch (KeeperException e) { if (retryCounter.getAttemptTimes() >= retryCounter.getMaxAttempts()) { - throw new IOException("Can't create znode " + hbckEphemeralNodePath, e); + throw new IOException("Can't create znode " + hbckEphemeralNodePath, e); } // fall through and retry } - LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try=" + - (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); + LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try=" + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); try { retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { throw (InterruptedIOException) new InterruptedIOException( - "Can't create znode " + hbckEphemeralNodePath).initCause(ie); + "Can't create znode " + hbckEphemeralNodePath).initCause(ie); } } while (retryCounter.shouldRetry()); return hbckZodeCreated; @@ -805,7 +773,7 @@ public class HBaseFsck extends Configured implements Closeable { * @return 0 on success, non-zero on failure */ public int onlineHbck() - throws IOException, KeeperException, InterruptedException, ReplicationException { + throws IOException, KeeperException, InterruptedException, ReplicationException { // print hbase 
server version errors.print("Version: " + status.getHBaseVersion()); @@ -816,7 +784,7 @@ public class HBaseFsck extends Configured implements Closeable { offlineReferenceFileRepair(); offlineHLinkFileRepair(); // If Master runs maintenance tasks (such as balancer, catalog janitor, etc) during online - // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it + // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it // is better to set Master into maintenance mode during online hbck. // if (!setMasterInMaintenanceMode()) { @@ -846,8 +814,7 @@ public class HBaseFsck extends Configured implements Closeable { } public static byte[] keyOnly(byte[] b) { - if (b == null) - return b; + if (b == null) return b; int rowlength = Bytes.toShort(b, 0); byte[] result = new byte[rowlength]; System.arraycopy(b, Bytes.SIZEOF_SHORT, result, 0, rowlength); @@ -873,18 +840,18 @@ public class HBaseFsck extends Configured implements Closeable { } private static class RegionBoundariesInformation { - public byte [] regionName; - public byte [] metaFirstKey; - public byte [] metaLastKey; - public byte [] storesFirstKey; - public byte [] storesLastKey; + public byte[] regionName; + public byte[] metaFirstKey; + public byte[] metaLastKey; + public byte[] storesFirstKey; + public byte[] storesLastKey; + @Override - public String toString () { - return "regionName=" + Bytes.toStringBinary(regionName) + - "\nmetaFirstKey=" + Bytes.toStringBinary(metaFirstKey) + - "\nmetaLastKey=" + Bytes.toStringBinary(metaLastKey) + - "\nstoresFirstKey=" + Bytes.toStringBinary(storesFirstKey) + - "\nstoresLastKey=" + Bytes.toStringBinary(storesLastKey); + public String toString() { + return "regionName=" + Bytes.toStringBinary(regionName) + "\nmetaFirstKey=" + + Bytes.toStringBinary(metaFirstKey) + "\nmetaLastKey=" + Bytes.toStringBinary(metaLastKey) + + "\nstoresFirstKey=" + Bytes.toStringBinary(storesFirstKey) + "\nstoresLastKey=" + + Bytes.toStringBinary(storesLastKey); } } @@ -893,7 +860,7 @@ public class HBaseFsck extends Configured implements Closeable { ByteArrayComparator comparator = new ByteArrayComparator(); List regions = MetaTableAccessor.getAllRegions(connection, true); final RegionBoundariesInformation currentRegionBoundariesInformation = - new RegionBoundariesInformation(); + new RegionBoundariesInformation(); Path hbaseRoot = CommonFSUtils.getRootDir(getConf()); for (RegionInfo regionInfo : regions) { Path tableDir = CommonFSUtils.getTableDir(hbaseRoot, regionInfo.getTable()); @@ -913,17 +880,21 @@ public class HBaseFsck extends Configured implements Closeable { FileStatus[] storeFiles = fs.listStatus(file.getPath()); // For all the stores in this column family. 
for (FileStatus storeFile : storeFiles) { - HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), - CacheConfig.DISABLED, true, getConf()); - if ((reader.getFirstKey() != null) + HFile.Reader reader = + HFile.createReader(fs, storeFile.getPath(), CacheConfig.DISABLED, true, getConf()); + if ( + (reader.getFirstKey() != null) && ((storeFirstKey == null) || (comparator.compare(storeFirstKey, - ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0))) { - storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey().get()).getKey(); + ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0)) + ) { + storeFirstKey = ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey(); } - if ((reader.getLastKey() != null) + if ( + (reader.getLastKey() != null) && ((storeLastKey == null) || (comparator.compare(storeLastKey, - ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey())) < 0)) { - storeLastKey = ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey(); + ((KeyValue.KeyOnlyKeyValue) reader.getLastKey().get()).getKey())) < 0) + ) { + storeLastKey = ((KeyValue.KeyOnlyKeyValue) reader.getLastKey().get()).getKey(); } reader.close(); } @@ -945,18 +916,20 @@ public class HBaseFsck extends Configured implements Closeable { boolean valid = true; // Checking start key. - if ((currentRegionBoundariesInformation.storesFirstKey != null) - && (currentRegionBoundariesInformation.metaFirstKey != null)) { - valid = valid - && comparator.compare(currentRegionBoundariesInformation.storesFirstKey, - currentRegionBoundariesInformation.metaFirstKey) >= 0; + if ( + (currentRegionBoundariesInformation.storesFirstKey != null) + && (currentRegionBoundariesInformation.metaFirstKey != null) + ) { + valid = valid && comparator.compare(currentRegionBoundariesInformation.storesFirstKey, + currentRegionBoundariesInformation.metaFirstKey) >= 0; } // Checking stop key. - if ((currentRegionBoundariesInformation.storesLastKey != null) - && (currentRegionBoundariesInformation.metaLastKey != null)) { - valid = valid - && comparator.compare(currentRegionBoundariesInformation.storesLastKey, - currentRegionBoundariesInformation.metaLastKey) < 0; + if ( + (currentRegionBoundariesInformation.storesLastKey != null) + && (currentRegionBoundariesInformation.metaLastKey != null) + ) { + valid = valid && comparator.compare(currentRegionBoundariesInformation.storesLastKey, + currentRegionBoundariesInformation.metaLastKey) < 0; } if (!valid) { errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries", @@ -981,13 +954,11 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Orphaned regions are regions without a .regioninfo file in them. We "adopt" - * these orphans by creating a new region, and moving the column families, - * recovered edits, WALs, into the new region dir. We determine the region - * startkey and endkeys by looking at all of the hfiles inside the column - * families to identify the min and max keys. The resulting region will - * likely violate table integrity but will be dealt with by merging - * overlapping regions. + * Orphaned regions are regions without a .regioninfo file in them. We "adopt" these orphans by + * creating a new region, and moving the column families, recovered edits, WALs, into the new + * region dir. We determine the region startkey and endkeys by looking at all of the hfiles inside + * the column families to identify the min and max keys. 
The resulting region will likely violate + * table integrity but will be dealt with by merging overlapping regions. */ @SuppressWarnings("deprecation") private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { @@ -995,9 +966,9 @@ public class HBaseFsck extends Configured implements Closeable { FileSystem fs = p.getFileSystem(getConf()); FileStatus[] dirs = fs.listStatus(p); if (dirs == null) { - LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + - p + ". This dir could probably be deleted."); - return ; + LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + p + + ". This dir could probably be deleted."); + return; } TableName tableName = hi.getTableName(); @@ -1006,9 +977,9 @@ public class HBaseFsck extends Configured implements Closeable { TableDescriptor template = tableInfo.getTableDescriptor(); // find min and max key values - Pair orphanRegionRange = null; + Pair orphanRegionRange = null; for (FileStatus cf : dirs) { - String cfName= cf.getPath().getName(); + String cfName = cf.getPath().getName(); // TODO Figure out what the special dirs are if (cfName.startsWith(".") || cfName.equals(HConstants.SPLIT_LOGDIR_NAME)) continue; @@ -1045,7 +1016,7 @@ public class HBaseFsck extends Configured implements Closeable { if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) { orphanRegionRange.setFirst(start); } - if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0 ) { + if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0) { orphanRegionRange.setSecond(end); } } @@ -1057,14 +1028,13 @@ public class HBaseFsck extends Configured implements Closeable { sidelineRegionDir(fs, hi); return; } - LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + - Bytes.toString(orphanRegionRange.getSecond()) + ")"); + LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + + Bytes.toString(orphanRegionRange.getSecond()) + ")"); // create new region on hdfs. move data into place. RegionInfo regionInfo = RegionInfoBuilder.newBuilder(template.getTableName()) - .setStartKey(orphanRegionRange.getFirst()) - .setEndKey(Bytes.add(orphanRegionRange.getSecond(), new byte[1])) - .build(); + .setStartKey(orphanRegionRange.getFirst()) + .setEndKey(Bytes.add(orphanRegionRange.getSecond(), new byte[1])).build(); LOG.info("Creating new region : " + regionInfo); HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), regionInfo, template); Path target = region.getRegionFileSystem().getRegionDir(); @@ -1075,11 +1045,9 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * This method determines if there are table integrity errors in HDFS. If - * there are errors and the appropriate "fix" options are enabled, the method - * will first correct orphan regions making them into legit regiondirs, and - * then reload to merge potentially overlapping regions. - * + * This method determines if there are table integrity errors in HDFS. If there are errors and the + * appropriate "fix" options are enabled, the method will first correct orphan regions making them + * into legit regiondirs, and then reload to merge potentially overlapping regions. 
* @return number of table integrity errors found */ private int restoreHdfsIntegrity() throws IOException, InterruptedException { @@ -1123,12 +1091,12 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Scan all the store file names to find any lingering reference files, - * which refer to some none-exiting files. If "fix" option is enabled, - * any lingering reference file will be sidelined if found. + * Scan all the store file names to find any lingering reference files, which refer to some + * none-exiting files. If "fix" option is enabled, any lingering reference file will be sidelined + * if found. *

      - * Lingering reference file prevents a region from opening. It has to - * be fixed before a cluster can start properly. + * Lingering reference file prevents a region from opening. It has to be fixed before a cluster + * can start properly. */ private void offlineReferenceFileRepair() throws IOException, InterruptedException { clearState(); @@ -1140,9 +1108,9 @@ public class HBaseFsck extends Configured implements Closeable { new FSUtils.ReferenceFileFilter(fs), executor, errors); errors.print(""); LOG.info("Validating mapping using HDFS state"); - for (Path path: allFiles.values()) { + for (Path path : allFiles.values()) { Path referredToFile = StoreFileInfo.getReferredToFile(path); - if (fs.exists(referredToFile)) continue; // good, expected + if (fs.exists(referredToFile)) continue; // good, expected // Found a lingering reference file errors.reportError(ERROR_CODE.LINGERING_REFERENCE_HFILE, @@ -1165,8 +1133,7 @@ public class HBaseFsck extends Configured implements Closeable { Path rootDir = getSidelineDir(); Path dst = new Path(rootDir, pathStr.substring(index + 1)); fs.mkdirs(dst.getParent()); - LOG.info("Trying to sideline reference file " - + path + " to " + dst); + LOG.info("Trying to sideline reference file " + path + " to " + dst); setShouldRerun(); success = fs.rename(path, dst); @@ -1180,17 +1147,17 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Scan all the store file names to find any lingering HFileLink files, - * which refer to some none-exiting files. If "fix" option is enabled, - * any lingering HFileLink file will be sidelined if found. + * Scan all the store file names to find any lingering HFileLink files, which refer to some + * none-exiting files. If "fix" option is enabled, any lingering HFileLink file will be sidelined + * if found. */ private void offlineHLinkFileRepair() throws IOException, InterruptedException { Configuration conf = getConf(); Path hbaseRoot = CommonFSUtils.getRootDir(conf); FileSystem fs = hbaseRoot.getFileSystem(conf); LOG.info("Computing mapping of all link files"); - Map allFiles = FSUtils - .getTableStoreFilePathMap(fs, hbaseRoot, new FSUtils.HFileLinkFilter(), executor, errors); + Map allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot, + new FSUtils.HFileLinkFilter(), executor, errors); errors.print(""); LOG.info("Validating mapping using HDFS state"); @@ -1208,7 +1175,8 @@ public class HBaseFsck extends Configured implements Closeable { // An HFileLink path should be like // ${hbase.rootdir}/data/namespace/table_name/region_id/family_name/linkedtable=linkedregionname-linkedhfilename - // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure. + // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same + // folder structure. boolean success = sidelineFile(fs, hbaseRoot, path); if (!success) { @@ -1217,12 +1185,13 @@ public class HBaseFsck extends Configured implements Closeable { // An HFileLink backreference path should be like // ${hbase.rootdir}/archive/data/namespace/table_name/region_id/family_name/.links-linkedhfilename - // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure. 
- Path backRefPath = FileLink.getBackReferencesDir(HFileArchiveUtil - .getStoreArchivePath(conf, HFileLink.getReferencedTableName(path.getName().toString()), - HFileLink.getReferencedRegionName(path.getName().toString()), - path.getParent().getName()), - HFileLink.getReferencedHFileName(path.getName().toString())); + // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same + // folder structure. + Path backRefPath = FileLink.getBackReferencesDir( + HFileArchiveUtil.getStoreArchivePath(conf, + HFileLink.getReferencedTableName(path.getName().toString()), + HFileLink.getReferencedRegionName(path.getName().toString()), path.getParent().getName()), + HFileLink.getReferencedHFileName(path.getName().toString())); success = sidelineFile(fs, hbaseRoot, backRefPath); if (!success) { @@ -1250,10 +1219,10 @@ public class HBaseFsck extends Configured implements Closeable { * TODO -- need to add tests for this. */ private void reportEmptyMetaCells() { - errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " + - emptyRegionInfoQualifiers.size()); + errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " + + emptyRegionInfoQualifiers.size()); if (details) { - for (Result r: emptyRegionInfoQualifiers) { + for (Result r : emptyRegionInfoQualifiers) { errors.print(" " + r); } } @@ -1271,10 +1240,9 @@ public class HBaseFsck extends Configured implements Closeable { errors.detail("Number of Tables in flux: " + numSkipped.get()); } for (TableDescriptor td : allTables) { - errors.detail(" Table: " + td.getTableName() + "\t" + - (td.isReadOnly() ? "ro" : "rw") + "\t" + - (td.isMetaRegion() ? "META" : " ") + "\t" + - " families: " + td.getColumnFamilyCount()); + errors.detail(" Table: " + td.getTableName() + "\t" + (td.isReadOnly() ? "ro" : "rw") + + "\t" + (td.isMetaRegion() ? "META" : " ") + "\t" + " families: " + + td.getColumnFamilyCount()); } } } @@ -1287,7 +1255,7 @@ public class HBaseFsck extends Configured implements Closeable { * Populate hbi's from regionInfos loaded from file system. */ private SortedMap loadHdfsRegionInfos() - throws IOException, InterruptedException { + throws IOException, InterruptedException { tablesInfo.clear(); // regenerating the data // generate region split structure Collection hbckRegionInfos = regionInfoMap.values(); @@ -1304,28 +1272,27 @@ public class HBaseFsck extends Configured implements Closeable { // Submit and wait for completion hbiFutures = executor.invokeAll(hbis); - for(int i=0; i f = hbiFutures.get(i); try { f.get(); - } catch(ExecutionException e) { - LOG.warn("Failed to read .regioninfo file for region " + - work.hbi.getRegionNameAsString(), e.getCause()); + } catch (ExecutionException e) { + LOG.warn("Failed to read .regioninfo file for region " + work.hbi.getRegionNameAsString(), + e.getCause()); } } Path hbaseRoot = CommonFSUtils.getRootDir(getConf()); FileSystem fs = hbaseRoot.getFileSystem(getConf()); // serialized table info gathering. - for (HbckRegionInfo hbi: hbckRegionInfos) { + for (HbckRegionInfo hbi : hbckRegionInfos) { if (hbi.getHdfsHRI() == null) { // was an orphan continue; } - // get table name from hdfs, populate various HBaseFsck tables. 
TableName tableName = hbi.getTableName(); if (tableName == null) { @@ -1341,14 +1308,14 @@ public class HBaseFsck extends Configured implements Closeable { tablesInfo.put(tableName, modTInfo); try { TableDescriptor htd = - FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName); + FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName); modTInfo.htds.add(htd); } catch (IOException ioe) { if (!orphanTableDirs.containsKey(tableName)) { LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe); - //should only report once for each table + // should only report once for each table errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE, - "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName); + "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName); Set columns = new HashSet<>(); orphanTableDirs.put(tableName, getColumnFamilyList(columns, hbi)); } @@ -1366,14 +1333,11 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * To get the column family list according to the column family dirs - * @param columns - * @param hbi - * @return a set of column families - * @throws IOException + * To get the column family list according to the column family dirs nn * @return a set of column + * families n */ private Set getColumnFamilyList(Set columns, HbckRegionInfo hbi) - throws IOException { + throws IOException { Path regionDir = hbi.getHdfsRegionDir(); FileSystem fs = regionDir.getFileSystem(getConf()); FileStatus[] subDirs = fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs)); @@ -1388,12 +1352,13 @@ public class HBaseFsck extends Configured implements Closeable { * To fabricate a .tableinfo file with following contents
      * 1. the correct tablename<br>
      * 2. the correct colfamily list<br>
      - * 3. the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
      - * @throws IOException + * 3. the default properties for both {@link TableDescriptor} and + * {@link ColumnFamilyDescriptor}<br>
      + * n */ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, - Set columns) throws IOException { - if (columns ==null || columns.isEmpty()) return false; + Set columns) throws IOException { + if (columns == null || columns.isEmpty()) return false; TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (String columnfamimly : columns) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly)); @@ -1404,7 +1369,7 @@ public class HBaseFsck extends Configured implements Closeable { /** * To fix the empty REGIONINFO_QUALIFIER rows from hbase:meta
      - * @throws IOException + * n */ public void fixEmptyMetaCells() throws IOException { if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) { @@ -1423,8 +1388,9 @@ public class HBaseFsck extends Configured implements Closeable { * 2. else create a default .tableinfo file with following items
      *  2.1 the correct tablename<br>
      *  2.2 the correct colfamily list<br>
      - *  2.3 the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
      - * @throws IOException + *  2.3 the default properties for both {@link TableDescriptor} and + * {@link ColumnFamilyDescriptor}<br>
      + * n */ public void fixOrphanTables() throws IOException { if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) { @@ -1432,14 +1398,12 @@ public class HBaseFsck extends Configured implements Closeable { List tmpList = new ArrayList<>(orphanTableDirs.keySet().size()); tmpList.addAll(orphanTableDirs.keySet()); TableDescriptor[] htds = getTableDescriptors(tmpList); - Iterator>> iter = - orphanTableDirs.entrySet().iterator(); + Iterator>> iter = orphanTableDirs.entrySet().iterator(); int j = 0; int numFailedCase = 0; FSTableDescriptors fstd = new FSTableDescriptors(getConf()); while (iter.hasNext()) { - Entry> entry = - iter.next(); + Entry> entry = iter.next(); TableName tableName = entry.getKey(); LOG.info("Trying to fix orphan table error: " + tableName); if (j < htds.length) { @@ -1453,10 +1417,12 @@ public class HBaseFsck extends Configured implements Closeable { } else { if (fabricateTableInfo(fstd, tableName, entry.getValue())) { LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file"); - LOG.warn("Strongly recommend to modify the TableDescriptor if necessary for: " + tableName); + LOG.warn( + "Strongly recommend to modify the TableDescriptor if necessary for: " + tableName); iter.remove(); } else { - LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information"); + LOG.error("Unable to create default .tableinfo for " + tableName + + " while missing column family information"); numFailedCase++; } } @@ -1467,14 +1433,14 @@ public class HBaseFsck extends Configured implements Closeable { // all orphanTableDirs are luckily recovered // re-run doFsck after recovering the .tableinfo file setShouldRerun(); - LOG.warn("Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed"); + LOG.warn( + "Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed"); } else if (numFailedCase > 0) { - LOG.error("Failed to fix " + numFailedCase - + " OrphanTables with default .tableinfo files"); + LOG.error("Failed to fix " + numFailedCase + " OrphanTables with default .tableinfo files"); } } - //cleanup the list + // cleanup the list orphanTableDirs.clear(); } @@ -1484,23 +1450,22 @@ public class HBaseFsck extends Configured implements Closeable { */ private void logParallelMerge() { if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) { - LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + - " false to run serially."); + LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + + " false to run serially."); } else { - LOG.info("Handling overlap merges serially. set hbasefsck.overlap.merge.parallel to" + - " true to run in parallel."); + LOG.info("Handling overlap merges serially. 
set hbasefsck.overlap.merge.parallel to" + + " true to run in parallel."); } } private SortedMap checkHdfsIntegrity(boolean fixHoles, - boolean fixOverlaps) throws IOException { + boolean fixOverlaps) throws IOException { LOG.info("Checking HBase region split map from HDFS data..."); logParallelMerge(); for (HbckTableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler; if (fixHoles || fixOverlaps) { - handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), - fixHoles, fixOverlaps); + handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), fixHoles, fixOverlaps); } else { handler = tInfo.new IntegrityFixSuggester(tInfo, errors); } @@ -1516,8 +1481,7 @@ public class HBaseFsck extends Configured implements Closeable { if (sidelineDir == null) { Path hbaseDir = CommonFSUtils.getRootDir(getConf()); Path hbckDir = new Path(hbaseDir, HConstants.HBCK_SIDELINEDIR_NAME); - sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-" - + startMillis); + sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-" + startMillis); } return sidelineDir; } @@ -1531,14 +1495,12 @@ public class HBaseFsck extends Configured implements Closeable { /** * Sideline a region dir (instead of deleting it) - * * @param parentDir if specified, the region will be sidelined to folder like - * {@literal .../parentDir/

      <table name>/<region name>/}. The purpose is to group together - * similar regions sidelined, for example, those regions should be bulk loaded back later - * on. If NULL, it is ignored. + * {@literal .../parentDir/<table name>/<region name>
      /}. The purpose is to group + * together similar regions sidelined, for example, those regions should be bulk + * loaded back later on. If NULL, it is ignored. */ - Path sidelineRegionDir(FileSystem fs, - String parentDir, HbckRegionInfo hi) throws IOException { + Path sidelineRegionDir(FileSystem fs, String parentDir, HbckRegionInfo hi) throws IOException { TableName tableName = hi.getTableName(); Path regionDir = hi.getHdfsRegionDir(); @@ -1551,22 +1513,22 @@ public class HBaseFsck extends Configured implements Closeable { if (parentDir != null) { rootDir = new Path(rootDir, parentDir); } - Path sidelineTableDir= CommonFSUtils.getTableDir(rootDir, tableName); + Path sidelineTableDir = CommonFSUtils.getTableDir(rootDir, tableName); Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName()); fs.mkdirs(sidelineRegionDir); boolean success = false; - FileStatus[] cfs = fs.listStatus(regionDir); + FileStatus[] cfs = fs.listStatus(regionDir); if (cfs == null) { LOG.info("Region dir is empty: " + regionDir); } else { for (FileStatus cf : cfs) { Path src = cf.getPath(); - Path dst = new Path(sidelineRegionDir, src.getName()); + Path dst = new Path(sidelineRegionDir, src.getName()); if (fs.isFile(src)) { // simple file success = fs.rename(src, dst); if (!success) { - String msg = "Unable to rename file " + src + " to " + dst; + String msg = "Unable to rename file " + src + " to " + dst; LOG.error(msg); throw new IOException(msg); } @@ -1579,14 +1541,14 @@ public class HBaseFsck extends Configured implements Closeable { LOG.info("Sidelining files from " + src + " into containing region " + dst); // FileSystem.rename is inconsistent with directories -- if the // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir, - // it moves the src into the dst dir resulting in (foo/a/b). If + // it moves the src into the dst dir resulting in (foo/a/b). If // the dst does not exist, and the src a dir, src becomes dst. (foo/b) FileStatus[] hfiles = fs.listStatus(src); if (hfiles != null && hfiles.length > 0) { for (FileStatus hfile : hfiles) { success = fs.rename(hfile.getPath(), dst); if (!success) { - String msg = "Unable to rename file " + src + " to " + dst; + String msg = "Unable to rename file " + src + " to " + dst; LOG.error(msg); throw new IOException(msg); } @@ -1608,18 +1570,15 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Load the list of disabled tables in ZK into local set. - * @throws ZooKeeperConnectionException - * @throws IOException + * Load the list of disabled tables in ZK into local set. nn */ - private void loadTableStates() - throws IOException { + private void loadTableStates() throws IOException { tableStates = MetaTableAccessor.getTableStates(connection); // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in // meantime. 
this.tableStates.put(TableName.META_TABLE_NAME, - new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); + new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); } /** @@ -1628,13 +1587,11 @@ public class HBaseFsck extends Configured implements Closeable { */ boolean isTableDisabled(TableName tableName) { return tableStates.containsKey(tableName) - && tableStates.get(tableName) - .inStates(TableState.State.DISABLED, TableState.State.DISABLING); + && tableStates.get(tableName).inStates(TableState.State.DISABLED, TableState.State.DISABLING); } /** - * Scan HDFS for all regions, recording their information into - * regionInfoMap + * Scan HDFS for all regions, recording their information into regionInfoMap */ public void loadHdfsRegionDirs() throws IOException, InterruptedException { Path rootDir = CommonFSUtils.getRootDir(getConf()); @@ -1648,44 +1605,43 @@ public class HBaseFsck extends Configured implements Closeable { List paths = FSUtils.getTableDirs(fs, rootDir); for (Path path : paths) { TableName tableName = CommonFSUtils.getTableName(path); - if ((!checkMetaOnly && - isTableIncluded(tableName)) || - tableName.equals(TableName.META_TABLE_NAME)) { - tableDirs.add(fs.getFileStatus(path)); - } + if ( + (!checkMetaOnly && isTableIncluded(tableName)) + || tableName.equals(TableName.META_TABLE_NAME) + ) { + tableDirs.add(fs.getFileStatus(path)); + } } // verify that version file exists if (!foundVersionFile) { errors.reportError(ERROR_CODE.NO_VERSION_FILE, - "Version file does not exist in root dir " + rootDir); + "Version file does not exist in root dir " + rootDir); if (shouldFixVersionFile()) { - LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME - + " file."); + LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME + " file."); setShouldRerun(); - FSUtils.setVersion(fs, rootDir, getConf().getInt( - HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt( - HConstants.VERSION_FILE_WRITE_ATTEMPTS, + FSUtils.setVersion(fs, rootDir, + getConf().getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), + getConf().getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS)); } } // Avoid multithreading at table-level because already multithreaded internally at - // region-level. Additionally multithreading at table-level can lead to deadlock - // if there are many tables in the cluster. Since there are a limited # of threads + // region-level. Additionally multithreading at table-level can lead to deadlock + // if there are many tables in the cluster. Since there are a limited # of threads // in the executor's thread pool and if we multithread at the table-level by putting // WorkItemHdfsDir callables into the executor, then we will have some threads in the // executor tied up solely in waiting for the tables' region-level calls to complete. // If there are enough tables then there will be no actual threads in the pool left // for the region-level callables to be serviced. 
for (FileStatus tableDir : tableDirs) { - LOG.debug("Loading region dirs from " +tableDir.getPath()); + LOG.debug("Loading region dirs from " + tableDir.getPath()); WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir); try { item.call(); } catch (ExecutionException e) { - LOG.warn("Could not completely load table dir " + - tableDir.getPath(), e.getCause()); + LOG.warn("Could not completely load table dir " + tableDir.getPath(), e.getCause()); } } errors.print(""); @@ -1695,33 +1651,29 @@ public class HBaseFsck extends Configured implements Closeable { * Record the location of the hbase:meta region as found in ZooKeeper. */ private boolean recordMetaRegion() throws IOException { - RegionLocations rl = connection.locateRegion(TableName.META_TABLE_NAME, - HConstants.EMPTY_START_ROW, false, false); + RegionLocations rl = + connection.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, false, false); if (rl == null) { - errors.reportError(ERROR_CODE.NULL_META_REGION, - "META region was not found in ZooKeeper"); + errors.reportError(ERROR_CODE.NULL_META_REGION, "META region was not found in ZooKeeper"); return false; } for (HRegionLocation metaLocation : rl.getRegionLocations()) { // Check if Meta region is valid and existing - if (metaLocation == null ) { - errors.reportError(ERROR_CODE.NULL_META_REGION, - "META region location is null"); + if (metaLocation == null) { + errors.reportError(ERROR_CODE.NULL_META_REGION, "META region location is null"); return false; } if (metaLocation.getRegionInfo() == null) { - errors.reportError(ERROR_CODE.NULL_META_REGION, - "META location regionInfo is null"); + errors.reportError(ERROR_CODE.NULL_META_REGION, "META location regionInfo is null"); return false; } if (metaLocation.getHostname() == null) { - errors.reportError(ERROR_CODE.NULL_META_REGION, - "META location hostName is null"); + errors.reportError(ERROR_CODE.NULL_META_REGION, "META location hostName is null"); return false; } ServerName sn = metaLocation.getServerName(); HbckRegionInfo.MetaEntry m = new HbckRegionInfo.MetaEntry(metaLocation.getRegion(), sn, - EnvironmentEdgeManager.currentTime()); + EnvironmentEdgeManager.currentTime()); HbckRegionInfo hbckRegionInfo = regionInfoMap.get(metaLocation.getRegion().getEncodedName()); if (hbckRegionInfo == null) { regionInfoMap.put(metaLocation.getRegion().getEncodedName(), new HbckRegionInfo(m)); @@ -1760,20 +1712,19 @@ public class HBaseFsck extends Configured implements Closeable { List> workFutures; // loop to contact each region server in parallel - for (ServerName rsinfo: regionServerList) { + for (ServerName rsinfo : regionServerList) { workItems.add(new WorkItemRegion(this, rsinfo, errors, connection)); } workFutures = executor.invokeAll(workItems); - for(int i=0; i f = workFutures.get(i); try { f.get(); - } catch(ExecutionException e) { - LOG.warn("Could not process regionserver {}", item.rsinfo.getAddress(), - e.getCause()); + } catch (ExecutionException e) { + LOG.warn("Could not process regionserver {}", item.rsinfo.getAddress(), e.getCause()); } } } @@ -1781,13 +1732,12 @@ public class HBaseFsck extends Configured implements Closeable { /** * Check consistency of all regions that have been found in previous phases. */ - private void checkAndFixConsistency() - throws IOException, KeeperException, InterruptedException { + private void checkAndFixConsistency() throws IOException, KeeperException, InterruptedException { // Divide the checks in two phases. 
One for default/primary replicas and another // for the non-primary ones. Keeps code cleaner this way. List workItems = new ArrayList<>(regionInfoMap.size()); - for (java.util.Map.Entry e: regionInfoMap.entrySet()) { + for (java.util.Map.Entry e : regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } @@ -1795,11 +1745,11 @@ public class HBaseFsck extends Configured implements Closeable { checkRegionConsistencyConcurrently(workItems); boolean prevHdfsCheck = shouldCheckHdfs(); - setCheckHdfs(false); //replicas don't have any hdfs data + setCheckHdfs(false); // replicas don't have any hdfs data // Run a pass over the replicas and fix any assignment issues that exist on the currently // deployed/undeployed replicas. List replicaWorkItems = new ArrayList<>(regionInfoMap.size()); - for (java.util.Map.Entry e: regionInfoMap.entrySet()) { + for (java.util.Map.Entry e : regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } @@ -1811,11 +1761,11 @@ public class HBaseFsck extends Configured implements Closeable { // not get accurate state of the hbase if continuing. The config here allows users to tune // the tolerance of number of skipped region. // TODO: evaluate the consequence to continue the hbck operation without config. - int terminateThreshold = getConf().getInt("hbase.hbck.skipped.regions.limit", 0); + int terminateThreshold = getConf().getInt("hbase.hbck.skipped.regions.limit", 0); int numOfSkippedRegions = skippedRegions.size(); if (numOfSkippedRegions > 0 && numOfSkippedRegions > terminateThreshold) { - throw new IOException(numOfSkippedRegions - + " region(s) could not be checked or repaired. See logs for detail."); + throw new IOException( + numOfSkippedRegions + " region(s) could not be checked or repaired. See logs for detail."); } if (shouldCheckHdfs()) { @@ -1826,25 +1776,25 @@ public class HBaseFsck extends Configured implements Closeable { /** * Check consistency of all regions using multiple threads concurrently. 
*/ - private void checkRegionConsistencyConcurrently( - final List workItems) - throws IOException, KeeperException, InterruptedException { + private void + checkRegionConsistencyConcurrently(final List workItems) + throws IOException, KeeperException, InterruptedException { if (workItems.isEmpty()) { - return; // nothing to check + return; // nothing to check } List> workFutures = executor.invokeAll(workItems); - for(Future f: workFutures) { + for (Future f : workFutures) { try { f.get(); - } catch(ExecutionException e1) { - LOG.warn("Could not check region consistency " , e1.getCause()); + } catch (ExecutionException e1) { + LOG.warn("Could not check region consistency ", e1.getCause()); if (e1.getCause() instanceof IOException) { - throw (IOException)e1.getCause(); + throw (IOException) e1.getCause(); } else if (e1.getCause() instanceof KeeperException) { - throw (KeeperException)e1.getCause(); + throw (KeeperException) e1.getCause(); } else if (e1.getCause() instanceof InterruptedException) { - throw (InterruptedException)e1.getCause(); + throw (InterruptedException) e1.getCause(); } else { throw new IOException(e1.getCause()); } @@ -1868,8 +1818,9 @@ public class HBaseFsck extends Configured implements Closeable { } catch (Exception e) { // If the region is non-META region, skip this region and send warning/error message; if // the region is META region, we should not continue. - LOG.warn("Unable to complete check or repair the region '" + hbi.getRegionNameAsString() - + "'.", e); + LOG.warn( + "Unable to complete check or repair the region '" + hbi.getRegionNameAsString() + "'.", + e); if (hbi.getHdfsHRI().isMetaRegion()) { throw e; } @@ -1890,9 +1841,7 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Check and fix table states, assumes full info available: - * - tableInfos - * - empty tables loaded + * Check and fix table states, assumes full info available: - tableInfos - empty tables loaded */ private void checkAndFixTableStates() throws IOException { // first check dangling states @@ -1900,21 +1849,19 @@ public class HBaseFsck extends Configured implements Closeable { TableName tableName = entry.getKey(); TableState tableState = entry.getValue(); HbckTableInfo tableInfo = tablesInfo.get(tableName); - if (isTableIncluded(tableName) - && !tableName.isSystemTable() - && tableInfo == null) { + if (isTableIncluded(tableName) && !tableName.isSystemTable() && tableInfo == null) { if (fixMeta) { MetaTableAccessor.deleteTableState(connection, tableName); TableState state = MetaTableAccessor.getTableState(connection, tableName); if (state != null) { errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " unable to delete dangling table state " + tableState); + tableName + " unable to delete dangling table state " + tableState); } } else if (!checkMetaOnly) { // dangling table state in meta if checkMetaOnly is false. 
If checkMetaOnly is // true, tableInfo will be null as tablesInfo are not polulated for all tables from hdfs errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " has dangling table state " + tableState); + tableName + " has dangling table state " + tableState); } } } @@ -1926,11 +1873,10 @@ public class HBaseFsck extends Configured implements Closeable { TableState newState = MetaTableAccessor.getTableState(connection, tableName); if (newState == null) { errors.reportError(ERROR_CODE.NO_TABLE_STATE, - "Unable to change state for table " + tableName + " in meta "); + "Unable to change state for table " + tableName + " in meta "); } } else { - errors.reportError(ERROR_CODE.NO_TABLE_STATE, - tableName + " has no state in meta "); + errors.reportError(ERROR_CODE.NO_TABLE_STATE, tableName + " has no state in meta "); } } } @@ -1951,9 +1897,9 @@ public class HBaseFsck extends Configured implements Closeable { fs.access(file.getPath(), FsAction.WRITE); } catch (AccessControlException ace) { LOG.warn("Got AccessDeniedException when preCheckPermission ", ace); - errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName() - + " does not have write perms to " + file.getPath() - + ". Please rerun hbck as hdfs user " + file.getOwner()); + errors.reportError(ERROR_CODE.WRONG_USAGE, + "Current user " + ugi.getUserName() + " does not have write perms to " + file.getPath() + + ". Please rerun hbck as hdfs user " + file.getOwner()); throw ace; } } @@ -1972,7 +1918,7 @@ public class HBaseFsck extends Configured implements Closeable { private void deleteMetaRegion(byte[] metaKey) throws IOException { Delete d = new Delete(metaKey); meta.delete(d); - LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" ); + LOG.info("Deleted " + Bytes.toString(metaKey) + " from META"); } /** @@ -1985,10 +1931,8 @@ public class HBaseFsck extends Configured implements Closeable { d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER); mutations.add(d); - RegionInfo hri = RegionInfoBuilder.newBuilder(hi.getMetaEntry()) - .setOffline(false) - .setSplit(false) - .build(); + RegionInfo hri = + RegionInfoBuilder.newBuilder(hi.getMetaEntry()).setOffline(false).setSplit(false).build(); Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); mutations.add(p); @@ -1997,19 +1941,17 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * This backwards-compatibility wrapper for permanently offlining a region - * that should not be alive. If the region server does not support the - * "offline" method, it will use the closest unassign method instead. This - * will basically work until one attempts to disable or delete the affected - * table. The problem has to do with in-memory only master state, so - * restarting the HMaster or failing over to another should fix this. + * This backwards-compatibility wrapper for permanently offlining a region that should not be + * alive. If the region server does not support the "offline" method, it will use the closest + * unassign method instead. This will basically work until one attempts to disable or delete the + * affected table. The problem has to do with in-memory only master state, so restarting the + * HMaster or failing over to another should fix this. 
*/ void offline(byte[] regionName) throws IOException { String regionString = Bytes.toStringBinary(regionName); if (!rsSupportsOffline) { - LOG.warn( - "Using unassign region " + regionString + " instead of using offline method, you should" + - " restart HMaster after these repairs"); + LOG.warn("Using unassign region " + regionString + + " instead of using offline method, you should" + " restart HMaster after these repairs"); admin.unassign(regionName, true); return; } @@ -2019,12 +1961,12 @@ public class HBaseFsck extends Configured implements Closeable { LOG.info("Offlining region " + regionString); admin.offline(regionName); } catch (IOException ioe) { - String notFoundMsg = "java.lang.NoSuchMethodException: " + - "org.apache.hadoop.hbase.master.HMaster.offline([B)"; + String notFoundMsg = + "java.lang.NoSuchMethodException: " + "org.apache.hadoop.hbase.master.HMaster.offline([B)"; if (ioe.getMessage().contains(notFoundMsg)) { - LOG.warn("Using unassign region " + regionString + - " instead of using offline method, you should" + - " restart HMaster after these repairs"); + LOG.warn( + "Using unassign region " + regionString + " instead of using offline method, you should" + + " restart HMaster after these repairs"); rsSupportsOffline = false; // in the future just use unassign admin.unassign(regionName, true); return; @@ -2034,16 +1976,13 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Attempts to undeploy a region from a region server based in information in - * META. Any operations that modify the file system should make sure that - * its corresponding region is not deployed to prevent data races. - * - * A separate call is required to update the master in-memory region state - * kept in the AssignementManager. Because disable uses this state instead of - * that found in META, we can't seem to cleanly disable/delete tables that - * have been hbck fixed. When used on a version of HBase that does not have - * the offline ipc call exposed on the master (<0.90.5, <0.92.0) a master - * restart or failover may be required. + * Attempts to undeploy a region from a region server based in information in META. Any operations + * that modify the file system should make sure that its corresponding region is not deployed to + * prevent data races. A separate call is required to update the master in-memory region state + * kept in the AssignementManager. Because disable uses this state instead of that found in META, + * we can't seem to cleanly disable/delete tables that have been hbck fixed. When used on a + * version of HBase that does not have the offline ipc call exposed on the master (<0.90.5, + * <0.92.0) a master restart or failover may be required. 
*/ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException { if (hi.getMetaEntry() == null && hi.getHdfsEntry() == null) { @@ -2067,25 +2006,22 @@ public class HBaseFsck extends Configured implements Closeable { Result r = meta.get(get); RegionLocations rl = MetaTableAccessor.getRegionLocations(r); if (rl == null) { - LOG.warn("Unable to close region " + hi.getRegionNameAsString() + - " since meta does not have handle to reach it"); + LOG.warn("Unable to close region " + hi.getRegionNameAsString() + + " since meta does not have handle to reach it"); return; } for (HRegionLocation h : rl.getRegionLocations()) { ServerName serverName = h.getServerName(); if (serverName == null) { - errors.reportError("Unable to close region " - + hi.getRegionNameAsString() + " because meta does not " - + "have handle to reach it."); + errors.reportError("Unable to close region " + hi.getRegionNameAsString() + + " because meta does not " + "have handle to reach it."); continue; } RegionInfo hri = h.getRegionInfo(); if (hri == null) { LOG.warn("Unable to close region " + hi.getRegionNameAsString() - + " because hbase:meta had invalid or missing " - + HConstants.CATALOG_FAMILY_STR + ":" - + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) - + " qualifier value."); + + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" + + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value."); continue; } // close the region -- close files and remove assignment @@ -2102,13 +2038,13 @@ public class HBaseFsck extends Configured implements Closeable { int numReplicas = admin.getDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 1; i < numReplicas; i++) { if (hi.getPrimaryHRIForDeployedReplica() == null) continue; - RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( - hi.getPrimaryHRIForDeployedReplica(), i); + RegionInfo hri = + RegionReplicaUtil.getRegionInfoForReplica(hi.getPrimaryHRIForDeployedReplica(), i); HbckRegionInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegionsForHbi(h); - //set skip checks; we undeployed it, and we don't want to evaluate this anymore - //in consistency checks + // set skip checks; we undeployed it, and we don't want to evaluate this anymore + // in consistency checks h.setSkipChecks(true); } } @@ -2116,20 +2052,20 @@ public class HBaseFsck extends Configured implements Closeable { private void undeployRegionsForHbi(HbckRegionInfo hi) throws IOException, InterruptedException { for (HbckRegionInfo.OnlineEntry rse : hi.getOnlineEntries()) { - LOG.debug("Undeploy region " + rse.getRegionInfo() + " from " + rse.getServerName()); + LOG.debug("Undeploy region " + rse.getRegionInfo() + " from " + rse.getServerName()); try { - HBaseFsckRepair - .closeRegionSilentlyAndWait(connection, rse.getServerName(), rse.getRegionInfo()); + HBaseFsckRepair.closeRegionSilentlyAndWait(connection, rse.getServerName(), + rse.getRegionInfo()); offline(rse.getRegionInfo().getRegionName()); } catch (IOException ioe) { LOG.warn("Got exception when attempting to offline region " - + Bytes.toString(rse.getRegionInfo().getRegionName()), ioe); + + Bytes.toString(rse.getRegionInfo().getRegionName()), ioe); } } } - private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) throws IOException, - KeeperException, InterruptedException { + private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) + throws IOException, KeeperException, InterruptedException { // If we are trying to fix the errors if 
(shouldFixAssignments()) { errors.print(msg); @@ -2150,8 +2086,8 @@ public class HBaseFsck extends Configured implements Closeable { HbckRegionInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegions(h); - //set skip checks; we undeploy & deploy it; we don't want to evaluate this hbi anymore - //in consistency checks + // set skip checks; we undeploy & deploy it; we don't want to evaluate this hbi anymore + // in consistency checks h.setSkipChecks(true); } HBaseFsckRepair.fixUnassigned(admin, hri); @@ -2165,7 +2101,7 @@ public class HBaseFsck extends Configured implements Closeable { * Check a single region for consistency and correct deployment. */ private void checkRegionConsistency(final String key, final HbckRegionInfo hbi) - throws IOException, KeeperException, InterruptedException { + throws IOException, KeeperException, InterruptedException { if (hbi.isSkipChecks()) return; String descriptiveName = hbi.toString(); @@ -2175,14 +2111,12 @@ public class HBaseFsck extends Configured implements Closeable { boolean hasMetaAssignment = inMeta && hbi.getMetaEntry().regionServer != null; boolean isDeployed = !hbi.getDeployedOn().isEmpty(); boolean isMultiplyDeployed = hbi.getDeployedOn().size() > 1; - boolean deploymentMatchesMeta = - hasMetaAssignment && isDeployed && !isMultiplyDeployed && - hbi.getMetaEntry().regionServer.equals(hbi.getDeployedOn().get(0)); - boolean splitParent = - inMeta && hbi.getMetaEntry().isSplit() && hbi.getMetaEntry().isOffline(); + boolean deploymentMatchesMeta = hasMetaAssignment && isDeployed && !isMultiplyDeployed + && hbi.getMetaEntry().regionServer.equals(hbi.getDeployedOn().get(0)); + boolean splitParent = inMeta && hbi.getMetaEntry().isSplit() && hbi.getMetaEntry().isOffline(); boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.getMetaEntry().getTable()); - boolean recentlyModified = inHdfs && - hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTime(); + boolean recentlyModified = + inHdfs && hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTime(); // ========== First the healthy cases ============= if (hbi.containsOnlyHdfsEdits()) { @@ -2191,8 +2125,8 @@ public class HBaseFsck extends Configured implements Closeable { if (inMeta && inHdfs && isDeployed && deploymentMatchesMeta && shouldBeDeployed) { return; } else if (inMeta && inHdfs && !shouldBeDeployed && !isDeployed) { - LOG.info("Region " + descriptiveName + " is in META, and in a disabled " + - "tabled that is not deployed"); + LOG.info("Region " + descriptiveName + " is in META, and in a disabled " + + "tabled that is not deployed"); return; } else if (recentlyModified) { LOG.warn("Region " + descriptiveName + " was recently modified -- skipping"); @@ -2203,9 +2137,9 @@ public class HBaseFsck extends Configured implements Closeable { // We shouldn't have record of this region at all then! 
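The checkRegionConsistency() hunk just above first derives a handful of boolean state flags (inMeta, inHdfs, isDeployed, deploymentMatchesMeta, shouldBeDeployed, recentlyModified) and then handles the healthy cases before the error branches. A minimal, self-contained illustration of that ordering over plain booleans; RegionVerdict and ConsistencyClassifier are invented names for this sketch and do not exist in HBase:

// Illustrative only: mirrors the order of the healthy-case checks in checkRegionConsistency().
enum RegionVerdict { HEALTHY, DISABLED_AND_OFFLINE, RECENTLY_MODIFIED, NEEDS_REPAIR }

final class ConsistencyClassifier {
  static RegionVerdict classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
      boolean deploymentMatchesMeta, boolean shouldBeDeployed, boolean recentlyModified) {
    if (inMeta && inHdfs && isDeployed && deploymentMatchesMeta && shouldBeDeployed) {
      return RegionVerdict.HEALTHY;                 // fully consistent, nothing to do
    }
    if (inMeta && inHdfs && !shouldBeDeployed && !isDeployed) {
      return RegionVerdict.DISABLED_AND_OFFLINE;    // disabled table, correctly undeployed
    }
    if (recentlyModified) {
      return RegionVerdict.RECENTLY_MODIFIED;       // recently modified, skip for now
    }
    return RegionVerdict.NEEDS_REPAIR;              // falls through to the error branches
  }
}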
assert false : "Entry for region with no data"; } else if (!inMeta && !inHdfs && isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, "Region " - + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " + - "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, + "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " + + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { undeployRegions(hbi); } @@ -2216,18 +2150,16 @@ public class HBaseFsck extends Configured implements Closeable { // cleaned by CatalogJanitor later hbi.setSkipChecks(true); LOG.info("Region " + descriptiveName - + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); + + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); return; } - errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " - + descriptiveName + " on HDFS, but not listed in hbase:meta " + - "or deployed on any region server"); + errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + descriptiveName + + " on HDFS, but not listed in hbase:meta " + "or deployed on any region server"); // restore region consistency of an adopted orphan if (shouldFixMeta()) { if (!hbi.isHdfsRegioninfoPresent()) { LOG.error("Region " + hbi.getHdfsHRI() + " could have been repaired" - + " in table integrity repair phase if -fixHdfsOrphans was" + - " used."); + + " in table integrity repair phase if -fixHdfsOrphans was" + " used."); return; } @@ -2235,11 +2167,13 @@ public class HBaseFsck extends Configured implements Closeable { HbckTableInfo tableInfo = tablesInfo.get(hri.getTable()); for (RegionInfo region : tableInfo.getRegionsFromMeta(this.regionInfoMap)) { - if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0 - && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(), - hri.getEndKey()) >= 0) - && Bytes.compareTo(region.getStartKey(), hri.getEndKey()) <= 0) { - if(region.isSplit() || region.isOffline()) continue; + if ( + Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0 + && (region.getEndKey().length == 0 + || Bytes.compareTo(region.getEndKey(), hri.getEndKey()) >= 0) + && Bytes.compareTo(region.getStartKey(), hri.getEndKey()) <= 0 + ) { + if (region.isSplit() || region.isOffline()) continue; Path regionDir = hbi.getHdfsRegionDir(); FileSystem fs = regionDir.getFileSystem(getConf()); List familyDirs = FSUtils.getFamilyDirs(fs, regionDir); @@ -2247,13 +2181,13 @@ public class HBaseFsck extends Configured implements Closeable { List referenceFilePaths = FSUtils.getReferenceFilePaths(fs, familyDir); for (Path referenceFilePath : referenceFilePaths) { Path parentRegionDir = - StoreFileInfo.getReferredToFile(referenceFilePath).getParent().getParent(); + StoreFileInfo.getReferredToFile(referenceFilePath).getParent().getParent(); if (parentRegionDir.toString().endsWith(region.getEncodedName())) { LOG.warn(hri + " start and stop keys are in the range of " + region - + ". The region might not be cleaned up from hdfs when region " + region - + " split failed. Hence deleting from hdfs."); - HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs, - regionDir.getParent(), hri); + + ". The region might not be cleaned up from hdfs when region " + region + + " split failed. 
Hence deleting from hdfs."); + HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs, regionDir.getParent(), + hri); return; } } @@ -2263,15 +2197,15 @@ public class HBaseFsck extends Configured implements Closeable { LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI()); int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet(), numReplicas); + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), + numReplicas); tryAssignmentRepair(hbi, "Trying to reassign region..."); } } else if (!inMeta && inHdfs && isDeployed) { errors.reportError(ERROR_CODE.NOT_IN_META, "Region " + descriptiveName - + " not in META, but deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + + " not in META, but deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); debugLsr(hbi.getHdfsRegionDir()); if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { // for replicas, this means that we should undeploy the region (we would have @@ -2291,12 +2225,12 @@ public class HBaseFsck extends Configured implements Closeable { LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI()); int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet(), numReplicas); + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), + numReplicas); tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); } - // ========== Cases where the region is in hbase:meta ============= + // ========== Cases where the region is in hbase:meta ============= } else if (inMeta && inHdfs && !isDeployed && splitParent) { // check whether this is an actual error, or just transient state where parent // is not cleaned @@ -2315,13 +2249,13 @@ public class HBaseFsck extends Configured implements Closeable { // error is going to be reported against primary daughter region. if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { LOG.info("Region " + descriptiveName + " is a split parent in META, in HDFS, " - + "and not deployed on any region server. This may be transient."); + + "and not deployed on any region server. This may be transient."); hbi.setSkipChecks(true); return; } - errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region " - + descriptiveName + " is a split parent in META, in HDFS, " + errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, + "Region " + descriptiveName + " is a split parent in META, in HDFS, " + "and not deployed on any region server. 
This could be transient, " + "consider to run the catalog janitor first!"); if (shouldFixSplitParents()) { @@ -2329,18 +2263,17 @@ public class HBaseFsck extends Configured implements Closeable { resetSplitParent(hbi); } } else if (inMeta && !inHdfs && !isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " - + descriptiveName + " found in META, but not in HDFS " - + "or deployed on any region server."); + errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " + descriptiveName + + " found in META, but not in HDFS " + "or deployed on any region server."); if (shouldFixMeta()) { deleteMetaRegion(hbi); } } else if (inMeta && !inHdfs && isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName - + " found in META, but not in HDFS, " + - "and deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); - // We treat HDFS as ground truth. Any information in meta is transient - // and equivalent data can be regenerated. So, lets unassign and remove + errors.reportError(ERROR_CODE.NOT_IN_HDFS, + "Region " + descriptiveName + " found in META, but not in HDFS, " + "and deployed on " + + Joiner.on(", ").join(hbi.getDeployedOn())); + // We treat HDFS as ground truth. Any information in meta is transient + // and equivalent data can be regenerated. So, lets unassign and remove // these problems from META. if (shouldFixAssignments()) { errors.print("Trying to fix unassigned region..."); @@ -2351,23 +2284,23 @@ public class HBaseFsck extends Configured implements Closeable { deleteMetaRegion(hbi); } } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) { - errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName - + " not deployed on any region server."); + errors.reportError(ERROR_CODE.NOT_DEPLOYED, + "Region " + descriptiveName + " not deployed on any region server."); tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) { errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "Region " + descriptiveName + " should not be deployed according " + - "to META, but is deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + " should not be deployed according " + + "to META, but is deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { errors.print("Trying to close the region " + descriptiveName); setShouldRerun(); HBaseFsckRepair.fixMultiAssignment(connection, hbi.getMetaEntry(), hbi.getDeployedOn()); } } else if (inMeta && inHdfs && isMultiplyDeployed) { - errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName - + " is listed in hbase:meta on region server " + hbi.getMetaEntry().regionServer - + " but is multiply assigned to region servers " + - Joiner.on(", ").join(hbi.getDeployedOn())); + errors.reportError(ERROR_CODE.MULTI_DEPLOYED, + "Region " + descriptiveName + " is listed in hbase:meta on region server " + + hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers " + + Joiner.on(", ").join(hbi.getDeployedOn())); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2375,10 +2308,10 @@ public class HBaseFsck extends Configured implements Closeable { HBaseFsckRepair.fixMultiAssignment(connection, hbi.getMetaEntry(), hbi.getDeployedOn()); } } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) { - 
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region " - + descriptiveName + " listed in hbase:meta on region server " + - hbi.getMetaEntry().regionServer + " but found on region server " + - hbi.getDeployedOn().get(0)); + errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, + "Region " + descriptiveName + " listed in hbase:meta on region server " + + hbi.getMetaEntry().regionServer + " but found on region server " + + hbi.getDeployedOn().get(0)); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2387,22 +2320,17 @@ public class HBaseFsck extends Configured implements Closeable { HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI()); } } else { - errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName + - " is in an unforeseen state:" + - " inMeta=" + inMeta + - " inHdfs=" + inHdfs + - " isDeployed=" + isDeployed + - " isMultiplyDeployed=" + isMultiplyDeployed + - " deploymentMatchesMeta=" + deploymentMatchesMeta + - " shouldBeDeployed=" + shouldBeDeployed); + errors.reportError(ERROR_CODE.UNKNOWN, + "Region " + descriptiveName + " is in an unforeseen state:" + " inMeta=" + inMeta + + " inHdfs=" + inHdfs + " isDeployed=" + isDeployed + " isMultiplyDeployed=" + + isMultiplyDeployed + " deploymentMatchesMeta=" + deploymentMatchesMeta + + " shouldBeDeployed=" + shouldBeDeployed); } } /** - * Checks tables integrity. Goes over all regions and scans the tables. - * Collects all the pieces for each table and checks if there are missing, - * repeated or overlapping ones. - * @throws IOException + * Checks tables integrity. Goes over all regions and scans the tables. Collects all the pieces + * for each table and checks if there are missing, repeated or overlapping ones. n */ SortedMap checkIntegrity() throws IOException { tablesInfo = new TreeMap<>(); @@ -2435,8 +2363,8 @@ public class HBaseFsck extends Configured implements Closeable { // Missing regionDir or over-deployment is checked elsewhere. Include // these cases in modTInfo, so we can evaluate those regions as part of // the region chain in META - //if (hbi.foundRegionDir == null) continue; - //if (hbi.deployedOn.size() != 1) continue; + // if (hbi.foundRegionDir == null) continue; + // if (hbi.deployedOn.size() != 1) continue; if (hbi.getDeployedOn().isEmpty()) { continue; } @@ -2470,8 +2398,9 @@ public class HBaseFsck extends Configured implements Closeable { return tablesInfo; } - /** Loads table info's for tables that may not have been included, since there are no - * regions reported for the table, but table dir is there in hdfs + /** + * Loads table info's for tables that may not have been included, since there are no regions + * reported for the table, but table dir is there in hdfs */ private void loadTableInfosForTablesWithNoRegion() throws IOException { Map allTables = new FSTableDescriptors(getConf()).getAll(); @@ -2505,11 +2434,11 @@ public class HBaseFsck extends Configured implements Closeable { try { dirs = fs.listStatus(contained.getHdfsRegionDir()); } catch (FileNotFoundException fnfe) { - // region we are attempting to merge in is not present! Since this is a merge, there is + // region we are attempting to merge in is not present! Since this is a merge, there is // no harm skipping this region if it does not exist. if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() - + " is missing. Assuming already sidelined or moved."); + + " is missing. 
Assuming already sidelined or moved."); } else { sidelineRegionDir(fs, contained); } @@ -2519,7 +2448,7 @@ public class HBaseFsck extends Configured implements Closeable { if (dirs == null) { if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() - + " already sidelined."); + + " already sidelined."); } else { sidelineRegionDir(fs, contained); } @@ -2528,7 +2457,7 @@ public class HBaseFsck extends Configured implements Closeable { for (FileStatus cf : dirs) { Path src = cf.getPath(); - Path dst = new Path(targetRegionDir, src.getName()); + Path dst = new Path(targetRegionDir, src.getName()); if (src.getName().equals(HRegionFileSystem.REGION_INFO_FILE)) { // do not copy the old .regioninfo file. @@ -2543,7 +2472,7 @@ public class HBaseFsck extends Configured implements Closeable { LOG.info("[" + thread + "] Moving files from " + src + " into containing region " + dst); // FileSystem.rename is inconsistent with directories -- if the // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir, - // it moves the src into the dst dir resulting in (foo/a/b). If + // it moves the src into the dst dir resulting in (foo/a/b). If // the dst does not exist, and the src a dir, src becomes dst. (foo/b) for (FileStatus hfile : fs.listStatus(src)) { boolean success = fs.rename(hfile.getPath(), dst); @@ -2557,20 +2486,19 @@ public class HBaseFsck extends Configured implements Closeable { // if all success. sidelineRegionDir(fs, contained); - LOG.info("[" + thread + "] Sidelined region dir "+ contained.getHdfsRegionDir() + " into " + - getSidelineDir()); + LOG.info("[" + thread + "] Sidelined region dir " + contained.getHdfsRegionDir() + " into " + + getSidelineDir()); debugLsr(contained.getHdfsRegionDir()); return fileMoves; } - static class WorkItemOverlapMerge implements Callable { private TableIntegrityErrorHandler handler; Collection overlapgroup; WorkItemOverlapMerge(Collection overlapgroup, - TableIntegrityErrorHandler handler) { + TableIntegrityErrorHandler handler) { this.handler = handler; this.overlapgroup = overlapgroup; } @@ -2583,10 +2511,9 @@ public class HBaseFsck extends Configured implements Closeable { }; /** - * Return a list of user-space table names whose metadata have not been - * modified in the last few milliseconds specified by timelag - * if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, STARTCODE_QUALIFIER, - * SPLITA_QUALIFIER, SPLITB_QUALIFIER have not changed in the last + * Return a list of user-space table names whose metadata have not been modified in the last few + * milliseconds specified by timelag if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, + * STARTCODE_QUALIFIER, SPLITA_QUALIFIER, SPLITB_QUALIFIER have not changed in the last * milliseconds specified by timelag, then the table is a candidate to be returned. 
* @return tables that have not been modified recently * @throws IOException if an error is encountered @@ -2612,9 +2539,9 @@ public class HBaseFsck extends Configured implements Closeable { } TableDescriptor[] getTableDescriptors(List tableNames) { - LOG.info("getTableDescriptors == tableNames => " + tableNames); + LOG.info("getTableDescriptors == tableNames => " + tableNames); try (Connection conn = ConnectionFactory.createConnection(getConf()); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { List tds = admin.listTableDescriptors(tableNames); return tds.toArray(new TableDescriptor[tds.size()]); } catch (IOException e) { @@ -2624,9 +2551,8 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Gets the entry in regionInfo corresponding to the the given encoded - * region name. If the region has not been seen yet, a new entry is added - * and returned. + * Gets the entry in regionInfo corresponding to the the given encoded region name. If the region + * has not been seen yet, a new entry is added and returned. */ private synchronized HbckRegionInfo getOrCreateInfo(String name) { HbckRegionInfo hbi = regionInfoMap.get(name); @@ -2648,14 +2574,11 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Check values in regionInfo for hbase:meta - * Check if zero or more than one regions with hbase:meta are found. - * If there are inconsistencies (i.e. zero or more than one regions - * pretend to be holding the hbase:meta) try to fix that and report an error. - * @throws IOException from HBaseFsckRepair functions - * @throws KeeperException - * @throws InterruptedException - */ + * Check values in regionInfo for hbase:meta Check if zero or more than one regions with + * hbase:meta are found. If there are inconsistencies (i.e. zero or more than one regions pretend + * to be holding the hbase:meta) try to fix that and report an error. + * @throws IOException from HBaseFsckRepair functions nn + */ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedException { Map metaRegions = new HashMap<>(); for (HbckRegionInfo value : regionInfoMap.values()) { @@ -2663,8 +2586,8 @@ public class HBaseFsck extends Configured implements Closeable { metaRegions.put(value.getReplicaId(), value); } } - int metaReplication = admin.getTableDescriptor(TableName.META_TABLE_NAME) - .getRegionReplication(); + int metaReplication = + admin.getTableDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); boolean noProblem = true; // There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas // Check the deployed servers. It should be exactly one server for each replica. 
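The checkMetaRegion() hunks that follow expect exactly one deployed server per hbase:meta replica: zero servers triggers an assign, more than one triggers a multi-assignment fix, and replicas beyond the configured replication count are undeployed. A small standalone sketch of that per-replica decision; MetaReplicaAction and MetaReplicaCheck are invented names for the example and the repair calls are replaced by enum values:

import java.util.Collections;
import java.util.List;
import java.util.Map;

// Illustrative only: the per-replica decision made in checkMetaRegion().
enum MetaReplicaAction { OK, ASSIGN_MISSING_REPLICA, FIX_MULTI_ASSIGNMENT, UNDEPLOY_EXCESS }

final class MetaReplicaCheck {
  static MetaReplicaAction check(int replicaId, int configuredReplication,
      Map<Integer, List<String>> deployedServersByReplica) {
    if (replicaId >= configuredReplication) {
      return MetaReplicaAction.UNDEPLOY_EXCESS;        // more replicas deployed than configured
    }
    List<String> servers =
      deployedServersByReplica.getOrDefault(replicaId, Collections.emptyList());
    if (servers.isEmpty()) {
      return MetaReplicaAction.ASSIGN_MISSING_REPLICA; // replica not deployed anywhere
    }
    if (servers.size() > 1) {
      return MetaReplicaAction.FIX_MULTI_ASSIGNMENT;   // same replica reported by several servers
    }
    return MetaReplicaAction.OK;                       // exactly one server, as expected
  }
}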
@@ -2679,16 +2602,15 @@ public class HBaseFsck extends Configured implements Closeable { if (servers.isEmpty()) { assignMetaReplica(i); } else if (servers.size() > 1) { - errors - .reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " + - metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); + errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " + + metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta, replicaId " + - metaHbckRegionInfo.getReplicaId() + ".."); + errors.print("Trying to fix a problem with hbase:meta, replicaId " + + metaHbckRegionInfo.getReplicaId() + ".."); setShouldRerun(); // try fix it (treat is a dupe assignment) - HBaseFsckRepair - .fixMultiAssignment(connection, metaHbckRegionInfo.getMetaEntry(), servers); + HBaseFsckRepair.fixMultiAssignment(connection, metaHbckRegionInfo.getMetaEntry(), + servers); } } } @@ -2697,11 +2619,11 @@ public class HBaseFsck extends Configured implements Closeable { for (Map.Entry entry : metaRegions.entrySet()) { noProblem = false; errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "hbase:meta replicas are deployed in excess. Configured " + metaReplication + - ", deployed " + metaRegions.size()); + "hbase:meta replicas are deployed in excess. Configured " + metaReplication + ", deployed " + + metaRegions.size()); if (shouldFixAssignments()) { - errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + - " of hbase:meta.."); + errors.print( + "Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of hbase:meta.."); setShouldRerun(); unassignMetaReplica(entry.getValue()); } @@ -2712,22 +2634,22 @@ public class HBaseFsck extends Configured implements Closeable { } private void unassignMetaReplica(HbckRegionInfo hi) - throws IOException, InterruptedException, KeeperException { + throws IOException, InterruptedException, KeeperException { undeployRegions(hi); - ZKUtil - .deleteNode(zkw, zkw.getZNodePaths().getZNodeForReplica(hi.getMetaEntry().getReplicaId())); + ZKUtil.deleteNode(zkw, + zkw.getZNodePaths().getZNodeForReplica(hi.getMetaEntry().getReplicaId())); } private void assignMetaReplica(int replicaId) - throws IOException, KeeperException, InterruptedException { - errors.reportError(ERROR_CODE.NO_META_REGION, "hbase:meta, replicaId " + - replicaId +" is not found on any region."); + throws IOException, KeeperException, InterruptedException { + errors.reportError(ERROR_CODE.NO_META_REGION, + "hbase:meta, replicaId " + replicaId + " is not found on any region."); if (shouldFixAssignments()) { errors.print("Trying to fix a problem with hbase:meta.."); setShouldRerun(); // try to fix it (treat it as unassigned region) - RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); + RegionInfo h = RegionReplicaUtil + .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); HBaseFsckRepair.fixUnassigned(admin, h); HBaseFsckRepair.waitUntilAssigned(admin, h); } @@ -2754,7 +2676,7 @@ public class HBaseFsck extends Configured implements Closeable { try { // record the latest modification of this META record - long ts = Collections.max(result.listCells(), comp).getTimestamp(); + long ts = Collections.max(result.listCells(), comp).getTimestamp(); RegionLocations rl = MetaTableAccessor.getRegionLocations(result); if (rl == null) { emptyRegionInfoQualifiers.add(result); @@ 
-2763,16 +2685,17 @@ public class HBaseFsck extends Configured implements Closeable { return true; } ServerName sn = null; - if (rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID) == null || - rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegionInfo() == null) { + if ( + rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID) == null + || rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegionInfo() == null + ) { emptyRegionInfoQualifiers.add(result); errors.reportError(ERROR_CODE.EMPTY_META_CELL, "Empty REGIONINFO_QUALIFIER found in hbase:meta"); return true; } RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegionInfo(); - if (!(isTableIncluded(hri.getTable()) - || hri.isMetaRegion())) { + if (!(isTableIncluded(hri.getTable()) || hri.isMetaRegion())) { return true; } PairOfSameType daughters = MetaTableAccessor.getDaughterRegions(result); @@ -2786,7 +2709,7 @@ public class HBaseFsck extends Configured implements Closeable { HbckRegionInfo.MetaEntry m = null; if (hri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { m = new HbckRegionInfo.MetaEntry(hri, sn, ts, daughters.getFirst(), - daughters.getSecond()); + daughters.getSecond()); } else { m = new HbckRegionInfo.MetaEntry(hri, sn, ts, null, null); } @@ -2839,16 +2762,16 @@ public class HBaseFsck extends Configured implements Closeable { int numOfSkippedRegions; errors.print("Summary:"); for (HbckTableInfo tInfo : tablesInfo.values()) { - numOfSkippedRegions = (skippedRegions.containsKey(tInfo.getName())) ? - skippedRegions.get(tInfo.getName()).size() : 0; + numOfSkippedRegions = (skippedRegions.containsKey(tInfo.getName())) + ? skippedRegions.get(tInfo.getName()).size() + : 0; if (errors.tableHasErrors(tInfo)) { errors.print("Table " + tInfo.getName() + " is inconsistent."); - } else if (numOfSkippedRegions > 0){ - errors.print("Table " + tInfo.getName() + " is okay (with " - + numOfSkippedRegions + " skipped regions)."); - } - else { + } else if (numOfSkippedRegions > 0) { + errors.print("Table " + tInfo.getName() + " is okay (with " + numOfSkippedRegions + + " skipped regions)."); + } else { errors.print("Table " + tInfo.getName() + " is okay."); } errors.print(" Number of regions: " + tInfo.getNumRegions()); @@ -2856,7 +2779,7 @@ public class HBaseFsck extends Configured implements Closeable { Set skippedRegionStrings = skippedRegions.get(tInfo.getName()); System.out.println(" Number of skipped regions: " + numOfSkippedRegions); System.out.println(" List of skipped regions:"); - for(String sr : skippedRegionStrings) { + for (String sr : skippedRegionStrings) { System.out.println(" " + sr); } } @@ -2870,10 +2793,9 @@ public class HBaseFsck extends Configured implements Closeable { } static HbckErrorReporter getErrorReporter(final Configuration conf) - throws ClassNotFoundException { - Class reporter = - conf.getClass("hbasefsck.errorreporter", PrintingErrorReporter.class, - HbckErrorReporter.class); + throws ClassNotFoundException { + Class reporter = conf.getClass("hbasefsck.errorreporter", + PrintingErrorReporter.class, HbckErrorReporter.class); return ReflectionUtils.newInstance(reporter, conf); } @@ -2912,14 +2834,14 @@ public class HBaseFsck extends Configured implements Closeable { @Override public synchronized void reportError(ERROR_CODE errorCode, String message, - HbckTableInfo table) { + HbckTableInfo table) { errorTables.add(table); reportError(errorCode, message); } @Override public synchronized void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, - 
HbckRegionInfo info) { + HbckRegionInfo info) { errorTables.add(table); String reference = "(region " + info.getRegionNameAsString() + ")"; reportError(errorCode, reference + " " + message); @@ -2927,10 +2849,10 @@ public class HBaseFsck extends Configured implements Closeable { @Override public synchronized void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, - HbckRegionInfo info1, HbckRegionInfo info2) { + HbckRegionInfo info1, HbckRegionInfo info2) { errorTables.add(table); - String reference = "(regions " + info1.getRegionNameAsString() - + " and " + info2.getRegionNameAsString() + ")"; + String reference = + "(regions " + info1.getRegionNameAsString() + " and " + info2.getRegionNameAsString() + ")"; reportError(errorCode, reference + " " + message); } @@ -2940,13 +2862,12 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Report error information, but do not increment the error count. Intended for cases - * where the actual error would have been reported previously. - * @param message + * Report error information, but do not increment the error count. Intended for cases where the + * actual error would have been reported previously. n */ @Override public synchronized void report(String message) { - if (! summary) { + if (!summary) { System.out.println("ERROR: " + message); } showProgress = 0; @@ -2954,8 +2875,7 @@ public class HBaseFsck extends Configured implements Closeable { @Override public synchronized int summarize() { - System.out.println(Integer.toString(errorCount) + - " inconsistencies detected."); + System.out.println(Integer.toString(errorCount) + " inconsistencies detected."); if (errorCount == 0) { System.out.println("Status: OK"); return 0; @@ -3016,7 +2936,7 @@ public class HBaseFsck extends Configured implements Closeable { private final ClusterConnection connection; WorkItemRegion(HBaseFsck hbck, ServerName info, HbckErrorReporter errors, - ClusterConnection connection) { + ClusterConnection connection) { this.hbck = hbck; this.rsinfo = info; this.errors = errors; @@ -3034,14 +2954,13 @@ public class HBaseFsck extends Configured implements Closeable { regions = filterRegions(regions); if (details) { - errors.detail("RegionServer: " + rsinfo.getServerName() + - " number of regions: " + regions.size()); - for (RegionInfo rinfo: regions) { - errors.detail(" " + rinfo.getRegionNameAsString() + - " id: " + rinfo.getRegionId() + - " encoded_name: " + rinfo.getEncodedName() + - " start: " + Bytes.toStringBinary(rinfo.getStartKey()) + - " end: " + Bytes.toStringBinary(rinfo.getEndKey())); + errors.detail( + "RegionServer: " + rsinfo.getServerName() + " number of regions: " + regions.size()); + for (RegionInfo rinfo : regions) { + errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() + + " encoded_name: " + rinfo.getEncodedName() + " start: " + + Bytes.toStringBinary(rinfo.getStartKey()) + " end: " + + Bytes.toStringBinary(rinfo.getEndKey())); } } @@ -3051,9 +2970,9 @@ public class HBaseFsck extends Configured implements Closeable { HbckRegionInfo hbi = hbck.getOrCreateInfo(r.getEncodedName()); hbi.addServer(r, rsinfo); } - } catch (IOException e) { // unable to connect to the region server. - errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "RegionServer: " + rsinfo.getServerName() + - " Unable to fetch region information. " + e); + } catch (IOException e) { // unable to connect to the region server. 
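The filterRegions() helper in the hunks that follow keeps a region only if it is hbase:meta or belongs to an explicitly included table (unless hbck runs in meta-only mode). A standalone version of that predicate, with the HBaseFsck fields replaced by explicit parameters; this is a sketch, not the patched method:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;

// Illustrative only: same keep/drop rule as filterRegions(), over an explicit include set.
final class RegionFilter {
  static List<RegionInfo> filter(List<RegionInfo> regions, boolean checkMetaOnly,
      Set<TableName> includedTables) {
    List<RegionInfo> kept = new ArrayList<>();
    for (RegionInfo hri : regions) {
      // An empty include set means "all tables", matching hbck's isTableIncluded() convention.
      boolean included = includedTables.isEmpty() || includedTables.contains(hri.getTable());
      if (hri.isMetaRegion() || (!checkMetaOnly && included)) {
        kept.add(hri);
      }
    }
    return kept;
  }
}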
+ errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, + "RegionServer: " + rsinfo.getServerName() + " Unable to fetch region information. " + e); throw e; } return null; @@ -3062,8 +2981,7 @@ public class HBaseFsck extends Configured implements Closeable { private List filterRegions(List regions) { List ret = Lists.newArrayList(); for (RegionInfo hri : regions) { - if (hri.isMetaRegion() || (!hbck.checkMetaOnly - && hbck.isTableIncluded(hri.getTable()))) { + if (hri.isMetaRegion() || (!hbck.checkMetaOnly && hbck.isTableIncluded(hri.getTable()))) { ret.add(hri); } } @@ -3072,8 +2990,7 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Contact hdfs and get all information about specified table directory into - * regioninfo list. + * Contact hdfs and get all information about specified table directory into regioninfo list. */ class WorkItemHdfsDir implements Callable { private FileStatus tableDir; @@ -3110,9 +3027,10 @@ public class HBaseFsck extends Configured implements Closeable { @Override public void run() { try { - LOG.debug("Loading region info from hdfs:"+ regionDir.getPath()); + LOG.debug("Loading region info from hdfs:" + regionDir.getPath()); - Path regioninfoFile = new Path(regionDir.getPath(), HRegionFileSystem.REGION_INFO_FILE); + Path regioninfoFile = + new Path(regionDir.getPath(), HRegionFileSystem.REGION_INFO_FILE); boolean regioninfoFileExists = fs.exists(regioninfoFile); if (!regioninfoFileExists) { @@ -3120,7 +3038,7 @@ public class HBaseFsck extends Configured implements Closeable { // reach a given region that it will be gone due to region splits/merges. if (!fs.exists(regionDir.getPath())) { LOG.warn("By the time we tried to process this region dir it was already gone: " - + regionDir.getPath()); + + regionDir.getPath()); return; } } @@ -3129,8 +3047,8 @@ public class HBaseFsck extends Configured implements Closeable { HbckRegionInfo.HdfsEntry he = new HbckRegionInfo.HdfsEntry(); synchronized (hbi) { if (hbi.getHdfsRegionDir() != null) { - errors.print("Directory " + encodedName + " duplicate??" + - hbi.getHdfsRegionDir()); + errors + .print("Directory " + encodedName + " duplicate??" + hbi.getHdfsRegionDir()); } he.regionDir = regionDir.getPath(); @@ -3171,7 +3089,8 @@ public class HBaseFsck extends Configured implements Closeable { } catch (ExecutionException e) { LOG.error("Unexpected exec exception! Should've been caught already. (Bug?)", e); // Shouldn't happen, we already logged/caught any exceptions in the Runnable - }; + } + ; } } catch (IOException e) { LOG.error("Cannot execute WorkItemHdfsDir for " + tableDir, e); @@ -3179,11 +3098,11 @@ public class HBaseFsck extends Configured implements Closeable { } finally { if (!exceptions.isEmpty()) { errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "Table Directory: " - + tableDir.getPath().getName() - + " Unable to fetch all HDFS region information. "); + + tableDir.getPath().getName() + " Unable to fetch all HDFS region information. "); // Just throw the first exception as an indication something bad happened // Don't need to propagate all the exceptions, we already logged them all anyway - throw new ExecutionException("First exception in WorkItemHdfsDir", exceptions.firstElement()); + throw new ExecutionException("First exception in WorkItemHdfsDir", + exceptions.firstElement()); } } return null; @@ -3191,8 +3110,7 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Contact hdfs and get all information about specified table directory into - * regioninfo list. 
+ * Contact hdfs and get all information about specified table directory into regioninfo list. */ static class WorkItemHdfsRegionInfo implements Callable { private HbckRegionInfo hbi; @@ -3214,10 +3132,9 @@ public class HBaseFsck extends Configured implements Closeable { hbi.loadHdfsRegioninfo(hbck.getConf()); } catch (IOException ioe) { String msg = "Orphan region in HDFS: Unable to load .regioninfo from table " - + hbi.getTableName() + " in hdfs dir " - + hbi.getHdfsRegionDir() - + "! It may be an invalid format or version file. Treating as " - + "an orphaned regiondir."; + + hbi.getTableName() + " in hdfs dir " + hbi.getHdfsRegionDir() + + "! It may be an invalid format or version file. Treating as " + + "an orphaned regiondir."; errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg); try { hbck.debugLsr(hbi.getHdfsRegionDir()); @@ -3234,8 +3151,8 @@ public class HBaseFsck extends Configured implements Closeable { }; /** - * Display the full report from fsck. This displays all live and dead region - * servers, and all known regions. + * Display the full report from fsck. This displays all live and dead region servers, and all + * known regions. */ public static void setDisplayFullReport() { details = true; @@ -3260,16 +3177,14 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Set summary mode. - * Print only summary of the tables and status (OK or INCONSISTENT) + * Set summary mode. Print only summary of the tables and status (OK or INCONSISTENT) */ static void setSummary() { summary = true; } /** - * Set hbase:meta check mode. - * Print only info about hbase:meta table deployment/state + * Set hbase:meta check mode. Print only info about hbase:meta table deployment/state */ void setCheckMetaOnly() { checkMetaOnly = true; @@ -3295,9 +3210,8 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Check if we should rerun fsck again. This checks if we've tried to - * fix something and we should rerun fsck tool again. - * Display the full report from fsck. This displays all live and dead + * Check if we should rerun fsck again. This checks if we've tried to fix something and we should + * rerun fsck tool again. Display the full report from fsck. This displays all live and dead * region servers, and all known regions. */ void setShouldRerun() { @@ -3309,8 +3223,8 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Fix inconsistencies found by fsck. This should try to fix errors (if any) - * found by fsck utility. + * Fix inconsistencies found by fsck. This should try to fix errors (if any) found by fsck + * utility. */ public void setFixAssignments(boolean shouldFix) { fixAssignments = shouldFix; @@ -3464,8 +3378,7 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Only check/fix tables specified by the list, - * Empty list means all tables are included. + * Only check/fix tables specified by the list, Empty list means all tables are included. 
*/ boolean isTableIncluded(TableName table) { return (tablesIncluded.isEmpty()) || tablesIncluded.contains(table); @@ -3480,8 +3393,8 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * We are interested in only those tables that have not changed their state in - * hbase:meta during the last few seconds specified by hbase.admin.fsck.timelag + * We are interested in only those tables that have not changed their state in hbase:meta during + * the last few seconds specified by hbase.admin.fsck.timelag * @param seconds - the time in seconds */ public void setTimeLag(long seconds) { @@ -3489,14 +3402,14 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * * @param sidelineDir - HDFS path to sideline data */ public void setSidelineDir(String sidelineDir) { this.sidelineDir = new Path(sidelineDir); } - protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { + protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) + throws IOException { return new HFileCorruptionChecker(getConf(), executor, sidelineCorruptHFiles); } @@ -3531,65 +3444,77 @@ public class HBaseFsck extends Configured implements Closeable { out.println(" where [opts] are:"); out.println(" -help Display help options (this)"); out.println(" -details Display full report of all regions."); - out.println(" -timelag Process only regions that " + - " have not experienced any metadata updates in the last " + - " seconds."); - out.println(" -sleepBeforeRerun Sleep this many seconds" + - " before checking if the fix worked if run with -fix"); + out.println(" -timelag Process only regions that " + + " have not experienced any metadata updates in the last " + " seconds."); + out.println(" -sleepBeforeRerun Sleep this many seconds" + + " before checking if the fix worked if run with -fix"); out.println(" -summary Print only summary of the tables and status."); out.println(" -metaonly Only check the state of the hbase:meta table."); out.println(" -sidelineDir HDFS path to backup existing meta."); - out.println(" -boundaries Verify that regions boundaries are the same between META and store files."); + out.println( + " -boundaries Verify that regions boundaries are the same between META and store files."); out.println(" -exclusive Abort if another hbck is exclusive or fixing."); out.println(""); out.println(" Datafile Repair options: (expert features, use with caution!)"); - out.println(" -checkCorruptHFiles Check all Hfiles by opening them to make sure they are valid"); - out.println(" -sidelineCorruptHFiles Quarantine corrupted HFiles. implies -checkCorruptHFiles"); + out.println( + " -checkCorruptHFiles Check all Hfiles by opening them to make sure they are valid"); + out.println( + " -sidelineCorruptHFiles Quarantine corrupted HFiles. 
implies -checkCorruptHFiles"); out.println(""); out.println(" Replication options"); out.println(" -fixReplication Deletes replication queues for removed peers"); out.println(""); - out.println(" Metadata Repair options supported as of version 2.0: (expert features, use with caution!)"); + out.println( + " Metadata Repair options supported as of version 2.0: (expert features, use with caution!)"); out.println(" -fixVersionFile Try to fix missing hbase.version file in hdfs."); out.println(" -fixReferenceFiles Try to offline lingering reference store files"); out.println(" -fixHFileLinks Try to offline lingering HFileLinks"); out.println(" -noHdfsChecking Don't load/check region info from HDFS." - + " Assumes hbase:meta region info is good. Won't check/fix any HDFS issue, e.g. hole, orphan, or overlap"); + + " Assumes hbase:meta region info is good. Won't check/fix any HDFS issue, e.g. hole, orphan, or overlap"); out.println(" -ignorePreCheckPermission ignore filesystem permission pre-check"); out.println(""); out.println("NOTE: Following options are NOT supported as of HBase version 2.0+."); out.println(""); out.println(" UNSUPPORTED Metadata Repair options: (expert features, use with caution!)"); - out.println(" -fix Try to fix region assignments. This is for backwards compatibility"); + out.println( + " -fix Try to fix region assignments. This is for backwards compatibility"); out.println(" -fixAssignments Try to fix region assignments. Replaces the old -fix"); - out.println(" -fixMeta Try to fix meta problems. This assumes HDFS region info is good."); + out.println( + " -fixMeta Try to fix meta problems. This assumes HDFS region info is good."); out.println(" -fixHdfsHoles Try to fix region holes in hdfs."); out.println(" -fixHdfsOrphans Try to fix region dirs with no .regioninfo file in hdfs"); - out.println(" -fixTableOrphans Try to fix table dirs with no .tableinfo file in hdfs (online mode only)"); + out.println( + " -fixTableOrphans Try to fix table dirs with no .tableinfo file in hdfs (online mode only)"); out.println(" -fixHdfsOverlaps Try to fix region overlaps in hdfs."); - out.println(" -maxMerge When fixing region overlaps, allow at most regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)"); - out.println(" -sidelineBigOverlaps When fixing region overlaps, allow to sideline big overlaps"); - out.println(" -maxOverlapsToSideline When fixing region overlaps, allow at most regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)"); + out.println( + " -maxMerge When fixing region overlaps, allow at most regions to merge. (n=" + + DEFAULT_MAX_MERGE + " by default)"); + out.println( + " -sidelineBigOverlaps When fixing region overlaps, allow to sideline big overlaps"); + out.println( + " -maxOverlapsToSideline When fixing region overlaps, allow at most regions to sideline per group. 
(n=" + + DEFAULT_OVERLAPS_TO_SIDELINE + " by default)"); out.println(" -fixSplitParents Try to force offline split parents to be online."); - out.println(" -removeParents Try to offline and sideline lingering parents and keep daughter regions."); + out.println( + " -removeParents Try to offline and sideline lingering parents and keep daughter regions."); out.println(" -fixEmptyMetaCells Try to fix hbase:meta entries not referencing any region" - + " (empty REGIONINFO_QUALIFIER rows)"); + + " (empty REGIONINFO_QUALIFIER rows)"); out.println(""); out.println(" UNSUPPORTED Metadata Repair shortcuts"); - out.println(" -repair Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " + - "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles" + - "-fixHFileLinks"); + out.println(" -repair Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " + + "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles" + + "-fixHFileLinks"); out.println(" -repairHoles Shortcut for -fixAssignments -fixMeta -fixHdfsHoles"); out.println(""); out.println(" Replication options"); out.println(" -fixReplication Deletes replication queues for removed peers"); - out.println(" -cleanReplicationBarrier [tableName] clean the replication barriers " + - "of a specified table, tableName is required"); + out.println(" -cleanReplicationBarrier [tableName] clean the replication barriers " + + "of a specified table, tableName is required"); out.flush(); errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString()); @@ -3598,10 +3523,7 @@ public class HBaseFsck extends Configured implements Closeable { } /** - * Main program - * - * @param args - * @throws Exception + * Main program nn */ public static void main(String[] args) throws Exception { // create a fsck object @@ -3617,7 +3539,10 @@ public class HBaseFsck extends Configured implements Closeable { * This is a Tool wrapper that gathers -Dxxx=yyy configuration settings from the command line. 
*/ static class HBaseFsckTool extends Configured implements Tool { - HBaseFsckTool(Configuration conf) { super(conf); } + HBaseFsckTool(Configuration conf) { + super(conf); + } + @Override public int run(String[] args) throws Exception { HBaseFsck hbck = new HBaseFsck(getConf()); @@ -3627,9 +3552,8 @@ public class HBaseFsck extends Configured implements Closeable { } }; - public HBaseFsck exec(ExecutorService exec, String[] args) - throws KeeperException, IOException, InterruptedException, ReplicationException { + throws KeeperException, IOException, InterruptedException, ReplicationException { long sleepBeforeRerun = DEFAULT_SLEEP_BEFORE_RERUN; boolean checkCorruptHFiles = false; @@ -3658,8 +3582,7 @@ public class HBaseFsck extends Configured implements Closeable { } } else if (cmd.equals("-sleepBeforeRerun")) { if (i == args.length - 1) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "HBaseFsck: -sleepBeforeRerun needs a value."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sleepBeforeRerun needs a value."); return printUsageAndExit(); } try { @@ -3752,16 +3675,14 @@ public class HBaseFsck extends Configured implements Closeable { } } else if (cmd.equals("-maxMerge")) { if (i == args.length - 1) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "-maxMerge needs a numeric value argument."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument."); return printUsageAndExit(); } try { int maxMerge = Integer.parseInt(args[++i]); setMaxMerge(maxMerge); } catch (NumberFormatException e) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "-maxMerge needs a numeric value argument."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument."); return printUsageAndExit(); } } else if (cmd.equals("-summary")) { @@ -3774,7 +3695,7 @@ public class HBaseFsck extends Configured implements Closeable { setFixReplication(true); } else if (cmd.equals("-cleanReplicationBarrier")) { setCleanReplicationBarrier(true); - if(args[++i].startsWith("-")){ + if (args[++i].startsWith("-")) { printUsageAndExit(); } setCleanReplicationBarrierTable(args[i]); @@ -3865,7 +3786,7 @@ public class HBaseFsck extends Configured implements Closeable { for (String arg : args) { if (unsupportedOptionsInV2.contains(arg)) { errors.reportError(ERROR_CODE.UNSUPPORTED_OPTION, - "option '" + arg + "' is not " + "supported!"); + "option '" + arg + "' is not " + "supported!"); result = false; break; } @@ -3905,10 +3826,10 @@ public class HBaseFsck extends Configured implements Closeable { barrierScan.setCaching(100); barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY); barrierScan - .withStartRow(MetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable, - MetaTableAccessor.QueryType.REGION)) - .withStopRow(MetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable, - MetaTableAccessor.QueryType.REGION)); + .withStartRow(MetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable, + MetaTableAccessor.QueryType.REGION)) + .withStopRow(MetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable, + MetaTableAccessor.QueryType.REGION)); Result result; try (ResultScanner scanner = meta.getScanner(barrierScan)) { while ((result = scanner.next()) != null) { @@ -3921,13 +3842,13 @@ public class HBaseFsck extends Configured implements Closeable { return; } ReplicationQueueStorage queueStorage = - ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); + 
ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); List peerDescriptions = admin.listReplicationPeers(); if (peerDescriptions != null && peerDescriptions.size() > 0) { List peers = peerDescriptions.stream() - .filter(peerConfig -> peerConfig.getPeerConfig() - .needToReplicate(cleanReplicationBarrierTable)) - .map(peerConfig -> peerConfig.getPeerId()).collect(Collectors.toList()); + .filter( + peerConfig -> peerConfig.getPeerConfig().needToReplicate(cleanReplicationBarrierTable)) + .map(peerConfig -> peerConfig.getPeerId()).collect(Collectors.toList()); try { List batch = new ArrayList<>(); for (String peer : peers) { @@ -3963,16 +3884,15 @@ public class HBaseFsck extends Configured implements Closeable { /** * ls -r for debugging purposes */ - public static void debugLsr(Configuration conf, - Path p) throws IOException { + public static void debugLsr(Configuration conf, Path p) throws IOException { debugLsr(conf, p, new PrintingErrorReporter()); } /** * ls -r for debugging purposes */ - public static void debugLsr(Configuration conf, - Path p, HbckErrorReporter errors) throws IOException { + public static void debugLsr(Configuration conf, Path p, HbckErrorReporter errors) + throws IOException { if (!LOG.isDebugEnabled() || p == null) { return; } @@ -3989,7 +3909,7 @@ public class HBaseFsck extends Configured implements Closeable { } if (fs.getFileStatus(p).isDirectory()) { - FileStatus[] fss= fs.listStatus(p); + FileStatus[] fss = fs.listStatus(p); for (FileStatus status : fss) { debugLsr(conf, status.getPath(), errors); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index b37dc18dc7e..1e74997f27c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.EnumSet; import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -49,27 +47,23 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This class contains helper methods that repair parts of hbase's filesystem - * contents. + * This class contains helper methods that repair parts of hbase's filesystem contents. */ @InterfaceAudience.Private public class HBaseFsckRepair { private static final Logger LOG = LoggerFactory.getLogger(HBaseFsckRepair.class); /** - * Fix multiple assignment by doing silent closes on each RS hosting the region - * and then force ZK unassigned node to OFFLINE to trigger assignment by - * master. - * + * Fix multiple assignment by doing silent closes on each RS hosting the region and then force ZK + * unassigned node to OFFLINE to trigger assignment by master. 
* @param connection HBase connection to the cluster - * @param region Region to undeploy - * @param servers list of Servers to undeploy from + * @param region Region to undeploy + * @param servers list of Servers to undeploy from */ public static void fixMultiAssignment(Connection connection, RegionInfo region, - List servers) - throws IOException, KeeperException, InterruptedException { + List servers) throws IOException, KeeperException, InterruptedException { // Close region on the servers silently - for(ServerName server : servers) { + for (ServerName server : servers) { closeRegionSilentlyAndWait(connection, server, region); } @@ -78,50 +72,41 @@ public class HBaseFsckRepair { } /** - * Fix unassigned by creating/transition the unassigned ZK node for this - * region to OFFLINE state with a special flag to tell the master that this is - * a forced operation by HBCK. - * - * This assumes that info is in META. - * - * @param admin - * @param region - * @throws IOException - * @throws KeeperException + * Fix unassigned by creating/transition the unassigned ZK node for this region to OFFLINE state + * with a special flag to tell the master that this is a forced operation by HBCK. This assumes + * that info is in META. nnnn */ public static void fixUnassigned(Admin admin, RegionInfo region) - throws IOException, KeeperException, InterruptedException { + throws IOException, KeeperException, InterruptedException { // Force ZK node to OFFLINE so master assigns forceOfflineInZK(admin, region); } /** - * In 0.90, this forces an HRI offline by setting the RegionTransitionData - * in ZK to have HBCK_CODE_NAME as the server. This is a special case in - * the AssignmentManager that attempts an assign call by the master. - * - * This doesn't seem to work properly in the updated version of 0.92+'s hbck - * so we use assign to force the region into transition. This has the - * side-effect of requiring a RegionInfo that considers regionId (timestamp) - * in comparators that is addressed by HBASE-5563. + * In 0.90, this forces an HRI offline by setting the RegionTransitionData in ZK to have + * HBCK_CODE_NAME as the server. This is a special case in the AssignmentManager that attempts an + * assign call by the master. This doesn't seem to work properly in the updated version of 0.92+'s + * hbck so we use assign to force the region into transition. This has the side-effect of + * requiring a RegionInfo that considers regionId (timestamp) in comparators that is addressed by + * HBASE-5563. */ private static void forceOfflineInZK(Admin admin, final RegionInfo region) - throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException { + throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException { admin.assign(region.getRegionName()); } /* * Should we check all assignments or just not in RIT? 
*/ - public static void waitUntilAssigned(Admin admin, - RegionInfo region) throws IOException, InterruptedException { + public static void waitUntilAssigned(Admin admin, RegionInfo region) + throws IOException, InterruptedException { long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000); long expiration = timeout + EnvironmentEdgeManager.currentTime(); while (EnvironmentEdgeManager.currentTime() < expiration) { try { boolean inTransition = false; for (RegionState rs : admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)) - .getRegionStatesInTransition()) { + .getRegionStatesInTransition()) { if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) { inTransition = true; break; @@ -132,36 +117,33 @@ public class HBaseFsckRepair { return; } // still in rit - LOG.info("Region still in transition, waiting for " - + "it to become assigned: " + region); + LOG.info("Region still in transition, waiting for " + "it to become assigned: " + region); } catch (IOException e) { - LOG.warn("Exception when waiting for region to become assigned," - + " retrying", e); + LOG.warn("Exception when waiting for region to become assigned," + " retrying", e); } Thread.sleep(1000); } - throw new IOException("Region " + region + " failed to move out of " + - "transition within timeout " + timeout + "ms"); + throw new IOException("Region " + region + " failed to move out of " + + "transition within timeout " + timeout + "ms"); } /** - * Contacts a region server and waits up to hbase.hbck.close.timeout ms - * (default 120s) to close the region. This bypasses the active hmaster. + * Contacts a region server and waits up to hbase.hbck.close.timeout ms (default 120s) to close + * the region. This bypasses the active hmaster. */ @SuppressWarnings("deprecation") - public static void closeRegionSilentlyAndWait(Connection connection, - ServerName server, RegionInfo region) throws IOException, InterruptedException { - long timeout = connection.getConfiguration() - .getLong("hbase.hbck.close.timeout", 120000); - ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server, - region, timeout); + public static void closeRegionSilentlyAndWait(Connection connection, ServerName server, + RegionInfo region) throws IOException, InterruptedException { + long timeout = connection.getConfiguration().getLong("hbase.hbck.close.timeout", 120000); + ServerManager.closeRegionSilentlyAndWait((ClusterConnection) connection, server, region, + timeout); } /** * Puts the specified RegionInfo into META with replica related columns */ - public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, - RegionInfo hri, Collection servers, int numReplicas) throws IOException { + public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri, + Collection servers, int numReplicas) throws IOException { Connection conn = ConnectionFactory.createConnection(conf); Table meta = conn.getTable(TableName.META_TABLE_NAME); Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); @@ -185,8 +167,8 @@ public class HBaseFsckRepair { /** * Creates, flushes, and closes a new region. 
*/ - public static HRegion createHDFSRegionDir(Configuration conf, - RegionInfo hri, TableDescriptor htd) throws IOException { + public static HRegion createHDFSRegionDir(Configuration conf, RegionInfo hri, TableDescriptor htd) + throws IOException { // Create HRegion Path root = CommonFSUtils.getRootDir(conf); HRegion region = HRegion.createHRegion(hri, root, conf, htd, null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java index f54864492f3..9f26eda12c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,8 @@ public final class HFileArchiveUtil { /** * Get the directory to archive a store directory - * @param conf {@link Configuration} to read for the archive directory name - * @param tableName table name under which the store currently lives + * @param conf {@link Configuration} to read for the archive directory name + * @param tableName table name under which the store currently lives * @param regionName region encoded name under which the store currently lives * @param familyName name of the family in the store * @return {@link Path} to the directory to archive the given store or null if it should @@ -54,10 +54,10 @@ public final class HFileArchiveUtil { /** * Get the directory to archive a store directory - * @param conf {@link Configuration} to read for the archive directory name. - * @param region parent region information under which the store currently lives + * @param conf {@link Configuration} to read for the archive directory name. + * @param region parent region information under which the store currently lives * @param tabledir directory for the table under which the store currently lives - * @param family name of the family in the store + * @param family name of the family in the store * @return {@link Path} to the directory to archive the given store or null if it should * not be archived */ @@ -68,7 +68,7 @@ public final class HFileArchiveUtil { /** * Gets the directory to archive a store directory. - * @param conf {@link Configuration} to read for the archive directory name. + * @param conf {@link Configuration} to read for the archive directory name. * @param region parent region information under which the store currently lives * @param family name of the family in the store * @return {@link Path} to the directory to archive the given store or null if it should @@ -87,8 +87,8 @@ public final class HFileArchiveUtil { * HDFS. This is mostly useful for archiving recovered edits, when * hbase.region.archive.recovered.edits is enabled. * @param rootDir {@link Path} the root dir under which archive path should be created. 
- * @param region parent region information under which the store currently lives - * @param family name of the family in the store + * @param region parent region information under which the store currently lives + * @param family name of the family in the store * @return {@link Path} to the WAL FS directory to archive the given store or null if it * should not be archived */ @@ -121,8 +121,8 @@ public final class HFileArchiveUtil { /** * Get the archive directory for a given region under the specified table - * @param rootDir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) + * @param rootDir {@link Path} to the root directory where hbase files are stored (for building + * the archive path) * @param tableName name of the table to archive. Cannot be null. * @return {@link Path} to the directory to archive the given region, or null if it * should not be archived @@ -140,8 +140,8 @@ public final class HFileArchiveUtil { * Get the path to the table's archive directory. *
<p>
      * Generally of the form: /hbase/.archive/[tablename] - * @param rootdir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) + * @param rootdir {@link Path} to the root directory where hbase files are stored (for building + * the archive path) * @param tableName Name of the table to be archived. Cannot be null. * @return {@link Path} to the archive directory for the table */ @@ -153,7 +153,7 @@ public final class HFileArchiveUtil { * Get the path to the table archive directory based on the configured archive directory. *
<p>
      * Assumed that the table should already be archived. - * @param conf {@link Configuration} to read the archive directory property. Can be null + * @param conf {@link Configuration} to read the archive directory property. Can be null * @param tableName Name of the table to be archived. Cannot be null. * @return {@link Path} to the archive directory for the table */ @@ -166,7 +166,7 @@ public final class HFileArchiveUtil { * Get the full path to the archive directory on the configured * {@link org.apache.hadoop.hbase.master.MasterFileSystem} * @param conf to look for archive directory name and root directory. Cannot be null. Notes for - * testing: requires a FileSystem root directory to be specified. + * testing: requires a FileSystem root directory to be specified. * @return the full {@link Path} to the archive directory, as defined by the configuration * @throws IOException if an unexpected error occurs */ @@ -178,7 +178,7 @@ public final class HFileArchiveUtil { * Get the full path to the archive directory on the configured * {@link org.apache.hadoop.hbase.master.MasterFileSystem} * @param rootdir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) + * the archive path) * @return the full {@link Path} to the archive directory, as defined by the configuration */ private static Path getArchivePath(final Path rootdir) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java index 774871b3826..c1ac06cada1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +18,12 @@ package org.apache.hadoop.hbase.util; import java.util.Arrays; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * This class encapsulates a byte array and overrides hashCode and equals so - * that it's identity is based on the data rather than the array instance. + * This class encapsulates a byte array and overrides hashCode and equals so that it's identity is + * based on the data rather than the array instance. */ @InterfaceAudience.Private @InterfaceStability.Stable @@ -50,10 +48,8 @@ public class HashedBytes { @Override public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null || getClass() != obj.getClass()) - return false; + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; HashedBytes other = (HashedBytes) obj; return (hashCode == other.hashCode) && Arrays.equals(bytes, other.bytes); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java index 52012dfa235..0735809424e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java @@ -18,27 +18,54 @@ package org.apache.hadoop.hbase.util; import java.util.ArrayList; - import org.apache.yetus.audience.InterfaceAudience; /** * Used by {@link HBaseFsck} reporting system. * @deprecated Since 2.3.0. To be removed in hbase4. 
Use HBCK2 instead. Remove when - * {@link HBaseFsck} is removed. + * {@link HBaseFsck} is removed. */ @Deprecated @InterfaceAudience.Private public interface HbckErrorReporter { enum ERROR_CODE { - UNKNOWN, NO_META_REGION, NULL_META_REGION, NO_VERSION_FILE, NOT_IN_META_HDFS, NOT_IN_META, - NOT_IN_META_OR_DEPLOYED, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, SERVER_DOES_NOT_MATCH_META, - NOT_DEPLOYED, MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE, - FIRST_REGION_STARTKEY_NOT_EMPTY, LAST_REGION_ENDKEY_NOT_EMPTY, DUPE_STARTKEYS, - HOLE_IN_REGION_CHAIN, OVERLAP_IN_REGION_CHAIN, REGION_CYCLE, DEGENERATE_REGION, - ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE, - LINGERING_HFILELINK, WRONG_USAGE, EMPTY_META_CELL, EXPIRED_TABLE_LOCK, BOUNDARIES_ERROR, - ORPHAN_TABLE_STATE, NO_TABLE_STATE, UNDELETED_REPLICATION_QUEUE, DUPE_ENDKEYS, - UNSUPPORTED_OPTION, INVALID_TABLE + UNKNOWN, + NO_META_REGION, + NULL_META_REGION, + NO_VERSION_FILE, + NOT_IN_META_HDFS, + NOT_IN_META, + NOT_IN_META_OR_DEPLOYED, + NOT_IN_HDFS_OR_DEPLOYED, + NOT_IN_HDFS, + SERVER_DOES_NOT_MATCH_META, + NOT_DEPLOYED, + MULTI_DEPLOYED, + SHOULD_NOT_BE_DEPLOYED, + MULTI_META_REGION, + RS_CONNECT_FAILURE, + FIRST_REGION_STARTKEY_NOT_EMPTY, + LAST_REGION_ENDKEY_NOT_EMPTY, + DUPE_STARTKEYS, + HOLE_IN_REGION_CHAIN, + OVERLAP_IN_REGION_CHAIN, + REGION_CYCLE, + DEGENERATE_REGION, + ORPHAN_HDFS_REGION, + LINGERING_SPLIT_PARENT, + NO_TABLEINFO_FILE, + LINGERING_REFERENCE_HFILE, + LINGERING_HFILELINK, + WRONG_USAGE, + EMPTY_META_CELL, + EXPIRED_TABLE_LOCK, + BOUNDARIES_ERROR, + ORPHAN_TABLE_STATE, + NO_TABLE_STATE, + UNDELETED_REPLICATION_QUEUE, + DUPE_ENDKEYS, + UNSUPPORTED_OPTION, + INVALID_TABLE } void clear(); @@ -54,7 +81,7 @@ public interface HbckErrorReporter { void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, HbckRegionInfo info); void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, HbckRegionInfo info1, - HbckRegionInfo info2); + HbckRegionInfo info2); int summarize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java index acd207a67fd..c14689b05af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,8 +39,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Maintain information about a particular region. It gathers information - * from three places -- HDFS, META, and region servers. + * Maintain information about a particular region. It gathers information from three places -- HDFS, + * META, and region servers. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -61,24 +61,24 @@ public class HbckRegionInfo implements KeyRange { } public synchronized int getReplicaId() { - return metaEntry != null? metaEntry.getReplicaId(): deployedReplicaId; + return metaEntry != null ? 
metaEntry.getReplicaId() : deployedReplicaId; } public synchronized void addServer(RegionInfo regionInfo, ServerName serverName) { - OnlineEntry rse = new OnlineEntry(regionInfo, serverName) ; + OnlineEntry rse = new OnlineEntry(regionInfo, serverName); this.deployedEntries.add(rse); this.deployedOn.add(serverName); // save the replicaId that we see deployed in the cluster this.deployedReplicaId = regionInfo.getReplicaId(); this.primaryHRIForDeployedReplica = - RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo); + RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo); } @Override public synchronized String toString() { StringBuilder sb = new StringBuilder(); sb.append("{ meta => "); - sb.append((metaEntry != null)? metaEntry.getRegionNameAsString() : "null"); + sb.append((metaEntry != null) ? metaEntry.getRegionNameAsString() : "null"); sb.append(", hdfs => " + getHdfsRegionDir()); sb.append(", deployed => " + Joiner.on(", ").join(deployedEntries)); sb.append(", replicaId => " + getReplicaId()); @@ -135,8 +135,8 @@ public class HbckRegionInfo implements KeyRange { } /** - * Read the .regioninfo file from the file system. If there is no - * .regioninfo, add it to the orphan hdfs region list. + * Read the .regioninfo file from the file system. If there is no .regioninfo, add it to the + * orphan hdfs region list. */ public void loadHdfsRegioninfo(Configuration conf) throws IOException { Path regionDir = getHdfsRegionDir(); @@ -265,16 +265,16 @@ public class HbckRegionInfo implements KeyRange { * Stores the regioninfo entries scanned from META */ public static class MetaEntry extends HRegionInfo { - ServerName regionServer; // server hosting this region - long modTime; // timestamp of most recent modification metadata - RegionInfo splitA, splitB; //split daughters + ServerName regionServer; // server hosting this region + long modTime; // timestamp of most recent modification metadata + RegionInfo splitA, splitB; // split daughters public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime) { this(rinfo, regionServer, modTime, null, null); } - public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime, - RegionInfo splitA, RegionInfo splitB) { + public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime, RegionInfo splitA, + RegionInfo splitB) { super(rinfo); this.regionServer = regionServer; this.modTime = modTime; @@ -373,8 +373,8 @@ public class HbckRegionInfo implements KeyRange { return tableCompare; } - int startComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare( - l.getStartKey(), r.getStartKey()); + int startComparison = + RegionSplitCalculator.BYTES_COMPARATOR.compare(l.getStartKey(), r.getStartKey()); if (startComparison != 0) { return startComparison; } @@ -384,8 +384,7 @@ public class HbckRegionInfo implements KeyRange { endKey = (endKey.length == 0) ? null : endKey; byte[] endKey2 = l.getEndKey(); endKey2 = (endKey2.length == 0) ? 
null : endKey2; - int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare( - endKey2, endKey); + int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare(endKey2, endKey); if (endComparison != 0) { return endComparison; @@ -407,4 +406,4 @@ public class HbckRegionInfo implements KeyRange { return Long.compare(l.getHdfsEntry().hri.getRegionId(), r.getHdfsEntry().hri.getRegionId()); } }; -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java index c44dbc97836..f06546fef24 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java @@ -30,7 +30,6 @@ import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -77,14 +76,14 @@ public class HbckTableInfo { // region split calculator final RegionSplitCalculator sc = - new RegionSplitCalculator<>(HbckRegionInfo.COMPARATOR); + new RegionSplitCalculator<>(HbckRegionInfo.COMPARATOR); - // Histogram of different TableDescriptors found. Ideally there is only one! + // Histogram of different TableDescriptors found. Ideally there is only one! final Set htds = new HashSet<>(); // key = start split, values = set of splits in problem group final Multimap overlapGroups = - TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, HbckRegionInfo.COMPARATOR); + TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, HbckRegionInfo.COMPARATOR); // list of regions derived from meta entries. private ImmutableList regionsFromMeta = null; @@ -98,14 +97,14 @@ public class HbckTableInfo { } /** - * @return descriptor common to all regions. null if are none or multiple! + * @return descriptor common to all regions. null if are none or multiple! 
*/ TableDescriptor getTableDescriptor() { if (htds.size() == 1) { - return (TableDescriptor)htds.toArray()[0]; + return (TableDescriptor) htds.toArray()[0]; } else { - LOG.error("None/Multiple table descriptors found for table '" - + tableName + "' regions: " + htds); + LOG.error( + "None/Multiple table descriptors found for table '" + tableName + "' regions: " + htds); } return null; } @@ -122,10 +121,11 @@ public class HbckTableInfo { // if not the absolute end key, check for cycle if (Bytes.compareTo(hir.getStartKey(), hir.getEndKey()) > 0) { - hbck.getErrors().reportError(HbckErrorReporter.ERROR_CODE.REGION_CYCLE, String.format( + hbck.getErrors().reportError(HbckErrorReporter.ERROR_CODE.REGION_CYCLE, + String.format( "The endkey for this region comes before the " + "startkey, startkey=%s, endkey=%s", - Bytes.toStringBinary(hir.getStartKey()), Bytes.toStringBinary(hir.getEndKey())), this, - hir); + Bytes.toStringBinary(hir.getStartKey()), Bytes.toStringBinary(hir.getEndKey())), + this, hir); backwards.add(hir); return; } @@ -149,8 +149,8 @@ public class HbckTableInfo { return sc.getStarts().size() + backwards.size(); } - public synchronized ImmutableList getRegionsFromMeta( - TreeMap regionInfoMap) { + public synchronized ImmutableList + getRegionsFromMeta(TreeMap regionInfoMap) { // lazy loaded, synchronized to ensure a single load if (regionsFromMeta == null) { List regions = new ArrayList<>(); @@ -178,22 +178,23 @@ public class HbckTableInfo { @Override public void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, - "First region should start with an empty key. You need to " - + " create a new region and regioninfo in HDFS to plug the hole.", - getTableInfo(), hi); + "First region should start with an empty key. You need to " + + " create a new region and regioninfo in HDFS to plug the hole.", + getTableInfo(), hi); } @Override public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, - "Last region should end with an empty key. You need to " - + "create a new region and regioninfo in HDFS to plug the hole.", getTableInfo()); + "Last region should end with an empty key. 
You need to " + + "create a new region and regioninfo in HDFS to plug the hole.", + getTableInfo()); } @Override - public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException{ + public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.DEGENERATE_REGION, - "Region has the same start and end key.", getTableInfo(), hi); + "Region has the same start and end key.", getTableInfo(), hi); } @Override @@ -201,55 +202,47 @@ public class HbckTableInfo { byte[] key = r1.getStartKey(); // dup start key errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS, - "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), - r1); + "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), + r1); errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS, - "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), - r2); + "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), + r2); } @Override - public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException{ + public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException { byte[] key = r1.getStartKey(); // dup start key errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS, - "Multiple regions have the same regionID: " - + Bytes.toStringBinary(key), getTableInfo(), r1); + "Multiple regions have the same regionID: " + Bytes.toStringBinary(key), getTableInfo(), + r1); errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS, - "Multiple regions have the same regionID: " - + Bytes.toStringBinary(key), getTableInfo(), r2); + "Multiple regions have the same regionID: " + Bytes.toStringBinary(key), getTableInfo(), + r2); } @Override public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException { + throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN, - "There is an overlap in the region chain.", getTableInfo(), hi1, hi2); + "There is an overlap in the region chain.", getTableInfo(), hi1, hi2); } @Override public void handleHoleInRegionChain(byte[] holeStart, byte[] holeStop) throws IOException { - errors.reportError( - HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, - "There is a hole in the region chain between " - + Bytes.toStringBinary(holeStart) + " and " - + Bytes.toStringBinary(holeStop) - + ". You need to create a new .regioninfo and region " - + "dir in hdfs to plug the hole."); + errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, + "There is a hole in the region chain between " + Bytes.toStringBinary(holeStart) + " and " + + Bytes.toStringBinary(holeStop) + ". You need to create a new .regioninfo and region " + + "dir in hdfs to plug the hole."); } } /** - * This handler fixes integrity errors from hdfs information. There are - * basically three classes of integrity problems 1) holes, 2) overlaps, and - * 3) invalid regions. - * - * This class overrides methods that fix holes and the overlap group case. - * Individual cases of particular overlaps are handled by the general - * overlap group merge repair case. - * - * If hbase is online, this forces regions offline before doing merge - * operations. + * This handler fixes integrity errors from hdfs information. There are basically three classes of + * integrity problems 1) holes, 2) overlaps, and 3) invalid regions. 
This class overrides methods + * that fix holes and the overlap group case. Individual cases of particular overlaps are handled + * by the general overlap group merge repair case. If hbase is online, this forces regions offline + * before doing merge operations. */ class HDFSIntegrityFixer extends IntegrityFixSuggester { Configuration conf; @@ -257,7 +250,7 @@ public class HbckTableInfo { boolean fixOverlaps = true; HDFSIntegrityFixer(HbckTableInfo ti, HbckErrorReporter errors, Configuration conf, - boolean fixHoles, boolean fixOverlaps) { + boolean fixHoles, boolean fixOverlaps) { super(ti, errors); this.conf = conf; this.fixOverlaps = fixOverlaps; @@ -265,84 +258,74 @@ public class HbckTableInfo { } /** - * This is a special case hole -- when the first region of a table is - * missing from META, HBase doesn't acknowledge the existance of the - * table. + * This is a special case hole -- when the first region of a table is missing from META, HBase + * doesn't acknowledge the existance of the table. */ @Override public void handleRegionStartKeyNotEmpty(HbckRegionInfo next) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, - "First region should start with an empty key. Creating a new " + - "region and regioninfo in HDFS to plug the hole.", - getTableInfo(), next); + "First region should start with an empty key. Creating a new " + + "region and regioninfo in HDFS to plug the hole.", + getTableInfo(), next); TableDescriptor htd = getTableInfo().getTableDescriptor(); // from special EMPTY_START_ROW to next region's startKey RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(HConstants.EMPTY_START_ROW) - .setEndKey(next.getStartKey()) - .build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(next.getStartKey()).build(); // TODO test HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("Table region start key was not empty. Created new empty region: " - + newRegion + " " +region); + LOG.info("Table region start key was not empty. Created new empty region: " + newRegion + " " + + region); hbck.fixes++; } @Override public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, - "Last region should end with an empty key. Creating a new " - + "region and regioninfo in HDFS to plug the hole.", getTableInfo()); + "Last region should end with an empty key. Creating a new " + + "region and regioninfo in HDFS to plug the hole.", + getTableInfo()); TableDescriptor htd = getTableInfo().getTableDescriptor(); // from curEndKey to EMPTY_START_ROW - RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(curEndKey) - .setEndKey(HConstants.EMPTY_START_ROW) - .build(); + RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(curEndKey) + .setEndKey(HConstants.EMPTY_START_ROW).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("Table region end key was not empty. Created new empty region: " + newRegion - + " " + region); + LOG.info("Table region end key was not empty. Created new empty region: " + newRegion + " " + + region); hbck.fixes++; } /** - * There is a hole in the hdfs regions that violates the table integrity - * rules. Create a new empty region that patches the hole. + * There is a hole in the hdfs regions that violates the table integrity rules. 
Create a new + * empty region that patches the hole. */ @Override public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) - throws IOException { + throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, - "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + - " and " + Bytes.toStringBinary(holeStopKey) + - ". Creating a new regioninfo and region " + "dir in hdfs to plug the hole."); + "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + + " and " + Bytes.toStringBinary(holeStopKey) + ". Creating a new regioninfo and region " + + "dir in hdfs to plug the hole."); TableDescriptor htd = getTableInfo().getTableDescriptor(); - RegionInfo newRegion = - RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(holeStartKey) - .setEndKey(holeStopKey).build(); + RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) + .setStartKey(holeStartKey).setEndKey(holeStopKey).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region); hbck.fixes++; } /** - * This takes set of overlapping regions and merges them into a single - * region. This covers cases like degenerate regions, shared start key, - * general overlaps, duplicate ranges, and partial overlapping regions. - * - * Cases: - * - Clean regions that overlap - * - Only .oldlogs regions (can't find start/stop range, or figure out) - * - * This is basically threadsafe, except for the fixer increment in mergeOverlaps. + * This takes set of overlapping regions and merges them into a single region. This covers cases + * like degenerate regions, shared start key, general overlaps, duplicate ranges, and partial + * overlapping regions. Cases: - Clean regions that overlap - Only .oldlogs regions (can't find + * start/stop range, or figure out) This is basically threadsafe, except for the fixer increment + * in mergeOverlaps. 
*/ @Override - public void handleOverlapGroup(Collection overlap) - throws IOException { + public void handleOverlapGroup(Collection overlap) throws IOException { Preconditions.checkNotNull(overlap); - Preconditions.checkArgument(overlap.size() >0); + Preconditions.checkArgument(overlap.size() > 0); if (!this.fixOverlaps) { LOG.warn("Not attempting to repair overlaps."); @@ -350,9 +333,9 @@ public class HbckTableInfo { } if (overlap.size() > hbck.getMaxMerge()) { - LOG.warn("Overlap group has " + overlap.size() + " overlapping " + - "regions which is greater than " + hbck.getMaxMerge() + - ", the max number of regions to merge"); + LOG.warn( + "Overlap group has " + overlap.size() + " overlapping " + "regions which is greater than " + + hbck.getMaxMerge() + ", the max number of regions to merge"); if (hbck.shouldSidelineBigOverlaps()) { // we only sideline big overlapped groups that exceeds the max number of regions to merge sidelineBigOverlaps(overlap); @@ -385,24 +368,28 @@ public class HbckTableInfo { if (range == null) { range = new Pair(hi.getStartKey(), hi.getEndKey()); } else { - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getStartKey(), range.getFirst()) < 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getStartKey(), range.getFirst()) < 0 + ) { range.setFirst(hi.getStartKey()); } - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getEndKey(), range.getSecond()) > 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getEndKey(), range.getSecond()) > 0 + ) { range.setSecond(hi.getEndKey()); } } } LOG.info("This group range is [" + Bytes.toStringBinary(range.getFirst()) + ", " - + Bytes.toStringBinary(range.getSecond()) + "]"); + + Bytes.toStringBinary(range.getSecond()) + "]"); // attempt to find a possible parent for the edge case of a split for (HbckRegionInfo hi : overlap) { - if (Bytes.compareTo(hi.getHdfsHRI().getStartKey(), range.getFirst()) == 0 - && Bytes.compareTo(hi.getHdfsHRI().getEndKey(), range.getSecond()) == 0) { + if ( + Bytes.compareTo(hi.getHdfsHRI().getStartKey(), range.getFirst()) == 0 + && Bytes.compareTo(hi.getHdfsHRI().getEndKey(), range.getSecond()) == 0 + ) { LOG.info("This is a parent for this group: " + hi.toString()); parent = hi; } @@ -429,8 +416,10 @@ public class HbckTableInfo { } // daughters must share the same regionID and we should have a parent too - if (daughterA.getHdfsHRI().getRegionId() != daughterB.getHdfsHRI().getRegionId() || - parent == null) { + if ( + daughterA.getHdfsHRI().getRegionId() != daughterB.getHdfsHRI().getRegionId() + || parent == null + ) { return; } @@ -453,7 +442,7 @@ public class HbckTableInfo { hbck.offline(parent.getRegionName()); } catch (IOException ioe) { LOG.warn("Unable to offline parent region: " + parent.getRegionNameAsString() - + ". Just continuing with regular merge... ", ioe); + + ". Just continuing with regular merge... ", ioe); return; } @@ -461,14 +450,13 @@ public class HbckTableInfo { HBaseFsckRepair.removeParentInMeta(conf, parent.getHdfsHRI()); } catch (IOException ioe) { LOG.warn("Unable to remove parent region in META: " + parent.getRegionNameAsString() - + ". Just continuing with regular merge... ", ioe); + + ". Just continuing with regular merge... 
", ioe); return; } hbck.sidelineRegionDir(fs, parent); - LOG.info( - "[" + thread + "] Sidelined parent region dir " + parent.getHdfsRegionDir() + " into " + - hbck.getSidelineDir()); + LOG.info("[" + thread + "] Sidelined parent region dir " + parent.getHdfsRegionDir() + + " into " + hbck.getSidelineDir()); hbck.debugLsr(parent.getHdfsRegionDir()); // Make sure we don't have the parents and daughters around @@ -480,39 +468,40 @@ public class HbckTableInfo { } - void mergeOverlaps(Collection overlap) - throws IOException { + void mergeOverlaps(Collection overlap) throws IOException { String thread = Thread.currentThread().getName(); - LOG.info("== [" + thread + "] Merging regions into one region: " - + Joiner.on(",").join(overlap)); + LOG.info( + "== [" + thread + "] Merging regions into one region: " + Joiner.on(",").join(overlap)); // get the min / max range and close all concerned regions Pair range = null; for (HbckRegionInfo hi : overlap) { if (range == null) { range = new Pair<>(hi.getStartKey(), hi.getEndKey()); } else { - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getStartKey(), range.getFirst()) < 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getStartKey(), range.getFirst()) < 0 + ) { range.setFirst(hi.getStartKey()); } - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getEndKey(), range.getSecond()) > 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getEndKey(), range.getSecond()) > 0 + ) { range.setSecond(hi.getEndKey()); } } // need to close files so delete can happen. - LOG.debug("[" + thread + "] Closing region before moving data around: " + hi); + LOG.debug("[" + thread + "] Closing region before moving data around: " + hi); LOG.debug("[" + thread + "] Contained region dir before close"); hbck.debugLsr(hi.getHdfsRegionDir()); try { LOG.info("[" + thread + "] Closing region: " + hi); hbck.closeRegion(hi); } catch (IOException ioe) { - LOG.warn("[" + thread + "] Was unable to close region " + hi - + ". Just continuing... ", ioe); + LOG.warn("[" + thread + "] Was unable to close region " + hi + ". Just continuing... ", + ioe); } catch (InterruptedException e) { - LOG.warn("[" + thread + "] Was unable to close region " + hi - + ". Just continuing... ", e); + LOG.warn("[" + thread + "] Was unable to close region " + hi + ". Just continuing... ", + e); } try { @@ -520,7 +509,7 @@ public class HbckTableInfo { hbck.offline(hi.getRegionName()); } catch (IOException ioe) { LOG.warn("[" + thread + "] Unable to offline region from master: " + hi - + ". Just continuing... ", ioe); + + ". Just continuing... ", ioe); } } @@ -528,19 +517,17 @@ public class HbckTableInfo { TableDescriptor htd = getTableInfo().getTableDescriptor(); // from start key to end Key RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(range.getFirst()) - .setEndKey(range.getSecond()) - .build(); + .setStartKey(range.getFirst()).setEndKey(range.getSecond()).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("[" + thread + "] Created new empty container region: " + - newRegion + " to contain regions: " + Joiner.on(",").join(overlap)); + LOG.info("[" + thread + "] Created new empty container region: " + newRegion + + " to contain regions: " + Joiner.on(",").join(overlap)); hbck.debugLsr(region.getRegionFileSystem().getRegionDir()); // all target regions are closed, should be able to safely cleanup. 
- boolean didFix= false; + boolean didFix = false; Path target = region.getRegionFileSystem().getRegionDir(); for (HbckRegionInfo contained : overlap) { - LOG.info("[" + thread + "] Merging " + contained + " into " + target); + LOG.info("[" + thread + "] Merging " + contained + " into " + target); int merges = hbck.mergeRegionDirs(target, contained); if (merges > 0) { didFix = true; @@ -552,9 +539,8 @@ public class HbckTableInfo { } /** - * Sideline some regions in a big overlap group so that it - * will have fewer regions, and it is easier to merge them later on. - * + * Sideline some regions in a big overlap group so that it will have fewer regions, and it is + * easier to merge them later on. * @param bigOverlap the overlapped group with regions more than maxMerge */ void sidelineBigOverlaps(Collection bigOverlap) throws IOException { @@ -563,26 +549,26 @@ public class HbckTableInfo { overlapsToSideline = hbck.getMaxOverlapsToSideline(); } List regionsToSideline = - RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline); + RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline); FileSystem fs = FileSystem.get(conf); - for (HbckRegionInfo regionToSideline: regionsToSideline) { + for (HbckRegionInfo regionToSideline : regionsToSideline) { try { LOG.info("Closing region: " + regionToSideline); hbck.closeRegion(regionToSideline); } catch (IOException ioe) { - LOG.warn("Was unable to close region " + regionToSideline - + ". Just continuing... ", ioe); + LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ", + ioe); } catch (InterruptedException e) { - LOG.warn("Was unable to close region " + regionToSideline - + ". Just continuing... ", e); + LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ", e); } try { LOG.info("Offlining region: " + regionToSideline); hbck.offline(regionToSideline.getRegionName()); } catch (IOException ioe) { - LOG.warn("Unable to offline region from master: " + regionToSideline - + ". Just continuing... ", ioe); + LOG.warn( + "Unable to offline region from master: " + regionToSideline + ". Just continuing... ", + ioe); } LOG.info("Before sideline big overlapped region: " + regionToSideline.toString()); @@ -590,8 +576,7 @@ public class HbckTableInfo { if (sidelineRegionDir != null) { sidelinedRegions.put(sidelineRegionDir, regionToSideline); LOG.info("After sidelined big overlapped region: " - + regionToSideline.getRegionNameAsString() - + " to " + sidelineRegionDir.toString()); + + regionToSideline.getRegionNameAsString() + " to " + sidelineRegionDir.toString()); hbck.fixes++; } } @@ -599,8 +584,8 @@ public class HbckTableInfo { } /** - * Check the region chain (from META) of this table. We are looking for - * holes, overlaps, and cycles. + * Check the region chain (from META) of this table. We are looking for holes, overlaps, and + * cycles. * @return false if there are errors */ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOException { @@ -635,7 +620,7 @@ public class HbckTableInfo { // special endkey case converts '' to null byte[] endKey = rng.getEndKey(); endKey = (endKey.length == 0) ? 
null : endKey; - if (Bytes.equals(rng.getStartKey(),endKey)) { + if (Bytes.equals(rng.getStartKey(), endKey)) { handler.handleDegenerateRegion(rng); } } @@ -658,7 +643,7 @@ public class HbckTableInfo { // record errors ArrayList subRange = new ArrayList<>(ranges); - // this dumb and n^2 but this shouldn't happen often + // this dumb and n^2 but this shouldn't happen often for (HbckRegionInfo r1 : ranges) { if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { continue; @@ -669,10 +654,12 @@ public class HbckTableInfo { continue; } // general case of same start key - if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) { - handler.handleDuplicateStartKeys(r1,r2); - } else if (Bytes.compareTo(r1.getEndKey(), r2.getStartKey())==0 && - r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId()) { + if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey()) == 0) { + handler.handleDuplicateStartKeys(r1, r2); + } else if ( + Bytes.compareTo(r1.getEndKey(), r2.getStartKey()) == 0 + && r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId() + ) { LOG.info("this is a split, log to splits"); handler.handleSplit(r1, r2); } else { @@ -718,28 +705,25 @@ public class HbckTableInfo { if (HBaseFsck.shouldDisplayFullReport()) { // do full region split map dump - hbck.getErrors().print("---- Table '" + this.tableName - + "': region split map"); + hbck.getErrors().print("---- Table '" + this.tableName + "': region split map"); dump(splits, regions); - hbck.getErrors().print("---- Table '" + this.tableName - + "': overlap groups"); + hbck.getErrors().print("---- Table '" + this.tableName + "': overlap groups"); dumpOverlapProblems(overlapGroups); - hbck.getErrors().print("There are " + overlapGroups.keySet().size() - + " overlap groups with " + overlapGroups.size() - + " overlapping regions"); + hbck.getErrors().print("There are " + overlapGroups.keySet().size() + " overlap groups with " + + overlapGroups.size() + " overlapping regions"); } if (!sidelinedRegions.isEmpty()) { LOG.warn("Sidelined big overlapped regions, please bulk load them!"); - hbck.getErrors().print("---- Table '" + this.tableName - + "': sidelined big overlapped regions"); + hbck.getErrors() + .print("---- Table '" + this.tableName + "': sidelined big overlapped regions"); dumpSidelinedRegions(sidelinedRegions); } return hbck.getErrors().getErrorList().size() == originalErrorsCount; } private boolean handleOverlapsParallel(TableIntegrityErrorHandler handler, byte[] prevKey) - throws IOException { - // we parallelize overlap handler for the case we have lots of groups to fix. We can + throws IOException { + // we parallelize overlap handler for the case we have lots of groups to fix. We can // safely assume each group is independent. List merges = new ArrayList<>(overlapGroups.size()); List> rets; @@ -753,12 +737,12 @@ public class HbckTableInfo { LOG.error("Overlap merges were interrupted", e); return false; } - for(int i=0; i f = rets.get(i); try { f.get(); - } catch(ExecutionException e) { + } catch (ExecutionException e) { LOG.warn("Failed to merge overlap group" + work, e.getCause()); } catch (InterruptedException e) { LOG.error("Waiting for overlap merges was interrupted", e); @@ -778,8 +762,7 @@ public class HbckTableInfo { sb.setLength(0); // clear out existing buffer, if any. 
sb.append(Bytes.toStringBinary(k) + ":\t"); for (HbckRegionInfo r : regions.get(k)) { - sb.append("[ "+ r.toString() + ", " - + Bytes.toStringBinary(r.getEndKey())+ "]\t"); + sb.append("[ " + r.toString() + ", " + Bytes.toStringBinary(r.getEndKey()) + "]\t"); } hbck.getErrors().print(sb.toString()); } @@ -791,8 +774,8 @@ public class HbckTableInfo { for (byte[] k : regions.keySet()) { hbck.getErrors().print(Bytes.toStringBinary(k) + ":"); for (HbckRegionInfo r : regions.get(k)) { - hbck.getErrors().print("[ " + r.toString() + ", " - + Bytes.toStringBinary(r.getEndKey()) + "]"); + hbck.getErrors() + .print("[ " + r.toString() + ", " + Bytes.toStringBinary(r.getEndKey()) + "]"); } hbck.getErrors().print("----"); } @@ -803,8 +786,8 @@ public class HbckTableInfo { TableName tableName = entry.getValue().getTableName(); Path path = entry.getKey(); hbck.getErrors().print("This sidelined region dir should be bulk loaded: " + path.toString()); - hbck.getErrors().print("Bulk load command looks like: " + BulkLoadHFilesTool.NAME + " " + - path.toUri().getPath() + " " + tableName); + hbck.getErrors().print("Bulk load command looks like: " + BulkLoadHFilesTool.NAME + " " + + path.toUri().getPath() + " " + tableName); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java index a644a9a34b3..e940b072ea0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase.util; import java.lang.ref.Reference; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -57,8 +55,8 @@ public class IdReadWriteLock { /** * Constructor of IdReadWriteLock * @param referenceType type of the reference used in lock pool, {@link ReferenceType#WEAK} by - * default. Use {@link ReferenceType#SOFT} if the key set is limited and the locks will - * be reused with a high frequency + * default. Use {@link ReferenceType#SOFT} if the key set is limited and the + * locks will be reused with a high frequency */ public IdReadWriteLock(ReferenceType referenceType) { this.refType = referenceType; @@ -83,7 +81,8 @@ public class IdReadWriteLock { } public static enum ReferenceType { - WEAK, SOFT + WEAK, + SOFT } /** @@ -104,7 +103,7 @@ public class IdReadWriteLock { return lockPool.size(); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_GC", justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DM_GC", justification = "Intentional") private void gc() { System.gc(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 1e2ac3ebb97..a9f9443d4bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,21 @@ */ package org.apache.hadoop.hbase.util; -import java.io.InterruptedIOException; import java.io.IOException; +import java.io.InterruptedIOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.regionserver.HRegionServer; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -59,29 +58,25 @@ public class JVMClusterUtil { } /** - * Block until the region server has come online, indicating it is ready - * to be used. + * Block until the region server has come online, indicating it is ready to be used. */ public void waitForServerOnline() { // The server is marked online after the init method completes inside of - // the HRS#run method. HRS#init can fail for whatever region. In those - // cases, we'll jump out of the run without setting online flag. Check + // the HRS#run method. HRS#init can fail for whatever region. In those + // cases, we'll jump out of the run without setting online flag. Check // stopRequested so we don't wait here a flag that will never be flipped. regionServer.waitForServerOnline(); } } /** - * Creates a {@link RegionServerThread}. - * Call 'start' on the returned thread to make it run. - * @param c Configuration to use. - * @param hrsc Class to create. - * @param index Used distinguishing the object returned. - * @throws IOException - * @return Region server added. + * Creates a {@link RegionServerThread}. Call 'start' on the returned thread to make it run. + * @param c Configuration to use. + * @param hrsc Class to create. + * @param index Used distinguishing the object returned. n * @return Region server added. */ public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c, - final Class hrsc, final int index) throws IOException { + final Class hrsc, final int index) throws IOException { HRegionServer server; try { Constructor ctor = hrsc.getConstructor(Configuration.class); @@ -89,16 +84,14 @@ public class JVMClusterUtil { server = ctor.newInstance(c); } catch (InvocationTargetException ite) { Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of RegionServer: " + - hrsc.toString() + ((target.getCause() != null)? - target.getCause().getMessage(): ""), target); + throw new RuntimeException("Failed construction of RegionServer: " + hrsc.toString() + + ((target.getCause() != null) ? target.getCause().getMessage() : ""), target); } catch (Exception e) { throw new IOException(e); } return new JVMClusterUtil.RegionServerThread(server, index); } - /** * Datastructure to hold Master Thread and Master instance */ @@ -117,24 +110,20 @@ public class JVMClusterUtil { } /** - * Creates a {@link MasterThread}. - * Call 'start' on the returned thread to make it run. - * @param c Configuration to use. - * @param hmc Class to create. - * @param index Used distinguishing the object returned. 
- * @throws IOException - * @return Master added. + * Creates a {@link MasterThread}. Call 'start' on the returned thread to make it run. + * @param c Configuration to use. + * @param hmc Class to create. + * @param index Used distinguishing the object returned. n * @return Master added. */ public static JVMClusterUtil.MasterThread createMasterThread(final Configuration c, - final Class hmc, final int index) throws IOException { + final Class hmc, final int index) throws IOException { HMaster server; try { server = hmc.getConstructor(Configuration.class).newInstance(c); } catch (InvocationTargetException ite) { Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of Master: " + - hmc.toString() + ((target.getCause() != null)? - target.getCause().getMessage(): ""), target); + throw new RuntimeException("Failed construction of Master: " + hmc.toString() + + ((target.getCause() != null) ? target.getCause().getMessage() : ""), target); } catch (Exception e) { throw new IOException(e); } @@ -142,12 +131,12 @@ public class JVMClusterUtil { // just add the current master host port since we do not know other master addresses up front // in mini cluster tests. c.set(HConstants.MASTER_ADDRS_KEY, - Preconditions.checkNotNull(server.getServerName().getAddress()).toString()); + Preconditions.checkNotNull(server.getServerName().getAddress()).toString()); return new JVMClusterUtil.MasterThread(server, index); } - private static JVMClusterUtil.MasterThread findActiveMaster( - List masters) { + private static JVMClusterUtil.MasterThread + findActiveMaster(List masters) { for (JVMClusterUtil.MasterThread t : masters) { if (t.master.isActiveMaster()) { return t; @@ -158,14 +147,11 @@ public class JVMClusterUtil { } /** - * Start the cluster. Waits until there is a primary master initialized - * and returns its address. - * @param masters - * @param regionservers - * @return Address to use contacting primary master. + * Start the cluster. Waits until there is a primary master initialized and returns its address. + * nn * @return Address to use contacting primary master. */ public static String startup(final List masters, - final List regionservers) throws IOException { + final List regionservers) throws IOException { // Implementation note: This method relies on timed sleeps in a loop. It's not great, and // should probably be re-written to use actual synchronization objects, but it's ok for now @@ -181,28 +167,29 @@ public class JVMClusterUtil { } // Wait for an active master - // having an active master before starting the region threads allows - // then to succeed on their connection to master - final int startTimeout = configuration != null ? Integer.parseInt( - configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) : 30000; + // having an active master before starting the region threads allows + // then to succeed on their connection to master + final int startTimeout = configuration != null + ? Integer.parseInt(configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) + : 30000; waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != null); if (regionservers != null) { - for (JVMClusterUtil.RegionServerThread t: regionservers) { + for (JVMClusterUtil.RegionServerThread t : regionservers) { t.start(); } } // Wait for an active master to be initialized (implies being master) - // with this, when we return the cluster is complete - final int initTimeout = configuration != null ? 
Integer.parseInt( - configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000")) : 200000; + // with this, when we return the cluster is complete + final int initTimeout = configuration != null + ? Integer.parseInt(configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000")) + : 200000; waitForEvent(initTimeout, "initialized", () -> { - JVMClusterUtil.MasterThread t = findActiveMaster(masters); - // master thread should never be null at this point, but let's keep the check anyway - return t != null && t.master.isInitialized(); - } - ); + JVMClusterUtil.MasterThread t = findActiveMaster(masters); + // master thread should never be null at this point, but let's keep the check anyway + return t != null && t.master.isInitialized(); + }); return findActiveMaster(masters).master.getServerName().toString(); } @@ -210,15 +197,15 @@ public class JVMClusterUtil { /** * Utility method to wait some time for an event to occur, and then return control to the caller. * @param millis How long to wait, in milliseconds. - * @param action The action that we are waiting for. Will be used in log message if the event - * does not occur. - * @param check A Supplier that will be checked periodically to produce an updated true/false - * result indicating if the expected event has happened or not. + * @param action The action that we are waiting for. Will be used in log message if the event does + * not occur. + * @param check A Supplier that will be checked periodically to produce an updated true/false + * result indicating if the expected event has happened or not. * @throws InterruptedIOException If we are interrupted while waiting for the event. - * @throws RuntimeException If we reach the specified timeout while waiting for the event. + * @throws RuntimeException If we reach the specified timeout while waiting for the event. */ private static void waitForEvent(long millis, String action, Supplier check) - throws InterruptedIOException { + throws InterruptedIOException { long end = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(millis); while (true) { @@ -235,18 +222,17 @@ public class JVMClusterUtil { try { Thread.sleep(100); } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } } } /** - * @param masters - * @param regionservers + * nn */ public static void shutdown(final List masters, - final List regionservers) { + final List regionservers) { LOG.debug("Shutting down HBase Cluster"); if (masters != null) { // Do backups first. 
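For reference, the timed wait that startup() relies on (the waitForEvent helper reformatted in the hunk above) reduces to a simple poll loop: check a condition, sleep 100 ms, and give up once the deadline passes. Below is a minimal, self-contained sketch of that pattern; the class name PollUntil and the exception message text are illustrative, while the 100 ms poll interval, the Supplier<Boolean> check, and the InterruptedIOException handling mirror the code shown in the diff above.

import java.io.InterruptedIOException;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public final class PollUntil {
  private PollUntil() {
  }

  /**
   * Waits up to millis for check to return true, polling every 100 ms, in the style of the
   * waitForEvent helper above. Throws RuntimeException on timeout and converts an interrupt
   * into an InterruptedIOException.
   */
  public static void waitForEvent(long millis, String action, Supplier<Boolean> check)
    throws InterruptedIOException {
    long end = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(millis);
    while (true) {
      if (check.get()) {
        return;
      }
      if (System.nanoTime() > end) {
        // Hypothetical message text; the real helper logs the action that never happened.
        throw new RuntimeException("Master not " + action + " after " + millis + "ms");
      }
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      }
    }
  }
}

Usage matches the startup() hunk above, e.g. waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != null);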
@@ -260,15 +246,15 @@ public class JVMClusterUtil { } catch (IOException e) { LOG.error("Exception occurred while stopping master", e); } - LOG.info("Stopped backup Master {} is stopped: {}", - t.master.hashCode(), t.master.isStopped()); + LOG.info("Stopped backup Master {} is stopped: {}", t.master.hashCode(), + t.master.isStopped()); } else { if (activeMaster != null) { LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode()); } activeMaster = t; - LOG.debug("Found active master hash={}, stopped={}", - t.master.hashCode(), t.master.isStopped()); + LOG.debug("Found active master hash={}, stopped={}", t.master.hashCode(), + t.master.isStopped()); } } } @@ -294,8 +280,8 @@ public class JVMClusterUtil { try { t.join(maxTime - now); } catch (InterruptedException e) { - LOG.info("Got InterruptedException on shutdown - " + - "not waiting anymore on region server ends", e); + LOG.info("Got InterruptedException on shutdown - " + + "not waiting anymore on region server ends", e); wasInterrupted = true; // someone wants us to speed up. } } @@ -318,8 +304,8 @@ public class JVMClusterUtil { if (!atLeastOneLiveServer) break; for (RegionServerThread t : regionservers) { if (t.isAlive()) { - LOG.warn("RegionServerThreads taking too long to stop, interrupting; thread dump " + - "if > 3 attempts: i=" + i); + LOG.warn("RegionServerThreads taking too long to stop, interrupting; thread dump " + + "if > 3 attempts: i=" + i); if (i > 3) { Threads.printThreadInfo(System.out, "Thread dump " + t.getName()); } @@ -337,20 +323,19 @@ public class JVMClusterUtil { // tests. // this.master.join(): Threads.threadDumpingIsAlive(t.master); - } catch(InterruptedException e) { - LOG.info("Got InterruptedException on shutdown - " + - "not waiting anymore on master ends", e); + } catch (InterruptedException e) { + LOG.info( + "Got InterruptedException on shutdown - " + "not waiting anymore on master ends", e); wasInterrupted = true; } } } } - LOG.info("Shutdown of " + - ((masters != null) ? masters.size() : "0") + " master(s) and " + - ((regionservers != null) ? regionservers.size() : "0") + - " regionserver(s) " + (wasInterrupted ? "interrupted" : "complete")); + LOG.info("Shutdown of " + ((masters != null) ? masters.size() : "0") + " master(s) and " + + ((regionservers != null) ? regionservers.size() : "0") + " regionserver(s) " + + (wasInterrupted ? 
"interrupted" : "complete")); - if (wasInterrupted){ + if (wasInterrupted) { Thread.currentThread().interrupt(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java index 9c00771ee4f..6d8566ab571 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java @@ -23,12 +23,11 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource; -import org.apache.hadoop.conf.Configuration; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -38,16 +37,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * Class which sets up a simple thread which runs in a loop sleeping - * for a short interval of time. If the sleep takes significantly longer - * than its target time, it implies that the JVM or host machine has - * paused processing, which may cause other problems. If such a pause is - * detected, the thread logs a message. - * The original JvmPauseMonitor is: - * ${hadoop-common-project}/hadoop-common/src/main/java/org/apache/hadoop/util/ - * JvmPauseMonitor.java - * r1503806 | cmccabe | 2013-07-17 01:48:24 +0800 (Wed, 17 Jul 2013) | 1 line - * HADOOP-9618. thread which detects GC pauses(Todd Lipcon) + * Class which sets up a simple thread which runs in a loop sleeping for a short interval of time. + * If the sleep takes significantly longer than its target time, it implies that the JVM or host + * machine has paused processing, which may cause other problems. If such a pause is detected, the + * thread logs a message. The original JvmPauseMonitor is: + * ${hadoop-common-project}/hadoop-common/src/main/java/org/apache/hadoop/util/ JvmPauseMonitor.java + * r1503806 | cmccabe | 2013-07-17 01:48:24 +0800 (Wed, 17 Jul 2013) | 1 line HADOOP-9618. 
thread + * which detects GC pauses(Todd Lipcon) */ @InterfaceAudience.Private public class JvmPauseMonitor { @@ -55,17 +51,15 @@ public class JvmPauseMonitor { /** The target sleep time */ private static final long SLEEP_INTERVAL_MS = 500; - + /** log WARN if we detect a pause longer than this threshold */ private final long warnThresholdMs; - public static final String WARN_THRESHOLD_KEY = - "jvm.pause.warn-threshold.ms"; + public static final String WARN_THRESHOLD_KEY = "jvm.pause.warn-threshold.ms"; private static final long WARN_THRESHOLD_DEFAULT = 10000; - + /** log INFO if we detect a pause longer than this threshold */ private final long infoThresholdMs; - public static final String INFO_THRESHOLD_KEY = - "jvm.pause.info-threshold.ms"; + public static final String INFO_THRESHOLD_KEY = "jvm.pause.info-threshold.ms"; private static final long INFO_THRESHOLD_DEFAULT = 1000; private Thread monitorThread; @@ -81,7 +75,7 @@ public class JvmPauseMonitor { this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT); this.metricsSource = metricsSource; } - + public void start() { Preconditions.checkState(monitorThread == null, "Already started"); monitorThread = new Thread(new Monitor(), "JvmPauseMonitor"); @@ -98,10 +92,10 @@ public class JvmPauseMonitor { Thread.currentThread().interrupt(); } } - + private String formatMessage(long extraSleepTime, List gcDiffs) { String ret = "Detected pause in JVM or host machine (eg GC): " + "pause of approximately " - + extraSleepTime + "ms\n"; + + extraSleepTime + "ms\n"; if (gcDiffs.isEmpty()) { ret += "No GCs detected"; } else { @@ -109,7 +103,7 @@ public class JvmPauseMonitor { } return ret; } - + private Map getGcTimes() { Map map = Maps.newHashMap(); List gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); @@ -160,8 +154,8 @@ public class JvmPauseMonitor { Map gcTimesAfterSleep = getGcTimes(); if (extraSleepTime > infoThresholdMs) { - Set gcBeanNames = Sets.intersection(gcTimesAfterSleep.keySet(), - gcTimesBeforeSleep.keySet()); + Set gcBeanNames = + Sets.intersection(gcTimesAfterSleep.keySet(), gcTimesBeforeSleep.keySet()); List gcDiffs = Lists.newArrayList(); for (String name : gcBeanNames) { GcTimes diff = gcTimesAfterSleep.get(name).subtract(gcTimesBeforeSleep.get(name)); @@ -207,13 +201,11 @@ public class JvmPauseMonitor { } /** - * Simple 'main' to facilitate manual testing of the pause monitor. - * - * This main function just leaks memory into a list. Running this class - * with a 1GB heap will very quickly go into "GC hell" and result in - * log messages about the GC pauses. + * Simple 'main' to facilitate manual testing of the pause monitor. This main function just leaks + * memory into a list. Running this class with a 1GB heap will very quickly go into "GC hell" and + * result in log messages about the GC pauses. */ - public static void main(String []args) throws Exception { + public static void main(String[] args) throws Exception { new JvmPauseMonitor(new Configuration()).start(); List list = Lists.newArrayList(); int i = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java index 65c952e4be7..01932b07c60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ package org.apache.hadoop.hbase.util; import java.util.HashSet; import java.util.Set; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -47,8 +45,8 @@ public abstract class JvmVersion { * Return the current JVM version information. */ public static String getVersion() { - return System.getProperty("java.vm.vendor", "UNKNOWN_VM_VENDOR") + ' ' + - System.getProperty("java.version", "UNKNOWN_JAVA_VERSION") + '-' + - System.getProperty("java.vm.version", "UNKNOWN_VM_VERSION"); + return System.getProperty("java.vm.vendor", "UNKNOWN_VM_VENDOR") + ' ' + + System.getProperty("java.version", "UNKNOWN_JAVA_VERSION") + '-' + + System.getProperty("java.vm.version", "UNKNOWN_VM_VERSION"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java index 29e7836a748..b579b609f32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java index e6075d2754b..0857364fc06 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,8 +21,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when the lease was expected to be recovered, - * but the file can't be opened. + * Thrown when the lease was expected to be recovered, but the file can't be opened. */ @InterfaceAudience.Public public class LeaseNotRecoveredException extends HBaseIOException { @@ -36,10 +34,10 @@ public class LeaseNotRecoveredException extends HBaseIOException { } public LeaseNotRecoveredException(String message, Throwable cause) { - super(message, cause); + super(message, cause); } public LeaseNotRecoveredException(Throwable cause) { - super(cause); + super(cause); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java index 9ade12d578c..7d2483c6663 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; import java.util.Map; @@ -35,14 +33,10 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * LossyCounting utility, bounded data structure that maintains approximate high frequency - * elements in data stream. - * - * Bucket size is 1 / error rate. (Error rate is 0.02 by default) - * Lemma If element does not appear in set, then is frequency is less than e * N - * (N is total element counts until now.) - * Based on paper: - * http://www.vldb.org/conf/2002/S10P03.pdf + * LossyCounting utility, bounded data structure that maintains approximate high frequency elements + * in data stream. Bucket size is 1 / error rate. (Error rate is 0.02 by default) Lemma If element + * does not appear in set, then is frequency is less than e * N (N is total element counts until + * now.) Based on paper: http://www.vldb.org/conf/2002/S10P03.pdf */ @InterfaceAudience.Private public class LossyCounting { @@ -88,22 +82,22 @@ public class LossyCounting { } private void addByOne(T key) { - //If entry exists, we update the entry by incrementing its frequency by one. Otherwise, - //we create a new entry starting with currentTerm so that it will not be pruned immediately + // If entry exists, we update the entry by incrementing its frequency by one. Otherwise, + // we create a new entry starting with currentTerm so that it will not be pruned immediately data.put(key, data.getOrDefault(key, currentTerm != 0 ? currentTerm - 1 : 0) + 1); - //update totalDataCount and term + // update totalDataCount and term totalDataCount++; calculateCurrentTerm(); } public void add(T key) { addByOne(key); - if(totalDataCount % bucketSize == 0) { - //sweep the entries at bucket boundaries - //run Sweep + if (totalDataCount % bucketSize == 0) { + // sweep the entries at bucket boundaries + // run Sweep Future future = fut.get(); - if (future != null && !future.isDone()){ + if (future != null && !future.isDone()) { return; } future = executor.submit(new SweepRunnable()); @@ -111,13 +105,12 @@ public class LossyCounting { } } - /** * sweep low frequency data */ public void sweep() { - for(Map.Entry entry : data.entrySet()) { - if(entry.getValue() < currentTerm) { + for (Map.Entry entry : data.entrySet()) { + if (entry.getValue() < currentTerm) { T metric = entry.getKey(); data.remove(metric); if (listener != null) { @@ -134,7 +127,7 @@ public class LossyCounting { this.currentTerm = (int) Math.ceil(1.0 * totalDataCount / (double) bucketSize); } - public long getBucketSize(){ + public long getBucketSize() { return bucketSize; } @@ -146,7 +139,7 @@ public class LossyCounting { return data.containsKey(key); } - public Set getElements(){ + public Set getElements() { return data.keySet(); } @@ -155,7 +148,8 @@ public class LossyCounting { } class SweepRunnable implements Runnable { - @Override public void run() { + @Override + public void run() { if (LOG.isTraceEnabled()) { LOG.trace("Starting sweep of lossyCounting-" + name); } @@ -171,4 +165,3 @@ public class LossyCounting { return fut.get(); } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java index e5081273d47..29cc1063b97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java @@ -1,5 +1,4 @@ /* - * * Licensed 
to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,8 @@ package org.apache.hadoop.hbase.util; import org.apache.yetus.audience.InterfaceAudience; /** - * An environment edge that uses a manually set value. This is useful for testing events that are supposed to - * happen in the same millisecond. + * An environment edge that uses a manually set value. This is useful for testing events that are + * supposed to happen in the same millisecond. */ @InterfaceAudience.Private public class ManualEnvironmentEdge implements EnvironmentEdge { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index 67a94f21e94..fa2173d42ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; @@ -30,7 +28,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -38,11 +35,12 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * Utility methods for interacting with the regions. */ @@ -62,18 +60,12 @@ public abstract class ModifyRegionUtils { } public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, - byte[][] splitKeys) { + byte[][] splitKeys) { long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo[] hRegionInfos = null; if (splitKeys == null || splitKeys.length == 0) { - hRegionInfos = new RegionInfo[]{ - RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setStartKey(null) - .setEndKey(null) - .setSplit(false) - .setRegionId(regionId) - .build() - }; + hRegionInfos = new RegionInfo[] { RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) + .setStartKey(null).setEndKey(null).setSplit(false).setRegionId(regionId).build() }; } else { int numRegions = splitKeys.length + 1; hRegionInfos = new RegionInfo[numRegions]; @@ -81,13 +73,8 @@ public abstract class ModifyRegionUtils { byte[] endKey = null; for (int i = 0; i < numRegions; i++) { endKey = (i == splitKeys.length) ? 
null : splitKeys[i]; - hRegionInfos[i] = - RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setStartKey(startKey) - .setEndKey(endKey) - .setSplit(false) - .setRegionId(regionId) - .build(); + hRegionInfos[i] = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) + .setStartKey(startKey).setEndKey(endKey).setSplit(false).setRegionId(regionId).build(); startKey = endKey; } } @@ -95,23 +82,21 @@ public abstract class ModifyRegionUtils { } /** - * Create new set of regions on the specified file-system. - * NOTE: that you should add the regions to hbase:meta after this operation. - * - * @param conf {@link Configuration} - * @param rootDir Root directory for HBase instance + * Create new set of regions on the specified file-system. NOTE: that you should add the regions + * to hbase:meta after this operation. + * @param conf {@link Configuration} + * @param rootDir Root directory for HBase instance * @param tableDescriptor description of the table - * @param newRegions {@link RegionInfo} that describes the regions to create - * @param task {@link RegionFillTask} custom code to populate region after creation - * @throws IOException + * @param newRegions {@link RegionInfo} that describes the regions to create + * @param task {@link RegionFillTask} custom code to populate region after creation n */ public static List createRegions(final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, - final RegionFillTask task) throws IOException { + final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task) + throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf, - "RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber); + "RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber); try { return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task); } finally { @@ -120,21 +105,18 @@ public abstract class ModifyRegionUtils { } /** - * Create new set of regions on the specified file-system. - * NOTE: that you should add the regions to hbase:meta after this operation. - * - * @param exec Thread Pool Executor - * @param conf {@link Configuration} - * @param rootDir Root directory for HBase instance + * Create new set of regions on the specified file-system. NOTE: that you should add the regions + * to hbase:meta after this operation. 
+ * @param exec Thread Pool Executor + * @param conf {@link Configuration} + * @param rootDir Root directory for HBase instance * @param tableDescriptor description of the table - * @param newRegions {@link RegionInfo} that describes the regions to create - * @param task {@link RegionFillTask} custom code to populate region after creation - * @throws IOException + * @param newRegions {@link RegionInfo} that describes the regions to create + * @param task {@link RegionFillTask} custom code to populate region after creation n */ public static List createRegions(final ThreadPoolExecutor exec, - final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, - final RegionFillTask task) throws IOException { + final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor, + final RegionInfo[] newRegions, final RegionFillTask task) throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; CompletionService completionService = new ExecutorCompletionService<>(exec); @@ -163,16 +145,15 @@ public abstract class ModifyRegionUtils { /** * Create new set of regions on the specified file-system. - * @param conf {@link Configuration} - * @param rootDir Root directory for HBase instance + * @param conf {@link Configuration} + * @param rootDir Root directory for HBase instance * @param tableDescriptor description of the table - * @param newRegion {@link RegionInfo} that describes the region to create - * @param task {@link RegionFillTask} custom code to populate region after creation - * @throws IOException + * @param newRegion {@link RegionInfo} that describes the region to create + * @param task {@link RegionFillTask} custom code to populate region after creation n */ public static RegionInfo createRegion(final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo newRegion, - final RegionFillTask task) throws IOException { + final TableDescriptor tableDescriptor, final RegionInfo newRegion, final RegionFillTask task) + throws IOException { // 1. Create HRegion // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf. @@ -193,16 +174,14 @@ public abstract class ModifyRegionUtils { /** * Execute the task on the specified set of regions. 
- * - * @param exec Thread Pool Executor + * @param exec Thread Pool Executor * @param regions {@link RegionInfo} that describes the regions to edit - * @param task {@link RegionFillTask} custom code to edit the region - * @throws IOException + * @param task {@link RegionFillTask} custom code to edit the region n */ public static void editRegions(final ThreadPoolExecutor exec, - final Collection regions, final RegionEditTask task) throws IOException { + final Collection regions, final RegionEditTask task) throws IOException { final ExecutorCompletionService completionService = new ExecutorCompletionService<>(exec); - for (final RegionInfo hri: regions) { + for (final RegionInfo hri : regions) { completionService.submit(new Callable() { @Override public Void call() throws IOException { @@ -213,7 +192,7 @@ public abstract class ModifyRegionUtils { } try { - for (RegionInfo hri: regions) { + for (RegionInfo hri : regions) { completionService.take().get(); } } catch (InterruptedException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java index 4952cafccbc..bf1fef80998 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; @@ -32,10 +33,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.Callable; - /** * Move Regions and make sure that they are up on the target server.If a region movement fails we * exit as failure @@ -66,10 +63,10 @@ class MoveWithAck implements Callable { public Boolean call() throws IOException, InterruptedException { boolean moved = false; int count = 0; - int retries = admin.getConfiguration() - .getInt(RegionMover.MOVE_RETRIES_MAX_KEY, RegionMover.DEFAULT_MOVE_RETRIES_MAX); - int maxWaitInSeconds = admin.getConfiguration() - .getInt(RegionMover.MOVE_WAIT_MAX_KEY, RegionMover.DEFAULT_MOVE_WAIT_MAX); + int retries = admin.getConfiguration().getInt(RegionMover.MOVE_RETRIES_MAX_KEY, + RegionMover.DEFAULT_MOVE_RETRIES_MAX); + int maxWaitInSeconds = admin.getConfiguration().getInt(RegionMover.MOVE_WAIT_MAX_KEY, + RegionMover.DEFAULT_MOVE_WAIT_MAX); long startTime = EnvironmentEdgeManager.currentTime(); boolean sameServer = true; // Assert we can scan the region in its current location @@ -114,8 +111,7 @@ class MoveWithAck implements Callable { */ private void isSuccessfulScan(RegionInfo region) throws IOException { Scan scan = new Scan().withStartRow(region.getStartKey()).setRaw(true).setOneRowLimit() - .setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()) - .setCacheBlocks(false); + .setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()).setCacheBlocks(false); try (Table table = 
conn.getTable(region.getTable()); ResultScanner scanner = table.getScanner(scan)) { scanner.next(); @@ -129,8 +125,7 @@ class MoveWithAck implements Callable { * Returns true if passed region is still on serverName when we look at hbase:meta. * @return true if region is hosted on serverName otherwise false */ - private boolean isSameServer(RegionInfo region, ServerName serverName) - throws IOException { + private boolean isSameServer(RegionInfo region, ServerName serverName) throws IOException { ServerName serverForRegion = getServerNameForRegion(region, admin, conn); return serverForRegion != null && serverForRegion.equals(serverName); } @@ -141,14 +136,14 @@ class MoveWithAck implements Callable { * @return regionServer hosting the given region */ static ServerName getServerNameForRegion(RegionInfo region, Admin admin, Connection conn) - throws IOException { + throws IOException { if (!admin.isTableEnabled(region.getTable())) { return null; } HRegionLocation loc; try { - loc = conn.getRegionLocator(region.getTable()) - .getRegionLocation(region.getStartKey(), region.getReplicaId(), true); + loc = conn.getRegionLocator(region.getTable()).getRegionLocation(region.getStartKey(), + region.getReplicaId(), true); } catch (IOException e) { if (e.getMessage() != null && e.getMessage().startsWith("Unable to find region for")) { return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java index 0ddb99ac418..b4abd0de73d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; +import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; @@ -26,12 +26,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; -import java.util.concurrent.Callable; - /** - * Move Regions without Acknowledging.Usefule in case of RS shutdown as we might want to shut the - * RS down anyways and not abort on a stuck region. Improves movement performance + * Move Regions without Acknowledging.Usefule in case of RS shutdown as we might want to shut the RS + * down anyways and not abort on a stuck region. Improves movement performance */ @InterfaceAudience.Private class MoveWithoutAck implements Callable { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java index 58bd4ddc683..a7dd67d6ce1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; @@ -26,41 +24,40 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * Provides ability to create multiple Connection instances and allows to process a batch of - * actions using CHTable.doBatchWithCallback() + * Provides ability to create multiple Connection instances and allows to process a batch of actions + * using CHTable.doBatchWithCallback() */ @InterfaceAudience.Private public class MultiHConnection { private static final Logger LOG = LoggerFactory.getLogger(MultiHConnection.class); private Connection[] connections; - private final Object connectionsLock = new Object(); + private final Object connectionsLock = new Object(); private final int noOfConnections; private ExecutorService batchPool; /** * Create multiple Connection instances and initialize a thread pool executor - * @param conf configuration + * @param conf configuration * @param noOfConnections total no of Connections to create * @throws IOException if IO failure occurs */ - public MultiHConnection(Configuration conf, int noOfConnections) - throws IOException { + public MultiHConnection(Configuration conf, int noOfConnections) throws IOException { this.noOfConnections = noOfConnections; synchronized (this.connectionsLock) { connections = new Connection[noOfConnections]; @@ -107,15 +104,15 @@ public class MultiHConnection { /** * Randomly pick a connection and process the batch of actions for a given table - * @param actions the actions + * @param actions the actions * @param tableName table name - * @param results the results array - * @param callback to run when results are in + * @param results the results array + * @param callback to run when results are in * @throws IOException If IO failure occurs */ @SuppressWarnings("deprecation") public void processBatchCallback(List actions, TableName tableName, - Object[] results, Batch.Callback callback) throws IOException { + Object[] results, Batch.Callback callback) throws IOException { // Currently used by RegionStateStore ClusterConnection conn = (ClusterConnection) connections[ThreadLocalRandom.current().nextInt(noOfConnections)]; @@ -134,9 +131,8 @@ public class MultiHConnection { } long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60); LinkedBlockingQueue workQueue = - new 
LinkedBlockingQueue<>(maxThreads - * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); + new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); ThreadPoolExecutor tpe = new ThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, new ThreadFactoryBuilder().setNameFormat("MultiHConnection" + "-shared-pool-%d") @@ -144,5 +140,5 @@ public class MultiHConnection { tpe.allowCoreThreadTimeOut(true); this.batchPool = tpe; } - + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java index fa4c18442ac..f3e731f5333 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,18 +20,15 @@ package org.apache.hadoop.hbase.util; import java.util.Arrays; import java.util.Deque; import java.util.LinkedList; - import org.apache.yetus.audience.InterfaceAudience; /** - * Computes the optimal (minimal cost) assignment of jobs to workers (or other - * analogous) concepts given a cost matrix of each pair of job and worker, using - * the algorithm by James Munkres in "Algorithms for the Assignment and - * Transportation Problems", with additional optimizations as described by Jin - * Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment - * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in - * O(n^3) time and need O(n^2) auxiliary space where n is the number of jobs or - * workers, whichever is greater. + * Computes the optimal (minimal cost) assignment of jobs to workers (or other analogous) concepts + * given a cost matrix of each pair of job and worker, using the algorithm by James Munkres in + * "Algorithms for the Assignment and Transportation Problems", with additional optimizations as + * described by Jin Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment + * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in O(n^3) time and need + * O(n^2) auxiliary space where n is the number of jobs or workers, whichever is greater. */ @InterfaceAudience.Private public class MunkresAssignment { @@ -88,11 +84,10 @@ public class MunkresAssignment { private float[] colAdjust; /** - * Construct a new problem instance with the specified cost matrix. The cost - * matrix must be rectangular, though not necessarily square. If one dimension - * is greater than the other, some elements in the greater dimension will not - * be assigned. The input cost matrix will not be modified. - * @param costMatrix + * Construct a new problem instance with the specified cost matrix. The cost matrix must be + * rectangular, though not necessarily square. If one dimension is greater than the other, some + * elements in the greater dimension will not be assigned. The input cost matrix will not be + * modified. n */ public MunkresAssignment(float[][] costMatrix) { // The algorithm assumes that the number of columns is at least as great as @@ -146,11 +141,10 @@ public class MunkresAssignment { } /** - * Get the optimal assignments. 
The returned array will have the same number - * of elements as the number of elements as the number of rows in the input - * cost matrix. Each element will indicate which column should be assigned to - * that row or -1 if no column should be assigned, i.e. if result[i] = j then - * row i should be assigned to column j. Subsequent invocations of this method + * Get the optimal assignments. The returned array will have the same number of elements as the + * number of elements as the number of rows in the input cost matrix. Each element will indicate + * which column should be assigned to that row or -1 if no column should be assigned, i.e. if + * result[i] = j then row i should be assigned to column j. Subsequent invocations of this method * will simply return the same object without additional computation. * @return an array with the optimal assignments */ @@ -174,8 +168,7 @@ public class MunkresAssignment { // Extract the assignments from the mask matrix. if (transposed) { assignments = new int[cols]; - outer: - for (int c = 0; c < cols; c++) { + outer: for (int c = 0; c < cols; c++) { for (int r = 0; r < rows; r++) { if (mask[r][c] == STAR) { assignments[c] = r; @@ -187,8 +180,7 @@ public class MunkresAssignment { } } else { assignments = new int[rows]; - outer: - for (int r = 0; r < rows; r++) { + outer: for (int r = 0; r < rows; r++) { for (int c = 0; c < cols; c++) { if (mask[r][c] == STAR) { assignments[r] = c; @@ -215,9 +207,8 @@ public class MunkresAssignment { } /** - * Corresponds to the "preliminaries" step of the original algorithm. - * Guarantees that the matrix is an equivalent non-negative matrix with at - * least one zero in each row. + * Corresponds to the "preliminaries" step of the original algorithm. Guarantees that the matrix + * is an equivalent non-negative matrix with at least one zero in each row. */ private void preliminaries() { for (int r = 0; r < rows; r++) { @@ -250,8 +241,8 @@ public class MunkresAssignment { } /** - * Test whether the algorithm is done, i.e. we have the optimal assignment. - * This occurs when there is exactly one starred zero in each row. + * Test whether the algorithm is done, i.e. we have the optimal assignment. This occurs when there + * is exactly one starred zero in each row. * @return true if the algorithm is done */ private boolean testIsDone() { @@ -431,8 +422,8 @@ public class MunkresAssignment { } /** - * Find a zero cost assignment which is not covered. If there are no zero cost - * assignments which are uncovered, then null will be returned. + * Find a zero cost assignment which is not covered. If there are no zero cost assignments which + * are uncovered, then null will be returned. * @return pair of row and column indices of an uncovered zero or null */ private Pair findUncoveredZero() { @@ -445,8 +436,8 @@ public class MunkresAssignment { } /** - * A specified row has become covered, and a specified column has become - * uncovered. The least value per row may need to be updated. + * A specified row has become covered, and a specified column has become uncovered. The least + * value per row may need to be updated. * @param row the index of the row which was just covered * @param col the index of the column which was just uncovered */ @@ -467,8 +458,8 @@ public class MunkresAssignment { } /** - * Find a starred zero in a specified row. If there are no starred zeroes in - * the specified row, then null will be returned. + * Find a starred zero in a specified row. 
If there are no starred zeroes in the specified row, + * then null will be returned. * @param r the index of the row to be searched * @return pair of row and column indices of starred zero or null */ @@ -482,8 +473,8 @@ public class MunkresAssignment { } /** - * Find a starred zero in the specified column. If there are no starred zeroes - * in the specified row, then null will be returned. + * Find a starred zero in the specified column. If there are no starred zeroes in the specified + * row, then null will be returned. * @param c the index of the column to be searched * @return pair of row and column indices of starred zero or null */ @@ -497,8 +488,8 @@ public class MunkresAssignment { } /** - * Find a primed zero in the specified row. If there are no primed zeroes in - * the specified row, then null will be returned. + * Find a primed zero in the specified row. If there are no primed zeroes in the specified row, + * then null will be returned. * @param r the index of the row to be searched * @return pair of row and column indices of primed zero or null */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java index 3e247f3061e..4ff401633ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.util; +import java.util.concurrent.ThreadFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.ServerChannel; @@ -27,9 +31,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioServerSocketChannel; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel; import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory; -import java.util.concurrent.ThreadFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; /** * Event loop group related config. 
@@ -53,7 +54,7 @@ public class NettyEventLoopGroupConfig { boolean useEpoll = useEpoll(conf); int workerCount = conf.getInt("hbase.netty.worker.count", 0); ThreadFactory eventLoopThreadFactory = - new DefaultThreadFactory(threadPoolName, true, Thread.MAX_PRIORITY); + new DefaultThreadFactory(threadPoolName, true, Thread.MAX_PRIORITY); if (useEpoll) { group = new EpollEventLoopGroup(workerCount, eventLoopThreadFactory); serverChannelClass = EpollServerSocketChannel.class; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index ad184e9ac44..6662b6598fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.BufferedInputStream; @@ -168,9 +166,9 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } /** - * @param hostname Hostname to unload regions from or load regions to. Can be either hostname - * or hostname:port. - * @param conf Configuration object + * @param hostname Hostname to unload regions from or load regions to. Can be either hostname or + * hostname:port. + * @param conf Configuration object */ public RegionMoverBuilder(String hostname, Configuration conf) { String[] splitHostname = hostname.toLowerCase().split(":"); @@ -186,9 +184,8 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } /** - * Path of file where regions will be written to during unloading/read from during loading - * @param filename - * @return RegionMoverBuilder object + * Path of file where regions will be written to during unloading/read from during loading n + * * @return RegionMoverBuilder object */ public RegionMoverBuilder filename(String filename) { this.filename = filename; @@ -233,8 +230,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { * effort mode,each region movement is tried once.This can be used during graceful shutdown as * even if we have a stuck region,upon shutdown it'll be reassigned anyway. *

      - * @param ack - * @return RegionMoverBuilder object + * n * @return RegionMoverBuilder object */ public RegionMoverBuilder ack(boolean ack) { this.ack = ack; @@ -254,9 +250,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } /** - * Set specific rackManager implementation. - * This setter method is for testing purpose only. - * + * Set specific rackManager implementation. This setter method is for testing purpose only. * @param rackManager rackManager impl * @return RegionMoverBuilder object */ @@ -330,13 +324,11 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { return regionsToMove.stream().filter(RegionInfo::isMetaRegion).findFirst(); } - private void loadRegions(List regionsToMove) - throws Exception { + private void loadRegions(List regionsToMove) throws Exception { ServerName server = getTargetServer(); List movedRegions = Collections.synchronizedList(new ArrayList<>()); - LOG.info( - "Moving " + regionsToMove.size() + " regions to " + server + " using " + this.maxthreads - + " threads.Ack mode:" + this.ack); + LOG.info("Moving " + regionsToMove.size() + " regions to " + server + " using " + + this.maxthreads + " threads.Ack mode:" + this.ack); final ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); List> taskList = new ArrayList<>(); @@ -345,13 +337,13 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { RegionInfo region = regionsToMove.get(counter); ServerName currentServer = MoveWithAck.getServerNameForRegion(region, admin, conn); if (currentServer == null) { - LOG.warn( - "Could not get server for Region:" + region.getRegionNameAsString() + " moving on"); + LOG + .warn("Could not get server for Region:" + region.getRegionNameAsString() + " moving on"); counter++; continue; } else if (server.equals(currentServer)) { LOG.info( - "Region " + region.getRegionNameAsString() + " is already on target server=" + server); + "Region " + region.getRegionNameAsString() + " is already on target server=" + server); counter++; continue; } @@ -368,8 +360,8 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } moveRegionsPool.shutdown(); - long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() - .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); + long timeoutInSeconds = regionsToMove.size() + * admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); } @@ -379,7 +371,6 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions * to hostnames provided in {@link #designatedFile} - * * @return true if unloading succeeded, false otherwise */ public boolean unload() throws InterruptedException, ExecutionException, TimeoutException { @@ -391,19 +382,18 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { * noAck mode we do not make sure that region is successfully online on the target region * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions - * to hostnames provided in {@link #designatedFile}. 
- * While unloading regions, destination RegionServers are selected from different rack i.e - * regions should not move to any RegionServers that belong to same rack as source RegionServer. - * + * to hostnames provided in {@link #designatedFile}. While unloading regions, destination + * RegionServers are selected from different rack i.e regions should not move to any RegionServers + * that belong to same rack as source RegionServer. * @return true if unloading succeeded, false otherwise */ public boolean unloadFromRack() - throws InterruptedException, ExecutionException, TimeoutException { + throws InterruptedException, ExecutionException, TimeoutException { return unloadRegions(true); } - private boolean unloadRegions(boolean unloadFromRack) throws InterruptedException, - ExecutionException, TimeoutException { + private boolean unloadRegions(boolean unloadFromRack) + throws InterruptedException, ExecutionException, TimeoutException { deleteFile(this.filename); ExecutorService unloadPool = Executors.newFixedThreadPool(1); Future unloadTask = unloadPool.submit(() -> { @@ -416,7 +406,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { ServerName server = stripServer(regionServers, hostname, port); if (server == null) { LOG.info("Could not find server '{}:{}' in the set of region servers. giving up.", - hostname, port); + hostname, port); LOG.debug("List of region servers: {}", regionServers); return false; } @@ -447,8 +437,8 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { Set decommissionedRS = new HashSet<>(admin.listDecommissionedRegionServers()); if (CollectionUtils.isNotEmpty(decommissionedRS)) { regionServers.removeIf(decommissionedRS::contains); - LOG.debug("Excluded RegionServers from unloading regions to because they " + - "are marked as decommissioned. Servers: {}", decommissionedRS); + LOG.debug("Excluded RegionServers from unloading regions to because they " + + "are marked as decommissioned. 
Servers: {}", decommissionedRS); } stripMaster(regionServers); @@ -471,7 +461,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } private void unloadRegions(ServerName server, List regionServers, - List movedRegions) throws Exception { + List movedRegions) throws Exception { while (true) { List regionsToMove = admin.getRegions(server); regionsToMove.removeAll(movedRegions); @@ -500,32 +490,29 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { int serverIndex = 0; for (RegionInfo regionToMove : regionsToMove) { if (ack) { - Future task = moveRegionsPool.submit( - new MoveWithAck(conn, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); + Future task = moveRegionsPool.submit(new MoveWithAck(conn, regionToMove, server, + regionServers.get(serverIndex), movedRegions)); taskList.add(task); } else { - Future task = moveRegionsPool.submit( - new MoveWithoutAck(admin, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); + Future task = moveRegionsPool.submit(new MoveWithoutAck(admin, regionToMove, + server, regionServers.get(serverIndex), movedRegions)); taskList.add(task); } serverIndex = (serverIndex + 1) % regionServers.size(); } moveRegionsPool.shutdown(); - long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() - .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); + long timeoutInSeconds = regionsToMove.size() + * admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); } private boolean waitTaskToFinish(ExecutorService pool, Future task, String operation) - throws TimeoutException, InterruptedException, ExecutionException { + throws TimeoutException, InterruptedException, ExecutionException { pool.shutdown(); try { if (!pool.awaitTermination((long) this.timeout, TimeUnit.SECONDS)) { - LOG.warn( - "Timed out before finishing the " + operation + " operation. Timeout: " + this.timeout - + "sec"); + LOG.warn("Timed out before finishing the " + operation + " operation. Timeout: " + + this.timeout + "sec"); pool.shutdownNow(); } } catch (InterruptedException e) { @@ -544,7 +531,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } private void waitMoveTasksToFinish(ExecutorService moveRegionsPool, - List> taskList, long timeoutInSeconds) throws Exception { + List> taskList, long timeoutInSeconds) throws Exception { try { if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) { moveRegionsPool.shutdownNow(); @@ -573,7 +560,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } } catch (CancellationException e) { LOG.error("Thread for moving region cancelled. 
Timeout for cancellation:" + timeoutInSeconds - + "secs", e); + + "secs", e); throw e; } } @@ -584,9 +571,11 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { if (e.getCause() instanceof UnknownRegionException) { // region does not exist anymore ignoreFailure = true; - } else if (e.getCause() instanceof DoNotRetryRegionException - && e.getCause().getMessage() != null && e.getCause().getMessage() - .contains(AssignmentManager.UNEXPECTED_STATE_REGION + "state=SPLIT,")) { + } else if ( + e.getCause() instanceof DoNotRetryRegionException && e.getCause().getMessage() != null + && e.getCause().getMessage() + .contains(AssignmentManager.UNEXPECTED_STATE_REGION + "state=SPLIT,") + ) { // region is recently split ignoreFailure = true; } @@ -596,7 +585,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { private ServerName getTargetServer() throws Exception { ServerName server = null; int maxWaitInSeconds = - admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY, DEFAULT_SERVERSTART_WAIT_MAX); + admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY, DEFAULT_SERVERSTART_WAIT_MAX); long maxWait = EnvironmentEdgeManager.currentTime() + maxWaitInSeconds * 1000; while (EnvironmentEdgeManager.currentTime() < maxWait) { try { @@ -627,8 +616,8 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { if (!f.exists()) { return regions; } - try (DataInputStream dis = new DataInputStream( - new BufferedInputStream(new FileInputStream(f)))) { + try ( + DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(f)))) { int numRegions = dis.readInt(); int index = 0; while (index < numRegions) { @@ -647,16 +636,15 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { * lines */ private void writeFile(String filename, List movedRegions) throws IOException { - try (DataOutputStream dos = new DataOutputStream( - new BufferedOutputStream(new FileOutputStream(filename)))) { + try (DataOutputStream dos = + new DataOutputStream(new BufferedOutputStream(new FileOutputStream(filename)))) { dos.writeInt(movedRegions.size()); for (RegionInfo region : movedRegions) { Bytes.writeByteArray(dos, RegionInfo.toByteArray(region)); } } catch (IOException e) { - LOG.error( - "ERROR: Was Not able to write regions moved to output file but moved " + movedRegions - .size() + " regions", e); + LOG.error("ERROR: Was Not able to write regions moved to output file but moved " + + movedRegions.size() + " regions", e); throw e; } } @@ -688,18 +676,17 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { } /** - * Designates or excludes the servername whose hostname and port portion matches the list given - * in the file. - * Example:
      + * Designates or excludes the servername whose hostname and port portion matches the list given in + * the file. Example:
      * If you want to designated RSs, suppose designatedFile has RS1, regionServers has RS1, RS2 and - * RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and - * RS3 are removed from regionServers list so that regions can move to only RS1. - * If you want to exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. - * When we call includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from - * regionServers list so that regions can move to only RS2 and RS3. + * RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and RS3 + * are removed from regionServers list so that regions can move to only RS1. If you want to + * exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. When we call + * includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from regionServers + * list so that regions can move to only RS2 and RS3. */ private void includeExcludeRegionServers(String fileName, List regionServers, - boolean isInclude) throws IOException { + boolean isInclude) throws IOException { if (fileName != null) { List servers = readServersFromFile(fileName); if (servers.isEmpty()) { @@ -709,8 +696,8 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { Iterator i = regionServers.iterator(); while (i.hasNext()) { String rs = i.next().getServerName(); - String rsPort = rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":" + rs - .split(ServerName.SERVERNAME_SEPARATOR)[1]; + String rsPort = rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":" + + rs.split(ServerName.SERVERNAME_SEPARATOR)[1]; if (isInclude != servers.contains(rsPort)) { i.remove(); } @@ -734,8 +721,10 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { private ServerName stripServer(List regionServers, String hostname, int port) { for (Iterator iter = regionServers.iterator(); iter.hasNext();) { ServerName server = iter.next(); - if (server.getAddress().getHostName().equalsIgnoreCase(hostname) && - server.getAddress().getPort() == port) { + if ( + server.getAddress().getHostName().equalsIgnoreCase(hostname) + && server.getAddress().getPort() == port + ) { iter.remove(); return server; } @@ -748,22 +737,22 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { this.addRequiredOptWithArg("r", "regionserverhost", "region server |"); this.addRequiredOptWithArg("o", "operation", "Expected: load/unload/unload_from_rack"); this.addOptWithArg("m", "maxthreads", - "Define the maximum number of threads to use to unload and reload the regions"); + "Define the maximum number of threads to use to unload and reload the regions"); this.addOptWithArg("x", "excludefile", - "File with per line to exclude as unload targets; default excludes only " - + "target host; useful for rack decommisioning."); - this.addOptWithArg("d","designatedfile","File with per line as unload targets;" - + "default is all online hosts"); + "File with per line to exclude as unload targets; default excludes only " + + "target host; useful for rack decommisioning."); + this.addOptWithArg("d", "designatedfile", + "File with per line as unload targets;" + "default is all online hosts"); this.addOptWithArg("f", "filename", - "File to save regions list into unloading, or read from loading; " - + "default /tmp/"); + "File to save regions list into unloading, or read from loading; " + + "default /tmp/"); this.addOptNoArg("n", "noack", - "Turn on 
No-Ack mode(default: false) which won't check if region is online on target " - + "RegionServer, hence best effort. This is more performant in unloading and loading " - + "but might lead to region being unavailable for some time till master reassigns it " - + "in case the move failed"); + "Turn on No-Ack mode(default: false) which won't check if region is online on target " + + "RegionServer, hence best effort. This is more performant in unloading and loading " + + "but might lead to region being unavailable for some time till master reassigns it " + + "in case the move failed"); this.addOptWithArg("t", "timeout", "timeout in seconds after which the tool will exit " - + "irrespective of whether it finished or not;default Integer.MAX_VALUE"); + + "irrespective of whether it finished or not;default Integer.MAX_VALUE"); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java index 0f75b0e9bd5..67117b260e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,30 +24,23 @@ import java.util.List; import java.util.Map.Entry; import java.util.TreeMap; import java.util.TreeSet; - +import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap; /** - * This is a generic region split calculator. It requires Ranges that provide - * start, end, and a comparator. It works in two phases -- the first adds ranges - * and rejects backwards ranges. Then one calls calcRegions to generate the - * multimap that has a start split key as a key and possibly multiple Ranges as - * members. - * - * To traverse, one normally would get the split set, and iterate through the - * calcRegions. Normal regions would have only one entry, holes would have zero, - * and any overlaps would have multiple entries. - * - * The interface is a bit cumbersome currently but is exposed this way so that - * clients can choose how to iterate through the region splits. - * + * This is a generic region split calculator. It requires Ranges that provide start, end, and a + * comparator. It works in two phases -- the first adds ranges and rejects backwards ranges. Then + * one calls calcRegions to generate the multimap that has a start split key as a key and possibly + * multiple Ranges as members. To traverse, one normally would get the split set, and iterate + * through the calcRegions. Normal regions would have only one entry, holes would have zero, and any + * overlaps would have multiple entries. The interface is a bit cumbersome currently but is exposed + * this way so that clients can choose how to iterate through the region splits. 
* @param */ @InterfaceAudience.Private @@ -57,17 +49,14 @@ public class RegionSplitCalculator { private final Comparator rangeCmp; /** - * This contains a sorted set of all the possible split points - * - * Invariant: once populated this has 0 entries if empty or at most n+1 values - * where n == number of added ranges. + * This contains a sorted set of all the possible split points Invariant: once populated this has + * 0 entries if empty or at most n+1 values where n == number of added ranges. */ private final TreeSet splits = new TreeSet<>(BYTES_COMPARATOR); /** - * This is a map from start key to regions with the same start key. - * - * Invariant: This always have n values in total + * This is a map from start key to regions with the same start key. Invariant: This always have n + * values in total */ private final Multimap starts = ArrayListMultimap.create(); @@ -83,19 +72,15 @@ public class RegionSplitCalculator { public final static Comparator BYTES_COMPARATOR = new ByteArrayComparator() { @Override public int compare(byte[] l, byte[] r) { - if (l == null && r == null) - return 0; - if (l == null) - return 1; - if (r == null) - return -1; + if (l == null && r == null) return 0; + if (l == null) return 1; + if (r == null) return -1; return super.compare(l, r); } }; /** * SPECIAL CASE wrapper for empty end key - * * @return ENDKEY if end key is empty, else normal endkey. */ private static byte[] specialEndKey(R range) { @@ -108,7 +93,6 @@ public class RegionSplitCalculator { /** * Adds an edge to the split calculator - * * @return true if is included, false if backwards/invalid */ public boolean add(R range) { @@ -118,8 +102,8 @@ public class RegionSplitCalculator { // No need to use Arrays.equals because ENDKEY is null if (end != ENDKEY && Bytes.compareTo(start, end) > 0) { // don't allow backwards edges - LOG.debug("attempted to add backwards edge: " + Bytes.toString(start) - + " " + Bytes.toString(end)); + LOG.debug( + "attempted to add backwards edge: " + Bytes.toString(start) + " " + Bytes.toString(end)); return false; } @@ -130,16 +114,13 @@ public class RegionSplitCalculator { } /** - * Generates a coverage multimap from split key to Regions that start with the - * split key. - * + * Generates a coverage multimap from split key to Regions that start with the split key. * @return coverage multimap */ public Multimap calcCoverage() { // This needs to be sorted to force the use of the comparator on the values, // otherwise byte array comparison isn't used - Multimap regions = TreeMultimap.create(BYTES_COMPARATOR, - rangeCmp); + Multimap regions = TreeMultimap.create(BYTES_COMPARATOR, rangeCmp); // march through all splits from the start points for (Entry> start : starts.asMap().entrySet()) { @@ -147,8 +128,7 @@ public class RegionSplitCalculator { for (R r : start.getValue()) { regions.put(key, r); - for (byte[] coveredSplit : splits.subSet(r.getStartKey(), - specialEndKey(r))) { + for (byte[] coveredSplit : splits.subSet(r.getStartKey(), specialEndKey(r))) { regions.put(coveredSplit, r); } } @@ -165,36 +145,34 @@ public class RegionSplitCalculator { } /** - * Find specified number of top ranges in a big overlap group. - * It could return less if there are not that many top ranges. - * Once these top ranges are excluded, the big overlap group will - * be broken into ranges with no overlapping, or smaller overlapped - * groups, and most likely some holes. - * + * Find specified number of top ranges in a big overlap group. 
It could return less if there are + * not that many top ranges. Once these top ranges are excluded, the big overlap group will be + * broken into ranges with no overlapping, or smaller overlapped groups, and most likely some + * holes. * @param bigOverlap a list of ranges that overlap with each other - * @param count the max number of ranges to find + * @param count the max number of ranges to find * @return a list of ranges that overlap with most others */ - public static List - findBigRanges(Collection bigOverlap, int count) { + public static List findBigRanges(Collection bigOverlap, int count) { List bigRanges = new ArrayList<>(); // The key is the count of overlaps, // The value is a list of ranges that have that many overlaps TreeMap> overlapRangeMap = new TreeMap<>(); - for (R r: bigOverlap) { + for (R r : bigOverlap) { // Calculates the # of overlaps for each region // and populates rangeOverlapMap byte[] startKey = r.getStartKey(); byte[] endKey = specialEndKey(r); int overlappedRegions = 0; - for (R rr: bigOverlap) { + for (R rr : bigOverlap) { byte[] start = rr.getStartKey(); byte[] end = specialEndKey(rr); - if (BYTES_COMPARATOR.compare(startKey, end) < 0 - && BYTES_COMPARATOR.compare(endKey, start) > 0) { + if ( + BYTES_COMPARATOR.compare(startKey, end) < 0 && BYTES_COMPARATOR.compare(endKey, start) > 0 + ) { overlappedRegions++; } } @@ -213,7 +191,7 @@ public class RegionSplitCalculator { } } int toBeAdded = count; - for (Integer key: overlapRangeMap.descendingKeySet()) { + for (Integer key : overlapRangeMap.descendingKeySet()) { List chunk = overlapRangeMap.get(key); int chunkSize = chunk.size(); if (chunkSize <= toBeAdded) { @@ -225,7 +203,7 @@ public class RegionSplitCalculator { // chained, for example: [a, c), [b, e), [d, g), [f h)... // In such a case, sideline the middle chunk will break // the group efficiently. - int start = (chunkSize - toBeAdded)/2; + int start = (chunkSize - toBeAdded) / 2; int end = start + toBeAdded; for (int i = start; i < end; i++) { bigRanges.add(chunk.get(i)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 760ded56ce9..723425ad95e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,22 +39,22 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.NoServerForRegionException; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; - -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; @@ -68,10 +67,9 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; /** - * The {@link RegionSplitter} class provides several utilities to help in the - * administration lifecycle for developers who choose to manually split regions - * instead of having HBase handle that automatically. The most useful utilities - * are: + * The {@link RegionSplitter} class provides several utilities to help in the administration + * lifecycle for developers who choose to manually split regions instead of having HBase handle that + * automatically. The most useful utilities are: *

      *

        *
      • Create a table with a specified number of pre-split regions @@ -82,13 +80,13 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; *

        * Question: How do I turn off automatic splitting?
        * Answer: Automatic splitting is determined by the configuration value - * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this - * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting - * is 100GB, which would result in > 1hr major compactions if reached. + * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this to Long.MAX_VALUE + * in case you forget about manual splits. A suggested setting is 100GB, which would result in > + * 1hr major compactions if reached. *
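For concreteness, the configuration value referred to in that answer is hbase.hregion.max.filesize (HConstants.HREGION_MAX_FILESIZE); a minimal sketch of pinning it to the suggested 100 GB rather than Long.MAX_VALUE. In practice the key belongs in hbase-site.xml on the servers; the snippet only demonstrates the constant and the value arithmetic.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class MaxFileSizeSketch {
  public static void main(String[] args) {
    // Regions are considered for splitting once they grow past this size
    // (exact behaviour depends on the configured split policy).
    Configuration conf = HBaseConfiguration.create();
    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 100L * 1024 * 1024 * 1024);
    System.out.println(HConstants.HREGION_MAX_FILESIZE + " = "
        + conf.getLong(HConstants.HREGION_MAX_FILESIZE, -1));
  }
}
```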

        * Question: Why did the original authors decide to manually split?
        - * Answer: Specific workload characteristics of our use case allowed us - * to benefit from a manual split system. + * Answer: Specific workload characteristics of our use case allowed us to benefit from a + * manual split system. *

        *

          *
        • Data (~1k) that would grow instead of being replaced @@ -97,146 +95,120 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; *
        *

        * Question: Why is manual splitting good for this workload?
        - * Answer: Although automated splitting is not a bad option, there are - * benefits to manual splitting. + * Answer: Although automated splitting is not a bad option, there are benefits to manual + * splitting. *

        *

          - *
        • With growing amounts of data, splits will continually be needed. Since - * you always know exactly what regions you have, long-term debugging and - * profiling is much easier with manual splits. It is hard to trace the logs to - * understand region level problems if it keeps splitting and getting renamed. - *
        • Data offlining bugs + unknown number of split regions == oh crap! If an - * WAL or StoreFile was mistakenly unprocessed by HBase due to a weird bug and - * you notice it a day or so later, you can be assured that the regions - * specified in these files are the same as the current regions and you have - * less headaches trying to restore/replay your data. - *
        • You can finely tune your compaction algorithm. With roughly uniform data - * growth, it's easy to cause split / compaction storms as the regions all - * roughly hit the same data size at the same time. With manual splits, you can - * let staggered, time-based major compactions spread out your network IO load. + *
        • With growing amounts of data, splits will continually be needed. Since you always know + * exactly what regions you have, long-term debugging and profiling is much easier with manual + * splits. It is hard to trace the logs to understand region level problems if it keeps splitting + * and getting renamed. + *
        • Data offlining bugs + unknown number of split regions == oh crap! If an WAL or StoreFile was + * mistakenly unprocessed by HBase due to a weird bug and you notice it a day or so later, you can + * be assured that the regions specified in these files are the same as the current regions and you + * have less headaches trying to restore/replay your data. + *
        • You can finely tune your compaction algorithm. With roughly uniform data growth, it's easy to + * cause split / compaction storms as the regions all roughly hit the same data size at the same + * time. With manual splits, you can let staggered, time-based major compactions spread out your + * network IO load. *
        *

        * Question: What's the optimal number of pre-split regions to create?
        * Answer: Mileage will vary depending upon your application. *

        - * The short answer for our application is that we started with 10 pre-split - * regions / server and watched our data growth over time. It's better to err on - * the side of too little regions and rolling split later. + * The short answer for our application is that we started with 10 pre-split regions / server and + * watched our data growth over time. It's better to err on the side of too little regions and + * rolling split later. *

        - * The more complicated answer is that this depends upon the largest storefile - * in your region. With a growing data size, this will get larger over time. You - * want the largest region to be just big enough that the - * {@link org.apache.hadoop.hbase.regionserver.HStore} compact - * selection algorithm only compacts it due to a timed major. If you don't, your - * cluster can be prone to compaction storms as the algorithm decides to run - * major compactions on a large series of regions all at once. Note that - * compaction storms are due to the uniform data growth, not the manual split + * The more complicated answer is that this depends upon the largest storefile in your region. With + * a growing data size, this will get larger over time. You want the largest region to be just big + * enough that the {@link org.apache.hadoop.hbase.regionserver.HStore} compact selection algorithm + * only compacts it due to a timed major. If you don't, your cluster can be prone to compaction + * storms as the algorithm decides to run major compactions on a large series of regions all at + * once. Note that compaction storms are due to the uniform data growth, not the manual split * decision. *

        - * If you pre-split your regions too thin, you can increase the major compaction - * interval by configuring HConstants.MAJOR_COMPACTION_PERIOD. If your data size - * grows too large, use this script to perform a network IO safe rolling split - * of all regions. + * If you pre-split your regions too thin, you can increase the major compaction interval by + * configuring HConstants.MAJOR_COMPACTION_PERIOD. If your data size grows too large, use this + * script to perform a network IO safe rolling split of all regions. */ @InterfaceAudience.Private public class RegionSplitter { private static final Logger LOG = LoggerFactory.getLogger(RegionSplitter.class); /** - * A generic interface for the RegionSplitter code to use for all it's - * functionality. Note that the original authors of this code use - * {@link HexStringSplit} to partition their table and set it as default, but - * provided this for your custom algorithm. To use, create a new derived class + * A generic interface for the RegionSplitter code to use for all it's functionality. Note that + * the original authors of this code use {@link HexStringSplit} to partition their table and set + * it as default, but provided this for your custom algorithm. To use, create a new derived class * from this interface and call {@link RegionSplitter#createPresplitTable} or - * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the - * argument splitClassName giving the name of your class. + * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the argument + * splitClassName giving the name of your class. */ public interface SplitAlgorithm { /** - * Split a pre-existing region into 2 regions. - * - * @param start - * first row (inclusive) - * @param end - * last row (exclusive) + * Split a pre-existing region into 2 regions. n * first row (inclusive) n * last row + * (exclusive) * @return the split row to use */ byte[] split(byte[] start, byte[] end); /** - * Split an entire table. - * - * @param numRegions - * number of regions to split the table into - * - * @throws RuntimeException - * user input is validated at this time. may throw a runtime - * exception in response to a parse failure - * @return array of split keys for the initial regions of the table. The - * length of the returned array should be numRegions-1. + * Split an entire table. n * number of regions to split the table into n * user input is + * validated at this time. may throw a runtime exception in response to a parse failure + * @return array of split keys for the initial regions of the table. The length of the returned + * array should be numRegions-1. */ byte[][] split(int numRegions); /** - * Some MapReduce jobs may want to run multiple mappers per region, - * this is intended for such usecase. - * - * @param start first row (inclusive) - * @param end last row (exclusive) + * Some MapReduce jobs may want to run multiple mappers per region, this is intended for such + * usecase. + * @param start first row (inclusive) + * @param end last row (exclusive) * @param numSplits number of splits to generate * @param inclusive whether start and end are returned as split points */ byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive); /** - * In HBase, the first row is represented by an empty byte array. This might - * cause problems with your split algorithm or row printing. All your APIs - * will be passed firstRow() instead of empty array. 
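A hedged sketch of the pre-split workflow the interface above supports: generate split keys with the built-in HexStringSplit and hand them to Admin.createTable. The table and family names are invented, and RegionSplitter is audience-private, so treat this as illustration rather than a supported public API.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class PresplitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // 59 split keys over the hex key space, i.e. 60 initial regions.
    byte[][] splits = new RegionSplitter.HexStringSplit().split(60);
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("myTable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Create the table already split into 60 regions.
      admin.createTable(desc, splits);
    }
  }
}
```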
- * + * In HBase, the first row is represented by an empty byte array. This might cause problems with + * your split algorithm or row printing. All your APIs will be passed firstRow() instead of + * empty array. * @return your representation of your first row */ byte[] firstRow(); /** - * In HBase, the last row is represented by an empty byte array. This might - * cause problems with your split algorithm or row printing. All your APIs - * will be passed firstRow() instead of empty array. - * + * In HBase, the last row is represented by an empty byte array. This might cause problems with + * your split algorithm or row printing. All your APIs will be passed firstRow() instead of + * empty array. * @return your representation of your last row */ byte[] lastRow(); /** - * In HBase, the last row is represented by an empty byte array. Set this - * value to help the split code understand how to evenly divide the first - * region. - * - * @param userInput - * raw user input (may throw RuntimeException on parse failure) + * In HBase, the last row is represented by an empty byte array. Set this value to help the + * split code understand how to evenly divide the first region. n * raw user input (may throw + * RuntimeException on parse failure) */ void setFirstRow(String userInput); /** - * In HBase, the last row is represented by an empty byte array. Set this - * value to help the split code understand how to evenly divide the last - * region. Note that this last row is inclusive for all rows sharing the - * same prefix. - * - * @param userInput - * raw user input (may throw RuntimeException on parse failure) + * In HBase, the last row is represented by an empty byte array. Set this value to help the + * split code understand how to evenly divide the last region. Note that this last row is + * inclusive for all rows sharing the same prefix. n * raw user input (may throw + * RuntimeException on parse failure) */ void setLastRow(String userInput); /** - * @param input - * user or file input for row + * n * user or file input for row * @return byte array representation of this row for HBase */ byte[] strToRow(String input); /** - * @param row - * byte array representing a row in HBase + * n * byte array representing a row in HBase * @return String to use for debug & file printing */ String rowToStr(byte[] row); @@ -263,72 +235,51 @@ public class RegionSplitter { * The main function for the RegionSplitter application. Common uses: *

        *

          - *
        • create a table named 'myTable' with 60 pre-split regions containing 2 - * column families 'test' & 'rs', assuming the keys are hex-encoded ASCII: + *
        • create a table named 'myTable' with 60 pre-split regions containing 2 column families + * 'test' & 'rs', assuming the keys are hex-encoded ASCII: *
            - *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs - * myTable HexStringSplit + *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs myTable + * HexStringSplit *
          - *
        • create a table named 'myTable' with 50 pre-split regions, - * assuming the keys are decimal-encoded ASCII: + *
        • create a table named 'myTable' with 50 pre-split regions, assuming the keys are + * decimal-encoded ASCII: *
            - *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 - * myTable DecimalStringSplit + *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 myTable DecimalStringSplit *
          - *
        • perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2 - * outstanding splits at a time, assuming keys are uniformly distributed - * bytes: + *
        • perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2 outstanding splits at + * a time, assuming keys are uniformly distributed bytes: *
            - *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable - * UniformSplit + *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable UniformSplit *
          *
        - * - * There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, - * DecimalStringSplit, and UniformSplit. These are different strategies for - * choosing region boundaries. See their source code for details. - * - * @param args - * Usage: RegionSplitter <TABLE> <SPLITALGORITHM> - * <-c <# regions> -f <family:family:...> | -r - * [-o <# outstanding splits>]> - * [-D <conf.param=value>] - * @throws IOException - * HBase IO problem - * @throws InterruptedException - * user requested exit - * @throws ParseException - * problem parsing user input + * There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, DecimalStringSplit, + * and UniformSplit. These are different strategies for choosing region boundaries. See their + * source code for details. n * Usage: RegionSplitter <TABLE> <SPLITALGORITHM> <-c + * <# regions> -f <family:family:...> | -r [-o <# outstanding splits>]> [-D + * <conf.param=value>] n * HBase IO problem n * user requested exit n * problem parsing user + * input */ @SuppressWarnings("static-access") - public static void main(String[] args) throws IOException, - InterruptedException, ParseException { + public static void main(String[] args) throws IOException, InterruptedException, ParseException { Configuration conf = HBaseConfiguration.create(); // parse user input Options opt = new Options(); opt.addOption(OptionBuilder.withArgName("property=value").hasArg() - .withDescription("Override HBase Configuration Settings").create("D")); + .withDescription("Override HBase Configuration Settings").create("D")); opt.addOption(OptionBuilder.withArgName("region count").hasArg() - .withDescription( - "Create a new table with a pre-split number of regions") - .create("c")); + .withDescription("Create a new table with a pre-split number of regions").create("c")); opt.addOption(OptionBuilder.withArgName("family:family:...").hasArg() - .withDescription( - "Column Families to create with new table. Required with -c") - .create("f")); + .withDescription("Column Families to create with new table. Required with -c").create("f")); opt.addOption("h", false, "Print this usage help"); opt.addOption("r", false, "Perform a rolling split of an existing region"); - opt.addOption(OptionBuilder.withArgName("count").hasArg().withDescription( - "Max outstanding splits that have unfinished major compactions") - .create("o")); - opt.addOption(null, "firstrow", true, - "First Row in Table for Split Algorithm"); - opt.addOption(null, "lastrow", true, - "Last Row in Table for Split Algorithm"); - opt.addOption(null, "risky", false, - "Skip verification steps to complete quickly. " - + "STRONGLY DISCOURAGED for production systems. "); + opt.addOption(OptionBuilder.withArgName("count").hasArg() + .withDescription("Max outstanding splits that have unfinished major compactions") + .create("o")); + opt.addOption(null, "firstrow", true, "First Row in Table for Split Algorithm"); + opt.addOption(null, "lastrow", true, "Last Row in Table for Split Algorithm"); + opt.addOption(null, "risky", false, "Skip verification steps to complete quickly. " + + "STRONGLY DISCOURAGED for production systems. "); CommandLine cmd = new GnuParser().parse(opt, args); if (cmd.hasOption("D")) { @@ -352,13 +303,13 @@ public class RegionSplitter { boolean oneOperOnly = createTable ^ rollingSplit; if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) { - new HelpFormatter().printHelp("bin/hbase regionsplitter
      \n"+ - "SPLITALGORITHM is the java class name of a class implementing " + - "SplitAlgorithm, or one of the special strings HexStringSplit or " + - "DecimalStringSplit or UniformSplit, which are built-in split algorithms. " + - "HexStringSplit treats keys as hexadecimal ASCII, and " + - "DecimalStringSplit treats keys as decimal ASCII, and " + - "UniformSplit treats keys as arbitrary bytes.", opt); + new HelpFormatter().printHelp("bin/hbase regionsplitter
      \n" + + "SPLITALGORITHM is the java class name of a class implementing " + + "SplitAlgorithm, or one of the special strings HexStringSplit or " + + "DecimalStringSplit or UniformSplit, which are built-in split algorithms. " + + "HexStringSplit treats keys as hexadecimal ASCII, and " + + "DecimalStringSplit treats keys as decimal ASCII, and " + + "UniformSplit treats keys as arbitrary bytes.", opt); return; } TableName tableName = TableName.valueOf(cmd.getArgs()[0]); @@ -386,15 +337,14 @@ public class RegionSplitter { } static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, - String[] columnFamilies, Configuration conf) - throws IOException, InterruptedException { + String[] columnFamilies, Configuration conf) throws IOException, InterruptedException { final int splitCount = conf.getInt("split.count", 0); Preconditions.checkArgument(splitCount > 1, "Split count must be > 1"); Preconditions.checkArgument(columnFamilies.length > 0, - "Must specify at least one column family. "); + "Must specify at least one column family. "); LOG.debug("Creating table " + tableName + " with " + columnFamilies.length - + " column families. Presplitting to " + splitCount + " regions"); + + " column families. Presplitting to " + splitCount + " regions"); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (String cf : columnFamilies) { @@ -428,22 +378,21 @@ public class RegionSplitter { } /** - * Alternative getCurrentNrHRS which is no longer available. - * @param connection - * @return Rough count of regionservers out on cluster. + * Alternative getCurrentNrHRS which is no longer available. n * @return Rough count of + * regionservers out on cluster. * @throws IOException if a remote or network exception occurs */ private static int getRegionServerCount(final Connection connection) throws IOException { try (Admin admin = connection.getAdmin()) { Collection servers = admin.getRegionServers(); - return servers == null || servers.isEmpty()? 0: servers.size(); + return servers == null || servers.isEmpty() ? 0 : servers.size(); } } - private static byte [] readFile(final FileSystem fs, final Path path) throws IOException { + private static byte[] readFile(final FileSystem fs, final Path path) throws IOException { FSDataInputStream tmpIn = fs.open(path); try { - byte [] rawData = new byte[tmpIn.available()]; + byte[] rawData = new byte[tmpIn.available()]; tmpIn.readFully(rawData); return rawData; } finally { @@ -452,7 +401,7 @@ public class RegionSplitter { } static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final int minOS = conf.getInt("split.outstanding", 2); try (Connection connection = ConnectionFactory.createConnection(conf)) { // Max outstanding splits. default == 50% of servers @@ -476,9 +425,8 @@ public class RegionSplitter { // requests to the same RS can stall the outstanding split queue. // To fix, group the regions into an RS pool and round-robin through it LOG.debug("Bucketing regions by regionserver..."); - TreeMap>> daughterRegions = - Maps.newTreeMap(); - // Get a regionLocator. Need it in below. + TreeMap>> daughterRegions = Maps.newTreeMap(); + // Get a regionLocator. Need it in below. 
try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { for (Pair dr : tmpRegionSet) { ServerName rsLocation = regionLocator.getRegionLocation(dr.getSecond()).getServerName(); @@ -506,7 +454,7 @@ public class RegionSplitter { // Get ServerName to region count mapping final TreeMap rsSizes = Maps.newTreeMap(); List hrls = regionLocator.getAllRegionLocations(); - for (HRegionLocation hrl: hrls) { + for (HRegionLocation hrl : hrls) { ServerName sn = hrl.getServerName(); if (rsSizes.containsKey(sn)) { rsSizes.put(sn, rsSizes.get(sn) + 1); @@ -517,8 +465,8 @@ public class RegionSplitter { // Round-robin through the ServerName list. Choose the lightest-loaded servers // first to keep the master from load-balancing regions as we split. - for (Map.Entry>> daughterRegion : - daughterRegions.entrySet()) { + for (Map.Entry>> daughterRegion : daughterRegions.entrySet()) { Pair dr = null; ServerName rsLoc = daughterRegion.getKey(); LinkedList> regionList = daughterRegion.getValue(); @@ -535,8 +483,8 @@ public class RegionSplitter { // if this region moved locations ServerName newRs = regionLoc.getServerName(); if (newRs.compareTo(rsLoc) != 0) { - LOG.debug("Region with " + splitAlgo.rowToStr(split) - + " moved to " + newRs + ". Relocating..."); + LOG.debug("Region with " + splitAlgo.rowToStr(split) + " moved to " + newRs + + ". Relocating..."); // relocate it, don't use it right now if (!daughterRegions.containsKey(newRs)) { LinkedList> entry = Lists.newLinkedList(); @@ -551,15 +499,15 @@ public class RegionSplitter { byte[] sk = regionLoc.getRegionInfo().getStartKey(); if (sk.length != 0) { if (Bytes.equals(split, sk)) { - LOG.debug("Region already split on " - + splitAlgo.rowToStr(split) + ". Skipping this region..."); + LOG.debug("Region already split on " + splitAlgo.rowToStr(split) + + ". Skipping this region..."); ++splitCount; dr = null; continue; } byte[] start = dr.getFirst(); - Preconditions.checkArgument(Bytes.equals(start, sk), splitAlgo - .rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); + Preconditions.checkArgument(Bytes.equals(start, sk), + splitAlgo.rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); } // passed all checks! found a good region @@ -568,8 +516,7 @@ public class RegionSplitter { if (regionList.isEmpty()) { daughterRegions.remove(rsLoc); } - if (dr == null) - continue; + if (dr == null) continue; // we have a good region, time to split! byte[] split = dr.getSecond(); @@ -601,14 +548,13 @@ public class RegionSplitter { // mark each finished region as successfully split. for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " " + + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; if (splitCount % 10 == 0) { - long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) - / splitCount; - LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount - + ". Avg Time / Split = " + long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) / splitCount; + LOG.debug( + "STATUS UPDATE: " + splitCount + " / " + origCount + ". 
Avg Time / Split = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); } } @@ -617,15 +563,15 @@ public class RegionSplitter { if (conf.getBoolean("split.verify", true)) { while (!outstanding.isEmpty()) { LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); - LinkedList> finished = splitScan(outstanding, - connection, tableName, splitAlgo); + LinkedList> finished = + splitScan(outstanding, connection, tableName, splitAlgo); if (finished.isEmpty()) { Thread.sleep(30 * 1000); } else { outstanding.removeAll(finished); for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " " + + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; } LOG.debug("Finally " + finished.size() + " outstanding splits finished"); @@ -635,12 +581,11 @@ public class RegionSplitter { LOG.debug("All regions have been successfully split!"); } finally { long tDiff = EnvironmentEdgeManager.currentTime() - startTime; - LOG.debug("TOTAL TIME = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); LOG.debug("Splits = " + splitCount); if (0 < splitCount) { LOG.debug("Avg Time / Split = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); + + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); } } } finally { @@ -652,16 +597,15 @@ public class RegionSplitter { } /** - * @throws IOException if the specified SplitAlgorithm class couldn't be - * instantiated + * @throws IOException if the specified SplitAlgorithm class couldn't be instantiated */ - public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, - String splitClassName) throws IOException { + public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, String splitClassName) + throws IOException { Class splitClass; // For split algorithms builtin to RegionSplitter, the user can specify // their simple class name instead of a fully qualified class name. 
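A brief usage sketch of that lookup: built-in algorithms resolve by their simple class name, anything else by fully qualified class name (the custom class name in the comment below is hypothetical).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;

public class SplitAlgoLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Built-in algorithms can be named by their simple class name...
    SplitAlgorithm hex = RegionSplitter.newSplitAlgoInstance(conf, "HexStringSplit");
    // ...while custom implementations need the fully qualified class name, e.g.
    // RegionSplitter.newSplitAlgoInstance(conf, "com.example.MySplitAlgorithm");
    System.out.println(hex + " produces " + hex.split(4).length + " split keys");
  }
}
```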
- if(splitClassName.equals(HexStringSplit.class.getSimpleName())) { + if (splitClassName.equals(HexStringSplit.class.getSimpleName())) { splitClass = HexStringSplit.class; } else if (splitClassName.equals(DecimalStringSplit.class.getSimpleName())) { splitClass = DecimalStringSplit.class; @@ -673,12 +617,11 @@ public class RegionSplitter { } catch (ClassNotFoundException e) { throw new IOException("Couldn't load split class " + splitClassName, e); } - if(splitClass == null) { + if (splitClass == null) { throw new IOException("Failed loading split class " + splitClassName); } - if(!SplitAlgorithm.class.isAssignableFrom(splitClass)) { - throw new IOException( - "Specified split class doesn't implement SplitAlgorithm"); + if (!SplitAlgorithm.class.isAssignableFrom(splitClass)) { + throw new IOException("Specified split class doesn't implement SplitAlgorithm"); } } try { @@ -688,12 +631,9 @@ public class RegionSplitter { } } - static LinkedList> splitScan( - LinkedList> regionList, - final Connection connection, - final TableName tableName, - SplitAlgorithm splitAlgo) - throws IOException, InterruptedException { + static LinkedList> splitScan(LinkedList> regionList, + final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo) + throws IOException, InterruptedException { LinkedList> finished = Lists.newLinkedList(); LinkedList> logicalSplitting = Lists.newLinkedList(); LinkedList> physicalSplitting = Lists.newLinkedList(); @@ -704,7 +644,7 @@ public class RegionSplitter { Path tableDir = tableDirAndSplitFile.getFirst(); FileSystem fs = tableDir.getFileSystem(connection.getConfiguration()); // Clear the cache to forcibly refresh region information - ((ClusterConnection)connection).clearRegionLocationCache(); + ((ClusterConnection) connection).clearRegionLocationCache(); TableDescriptor htd = null; try (Table table = connection.getTable(tableName)) { htd = table.getDescriptor(); @@ -738,11 +678,10 @@ public class RegionSplitter { check.add(regionLocator.getRegionLocation(split).getRegionInfo()); for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { byte[] sk = hri.getStartKey(); - if (sk.length == 0) - sk = splitAlgo.firstRow(); + if (sk.length == 0) sk = splitAlgo.firstRow(); - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( - connection.getConfiguration(), fs, tableDir, hri, true); + HRegionFileSystem regionFs = HRegionFileSystem + .openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true); // Check every Column Family for that region -- check does not have references. boolean refFound = false; @@ -765,22 +704,19 @@ public class RegionSplitter { } catch (NoServerForRegionException nsfre) { LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start)); physicalSplitting.add(region); - ((ClusterConnection)connection).clearRegionLocationCache(); + ((ClusterConnection) connection).clearRegionLocationCache(); } } - LOG.debug("Split Scan: " + finished.size() + " finished / " - + logicalSplitting.size() + " split wait / " - + physicalSplitting.size() + " reference wait"); + LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + + " split wait / " + physicalSplitting.size() + " reference wait"); return finished; } } /** - * @param conf - * @param tableName - * @return A Pair where first item is table dir and second is the split file. + * nn * @return A Pair where first item is table dir and second is the split file. 
* @throws IOException if a remote or network exception occurs */ private static Pair getTableDirAndSplitFile(final Configuration conf, @@ -792,8 +728,7 @@ public class RegionSplitter { } static LinkedList> getSplits(final Connection connection, - TableName tableName, SplitAlgorithm splitAlgo) - throws IOException { + TableName tableName, SplitAlgorithm splitAlgo) throws IOException { Pair tableDirAndSplitFile = getTableDirAndSplitFile(connection.getConfiguration(), tableName); Path tableDir = tableDirAndSplitFile.getFirst(); @@ -816,13 +751,11 @@ public class RegionSplitter { tmp = regionLocator.getStartEndKeys(); } Preconditions.checkArgument(tmp.getFirst().length == tmp.getSecond().length, - "Start and End rows should be equivalent"); + "Start and End rows should be equivalent"); for (int i = 0; i < tmp.getFirst().length; ++i) { byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i]; - if (start.length == 0) - start = splitAlgo.firstRow(); - if (end.length == 0) - end = splitAlgo.lastRow(); + if (start.length == 0) start = splitAlgo.firstRow(); + if (end.length == 0) end = splitAlgo.lastRow(); rows.add(Pair.newPair(start, end)); } LOG.debug("Table " + tableName + " has " + rows.size() + " regions that will be split."); @@ -837,10 +770,9 @@ public class RegionSplitter { String startStr = splitAlgo.rowToStr(r.getFirst()); String splitStr = splitAlgo.rowToStr(splitPoint); daughterRegions.add(Pair.newPair(startStr, splitStr)); - LOG.debug("Will Split [" + startStr + " , " - + splitAlgo.rowToStr(r.getSecond()) + ") at " + splitStr); - tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr - + "\n"); + LOG.debug("Will Split [" + startStr + " , " + splitAlgo.rowToStr(r.getSecond()) + ") at " + + splitStr); + tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr + "\n"); } tmpOut.close(); fs.rename(tmpFile, splitFile); @@ -868,10 +800,8 @@ public class RegionSplitter { daughterRegions.add(r); } else { LOG.debug("Removing: " + r); - Preconditions.checkArgument(cmd[0].equals("-"), - "Unknown option: " + cmd[0]); - Preconditions.checkState(daughterRegions.contains(r), - "Missing row: " + r); + Preconditions.checkArgument(cmd[0].equals("-"), "Unknown option: " + cmd[0]); + Preconditions.checkState(daughterRegions.contains(r), "Missing row: " + r); daughterRegions.remove(r); } } @@ -879,22 +809,18 @@ public class RegionSplitter { } LinkedList> ret = Lists.newLinkedList(); for (Pair r : daughterRegions) { - ret.add(Pair.newPair(splitAlgo.strToRow(r.getFirst()), splitAlgo - .strToRow(r.getSecond()))); + ret.add(Pair.newPair(splitAlgo.strToRow(r.getFirst()), splitAlgo.strToRow(r.getSecond()))); } return ret; } /** - * HexStringSplit is a well-known {@link SplitAlgorithm} for choosing region - * boundaries. The format of a HexStringSplit region boundary is the ASCII - * representation of an MD5 checksum, or any other uniformly distributed - * hexadecimal value. Row are hex-encoded long values in the range - * "00000000" => "FFFFFFFF" and are left-padded with zeros to keep the - * same order lexicographically as if they were binary. - * - * Since this split algorithm uses hex strings as keys, it is easy to read & - * write in the shell but takes up more space and may be non-intuitive. + * HexStringSplit is a well-known {@link SplitAlgorithm} for choosing region boundaries. The + * format of a HexStringSplit region boundary is the ASCII representation of an MD5 checksum, or + * any other uniformly distributed hexadecimal value. 
Row are hex-encoded long values in the range + * "00000000" => "FFFFFFFF" and are left-padded with zeros to keep the same order + * lexicographically as if they were binary. Since this split algorithm uses hex strings as keys, + * it is easy to read & write in the shell but takes up more space and may be non-intuitive. */ public static class HexStringSplit extends NumberStringSplit { final static String DEFAULT_MIN_HEX = "00000000"; @@ -908,11 +834,10 @@ public class RegionSplitter { } /** - * The format of a DecimalStringSplit region boundary is the ASCII representation of - * reversed sequential number, or any other uniformly distributed decimal value. - * Row are decimal-encoded long values in the range - * "00000000" => "99999999" and are left-padded with zeros to keep the - * same order lexicographically as if they were binary. + * The format of a DecimalStringSplit region boundary is the ASCII representation of reversed + * sequential number, or any other uniformly distributed decimal value. Row are decimal-encoded + * long values in the range "00000000" => "99999999" and are left-padded with zeros to + * keep the same order lexicographically as if they were binary. */ public static class DecimalStringSplit extends NumberStringSplit { final static String DEFAULT_MIN_DEC = "00000000"; @@ -954,20 +879,18 @@ public class RegionSplitter { @Override public byte[][] split(int n) { Preconditions.checkArgument(lastRowInt.compareTo(firstRowInt) > 0, - "last row (%s) is configured less than first row (%s)", lastRow, - firstRow); + "last row (%s) is configured less than first row (%s)", lastRow, firstRow); // +1 to range because the last row is inclusive BigInteger range = lastRowInt.subtract(firstRowInt).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(n)) >= 0, - "split granularity (%s) is greater than the range (%s)", n, range); + "split granularity (%s) is greater than the range (%s)", n, range); BigInteger[] splits = new BigInteger[n - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(n)); for (int i = 1; i < n; i++) { // NOTE: this means the last region gets all the slop. // This is not a big deal if we're assuming n << MAXHEX - splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger - .valueOf(i))); + splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i))); } return convertToBytes(splits); } @@ -978,20 +901,18 @@ public class RegionSplitter { BigInteger e = convertToBigInteger(end); Preconditions.checkArgument(e.compareTo(s) > 0, - "last row (%s) is configured less than first row (%s)", rowToStr(end), - end); + "last row (%s) is configured less than first row (%s)", rowToStr(end), end); // +1 to range because the last row is inclusive BigInteger range = e.subtract(s).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(numSplits)) >= 0, - "split granularity (%s) is greater than the range (%s)", numSplits, range); + "split granularity (%s) is greater than the range (%s)", numSplits, range); BigInteger[] splits = new BigInteger[numSplits - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(numSplits)); for (int i = 1; i < numSplits; i++) { // NOTE: this means the last region gets all the slop. 
// This is not a big deal if we're assuming n << MAXHEX - splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger - .valueOf(i))); + splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i))); } if (inclusive) { @@ -1056,7 +977,6 @@ public class RegionSplitter { /** * Divide 2 numbers in half (for split algorithm) - * * @param a number #1 * @param b number #2 * @return the midpoint of the 2 numbers @@ -1067,7 +987,6 @@ public class RegionSplitter { /** * Returns an array of bytes corresponding to an array of BigIntegers - * * @param bigIntegers numbers to convert * @return bytes corresponding to the bigIntegers */ @@ -1081,9 +1000,8 @@ public class RegionSplitter { /** * Returns the bytes corresponding to the BigInteger - * * @param bigInteger number to convert - * @param pad padding length + * @param pad padding length * @return byte corresponding to input BigInteger */ public byte[] convertToByte(BigInteger bigInteger, int pad) { @@ -1094,7 +1012,6 @@ public class RegionSplitter { /** * Returns the bytes corresponding to the BigInteger - * * @param bigInteger number to convert * @return corresponding bytes */ @@ -1104,35 +1021,32 @@ public class RegionSplitter { /** * Returns the BigInteger represented by the byte array - * * @param row byte array representing row * @return the corresponding BigInteger */ public BigInteger convertToBigInteger(byte[] row) { - return (row.length > 0) ? new BigInteger(Bytes.toString(row), radix) - : BigInteger.ZERO; + return (row.length > 0) ? new BigInteger(Bytes.toString(row), radix) : BigInteger.ZERO; } @Override public String toString() { - return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) - + "," + rowToStr(lastRow()) + "]"; + return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + "," + + rowToStr(lastRow()) + "]"; } } /** - * A SplitAlgorithm that divides the space of possible keys evenly. Useful - * when the keys are approximately uniform random bytes (e.g. hashes). Rows - * are raw byte values in the range 00 => FF and are right-padded with - * zeros to keep the same memcmp() order. This is the natural algorithm to use - * for a byte[] environment and saves space, but is not necessarily the + * A SplitAlgorithm that divides the space of possible keys evenly. Useful when the keys are + * approximately uniform random bytes (e.g. hashes). Rows are raw byte values in the range 00 + * => FF and are right-padded with zeros to keep the same memcmp() order. This is the + * natural algorithm to use for a byte[] environment and saves space, but is not necessarily the * easiest for readability. 
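The split-point arithmetic above reduces to stepping evenly through a BigInteger range; a self-contained sketch of that computation with made-up 8-digit hex bounds standing in for the class's own first/last rows.

```java
import java.math.BigInteger;

public class EvenSplitSketch {
  public static void main(String[] args) {
    int numRegions = 4;
    BigInteger first = new BigInteger("00000000", 16);
    BigInteger last = new BigInteger("FFFFFFFF", 16);

    // +1 to the range because the last row is inclusive.
    BigInteger range = last.subtract(first).add(BigInteger.ONE);
    BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(numRegions));

    // numRegions - 1 boundaries; the final region absorbs any rounding slop.
    for (int i = 1; i < numRegions; i++) {
      BigInteger split = first.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i)));
      // Left-pad to 8 hex digits so lexicographic order matches numeric order.
      System.out.println(String.format("%08x", split));
    }
    // Prints 40000000, 80000000, c0000000
  }
}
```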
*/ public static class UniformSplit implements SplitAlgorithm { static final byte xFF = (byte) 0xFF; byte[] firstRowBytes = ArrayUtils.EMPTY_BYTE_ARRAY; - byte[] lastRowBytes = - new byte[] {xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF}; + byte[] lastRowBytes = new byte[] { xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF }; + @Override public byte[] split(byte[] start, byte[] end) { return Bytes.split(start, end, 1)[1]; @@ -1140,20 +1054,17 @@ public class RegionSplitter { @Override public byte[][] split(int numRegions) { - Preconditions.checkArgument( - Bytes.compareTo(lastRowBytes, firstRowBytes) > 0, - "last row (%s) is configured less than first row (%s)", - Bytes.toStringBinary(lastRowBytes), - Bytes.toStringBinary(firstRowBytes)); + Preconditions.checkArgument(Bytes.compareTo(lastRowBytes, firstRowBytes) > 0, + "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(lastRowBytes), + Bytes.toStringBinary(firstRowBytes)); - byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, - numRegions - 1); + byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, numRegions - 1); Preconditions.checkState(splits != null, - "Could not split region with given user input: " + this); + "Could not split region with given user input: " + this); // remove endpoints, which are included in the splits list - return splits == null? null: Arrays.copyOfRange(splits, 1, splits.length - 1); + return splits == null ? null : Arrays.copyOfRange(splits, 1, splits.length - 1); } @Override @@ -1164,16 +1075,13 @@ public class RegionSplitter { if (Arrays.equals(end, HConstants.EMPTY_BYTE_ARRAY)) { end = lastRowBytes; } - Preconditions.checkArgument( - Bytes.compareTo(end, start) > 0, - "last row (%s) is configured less than first row (%s)", - Bytes.toStringBinary(end), - Bytes.toStringBinary(start)); + Preconditions.checkArgument(Bytes.compareTo(end, start) > 0, + "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(end), + Bytes.toStringBinary(start)); - byte[][] splits = Bytes.split(start, end, true, - numSplits - 1); + byte[][] splits = Bytes.split(start, end, true, numSplits - 1); Preconditions.checkState(splits != null, - "Could not calculate input splits with given user input: " + this); + "Could not calculate input splits with given user input: " + this); if (inclusive) { return splits; } else { @@ -1202,7 +1110,6 @@ public class RegionSplitter { lastRowBytes = Bytes.toBytesBinary(userInput); } - @Override public void setFirstRow(byte[] userInput) { firstRowBytes = userInput; @@ -1230,8 +1137,8 @@ public class RegionSplitter { @Override public String toString() { - return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) - + "," + rowToStr(lastRow()) + "]"; + return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + "," + + rowToStr(lastRow()) + "]"; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java index fb2a9541742..1fd17be600f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.yetus.audience.InterfaceAudience; /** - * This class maintains mean and variation for any sequence of input provided to it. - * It is initialized with number of rolling periods which basically means the number of past - * inputs whose data will be considered to maintain mean and variation. - * It will use O(N) memory to maintain these statistics, where N is number of look up periods it - * was initialized with. - * If zero is passed during initialization then it will maintain mean and variance from the - * start. It will use O(1) memory only. But note that since it will maintain mean / variance - * from the start the statistics may behave like constants and may ignore short trends. - * All operations are O(1) except the initialization which is O(N). + * This class maintains mean and variation for any sequence of input provided to it. It is + * initialized with number of rolling periods which basically means the number of past inputs whose + * data will be considered to maintain mean and variation. It will use O(N) memory to maintain these + * statistics, where N is number of look up periods it was initialized with. If zero is passed + * during initialization then it will maintain mean and variance from the start. It will use O(1) + * memory only. But note that since it will maintain mean / variance from the start the statistics + * may behave like constants and may ignore short trends. All operations are O(1) except the + * initialization which is O(N). */ @InterfaceAudience.Private public class RollingStatCalculator { @@ -41,11 +38,10 @@ public class RollingStatCalculator { private int rollingPeriod; private int currentIndexPosition; // to be used only if we have non-zero rolling period - private long [] dataValues; + private long[] dataValues; /** - * Creates a RollingStatCalculator with given number of rolling periods. - * @param rollingPeriod + * Creates a RollingStatCalculator with given number of rolling periods. 
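Because only a running sum and sum of squares are kept, the mean and deviation are O(1) to read; a stand-alone sketch of the same bookkeeping for a fixed window (window size and sample values are invented for the example).

```java
import java.util.ArrayDeque;
import java.util.Deque;

public class RollingStatsSketch {
  private final int window;
  private final Deque<Long> values = new ArrayDeque<>();
  private double sum;
  private double sqrSum;

  RollingStatsSketch(int window) {
    this.window = window;
  }

  void insert(long data) {
    // Evict the oldest sample once the window is full, keeping the sums consistent.
    if (values.size() == window) {
      long old = values.removeFirst();
      sum -= old;
      sqrSum -= (double) old * old;
    }
    values.addLast(data);
    sum += data;
    sqrSum += (double) data * data;
  }

  double mean() {
    return sum / values.size();
  }

  double deviation() {
    // Var(X) = E[X^2] - E[X]^2, computed from the two running sums.
    double n = values.size();
    return Math.sqrt(sqrSum / n - (sum / n) * (sum / n));
  }

  public static void main(String[] args) {
    RollingStatsSketch stats = new RollingStatsSketch(3);
    for (long v : new long[] { 10, 20, 30, 100 }) {
      stats.insert(v);
      System.out.printf("mean=%.1f deviation=%.1f%n", stats.mean(), stats.deviation());
    }
  }
}
```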
n */ public RollingStatCalculator(int rollingPeriod) { this.rollingPeriod = rollingPeriod; @@ -57,32 +53,29 @@ public class RollingStatCalculator { } /** - * Inserts given data value to array of data values to be considered for statistics calculation - * @param data + * Inserts given data value to array of data values to be considered for statistics calculation n */ public void insertDataValue(long data) { // if current number of data points already equals rolling period and rolling period is // non-zero then remove one data and update the statistics - if(numberOfDataValues >= rollingPeriod && rollingPeriod > 0) { + if (numberOfDataValues >= rollingPeriod && rollingPeriod > 0) { this.removeData(dataValues[currentIndexPosition]); } numberOfDataValues++; - currentSum = currentSum + (double)data; - currentSqrSum = currentSqrSum + ((double)data * data); - if (rollingPeriod >0) - { + currentSum = currentSum + (double) data; + currentSqrSum = currentSqrSum + ((double) data * data); + if (rollingPeriod > 0) { dataValues[currentIndexPosition] = data; currentIndexPosition = (currentIndexPosition + 1) % rollingPeriod; } } /** - * Update the statistics after removing the given data value - * @param data + * Update the statistics after removing the given data value n */ private void removeData(long data) { - currentSum = currentSum - (double)data; - currentSqrSum = currentSqrSum - ((double)data * data); + currentSum = currentSum - (double) data; + currentSqrSum = currentSqrSum - ((double) data * data); numberOfDataValues--; } @@ -90,25 +83,24 @@ public class RollingStatCalculator { * @return mean of the data values that are in the current list of data values */ public double getMean() { - return this.currentSum / (double)numberOfDataValues; + return this.currentSum / (double) numberOfDataValues; } /** * @return deviation of the data values that are in the current list of data values */ public double getDeviation() { - double variance = (currentSqrSum - (currentSum*currentSum)/(double)(numberOfDataValues))/ - numberOfDataValues; + double variance = (currentSqrSum - (currentSum * currentSum) / (double) (numberOfDataValues)) + / numberOfDataValues; return Math.sqrt(variance); } /** - * @param size - * @return an array of given size initialized with zeros + * n * @return an array of given size initialized with zeros */ - private long [] fillWithZeros(int size) { - long [] zeros = new long [size]; - for (int i=0; iIf enabled, you can also exclude environment variables containing - * certain substrings by setting {@code "hbase.envvars.logging.skipwords"} - * to comma separated list of such substrings. + * Logs information about the currently running JVM process including the environment variables. + * Logging of env vars can be disabled by setting {@code "hbase.envvars.logging.disabled"} to + * {@code "true"}. + *

      + * If enabled, you can also exclude environment variables containing certain substrings by setting + * {@code "hbase.envvars.logging.skipwords"} to comma separated list of such substrings. */ public static void logProcessInfo(Configuration conf) { logHBaseConfigs(conf); @@ -124,16 +119,14 @@ public abstract class ServerCommandLine extends Configured implements Tool { } } - nextEnv: - for (Entry entry : System.getenv().entrySet()) { + nextEnv: for (Entry entry : System.getenv().entrySet()) { String key = entry.getKey().toLowerCase(Locale.ROOT); String value = entry.getValue().toLowerCase(Locale.ROOT); // exclude variables which may contain skip words - for(String skipWord : skipWords) { - if (key.contains(skipWord) || value.contains(skipWord)) - continue nextEnv; + for (String skipWord : skipWords) { + if (key.contains(skipWord) || value.contains(skipWord)) continue nextEnv; } - LOG.info("env:"+entry); + LOG.info("env:" + entry); } } @@ -142,10 +135,9 @@ public abstract class ServerCommandLine extends Configured implements Tool { } /** - * Parse and run the given command line. This will exit the JVM with - * the exit code returned from run(). - * If return code is 0, wait for atmost 30 seconds for all non-daemon threads to quit, - * otherwise exit the jvm + * Parse and run the given command line. This will exit the JVM with the exit code returned from + * run(). If return code is 0, wait for atmost 30 seconds for all non-daemon threads + * to quit, otherwise exit the jvm */ public void doMain(String args[]) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index 5583a477a55..a5ba1424c7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; @@ -49,34 +47,33 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { private static final Logger LOG = LoggerFactory.getLogger(ServerRegionReplicaUtil.class); /** - * Whether asynchronous WAL replication to the secondary region replicas is enabled or not. - * If this is enabled, a replication peer named "region_replica_replication" will be created - * which will tail the logs and replicate the mutatations to region replicas for tables that - * have region replication > 1. If this is enabled once, disabling this replication also - * requires disabling the replication peer using shell or {@link Admin} java class. - * Replication to secondary region replicas works over standard inter-cluster replication.· + * Whether asynchronous WAL replication to the secondary region replicas is enabled or not. If + * this is enabled, a replication peer named "region_replica_replication" will be created which + * will tail the logs and replicate the mutatations to region replicas for tables that have region + * replication > 1. 
If this is enabled once, disabling this replication also requires disabling + * the replication peer using shell or {@link Admin} java class. Replication to secondary region + * replicas works over standard inter-cluster replication.· */ - public static final String REGION_REPLICA_REPLICATION_CONF_KEY - = "hbase.region.replica.replication.enabled"; + public static final String REGION_REPLICA_REPLICATION_CONF_KEY = + "hbase.region.replica.replication.enabled"; private static final boolean DEFAULT_REGION_REPLICA_REPLICATION = false; public static final String REGION_REPLICA_REPLICATION_PEER = "region_replica_replication"; /** * Same as for {@link #REGION_REPLICA_REPLICATION_CONF_KEY} but for catalog replication. */ - public static final String REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY - = "hbase.region.replica.replication.catalog.enabled"; + public static final String REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY = + "hbase.region.replica.replication.catalog.enabled"; private static final boolean DEFAULT_REGION_REPLICA_REPLICATION_CATALOG = false; - /** * Enables or disables refreshing store files of secondary region replicas when the memory is * above the global memstore lower limit. Refreshing the store files means that we will do a file * list of the primary regions store files, and pick up new files. Also depending on the store * files, we can drop some memstore contents which will free up memory. */ - public static final String REGION_REPLICA_STORE_FILE_REFRESH - = "hbase.region.replica.storefile.refresh"; + public static final String REGION_REPLICA_STORE_FILE_REFRESH = + "hbase.region.replica.storefile.refresh"; private static final boolean DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH = true; /** @@ -84,8 +81,8 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { * region. Default value assumes that for doing the file refresh, the biggest secondary should be * 4 times bigger than the biggest primary. */ - public static final String REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER - = "hbase.region.replica.storefile.refresh.memstore.multiplier"; + public static final String REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER = + "hbase.region.replica.storefile.refresh.memstore.multiplier"; private static final double DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER = 4; /** @@ -105,15 +102,13 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { * @return whether the replica is read only */ public static boolean isReadOnly(HRegion region) { - return region.getTableDescriptor().isReadOnly() - || !isDefaultReplica(region.getRegionInfo()); + return region.getTableDescriptor().isReadOnly() || !isDefaultReplica(region.getRegionInfo()); } /** - * Returns whether to replay the recovered edits to flush the results. - * Currently secondary region replicas do not replay the edits, since it would - * cause flushes which might affect the primary region. Primary regions even opened - * in read only mode should replay the edits. + * Returns whether to replay the recovered edits to flush the results. Currently secondary region + * replicas do not replay the edits, since it would cause flushes which might affect the primary + * region. Primary regions even opened in read only mode should replay the edits. * @param region the HRegion object * @return whether recovered edits should be replayed. */ @@ -122,14 +117,14 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { } /** - * Returns a StoreFileInfo from the given FileStatus. 
Secondary replicas refer to the - * files of the primary region, so an HFileLink is used to construct the StoreFileInfo. This - * way ensures that the secondary will be able to continue reading the store files even if - * they are moved to archive after compaction + * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the files of the + * primary region, so an HFileLink is used to construct the StoreFileInfo. This way ensures that + * the secondary will be able to continue reading the store files even if they are moved to + * archive after compaction */ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, - RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) - throws IOException { + RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) + throws IOException { // if this is a primary region, just return the StoreFileInfo constructed from path if (RegionInfo.COMPARATOR.compare(regionInfo, regionInfoForFs) == 0) { @@ -138,9 +133,8 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { // else create a store file link. The link file does not exists on filesystem though. if (HFileLink.isHFileLink(path) || StoreFileInfo.isHFile(path)) { - HFileLink link = HFileLink - .build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, - path.getName()); + HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), + regionInfoForFs.getEncodedName(), familyName, path.getName()); return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link); } else if (StoreFileInfo.isReference(path)) { Reference reference = Reference.read(fs, path); @@ -151,9 +145,8 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference, link); } else { // Reference - HFileLink link = HFileLink - .build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, - path.getName()); + HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), + regionInfoForFs.getEncodedName(), familyName, path.getName()); return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference); } } else { @@ -162,20 +155,22 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { } /** - * Create replication peer for replicating user-space Region Read Replicas. - * This methods should only be called at master side. + * Create replication peer for replicating user-space Region Read Replicas. This methods should + * only be called at master side. */ public static void setupRegionReplicaReplication(MasterServices services) throws IOException, ReplicationException { if (!isRegionReplicaReplicationEnabled(services.getConfiguration())) { return; } - if (services.getReplicationPeerManager().getPeerConfig(REGION_REPLICA_REPLICATION_PEER) - .isPresent()) { + if ( + services.getReplicationPeerManager().getPeerConfig(REGION_REPLICA_REPLICATION_PEER) + .isPresent() + ) { return; } - LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + - " not exist. Creating..."); + LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + + " not exist. 
Creating..."); ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() .setClusterKey(ZKConfig.getZooKeeperClusterKey(services.getConfiguration())) .setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()).build(); @@ -184,11 +179,11 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { /** * @return True if Region Read Replica is enabled for tn (whether hbase:meta or - * user-space tables). + * user-space tables). */ public static boolean isRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { - return isMetaRegionReplicaReplicationEnabled(conf, tn) || - isRegionReplicaReplicationEnabled(conf); + return isMetaRegionReplicaReplicationEnabled(conf, tn) + || isRegionReplicaReplicationEnabled(conf); } /** @@ -202,9 +197,8 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { * @return True if hbase:meta Region Read Replica is enabled. */ public static boolean isMetaRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { - return TableName.isMetaTableName(tn) && - conf.getBoolean(REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, - DEFAULT_REGION_REPLICA_REPLICATION_CATALOG); + return TableName.isMetaTableName(tn) && conf.getBoolean( + REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION_CATALOG); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java index 3b206a88314..cbb41310ffb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** * This class provides ShutdownHookManager shims for HBase to interact with the Hadoop 1.0.x and the - * Hadoop 2.0+ series. - * - * NOTE: No testing done against 0.22.x, or 0.21.x. + * Hadoop 2.0+ series. NOTE: No testing done against 0.22.x, or 0.21.x. 
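// On Hadoop 2.0+, the reflective calls in this ShutdownHookManager shim resolve to the singleton
// API sketched here. This is only an illustration of that target API under the assumption that
// org.apache.hadoop.util.ShutdownHookManager is on the classpath; it is not a replacement for the
// HBase shim.
import org.apache.hadoop.util.ShutdownHookManager;

public class ShutdownHookSketch {
  public static void main(String[] args) {
    Runnable hook = () -> System.out.println("flushing state before JVM exit");
    // The int priority orders the hooks; higher priorities run earlier during shutdown.
    ShutdownHookManager.get().addShutdownHook(hook, 10);
    boolean removed = ShutdownHookManager.get().removeShutdownHook(hook);
    System.out.println("hook removed: " + removed);
  }
}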
*/ @InterfaceAudience.Private abstract public class ShutdownHookManager { @@ -75,10 +73,8 @@ abstract public class ShutdownHookManager { public void addShutdownHook(Thread shutdownHookThread, int priority) { try { Methods.call(shutdownHookManagerClass, - Methods.call(shutdownHookManagerClass, null, "get", null, null), - "addShutdownHook", - new Class[] { Runnable.class, int.class }, - new Object[] { shutdownHookThread, priority }); + Methods.call(shutdownHookManagerClass, null, "get", null, null), "addShutdownHook", + new Class[] { Runnable.class, int.class }, new Object[] { shutdownHookThread, priority }); } catch (Exception ex) { throw new RuntimeException("we could not use ShutdownHookManager.addShutdownHook", ex); } @@ -87,12 +83,9 @@ abstract public class ShutdownHookManager { @Override public boolean removeShutdownHook(Runnable shutdownHook) { try { - return (Boolean) - Methods.call(shutdownHookManagerClass, - Methods.call(shutdownHookManagerClass, null, "get", null, null), - "removeShutdownHook", - new Class[] { Runnable.class }, - new Object[] { shutdownHook }); + return (Boolean) Methods.call(shutdownHookManagerClass, + Methods.call(shutdownHookManagerClass, null, "get", null, null), "removeShutdownHook", + new Class[] { Runnable.class }, new Object[] { shutdownHook }); } catch (Exception ex) { throw new RuntimeException("we could not use ShutdownHookManager", ex); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java index f896e550a16..637ec5cc4b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.util.ArrayList; @@ -29,35 +28,31 @@ import java.util.RandomAccess; import org.apache.yetus.audience.InterfaceAudience; /** - * Simple sorted list implementation that uses {@link java.util.ArrayList} as - * the underlying collection so we can support RandomAccess. All mutations - * create a new copy of the ArrayList instance, so can be - * expensive. This class is only intended for use on small, very rarely - * written collections that expect highly concurrent reads. + * Simple sorted list implementation that uses {@link java.util.ArrayList} as the underlying + * collection so we can support RandomAccess. All mutations create a new copy of the + * ArrayList instance, so can be expensive. This class is only intended for use on + * small, very rarely written collections that expect highly concurrent reads. *
<p>
      - * Read operations are performed on a reference to the internal list at the - * time of invocation, so will not see any mutations to the collection during - * their operation. Iterating over list elements manually using the - * RandomAccess pattern involves multiple operations. For this to be safe get - * a reference to the internal list first using get(). + * Read operations are performed on a reference to the internal list at the time of invocation, so + * will not see any mutations to the collection during their operation. Iterating over list elements + * manually using the RandomAccess pattern involves multiple operations. For this to be safe get a + * reference to the internal list first using get(). *
<p>
      - * If constructed with a {@link java.util.Comparator}, the list will be sorted - * using the comparator. Adding or changing an element using an index will - * trigger a resort. + * If constructed with a {@link java.util.Comparator}, the list will be sorted using the comparator. + * Adding or changing an element using an index will trigger a resort. *
<p>
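// Usage sketch for the SortedList documented above: take one reference via get() and iterate that
// snapshot with the RandomAccess pattern so concurrent writers cannot be observed mid-iteration.
// The generic signatures are assumed here; they are elided in the hunk text.
import java.util.List;
import org.apache.hadoop.hbase.util.SortedList;

public class SortedListSketch {
  public static void main(String[] args) {
    SortedList<String> names = new SortedList<>(String.CASE_INSENSITIVE_ORDER);
    names.add("zookeeper");
    names.add("hbase");
    names.add("hdfs");
    // Grab a stable reference first; later mutations swap in a brand new backing ArrayList.
    List<String> snapshot = names.get();
    for (int i = 0; i < snapshot.size(); i++) {
      System.out.println(snapshot.get(i));
    }
  }
}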
      * Iterators are read-only. They cannot be used to remove elements. */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UG_SYNC_SET_UNSYNC_GET", - justification="TODO: synchronization in here needs review!!!") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UG_SYNC_SET_UNSYNC_GET", + justification = "TODO: synchronization in here needs review!!!") @InterfaceAudience.Private public class SortedList implements List, RandomAccess { private volatile List list; private final Comparator comparator; /** - * Constructs an empty list with the default initial capacity that will be - * sorted using the given comparator. - * + * Constructs an empty list with the default initial capacity that will be sorted using the given + * comparator. * @param comparator the comparator */ public SortedList(Comparator comparator) { @@ -66,11 +61,9 @@ public class SortedList implements List, RandomAccess { } /** - * Constructs a list containing the elements of the given collection, in the - * order returned by the collection's iterator, that will be sorted with the - * given comparator. - * - * @param c the collection + * Constructs a list containing the elements of the given collection, in the order returned by the + * collection's iterator, that will be sorted with the given comparator. + * @param c the collection * @param comparator the comparator */ public SortedList(Collection c, Comparator comparator) { @@ -79,10 +72,9 @@ public class SortedList implements List, RandomAccess { } /** - * Returns a reference to the unmodifiable list currently backing the SortedList. - * Changes to the SortedList will not be reflected in this list. Use this - * method to get a reference for iterating over using the RandomAccess - * pattern. + * Returns a reference to the unmodifiable list currently backing the SortedList. Changes to the + * SortedList will not be reflected in this list. Use this method to get a reference for iterating + * over using the RandomAccess pattern. */ public List get() { // FindBugs: UG_SYNC_SET_UNSYNC_GET complaint. Fix!! return list; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java index efd3da3a88d..7b41331abeb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,8 +17,6 @@ */ package org.apache.hadoop.hbase.util; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.Comparator; import java.util.concurrent.BlockingQueue; import java.util.concurrent.PriorityBlockingQueue; @@ -27,17 +24,16 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.yetus.audience.InterfaceAudience; /** - * This queue allows a ThreadPoolExecutor to steal jobs from another ThreadPoolExecutor. - * This queue also acts as the factory for creating the PriorityBlockingQueue to be used in the - * steal-from ThreadPoolExecutor. 
The behavior of this queue is the same as a normal - * PriorityBlockingQueue except the take/poll(long,TimeUnit) methods would also check whether there - * are jobs in the steal-from queue if this q ueue is empty. - * - * Note the workers in ThreadPoolExecutor must be pre-started so that they can steal job from the - * other queue, otherwise the worker will only be started after there are jobs submitted to main - * queue. + * This queue allows a ThreadPoolExecutor to steal jobs from another ThreadPoolExecutor. This queue + * also acts as the factory for creating the PriorityBlockingQueue to be used in the steal-from + * ThreadPoolExecutor. The behavior of this queue is the same as a normal PriorityBlockingQueue + * except the take/poll(long,TimeUnit) methods would also check whether there are jobs in the + * steal-from queue if this q ueue is empty. Note the workers in ThreadPoolExecutor must be + * pre-started so that they can steal job from the other queue, otherwise the worker will only be + * started after there are jobs submitted to main queue. */ @InterfaceAudience.Private public class StealJobQueue extends PriorityBlockingQueue { @@ -54,7 +50,7 @@ public class StealJobQueue extends PriorityBlockingQueue { } public StealJobQueue(int initCapacity, int stealFromQueueInitCapacity, - Comparator comparator) { + Comparator comparator) { super(initCapacity, comparator); this.stealFromQueue = new PriorityBlockingQueue(stealFromQueueInitCapacity, comparator) { @@ -92,7 +88,6 @@ public class StealJobQueue extends PriorityBlockingQueue { } } - @Override public T take() throws InterruptedException { lock.lockInterruptibly(); @@ -124,8 +119,7 @@ public class StealJobQueue extends PriorityBlockingQueue { retVal = stealFromQueue.poll(); } if (retVal == null) { - if (nanos <= 0) - return null; + if (nanos <= 0) return null; nanos = notEmpty.awaitNanos(nanos); } else { return retVal; @@ -136,4 +130,3 @@ public class StealJobQueue extends PriorityBlockingQueue { } } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index 5a28187b824..6683b8734a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
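// Sketch of the pre-start requirement spelled out in the StealJobQueue javadoc above: the executor
// owning the main queue must pre-start its core threads, otherwise work that only ever lands in
// the steal-from queue would never be picked up. getStealFromQueue() is assumed from the class's
// public API; it is not part of the hunks shown here.
import java.util.Comparator;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.util.StealJobQueue;

public class StealJobQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    Comparator<Runnable> byHash = Comparator.comparingInt(Object::hashCode);
    StealJobQueue<Runnable> mainQueue = new StealJobQueue<>(16, 16, byHash);
    ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS, mainQueue);
    pool.prestartAllCoreThreads(); // without this, a job sitting only in the steal-from queue starves
    // Jobs dropped into the steal-from queue are taken by the same workers once mainQueue is empty.
    mainQueue.getStealFromQueue().add(() -> System.out.println("stolen job ran"));
    Thread.sleep(200);
    pool.shutdown();
  }
}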
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -50,11 +49,11 @@ public final class TableDescriptorChecker { public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks"; public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true; - //should we check the compression codec type at master side, default true, HBASE-6370 + // should we check the compression codec type at master side, default true, HBASE-6370 public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression"; public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true; - //should we check encryption settings at master side, default true + // should we check encryption settings at master side, default true public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption"; public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true; @@ -62,14 +61,12 @@ public final class TableDescriptorChecker { } /** - * Checks whether the table conforms to some sane limits, and configured - * values (compression, etc) work. Throws an exception if something is wrong. + * Checks whether the table conforms to some sane limits, and configured values (compression, etc) + * work. Throws an exception if something is wrong. */ public static void sanityCheck(final Configuration c, final TableDescriptor td) - throws IOException { - CompoundConfiguration conf = new CompoundConfiguration() - .add(c) - .addBytesMap(td.getValues()); + throws IOException { + CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues()); // Setting this to true logs the warning instead of throwing exception boolean logWarn = false; @@ -85,14 +82,13 @@ public final class TableDescriptorChecker { long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check - long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? - conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : - Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); + long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null + ? 
conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) + : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { - String message = - "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + - maxFileSize + ") is too small, which might cause over splitting into unmanageable " + - "number of regions."; + String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + + maxFileSize + ") is too small, which might cause over splitting into unmanageable " + + "number of regions."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -100,13 +96,13 @@ public final class TableDescriptorChecker { long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in // hbase-site.xml, use flushSizeLowerLimit instead to skip this check - long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? - conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : - Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); + long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null + ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) + : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { - String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + - "\"hbase.hregion.memstore.flush.size\" (" + flushSize + - ") is too small, which might cause" + " very frequent flushing."; + String message = + "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + + flushSize + ") is too small, which might cause" + " very frequent flushing."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -167,25 +163,25 @@ public final class TableDescriptorChecker { // check blockSize if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) { - String message = "Block size for column family " + hcd.getNameAsString() + - " must be between 1K and 16MB."; + String message = "Block size for column family " + hcd.getNameAsString() + + " must be between 1K and 16MB."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check versions if (hcd.getMinVersions() < 0) { String message = - "Min versions for column family " + hcd.getNameAsString() + " must be positive."; + "Min versions for column family " + hcd.getNameAsString() + " must be positive."; warnOrThrowExceptionForFailure(logWarn, message, null); } // max versions already being checked // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor - // does not throw IllegalArgumentException + // does not throw IllegalArgumentException // check minVersions <= maxVerions if (hcd.getMinVersions() > hcd.getMaxVersions()) { - String message = "Min versions for column family " + hcd.getNameAsString() + - " must be less than the Max versions."; + String message = "Min versions for column family " + hcd.getNameAsString() + + " must be less than the Max versions."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -197,8 +193,8 @@ public final class TableDescriptorChecker { // check data replication factor, it can be 0(default value) when user has not explicitly // set the value, in this case we use default 
replication factor set in the file system. if (hcd.getDFSReplication() < 0) { - String message = "HFile Replication for column family " + hcd.getNameAsString() + - " must be greater than zero."; + String message = "HFile Replication for column family " + hcd.getNameAsString() + + " must be greater than zero."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -215,9 +211,8 @@ public final class TableDescriptorChecker { // check replication scope WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(cfd.getScope()); if (scop == null) { - String message = - "Replication scope for column family " + cfd.getNameAsString() + " is " + cfd.getScope() + - " which is invalid."; + String message = "Replication scope for column family " + cfd.getNameAsString() + " is " + + cfd.getScope() + " which is invalid."; LOG.error(message); throw new DoNotRetryIOException(message); @@ -225,13 +220,13 @@ public final class TableDescriptorChecker { } private static void checkCompactionPolicy(Configuration conf, TableDescriptor td) - throws IOException { + throws IOException { // FIFO compaction has some requirements // Actually FCP ignores periodic major compactions String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); if (className == null) { className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, - ExploringCompactionPolicy.class.getName()); + ExploringCompactionPolicy.class.getName()); } int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT; @@ -244,7 +239,7 @@ public final class TableDescriptorChecker { for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { String compactionPolicy = - hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); + hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); if (compactionPolicy == null) { compactionPolicy = className; } @@ -273,8 +268,8 @@ public final class TableDescriptorChecker { } if (blockingFileCount < 1000) { message = - "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount + - " is below recommended minimum of 1000 for column family " + hcd.getNameAsString(); + "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount + + " is below recommended minimum of 1000 for column family " + hcd.getNameAsString(); throw new IOException(message); } } @@ -299,24 +294,24 @@ public final class TableDescriptorChecker { } public static void checkEncryption(final Configuration conf, final TableDescriptor td) - throws IOException { + throws IOException { for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey()); } } public static void checkClassLoading(final Configuration conf, final TableDescriptor td) - throws IOException { + throws IOException { RegionSplitPolicy.getSplitPolicyClass(td, conf); RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td); } // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled. 
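// A minimal sketch of how sanityCheck() rejects a descriptor that violates the limits reformatted
// above. Only sanityCheck() itself comes from these hunks; the builder calls are standard client
// API, and the table name, family and flush size are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.TableDescriptorChecker;

public class SanityCheckSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setMemStoreFlushSize(64 * 1024L) // far below the 1M lower limit checked above
      .build();
    try {
      TableDescriptorChecker.sanityCheck(conf, td);
    } catch (java.io.IOException e) {
      // With hbase.table.sanity.checks left at its default (true) this throws instead of warning.
      System.out.println("rejected: " + e.getMessage());
    }
  }
}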
private static void warnOrThrowExceptionForFailure(boolean logWarn, String message, - Exception cause) throws IOException { + Exception cause) throws IOException { if (!logWarn) { - throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS + - " to false at conf or table descriptor if you want to bypass sanity checks", cause); + throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS + + " to false at conf or table descriptor if you want to bypass sanity checks", cause); } LOG.warn(message); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java index 3070fb37277..72f874fe7a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience; public final class YammerHistogramUtils { // not for public consumption - private YammerHistogramUtils() {} + private YammerHistogramUtils() { + } /** * Used formatting doubles so only two places after decimal point. @@ -38,13 +38,12 @@ public final class YammerHistogramUtils { private static DecimalFormat DOUBLE_FORMAT = new DecimalFormat("#0.00"); /** - * Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are - * not public in 2.2.0, so we use reflection to find them. + * Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are not public + * in 2.2.0, so we use reflection to find them. */ public static Histogram newHistogram(Reservoir sample) { try { - Constructor ctor = - Histogram.class.getDeclaredConstructor(Reservoir.class); + Constructor ctor = Histogram.class.getDeclaredConstructor(Reservoir.class); ctor.setAccessible(true); return (Histogram) ctor.newInstance(sample); } catch (Exception e) { @@ -55,44 +54,41 @@ public final class YammerHistogramUtils { /** @return an abbreviated summary of {@code hist}. */ public static String getShortHistogramReport(final Histogram hist) { Snapshot sn = hist.getSnapshot(); - return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + - ", min=" + DOUBLE_FORMAT.format(sn.getMin()) + - ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + - ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + - ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) + - ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()); + return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + ", min=" + + DOUBLE_FORMAT.format(sn.getMin()) + ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + + ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + ", 95th=" + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + ", 99th=" + + DOUBLE_FORMAT.format(sn.get99thPercentile()); } /** @return a summary of {@code hist}. 
*/ public static String getHistogramReport(final Histogram hist) { Snapshot sn = hist.getSnapshot(); - return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + - ", min=" + DOUBLE_FORMAT.format(sn.getMin()) + - ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + - ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + - ", 50th=" + DOUBLE_FORMAT.format(sn.getMedian()) + - ", 75th=" + DOUBLE_FORMAT.format(sn.get75thPercentile()) + - ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) + - ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()) + - ", 99.9th=" + DOUBLE_FORMAT.format(sn.get999thPercentile()) + - ", 99.99th=" + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + - ", 99.999th=" + DOUBLE_FORMAT.format(sn.getValue(0.99999)); + return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + ", min=" + + DOUBLE_FORMAT.format(sn.getMin()) + ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + + ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + ", 50th=" + + DOUBLE_FORMAT.format(sn.getMedian()) + ", 75th=" + + DOUBLE_FORMAT.format(sn.get75thPercentile()) + ", 95th=" + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + ", 99th=" + + DOUBLE_FORMAT.format(sn.get99thPercentile()) + ", 99.9th=" + + DOUBLE_FORMAT.format(sn.get999thPercentile()) + ", 99.99th=" + + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + ", 99.999th=" + + DOUBLE_FORMAT.format(sn.getValue(0.99999)); } /** @return pretty summary of {@code hist}. */ public static String getPrettyHistogramReport(final Histogram h) { Snapshot sn = h.getSnapshot(); - return - "Mean = " + DOUBLE_FORMAT.format(sn.getMean()) + "\n" + - "Min = " + DOUBLE_FORMAT.format(sn.getMin()) + "\n" + - "Max = " + DOUBLE_FORMAT.format(sn.getMax()) + "\n" + - "StdDev = " + DOUBLE_FORMAT.format(sn.getStdDev()) + "\n" + - "50th = " + DOUBLE_FORMAT.format(sn.getMedian()) + "\n" + - "75th = " + DOUBLE_FORMAT.format(sn.get75thPercentile()) + "\n" + - "95th = " + DOUBLE_FORMAT.format(sn.get95thPercentile()) + "\n" + - "99th = " + DOUBLE_FORMAT.format(sn.get99thPercentile()) + "\n" + - "99.9th = " + DOUBLE_FORMAT.format(sn.get999thPercentile()) + "\n" + - "99.99th = " + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + "\n" + - "99.999th = " + DOUBLE_FORMAT.format(sn.getValue(0.99999)); + return "Mean = " + DOUBLE_FORMAT.format(sn.getMean()) + "\n" + "Min = " + + DOUBLE_FORMAT.format(sn.getMin()) + "\n" + "Max = " + + DOUBLE_FORMAT.format(sn.getMax()) + "\n" + "StdDev = " + + DOUBLE_FORMAT.format(sn.getStdDev()) + "\n" + "50th = " + + DOUBLE_FORMAT.format(sn.getMedian()) + "\n" + "75th = " + + DOUBLE_FORMAT.format(sn.get75thPercentile()) + "\n" + "95th = " + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + "\n" + "99th = " + + DOUBLE_FORMAT.format(sn.get99thPercentile()) + "\n" + "99.9th = " + + DOUBLE_FORMAT.format(sn.get999thPercentile()) + "\n" + "99.99th = " + + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + "\n" + "99.999th = " + + DOUBLE_FORMAT.format(sn.getValue(0.99999)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index 78ef55ca2c5..173c202e2d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
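// Quick usage sketch for the histogram helpers reformatted above: build a Histogram through the
// reflective factory and print the abbreviated report. UniformReservoir is just one convenient
// Reservoir implementation from the metrics library; the sample values are arbitrary.
import com.codahale.metrics.Histogram;
import com.codahale.metrics.UniformReservoir;
import org.apache.hadoop.hbase.util.YammerHistogramUtils;

public class HistogramReportSketch {
  public static void main(String[] args) {
    Histogram latencies = YammerHistogramUtils.newHistogram(new UniformReservoir());
    for (long v = 1; v <= 1000; v++) {
      latencies.update(v);
    }
    // Prints mean/min/max/stdDev plus the 95th and 99th percentiles on one line.
    System.out.println(YammerHistogramUtils.getShortHistogramReport(latencies));
  }
}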
See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,9 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -35,6 +32,9 @@ import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; + /** * Utlity method to migrate zookeeper data across HBase versions. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. @@ -45,43 +45,40 @@ public class ZKDataMigrator { private static final Logger LOG = LoggerFactory.getLogger(ZKDataMigrator.class); // Shutdown constructor. - private ZKDataMigrator() {} + private ZKDataMigrator() { + } /** - * Method for table states migration. - * Used when upgrading from pre-2.0 to 2.0 - * Reading state from zk, applying them to internal state - * and delete. - * Used by master to clean migration from zk based states to - * table descriptor based states. + * Method for table states migration. Used when upgrading from pre-2.0 to 2.0 Reading state from + * zk, applying them to internal state and delete. Used by master to clean migration from zk based + * states to table descriptor based states. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. */ @Deprecated public static Map queryForTableStates(ZKWatcher zkw) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { Map rv = new HashMap<>(); List children = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().tableZNode); - if (children == null) - return rv; - for (String child: children) { + if (children == null) return rv; + for (String child : children) { TableName tableName = TableName.valueOf(child); ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName); TableState.State newState = TableState.State.ENABLED; if (state != null) { switch (state) { - case ENABLED: - newState = TableState.State.ENABLED; - break; - case DISABLED: - newState = TableState.State.DISABLED; - break; - case DISABLING: - newState = TableState.State.DISABLING; - break; - case ENABLING: - newState = TableState.State.ENABLING; - break; - default: + case ENABLED: + newState = TableState.State.ENABLED; + break; + case DISABLED: + newState = TableState.State.DISABLED; + break; + case DISABLING: + newState = TableState.State.DISABLING; + break; + case ENABLING: + newState = TableState.State.ENABLING; + break; + default: } } rv.put(tableName, newState); @@ -91,26 +88,23 @@ public class ZKDataMigrator { /** * Gets table state from ZK. - * @param zkw ZKWatcher instance to use + * @param zkw ZKWatcher instance to use * @param tableName table we're checking * @return Null or - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State} - * found in znode. - * @throws KeeperException - * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. 
+ * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State} + * found in znode. n * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. */ @Deprecated - private static ZooKeeperProtos.DeprecatedTableState.State getTableState( - final ZKWatcher zkw, final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, - tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); + private static ZooKeeperProtos.DeprecatedTableState.State getTableState(final ZKWatcher zkw, + final TableName tableName) throws KeeperException, InterruptedException { + String znode = + ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, tableName.getNameAsString()); + byte[] data = ZKUtil.getData(zkw, znode); if (data == null || data.length <= 0) return null; try { ProtobufUtil.expectPBMagicPrefix(data); ZooKeeperProtos.DeprecatedTableState.Builder builder = - ZooKeeperProtos.DeprecatedTableState.newBuilder(); + ZooKeeperProtos.DeprecatedTableState.newBuilder(); int magicLen = ProtobufUtil.lengthOfPBMagic(); ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen); return builder.getState(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java index 9be182d245f..ee8517739e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -126,9 +126,9 @@ class ClusterCompactionQueues { lock.readLock().lock(); try { return compactionQueues.entrySet().stream() - .filter(entry -> !compactingServers.contains(entry.getKey())) - .max(Map.Entry.comparingByValue( - (o1, o2) -> Integer.compare(o1.size(), o2.size()))).map(Map.Entry::getKey); + .filter(entry -> !compactingServers.contains(entry.getKey())) + .max(Map.Entry.comparingByValue((o1, o2) -> Integer.compare(o1.size(), o2.size()))) + .map(Map.Entry::getKey); } finally { lock.readLock().unlock(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java index 2112b97c741..31aded84109 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,16 +52,14 @@ class MajorCompactionRequest { this.region = region; } - MajorCompactionRequest(Connection connection, RegionInfo region, - Set stores) { + MajorCompactionRequest(Connection connection, RegionInfo region, Set stores) { this(connection, region); this.stores = stores; } static Optional newRequest(Connection connection, RegionInfo info, - Set stores, long timestamp) throws IOException { - MajorCompactionRequest request = - new MajorCompactionRequest(connection, info, stores); + Set stores, long timestamp) throws IOException { + MajorCompactionRequest request = new MajorCompactionRequest(connection, info, stores); return request.createRequest(connection, stores, timestamp); } @@ -77,8 +75,8 @@ class MajorCompactionRequest { this.stores = stores; } - Optional createRequest(Connection connection, - Set stores, long timestamp) throws IOException { + Optional createRequest(Connection connection, Set stores, + long timestamp) throws IOException { Set familiesToCompact = getStoresRequiringCompaction(stores, timestamp); MajorCompactionRequest request = null; if (!familiesToCompact.isEmpty()) { @@ -88,7 +86,7 @@ class MajorCompactionRequest { } Set getStoresRequiringCompaction(Set requestedStores, long timestamp) - throws IOException { + throws IOException { HRegionFileSystem fileSystem = getFileSystem(); Set familiesToCompact = Sets.newHashSet(); for (String family : requestedStores) { @@ -100,37 +98,36 @@ class MajorCompactionRequest { } boolean shouldCFBeCompacted(HRegionFileSystem fileSystem, String family, long ts) - throws IOException { + throws IOException { // do we have any store files? Collection storeFiles = fileSystem.getStoreFiles(family); if (storeFiles == null) { - LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName(), " has no store files"); + LOG.info("Excluding store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName(), " has no store files"); return false; } // check for reference files if (fileSystem.hasReferences(family) && familyHasReferenceFile(fileSystem, family, ts)) { LOG.info("Including store: " + family + " with: " + storeFiles.size() - + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); + + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); return true; } // check store file timestamps boolean includeStore = this.shouldIncludeStore(fileSystem, family, storeFiles, ts); if (!includeStore) { - LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName() + " already compacted"); + LOG.info("Excluding store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName() + " already compacted"); } return includeStore; } protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family, - Collection storeFiles, long ts) throws IOException { + Collection storeFiles, long ts) throws IOException { for (StoreFileInfo storeFile : storeFiles) { if (storeFile.getModificationTime() < ts) { LOG.info("Including store: " + family + " with: " + storeFiles.size() - + " files for compaction for region: " - + fileSystem.getRegionInfo().getEncodedName()); + + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); return true; } } @@ -138,14 +135,14 @@ class MajorCompactionRequest { } protected 
boolean familyHasReferenceFile(HRegionFileSystem fileSystem, String family, long ts) - throws IOException { + throws IOException { List referenceFiles = - getReferenceFilePaths(fileSystem.getFileSystem(), fileSystem.getStoreDir(family)); + getReferenceFilePaths(fileSystem.getFileSystem(), fileSystem.getStoreDir(family)); for (Path referenceFile : referenceFiles) { FileStatus status = fileSystem.getFileSystem().getFileLinkStatus(referenceFile); if (status.getModificationTime() < ts) { - LOG.info("Including store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName() + " (reference store files)"); + LOG.info("Including store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName() + " (reference store files)"); return true; } } @@ -153,17 +150,16 @@ class MajorCompactionRequest { } - List getReferenceFilePaths(FileSystem fileSystem, Path familyDir) - throws IOException { + List getReferenceFilePaths(FileSystem fileSystem, Path familyDir) throws IOException { return FSUtils.getReferenceFilePaths(fileSystem, familyDir); } HRegionFileSystem getFileSystem() throws IOException { try (Admin admin = connection.getAdmin()) { return HRegionFileSystem.openRegionFromFileSystem(admin.getConfiguration(), - CommonFSUtils.getCurrentFileSystem(admin.getConfiguration()), - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(admin.getConfiguration()), - region.getTable()), region, true); + CommonFSUtils.getCurrentFileSystem(admin.getConfiguration()), CommonFSUtils.getTableDir( + CommonFSUtils.getRootDir(admin.getConfiguration()), region.getTable()), + region, true); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java index a12fa71080c..c84c01dbad2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -49,13 +48,13 @@ public class MajorCompactionTTLRequest extends MajorCompactionRequest { } static Optional newRequest(Connection connection, RegionInfo info, - TableDescriptor htd) throws IOException { + TableDescriptor htd) throws IOException { MajorCompactionTTLRequest request = new MajorCompactionTTLRequest(connection, info); return request.createRequest(connection, htd); } private Optional createRequest(Connection connection, TableDescriptor htd) - throws IOException { + throws IOException { Map familiesToCompact = getStoresRequiringCompaction(htd); MajorCompactionRequest request = null; if (!familiesToCompact.isEmpty()) { @@ -88,15 +87,14 @@ public class MajorCompactionTTLRequest extends MajorCompactionRequest { @Override protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family, - Collection storeFiles, long ts) throws IOException { + Collection storeFiles, long ts) throws IOException { for (StoreFileInfo storeFile : storeFiles) { // Lets only compact when all files are older than TTL if (storeFile.getModificationTime() >= ts) { LOG.info("There is atleast one file in store: " + family + " file: " + storeFile.getPath() - + " with timestamp " + storeFile.getModificationTime() - + " for region: " + fileSystem.getRegionInfo().getEncodedName() - + " older than TTL: " + ts); + + " with timestamp " + storeFile.getModificationTime() + " for region: " + + fileSystem.getRegionInfo().getEncodedName() + " older than TTL: " + ts); return false; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java index b8c8626d818..a987bef3340 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -86,7 +87,7 @@ public class MajorCompactor extends Configured implements Tool { } public MajorCompactor(Configuration conf, TableName tableName, Set storesToCompact, - int concurrency, long timestamp, long sleepForMs) throws IOException { + int concurrency, long timestamp, long sleepForMs) throws IOException { this.connection = ConnectionFactory.createConnection(conf); this.tableName = tableName; this.timestamp = timestamp; @@ -104,7 +105,7 @@ public class MajorCompactor extends Configured implements Tool { Thread.sleep(sleepForMs); } Optional serverToProcess = - clusterCompactionQueues.getLargestQueueFromServersNotCompacting(); + clusterCompactionQueues.getLargestQueueFromServersNotCompacting(); if (serverToProcess.isPresent() && clusterCompactionQueues.hasWorkItems()) { ServerName serverName = serverToProcess.get(); // check to see if the region has moved... if so we have to enqueue it again with @@ -112,18 +113,18 @@ public class MajorCompactor extends Configured implements Tool { MajorCompactionRequest request = clusterCompactionQueues.reserveForCompaction(serverName); ServerName currentServer = connection.getRegionLocator(tableName) - .getRegionLocation(request.getRegion().getStartKey()).getServerName(); + .getRegionLocation(request.getRegion().getStartKey()).getServerName(); if (!currentServer.equals(serverName)) { // add it back to the queue with the correct server it should be picked up in the future. LOG.info("Server changed for region: " + request.getRegion().getEncodedName() + " from: " - + serverName + " to: " + currentServer + " re-queuing request"); + + serverName + " to: " + currentServer + " re-queuing request"); clusterCompactionQueues.addToCompactionQueue(currentServer, request); clusterCompactionQueues.releaseCompaction(serverName); } else { LOG.info("Firing off compaction request for server: " + serverName + ", " + request - + " total queue size left: " + clusterCompactionQueues - .getCompactionRequestsLeftToFinish()); + + " total queue size left: " + + clusterCompactionQueues.getCompactionRequestsLeftToFinish()); futures.add(executor.submit(new Compact(serverName, request))); } } else { @@ -143,11 +144,10 @@ public class MajorCompactor extends Configured implements Tool { executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); if (!ERRORS.isEmpty()) { - StringBuilder builder = - new StringBuilder().append("Major compaction failed, there were: ").append(ERRORS.size()) - .append(" regions / stores that failed compacting\n") - .append("Failed compaction requests\n").append("--------------------------\n") - .append(Joiner.on("\n").join(ERRORS)); + StringBuilder builder = new StringBuilder().append("Major compaction failed, there were: ") + .append(ERRORS.size()).append(" regions / stores that failed compacting\n") + .append("Failed compaction requests\n").append("--------------------------\n") + .append(Joiner.on("\n").join(ERRORS)); LOG.error(builder.toString()); } if (connection != null) { @@ -160,25 +160,25 @@ public class MajorCompactor extends Configured implements Tool { void initializeWorkQueues() throws IOException { if (storesToCompact.isEmpty()) { connection.getTable(tableName).getDescriptor().getColumnFamilyNames() - .forEach(a -> storesToCompact.add(Bytes.toString(a))); + .forEach(a -> storesToCompact.add(Bytes.toString(a))); LOG.info("No family specified, will execute for all families"); } LOG.info( - "Initializing compaction queues for table: " 
+ tableName + " with cf: " + storesToCompact); + "Initializing compaction queues for table: " + tableName + " with cf: " + storesToCompact); Map> snRegionMap = getServerRegionsMap(); /* - * If numservers is specified, stop inspecting regions beyond the numservers, it will serve - * to throttle and won't end up scanning all the regions in the event there are not many - * regions to compact based on the criteria. + * If numservers is specified, stop inspecting regions beyond the numservers, it will serve to + * throttle and won't end up scanning all the regions in the event there are not many regions to + * compact based on the criteria. */ for (ServerName sn : getServersToCompact(snRegionMap.keySet())) { List regions = snRegionMap.get(sn); LOG.debug("Table: " + tableName + " Server: " + sn + " No of regions: " + regions.size()); /* - * If the tool is run periodically, then we could shuffle the regions and provide - * some random order to select regions. Helps if numregions is specified. + * If the tool is run periodically, then we could shuffle the regions and provide some random + * order to select regions. Helps if numregions is specified. */ Collections.shuffle(regions); int regionsToCompact = numRegions; @@ -201,12 +201,12 @@ public class MajorCompactor extends Configured implements Tool { } protected Optional getMajorCompactionRequest(RegionInfo hri) - throws IOException { + throws IOException { return MajorCompactionRequest.newRequest(connection, hri, storesToCompact, timestamp); } private Collection getServersToCompact(Set snSet) { - if(numServers < 0 || snSet.size() <= numServers) { + if (numServers < 0 || snSet.size() <= numServers) { return snSet; } else { @@ -219,7 +219,7 @@ public class MajorCompactor extends Configured implements Tool { private Map> getServerRegionsMap() throws IOException { Map> snRegionMap = Maps.newHashMap(); List regionLocations = - connection.getRegionLocator(tableName).getAllRegionLocations(); + connection.getRegionLocator(tableName).getAllRegionLocations(); for (HRegionLocation regionLocation : regionLocations) { ServerName sn = regionLocation.getServerName(); RegionInfo hri = regionLocation.getRegion(); @@ -253,7 +253,8 @@ public class MajorCompactor extends Configured implements Tool { this.request = request; } - @Override public void run() { + @Override + public void run() { try { compactAndWait(request); } catch (NotServingRegionException e) { @@ -290,15 +291,15 @@ public class MajorCompactor extends Configured implements Tool { if (!skipWait) { while (isCompacting(request)) { Thread.sleep(sleepForMs); - LOG.debug("Waiting for compaction to complete for region: " + request.getRegion() - .getEncodedName()); + LOG.debug("Waiting for compaction to complete for region: " + + request.getRegion().getEncodedName()); } } } finally { if (!skipWait) { // Make sure to wait for the CompactedFileDischarger chore to do its work int waitForArchive = connection.getConfiguration() - .getInt("hbase.hfile.compaction.discharger.interval", 2 * 60 * 1000); + .getInt("hbase.hfile.compaction.discharger.interval", 2 * 60 * 1000); Thread.sleep(waitForArchive); // check if compaction completed successfully, otherwise put that request back in the // proper queue @@ -308,52 +309,50 @@ public class MajorCompactor extends Configured implements Tool { // the new regionserver doesn't pick it up because its accounted for in the WAL replay, // thus you have more store files on the filesystem than the regionserver knows about. 
boolean regionHasNotMoved = connection.getRegionLocator(tableName) - .getRegionLocation(request.getRegion().getStartKey()).getServerName() - .equals(serverName); + .getRegionLocation(request.getRegion().getStartKey()).getServerName() + .equals(serverName); if (regionHasNotMoved) { LOG.error( - "Not all store files were compacted, this may be due to the regionserver not " - + "being aware of all store files. Will not reattempt compacting, " - + request); + "Not all store files were compacted, this may be due to the regionserver not " + + "being aware of all store files. Will not reattempt compacting, " + request); ERRORS.add(request); } else { request.setStores(storesRequiringCompaction); clusterCompactionQueues.addToCompactionQueue(serverName, request); LOG.info("Compaction failed for the following stores: " + storesRequiringCompaction - + " region: " + request.getRegion().getEncodedName()); + + " region: " + request.getRegion().getEncodedName()); } } else { LOG.info("Compaction complete for region: " + request.getRegion().getEncodedName() - + " -> cf(s): " + request.getStores()); + + " -> cf(s): " + request.getStores()); } } } } private void compactRegionOnServer(MajorCompactionRequest request, Admin admin, String store) - throws IOException { - admin.majorCompactRegion(request.getRegion().getEncodedNameAsBytes(), - Bytes.toBytes(store)); + throws IOException { + admin.majorCompactRegion(request.getRegion().getEncodedNameAsBytes(), Bytes.toBytes(store)); } } private boolean isCompacting(MajorCompactionRequest request) throws Exception { CompactionState compactionState = connection.getAdmin() - .getCompactionStateForRegion(request.getRegion().getEncodedNameAsBytes()); - return compactionState.equals(CompactionState.MAJOR) || compactionState - .equals(CompactionState.MAJOR_AND_MINOR); + .getCompactionStateForRegion(request.getRegion().getEncodedNameAsBytes()); + return compactionState.equals(CompactionState.MAJOR) + || compactionState.equals(CompactionState.MAJOR_AND_MINOR); } private void addNewRegions() { try { List locations = - connection.getRegionLocator(tableName).getAllRegionLocations(); + connection.getRegionLocator(tableName).getAllRegionLocations(); for (HRegionLocation location : locations) { if (location.getRegion().getRegionId() > timestamp) { Optional compactionRequest = MajorCompactionRequest - .newRequest(connection, location.getRegion(), storesToCompact, timestamp); + .newRequest(connection, location.getRegion(), storesToCompact, timestamp); compactionRequest.ifPresent(request -> clusterCompactionQueues - .addToCompactionQueue(location.getServerName(), request)); + .addToCompactionQueue(location.getServerName(), request)); } } } catch (IOException e) { @@ -362,7 +361,7 @@ public class MajorCompactor extends Configured implements Tool { } protected Set getStoresRequiringCompaction(MajorCompactionRequest request) - throws IOException { + throws IOException { return request.getStoresRequiringCompaction(storesToCompact, timestamp); } @@ -370,104 +369,48 @@ public class MajorCompactor extends Configured implements Tool { Options options = new Options(); options.addOption( - Option.builder("servers") - .required() - .desc("Concurrent servers compacting") - .hasArg() - .build() - ); + Option.builder("servers").required().desc("Concurrent servers compacting").hasArg().build()); + options.addOption(Option.builder("minModTime") + .desc("Compact if store files have modification time < minModTime").hasArg().build()); + options.addOption(Option.builder("zk").optionalArg(true).desc("zk 
quorum").hasArg().build()); options.addOption( - Option.builder("minModTime"). - desc("Compact if store files have modification time < minModTime") - .hasArg() - .build() - ); - options.addOption( - Option.builder("zk") - .optionalArg(true) - .desc("zk quorum") - .hasArg() - .build() - ); - options.addOption( - Option.builder("rootDir") - .optionalArg(true) - .desc("hbase.rootDir") - .hasArg() - .build() - ); - options.addOption( - Option.builder("sleep") - .desc("Time to sleepForMs (ms) for checking compaction status per region and available " - + "work queues: default 30s") - .hasArg() - .build() - ); - options.addOption( - Option.builder("retries") - .desc("Max # of retries for a compaction request," + " defaults to 3") - .hasArg() - .build() - ); - options.addOption( - Option.builder("dryRun") - .desc("Dry run, will just output a list of regions that require compaction based on " - + "parameters passed") - .hasArg(false) - .build() - ); + Option.builder("rootDir").optionalArg(true).desc("hbase.rootDir").hasArg().build()); + options.addOption(Option.builder("sleep") + .desc("Time to sleepForMs (ms) for checking compaction status per region and available " + + "work queues: default 30s") + .hasArg().build()); + options.addOption(Option.builder("retries") + .desc("Max # of retries for a compaction request," + " defaults to 3").hasArg().build()); + options.addOption(Option.builder("dryRun") + .desc("Dry run, will just output a list of regions that require compaction based on " + + "parameters passed") + .hasArg(false).build()); - options.addOption( - Option.builder("skipWait") - .desc("Skip waiting after triggering compaction.") - .hasArg(false) - .build() - ); + options.addOption(Option.builder("skipWait").desc("Skip waiting after triggering compaction.") + .hasArg(false).build()); - options.addOption( - Option.builder("numservers") - .optionalArg(true) - .desc("Number of servers to compact in this run, defaults to all") - .hasArg() - .build() - ); + options.addOption(Option.builder("numservers").optionalArg(true) + .desc("Number of servers to compact in this run, defaults to all").hasArg().build()); - options.addOption( - Option.builder("numregions") - .optionalArg(true) - .desc("Number of regions to compact per server, defaults to all") - .hasArg() - .build() - ); + options.addOption(Option.builder("numregions").optionalArg(true) + .desc("Number of regions to compact per server, defaults to all").hasArg().build()); return options; } @Override public int run(String[] args) throws Exception { Options options = getCommonOptions(); - options.addOption( - Option.builder("table") - .required() - .desc("table name") - .hasArg() - .build() - ); - options.addOption( - Option.builder("cf") - .optionalArg(true) - .desc("column families: comma separated eg: a,b,c") - .hasArg() - .build() - ); + options.addOption(Option.builder("table").required().desc("table name").hasArg().build()); + options.addOption(Option.builder("cf").optionalArg(true) + .desc("column families: comma separated eg: a,b,c").hasArg().build()); final CommandLineParser cmdLineParser = new DefaultParser(); CommandLine commandLine = null; try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println( - "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: " - + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } @@ 
-485,11 +428,10 @@ public class MajorCompactor extends Configured implements Tool { Configuration configuration = getConf(); int concurrency = Integer.parseInt(commandLine.getOptionValue("servers")); - long minModTime = Long.parseLong( - commandLine.getOptionValue("minModTime", - String.valueOf(EnvironmentEdgeManager.currentTime()))); + long minModTime = Long.parseLong(commandLine.getOptionValue("minModTime", + String.valueOf(EnvironmentEdgeManager.currentTime()))); String quorum = - commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM)); + commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM)); String rootDir = commandLine.getOptionValue("rootDir", configuration.get(HConstants.HBASE_DIR)); long sleep = Long.parseLong(commandLine.getOptionValue("sleep", Long.toString(30000))); @@ -499,9 +441,8 @@ public class MajorCompactor extends Configured implements Tool { configuration.set(HConstants.HBASE_DIR, rootDir); configuration.set(HConstants.ZOOKEEPER_QUORUM, quorum); - MajorCompactor compactor = - new MajorCompactor(configuration, TableName.valueOf(tableName), families, concurrency, - minModTime, sleep); + MajorCompactor compactor = new MajorCompactor(configuration, TableName.valueOf(tableName), + families, concurrency, minModTime, sleep); compactor.setNumServers(numServers); compactor.setNumRegions(numRegions); compactor.setSkipWait(commandLine.hasOption("skipWait")); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java index c6ea5af7e13..c21595ad22a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -53,13 +52,13 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MajorCompactorTTL extends MajorCompactor { - private static final Logger LOG = LoggerFactory.getLogger(MajorCompactorTTL .class); + private static final Logger LOG = LoggerFactory.getLogger(MajorCompactorTTL.class); private TableDescriptor htd; @InterfaceAudience.Private public MajorCompactorTTL(Configuration conf, TableDescriptor htd, int concurrency, - long sleepForMs) throws IOException { + long sleepForMs) throws IOException { this.connection = ConnectionFactory.createConnection(conf); this.htd = htd; this.tableName = htd.getTableName(); @@ -75,19 +74,18 @@ public class MajorCompactorTTL extends MajorCompactor { @Override protected Optional getMajorCompactionRequest(RegionInfo hri) - throws IOException { + throws IOException { return MajorCompactionTTLRequest.newRequest(connection, hri, htd); } @Override protected Set getStoresRequiringCompaction(MajorCompactionRequest request) - throws IOException { - return ((MajorCompactionTTLRequest)request).getStoresRequiringCompaction(htd).keySet(); + throws IOException { + return ((MajorCompactionTTLRequest) request).getStoresRequiringCompaction(htd).keySet(); } - public int compactRegionsTTLOnTable(Configuration conf, String table, int concurrency, - long sleep, int numServers, int numRegions, boolean dryRun, boolean skipWait) - throws Exception { + public int compactRegionsTTLOnTable(Configuration conf, String table, int concurrency, long sleep, + int numServers, int numRegions, boolean dryRun, boolean skipWait) throws Exception { Connection conn = ConnectionFactory.createConnection(conf); TableName tableName = TableName.valueOf(table); @@ -124,13 +122,7 @@ public class MajorCompactorTTL extends MajorCompactor { private Options getOptions() { Options options = getCommonOptions(); - options.addOption( - Option.builder("table") - .required() - .desc("table name") - .hasArg() - .build() - ); + options.addOption(Option.builder("table").required().desc("table name").hasArg().build()); return options; } @@ -144,9 +136,8 @@ public class MajorCompactorTTL extends MajorCompactor { try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println( - "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: " - + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } @@ -165,7 +156,7 @@ public class MajorCompactorTTL extends MajorCompactor { boolean skipWait = commandLine.hasOption("skipWait"); return compactRegionsTTLOnTable(HBaseConfiguration.create(), table, concurrency, sleep, - numServers, numRegions, dryRun, skipWait); + numServers, numRegions, dryRun, skipWait); } public static void main(String[] args) throws Exception { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java index 6a6c530c3b6..99bbb2af544 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,14 +50,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * This class marches through all of the region's hfiles and verifies that - * they are all valid files. One just needs to instantiate the class, use - * checkTables(List<Path>) and then retrieve the corrupted hfiles (and - * quarantined files if in quarantining mode) - * - * The implementation currently parallelizes at the regionDir level. + * This class marches through all of the region's hfiles and verifies that they are all valid files. + * One just needs to instantiate the class, use checkTables(List<Path>) and then retrieve the + * corrupted hfiles (and quarantined files if in quarantining mode) The implementation currently + * parallelizes at the regionDir level. */ @InterfaceAudience.Private public class HFileCorruptionChecker { @@ -79,8 +76,8 @@ public class HFileCorruptionChecker { final AtomicInteger hfilesChecked = new AtomicInteger(); final AtomicInteger mobFilesChecked = new AtomicInteger(); - public HFileCorruptionChecker(Configuration conf, ExecutorService executor, - boolean quarantine) throws IOException { + public HFileCorruptionChecker(Configuration conf, ExecutorService executor, boolean quarantine) + throws IOException { this.conf = conf; this.fs = FileSystem.get(conf); this.cacheConf = CacheConfig.DISABLED; @@ -89,12 +86,8 @@ public class HFileCorruptionChecker { } /** - * Checks a path to see if it is a valid hfile. - * - * @param p - * full Path to an HFile - * @throws IOException - * This is a connectivity related exception + * Checks a path to see if it is a valid hfile. n * full Path to an HFile n * This is a + * connectivity related exception */ protected void checkHFile(Path p) throws IOException { HFile.Reader r = null; @@ -107,7 +100,7 @@ public class HFileCorruptionChecker { Path dest = createQuarantinePath(p); LOG.warn("Quarantining corrupt HFile " + p + " into " + dest); boolean success = fs.mkdirs(dest.getParent()); - success = success ? fs.rename(p, dest): false; + success = success ? fs.rename(p, dest) : false; if (!success) { failures.add(p); } else { @@ -127,12 +120,8 @@ public class HFileCorruptionChecker { } /** - * Given a path, generates a new path to where we move a corrupted hfile (bad - * trailer, no trailer). - * - * @param hFile - * Path to a corrupt hfile (assumes that it is HBASE_DIR/ table - * /region/cf/file) + * Given a path, generates a new path to where we move a corrupted hfile (bad trailer, no + * trailer). n * Path to a corrupt hfile (assumes that it is HBASE_DIR/ table /region/cf/file) * @return path to where corrupted files are stored. This should be * HBASE_DIR/.corrupt/table/region/cf/file. */ @@ -155,11 +144,7 @@ public class HFileCorruptionChecker { } /** - * Check all files in a column family dir. - * - * @param cfDir - * column family directory - * @throws IOException + * Check all files in a column family dir. n * column family directory n */ protected void checkColFamDir(Path cfDir) throws IOException { FileStatus[] statuses = null; @@ -167,8 +152,8 @@ public class HFileCorruptionChecker { statuses = fs.listStatus(cfDir); // use same filter as scanner. } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Colfam Directory " + cfDir + - " does not exist. 
Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Colfam Directory " + cfDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(cfDir); return; } @@ -176,8 +161,8 @@ public class HFileCorruptionChecker { List hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.isEmpty() && !fs.exists(cfDir)) { - LOG.warn("Colfam Directory " + cfDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Colfam Directory " + cfDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(cfDir); return; } @@ -191,11 +176,7 @@ public class HFileCorruptionChecker { } /** - * Check all files in a mob column family dir. - * - * @param cfDir - * mob column family directory - * @throws IOException + * Check all files in a mob column family dir. n * mob column family directory n */ protected void checkMobColFamDir(Path cfDir) throws IOException { FileStatus[] statuses = null; @@ -203,8 +184,8 @@ public class HFileCorruptionChecker { statuses = fs.listStatus(cfDir); // use same filter as scanner. } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Mob colfam Directory " + cfDir + - " does not exist. Likely the table is deleted. Skipping."); + LOG.warn("Mob colfam Directory " + cfDir + + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(cfDir); return; } @@ -212,8 +193,8 @@ public class HFileCorruptionChecker { List hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.isEmpty() && !fs.exists(cfDir)) { - LOG.warn("Mob colfam Directory " + cfDir + - " does not exist. Likely the table is deleted. Skipping."); + LOG.warn("Mob colfam Directory " + cfDir + + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(cfDir); return; } @@ -227,12 +208,8 @@ public class HFileCorruptionChecker { } /** - * Checks a path to see if it is a valid mob file. - * - * @param p - * full Path to a mob file. - * @throws IOException - * This is a connectivity related exception + * Checks a path to see if it is a valid mob file. n * full Path to a mob file. n * This is a + * connectivity related exception */ protected void checkMobFile(Path p) throws IOException { HFile.Reader r = null; @@ -245,7 +222,7 @@ public class HFileCorruptionChecker { Path dest = createQuarantinePath(p); LOG.warn("Quarantining corrupt mob file " + p + " into " + dest); boolean success = fs.mkdirs(dest.getParent()); - success = success ? fs.rename(p, dest): false; + success = success ? fs.rename(p, dest) : false; if (!success) { failureMobFiles.add(p); } else { @@ -266,8 +243,7 @@ public class HFileCorruptionChecker { /** * Checks all the mob files of a table. - * @param regionDir The mob region directory - * @throws IOException + * @param regionDir The mob region directory n */ private void checkMobRegionDir(Path regionDir) throws IOException { if (!fs.exists(regionDir)) { @@ -278,16 +254,16 @@ public class HFileCorruptionChecker { hfs = fs.listStatus(regionDir, new FamilyDirFilter(fs)); } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Mob directory " + regionDir - + " does not exist. Likely the table is deleted. 
Skipping."); + LOG.warn( + "Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(regionDir); return; } // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.length == 0 && !fs.exists(regionDir)) { - LOG.warn("Mob directory " + regionDir - + " does not exist. Likely the table is deleted. Skipping."); + LOG.warn( + "Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(regionDir); return; } @@ -301,11 +277,7 @@ public class HFileCorruptionChecker { } /** - * Check all column families in a region dir. - * - * @param regionDir - * region directory - * @throws IOException + * Check all column families in a region dir. n * region directory n */ protected void checkRegionDir(Path regionDir) throws IOException { FileStatus[] statuses = null; @@ -313,8 +285,8 @@ public class HFileCorruptionChecker { statuses = fs.listStatus(regionDir); } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Region Directory " + regionDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Region Directory " + regionDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(regionDir); return; } @@ -322,8 +294,8 @@ public class HFileCorruptionChecker { List cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (cfs.isEmpty() && !fs.exists(regionDir)) { - LOG.warn("Region Directory " + regionDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Region Directory " + regionDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(regionDir); return; } @@ -337,24 +309,22 @@ public class HFileCorruptionChecker { } /** - * Check all the regiondirs in the specified tableDir - * - * @param tableDir - * path to a table - * @throws IOException + * Check all the regiondirs in the specified tableDir n * path to a table n */ void checkTableDir(Path tableDir) throws IOException { - List rds = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); + List rds = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (rds == null) { if (!fs.exists(tableDir)) { - LOG.warn("Table Directory " + tableDir + - " does not exist. Likely due to concurrent delete. Skipping."); + LOG.warn("Table Directory " + tableDir + + " does not exist. Likely due to concurrent delete. Skipping."); missing.add(tableDir); } return; } - LOG.info("Checking Table Directory {}. Number of entries (including mob) = {}", tableDir, rds.size() + 1); + LOG.info("Checking Table Directory {}. 
Number of entries (including mob) = {}", tableDir, + rds.size() + 1); // Parallelize check at the region dir level List rdcs = new ArrayList<>(rds.size() + 1); @@ -382,8 +352,8 @@ public class HFileCorruptionChecker { try { f.get(); } catch (ExecutionException e) { - LOG.warn("Failed to quarantine an HFile in regiondir " - + rdcs.get(i).regionDir, e.getCause()); + LOG.warn("Failed to quarantine an HFile in regiondir " + rdcs.get(i).regionDir, + e.getCause()); // rethrow IOExceptions if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); @@ -407,8 +377,8 @@ public class HFileCorruptionChecker { } /** - * An individual work item for parallelized regiondir processing. This is - * intentionally an inner class so it can use the shared error sets and fs. + * An individual work item for parallelized regiondir processing. This is intentionally an inner + * class so it can use the shared error sets and fs. */ private class RegionDirChecker implements Callable { final Path regionDir; @@ -425,8 +395,8 @@ public class HFileCorruptionChecker { } /** - * An individual work item for parallelized mob dir processing. This is - * intentionally an inner class so it can use the shared error sets and fs. + * An individual work item for parallelized mob dir processing. This is intentionally an inner + * class so it can use the shared error sets and fs. */ private class MobRegionDirChecker extends RegionDirChecker { @@ -490,8 +460,8 @@ public class HFileCorruptionChecker { } /** - * @return the set of paths that were missing. Likely due to deletion/moves from - * compaction or flushes. + * @return the set of paths that were missing. Likely due to deletion/moves from compaction or + * flushes. */ public Collection getMissing() { return new HashSet<>(missing); @@ -526,16 +496,15 @@ public class HFileCorruptionChecker { } /** - * @return the set of paths that were missing. Likely due to table deletion or - * deletion/moves from compaction. + * @return the set of paths that were missing. Likely due to table deletion or deletion/moves from + * compaction. */ public Collection getMissedMobFiles() { return new HashSet<>(missedMobFiles); } /** - * Print a human readable summary of hfile quarantining operations. - * @param out + * Print a human readable summary of hfile quarantining operations. n */ public void report(HbckErrorReporter out) { out.print("Checked " + hfilesChecked.get() + " hfile for corruption"); @@ -556,8 +525,7 @@ public class HFileCorruptionChecker { } String initialState = (corrupted.isEmpty()) ? "OK" : "CORRUPTED"; - String fixedState = (corrupted.size() == quarantined.size()) ? "OK" - : "CORRUPTED"; + String fixedState = (corrupted.size() == quarantined.size()) ? "OK" : "CORRUPTED"; // print mob-related report out.print("Checked " + mobFilesChecked.get() + " Mob files for corruption"); @@ -577,8 +545,8 @@ public class HFileCorruptionChecker { out.print(" " + mq); } String initialMobState = (corruptedMobFiles.isEmpty()) ? "OK" : "CORRUPTED"; - String fixedMobState = (corruptedMobFiles.size() == quarantinedMobFiles.size()) ? "OK" - : "CORRUPTED"; + String fixedMobState = + (corruptedMobFiles.size() == quarantinedMobFiles.size()) ? 
"OK" : "CORRUPTED"; if (inQuarantineMode) { out.print("Summary: " + initialState + " => " + fixedState); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java index 4e9b53e2fd1..1050dd82bfa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,8 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * @deprecated Since 2.0.0. Will be removed in 3.0.0. We've deprecated this tool in hbase-2+ - * because it destroyed the hbase2 meta table. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. We've deprecated this tool in hbase-2+ because + * it destroyed the hbase2 meta table. */ @Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -31,7 +31,8 @@ import org.apache.yetus.audience.InterfaceStability; public final class OfflineMetaRepair { // Private constructor included here to avoid checkstyle warnings - private OfflineMetaRepair() {} + private OfflineMetaRepair() { + } public static void main(String[] args) throws Exception { System.err.println("This tool is no longer supported in HBase-2+." diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java index 7203fd103bb..234daef85b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class ReplicationChecker { public boolean hasUnDeletedQueues() { return errorReporter.getErrorList() - .contains(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE); + .contains(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE); } private Map> getUnDeletedQueues() throws ReplicationException { @@ -73,8 +73,8 @@ public class ReplicationChecker { if (!peerIds.contains(queueInfo.getPeerId())) { undeletedQueues.computeIfAbsent(replicator, key -> new ArrayList<>()).add(queueId); LOG.debug( - "Undeleted replication queue for removed peer found: " + - "[removedPeerId={}, replicator={}, queueId={}]", + "Undeleted replication queue for removed peer found: " + + "[removedPeerId={}, replicator={}, queueId={}]", queueInfo.getPeerId(), replicator, queueId); } } @@ -100,17 +100,17 @@ public class ReplicationChecker { undeletedQueueIds.forEach((replicator, queueIds) -> { queueIds.forEach(queueId -> { ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId); - String msg = "Undeleted replication queue for removed peer found: " + - String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(), + String msg = "Undeleted replication queue for removed peer found: " + + String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(), replicator, queueId); errorReporter.reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg); }); }); undeletedHFileRefsPeerIds = getUndeletedHFileRefsPeers(); - undeletedHFileRefsPeerIds.stream().map( - peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found") - .forEach(msg -> errorReporter - .reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg)); + undeletedHFileRefsPeerIds.stream() + .map(peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found") + .forEach(msg -> errorReporter + .reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg)); } public void fixUnDeletedQueues() throws ReplicationException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java index e4b4a814e2d..db8b0fdbf50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util.hbck; import java.io.IOException; @@ -25,10 +24,9 @@ import org.apache.hadoop.hbase.util.HbckTableInfo; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface provides callbacks for handling particular table integrity - * invariant violations. This could probably be boiled down to handling holes - * and handling overlaps but currently preserves the older more specific error - * condition codes. + * This interface provides callbacks for handling particular table integrity invariant violations. 
+ * This could probably be boiled down to handling holes and handling overlaps but currently + * preserves the older more specific error condition codes. */ @InterfaceAudience.Private public interface TableIntegrityErrorHandler { @@ -41,66 +39,56 @@ public interface TableIntegrityErrorHandler { void setTableInfo(HbckTableInfo ti); /** - * Callback for handling case where a Table has a first region that does not - * have an empty start key. - * - * @param hi An HbckRegionInfo of the second region in a table. This should have - * a non-empty startkey, and can be used to fabricate a first region that - * has an empty start key. + * Callback for handling case where a Table has a first region that does not have an empty start + * key. + * @param hi An HbckRegionInfo of the second region in a table. This should have a non-empty + * startkey, and can be used to fabricate a first region that has an empty start key. */ void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException; /** - * Callback for handling case where a Table has a last region that does not - * have an empty end key. - * - * @param curEndKey The end key of the current last region. There should be a new region - * with start key as this and an empty end key. + * Callback for handling case where a Table has a last region that does not have an empty end key. + * @param curEndKey The end key of the current last region. There should be a new region with + * start key as this and an empty end key. */ void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException; /** * Callback for handling a region that has the same start and end key. - * * @param hi An HbckRegionInfo for a degenerate key. */ void handleDegenerateRegion(HbckRegionInfo hi) throws IOException; /** - * Callback for handling two regions that have the same start key. This is - * a specific case of a region overlap. + * Callback for handling two regions that have the same start key. This is a specific case of a + * region overlap. * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** - * Callback for handling two regions that have the same regionID - * a specific case of a split + * Callback for handling two regions that have the same regionID a specific case of a split * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ void handleSplit(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** - * Callback for handling two reigons that overlap in some arbitrary way. - * This is a specific case of region overlap, and called for each possible - * pair. If two regions have the same start key, the handleDuplicateStartKeys - * method is called. + * Callback for handling two reigons that overlap in some arbitrary way. This is a specific case + * of region overlap, and called for each possible pair. If two regions have the same start key, + * the handleDuplicateStartKeys method is called. * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ - void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException; + void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** * Callback for handling a region hole between two keys. 
* @param holeStartKey key at the beginning of the region hole - * @param holeEndKey key at the end of the region hole - + * @param holeEndKey key at the end of the region hole */ - void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeEndKey) - throws IOException; + void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeEndKey) throws IOException; /** * Callback for handling an group of regions that overlap. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java index f39c623aa46..39b07820ef4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,12 +24,10 @@ import org.apache.hadoop.hbase.util.HbckTableInfo; import org.apache.yetus.audience.InterfaceAudience; /** - * Simple implementation of TableIntegrityErrorHandler. Can be used as a base - * class. + * Simple implementation of TableIntegrityErrorHandler. Can be used as a base class. */ @InterfaceAudience.Private -abstract public class TableIntegrityErrorHandlerImpl implements - TableIntegrityErrorHandler { +abstract public class TableIntegrityErrorHandlerImpl implements TableIntegrityErrorHandler { HbckTableInfo ti; /** @@ -73,8 +71,7 @@ abstract public class TableIntegrityErrorHandlerImpl implements * {@inheritDoc} */ @Override - public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException { + public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException { } /** @@ -82,23 +79,21 @@ abstract public class TableIntegrityErrorHandlerImpl implements */ @Override public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException { + throws IOException { } /** * {@inheritDoc} */ @Override - public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) - throws IOException { + public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException { } /** * {@inheritDoc} */ @Override - public void handleOverlapGroup(Collection overlap) - throws IOException { + public void handleOverlapGroup(Collection overlap) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 989210b8de2..0ef56d2d213 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -72,10 +72,10 @@ public abstract class AbstractFSWALProvider> implemen // Only public so classes back in regionserver.wal can access public interface Reader extends WAL.Reader { /** - * @param fs File system. + * @param fs File system. * @param path Path. - * @param c Configuration. - * @param s Input stream that may have been pre-opened by the caller; may be null. + * @param c Configuration. + * @param s Input stream that may have been pre-opened by the caller; may be null. 
*/ void init(FileSystem fs, Path path, Configuration c, FSDataInputStream s) throws IOException; } @@ -97,14 +97,14 @@ public abstract class AbstractFSWALProvider> implemen private final ReadWriteLock walCreateLock = new ReentrantReadWriteLock(); /** - * @param factory factory that made us, identity used for FS layout. may not be null - * @param conf may not be null + * @param factory factory that made us, identity used for FS layout. may not be null + * @param conf may not be null * @param providerId differentiate between providers from one factory, used for FS layout. may be - * null + * null */ @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -327,7 +327,7 @@ public abstract class AbstractFSWALProvider> implemen * this log file otherwise. */ public static ServerName getServerNameFromWALDirectoryName(Configuration conf, String path) - throws IOException { + throws IOException { if (path == null || path.length() <= HConstants.HREGION_LOGDIR_NAME.length()) { return null; } @@ -418,8 +418,8 @@ public abstract class AbstractFSWALProvider> implemen } /** - * Comparator used to compare WAL files together based on their start time. - * Just compares start times and nothing else. + * Comparator used to compare WAL files together based on their start time. Just compares start + * times and nothing else. */ public static class WALStartTimeComparator implements Comparator { @Override @@ -428,10 +428,9 @@ public abstract class AbstractFSWALProvider> implemen } /** - * Split a path to get the start time - * For example: 10.20.20.171%3A60020.1277499063250 - * Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL - * which adds a '.syncrep' suffix. Check. + * Split a path to get the start time For example: 10.20.20.171%3A60020.1277499063250 Could also + * be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL which adds a + * '.syncrep' suffix. Check. 
* @param p path to split * @return start time */ @@ -440,8 +439,6 @@ public abstract class AbstractFSWALProvider> implemen } } - - public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); @@ -471,9 +468,8 @@ public abstract class AbstractFSWALProvider> implemen ServerName serverName = getServerNameFromWALDirectoryName(path); // Try finding the log in separate old log dir - oldLogDir = - new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME) - .append(Path.SEPARATOR).append(serverName.getServerName()).toString()); + oldLogDir = new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME) + .append(Path.SEPARATOR).append(serverName.getServerName()).toString()); archivedLogLocation = new Path(oldLogDir, path.getName()); if (fs.exists(archivedLogLocation)) { LOG.info("Log " + path + " was moved to " + archivedLogLocation); @@ -490,7 +486,7 @@ public abstract class AbstractFSWALProvider> implemen * @return WAL Reader instance */ public static org.apache.hadoop.hbase.wal.WAL.Reader openReader(Path path, Configuration conf) - throws IOException { + throws IOException { long retryInterval = 2000; // 2 sec int maxAttempts = 30; int attempt = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java index 0da082a4caf..1b559dcce20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -48,7 +47,7 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { private final ConcurrentMap regionMaximumEditLogSeqNum = new ConcurrentHashMap<>(); public AbstractRecoveredEditsOutputSink(WALSplitter walSplitter, - WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { + WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { super(controller, entryBuffers, numWriters); this.walSplitter = walSplitter; } @@ -57,14 +56,14 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { * @return a writer that wraps a {@link WALProvider.Writer} and its Path. Caller should close. */ protected RecoveredEditsWriter createRecoveredEditsWriter(TableName tableName, byte[] region, - long seqId) throws IOException { + long seqId) throws IOException { Path regionEditsPath = getRegionSplitEditsPath(tableName, region, seqId, walSplitter.getFileBeingSplit().getPath().getName(), walSplitter.getTmpDirName(), walSplitter.conf); if (walSplitter.walFS.exists(regionEditsPath)) { - LOG.warn("Found old edits file. It could be the " + - "result of a previous failed split attempt. Deleting " + regionEditsPath + ", length=" + - walSplitter.walFS.getFileStatus(regionEditsPath).getLen()); + LOG.warn("Found old edits file. 
It could be the " + + "result of a previous failed split attempt. Deleting " + regionEditsPath + ", length=" + + walSplitter.walFS.getFileStatus(regionEditsPath).getLen()); if (!walSplitter.walFS.delete(regionEditsPath, false)) { LOG.warn("Failed delete of old {}", regionEditsPath); } @@ -77,7 +76,7 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { } protected Path closeRecoveredEditsWriter(RecoveredEditsWriter editsWriter, - List thrown) throws IOException { + List thrown) throws IOException { try { editsWriter.writer.close(); } catch (IOException ioe) { @@ -88,14 +87,16 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { return null; } final String msg = "Closed recovered edits writer path=" + editsWriter.path + " (wrote " - + editsWriter.editsWritten + " edits, skipped " + editsWriter.editsSkipped + " edits in " + ( - editsWriter.nanosSpent / 1000 / 1000) + " ms)"; + + editsWriter.editsWritten + " edits, skipped " + editsWriter.editsSkipped + " edits in " + + (editsWriter.nanosSpent / 1000 / 1000) + " ms)"; LOG.info(msg); updateStatusWithMsg(msg); if (editsWriter.editsWritten == 0) { // just remove the empty recovered.edits file - if (walSplitter.walFS.exists(editsWriter.path) - && !walSplitter.walFS.delete(editsWriter.path, false)) { + if ( + walSplitter.walFS.exists(editsWriter.path) + && !walSplitter.walFS.delete(editsWriter.path, false) + ) { final String errorMsg = "Failed deleting empty " + editsWriter.path; LOG.warn(errorMsg); updateStatusWithMsg(errorMsg); @@ -125,8 +126,7 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { updateStatusWithMsg(renameEditMsg); } } catch (IOException ioe) { - final String errorMsg = "Could not rename recovered edits " + editsWriter.path - + " to " + dst; + final String errorMsg = "Could not rename recovered edits " + editsWriter.path + " to " + dst; LOG.error(errorMsg, ioe); updateStatusWithMsg(errorMsg); thrown.add(ioe); @@ -173,17 +173,17 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { e); } if (editsWriter.minLogSeqNum < dstMinLogSeqNum) { - LOG.warn("Found existing old edits file. It could be the result of a previous failed" + - " split attempt or we have duplicated wal entries. Deleting " + dst + ", length=" + - walSplitter.walFS.getFileStatus(dst).getLen()); + LOG.warn("Found existing old edits file. It could be the result of a previous failed" + + " split attempt or we have duplicated wal entries. Deleting " + dst + ", length=" + + walSplitter.walFS.getFileStatus(dst).getLen()); if (!walSplitter.walFS.delete(dst, false)) { LOG.warn("Failed deleting of old {}", dst); throw new IOException("Failed deleting of old " + dst); } } else { - LOG.warn( - "Found existing old edits file and we have less entries. Deleting " + editsWriter.path + - ", length=" + walSplitter.walFS.getFileStatus(editsWriter.path).getLen()); + LOG + .warn("Found existing old edits file and we have less entries. Deleting " + editsWriter.path + + ", length=" + walSplitter.walFS.getFileStatus(editsWriter.path).getLen()); if (!walSplitter.walFS.delete(editsWriter.path, false)) { LOG.warn("Failed deleting of {}", editsWriter.path); throw new IOException("Failed deleting of " + editsWriter.path); @@ -252,7 +252,7 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { } private void logAndThrowWriterAppendFailure(WAL.Entry logEntry, IOException e) - throws IOException { + throws IOException { e = e instanceof RemoteException ? 
((RemoteException) e).unwrapRemoteException() : e; final String errorMsg = "Failed to write log entry " + logEntry.toString() + " to log"; LOG.error(HBaseMarkers.FATAL, errorMsg, e); @@ -262,7 +262,7 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { private void filterCellByStore(WAL.Entry logEntry) { Map maxSeqIdInStores = walSplitter.getRegionMaxSeqIdInStores() - .get(Bytes.toString(logEntry.getKey().getEncodedRegionName())); + .get(Bytes.toString(logEntry.getKey().getEncodedRegionName())); if (MapUtils.isEmpty(maxSeqIdInStores)) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index ad7d4da4c79..218d3fa86a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,14 +47,13 @@ import org.slf4j.LoggerFactory; * NOTE: This class extends Thread rather than Chore because the sleep time can be interrupted when * there is something to do, rather than the Chore sleep time which is invariant. *

      - * The {@link #scheduleFlush(String, List)} is abstract here, - * as sometimes we hold a region without a region server but we still want to roll its WAL. + * The {@link #scheduleFlush(String, List)} is abstract here, as sometimes we hold a region without + * a region server but we still want to roll its WAL. *

      * TODO: change to a pool of threads */ @InterfaceAudience.Private -public abstract class AbstractWALRoller extends Thread - implements Closeable { +public abstract class AbstractWALRoller extends Thread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(AbstractWALRoller.class); protected static final String WAL_ROLL_PERIOD_KEY = "hbase.regionserver.logroll.period"; @@ -66,9 +65,8 @@ public abstract class AbstractWALRoller extends Thread public static final long DEFAULT_WAL_ROLL_WAIT_TIMEOUT = 30000; /** - * Configure for the max count of log rolling retry. - * The real retry count is also limited by the timeout of log rolling - * via {@link #WAL_ROLL_WAIT_TIMEOUT} + * Configure for the max count of log rolling retry. The real retry count is also limited by the + * timeout of log rolling via {@link #WAL_ROLL_WAIT_TIMEOUT} */ protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries"; @@ -189,8 +187,8 @@ public abstract class AbstractWALRoller extends Thread } } try { - for (Iterator> iter = wals.entrySet().iterator(); - iter.hasNext();) { + for (Iterator> iter = wals.entrySet().iterator(); iter + .hasNext();) { Entry entry = iter.next(); WAL wal = entry.getKey(); RollController controller = entry.getValue(); @@ -217,7 +215,7 @@ public abstract class AbstractWALRoller extends Thread if (waitingTime < rollWaitTimeout && nAttempts < maxRollRetry) { nAttempts++; LOG.warn("Retry to roll log, nAttempts={}, waiting time={}ms, sleeping 1s to retry," - + " last exception", nAttempts, waitingTime, ioe); + + " last exception", nAttempts, waitingTime, ioe); sleep(1000); } else { LOG.error("Roll wal failed and waiting timeout, will not retry", ioe); @@ -250,7 +248,7 @@ public abstract class AbstractWALRoller extends Thread /** * @param encodedRegionName Encoded name of region to flush. - * @param families stores of region to flush. + * @param families stores of region to flush. */ protected abstract void scheduleFlush(String encodedRegionName, List families); @@ -265,8 +263,7 @@ public abstract class AbstractWALRoller extends Thread public boolean walRollFinished() { // TODO add a status field of roll in RollController return wals.values().stream() - .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) - && isWaiting(); + .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) && isWaiting(); } /** @@ -285,8 +282,8 @@ public abstract class AbstractWALRoller extends Thread } /** - * Independently control the roll of each wal. When use multiwal, - * can avoid all wal roll together. see HBASE-24665 for detail + * Independently control the roll of each wal. When use multiwal, can avoid all wal roll together. + * see HBASE-24665 for detail */ protected class RollController { private final WAL wal; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 4a4ac376541..bda04086763 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.wal; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -36,6 +35,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; @@ -57,17 +57,18 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider { // Only public so classes back in regionserver.wal can access public interface AsyncWriter extends WALProvider.AsyncWriter { /** - * @throws IOException if something goes wrong initializing an output stream + * @throws IOException if something goes wrong initializing an output stream * @throws StreamLacksCapabilityException if the given FileSystem can't provide streams that - * meet the needs of the given Writer implementation. + * meet the needs of the given Writer implementation. */ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long blocksize, - StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; + StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; } private EventLoopGroup eventLoopGroup; private Class channelClass; + @Override protected AsyncFSWAL createWAL() throws IOException { return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, @@ -80,13 +81,13 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider { @Override protected void doInit(Configuration conf) throws IOException { Pair> eventLoopGroupAndChannelClass = - NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); + NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); if (eventLoopGroupAndChannelClass != null) { eventLoopGroup = eventLoopGroupAndChannelClass.getFirst(); channelClass = eventLoopGroupAndChannelClass.getSecond(); } else { - eventLoopGroup = new NioEventLoopGroup(1, - new DefaultThreadFactory("AsyncFSWAL", true, Thread.MAX_PRIORITY)); + eventLoopGroup = + new NioEventLoopGroup(1, new DefaultThreadFactory("AsyncFSWAL", true, Thread.MAX_PRIORITY)); channelClass = NioSocketChannel.class; } } @@ -95,33 +96,33 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider { * Public because of AsyncFSWAL. Should be package-private */ public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, Path path, - boolean overwritable, EventLoopGroup eventLoopGroup, - Class channelClass) throws IOException { + boolean overwritable, EventLoopGroup eventLoopGroup, Class channelClass) + throws IOException { return createAsyncWriter(conf, fs, path, overwritable, WALUtil.getWALBlockSize(conf, fs, path), - eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, path.getName())); + eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, path.getName())); } /** * Public because of AsyncFSWAL. 
Should be package-private */ public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, Path path, - boolean overwritable, long blocksize, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) throws IOException { + boolean overwritable, long blocksize, EventLoopGroup eventLoopGroup, + Class channelClass, StreamSlowMonitor monitor) throws IOException { // Configuration already does caching for the Class lookup. - Class logWriterClass = conf.getClass( - WRITER_IMPL, AsyncProtobufLogWriter.class, AsyncWriter.class); + Class logWriterClass = + conf.getClass(WRITER_IMPL, AsyncProtobufLogWriter.class, AsyncWriter.class); try { AsyncWriter writer = logWriterClass.getConstructor(EventLoopGroup.class, Class.class) - .newInstance(eventLoopGroup, channelClass); + .newInstance(eventLoopGroup, channelClass); writer.init(fs, path, conf, overwritable, blocksize, monitor); return writer; } catch (Exception e) { if (e instanceof CommonFSUtils.StreamLacksCapabilityException) { - LOG.error("The RegionServer async write ahead log provider " + - "relies on the ability to call " + e.getMessage() + " for proper operation during " + - "component failures, but the current FileSystem does not support doing so. Please " + - "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + - "it points to a FileSystem mount that has suitable capabilities for output streams."); + LOG.error("The RegionServer async write ahead log provider " + + "relies on the ability to call " + e.getMessage() + " for proper operation during " + + "component failures, but the current FileSystem does not support doing so. Please " + + "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + + "it points to a FileSystem mount that has suitable capabilities for output streams."); } else { LOG.debug("Error instantiating log writer.", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java index ed3c8b7f3e2..7db007e1d2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Used for {@link BoundedRecoveredEditsOutputSink}. The core part of limiting opening writers is it - * doesn't return chunk only if the heap size is over maxHeapUsage. Thus it doesn't need to create - * a writer for each region during splitting. The returned {@link EntryBuffers.RegionEntryBuffer} - * will be write to recovered edits file and close the writer immediately. - * See {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more - * details. + * doesn't return chunk only if the heap size is over maxHeapUsage. Thus it doesn't need to create a + * writer for each region during splitting. The returned {@link EntryBuffers.RegionEntryBuffer} will + * be write to recovered edits file and close the writer immediately. See + * {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more details. 
*/ @InterfaceAudience.Private public class BoundedEntryBuffers extends EntryBuffers { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java index bafcee339e7..cf531354e44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,18 +21,17 @@ import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.RegionGroupingProvider.RegionGroupingStrategy; +import org.apache.yetus.audience.InterfaceAudience; /** * A WAL grouping strategy that limits the number of wal groups to * "hbase.wal.regiongrouping.numgroups". */ @InterfaceAudience.Private -public class BoundedGroupingStrategy implements RegionGroupingStrategy{ +public class BoundedGroupingStrategy implements RegionGroupingStrategy { static final String NUM_REGION_GROUPS = "hbase.wal.regiongrouping.numgroups"; static final int DEFAULT_NUM_REGION_GROUPS = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java index 9532fd7badd..789a2ad157a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.MultipleIOException; @@ -36,14 +35,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Class that manages the output streams from the log splitting process. - * Every region may have many recovered edits file. But the opening writers is bounded. - * Bounded means the output streams will be no more than the size of threadpool. + * Class that manages the output streams from the log splitting process. Every region may have many + * recovered edits file. But the opening writers is bounded. Bounded means the output streams will + * be no more than the size of threadpool. */ @InterfaceAudience.Private class BoundedRecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { - private static final Logger LOG = - LoggerFactory.getLogger(BoundedRecoveredEditsOutputSink.class); + private static final Logger LOG = LoggerFactory.getLogger(BoundedRecoveredEditsOutputSink.class); // Since the splitting process may create multiple output files, we need a map // to track the output count of each region. 
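The comment above describes per-region bookkeeping across multiple output files. A minimal sketch of that pattern, assuming the map is keyed by the encoded region name (the field and method names here are illustrative, not the class's actual members):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Track how many edits have been written per region across several recovered-edits files.
final ConcurrentMap<String, Long> editsWrittenPerRegion = new ConcurrentHashMap<>();

void recordFlushedEdits(String encodedRegionName, long editsInThisFile) {
  // merge() adds to the existing count, or seeds it on the first file for the region.
  editsWrittenPerRegion.merge(encodedRegionName, editsInThisFile, Long::sum);
}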
@@ -52,7 +50,7 @@ class BoundedRecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { private final AtomicInteger openingWritersNum = new AtomicInteger(0); public BoundedRecoveredEditsOutputSink(WALSplitter walSplitter, - WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { + WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { super(walSplitter, controller, entryBuffers, numWriters); } @@ -64,9 +62,8 @@ class BoundedRecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { return; } // The key point is create a new writer, write edits then close writer. - RecoveredEditsWriter writer = - createRecoveredEditsWriter(buffer.tableName, buffer.encodedRegionName, - entries.get(0).getKey().getSequenceId()); + RecoveredEditsWriter writer = createRecoveredEditsWriter(buffer.tableName, + buffer.encodedRegionName, entries.get(0).getKey().getSequenceId()); if (writer != null) { openingWritersNum.incrementAndGet(); writer.writeRegionEntries(entries); @@ -95,7 +92,6 @@ class BoundedRecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { /** * Write out the remaining RegionEntryBuffers and close the writers. - * * @return true when there is no error. */ private boolean writeRemainingEntryBuffers() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index e1fb9157243..323788c572e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.wal; import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.HashMap; @@ -49,10 +50,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A WALSplitter sink that outputs {@link org.apache.hadoop.hbase.io.hfile.HFile}s. - * Runs with a bounded number of HFile writers at any one time rather than let the count run up. + * A WALSplitter sink that outputs {@link org.apache.hadoop.hbase.io.hfile.HFile}s. Runs with a + * bounded number of HFile writers at any one time rather than let the count run up. * @see BoundedRecoveredEditsOutputSink for a sink implementation that writes intermediate - * recovered.edits files. + * recovered.edits files. */ @InterfaceAudience.Private public class BoundedRecoveredHFilesOutputSink extends OutputSink { @@ -90,10 +91,10 @@ public class BoundedRecoveredHFilesOutputSink extends OutputSink { String familyName = Bytes.toString(CellUtil.cloneFamily(cell)); // comparator need to be specified for meta familyCells - .computeIfAbsent(familyName, - key -> new CellSet( - isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)) - .add(cell); + .computeIfAbsent(familyName, + key -> new CellSet( + isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)) + .add(cell); familySeqIds.compute(familyName, (k, v) -> v == null ? seqId : Math.max(v, seqId)); } } @@ -136,7 +137,6 @@ public class BoundedRecoveredHFilesOutputSink extends OutputSink { /** * Write out the remaining RegionEntryBuffers and close the writers. - * * @return true when there is no error. 
*/ private boolean writeRemainingEntryBuffers() throws IOException { @@ -188,21 +188,21 @@ public class BoundedRecoveredHFilesOutputSink extends OutputSink { } /** - * @return Returns a base HFile without compressions or encodings; good enough for recovery - * given hfile has metadata on how it was written. + * @return Returns a base HFile without compressions or encodings; good enough for recovery given + * hfile has metadata on how it was written. */ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String regionName, - long seqId, String familyName, boolean isMetaTable) throws IOException { + long seqId, String familyName, boolean isMetaTable) throws IOException { Path outputDir = WALSplitUtil.tryCreateRecoveredHFilesDir(walSplitter.rootFS, walSplitter.conf, tableName, regionName, familyName); StoreFileWriter.Builder writerBuilder = - new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) - .withOutputDir(outputDir); - HFileContext hFileContext = new HFileContextBuilder(). - withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)). - withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)). - withCellComparator(isMetaTable? - MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build(); + new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) + .withOutputDir(outputDir); + HFileContext hFileContext = + new HFileContextBuilder().withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)).withCellComparator( + isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR) + .build(); return writerBuilder.withFileContext(hFileContext).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 6c215f84ed4..4700ecdea8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,10 +43,8 @@ import org.slf4j.LoggerFactory; // imports for things that haven't moved from regionserver.wal yet. /** - * No-op implementation of {@link WALProvider} used when the WAL is disabled. - * - * Should only be used when severe data loss is acceptable. - * + * No-op implementation of {@link WALProvider} used when the WAL is disabled. Should only be used + * when severe data loss is acceptable. 
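The DisabledWALProvider javadoc above stresses that it trades durability for speed. As a rough sketch (both the "hbase.wal.provider" key and the acceptance of a fully qualified WALProvider class name are assumptions based on stock WALFactory behaviour, not on this patch), opting into it might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class DisabledWalSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key and value; every appended edit is acknowledged but never
    // persisted, so this is only defensible for throwaway data.
    conf.set("hbase.wal.provider", "org.apache.hadoop.hbase.wal.DisabledWALProvider");
  }
}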
*/ @InterfaceAudience.Private class DisabledWALProvider implements WALProvider { @@ -57,7 +55,7 @@ class DisabledWALProvider implements WALProvider { @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + throws IOException { if (null != disabled) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -96,11 +94,11 @@ class DisabledWALProvider implements WALProvider { protected final AtomicBoolean closed = new AtomicBoolean(false); public DisabledWAL(final Path path, final Configuration conf, - final List listeners) { + final List listeners) { this.coprocessorHost = new WALCoprocessorHost(this, conf); this.path = path; if (null != listeners) { - for(WALActionsListener listener : listeners) { + for (WALActionsListener listener : listeners) { registerWALActionsListener(listener); } } @@ -147,7 +145,7 @@ class DisabledWALProvider implements WALProvider { @Override public void shutdown() { - if(closed.compareAndSet(false, true)) { + if (closed.compareAndSet(false, true)) { if (!this.listeners.isEmpty()) { for (WALActionsListener listener : this.listeners) { listener.logCloseRequested(); @@ -167,13 +165,12 @@ class DisabledWALProvider implements WALProvider { } @Override - public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) - throws IOException { + public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException { return append(info, key, edits, false); } private long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException { + throws IOException { WriteEntry writeEntry = key.getMvcc().begin(); if (!edits.isReplay()) { for (Cell cell : edits.getCells()) { @@ -196,8 +193,10 @@ class DisabledWALProvider implements WALProvider { } @Override - public void updateStore(byte[] encodedRegionName, byte[] familyName, - Long sequenceid, boolean onlyIfGreater) { return; } + public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, + boolean onlyIfGreater) { + return; + } @Override public void sync() { @@ -214,8 +213,8 @@ class DisabledWALProvider implements WALProvider { } @Override - public Long startCacheFlush(final byte[] encodedRegionName, Map - flushedFamilyNamesToSeq) { + public Long startCacheFlush(final byte[] encodedRegionName, + Map flushedFamilyNamesToSeq) { return startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq.keySet()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java index 4c1e5be695c..d8cca1cd4f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; @@ -146,10 +145,9 @@ public class EntryBuffers { } /** - * A buffer of some number of edits for a given region. 
- * This accumulates edits and also provides a memory optimization in order to - * share a single byte array instance for the table and region name. - * Also tracks memory usage of the accumulated edits. + * A buffer of some number of edits for a given region. This accumulates edits and also provides a + * memory optimization in order to share a single byte array instance for the table and region + * name. Also tracks memory usage of the accumulated edits. */ public static class RegionEntryBuffer implements HeapSize { private long heapInBuffer = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index 58f072dd91a..5d63ac2cf45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,21 +44,22 @@ public class FSHLogProvider extends AbstractFSWALProvider { // Only public so classes back in regionserver.wal can access public interface Writer extends WALProvider.Writer { /** - * @throws IOException if something goes wrong initializing an output stream + * @throws IOException if something goes wrong initializing an output stream * @throws StreamLacksCapabilityException if the given FileSystem can't provide streams that - * meet the needs of the given Writer implementation. + * meet the needs of the given Writer implementation. */ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long blocksize, - StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; + StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; } /** * Public because of FSHLog. Should be package-private * @param overwritable if the created writer can overwrite. For recovered edits, it is true and - * for WAL it is false. Thus we can distinguish WAL and recovered edits by this. + * for WAL it is false. Thus we can distinguish WAL and recovered edits by + * this. */ public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, - final boolean overwritable) throws IOException { + final boolean overwritable) throws IOException { return createWriter(conf, fs, path, overwritable, WALUtil.getWALBlockSize(conf, fs, path, overwritable)); } @@ -71,22 +71,21 @@ public class FSHLogProvider extends AbstractFSWALProvider { final boolean overwritable, long blocksize) throws IOException { // Configuration already does caching for the Class lookup. 
Class logWriterClass = - conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, - Writer.class); + conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, Writer.class); Writer writer = null; try { writer = logWriterClass.getDeclaredConstructor().newInstance(); FileSystem rootFs = FileSystem.get(path.toUri(), conf); writer.init(rootFs, path, conf, overwritable, blocksize, - StreamSlowMonitor.create(conf, path.getName())); + StreamSlowMonitor.create(conf, path.getName())); return writer; - } catch (Exception e) { + } catch (Exception e) { if (e instanceof CommonFSUtils.StreamLacksCapabilityException) { - LOG.error("The RegionServer write ahead log provider for FileSystem implementations " + - "relies on the ability to call " + e.getMessage() + " for proper operation during " + - "component failures, but the current FileSystem does not support doing so. Please " + - "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + - "it points to a FileSystem mount that has suitable capabilities for output streams."); + LOG.error("The RegionServer write ahead log provider for FileSystem implementations " + + "relies on the ability to call " + e.getMessage() + " for proper operation during " + + "component failures, but the current FileSystem does not support doing so. Please " + + "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + + "it points to a FileSystem mount that has suitable capabilities for output streams."); } else { LOG.debug("Error instantiating log writer.", e); } @@ -97,9 +96,9 @@ public class FSHLogProvider extends AbstractFSWALProvider { @Override protected FSHLog createWAL() throws IOException { return new FSHLog(CommonFSUtils.getWALFileSystem(conf), abortable, - CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), - getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null); + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), + getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, + META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java index 3022a25fdb1..c718fb96172 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,14 @@ package org.apache.hadoop.hbase.wal; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.RegionGroupingProvider.RegionGroupingStrategy; +import org.apache.yetus.audience.InterfaceAudience; /** - * A WAL grouping strategy based on namespace. - * Notice: the wal-group mapping might change if we support dynamic namespace updating later, - * and special attention needed if we support feature like group-based replication. 
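createWriter() above resolves the writer class from "hbase.regionserver.hlog.writer.impl" (ProtobufLogWriter by default), instantiates it via its no-arg constructor, and then calls init(). A small sketch of invoking that factory directly, with a hypothetical helper method name:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.FSHLogProvider;
import org.apache.hadoop.hbase.wal.WALProvider;

public final class CreateWriterSketch {
  private CreateWriterSketch() {
  }

  /**
   * Open a WAL writer the way FSHLogProvider.createWriter() does it: the
   * implementation class comes from "hbase.regionserver.hlog.writer.impl",
   * and overwritable=false marks the file as a WAL rather than recovered edits.
   */
  static WALProvider.Writer openWalWriter(Configuration conf, Path walPath) throws IOException {
    FileSystem fs = walPath.getFileSystem(conf);
    return FSHLogProvider.createWriter(conf, fs, walPath, false);
  }
}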
+ * A WAL grouping strategy based on namespace. Notice: the wal-group mapping might change if we + * support dynamic namespace updating later, and special attention needed if we support feature like + * group-based replication. */ @InterfaceAudience.Private public class NamespaceGroupingStrategy implements RegionGroupingStrategy { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java index 12b63f5b813..505712b3b7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,14 +39,14 @@ public final class NettyAsyncFSWALConfigHelper { private static final String CONFIG_NAME = "global-event-loop"; - private static final Map>> EVENT_LOOP_CONFIG_MAP = - new HashMap<>(); + private static final Map>> EVENT_LOOP_CONFIG_MAP = new HashMap<>(); /** * Set the EventLoopGroup and channel class for {@code AsyncFSWALProvider}. */ public static void setEventLoopConfig(Configuration conf, EventLoopGroup group, - Class channelClass) { + Class channelClass) { Preconditions.checkNotNull(group, "group is null"); Preconditions.checkNotNull(channelClass, "channel class is null"); conf.set(EVENT_LOOP_CONFIG, CONFIG_NAME); @@ -62,5 +62,6 @@ public final class NettyAsyncFSWALConfigHelper { return EVENT_LOOP_CONFIG_MAP.get(name); } - private NettyAsyncFSWALConfigHelper() {} + private NettyAsyncFSWALConfigHelper() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java index 443e41ccd63..8bd03217063 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,17 +27,16 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * The following class is an abstraction class to provide a common interface to support different @@ -72,7 +71,7 @@ public abstract class OutputSink { protected final CompletionService closeCompletionService; public OutputSink(WALSplitter.PipelineController controller, EntryBuffers entryBuffers, - int numWriters) { + int numWriters) { this.numThreads = numWriters; this.controller = controller; this.entryBuffers = entryBuffers; @@ -102,9 +101,9 @@ public abstract class OutputSink { } public synchronized void restartWriterThreadsIfNeeded() { - for(int i = 0; i< writerThreads.size(); i++){ + for (int i = 0; i < writerThreads.size(); i++) { WriterThread t = writerThreads.get(i); - if (!t.isAlive()){ + if (!t.isAlive()) { String threadName = t.getName(); LOG.debug("Replacing dead thread: " + threadName); WriterThread newThread = new WriterThread(controller, entryBuffers, this, threadName); @@ -116,7 +115,6 @@ public abstract class OutputSink { /** * Wait for writer threads to dump all info to the sink - * * @return true when there is no error */ protected boolean finishWriterThreads(boolean interrupt) throws IOException { @@ -201,12 +199,12 @@ public abstract class OutputSink { private OutputSink outputSink = null; WriterThread(WALSplitter.PipelineController controller, EntryBuffers entryBuffers, - OutputSink sink, int i) { + OutputSink sink, int i) { this(controller, entryBuffers, sink, Thread.currentThread().getName() + "-Writer-" + i); } WriterThread(WALSplitter.PipelineController controller, EntryBuffers entryBuffers, - OutputSink sink, String threadName) { + OutputSink sink, String threadName) { super(threadName); this.controller = controller; this.entryBuffers = entryBuffers; @@ -214,7 +212,7 @@ public abstract class OutputSink { } @Override - public void run() { + public void run() { try { doRun(); } catch (Throwable t) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java index 2ac0cea78c6..44ef151b7b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java @@ -28,21 +28,20 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.MultipleIOException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Class that manages the output streams from the log splitting process. - * Every region only has one recovered edits file PER split WAL (if we split - * multiple WALs during a log-splitting session, on open, a Region may - * have multiple recovered.edits files to replay -- one per split WAL). - * @see BoundedRecoveredEditsOutputSink which is like this class but imposes upper bound on - * the number of writers active at one time (makes for better throughput). + * Class that manages the output streams from the log splitting process. Every region only has one + * recovered edits file PER split WAL (if we split multiple WALs during a log-splitting session, on + * open, a Region may have multiple recovered.edits files to replay -- one per split WAL). + * @see BoundedRecoveredEditsOutputSink which is like this class but imposes upper bound on the + * number of writers active at one time (makes for better throughput). */ @InterfaceAudience.Private class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { @@ -50,7 +49,7 @@ class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { private ConcurrentMap writers = new ConcurrentHashMap<>(); public RecoveredEditsOutputSink(WALSplitter walSplitter, - WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { + WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { super(walSplitter, controller, entryBuffers, numWriters); } @@ -61,9 +60,8 @@ class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { LOG.warn("got an empty buffer, skipping"); return; } - RecoveredEditsWriter writer = - getRecoveredEditsWriter(buffer.tableName, buffer.encodedRegionName, - entries.get(0).getKey().getSequenceId()); + RecoveredEditsWriter writer = getRecoveredEditsWriter(buffer.tableName, + buffer.encodedRegionName, entries.get(0).getKey().getSequenceId()); if (writer != null) { writer.writeRegionEntries(entries); } @@ -75,7 +73,7 @@ class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { * @return null if this region shouldn't output any logs */ private RecoveredEditsWriter getRecoveredEditsWriter(TableName tableName, byte[] region, - long seqId) throws IOException { + long seqId) throws IOException { RecoveredEditsWriter ret = writers.get(Bytes.toString(region)); if (ret != null) { return ret; @@ -102,7 +100,6 @@ class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { /** * Close all of the output streams. - * * @return true when there is no error. */ private boolean closeWriters() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 4a2c2204b81..2a92acfd0b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,12 +27,10 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; -// imports for classes still in regionserver.wal import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.KeyLocker; @@ -42,18 +39,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A WAL Provider that returns a WAL per group of regions. - * - * This provider follows the decorator pattern and mainly holds the logic for WAL grouping. - * WAL creation/roll/close is delegated to {@link #DELEGATE_PROVIDER} - * - * Region grouping is handled via {@link RegionGroupingStrategy} and can be configured via the - * property "hbase.wal.regiongrouping.strategy". Current strategy choices are + * A WAL Provider that returns a WAL per group of regions. This provider follows the decorator + * pattern and mainly holds the logic for WAL grouping. WAL creation/roll/close is delegated to + * {@link #DELEGATE_PROVIDER} Region grouping is handled via {@link RegionGroupingStrategy} and can + * be configured via the property "hbase.wal.regiongrouping.strategy". Current strategy choices are *

 * <ul>
- * <li>defaultStrategy : Whatever strategy this version of HBase picks. currently
- * "bounded".</li>
- * <li>identity : each region belongs to its own group.</li>
- * <li>bounded : bounded number of groups and region evenly assigned to each group.</li>
+ * <li>defaultStrategy : Whatever strategy this version of HBase picks. currently
+ * "bounded".</li>
+ * <li>identity : each region belongs to its own group.</li>
+ * <li>bounded : bounded number of groups and region evenly assigned to each group.</li>
 * </ul>
      * Optionally, a FQCN to a custom implementation may be given. */ @@ -71,6 +65,7 @@ public class RegionGroupingProvider implements WALProvider { * Given an identifier and a namespace, pick a group. */ String group(final byte[] identifier, byte[] namespace); + void init(Configuration config, String providerId); } @@ -84,17 +79,18 @@ public class RegionGroupingProvider implements WALProvider { namespace(NamespaceGroupingStrategy.class); final Class clazz; + Strategies(Class clazz) { this.clazz = clazz; } } /** - * instantiate a strategy from a config property. - * requires conf to have already been set (as well as anything the provider might need to read). + * instantiate a strategy from a config property. requires conf to have already been set (as well + * as anything the provider might need to read). */ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, - final String defaultValue) throws IOException { + final String defaultValue) throws IOException { Class clazz; try { clazz = Strategies.valueOf(conf.get(key, defaultValue)).clazz; @@ -110,8 +106,8 @@ public class RegionGroupingProvider implements WALProvider { result.init(conf, providerId); return result; } catch (Exception e) { - LOG.error("couldn't set up region grouping strategy, check config key " + - REGION_GROUPING_STRATEGY); + LOG.error( + "couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY); LOG.debug("Exception details for failure to load region grouping strategy.", e); throw new IOException("couldn't set up region grouping strategy", e); } @@ -122,8 +118,8 @@ public class RegionGroupingProvider implements WALProvider { /** delegate provider for WAL creation/roll/close, but not support multiwal */ public static final String DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider"; - public static final String DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider - .name(); + public static final String DEFAULT_DELEGATE_PROVIDER = + WALFactory.Providers.defaultProvider.name(); private static final String META_WAL_GROUP_NAME = "meta"; @@ -140,7 +136,7 @@ public class RegionGroupingProvider implements WALProvider { @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + throws IOException { if (null != strategy) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -223,7 +219,7 @@ public class RegionGroupingProvider implements WALProvider { public void shutdown() throws IOException { // save the last exception and rethrow IOException failure = null; - for (WALProvider provider: cached.values()) { + for (WALProvider provider : cached.values()) { try { provider.shutdown(); } catch (IOException e) { @@ -261,7 +257,9 @@ public class RegionGroupingProvider implements WALProvider { static class IdentityGroupingStrategy implements RegionGroupingStrategy { @Override - public void init(Configuration config, String providerId) {} + public void init(Configuration config, String providerId) { + } + @Override public String group(final byte[] identifier, final byte[] namespace) { return Bytes.toString(identifier); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 20e45de9ab8..38b873ec56f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -17,6 +17,8 @@ */ 
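The strategy list above, plus the "hbase.wal.regiongrouping.numgroups" and "hbase.wal.regiongrouping.delegate.provider" keys elsewhere in this patch, combine roughly as in the sketch below. The "hbase.wal.provider" / "multiwal" pairing is an assumption about how RegionGroupingProvider is normally selected; it is not part of this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionGroupingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed: the multiwal provider routes WAL creation through RegionGroupingProvider.
    conf.set("hbase.wal.provider", "multiwal");
    // Strategy choices quoted above: defaultStrategy, identity, bounded,
    // or a fully qualified RegionGroupingStrategy implementation.
    conf.set("hbase.wal.regiongrouping.strategy", "bounded");
    // BoundedGroupingStrategy caps the number of WAL groups (default is 2).
    conf.setInt("hbase.wal.regiongrouping.numgroups", 4);
    // Provider that actually creates/rolls/closes each group's WAL;
    // "defaultProvider" is the documented default delegate.
    conf.set("hbase.wal.regiongrouping.delegate.provider", "defaultProvider");
  }
}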
package org.apache.hadoop.hbase.wal; +import static org.apache.commons.lang3.StringUtils.isNumeric; + import java.io.Closeable; import java.io.IOException; import java.util.List; @@ -32,14 +34,11 @@ import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import static org.apache.commons.lang3.StringUtils.isNumeric; - /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides - * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). - * - * Note that some internals, such as log rolling and performance evaluation tools, will use - * WAL.equals to determine if they have already seen a given WAL. + * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). Note that some + * internals, such as log rolling and performance evaluation tools, will use WAL.equals to determine + * if they have already seen a given WAL. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -57,43 +56,36 @@ public interface WAL extends Closeable, WALFileLengthProvider { /** * Roll the log writer. That is, start writing log messages to a new file. - * *

      - * The implementation is synchronized in order to make sure there's one rollWriter - * running at any given time. - * - * @return If lots of logs, flush the stores of returned regions so next time through we - * can clean logs. Returns null if nothing to flush. Names are actual - * region names as returned by {@link RegionInfo#getEncodedName()} + * The implementation is synchronized in order to make sure there's one rollWriter running at any + * given time. + * @return If lots of logs, flush the stores of returned regions so next time through we can clean + * logs. Returns null if nothing to flush. Names are actual region names as returned by + * {@link RegionInfo#getEncodedName()} */ Map> rollWriter() throws FailedLogCloseException, IOException; /** * Roll the log writer. That is, start writing log messages to a new file. - * *

      - * The implementation is synchronized in order to make sure there's one rollWriter - * running at any given time. - * - * @param force - * If true, force creation of a new writer even if no entries have - * been written to the current writer - * @return If lots of logs, flush the stores of returned regions so next time through we - * can clean logs. Returns null if nothing to flush. Names are actual - * region names as returned by {@link RegionInfo#getEncodedName()} + * The implementation is synchronized in order to make sure there's one rollWriter running at any + * given time. n * If true, force creation of a new writer even if no entries have been written to + * the current writer + * @return If lots of logs, flush the stores of returned regions so next time through we can clean + * logs. Returns null if nothing to flush. Names are actual region names as returned by + * {@link RegionInfo#getEncodedName()} */ Map> rollWriter(boolean force) throws IOException; /** - * Stop accepting new writes. If we have unsynced writes still in buffer, sync them. - * Extant edits are left in place in backing storage to be replayed later. + * Stop accepting new writes. If we have unsynced writes still in buffer, sync them. Extant edits + * are left in place in backing storage to be replayed later. */ void shutdown() throws IOException; /** - * Caller no longer needs any edits from this WAL. Implementers are free to reclaim - * underlying resources after this call; i.e. filesystem based WALs can archive or - * delete files. + * Caller no longer needs any edits from this WAL. Implementers are free to reclaim underlying + * resources after this call; i.e. filesystem based WALs can archive or delete files. */ @Override void close() throws IOException; @@ -105,10 +97,10 @@ public interface WAL extends Closeable, WALFileLengthProvider { * The WAL is not flushed/sync'd after this transaction completes BUT on return this edit must * have its region edit/sequence id assigned else it messes up our unification of mvcc and * sequenceid. On return key will have the region edit/sequence id filled in. - * @param info the regioninfo associated with append - * @param key Modified by this call; we add to it this edits region edit/sequence id. + * @param info the regioninfo associated with append + * @param key Modified by this call; we add to it this edits region edit/sequence id. * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit - * sequence id that is after all currently appended edits. + * sequence id that is after all currently appended edits. * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. * @see #appendMarker(RegionInfo, WALKeyImpl, WALEdit) @@ -116,19 +108,19 @@ public interface WAL extends Closeable, WALFileLengthProvider { long appendData(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException; /** - * Append an operational 'meta' event marker edit to the WAL. A marker meta edit could - * be a FlushDescriptor, a compaction marker, or a region event marker; e.g. region open - * or region close. The difference between a 'marker' append and a 'data' append as in - * {@link #appendData(RegionInfo, WALKeyImpl, WALEdit)}is that a marker will not have - * transitioned through the memstore. + * Append an operational 'meta' event marker edit to the WAL. A marker meta edit could be a + * FlushDescriptor, a compaction marker, or a region event marker; e.g. region open or region + * close. 
The difference between a 'marker' append and a 'data' append as in + * {@link #appendData(RegionInfo, WALKeyImpl, WALEdit)}is that a marker will not have transitioned + * through the memstore. *

      * The WAL is not flushed/sync'd after this transaction completes BUT on return this edit must * have its region edit/sequence id assigned else it messes up our unification of mvcc and * sequenceid. On return key will have the region edit/sequence id filled in. - * @param info the regioninfo associated with append - * @param key Modified by this call; we add to it this edits region edit/sequence id. + * @param info the regioninfo associated with append + * @param key Modified by this call; we add to it this edits region edit/sequence id. * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit - * sequence id that is after all currently appended edits. + * sequence id that is after all currently appended edits. * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. * @see #appendData(RegionInfo, WALKeyImpl, WALEdit) @@ -136,12 +128,11 @@ public interface WAL extends Closeable, WALFileLengthProvider { long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException; /** - * updates the seuence number of a specific store. - * depending on the flag: replaces current seq number if the given seq id is bigger, - * or even if it is lower than existing one + * updates the seuence number of a specific store. depending on the flag: replaces current seq + * number if the given seq id is bigger, or even if it is lower than existing one */ void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, - boolean onlyIfGreater); + boolean onlyIfGreater); /** * Sync what we have in the WAL. @@ -156,35 +147,35 @@ public interface WAL extends Closeable, WALFileLengthProvider { /** * @param forceSync Flag to force sync rather than flushing to the buffer. Example - Hadoop hflush - * vs hsync. + * vs hsync. */ default void sync(boolean forceSync) throws IOException { sync(); } /** - * @param txid Transaction id to sync to. + * @param txid Transaction id to sync to. * @param forceSync Flag to force sync rather than flushing to the buffer. Example - Hadoop hflush - * vs hsync. + * vs hsync. */ default void sync(long txid, boolean forceSync) throws IOException { sync(txid); } /** - * WAL keeps track of the sequence numbers that are as yet not flushed im memstores - * in order to be able to do accounting to figure which WALs can be let go. This method tells WAL - * that some region is about to flush. The flush can be the whole region or for a column family - * of the region only. - * - *

      Currently, it is expected that the update lock is held for the region; i.e. no - * concurrent appends while we set up cache flush. + * WAL keeps track of the sequence numbers that are as yet not flushed im memstores in order to be + * able to do accounting to figure which WALs can be let go. This method tells WAL that some + * region is about to flush. The flush can be the whole region or for a column family of the + * region only. + *

      + * Currently, it is expected that the update lock is held for the region; i.e. no concurrent + * appends while we set up cache flush. * @param families Families to flush. May be a subset of all families in the region. - * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if - * we are flushing a subset of all families but there are no edits in those families not - * being flushed; in other words, this is effectively same as a flush of all of the region - * though we were passed a subset of regions. Otherwise, it returns the sequence id of the - * oldest/lowest outstanding edit. + * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if we are + * flushing a subset of all families but there are no edits in those families not being + * flushed; in other words, this is effectively same as a flush of all of the region + * though we were passed a subset of regions. Otherwise, it returns the sequence id of the + * oldest/lowest outstanding edit. * @see #completeCacheFlush(byte[], long) * @see #abortCacheFlush(byte[]) */ @@ -195,17 +186,17 @@ public interface WAL extends Closeable, WALFileLengthProvider { /** * Complete the cache flush. * @param encodedRegionName Encoded region name. - * @param maxFlushedSeqId The maxFlushedSeqId for this flush. There is no edit in memory that is - * less that this sequence id. + * @param maxFlushedSeqId The maxFlushedSeqId for this flush. There is no edit in memory that is + * less that this sequence id. * @see #startCacheFlush(byte[], Set) * @see #abortCacheFlush(byte[]) */ void completeCacheFlush(final byte[] encodedRegionName, long maxFlushedSeqId); /** - * Abort a cache flush. Call if the flush fails. Note that the only recovery - * for an aborted flush currently is a restart of the regionserver so the - * snapshot content dropped by the failure gets restored to the memstore. + * Abort a cache flush. Call if the flush fails. Note that the only recovery for an aborted flush + * currently is a restart of the regionserver so the snapshot content dropped by the failure gets + * restored to the memstore. * @param encodedRegionName Encoded region name. */ void abortCacheFlush(byte[] encodedRegionName); @@ -220,7 +211,7 @@ public interface WAL extends Closeable, WALFileLengthProvider { * @param encodedRegionName The region to get the number for. * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent. * @deprecated Since version 1.2.0. Removing because not used and exposes subtle internal - * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} + * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} */ @Deprecated long getEarliestMemStoreSeqNum(byte[] encodedRegionName); @@ -228,29 +219,31 @@ public interface WAL extends Closeable, WALFileLengthProvider { /** * Gets the earliest unflushed sequence id in the memstore for the store. * @param encodedRegionName The region to get the number for. - * @param familyName The family to get the number for. + * @param familyName The family to get the number for. * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent. */ long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName); /** - * Human readable identifying information about the state of this WAL. - * Implementors are encouraged to include information appropriate for debugging. 
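The append/sync contract spelled out above (appendData() assigns the region edit/sequence id into the key and returns a transaction id; sync() makes it durable) can be exercised roughly as follows. The helper class and method are hypothetical; only the WAL calls come from the interface in this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

public final class WalAppendSketch {
  private WalAppendSketch() {
  }

  /** Append a data edit and wait until it is durable up to the returned txid. */
  static long appendAndSync(WAL wal, RegionInfo region, WALKeyImpl key, WALEdit edits)
    throws IOException {
    // appendData() fills in the region edit/sequence id on the key and hands
    // back a transaction id that sync() can wait on.
    long txid = wal.appendData(region, key, edits);
    // forceSync=false: flush to the buffer/pipeline (hflush-style) rather than
    // forcing an hsync, matching the default described in the javadoc.
    wal.sync(txid, false);
    return txid;
  }
}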
- * Consumers are advised not to rely on the details of the returned String; it does - * not have a defined structure. + * Human readable identifying information about the state of this WAL. Implementors are encouraged + * to include information appropriate for debugging. Consumers are advised not to rely on the + * details of the returned String; it does not have a defined structure. */ @Override String toString(); /** - * When outside clients need to consume persisted WALs, they rely on a provided - * Reader. + * When outside clients need to consume persisted WALs, they rely on a provided Reader. */ interface Reader extends Closeable { Entry next() throws IOException; + Entry next(Entry reuse) throws IOException; + void seek(long pos) throws IOException; + long getPosition() throws IOException; + void reset() throws IOException; } @@ -267,9 +260,8 @@ public interface WAL extends Closeable, WALFileLengthProvider { /** * Constructor for both params - * * @param edit log's edit - * @param key log's key + * @param key log's key */ public Entry(WALKeyImpl key, WALEdit edit) { this.key = key; @@ -277,28 +269,21 @@ public interface WAL extends Closeable, WALFileLengthProvider { } /** - * Gets the edit - * - * @return edit + * Gets the edit n */ public WALEdit getEdit() { return edit; } /** - * Gets the key - * - * @return key + * Gets the key n */ public WALKeyImpl getKey() { return key; } /** - * Set compression context for this entry. - * - * @param compressionContext - * Compression context + * Set compression context for this entry. n * Compression context * @deprecated deparcated since hbase 2.1.0 */ @Deprecated @@ -317,15 +302,15 @@ public interface WAL extends Closeable, WALFileLengthProvider { * as part of their name, usually the suffix. Sometimes there will be an extra suffix as when it * is a WAL for the meta table. For example, WALs might look like this * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the - * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a - * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have - * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending - * order. Here is an example: 0000000000000016310. Allow for this. + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication + * WAL which adds a '.syncrep' suffix. Check for these. File also may have no timestamp on it. For + * example the recovered.edits files are WALs but are named in ascending order. Here is an + * example: 0000000000000016310. Allow for this. * @param name Name of the WAL file. * @return Timestamp or -1. 
*/ public static long getTimestamp(String name) { - String [] splits = name.split("\\."); + String[] splits = name.split("\\."); if (splits.length <= 1) { return -1; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index cefc3a9fc52..6794c2d5bd2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -23,12 +23,11 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.HeapSize; @@ -46,50 +45,52 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDe * Used in HBase's transaction log (WAL) to represent a collection of edits (Cell/KeyValue objects) * that came in as a single transaction. All the edits for a given transaction are written out as a * single record, in PB format, followed (optionally) by Cells written via the WALCellEncoder. - *

      This class is LimitedPrivate for CPs to read-only. The {@link #add} methods are - * classified as private methods, not for use by CPs.

      - * - *

      A particular WALEdit 'type' is the 'meta' type used to mark key operational - * events in the WAL such as compaction, flush, or region open. These meta types do not traverse - * hbase memstores. They are edits made by the hbase system rather than edit data submitted by - * clients. They only show in the WAL. These 'Meta' types have not been formally specified - * (or made into an explicit class type). They evolved organically. HBASE-8457 suggests codifying - * a WALEdit 'type' by adding a type field to WALEdit that gets serialized into the WAL. TODO. - * Would have to work on the consumption-side. Reading WALs on replay we seem to consume - * a Cell-at-a-time rather than by WALEdit. We are already in the below going out of our - * way to figure particular types -- e.g. if a compaction, replay, or close meta Marker -- during - * normal processing so would make sense to do this. Current system is an awkward marking of Cell - * columnfamily as {@link #METAFAMILY} and then setting qualifier based off meta edit type. For - * replay-time where we read Cell-at-a-time, there are utility methods below for figuring - * meta type. See also - * {@link #createBulkLoadEvent(RegionInfo, WALProtos.BulkLoadDescriptor)}, etc., for where we - * create meta WALEdit instances.

      - * - *

      WALEdit will accumulate a Set of all column family names referenced by the Cells - * {@link #add(Cell)}'d. This is an optimization. Usually when loading a WALEdit, we have the - * column family name to-hand.. just shove it into the WALEdit if available. Doing this, we can - * save on a parse of each Cell to figure column family down the line when we go to add the - * WALEdit to the WAL file. See the hand-off in FSWALEntry Constructor. + *

      + * This class is LimitedPrivate for CPs to read-only. The {@link #add} methods are classified as + * private methods, not for use by CPs. + *

      + *

      + * A particular WALEdit 'type' is the 'meta' type used to mark key operational events in the WAL + * such as compaction, flush, or region open. These meta types do not traverse hbase memstores. They + * are edits made by the hbase system rather than edit data submitted by clients. They only show in + * the WAL. These 'Meta' types have not been formally specified (or made into an explicit class + * type). They evolved organically. HBASE-8457 suggests codifying a WALEdit 'type' by adding a type + * field to WALEdit that gets serialized into the WAL. TODO. Would have to work on the + * consumption-side. Reading WALs on replay we seem to consume a Cell-at-a-time rather than by + * WALEdit. We are already in the below going out of our way to figure particular types -- e.g. if a + * compaction, replay, or close meta Marker -- during normal processing so would make sense to do + * this. Current system is an awkward marking of Cell columnfamily as {@link #METAFAMILY} and then + * setting qualifier based off meta edit type. For replay-time where we read Cell-at-a-time, there + * are utility methods below for figuring meta type. See also + * {@link #createBulkLoadEvent(RegionInfo, WALProtos.BulkLoadDescriptor)}, etc., for where we create + * meta WALEdit instances. + *

      + *

      + * WALEdit will accumulate a Set of all column family names referenced by the Cells + * {@link #add(Cell)}'d. This is an optimization. Usually when loading a WALEdit, we have the column + * family name to-hand.. just shove it into the WALEdit if available. Doing this, we can save on a + * parse of each Cell to figure column family down the line when we go to add the WALEdit to the WAL + * file. See the hand-off in FSWALEntry Constructor. * @see WALKey */ // TODO: Do not expose this class to Coprocessors. It has set methods. A CP might meddle. @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION, - HBaseInterfaceAudience.COPROC }) + HBaseInterfaceAudience.COPROC }) public class WALEdit implements HeapSize { // Below defines are for writing WALEdit 'meta' Cells.. // TODO: Get rid of this system of special 'meta' Cells. See HBASE-8457. It suggests // adding a type to WALEdit itself for use denoting meta Edits and their types. - public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY"); + public static final byte[] METAFAMILY = Bytes.toBytes("METAFAMILY"); /** * @deprecated Since 2.3.0. Not used. */ @Deprecated - public static final byte [] METAROW = Bytes.toBytes("METAROW"); + public static final byte[] METAROW = Bytes.toBytes("METAROW"); /** * @deprecated Since 2.3.0. Make it protected, internal-use only. Use - * {@link #isCompactionMarker(Cell)} + * {@link #isCompactionMarker(Cell)} */ @Deprecated @InterfaceAudience.Private @@ -100,37 +101,36 @@ public class WALEdit implements HeapSize { */ @Deprecated @InterfaceAudience.Private - public static final byte [] FLUSH = Bytes.toBytes("HBASE::FLUSH"); + public static final byte[] FLUSH = Bytes.toBytes("HBASE::FLUSH"); /** - * Qualifier for region event meta 'Marker' WALEdits start with the - * {@link #REGION_EVENT_PREFIX} prefix ('HBASE::REGION_EVENT::'). After the prefix, - * we note the type of the event which we get from the RegionEventDescriptor protobuf - * instance type (A RegionEventDescriptor protobuf instance is written as the meta Marker - * Cell value). Adding a type suffix means we do not have to deserialize the protobuf to - * figure out what type of event this is.. .just read the qualifier suffix. For example, - * a close region event descriptor will have a qualifier of HBASE::REGION_EVENT::REGION_CLOSE. - * See WAL.proto and the EventType in RegionEventDescriptor protos for all possible - * event types. + * Qualifier for region event meta 'Marker' WALEdits start with the {@link #REGION_EVENT_PREFIX} + * prefix ('HBASE::REGION_EVENT::'). After the prefix, we note the type of the event which we get + * from the RegionEventDescriptor protobuf instance type (A RegionEventDescriptor protobuf + * instance is written as the meta Marker Cell value). Adding a type suffix means we do not have + * to deserialize the protobuf to figure out what type of event this is.. .just read the qualifier + * suffix. For example, a close region event descriptor will have a qualifier of + * HBASE::REGION_EVENT::REGION_CLOSE. See WAL.proto and the EventType in RegionEventDescriptor + * protos for all possible event types. */ private static final String REGION_EVENT_STR = "HBASE::REGION_EVENT"; private static final String REGION_EVENT_PREFIX_STR = REGION_EVENT_STR + "::"; - private static final byte [] REGION_EVENT_PREFIX = Bytes.toBytes(REGION_EVENT_PREFIX_STR); + private static final byte[] REGION_EVENT_PREFIX = Bytes.toBytes(REGION_EVENT_PREFIX_STR); /** * @deprecated Since 2.3.0. Remove. Not for external use. Not used. 
*/ @Deprecated - public static final byte [] REGION_EVENT = Bytes.toBytes(REGION_EVENT_STR); + public static final byte[] REGION_EVENT = Bytes.toBytes(REGION_EVENT_STR); /** * We use this define figuring if we are carrying a close event. */ - private static final byte [] REGION_EVENT_CLOSE = - createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType.REGION_CLOSE); + private static final byte[] REGION_EVENT_CLOSE = + createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType.REGION_CLOSE); @InterfaceAudience.Private - public static final byte [] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD"); + public static final byte[] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD"); private final transient boolean replay; @@ -138,10 +138,10 @@ public class WALEdit implements HeapSize { /** * All the Cell families in cells. Updated by {@link #add(Cell)} and - * {@link #add(Map)}. This Set is passed to the FSWALEntry so it does not have - * to recalculate the Set of families in a transaction; makes for a bunch of CPU savings. + * {@link #add(Map)}. This Set is passed to the FSWALEntry so it does not have to recalculate the + * Set of families in a transaction; makes for a bunch of CPU savings. */ - private Set families = null; + private Set families = null; public WALEdit() { this(1, false); @@ -149,7 +149,7 @@ public class WALEdit implements HeapSize { /** * @deprecated since 2.0.1 and will be removed in 4.0.0. Use {@link #WALEdit(int, boolean)} - * instead. + * instead. * @see #WALEdit(int, boolean) * @see HBASE-20781 */ @@ -160,7 +160,7 @@ public class WALEdit implements HeapSize { /** * @deprecated since 2.0.1 and will be removed in 4.0.0. Use {@link #WALEdit(int, boolean)} - * instead. + * instead. * @see #WALEdit(int, boolean) * @see HBASE-20781 */ @@ -188,7 +188,7 @@ public class WALEdit implements HeapSize { * For use by FSWALEntry ONLY. An optimization. * @return All families in {@link #getCells()}; may be null. */ - public Set getFamilies() { + public Set getFamilies() { return this.families; } @@ -197,7 +197,7 @@ public class WALEdit implements HeapSize { * @deprecated Since 2.3.0. Do not expose. Make protected. */ @Deprecated - public static boolean isMetaEditFamily(final byte [] f) { + public static boolean isMetaEditFamily(final byte[] f) { return Bytes.equals(METAFAMILY, f); } @@ -209,8 +209,8 @@ public class WALEdit implements HeapSize { } /** - * @return True if this is a meta edit; has one edit only and its columnfamily - * is {@link #METAFAMILY}. + * @return True if this is a meta edit; has one edit only and its columnfamily is + * {@link #METAFAMILY}. */ public boolean isMetaEdit() { return this.families != null && this.families.size() == 1 && this.families.contains(METAFAMILY); @@ -225,7 +225,7 @@ public class WALEdit implements HeapSize { } @InterfaceAudience.Private - public WALEdit add(Cell cell, byte [] family) { + public WALEdit add(Cell cell, byte[] family) { getOrCreateFamilies().add(family); return addCell(cell); } @@ -250,10 +250,8 @@ public class WALEdit implements HeapSize { } /** - * This is not thread safe. - * This will change the WALEdit and shouldn't be used unless you are sure that nothing - * else depends on the contents being immutable. - * + * This is not thread safe. This will change the WALEdit and shouldn't be used unless you are sure + * that nothing else depends on the contents being immutable. * @param cells the list of cells that this WALEdit now contains. 
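The marker helpers above can be combined at replay time to recognise the meta edits a WAL carries. The wrapper below is hypothetical; the static WALEdit methods and descriptor types are the ones shown in this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.wal.WALEdit;

public final class MetaMarkerSketch {
  private MetaMarkerSketch() {
  }

  /** Report which kind of meta 'Marker' cell, if any, was read back from a WAL. */
  static String describe(Cell cell) throws IOException {
    if (WALEdit.isCompactionMarker(cell)) {
      CompactionDescriptor cd = WALEdit.getCompaction(cell);
      return "compaction marker: " + cd;
    }
    RegionEventDescriptor event = WALEdit.getRegionEventDescriptor(cell);
    if (event != null) {
      return "region event marker: " + event.getEventType();
    }
    return "not a meta marker";
  }
}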
*/ @InterfaceAudience.Private @@ -265,7 +263,7 @@ public class WALEdit implements HeapSize { /** * Reads WALEdit from cells. - * @param cellDecoder Cell decoder. + * @param cellDecoder Cell decoder. * @param expectedCount Expected cell count. * @return Number of KVs read. */ @@ -289,7 +287,7 @@ public class WALEdit implements HeapSize { public long estimatedSerializedSizeOf() { long ret = 0; - for (Cell cell: cells) { + for (Cell cell : cells) { ret += PrivateCellUtil.estimatedSerializedSizeOf(cell); } return ret; @@ -315,37 +313,38 @@ public class WALEdit implements HeapSize { } public static FlushDescriptor getFlushDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumn(cell, METAFAMILY, FLUSH)? - FlushDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumn(cell, METAFAMILY, FLUSH) + ? FlushDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** * @return A meta Marker WALEdit that has a single Cell whose value is the passed in - * regionEventDesc serialized and whose row is this region, - * columnfamily is {@link #METAFAMILY} and qualifier is - * {@link #REGION_EVENT_PREFIX} + {@link RegionEventDescriptor#getEventType()}; - * for example HBASE::REGION_EVENT::REGION_CLOSE. + * regionEventDesc serialized and whose row is this region, columnfamily is + * {@link #METAFAMILY} and qualifier is {@link #REGION_EVENT_PREFIX} + + * {@link RegionEventDescriptor#getEventType()}; for example + * HBASE::REGION_EVENT::REGION_CLOSE. */ public static WALEdit createRegionEventWALEdit(RegionInfo hri, - RegionEventDescriptor regionEventDesc) { + RegionEventDescriptor regionEventDesc) { return createRegionEventWALEdit(getRowForRegion(hri), regionEventDesc); } @InterfaceAudience.Private - public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, - RegionEventDescriptor regionEventDesc) { + public static WALEdit createRegionEventWALEdit(byte[] rowForRegion, + RegionEventDescriptor regionEventDesc) { KeyValue kv = new KeyValue(rowForRegion, METAFAMILY, - createRegionEventDescriptorQualifier(regionEventDesc.getEventType()), - EnvironmentEdgeManager.currentTime(), regionEventDesc.toByteArray()); + createRegionEventDescriptorQualifier(regionEventDesc.getEventType()), + EnvironmentEdgeManager.currentTime(), regionEventDesc.toByteArray()); return new WALEdit().add(kv, METAFAMILY); } /** - * @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll - * return something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it. + * @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll return + * something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it. */ @InterfaceAudience.Private - public static byte [] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) { + public static byte[] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) { return Bytes.toBytes(REGION_EVENT_PREFIX_STR + t.toString()); } @@ -354,28 +353,28 @@ public class WALEdit implements HeapSize { * @return True if this is a Marker Edit and it is a RegionClose type. 
*/ public boolean isRegionCloseMarker() { - return isMetaEdit() && PrivateCellUtil.matchingQualifier(this.cells.get(0), - REGION_EVENT_CLOSE, 0, REGION_EVENT_CLOSE.length); + return isMetaEdit() && PrivateCellUtil.matchingQualifier(this.cells.get(0), REGION_EVENT_CLOSE, + 0, REGION_EVENT_CLOSE.length); } /** - * @return Returns a RegionEventDescriptor made by deserializing the content of the - * passed in cell, IFF the cell is a RegionEventDescriptor - * type WALEdit. + * @return Returns a RegionEventDescriptor made by deserializing the content of the passed in + * cell, IFF the cell is a RegionEventDescriptor type WALEdit. */ public static RegionEventDescriptor getRegionEventDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumnFamilyAndQualifierPrefix(cell, METAFAMILY, REGION_EVENT_PREFIX)? - RegionEventDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumnFamilyAndQualifierPrefix(cell, METAFAMILY, REGION_EVENT_PREFIX) + ? RegionEventDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** * @return A Marker WALEdit that has c serialized as its value */ public static WALEdit createCompaction(final RegionInfo hri, final CompactionDescriptor c) { - byte [] pbbytes = c.toByteArray(); + byte[] pbbytes = c.toByteArray(); KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION, EnvironmentEdgeManager.currentTime(), pbbytes); - return new WALEdit().add(kv, METAFAMILY); //replication scope null so this won't be replicated + return new WALEdit().add(kv, METAFAMILY); // replication scope null so this won't be replicated } public static byte[] getRowForRegion(RegionInfo hri) { @@ -383,7 +382,7 @@ public class WALEdit implements HeapSize { if (startKey.length == 0) { // empty row key is not allowed in mutations because it is both the start key and the end key // we return the smallest byte[] that is bigger (in lex comparison) than byte[0]. - return new byte[] {0}; + return new byte[] { 0 }; } return startKey; } @@ -394,12 +393,11 @@ public class WALEdit implements HeapSize { * @return deserialized CompactionDescriptor or null. */ public static CompactionDescriptor getCompaction(Cell kv) throws IOException { - return isCompactionMarker(kv)? CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)): null; + return isCompactionMarker(kv) ? CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)) : null; } /** * Returns true if the given cell is a serialized {@link CompactionDescriptor} - * * @see #getCompaction(Cell) */ public static boolean isCompactionMarker(Cell cell) { @@ -408,15 +406,14 @@ public class WALEdit implements HeapSize { /** * Create a bulk loader WALEdit - * * @param hri The RegionInfo for the region in which we are bulk loading * @param bulkLoadDescriptor The descriptor for the Bulk Loader * @return The WALEdit for the BulkLoad */ public static WALEdit createBulkLoadEvent(RegionInfo hri, - WALProtos.BulkLoadDescriptor bulkLoadDescriptor) { + WALProtos.BulkLoadDescriptor bulkLoadDescriptor) { KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, BULK_LOAD, - EnvironmentEdgeManager.currentTime(), bulkLoadDescriptor.toByteArray()); + EnvironmentEdgeManager.currentTime(), bulkLoadDescriptor.toByteArray()); return new WALEdit().add(kv, METAFAMILY); } @@ -426,21 +423,20 @@ public class WALEdit implements HeapSize { * @return deserialized BulkLoadDescriptor or null. 
*/ public static WALProtos.BulkLoadDescriptor getBulkLoadDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD)? - WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD) + ? WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** - * Append the given map of family->edits to a WALEdit data structure. - * This does not write to the WAL itself. - * Note that as an optimization, we will stamp the Set of column families into the WALEdit - * to save on our having to calculate column families subsequently down in the actual WAL + * Append the given map of family->edits to a WALEdit data structure. This does not write to the + * WAL itself. Note that as an optimization, we will stamp the Set of column families into the + * WALEdit to save on our having to calculate column families subsequently down in the actual WAL * writing. - * * @param familyMap map of family->edits */ public void add(Map> familyMap) { - for (Map.Entry> e: familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { // 'foreach' loop NOT used. See HBASE-12023 "...creates too many iterator objects." int listSize = e.getValue().size(); // Add all Cells first and then at end, add the family rather than call {@link #add(Cell)} @@ -452,7 +448,7 @@ public class WALEdit implements HeapSize { } } - private void addFamily(byte [] family) { + private void addFamily(byte[] family) { getOrCreateFamilies().add(family); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index d258b6198bf..e3968ae3cff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -39,23 +39,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Entry point for users of the Write Ahead Log. - * Acts as the shim between internal use and the particular WALProvider we use to handle wal - * requests. - * - * Configure which provider gets used with the configuration setting "hbase.wal.provider". Available - * implementations: + * Entry point for users of the Write Ahead Log. Acts as the shim between internal use and the + * particular WALProvider we use to handle wal requests. Configure which provider gets used with the + * configuration setting "hbase.wal.provider". Available implementations: *

<ul>
- * <li>defaultProvider : whatever provider is standard for the hbase version. Currently
- * "asyncfs"</li>
- * <li>asyncfs : a provider that will run on top of an implementation of the Hadoop
- * FileSystem interface via an asynchronous client.</li>
- * <li>filesystem : a provider that will run on top of an implementation of the Hadoop
- * FileSystem interface via HDFS's synchronous DFSClient.</li>
- * <li>multiwal : a provider that will use multiple "filesystem" wal instances per region
- * server.</li>
+ * <li>defaultProvider : whatever provider is standard for the hbase version. Currently
+ * "asyncfs"</li>
+ * <li>asyncfs : a provider that will run on top of an implementation of the Hadoop
+ * FileSystem interface via an asynchronous client.</li>
+ * <li>filesystem : a provider that will run on top of an implementation of the Hadoop
+ * FileSystem interface via HDFS's synchronous DFSClient.</li>
+ * <li>multiwal : a provider that will use multiple "filesystem" wal instances per region
+ * server.</li>
 * </ul>
      - * * Alternatively, you may provide a custom implementation of {@link WALProvider} by class name. */ @InterfaceAudience.Private @@ -73,6 +69,7 @@ public class WALFactory { asyncfs(AsyncFSWALProvider.class); final Class clazz; + Providers(Class clazz) { this.clazz = clazz; } @@ -137,8 +134,10 @@ public class WALFactory { // AsyncFSWALProvider is not guaranteed to work on all Hadoop versions, when it's chosen as // the default and we can't use it, we want to fall back to FSHLog which we know works on // all versions. - if (provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class - && !AsyncFSWALProvider.load()) { + if ( + provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class + && !AsyncFSWALProvider.load() + ) { // AsyncFSWAL has better performance in most cases, and also uses less resources, we will // try to use it if possible. It deeply hacks into the internal of DFSClient so will be // easily broken when upgrading hadoop. @@ -159,7 +158,7 @@ public class WALFactory { } WALProvider createProvider(Class clazz, String providerId) - throws IOException { + throws IOException { LOG.info("Instantiating WALProvider of type " + clazz); try { final WALProvider result = clazz.getDeclaredConstructor().newInstance(); @@ -188,8 +187,8 @@ public class WALFactory { } /** - * @param conf must not be null, will keep a reference to read params in later reader/writer - * instances. + * @param conf must not be null, will keep a reference to read params in later reader/writer + * instances. * @param abortable the server to abort */ public WALFactory(Configuration conf, String factoryId, Abortable abortable) throws IOException { @@ -215,9 +214,8 @@ public class WALFactory { } /** - * Shutdown all WALs and clean up any underlying storage. - * Use only when you will not need to replay and edits that have gone to any wals from this - * factory. + * Shutdown all WALs and clean up any underlying storage. Use only when you will not need to + * replay and edits that have gone to any wals from this factory. */ public void close() throws IOException { final WALProvider metaProvider = this.metaProvider.get(); @@ -232,9 +230,9 @@ public class WALFactory { } /** - * Tell the underlying WAL providers to shut down, but do not clean up underlying storage. - * If you are not ending cleanly and will need to replay edits from this factory's wals, - * use this method if you can as it will try to leave things as tidy as possible. + * Tell the underlying WAL providers to shut down, but do not clean up underlying storage. If you + * are not ending cleanly and will need to replay edits from this factory's wals, use this method + * if you can as it will try to leave things as tidy as possible. */ public void shutdown() throws IOException { IOException exception = null; @@ -242,7 +240,7 @@ public class WALFactory { if (null != metaProvider) { try { metaProvider.shutdown(); - } catch(IOException ioe) { + } catch (IOException ioe) { exception = ioe; } } @@ -275,7 +273,7 @@ public class WALFactory { // the WAL provider should be an enum. Proceed } } - if (clz == null){ + if (clz == null) { clz = getProviderClass(META_WAL_PROVIDER, conf.get(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); } provider = createProvider(clz, AbstractFSWALProvider.META_WAL_PROVIDER_ID); @@ -293,8 +291,10 @@ public class WALFactory { */ public WAL getWAL(RegionInfo region) throws IOException { // Use different WAL for hbase:meta. Instantiates the meta WALProvider if not already up. 
- if (region != null && region.isMetaRegion() && - region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { + if ( + region != null && region.isMetaRegion() + && region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID + ) { return getMetaProvider().getWAL(region); } else { return provider.getWAL(region); @@ -302,24 +302,24 @@ public class WALFactory { } public Reader createReader(final FileSystem fs, final Path path) throws IOException { - return createReader(fs, path, (CancelableProgressable)null); + return createReader(fs, path, (CancelableProgressable) null); } /** - * Create a reader for the WAL. If you are reading from a file that's being written to and need - * to reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method - * then just seek back to the last known good position. - * @return A WAL reader. Close when done with it. + * Create a reader for the WAL. If you are reading from a file that's being written to and need to + * reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method then just seek + * back to the last known good position. + * @return A WAL reader. Close when done with it. */ - public Reader createReader(final FileSystem fs, final Path path, - CancelableProgressable reporter) throws IOException { + public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter) + throws IOException { return createReader(fs, path, reporter, true); } public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter, - boolean allowCustom) throws IOException { + boolean allowCustom) throws IOException { Class lrClass = - allowCustom ? logReaderClass : ProtobufLogReader.class; + allowCustom ? logReaderClass : ProtobufLogReader.class; try { // A wal file could be under recovery, so it may take several // tries to get it open. Instead of claiming it is corrupted, retry @@ -348,10 +348,11 @@ public class WALFactory { // Only inspect the Exception to consider retry when it's an IOException if (e instanceof IOException) { String msg = e.getMessage(); - if (msg != null - && (msg.contains("Cannot obtain block length") - || msg.contains("Could not obtain the last block") || msg - .matches("Blocklist for [^ ]* has changed.*"))) { + if ( + msg != null && (msg.contains("Cannot obtain block length") + || msg.contains("Could not obtain the last block") + || msg.matches("Blocklist for [^ ]* has changed.*")) + ) { if (++nbAttempt == 1) { LOG.warn("Lease should have recovered. This is not expected. Will retry", e); } @@ -360,7 +361,7 @@ public class WALFactory { } if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) { LOG.error("Can't open after " + nbAttempt + " attempts and " - + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); } else { try { Thread.sleep(nbAttempt < 3 ? 500 : 1000); @@ -387,8 +388,7 @@ public class WALFactory { } /** - * Create a writer for the WAL. - * Uses defaults. + * Create a writer for the WAL. Uses defaults. *

      * Should be package-private. public only for tests and * {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} @@ -399,12 +399,11 @@ public class WALFactory { } /** - * Should be package-private, visible for recovery testing. - * Uses defaults. + * Should be package-private, visible for recovery testing. Uses defaults. * @return an overwritable writer for recovered edits. caller should close. */ public Writer createRecoveredEditsWriter(final FileSystem fs, final Path path) - throws IOException { + throws IOException { return FSHLogProvider.createWriter(conf, fs, path, true); } @@ -414,7 +413,7 @@ public class WALFactory { // For now, first Configuration object wins. Practically this just impacts the reader/writer class private static final AtomicReference singleton = new AtomicReference<>(); private static final String SINGLETON_ID = WALFactory.class.getName(); - + // Public only for FSHLog public static WALFactory getInstance(Configuration configuration) { WALFactory factory = singleton.get(); @@ -436,55 +435,51 @@ public class WALFactory { } /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. + * Create a reader for the given path, accept custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. * @return a WAL Reader, caller must close. */ public static Reader createReader(final FileSystem fs, final Path path, - final Configuration configuration) throws IOException { + final Configuration configuration) throws IOException { return getInstance(configuration).createReader(fs, path); } /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. + * Create a reader for the given path, accept custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. * @return a WAL Reader, caller must close. */ static Reader createReader(final FileSystem fs, final Path path, - final Configuration configuration, final CancelableProgressable reporter) throws IOException { + final Configuration configuration, final CancelableProgressable reporter) throws IOException { return getInstance(configuration).createReader(fs, path, reporter); } /** - * Create a reader for the given path, ignore custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. - * only public pending move of {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} + * Create a reader for the given path, ignore custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. only public pending move of + * {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} * @return a WAL Reader, caller must close. */ public static Reader createReaderIgnoreCustomClass(final FileSystem fs, final Path path, - final Configuration configuration) throws IOException { + final Configuration configuration) throws IOException { return getInstance(configuration).createReader(fs, path, null, false); } /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. + * If you already have a WALFactory, you should favor the instance method. Uses defaults. * @return a Writer that will overwrite files. Caller must close. 
*/ static Writer createRecoveredEditsWriter(final FileSystem fs, final Path path, - final Configuration configuration) - throws IOException { + final Configuration configuration) throws IOException { return FSHLogProvider.createWriter(configuration, fs, path, true); } /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. + * If you already have a WALFactory, you should favor the instance method. Uses defaults. * @return a writer that won't overwrite files. Caller must close. */ public static Writer createWALWriter(final FileSystem fs, final Path path, - final Configuration configuration) - throws IOException { + final Configuration configuration) throws IOException { return FSHLogProvider.createWriter(configuration, fs, path, false); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java index fdbacbda277..2c6ae83f63d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.wal; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -24,19 +30,11 @@ import org.apache.hadoop.hbase.regionserver.SequenceId; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - - /** * Key for WAL Entry. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION, - HBaseInterfaceAudience.COPROC}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION, + HBaseInterfaceAudience.COPROC }) public interface WALKey extends SequenceId, Comparable { /** * Unmodifiable empty list of UUIDs. @@ -87,31 +85,30 @@ public interface WALKey extends SequenceId, Comparable { /** * Add a named String value to this WALKey to be persisted into the WAL - * @param attributeKey Name of the attribute + * @param attributeKey Name of the attribute * @param attributeValue Value of the attribute */ void addExtendedAttribute(String attributeKey, byte[] attributeValue); - /** - * Return a named String value injected into the WALKey during processing, such as by a - * coprocessor - * @param attributeKey The key of a key / value pair - */ - default byte[] getExtendedAttribute(String attributeKey){ + /** + * Return a named String value injected into the WALKey during processing, such as by a + * coprocessor + * @param attributeKey The key of a key / value pair + */ + default byte[] getExtendedAttribute(String attributeKey) { return null; } - /** - * Returns a map of all extended attributes injected into this WAL key. - */ + /** + * Returns a map of all extended attributes injected into this WAL key. + */ default Map getExtendedAttributes() { return new HashMap<>(); } + /** - * Produces a string map for this key. Useful for programmatic use and - * manipulation of the data stored in an WALKeyImpl, for example, printing - * as JSON. - * + * Produces a string map for this key. Useful for programmatic use and manipulation of the data + * stored in an WALKeyImpl, for example, printing as JSON. 
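Putting the static createReader helper together with WALKey#toStringMap, a small dump utility might look like the sketch below; the WAL path comes from the command line, and nothing here is prescribed by this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalDumpExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path walPath = new Path(args[0]); // a file under the WALs directory
    FileSystem fs = walPath.getFileSystem(conf);
    try (WAL.Reader reader = WALFactory.createReader(fs, walPath, conf)) {
      WAL.Entry entry;
      while ((entry = reader.next()) != null) {
        // toStringMap() carries table, region, sequence and any extended attributes
        System.out.println(entry.getKey().toStringMap());
        System.out.println("  cells=" + entry.getEdit().size());
      }
    }
  }
}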
* @return a Map containing data from this key */ default Map toStringMap() { @@ -120,8 +117,8 @@ public interface WALKey extends SequenceId, Comparable { stringMap.put("region", Bytes.toStringBinary(getEncodedRegionName())); stringMap.put("sequence", getSequenceId()); Map extendedAttributes = getExtendedAttributes(); - if (extendedAttributes != null){ - for (Map.Entry entry : extendedAttributes.entrySet()){ + if (extendedAttributes != null) { + for (Map.Entry entry : extendedAttributes.entrySet()) { stringMap.put(entry.getKey(), Bytes.toStringBinary(entry.getValue())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java index b2a995614ee..22e013afa46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,19 +44,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FamilyScope; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.ScopeType; /** - * Default implementation of Key for an Entry in the WAL. - * For internal use only though Replication needs to have access. - * - * The log intermingles edits to many tables and rows, so each log entry - * identifies the appropriate table and row. Within a table and row, they're - * also sorted. - * - *

      Some Transactional edits (START, COMMIT, ABORT) will not have an associated row. - * + * Default implementation of Key for an Entry in the WAL. For internal use only though Replication + * needs to have access. The log intermingles edits to many tables and rows, so each log entry + * identifies the appropriate table and row. Within a table and row, they're also sorted. + *

      + * Some Transactional edits (START, COMMIT, ABORT) will not have an associated row. */ // TODO: Key and WALEdit are never used separately, or in one-to-many relation, for practical -// purposes. They need to be merged into WALEntry. -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION}) +// purposes. They need to be merged into WALEntry. +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION }) public class WALKeyImpl implements WALKey { public static final WALKeyImpl EMPTY_WALKEYIMPL = new WALKeyImpl(); @@ -65,11 +61,10 @@ public class WALKeyImpl implements WALKey { } /** - * Use it to complete mvcc transaction. This WALKeyImpl was part of - * (the transaction is started when you call append; see the comment on FSHLog#append). To - * complete call + * Use it to complete mvcc transaction. This WALKeyImpl was part of (the transaction is started + * when you call append; see the comment on FSHLog#append). To complete call + * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} or * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} - * or {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} * @return A WriteEntry gotten from local WAL subsystem. * @see #setWriteEntry(MultiVersionConcurrencyControl.WriteEntry) */ @@ -84,7 +79,7 @@ public class WALKeyImpl implements WALKey { this.sequenceId = writeEntry.getWriteNumber(); } - private byte [] encodedRegionName; + private byte[] encodedRegionName; private TableName tablename; @@ -119,19 +114,18 @@ public class WALKeyImpl implements WALKey { private Map extendedAttributes; public WALKeyImpl() { - init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null, null); + init(null, null, 0L, HConstants.LATEST_TIMESTAMP, new ArrayList<>(), HConstants.NO_NONCE, + HConstants.NO_NONCE, null, null, null); } public WALKeyImpl(final NavigableMap replicationScope) { - init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope, null); + init(null, null, 0L, HConstants.LATEST_TIMESTAMP, new ArrayList<>(), HConstants.NO_NONCE, + HConstants.NO_NONCE, null, replicationScope, null); } @InterfaceAudience.Private - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - long logSeqNum, - final long now, UUID clusterId) { + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + final long now, UUID clusterId) { List clusterIds = new ArrayList<>(1); clusterIds.add(clusterId); init(encodedRegionName, tablename, logSeqNum, now, clusterIds, HConstants.NO_NONCE, @@ -140,90 +134,68 @@ public class WALKeyImpl implements WALKey { // TODO: Fix being able to pass in sequenceid. public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now) { - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - EMPTY_UUIDS, - HConstants.NO_NONCE, - HConstants.NO_NONCE, - null, null, null); + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, + HConstants.NO_NONCE, null, null, null); } // TODO: Fix being able to pass in sequenceid. 
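To make the mvcc hand-off in the getWriteEntry/setWriteEntry javadoc concrete, here is a hand-driven sketch of the begin/complete pairing; in a real append the WAL subsystem stamps the WriteEntry onto the key via setWriteEntry, so treat this only as an illustration of the transaction lifecycle (the assumption is that MultiVersionConcurrencyControl#begin is used to open the transaction):

import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;

public class MvccHandshakeExample {
  public static void main(String[] args) {
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    MultiVersionConcurrencyControl.WriteEntry writeEntry = mvcc.begin();
    try {
      // ... apply the edit to the memstore here ...
      System.out.println("write number = " + writeEntry.getWriteNumber());
    } finally {
      mvcc.complete(writeEntry); // readers only see the edit once the transaction completes
    }
    System.out.println("read point now = " + mvcc.getReadPoint());
  }
}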
public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, - final NavigableMap replicationScope) { + final NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, null, replicationScope, null); + HConstants.NO_NONCE, null, replicationScope, null); } public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, - MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { + MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, mvcc, replicationScope, null); + HConstants.NO_NONCE, mvcc, replicationScope, null); } public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, - MultiVersionConcurrencyControl mvcc, - final NavigableMap replicationScope, - Map extendedAttributes) { + MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope, + Map extendedAttributes) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, mvcc, replicationScope, extendedAttributes); + HConstants.NO_NONCE, mvcc, replicationScope, extendedAttributes); } - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - final long now, - MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - EMPTY_UUIDS, - HConstants.NO_NONCE, - HConstants.NO_NONCE, - mvcc, null, null); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, + HConstants.NO_NONCE, mvcc, null, null); } /** - * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes. - * Intended for coprocessors to add annotations to a system-generated WALKey - * for persistence to the WAL. - * @param key Key to be copied into this new key + * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes. Intended + * for coprocessors to add annotations to a system-generated WALKey for persistence to the WAL. + * @param key Key to be copied into this new key * @param extendedAttributes Extra attributes to copy into the new key */ - public WALKeyImpl(WALKeyImpl key, - Map extendedAttributes){ - init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), - key.getWriteTime(), key.getClusterIds(), key.getNonceGroup(), key.getNonce(), - key.getMvcc(), key.getReplicationScopes(), extendedAttributes); + public WALKeyImpl(WALKeyImpl key, Map extendedAttributes) { + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), key.getWriteTime(), + key.getClusterIds(), key.getNonceGroup(), key.getNonce(), key.getMvcc(), + key.getReplicationScopes(), extendedAttributes); } /** - * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the - * parent interface is missing, plus some extended attributes. Intended - * for coprocessors to add annotations to a system-generated WALKey for - * persistence to the WAL. + * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the parent + * interface is missing, plus some extended attributes. 
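The copy constructor described above is the intended hook for coprocessor-style annotations; a minimal sketch follows, with the encoded region name, attribute key, and attribute value invented purely for illustration:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

public class ExtendedAttributeExample {
  public static void main(String[] args) {
    byte[] encodedRegionName = Bytes.toBytes("1588230740"); // illustrative encoded name
    WALKeyImpl base = new WALKeyImpl(encodedRegionName, TableName.valueOf("example_table"),
      EnvironmentEdgeManager.currentTime());
    Map<String, byte[]> attrs = new HashMap<>();
    attrs.put("origin", Bytes.toBytes("bulk-import-job-42"));
    WALKeyImpl annotated = new WALKeyImpl(base, attrs);
    // The annotation travels with the key: visible via getExtendedAttribute()/toStringMap()
    System.out.println(Bytes.toString(annotated.getExtendedAttribute("origin")));
  }
}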
Intended for coprocessors to add + * annotations to a system-generated WALKey for persistence to the WAL. */ - public WALKeyImpl(WALKey key, - List clusterIds, - MultiVersionConcurrencyControl mvcc, - final NavigableMap replicationScopes, - Map extendedAttributes){ - init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), - key.getWriteTime(), clusterIds, key.getNonceGroup(), key.getNonce(), - mvcc, replicationScopes, extendedAttributes); + public WALKeyImpl(WALKey key, List clusterIds, MultiVersionConcurrencyControl mvcc, + final NavigableMap replicationScopes, Map extendedAttributes) { + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), key.getWriteTime(), + clusterIds, key.getNonceGroup(), key.getNonce(), mvcc, replicationScopes, extendedAttributes); } + /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - *

      Used by log splitting and snapshots. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. + *

      + * Used by log splitting and snapshots. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). + * HRegionInfo#getEncodedNameAsBytes(). * @param tablename - name of table * @param logSeqNum - log sequence number * @param now Time at which this edit was written. @@ -231,23 +203,22 @@ public class WALKeyImpl implements WALKey { * @param nonceGroup the nonceGroup * @param nonce the nonce * @param mvcc the mvcc associate the WALKeyImpl - * @param replicationScope the non-default replication scope - * associated with the region's column families + * @param replicationScope the non-default replication scope associated with the region's column + * families */ // TODO: Fix being able to pass in sequenceid. public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, - final long now, List clusterIds, long nonceGroup, long nonce, - MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { + final long now, List clusterIds, long nonceGroup, long nonce, + MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc, - replicationScope, null); + replicationScope, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - *

      Used by log splitting and snapshots. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. + *

      + * Used by log splitting and snapshots. * @param encodedRegionName Encoded name of the region as returned by * HRegionInfo#getEncodedNameAsBytes(). * @param tablename - name of table @@ -256,117 +227,77 @@ public class WALKeyImpl implements WALKey { * @param clusterIds the clusters that have consumed the change(used in Replication) */ // TODO: Fix being able to pass in sequenceid. - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - final long now, - List clusterIds, - long nonceGroup, - long nonce, - MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, - nonce, mvcc, null, null); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + final long now, List clusterIds, long nonceGroup, long nonce, + MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc, null, + null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by * HRegionInfo#getEncodedNameAsBytes(). * @param tablename the tablename * @param now Time at which this edit was written. - * @param clusterIds the clusters that have consumed the change(used in Replication) - * @param nonceGroup - * @param nonce - * @param mvcc mvcc control used to generate sequence numbers and control read/write points + * @param clusterIds the clusters that have consumed the change(used in Replication) nn + * * @param mvcc mvcc control used to generate sequence numbers and + * control read/write points */ - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc) { + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, - null, null); + null, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). - * @param tablename - * @param now Time at which this edit was written. + * HRegionInfo#getEncodedNameAsBytes(). n * @param now Time + * at which this edit was written. 
* @param clusterIds the clusters that have consumed the change(used in Replication) * @param nonceGroup the nonceGroup * @param nonce the nonce - * @param mvcc mvcc control used to generate sequence numbers and control read/write points + * @param mvcc mvcc control used to generate sequence numbers and control read/write + * points * @param replicationScope the non-default replication scope of the column families */ - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope) { + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, - replicationScope, null); + replicationScope, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). - * @param tablename - * @param logSeqNum - * @param nonceGroup - * @param nonce + * HRegionInfo#getEncodedNameAsBytes(). nnnn */ // TODO: Fix being able to pass in sequenceid. - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - long nonceGroup, - long nonce, - final MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, - tablename, - logSeqNum, - EnvironmentEdgeManager.currentTime(), - EMPTY_UUIDS, - nonceGroup, - nonce, - mvcc, null, null); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + long nonceGroup, long nonce, final MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, logSeqNum, EnvironmentEdgeManager.currentTime(), EMPTY_UUIDS, + nonceGroup, nonce, mvcc, null, null); } - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope, - Map extendedAttributes){ - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - clusterIds, - nonceGroup, - nonce, - mvcc, replicationScope, extendedAttributes); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope, + Map extendedAttributes) { + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, + replicationScope, extendedAttributes); } @InterfaceAudience.Private - protected void init(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - final long now, - List clusterIds, - long nonceGroup, - long nonce, - MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope, - Map extendedAttributes) { + protected void init(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + final long now, List clusterIds, long nonceGroup, long nonce, + 
MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope, + Map extendedAttributes) { this.sequenceId = logSeqNum; this.writeTime = now; this.clusterIds = clusterIds; @@ -394,12 +325,12 @@ public class WALKeyImpl implements WALKey { */ @Deprecated public void setCompressionContext(CompressionContext compressionContext) { - //do nothing + // do nothing } /** @return encoded region name */ @Override - public byte [] getEncodedRegionName() { + public byte[] getEncodedRegionName() { return encodedRegionName; } @@ -409,7 +340,8 @@ public class WALKeyImpl implements WALKey { return tablename; } - /** @return log sequence number + /** + * @return log sequence number * @deprecated Use {@link #getSequenceId()} */ @Deprecated @@ -498,27 +430,28 @@ public class WALKeyImpl implements WALKey { * returns DEFAULT_CLUSTER_ID (cases where replication is not enabled) */ @Override - public UUID getOriginatingClusterId(){ - return clusterIds.isEmpty()? HConstants.DEFAULT_CLUSTER_ID: clusterIds.get(0); + public UUID getOriginatingClusterId() { + return clusterIds.isEmpty() ? HConstants.DEFAULT_CLUSTER_ID : clusterIds.get(0); } @Override - public void addExtendedAttribute(String attributeKey, byte[] attributeValue){ - if (extendedAttributes == null){ + public void addExtendedAttribute(String attributeKey, byte[] attributeValue) { + if (extendedAttributes == null) { extendedAttributes = new HashMap(); } extendedAttributes.put(attributeKey, attributeValue); } @Override - public byte[] getExtendedAttribute(String attributeKey){ + public byte[] getExtendedAttribute(String attributeKey) { return extendedAttributes != null ? extendedAttributes.get(attributeKey) : null; } @Override - public Map getExtendedAttributes(){ - return extendedAttributes != null ? new HashMap(extendedAttributes) : - new HashMap(); + public Map getExtendedAttributes() { + return extendedAttributes != null + ? new HashMap(extendedAttributes) + : new HashMap(); } @Override @@ -534,7 +467,7 @@ public class WALKeyImpl implements WALKey { if (obj == null || getClass() != obj.getClass()) { return false; } - return compareTo((WALKey)obj) == 0; + return compareTo((WALKey) obj) == 0; } @Override @@ -553,7 +486,7 @@ public class WALKeyImpl implements WALKey { long otherSid = o.getSequenceId(); if (sid < otherSid) { result = -1; - } else if (sid > otherSid) { + } else if (sid > otherSid) { result = 1; } if (result == 0) { @@ -569,10 +502,9 @@ public class WALKeyImpl implements WALKey { } /** - * Drop this instance's tablename byte array and instead - * hold a reference to the provided tablename. This is not - * meant to be a general purpose setter - it's only used - * to collapse references to conserve memory. + * Drop this instance's tablename byte array and instead hold a reference to the provided + * tablename. This is not meant to be a general purpose setter - it's only used to collapse + * references to conserve memory. */ void internTableName(TableName tablename) { // We should not use this as a setter - only to swap @@ -582,12 +514,11 @@ public class WALKeyImpl implements WALKey { } /** - * Drop this instance's region name byte array and instead - * hold a reference to the provided region name. This is not - * meant to be a general purpose setter - it's only used - * to collapse references to conserve memory. + * Drop this instance's region name byte array and instead hold a reference to the provided region + * name. This is not meant to be a general purpose setter - it's only used to collapse references + * to conserve memory. 
*/ - void internEncodedRegionName(byte []encodedRegionName) { + void internEncodedRegionName(byte[] encodedRegionName) { // We should not use this as a setter - only to swap // in a new reference to the same table name. assert Bytes.equals(this.encodedRegionName, encodedRegionName); @@ -595,7 +526,7 @@ public class WALKeyImpl implements WALKey { } public WALProtos.WALKey.Builder getBuilder(WALCellCodec.ByteStringCompressor compressor) - throws IOException { + throws IOException { WALProtos.WALKey.Builder builder = WALProtos.WALKey.newBuilder(); builder.setEncodedRegionName( compressor.compress(this.encodedRegionName, CompressionContext.DictionaryIndex.REGION)); @@ -621,16 +552,16 @@ public class WALKeyImpl implements WALKey { if (replicationScope != null) { for (Map.Entry e : replicationScope.entrySet()) { ByteString family = - compressor.compress(e.getKey(), CompressionContext.DictionaryIndex.FAMILY); + compressor.compress(e.getKey(), CompressionContext.DictionaryIndex.FAMILY); builder.addScopes(FamilyScope.newBuilder().setFamily(family) - .setScopeType(ScopeType.forNumber(e.getValue()))); + .setScopeType(ScopeType.forNumber(e.getValue()))); } } - if (extendedAttributes != null){ - for (Map.Entry e : extendedAttributes.entrySet()){ - WALProtos.Attribute attr = WALProtos.Attribute.newBuilder(). - setKey(e.getKey()).setValue(compressor.compress(e.getValue(), - CompressionContext.DictionaryIndex.TABLE)).build(); + if (extendedAttributes != null) { + for (Map.Entry e : extendedAttributes.entrySet()) { + WALProtos.Attribute attr = WALProtos.Attribute.newBuilder().setKey(e.getKey()) + .setValue(compressor.compress(e.getValue(), CompressionContext.DictionaryIndex.TABLE)) + .build(); builder.addExtendedAttributes(attr); } } @@ -638,11 +569,11 @@ public class WALKeyImpl implements WALKey { } public void readFieldsFromPb(WALProtos.WALKey walKey, - WALCellCodec.ByteStringUncompressor uncompressor) throws IOException { + WALCellCodec.ByteStringUncompressor uncompressor) throws IOException { this.encodedRegionName = uncompressor.uncompress(walKey.getEncodedRegionName(), CompressionContext.DictionaryIndex.REGION); byte[] tablenameBytes = - uncompressor.uncompress(walKey.getTableName(), CompressionContext.DictionaryIndex.TABLE); + uncompressor.uncompress(walKey.getTableName(), CompressionContext.DictionaryIndex.TABLE); this.tablename = TableName.valueOf(tablenameBytes); clusterIds.clear(); for (HBaseProtos.UUID clusterId : walKey.getClusterIdsList()) { @@ -659,7 +590,7 @@ public class WALKeyImpl implements WALKey { this.replicationScope = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (FamilyScope scope : walKey.getScopesList()) { byte[] family = - uncompressor.uncompress(scope.getFamily(), CompressionContext.DictionaryIndex.FAMILY); + uncompressor.uncompress(scope.getFamily(), CompressionContext.DictionaryIndex.FAMILY); this.replicationScope.put(family, scope.getScopeType().getNumber()); } } @@ -668,11 +599,11 @@ public class WALKeyImpl implements WALKey { if (walKey.hasOrigSequenceNumber()) { this.origLogSeqNum = walKey.getOrigSequenceNumber(); } - if (walKey.getExtendedAttributesCount() > 0){ + if (walKey.getExtendedAttributesCount() > 0) { this.extendedAttributes = new HashMap<>(walKey.getExtendedAttributesCount()); - for (WALProtos.Attribute attr : walKey.getExtendedAttributesList()){ + for (WALProtos.Attribute attr : walKey.getExtendedAttributesList()) { byte[] value = - uncompressor.uncompress(attr.getValue(), CompressionContext.DictionaryIndex.TABLE); + uncompressor.uncompress(attr.getValue(), 
CompressionContext.DictionaryIndex.TABLE); extendedAttributes.put(attr.getKey(), value); } } @@ -692,7 +623,7 @@ public class WALKeyImpl implements WALKey { size += Bytes.SIZEOF_LONG; // nonce } if (replicationScope != null) { - for (Map.Entry scope: replicationScope.entrySet()) { + for (Map.Entry scope : replicationScope.entrySet()) { size += scope.getKey().length; size += Bytes.SIZEOF_INT; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index d274d8b3fe3..ef52c278c4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @@ -56,17 +57,10 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; import org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser; /** - * WALPrettyPrinter prints the contents of a given WAL with a variety of - * options affecting formatting and extent of content. - * - * It targets two usage cases: pretty printing for ease of debugging directly by - * humans, and JSON output for consumption by monitoring and/or maintenance - * scripts. - * - * It can filter by row, region, or sequence id. - * - * It can also toggle output of values. - * + * WALPrettyPrinter prints the contents of a given WAL with a variety of options affecting + * formatting and extent of content. It targets two usage cases: pretty printing for ease of + * debugging directly by humans, and JSON output for consumption by monitoring and/or maintenance + * scripts. It can filter by row, region, or sequence id. It can also toggle output of values. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -75,7 +69,7 @@ public class WALPrettyPrinter { // Output template for pretty printing. private static final String outputTmpl = - "Sequence=%s, table=%s, region=%s, at write timestamp=%s"; + "Sequence=%s, table=%s, region=%s, at write timestamp=%s"; private boolean outputValues; private boolean outputJSON; @@ -99,47 +93,27 @@ public class WALPrettyPrinter { private PrintStream out; // for JSON encoding private static final Gson GSON = GsonUtil.createGson().create(); - //allows for jumping straight to a given portion of the file + // allows for jumping straight to a given portion of the file private long position; /** * Basic constructor that simply initializes values to reasonable defaults. */ public WALPrettyPrinter() { - this(false, false, -1, new HashSet<>(), null, - null, null, false, false, System.out); + this(false, false, -1, new HashSet<>(), null, null, null, false, false, System.out); } /** - * Fully specified constructor. 
- * - * @param outputValues - * when true, enables output of values along with other log - * information - * @param outputJSON - * when true, enables output in JSON format rather than a - * "pretty string" - * @param sequence - * when nonnegative, serves as a filter; only log entries with this - * sequence id will be printed - * @param tableSet - * when non null, serves as a filter. only entries corresponding to tables - * in the tableSet are printed - * @param region - * when not null, serves as a filter; only log entries from this - * region will be printed - * @param row - * when not null, serves as a filter; only log entries from this row - * will be printed - * @param rowPrefix - * when not null, serves as a filter; only log entries with row key - * having this prefix will be printed - * @param persistentOutput - * keeps a single list running for multiple files. if enabled, the - * endPersistentOutput() method must be used! - * @param out - * Specifies an alternative to stdout for the destination of this - * PrettyPrinter's output. + * Fully specified constructor. n * when true, enables output of values along with other log + * information n * when true, enables output in JSON format rather than a "pretty string" n * when + * nonnegative, serves as a filter; only log entries with this sequence id will be printed n * + * when non null, serves as a filter. only entries corresponding to tables in the tableSet are + * printed n * when not null, serves as a filter; only log entries from this region will be + * printed n * when not null, serves as a filter; only log entries from this row will be printed n + * * when not null, serves as a filter; only log entries with row key having this prefix will be + * printed n * keeps a single list running for multiple files. if enabled, the + * endPersistentOutput() method must be used! n * Specifies an alternative to stdout for the + * destination of this PrettyPrinter's output. 
*/ public WALPrettyPrinter(boolean outputValues, boolean outputJSON, long sequence, Set tableSet, String region, String row, String rowPrefix, boolean outputOnlyRowKey, @@ -189,11 +163,8 @@ public class WALPrettyPrinter { } /** - * sets the region by which output will be filtered - * - * @param sequence - * when nonnegative, serves as a filter; only log entries with this - * sequence id will be printed + * sets the region by which output will be filtered n * when nonnegative, serves as a filter; only + * log entries with this sequence id will be printed */ public void setSequenceFilter(long sequence) { this.sequence = sequence; @@ -206,34 +177,26 @@ public class WALPrettyPrinter { public void setTableFilter(String tablesWithDelimiter) { Collections.addAll(tableSet, tablesWithDelimiter.split(",")); } + /** - * sets the region by which output will be filtered - * - * @param region - * when not null, serves as a filter; only log entries from this - * region will be printed + * sets the region by which output will be filtered n * when not null, serves as a filter; only + * log entries from this region will be printed */ public void setRegionFilter(String region) { this.region = region; } /** - * sets the row key by which output will be filtered - * - * @param row - * when not null, serves as a filter; only log entries from this row - * will be printed + * sets the row key by which output will be filtered n * when not null, serves as a filter; only + * log entries from this row will be printed */ public void setRowFilter(String row) { this.row = row; } /** - * sets the rowPrefix key prefix by which output will be filtered - * - * @param rowPrefix - * when not null, serves as a filter; only log entries with rows - * having this prefix will be printed + * sets the rowPrefix key prefix by which output will be filtered n * when not null, serves as a + * filter; only log entries with rows having this prefix will be printed */ public void setRowPrefixFilter(String rowPrefix) { this.rowPrefix = rowPrefix; @@ -247,17 +210,16 @@ public class WALPrettyPrinter { } /** - * sets the position to start seeking the WAL file - * @param position - * initial position to start seeking the given WAL file + * sets the position to start seeking the WAL file n * initial position to start seeking the given + * WAL file */ public void setPosition(long position) { this.position = position; } /** - * enables output as a single, persistent list. at present, only relevant in - * the case of JSON output. + * enables output as a single, persistent list. at present, only relevant in the case of JSON + * output. */ public void beginPersistentOutput() { if (persistentOutput) { @@ -271,8 +233,7 @@ public class WALPrettyPrinter { } /** - * ends output of a single, persistent list. at present, only relevant in the - * case of JSON output. + * ends output of a single, persistent list. at present, only relevant in the case of JSON output. */ public void endPersistentOutput() { if (!persistentOutput) { @@ -285,19 +246,12 @@ public class WALPrettyPrinter { } /** - * reads a log file and outputs its contents, one transaction at a time, as - * specified by the currently configured options - * - * @param conf - * the HBase configuration relevant to this log file - * @param p - * the path of the log file to be read - * @throws IOException - * may be unable to access the configured filesystem or requested - * file. 
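The filter setters shown in this hunk can also be driven programmatically rather than through the command-line options; a short sketch, assuming a WAL file path is passed as the first argument (filter values are examples only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

public class PrettyPrintWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    WALPrettyPrinter printer = new WALPrettyPrinter(); // defaults: pretty text to stdout
    printer.setTableFilter("test1,test2");             // comma separated table names
    printer.setSequenceFilter(42L);                    // only entries with this sequence id
    printer.setPosition(0L);                           // seek offset into the file
    printer.processFile(conf, new Path(args[0]));
  }
}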
+ * reads a log file and outputs its contents, one transaction at a time, as specified by the + * currently configured options n * the HBase configuration relevant to this log file n * the path + * of the log file to be read n * may be unable to access the configured filesystem or requested + * file. */ - public void processFile(final Configuration conf, final Path p) - throws IOException { + public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { throw new FileNotFoundException(p.toString()); @@ -345,8 +299,7 @@ public class WALPrettyPrinter { Map txn = key.toStringMap(); long writeTime = key.getWriteTime(); // check output filters - if (!tableSet.isEmpty() && - !tableSet.contains(txn.get("table").toString())) { + if (!tableSet.isEmpty() && !tableSet.contains(txn.get("table").toString())) { continue; } if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence) { @@ -382,8 +335,8 @@ public class WALPrettyPrinter { } else { // Pretty output, complete with indentation by atomic action if (!outputOnlyRowKey) { - out.println(String.format(outputTmpl, - txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime))); + out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), + txn.get("region"), new Date(writeTime))); } for (int i = 0; i < actions.size(); i++) { Map op = actions.get(i); @@ -403,8 +356,8 @@ public class WALPrettyPrinter { } } - public static void printCell(PrintStream out, Map op, - boolean outputValues, boolean outputOnlyRowKey) { + public static void printCell(PrintStream out, Map op, boolean outputValues, + boolean outputOnlyRowKey) { String rowDetails = "row=" + op.get("row"); if (outputOnlyRowKey) { out.println(rowDetails); @@ -423,16 +376,18 @@ public class WALPrettyPrinter { out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map toStringMap(Cell cell, - boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { + public static Map toStringMap(Cell cell, boolean printRowKeyOnly, + String rowPrefix, String row, boolean outputValues) { Map stringMap = new HashMap<>(); - String rowKey = Bytes.toStringBinary(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength()); + String rowKey = + Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Row and row prefix are mutually options so both cannot be true at the // same time. 
We can include checks in the same condition // Check if any of the filters are satisfied by the row, if not return empty map - if ((!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) || - (!Strings.isNullOrEmpty(row) && !rowKey.equals(row))) { + if ( + (!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) + || (!Strings.isNullOrEmpty(row) && !rowKey.equals(row)) + ) { return stringMap; } @@ -441,11 +396,10 @@ public class WALPrettyPrinter { return stringMap; } stringMap.put("type", cell.getType()); - stringMap.put("family", Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength())); - stringMap.put("qualifier", - Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())); + stringMap.put("family", + Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); + stringMap.put("qualifier", Bytes.toStringBinary(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); stringMap.put("timestamp", cell.getTimestamp()); stringMap.put("vlen", cell.getValueLength()); stringMap.put("total_size_sum", cell.heapSize()); @@ -454,8 +408,7 @@ public class WALPrettyPrinter { Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); - tagsString - .add((tag.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(tag))); + tagsString.add((tag.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(tag))); } stringMap.put("tag", tagsString); } @@ -474,13 +427,9 @@ public class WALPrettyPrinter { } /** - * Pass one or more log file names and formatting options and it will dump out - * a text version of the contents on stdout. - * - * @param args - * Command line arguments - * @throws IOException - * Thrown upon file system errors etc. + * Pass one or more log file names and formatting options and it will dump out a text version of + * the contents on stdout. n * Command line arguments n * Thrown upon file system + * errors etc. */ public static void run(String[] args) throws IOException { // create options @@ -491,11 +440,9 @@ public class WALPrettyPrinter { options.addOption("t", "tables", true, "Table names (comma separated) to filter by; eg: test1,test2,test3 "); options.addOption("r", "region", true, - "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'"); - options.addOption("s", "sequence", true, - "Sequence to filter by. Pass sequence number."); - options.addOption("k", "outputOnlyRowKey", false, - "Print only row keys"); + "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'"); + options.addOption("s", "sequence", true, "Sequence to filter by. Pass sequence number."); + options.addOption("k", "outputOnlyRowKey", false, "Print only row keys"); options.addOption("w", "row", true, "Row to filter by. 
Pass row name."); options.addOption("f", "rowPrefix", true, "Row prefix to filter by."); options.addOption("g", "goto", true, "Position to seek to in the file"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index 0a3123a8104..4f26506d43a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.io.IOException; import java.util.List; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; @@ -44,12 +42,12 @@ public interface WALProvider { /** * Set up the provider to create wals. will only be called once per instance. - * @param factory factory that made us may not be null - * @param conf may not be null + * @param factory factory that made us may not be null + * @param conf may not be null * @param providerId differentiate between providers from one factory. may be null */ void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException; + throws IOException; /** * @param region the region which we want to get a WAL for it. Could be null. @@ -78,18 +76,17 @@ public interface WALProvider { interface WriterBase extends Closeable { long getLength(); + /** - * NOTE: We add this method for {@link WALFileLengthProvider} used for replication, - * considering the case if we use {@link AsyncFSWAL},we write to 3 DNs concurrently, - * according to the visibility guarantee of HDFS, the data will be available immediately - * when arriving at DN since all the DNs will be considered as the last one in pipeline. - * This means replication may read uncommitted data and replicate it to the remote cluster - * and cause data inconsistency. - * The method {@link WriterBase#getLength} may return length which just in hdfs client - * buffer and not successfully synced to HDFS, so we use this method to return the length - * successfully synced to HDFS and replication thread could only read writing WAL file - * limited by this length. - * see also HBASE-14004 and this document for more details: + * NOTE: We add this method for {@link WALFileLengthProvider} used for replication, considering + * the case if we use {@link AsyncFSWAL},we write to 3 DNs concurrently, according to the + * visibility guarantee of HDFS, the data will be available immediately when arriving at DN + * since all the DNs will be considered as the last one in pipeline. This means replication may + * read uncommitted data and replicate it to the remote cluster and cause data inconsistency. + * The method {@link WriterBase#getLength} may return length which just in hdfs client buffer + * and not successfully synced to HDFS, so we use this method to return the length successfully + * synced to HDFS and replication thread could only read writing WAL file limited by this + * length. see also HBASE-14004 and this document for more details: * https://docs.google.com/document/d/11AyWtGhItQs6vsLRIx32PwTxmBY3libXwGXI25obVEY/edit# * @return byteSize successfully synced to underlying filesystem. 
*/ @@ -131,6 +128,6 @@ public interface WALProvider { default WALFileLengthProvider getWALFileLengthProvider() { return path -> getWALs().stream().map(w -> w.getLogFileSizeIfBeingWritten(path)) - .filter(o -> o.isPresent()).findAny().orElse(OptionalLong.empty()); + .filter(o -> o.isPresent()).findAny().orElse(OptionalLong.empty()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index e5a6d70f241..218777a55f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -107,7 +107,7 @@ public final class WALSplitUtil { * that couldn't be successfully parsed to corruptDir (.corrupt) for later investigation */ static void archive(final Path wal, final boolean corrupt, final Path oldWALDir, - final FileSystem walFS, final Configuration conf) throws IOException { + final FileSystem walFS, final Configuration conf) throws IOException { Path dir; Path target; if (corrupt) { @@ -131,8 +131,8 @@ public final class WALSplitUtil { } /** - * Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. - * WAL may have already been moved; makes allowance. + * Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. WAL may have + * already been moved; makes allowance. */ public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOException { if (fs.exists(p)) { @@ -148,19 +148,19 @@ public final class WALSplitUtil { * Path to a file under RECOVERED_EDITS_DIR directory of the region found in logEntry * named for the sequenceid in the passed logEntry: e.g. * /hbase/some_table/2323432434/recovered.edits/2332. This method also ensures existence of - * RECOVERED_EDITS_DIR under the region creating it if necessary. - * And also set storage policy for RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured. - * @param tableName the table name - * @param encodedRegionName the encoded region name - * @param seqId the sequence id which used to generate file name + * RECOVERED_EDITS_DIR under the region creating it if necessary. And also set storage policy for + * RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured. + * @param tableName the table name + * @param encodedRegionName the encoded region name + * @param seqId the sequence id which used to generate file name * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name. - * @param tmpDirName of the directory used to sideline old recovered edits file - * @param conf configuration + * @param tmpDirName of the directory used to sideline old recovered edits file + * @param conf configuration * @return Path to file into which to dump split log edits. */ @SuppressWarnings("deprecation") static Path getRegionSplitEditsPath(TableName tableName, byte[] encodedRegionName, long seqId, - String fileNameBeingSplit, String tmpDirName, Configuration conf) throws IOException { + String fileNameBeingSplit, String tmpDirName, Configuration conf) throws IOException { FileSystem walFS = CommonFSUtils.getWALFileSystem(conf); Path tableDir = CommonFSUtils.getWALTableDir(conf, tableName); String encodedRegionNameStr = Bytes.toString(encodedRegionName); @@ -174,8 +174,7 @@ public final class WALSplitUtil { } tmp = new Path(tmp, HConstants.RECOVERED_EDITS_DIR + "_" + encodedRegionNameStr); LOG.warn("Found existing old file: {}. 
It could be some " - + "leftover of an old installation. It should be a folder instead. " - + "So moving it to {}", + + "leftover of an old installation. It should be a folder instead. " + "So moving it to {}", dir, tmp); if (!walFS.rename(dir, tmp)) { LOG.warn("Failed to sideline old file {}", dir); @@ -226,12 +225,12 @@ public final class WALSplitUtil { /** * Check whether there is recovered.edits in the region dir - * @param conf conf + * @param conf conf * @param regionInfo the region to check * @return true if recovered.edits exist in the region dir */ public static boolean hasRecoveredEdits(final Configuration conf, final RegionInfo regionInfo) - throws IOException { + throws IOException { // No recovered.edits for non default replica regions if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { return false; @@ -287,12 +286,12 @@ public final class WALSplitUtil { /** * Returns sorted set of edit files made by splitter, excluding files with '.temp' suffix. - * @param walFS WAL FileSystem used to retrieving split edits files. + * @param walFS WAL FileSystem used to retrieving split edits files. * @param regionDir WAL region dir to look for recovered edits files under. * @return Files in passed regionDir as a sorted set. */ public static NavigableSet getSplitEditFilesSorted(final FileSystem walFS, - final Path regionDir) throws IOException { + final Path regionDir) throws IOException { NavigableSet filesSorted = new TreeSet<>(); Path editsdir = getRegionDirRecoveredEditsDir(regionDir); if (!walFS.exists(editsdir)) { @@ -332,14 +331,14 @@ public final class WALSplitUtil { /** * Move aside a bad edits file. - * @param fs the file system used to rename bad edits file. + * @param fs the file system used to rename bad edits file. * @param edits Edits file to move aside. * @return The name of the moved aside file. */ public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits) - throws IOException { + throws IOException { Path moveAsideName = - new Path(edits.getParent(), edits.getName() + "." + EnvironmentEdgeManager.currentTime()); + new Path(edits.getParent(), edits.getName() + "." + EnvironmentEdgeManager.currentTime()); if (!fs.rename(edits, moveAsideName)) { LOG.warn("Rename failed from {} to {}", edits, moveAsideName); } @@ -351,11 +350,11 @@ public final class WALSplitUtil { */ public static boolean isSequenceIdFile(final Path file) { return file.getName().endsWith(SEQUENCE_ID_FILE_SUFFIX) - || file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX); + || file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX); } private static FileStatus[] getSequenceIdFiles(FileSystem walFS, Path regionDir) - throws IOException { + throws IOException { // TODO: Why are we using a method in here as part of our normal region open where // there is no splitting involved? Fix. St.Ack 01/20/2017. 
Path editsDir = getRegionDirRecoveredEditsDir(regionDir); @@ -373,7 +372,7 @@ public final class WALSplitUtil { String fileName = file.getPath().getName(); try { maxSeqId = Math.max(maxSeqId, Long - .parseLong(fileName.substring(0, fileName.length() - SEQUENCE_ID_FILE_SUFFIX_LENGTH))); + .parseLong(fileName.substring(0, fileName.length() - SEQUENCE_ID_FILE_SUFFIX_LENGTH))); } catch (NumberFormatException ex) { LOG.warn("Invalid SeqId File Name={}", fileName); } @@ -392,16 +391,16 @@ public final class WALSplitUtil { * Create a file with name as region's max sequence id */ public static void writeRegionSequenceIdFile(FileSystem walFS, Path regionDir, long newMaxSeqId) - throws IOException { + throws IOException { FileStatus[] files = getSequenceIdFiles(walFS, regionDir); long maxSeqId = getMaxSequenceId(files); if (maxSeqId > newMaxSeqId) { throw new IOException("The new max sequence id " + newMaxSeqId - + " is less than the old max sequence id " + maxSeqId); + + " is less than the old max sequence id " + maxSeqId); } // write a new seqId file Path newSeqIdFile = - new Path(getRegionDirRecoveredEditsDir(regionDir), newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX); + new Path(getRegionDirRecoveredEditsDir(regionDir), newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX); if (newMaxSeqId != maxSeqId) { try { if (!walFS.createNewFile(newSeqIdFile) && !walFS.exists(newSeqIdFile)) { @@ -424,7 +423,7 @@ public final class WALSplitUtil { /** A struct used by getMutationsFromWALEntry */ public static class MutationReplay implements Comparable { public MutationReplay(ClientProtos.MutationProto.MutationType type, Mutation mutation, - long nonceGroup, long nonce) { + long nonceGroup, long nonce) { this.type = type; this.mutation = mutation; if (this.mutation.getDurability() != Durability.SKIP_WAL) { @@ -436,9 +435,12 @@ public final class WALSplitUtil { } private final ClientProtos.MutationProto.MutationType type; - @SuppressWarnings("checkstyle:VisibilityModifier") public final Mutation mutation; - @SuppressWarnings("checkstyle:VisibilityModifier") public final long nonceGroup; - @SuppressWarnings("checkstyle:VisibilityModifier") public final long nonce; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final Mutation mutation; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final long nonceGroup; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final long nonce; @Override public int compareTo(final MutationReplay d) { @@ -468,19 +470,19 @@ public final class WALSplitUtil { * This function is used to construct mutations from a WALEntry. It also reconstructs WALKey & * WALEdit from the passed in WALEntry * @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances - * extracted from the passed in WALEntry. + * extracted from the passed in WALEntry. * @return list of Pair<MutationType, Mutation> to be replayed */ public static List getMutationsFromWALEntry(AdminProtos.WALEntry entry, - CellScanner cells, Pair logEntry, Durability durability) throws IOException { + CellScanner cells, Pair logEntry, Durability durability) throws IOException { if (entry == null) { // return an empty array return Collections.emptyList(); } - long replaySeqId = - (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() - : entry.getKey().getLogSequenceNumber(); + long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) + ? 
entry.getKey().getOrigSequenceNumber() + : entry.getKey().getLogSequenceNumber(); int count = entry.getAssociatedCellCount(); List mutations = new ArrayList<>(); Cell previousCell = null; @@ -502,20 +504,20 @@ public final class WALSplitUtil { } boolean isNewRowOrType = - previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() - || !CellUtil.matchingRows(previousCell, cell); + previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() + || !CellUtil.matchingRows(previousCell, cell); if (isNewRowOrType) { // Create new mutation if (CellUtil.isDelete(cell)) { m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Deletes don't have nonces. mutations.add(new MutationReplay(ClientProtos.MutationProto.MutationType.DELETE, m, - HConstants.NO_NONCE, HConstants.NO_NONCE)); + HConstants.NO_NONCE, HConstants.NO_NONCE)); } else { m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Puts might come from increment or append, thus we need nonces. long nonceGroup = - entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE; + entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE; long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE; mutations.add( new MutationReplay(ClientProtos.MutationProto.MutationType.PUT, m, nonceGroup, nonce)); @@ -533,15 +535,15 @@ public final class WALSplitUtil { // reconstruct WALKey if (logEntry != null) { org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = - entry.getKey(); + entry.getKey(); List clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount()); for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) { clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits())); } key = new WALKeyImpl(walKeyProto.getEncodedRegionName().toByteArray(), - TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId, - walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), - walKeyProto.getNonce(), null); + TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId, + walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(), + null); logEntry.setFirst(key); logEntry.setSecond(val); } @@ -553,15 +555,15 @@ public final class WALSplitUtil { * Return path to recovered.hfiles directory of the region's column family: e.g. * /hbase/some_table/2323432434/cf/recovered.hfiles/. This method also ensures existence of * recovered.hfiles directory under the region's column family, creating it if necessary. - * @param rootFS the root file system - * @param conf configuration - * @param tableName the table name + * @param rootFS the root file system + * @param conf configuration + * @param tableName the table name * @param encodedRegionName the encoded region name - * @param familyName the column family name + * @param familyName the column family name * @return Path to recovered.hfiles directory of the region's column family. 
*/ static Path tryCreateRecoveredHFilesDir(FileSystem rootFS, Configuration conf, - TableName tableName, String encodedRegionName, String familyName) throws IOException { + TableName tableName, String encodedRegionName, String familyName) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); Path regionDir = FSUtils.getRegionDirFromTableDir(CommonFSUtils.getTableDir(rootDir, tableName), encodedRegionName); @@ -582,8 +584,8 @@ public final class WALSplitUtil { return new Path(new Path(regionDir, familyName), HConstants.RECOVERED_HFILES_DIR); } - public static FileStatus[] getRecoveredHFiles(final FileSystem rootFS, - final Path regionDir, String familyName) throws IOException { + public static FileStatus[] getRecoveredHFiles(final FileSystem rootFS, final Path regionDir, + String familyName) throws IOException { Path dir = getRecoveredHFilesDir(regionDir, familyName); return CommonFSUtils.listStatus(rootFS, dir); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 603a26e4768..f1bc9c8dcf7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -63,10 +63,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Reg import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId; /** - * Split RegionServer WAL files. Splits the WAL into new files, - * one per region, to be picked up on Region reopen. Deletes the split WAL when finished. - * Create an instance and call {@link #splitWAL(FileStatus, CancelableProgressable)} per file or - * use static helper methods. + * Split RegionServer WAL files. Splits the WAL into new files, one per region, to be picked up on + * Region reopen. Deletes the split WAL when finished. Create an instance and call + * {@link #splitWAL(FileStatus, CancelableProgressable)} per file or use static helper methods. */ @InterfaceAudience.Private public class WALSplitter { @@ -92,8 +91,8 @@ public class WALSplitter { private EntryBuffers entryBuffers; /** - * Coordinator for split log. Used by the zk-based log splitter. - * Not used by the procedure v2-based log splitter. + * Coordinator for split log. Used by the zk-based log splitter. Not used by the procedure + * v2-based log splitter. */ private SplitLogWorkerCoordination splitLogWorkerCoordination; @@ -120,10 +119,10 @@ public class WALSplitter { public static final boolean DEFAULT_WAL_SPLIT_TO_HFILE = false; /** - * True if we are to run with bounded amount of writers rather than let the count blossom. - * Default is 'false'. Does not apply if you have set 'hbase.wal.split.to.hfile' as that - * is always bounded. Only applies when you are doing recovery to 'recovered.edits' - * files (the old default). Bounded writing tends to have higher throughput. + * True if we are to run with bounded amount of writers rather than let the count blossom. Default + * is 'false'. Does not apply if you have set 'hbase.wal.split.to.hfile' as that is always + * bounded. Only applies when you are doing recovery to 'recovered.edits' files (the old default). + * Bounded writing tends to have higher throughput. 
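 + *

    Editor's illustrative sketch (not part of the original change) of switching recovery to + * bounded writer creation; the property key is the literal value of this constant: + *

     {@code
    + *   Configuration conf = HBaseConfiguration.create();
    + *   conf.setBoolean("hbase.split.writer.creation.bounded", true);
    + * }
    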
*/ public final static String SPLIT_WRITER_CREATION_BOUNDED = "hbase.split.writer.creation.bounded"; @@ -137,14 +136,14 @@ public class WALSplitter { private final boolean hfile; private final boolean skipErrors; - WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, - FileSystem walFS, Path rootDir, FileSystem rootFS) { + WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, FileSystem walFS, + Path rootDir, FileSystem rootFS) { this(factory, conf, walRootDir, walFS, rootDir, rootFS, null, null, null); } - WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, - FileSystem walFS, Path rootDir, FileSystem rootFS, LastSequenceId idChecker, - SplitLogWorkerCoordination splitLogWorkerCoordination, RegionServerServices rsServices) { + WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, FileSystem walFS, + Path rootDir, FileSystem rootFS, LastSequenceId idChecker, + SplitLogWorkerCoordination splitLogWorkerCoordination, RegionServerServices rsServices) { this.conf = HBaseConfiguration.create(conf); String codecClassName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName()); @@ -184,16 +183,14 @@ public class WALSplitter { } /** - * Splits a WAL file. - * Used by old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} and tests. - * Not used by new procedure-based WAL splitter. - * + * Splits a WAL file. Used by old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} and + * tests. Not used by new procedure-based WAL splitter. * @return false if it is interrupted by the progress-able. */ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem walFS, - Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, - SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory, - RegionServerServices rsServices) throws IOException { + Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, + SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory, + RegionServerServices rsServices) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walDir, walFS, rootDir, rootFS, idChecker, @@ -205,14 +202,13 @@ public class WALSplitter { } /** - * Split a folder of WAL files. Delete the directory when done. - * Used by tools and unit tests. It should be package private. - * It is public only because TestWALObserver is in a different package, + * Split a folder of WAL files. Delete the directory when done. Used by tools and unit tests. It + * should be package private. It is public only because TestWALObserver is in a different package, * which uses this method to do log splitting. * @return List of output files created by the split. 
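 + *

    Editor's illustrative sketch (not part of the original change), assuming a {@code WALFactory} + * built from the same configuration and an hypothetical factory id: + *

     {@code
    + *   Configuration conf = HBaseConfiguration.create();
    + *   FileSystem walFS = walRootDir.getFileSystem(conf);
    + *   WALFactory factory = new WALFactory(conf, "wal-split-tool");
    + *   List splitFiles = WALSplitter.split(walRootDir, walsDir, archiveDir, walFS, conf, factory);
    + * }
    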
*/ public static List split(Path walRootDir, Path walsDir, Path archiveDir, FileSystem walFS, - Configuration conf, final WALFactory factory) throws IOException { + Configuration conf, final WALFactory factory) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walRootDir, walFS, rootDir, rootFS); @@ -220,11 +216,11 @@ public class WALSplitter { SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null); List splits = new ArrayList<>(); if (!wals.isEmpty()) { - for (FileStatus wal: wals) { + for (FileStatus wal : wals) { SplitWALResult splitWALResult = splitter.splitWAL(wal, null); if (splitWALResult.isFinished()) { WALSplitUtil.archive(wal.getPath(), splitWALResult.isCorrupt(), archiveDir, walFS, conf); - //splitter.outputSink.splits is mark as final, do not need null check + // splitter.outputSink.splits is mark as final, do not need null check splits.addAll(splitter.outputSink.splits); } } @@ -236,9 +232,9 @@ public class WALSplitter { } /** - * Data structure returned as result by #splitWAL(FileStatus, CancelableProgressable). - * Test {@link #isFinished()} to see if we are done with the WAL and {@link #isCorrupt()} for if - * the WAL is corrupt. + * Data structure returned as result by #splitWAL(FileStatus, CancelableProgressable). Test + * {@link #isFinished()} to see if we are done with the WAL and {@link #isCorrupt()} for if the + * WAL is corrupt. */ static final class SplitWALResult { private final boolean finished; @@ -265,16 +261,16 @@ public class WALSplitter { PipelineController controller = new PipelineController(); if (this.hfile) { this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize); - this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, this.entryBuffers, + this.numWriterThreads); } else if (this.splitWriterCreationBounded) { this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize); - this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, this.entryBuffers, + this.numWriterThreads); } else { this.entryBuffers = new EntryBuffers(controller, this.bufferSize); - this.outputSink = new RecoveredEditsOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = + new RecoveredEditsOutputSink(this, controller, this.entryBuffers, this.numWriterThreads); } } @@ -327,8 +323,9 @@ public class WALSplitter { String encodedRegionNameAsStr = Bytes.toString(region); Long lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr); if (lastFlushedSequenceId == null) { - if (!(isRegionDirPresentUnderRoot(entry.getKey().getTableName(), - encodedRegionNameAsStr))) { + if ( + !(isRegionDirPresentUnderRoot(entry.getKey().getTableName(), encodedRegionNameAsStr)) + ) { // The region directory itself is not present in the FS. This indicates that // the region/table is already removed. We can just skip all the edits for this // region. 
Setting lastFlushedSequenceId as Long.MAX_VALUE so that all edits @@ -342,13 +339,13 @@ public class WALSplitter { Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) { maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), - storeSeqId.getSequenceId()); + storeSeqId.getSequenceId()); } regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores); lastFlushedSequenceId = ids.getLastFlushedSequenceId(); if (LOG.isDebugEnabled()) { LOG.debug("Last flushed sequenceid for " + encodedRegionNameAsStr + ": " - + TextFormat.shortDebugString(ids)); + + TextFormat.shortDebugString(ids)); } } if (lastFlushedSequenceId == null) { @@ -370,11 +367,12 @@ public class WALSplitter { entryBuffers.appendEntry(entry); int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck; // If sufficient edits have passed, check if we should report progress. - if (editsCount % interval == 0 - || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) { + if ( + editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting + ) { numOpenedFilesLastCheck = this.getNumOpenWriters(); String countsStr = (editsCount - (editsSkipped + outputSink.getTotalSkippedEdits())) - + " edits, skipped " + editsSkipped + " edits."; + + " edits, skipped " + editsSkipped + " edits."; status.setStatus("Split " + countsStr); if (cancel != null && !cancel.progress()) { cancelled = true; @@ -419,10 +417,10 @@ public class WALSplitter { } finally { long processCost = EnvironmentEdgeManager.currentTime() - startTS; // See if length got updated post lease recovery - String msg = "Processed " + editsCount + " edits across " + - outputSink.getNumberOfRecoveredRegions() + " Regions in " + processCost + - " ms; skipped=" + editsSkipped + "; WAL=" + wal + ", size=" + lengthStr + - ", length=" + length + ", corrupted=" + corrupt + ", cancelled=" + cancelled; + String msg = "Processed " + editsCount + " edits across " + + outputSink.getNumberOfRecoveredRegions() + " Regions in " + processCost + + " ms; skipped=" + editsSkipped + "; WAL=" + wal + ", size=" + lengthStr + ", length=" + + length + ", corrupted=" + corrupt + ", cancelled=" + cancelled; LOG.info(msg); status.markComplete(msg); if (LOG.isDebugEnabled()) { @@ -441,8 +439,8 @@ public class WALSplitter { * Create a new {@link Reader} for reading logs to split. * @return Returns null if file has length zero or file can't be found. 
*/ - protected Reader getReader(FileStatus walStatus, boolean skipErrors, CancelableProgressable cancel) - throws IOException, CorruptedLogFileException { + protected Reader getReader(FileStatus walStatus, boolean skipErrors, + CancelableProgressable cancel) throws IOException, CorruptedLogFileException { Path path = walStatus.getPath(); long length = walStatus.getLen(); Reader in; @@ -479,14 +477,14 @@ public class WALSplitter { if (!skipErrors || e instanceof InterruptedIOException) { throw e; // Don't mark the file corrupted if interrupted, or not skipErrors } - throw new CorruptedLogFileException("skipErrors=true; could not open " + path + - ", skipping", e); + throw new CorruptedLogFileException("skipErrors=true; could not open " + path + ", skipping", + e); } return in; } private Entry getNextLogLine(Reader in, Path path, boolean skipErrors) - throws CorruptedLogFileException, IOException { + throws CorruptedLogFileException, IOException { try { return in.next(); } catch (EOFException eof) { @@ -496,8 +494,10 @@ public class WALSplitter { } catch (IOException e) { // If the IOE resulted from bad file format, // then this problem is idempotent and retrying won't help - if (e.getCause() != null && (e.getCause() instanceof ParseException - || e.getCause() instanceof org.apache.hadoop.fs.ChecksumException)) { + if ( + e.getCause() != null && (e.getCause() instanceof ParseException + || e.getCause() instanceof org.apache.hadoop.fs.ChecksumException) + ) { LOG.warn("Parse exception from {}; continuing", path, e); return null; } @@ -578,9 +578,8 @@ public class WALSplitter { /** * CorruptedLogFileException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ CorruptedLogFileException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-server/src/main/python/hbase/merge_conf.py b/hbase-server/src/main/python/hbase/merge_conf.py index 764d98af288..ff6dfacf5bf 100644 --- a/hbase-server/src/main/python/hbase/merge_conf.py +++ b/hbase-server/src/main/python/hbase/merge_conf.py @@ -150,4 +150,3 @@ class MergeConfTool: if __name__ == '__main__': MergeConfTool().main() - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java index d84dd59a757..24f46346417 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,13 +70,13 @@ public abstract class AcidGuaranteesTestBase { public void setUp() throws Exception { MemoryCompactionPolicy policy = getMemoryCompactionPolicy(); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setValue(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, policy.name()); + .setValue(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, policy.name()); if (policy == MemoryCompactionPolicy.EAGER) { builder.setValue(MemStoreLAB.USEMSLAB_KEY, "false"); builder.setValue(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, "0.9"); } Stream.of(FAMILIES).map(ColumnFamilyDescriptorBuilder::of) - .forEachOrdered(builder::setColumnFamily); + .forEachOrdered(builder::setColumnFamily); UTIL.getAdmin().createTable(builder.build()); tool.setConf(UTIL.getConfiguration()); } @@ -87,12 +87,12 @@ public abstract class AcidGuaranteesTestBase { } private void runTestAtomicity(long millisToRun, int numWriters, int numGetters, int numScanners, - int numUniqueRows) throws Exception { + int numUniqueRows) throws Exception { runTestAtomicity(millisToRun, numWriters, numGetters, numScanners, numUniqueRows, false); } private void runTestAtomicity(long millisToRun, int numWriters, int numGetters, int numScanners, - int numUniqueRows, boolean useMob) throws Exception { + int numUniqueRows, boolean useMob) throws Exception { List args = Lists.newArrayList("-millis", String.valueOf(millisToRun), "-numWriters", String.valueOf(numWriters), "-numGetters", String.valueOf(numGetters), "-numScanners", String.valueOf(numScanners), "-numUniqueRows", String.valueOf(numUniqueRows), "-crazyFlush"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java index ae940dc24a1..1839b43d5db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,11 +47,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** @@ -89,7 +90,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { long keepAliveTime = 60; BlockingQueue workQueue = new LinkedBlockingQueue( - maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); + maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, @@ -145,7 +146,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { AtomicLong numWritten = new AtomicLong(); public AtomicityWriter(TestContext ctx, byte[][] targetRows, byte[][] targetFamilies, - ExecutorService pool) throws IOException { + ExecutorService pool) throws IOException { super(ctx); this.targetRows = targetRows; this.targetFamilies = targetFamilies; @@ -191,7 +192,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { AtomicLong numRead = new AtomicLong(); public AtomicGetReader(TestContext ctx, byte[] targetRow, byte[][] targetFamilies, - ExecutorService pool) throws IOException { + ExecutorService pool) throws IOException { super(ctx); this.targetRow = targetRow; this.targetFamilies = targetFamilies; @@ -260,7 +261,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { AtomicLong numRowsScanned = new AtomicLong(); public AtomicScanReader(TestContext ctx, byte[][] targetFamilies, ExecutorService pool) - throws IOException { + throws IOException { super(ctx); this.targetFamilies = targetFamilies; connection = ConnectionFactory.createConnection(ctx.getConf(), pool); @@ -321,13 +322,13 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { if (!admin.tableExists(TABLE_NAME)) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME); Stream.of(FAMILIES).map(ColumnFamilyDescriptorBuilder::of) - .forEachOrdered(builder::setColumnFamily); + .forEachOrdered(builder::setColumnFamily); admin.createTable(builder.build()); } ColumnFamilyDescriptor cfd = admin.getDescriptor(TABLE_NAME).getColumnFamilies()[0]; if (cfd.isMobEnabled() != useMob) { admin.modifyColumnFamily(TABLE_NAME, ColumnFamilyDescriptorBuilder.newBuilder(cfd) - .setMobEnabled(useMob).setMobThreshold(4).build()); + .setMobEnabled(useMob).setMobThreshold(4).build()); } } @@ -372,7 +373,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { List getters = Lists.newArrayList(); for (int i = 0; i < numGetters; i++) { AtomicGetReader getter = - new AtomicGetReader(ctx, rows[i % numUniqueRows], FAMILIES, sharedPool); + new AtomicGetReader(ctx, rows[i % numUniqueRows], FAMILIES, sharedPool); getters.add(getter); ctx.addThread(getter); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java index 
b0ea6f4879f..87adccf2e3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java @@ -33,18 +33,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A {@link TestRule} that clears all user namespaces and tables - * {@link ExternalResource#before() before} the test executes. Can be used in either the - * {@link Rule} or {@link ClassRule} positions. Lazily realizes the provided - * {@link AsyncConnection} so as to avoid initialization races with other {@link Rule Rules}. - * Does not {@link AsyncConnection#close() close()} provided connection instance when - * finished. + * A {@link TestRule} that clears all user namespaces and tables {@link ExternalResource#before() + * before} the test executes. Can be used in either the {@link Rule} or {@link ClassRule} positions. + * Lazily realizes the provided {@link AsyncConnection} so as to avoid initialization races with + * other {@link Rule Rules}. Does not {@link AsyncConnection#close() close()} provided + * connection instance when finished. *

      * Use in combination with {@link MiniClusterRule} and {@link ConnectionRule}, for example: * - *
      {@code
 + * 
    + * {@code
    
        *   public class TestMyClass {
      - *     @ClassRule
      + *     @ClassRule
        *     public static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
        *
        *     private final ConnectionRule connectionRule =
      @@ -52,12 +53,12 @@ import org.slf4j.LoggerFactory;
        *     private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule =
        *       new ClearUserNamespacesAndTablesRule(connectionRule::getConnection);
        *
      - *     @Rule
      - *     public TestRule rule = RuleChain
      - *       .outerRule(connectionRule)
      - *       .around(clearUserNamespacesAndTablesRule);
      + *     @Rule
      + *     public TestRule rule =
      + *       RuleChain.outerRule(connectionRule).around(clearUserNamespacesAndTablesRule);
        *   }
      - * }
      + * } + *
      */ public class ClearUserNamespacesAndTablesRule extends ExternalResource { private static final Logger logger = @@ -83,18 +84,14 @@ public class ClearUserNamespacesAndTablesRule extends ExternalResource { } private CompletableFuture deleteUserTables() { - return listTableNames() - .thenApply(tableNames -> tableNames.stream() - .map(tableName -> disableIfEnabled(tableName).thenCompose(_void -> deleteTable(tableName))) - .toArray(CompletableFuture[]::new)) - .thenCompose(CompletableFuture::allOf); + return listTableNames().thenApply(tableNames -> tableNames.stream() + .map(tableName -> disableIfEnabled(tableName).thenCompose(_void -> deleteTable(tableName))) + .toArray(CompletableFuture[]::new)).thenCompose(CompletableFuture::allOf); } private CompletableFuture> listTableNames() { - return CompletableFuture - .runAsync(() -> logger.trace("listing tables")) - .thenCompose(_void -> admin.listTableNames(false)) - .thenApply(tableNames -> { + return CompletableFuture.runAsync(() -> logger.trace("listing tables")) + .thenCompose(_void -> admin.listTableNames(false)).thenApply(tableNames -> { if (logger.isTraceEnabled()) { final StringJoiner joiner = new StringJoiner(", ", "[", "]"); tableNames.stream().map(TableName::getNameAsString).forEach(joiner::add); @@ -105,63 +102,51 @@ public class ClearUserNamespacesAndTablesRule extends ExternalResource { } private CompletableFuture isTableEnabled(final TableName tableName) { - return admin.isTableEnabled(tableName) - .thenApply(isEnabled -> { - logger.trace("table {} is enabled.", tableName); - return isEnabled; - }); + return admin.isTableEnabled(tableName).thenApply(isEnabled -> { + logger.trace("table {} is enabled.", tableName); + return isEnabled; + }); } private CompletableFuture disableIfEnabled(final TableName tableName) { - return isTableEnabled(tableName) - .thenCompose(isEnabled -> isEnabled - ? disableTable(tableName) - : CompletableFuture.completedFuture(null)); + return isTableEnabled(tableName).thenCompose( + isEnabled -> isEnabled ? 
disableTable(tableName) : CompletableFuture.completedFuture(null)); } private CompletableFuture disableTable(final TableName tableName) { - return CompletableFuture - .runAsync(() -> logger.trace("disabling enabled table {}", tableName)) + return CompletableFuture.runAsync(() -> logger.trace("disabling enabled table {}", tableName)) .thenCompose(_void -> admin.disableTable(tableName)); } private CompletableFuture deleteTable(final TableName tableName) { - return CompletableFuture - .runAsync(() -> logger.trace("deleting disabled table {}", tableName)) + return CompletableFuture.runAsync(() -> logger.trace("deleting disabled table {}", tableName)) .thenCompose(_void -> admin.deleteTable(tableName)); } private CompletableFuture> listUserNamespaces() { - return CompletableFuture - .runAsync(() -> logger.trace("listing namespaces")) - .thenCompose(_void -> admin.listNamespaceDescriptors()) - .thenApply(namespaceDescriptors -> { + return CompletableFuture.runAsync(() -> logger.trace("listing namespaces")) + .thenCompose(_void -> admin.listNamespaceDescriptors()).thenApply(namespaceDescriptors -> { final StringJoiner joiner = new StringJoiner(", ", "[", "]"); - final List names = namespaceDescriptors.stream() - .map(NamespaceDescriptor::getName) - .peek(joiner::add) - .collect(Collectors.toList()); + final List names = namespaceDescriptors.stream().map(NamespaceDescriptor::getName) + .peek(joiner::add).collect(Collectors.toList()); logger.trace("found existing namespaces {}", joiner); return names; }) .thenApply(namespaces -> namespaces.stream() - .filter(namespace -> !Objects.equals( - namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName())) - .filter(namespace -> !Objects.equals( - namespace, NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) + .filter( + namespace -> !Objects.equals(namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName())) + .filter( + namespace -> !Objects.equals(namespace, NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) .collect(Collectors.toList())); } private CompletableFuture deleteNamespace(final String namespace) { - return CompletableFuture - .runAsync(() -> logger.trace("deleting namespace {}", namespace)) + return CompletableFuture.runAsync(() -> logger.trace("deleting namespace {}", namespace)) .thenCompose(_void -> admin.deleteNamespace(namespace)); } private CompletableFuture deleteUserNamespaces() { - return listUserNamespaces() - .thenCompose(namespaces -> CompletableFuture.allOf(namespaces.stream() - .map(this::deleteNamespace) - .toArray(CompletableFuture[]::new))); + return listUserNamespaces().thenCompose(namespaces -> CompletableFuture + .allOf(namespaces.stream().map(this::deleteNamespace).toArray(CompletableFuture[]::new))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java index b029cf97e78..9945ad7d68d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java @@ -32,18 +32,20 @@ import org.junit.rules.ExternalResource; *

      * Use in combination with {@link MiniClusterRule}, for example: * - *
      {@code
    + * 
    + * {@code
    
        *   public class TestMyClass {
        *     private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
        *     private static final ConnectionRule connectionRule =
        *       ConnectionRule.createAsyncConnectionRule(miniClusterRule::createConnection);
        *
      - *     @ClassRule
      - *     public static final TestRule rule = RuleChain
      - *       .outerRule(miniClusterRule)
      - *       .around(connectionRule);
      + *     @ClassRule
      + *     public static final TestRule rule =
      + *       RuleChain.outerRule(miniClusterRule).around(connectionRule);
        *   }
      - * }
      + * } + *
      */ public final class ConnectionRule extends ExternalResource { @@ -53,29 +55,22 @@ public final class ConnectionRule extends ExternalResource { private Connection connection; private AsyncConnection asyncConnection; - public static ConnectionRule createConnectionRule( - final Supplier connectionSupplier - ) { + public static ConnectionRule createConnectionRule(final Supplier connectionSupplier) { return new ConnectionRule(connectionSupplier, null); } public static ConnectionRule createAsyncConnectionRule( - final Supplier> asyncConnectionSupplier - ) { + final Supplier> asyncConnectionSupplier) { return new ConnectionRule(null, asyncConnectionSupplier); } - public static ConnectionRule createConnectionRule( - final Supplier connectionSupplier, - final Supplier> asyncConnectionSupplier - ) { + public static ConnectionRule createConnectionRule(final Supplier connectionSupplier, + final Supplier> asyncConnectionSupplier) { return new ConnectionRule(connectionSupplier, asyncConnectionSupplier); } - private ConnectionRule( - final Supplier connectionSupplier, - final Supplier> asyncConnectionSupplier - ) { + private ConnectionRule(final Supplier connectionSupplier, + final Supplier> asyncConnectionSupplier) { this.connectionSupplier = connectionSupplier; this.asyncConnectionSupplier = asyncConnectionSupplier; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/GenericTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/GenericTestUtils.java index 2fc5dd7ad39..da67037a6a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/GenericTestUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/GenericTestUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,12 +72,12 @@ public abstract class GenericTestUtils { } /** - * List all of the files in 'dir' that match the regex 'pattern'. - * Then check that this list is identical to 'expectedMatches'. + * List all of the files in 'dir' that match the regex 'pattern'. Then check that this list is + * identical to 'expectedMatches'. * @throws IOException if the dir is inaccessible */ - public static void assertGlobEquals(File dir, String pattern, - String ... expectedMatches) throws IOException { + public static void assertGlobEquals(File dir, String pattern, String... expectedMatches) + throws IOException { Set found = Sets.newTreeSet(); for (File f : FileUtil.listFiles(dir)) { @@ -85,17 +85,13 @@ public abstract class GenericTestUtils { found.add(f.getName()); } } - Set expectedSet = Sets.newTreeSet( - Arrays.asList(expectedMatches)); + Set expectedSet = Sets.newTreeSet(Arrays.asList(expectedMatches)); Assert.assertEquals("Bad files matching " + pattern + " in " + dir, - Joiner.on(",").join(expectedSet), - Joiner.on(",").join(found)); + Joiner.on(",").join(expectedSet), Joiner.on(",").join(found)); } - public static void waitFor(Supplier check, - int checkEveryMillis, int waitForMillis) - throws TimeoutException, InterruptedException - { + public static void waitFor(Supplier check, int checkEveryMillis, int waitForMillis) + throws TimeoutException, InterruptedException { long st = Time.now(); do { boolean result = check.get(); @@ -106,14 +102,13 @@ public abstract class GenericTestUtils { Thread.sleep(checkEveryMillis); } while (Time.now() - st < waitForMillis); - throw new TimeoutException("Timed out waiting for condition. 
" + - "Thread diagnostics:\n" + - TimedOutTestsListener.buildThreadDiagnosticString()); + throw new TimeoutException("Timed out waiting for condition. " + "Thread diagnostics:\n" + + TimedOutTestsListener.buildThreadDiagnosticString()); } /** - * Mockito answer helper that triggers one latch as soon as the - * method is called, then waits on another before continuing. + * Mockito answer helper that triggers one latch as soon as the method is called, then waits on + * another before continuing. */ public static class DelayAnswer implements Answer { private final Logger LOG; @@ -141,8 +136,7 @@ public abstract class GenericTestUtils { } /** - * Tell the method to proceed. - * This should only be called after waitForCall() + * Tell the method to proceed. This should only be called after waitForCall() */ public void proceed() { waitLatch.countDown(); @@ -178,24 +172,24 @@ public abstract class GenericTestUtils { } /** - * After calling proceed(), this will wait until the call has - * completed and a result has been returned to the caller. + * After calling proceed(), this will wait until the call has completed and a result has been + * returned to the caller. */ public void waitForResult() throws InterruptedException { resultLatch.await(); } /** - * After the call has gone through, return any exception that - * was thrown, or null if no exception was thrown. + * After the call has gone through, return any exception that was thrown, or null if no + * exception was thrown. */ public Throwable getThrown() { return thrown; } /** - * After the call has gone through, return the call's return value, - * or null in case it was void or an exception was thrown. + * After the call has gone through, return the call's return value, or null in case it was void + * or an exception was thrown. */ public Object getReturnValue() { return returnValue; @@ -211,12 +205,9 @@ public abstract class GenericTestUtils { } /** - * An Answer implementation that simply forwards all calls through - * to a delegate. - * - * This is useful as the default Answer for a mock object, to create - * something like a spy on an RPC proxy. For example: - * + * An Answer implementation that simply forwards all calls through to a delegate. This is useful + * as the default Answer for a mock object, to create something like a spy on an RPC proxy. For + * example: * NamenodeProtocol origNNProxy = secondary.getNameNode(); * NamenodeProtocol spyNNProxy = Mockito.mock(NameNodeProtocol.class, * new DelegateAnswer(origNNProxy); @@ -241,11 +232,9 @@ public abstract class GenericTestUtils { public Object answer(InvocationOnMock invocation) throws Throwable { try { if (log != null) { - log.info("Call to " + invocation + " on " + delegate, - new Exception("TRACE")); + log.info("Call to " + invocation + " on " + delegate, new Exception("TRACE")); } - return invocation.getMethod().invoke( - delegate, invocation.getArguments()); + return invocation.getMethod().invoke(delegate, invocation.getArguments()); } catch (InvocationTargetException ite) { throw ite.getCause(); } @@ -253,10 +242,9 @@ public abstract class GenericTestUtils { } /** - * An Answer implementation which sleeps for a random number of milliseconds - * between 0 and a configurable value before delegating to the real - * implementation of the method. This can be useful for drawing out race - * conditions. + * An Answer implementation which sleeps for a random number of milliseconds between 0 and a + * configurable value before delegating to the real implementation of the method. 
This can be + * useful for drawing out race conditions. */ public static class SleepAnswer implements Answer { private final int maxSleepTime; @@ -285,24 +273,22 @@ public abstract class GenericTestUtils { } public static void assertMatches(String output, String pattern) { - Assert.assertTrue("Expected output to match /" + pattern + "/" + - " but got:\n" + output, - Pattern.compile(pattern).matcher(output).find()); + Assert.assertTrue("Expected output to match /" + pattern + "/" + " but got:\n" + output, + Pattern.compile(pattern).matcher(output).find()); } public static void assertValueNear(long expected, long actual, long allowedError) { assertValueWithinRange(expected - allowedError, expected + allowedError, actual); } - public static void assertValueWithinRange(long expectedMin, long expectedMax, - long actual) { - Assert.assertTrue("Expected " + actual + " to be in range (" + expectedMin + "," - + expectedMax + ")", expectedMin <= actual && actual <= expectedMax); + public static void assertValueWithinRange(long expectedMin, long expectedMax, long actual) { + Assert.assertTrue( + "Expected " + actual + " to be in range (" + expectedMin + "," + expectedMax + ")", + expectedMin <= actual && actual <= expectedMax); } /** - * Assert that there are no threads running whose name matches the - * given regular expression. + * Assert that there are no threads running whose name matches the given regular expression. * @param regex the regex to match against */ public static void assertNoThreadsMatching(String regex) { @@ -313,8 +299,7 @@ public abstract class GenericTestUtils { for (ThreadInfo info : infos) { if (info == null) continue; if (pattern.matcher(info.getThreadName()).matches()) { - Assert.fail("Leaked thread: " + info + "\n" + - Joiner.on("\n").join(info.getStackTrace())); + Assert.fail("Leaked thread: " + info + "\n" + Joiner.on("\n").join(info.getStackTrace())); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java index 9f80a6215ff..3cc529a511c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase; import java.io.Closeable; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -34,28 +33,27 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientServ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; /** - * This class defines methods that can help with managing HBase clusters - * from unit tests and system tests. There are 3 types of cluster deployments: + * This class defines methods that can help with managing HBase clusters from unit tests and system + * tests. There are 3 types of cluster deployments: *
- * <li>MiniHBaseCluster: each server is run in the same JVM in separate threads,
- * used by unit tests</li>
+ * <li>MiniHBaseCluster: each server is run in the same JVM in separate threads, used by unit
+ * tests</li>
 * <li>DistributedHBaseCluster: the cluster is pre-deployed, system and integration tests can
- * interact with the cluster.</li>
- * <li>ProcessBasedLocalHBaseCluster: each server is deployed locally but in separate
- * JVMs.</li>
+ * <li>ProcessBasedLocalHBaseCluster: each server is deployed locally but in separate JVMs.
+ * </li>
 * </ul>
      - * HBaseCluster unifies the way tests interact with the cluster, so that the same test can - * be run against a mini-cluster during unit test execution, or a distributed cluster having - * tens/hundreds of nodes during execution of integration tests. - * + * HBaseCluster unifies the way tests interact with the cluster, so that the same test can be run + * against a mini-cluster during unit test execution, or a distributed cluster having tens/hundreds + * of nodes during execution of integration tests. *

      * HBaseCluster exposes client-side public interfaces to tests, so that tests does not assume - * running in a particular mode. Not all the tests are suitable to be run on an actual cluster, - * and some tests will still need to mock stuff and introspect internal state. For those use - * cases from unit tests, or if more control is needed, you can use the subclasses directly. - * In that sense, this class does not abstract away every interface that - * MiniHBaseCluster or DistributedHBaseCluster provide. + * running in a particular mode. Not all the tests are suitable to be run on an actual cluster, and + * some tests will still need to mock stuff and introspect internal state. For those use cases from + * unit tests, or if more control is needed, you can use the subclasses directly. In that sense, + * this class does not abstract away every interface that MiniHBaseCluster or + * DistributedHBaseCluster provide. */ @InterfaceAudience.Public public abstract class HBaseCluster implements Closeable, Configurable { @@ -91,8 +89,7 @@ public abstract class HBaseCluster implements Closeable, Configurable { public abstract ClusterMetrics getClusterMetrics() throws IOException; /** - * Returns a ClusterStatus for this HBase cluster as observed at the - * starting of the HBaseCluster + * Returns a ClusterStatus for this HBase cluster as observed at the starting of the HBaseCluster */ public ClusterMetrics getInitialClusterMetrics() throws IOException { return initialClusterStatus; @@ -101,32 +98,31 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Returns an {@link MasterService.BlockingInterface} to the active master */ - public abstract MasterService.BlockingInterface getMasterAdminService() - throws IOException; + public abstract MasterService.BlockingInterface getMasterAdminService() throws IOException; /** * Returns an AdminProtocol interface to the regionserver */ public abstract AdminService.BlockingInterface getAdminProtocol(ServerName serverName) - throws IOException; + throws IOException; /** * Returns a ClientProtocol interface to the regionserver */ public abstract ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException; + throws IOException; /** - * Starts a new region server on the given hostname or if this is a mini/local cluster, - * starts a region server locally. + * Starts a new region server on the given hostname or if this is a mini/local cluster, starts a + * region server locally. * @param hostname the hostname to start the regionserver on * @throws IOException if something goes wrong */ public abstract void startRegionServer(String hostname, int port) throws IOException; /** - * Kills the region server process if this is a distributed cluster, otherwise - * this causes the region server to exit doing basic clean up only. + * Kills the region server process if this is a distributed cluster, otherwise this causes the + * region server to exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killRegionServer(ServerName serverName) throws IOException; @@ -134,9 +130,9 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Keeping track of killed servers and being able to check if a particular server was killed makes * it possible to do fault tolerance testing for dead servers in a deterministic way. A concrete - * example of such case is - killing servers and waiting for all regions of a particular table - * to be assigned. 
We can check for server column in META table and that its value is not one - * of the killed servers. + * example of such case is - killing servers and waiting for all regions of a particular table to + * be assigned. We can check for server column in META table and that its value is not one of the + * killed servers. */ public abstract boolean isKilledRS(ServerName serverName); @@ -151,7 +147,7 @@ public abstract class HBaseCluster implements Closeable, Configurable { * @throws IOException if something goes wrong or timeout occurs */ public void waitForRegionServerToStart(String hostname, int port, long timeout) - throws IOException { + throws IOException { long start = EnvironmentEdgeManager.currentTime(); while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { for (ServerName server : getClusterMetrics().getLiveServerMetrics().keySet()) { @@ -161,8 +157,8 @@ public abstract class HBaseCluster implements Closeable, Configurable { } Threads.sleep(100); } - throw new IOException("did timeout " + timeout + "ms waiting for region server to start: " - + hostname); + throw new IOException( + "did timeout " + timeout + "ms waiting for region server to start: " + hostname); } /** @@ -170,7 +166,7 @@ public abstract class HBaseCluster implements Closeable, Configurable { * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForRegionServerToStop(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Suspend the region server @@ -187,23 +183,23 @@ public abstract class HBaseCluster implements Closeable, Configurable { public abstract void resumeRegionServer(ServerName serverName) throws IOException; /** - * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster, - * silently logs warning message. + * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster, silently + * logs warning message. * @param hostname the hostname to start the regionserver on * @throws IOException if something goes wrong */ public abstract void startZkNode(String hostname, int port) throws IOException; /** - * Kills the zookeeper node process if this is a distributed cluster, otherwise, - * this causes master to exit doing basic clean up only. + * Kills the zookeeper node process if this is a distributed cluster, otherwise, this causes + * master to exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killZkNode(ServerName serverName) throws IOException; /** - * Stops the region zookeeper if this is a distributed cluster, otherwise - * silently logs warning message. + * Stops the region zookeeper if this is a distributed cluster, otherwise silently logs warning + * message. * @throws IOException if something goes wrong */ public abstract void stopZkNode(ServerName serverName) throws IOException; @@ -212,33 +208,30 @@ public abstract class HBaseCluster implements Closeable, Configurable { * Wait for the specified zookeeper node to join the cluster * @throws IOException if something goes wrong or timeout occurs */ - public abstract void waitForZkNodeToStart(ServerName serverName, long timeout) - throws IOException; + public abstract void waitForZkNodeToStart(ServerName serverName, long timeout) throws IOException; /** * Wait for the specified zookeeper node to stop the thread / process. 
* @throws IOException if something goes wrong or timeout occurs */ - public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) - throws IOException; + public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) throws IOException; /** - * Starts a new datanode on the given hostname or if this is a mini/local cluster, - * silently logs warning message. + * Starts a new datanode on the given hostname or if this is a mini/local cluster, silently logs + * warning message. * @throws IOException if something goes wrong */ public abstract void startDataNode(ServerName serverName) throws IOException; /** - * Kills the datanode process if this is a distributed cluster, otherwise, - * this causes master to exit doing basic clean up only. + * Kills the datanode process if this is a distributed cluster, otherwise, this causes master to + * exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killDataNode(ServerName serverName) throws IOException; /** - * Stops the datanode if this is a distributed cluster, otherwise - * silently logs warning message. + * Stops the datanode if this is a distributed cluster, otherwise silently logs warning message. * @throws IOException if something goes wrong */ public abstract void stopDataNode(ServerName serverName) throws IOException; @@ -282,26 +275,26 @@ public abstract class HBaseCluster implements Closeable, Configurable { * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForNameNodeToStart(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Wait for the specified namenode to stop * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForNameNodeToStop(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** - * Starts a new master on the given hostname or if this is a mini/local cluster, - * starts a master locally. + * Starts a new master on the given hostname or if this is a mini/local cluster, starts a master + * locally. * @param hostname the hostname to start the master on * @throws IOException if something goes wrong */ public abstract void startMaster(String hostname, int port) throws IOException; /** - * Kills the master process if this is a distributed cluster, otherwise, - * this causes master to exit doing basic clean up only. + * Kills the master process if this is a distributed cluster, otherwise, this causes master to + * exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killMaster(ServerName serverName) throws IOException; @@ -316,31 +309,23 @@ public abstract class HBaseCluster implements Closeable, Configurable { * Wait for the specified master to stop the thread / process. * @throws IOException if something goes wrong or timeout occurs */ - public abstract void waitForMasterToStop(ServerName serverName, long timeout) - throws IOException; + public abstract void waitForMasterToStop(ServerName serverName, long timeout) throws IOException; /** - * Blocks until there is an active master and that master has completed - * initialization. - * - * @return true if an active master becomes available. false if there are no - * masters left. + * Blocks until there is an active master and that master has completed initialization. + * @return true if an active master becomes available. false if there are no masters left. 
* @throws IOException if something goes wrong or timeout occurs */ - public boolean waitForActiveAndReadyMaster() - throws IOException { + public boolean waitForActiveAndReadyMaster() throws IOException { return waitForActiveAndReadyMaster(Long.MAX_VALUE); } /** - * Blocks until there is an active master and that master has completed - * initialization. + * Blocks until there is an active master and that master has completed initialization. * @param timeout the timeout limit in ms - * @return true if an active master becomes available. false if there are no - * masters left. + * @return true if an active master becomes available. false if there are no masters left. */ - public abstract boolean waitForActiveAndReadyMaster(long timeout) - throws IOException; + public abstract boolean waitForActiveAndReadyMaster(long timeout) throws IOException; /** * Wait for HBase Cluster to shut down. @@ -353,10 +338,9 @@ public abstract class HBaseCluster implements Closeable, Configurable { public abstract void shutdown() throws IOException; /** - * Restores the cluster to it's initial state if this is a real cluster, - * otherwise does nothing. - * This is a best effort restore. If the servers are not reachable, or insufficient - * permissions, etc. restoration might be partial. + * Restores the cluster to it's initial state if this is a real cluster, otherwise does nothing. + * This is a best effort restore. If the servers are not reachable, or insufficient permissions, + * etc. restoration might be partial. * @return whether restoration is complete */ public boolean restoreInitialStatus() throws IOException { @@ -364,10 +348,9 @@ public abstract class HBaseCluster implements Closeable, Configurable { } /** - * Restores the cluster to given state if this is a real cluster, - * otherwise does nothing. - * This is a best effort restore. If the servers are not reachable, or insufficient - * permissions, etc. restoration might be partial. + * Restores the cluster to given state if this is a real cluster, otherwise does nothing. This is + * a best effort restore. If the servers are not reachable, or insufficient permissions, etc. + * restoration might be partial. * @return whether restoration is complete */ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOException { @@ -385,32 +368,30 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Get the ServerName of region server serving the specified region * @param regionName Name of the region in bytes - * @param tn Table name that has the region. + * @param tn Table name that has the region. * @return ServerName that hosts the region or null */ public abstract ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) - throws IOException; + throws IOException; /** - * @return whether we are interacting with a distributed cluster as opposed to an - * in-process mini/local cluster. + * @return whether we are interacting with a distributed cluster as opposed to an in-process + * mini/local cluster. */ public boolean isDistributedCluster() { return false; } /** - * Closes all the resources held open for this cluster. Note that this call does not shutdown - * the cluster. + * Closes all the resources held open for this cluster. Note that this call does not shutdown the + * cluster. * @see #shutdown() */ @Override public abstract void close() throws IOException; /** - * Wait for the namenode. - * - * @throws InterruptedException + * Wait for the namenode. 
n */ public void waitForNamenodeAvailable() throws InterruptedException { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index 5687a04c7f8..6bcf35a9609 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,10 +40,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Abstract HBase test class. Initializes a few things that can come in handly - * like an HBaseConfiguration and filesystem. + * Abstract HBase test class. Initializes a few things that can come in handly like an + * HBaseConfiguration and filesystem. * @deprecated since 2.0.0 and will be removed in 3.0.0. Write junit4 unit tests using - * {@link HBaseTestingUtility}. + * {@link HBaseTestingUtility}. * @see HBaseTestingUtility * @see HBASE-11912 */ @@ -52,11 +51,11 @@ import org.slf4j.LoggerFactory; public abstract class HBaseTestCase extends junit.framework.TestCase { private static final Logger LOG = LoggerFactory.getLogger(HBaseTestCase.class); - protected final static byte [] fam1 = Bytes.toBytes("colfamily11"); - protected final static byte [] fam2 = Bytes.toBytes("colfamily21"); - protected final static byte [] fam3 = Bytes.toBytes("colfamily31"); + protected final static byte[] fam1 = Bytes.toBytes("colfamily11"); + protected final static byte[] fam2 = Bytes.toBytes("colfamily21"); + protected final static byte[] fam3 = Bytes.toBytes("colfamily31"); - protected static final byte [][] COLUMNS = {fam1, fam2, fam3}; + protected static final byte[][] COLUMNS = { fam1, fam2, fam3 }; private boolean localfs = false; protected static Path testDir = null; @@ -65,7 +64,7 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { protected static final char FIRST_CHAR = 'a'; protected static final char LAST_CHAR = 'z'; protected static final String PUNCTUATION = "~`@#$%^&*()-_+=:;',.<>/?[]{}|"; - protected static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR}; + protected static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR }; protected String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET); protected static final int MAXVERSIONS = 3; @@ -87,21 +86,20 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { } /** - * @param name + * n */ public HBaseTestCase(String name) { super(name); } /** - * Note that this method must be called after the mini hdfs cluster has - * started or we end up with a local file system. + * Note that this method must be called after the mini hdfs cluster has started or we end up with + * a local file system. 
*/ @Override protected void setUp() throws Exception { super.setUp(); - localfs = - (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0); + localfs = (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0); if (fs == null) { this.fs = FileSystem.get(conf); @@ -136,44 +134,34 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { } /** - * @see HBaseTestingUtility#getBaseTestDir - * @param testName - * @return directory to use for this test + * @see HBaseTestingUtility#getBaseTestDir n * @return directory to use for this test */ - protected Path getUnitTestdir(String testName) { - return testUtil.getDataTestDir(testName); - } + protected Path getUnitTestdir(String testName) { + return testUtil.getDataTestDir(testName); + } /** * You must call close on the returned region and then close on the log file it created. Do * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to close both the region and the WAL. - * @param desc - * @param startKey - * @param endKey - * @return An {@link HRegion} - * @throws IOException + * nnn * @return An {@link HRegion} n */ - public HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey, - byte [] endKey) - throws IOException { + public HRegion createNewHRegion(HTableDescriptor desc, byte[] startKey, byte[] endKey) + throws IOException { return createNewHRegion(desc, startKey, endKey, this.conf); } - public HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey, - byte [] endKey, Configuration conf) - throws IOException { + public HRegion createNewHRegion(HTableDescriptor desc, byte[] startKey, byte[] endKey, + Configuration conf) throws IOException { HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey); return HBaseTestingUtility.createRegionAndWAL(hri, testDir, conf, desc); } - protected HRegion openClosedRegion(final HRegion closedRegion) - throws IOException { + protected HRegion openClosedRegion(final HRegion closedRegion) throws IOException { return HRegion.openHRegion(closedRegion, null); } /** - * Create a table of name {@code name} with {@link #COLUMNS} for - * families. + * Create a table of name {@code name} with {@link #COLUMNS} for families. * @param name Name to give table. * @return Column descriptor. */ @@ -182,55 +170,42 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { } /** - * Create a table of name {@code name} with {@link #COLUMNS} for - * families. - * @param name Name to give table. + * Create a table of name {@code name} with {@link #COLUMNS} for families. + * @param name Name to give table. * @param versions How many versions to allow per column. * @return Column descriptor. */ - protected HTableDescriptor createTableDescriptor(final String name, - final int versions) { - return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS, - versions, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED); + protected HTableDescriptor createTableDescriptor(final String name, final int versions) { + return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS, versions, + HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED); } /** - * Create a table of name {@code name} with {@link #COLUMNS} for - * families. - * @param name Name to give table. + * Create a table of name {@code name} with {@link #COLUMNS} for families. + * @param name Name to give table. * @param versions How many versions to allow per column. * @return Column descriptor. 
*/ - protected HTableDescriptor createTableDescriptor(final String name, - final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) { + protected HTableDescriptor createTableDescriptor(final String name, final int minVersions, + final int versions, final int ttl, KeepDeletedCells keepDeleted) { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) { - htd.addFamily(new HColumnDescriptor(cfName) - .setMinVersions(minVersions) - .setMaxVersions(versions) - .setKeepDeletedCells(keepDeleted) - .setBlockCacheEnabled(false) - .setTimeToLive(ttl) - ); + for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) { + htd.addFamily( + new HColumnDescriptor(cfName).setMinVersions(minVersions).setMaxVersions(versions) + .setKeepDeletedCells(keepDeleted).setBlockCacheEnabled(false).setTimeToLive(ttl)); } return htd; } /** - * Add content to region r on the passed column - * column. - * Adds data of the from 'aaa', 'aab', etc where key and value are the same. - * @param r - * @param columnFamily - * @param column - * @throws IOException - * @return count of what we added. + * Add content to region r on the passed column column. Adds data of the + * from 'aaa', 'aab', etc where key and value are the same. nnnn * @return count of what we added. */ - public static long addContent(final Region r, final byte [] columnFamily, final byte[] column) - throws IOException { - byte [] startKey = r.getRegionInfo().getStartKey(); - byte [] endKey = r.getRegionInfo().getEndKey(); - byte [] startKeyBytes = startKey; + public static long addContent(final Region r, final byte[] columnFamily, final byte[] column) + throws IOException { + byte[] startKey = r.getRegionInfo().getStartKey(); + byte[] endKey = r.getRegionInfo().getEndKey(); + byte[] startKeyBytes = startKey; if (startKeyBytes == null || startKeyBytes.length == 0) { startKeyBytes = START_KEY_BYTES; } @@ -238,75 +213,62 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { startKeyBytes, endKey, -1); } - public static long addContent(final Region r, final byte [] columnFamily) throws IOException { + public static long addContent(final Region r, final byte[] columnFamily) throws IOException { return addContent(r, columnFamily, null); } /** - * Add content to region r on the passed column - * column. - * Adds data of the from 'aaa', 'aab', etc where key and value are the same. - * @throws IOException - * @return count of what we added. + * Add content to region r on the passed column column. Adds data of the + * from 'aaa', 'aab', etc where key and value are the same. n * @return count of what we added. */ - public static long addContent(final Table updater, - final String columnFamily) throws IOException { + public static long addContent(final Table updater, final String columnFamily) throws IOException { return addContent(updater, columnFamily, START_KEY_BYTES, null); } - public static long addContent(final Table updater, final String family, - final String column) throws IOException { + public static long addContent(final Table updater, final String family, final String column) + throws IOException { return addContent(updater, family, column, START_KEY_BYTES, null); } /** - * Add content to region r on the passed column - * column. - * Adds data of the from 'aaa', 'aab', etc where key and value are the same. - * @return count of what we added. - * @throws IOException + * Add content to region r on the passed column column. 
Adds data of the + * from 'aaa', 'aab', etc where key and value are the same. + * @return count of what we added. n */ public static long addContent(final Table updater, final String columnFamily, - final byte [] startKeyBytes, final byte [] endKey) - throws IOException { + final byte[] startKeyBytes, final byte[] endKey) throws IOException { return addContent(updater, columnFamily, null, startKeyBytes, endKey, -1); } public static long addContent(final Table updater, final String family, String column, - final byte [] startKeyBytes, final byte [] endKey) throws IOException { + final byte[] startKeyBytes, final byte[] endKey) throws IOException { return addContent(updater, family, column, startKeyBytes, endKey, -1); } /** - * Add content to region r on the passed column - * column. - * Adds data of the from 'aaa', 'aab', etc where key and value are the same. - * @return count of what we added. - * @throws IOException + * Add content to region r on the passed column column. Adds data of the + * from 'aaa', 'aab', etc where key and value are the same. + * @return count of what we added. n */ - public static long addContent(final Table updater, - final String columnFamily, - final String column, - final byte [] startKeyBytes, final byte [] endKey, final long ts) - throws IOException { + public static long addContent(final Table updater, final String columnFamily, final String column, + final byte[] startKeyBytes, final byte[] endKey, final long ts) throws IOException { long count = 0; - // Add rows of three characters. The first character starts with the - // 'a' character and runs up to 'z'. Per first character, we run the - // second character over same range. And same for the third so rows + // Add rows of three characters. The first character starts with the + // 'a' character and runs up to 'z'. Per first character, we run the + // second character over same range. And same for the third so rows // (and values) look like this: 'aaa', 'aab', 'aac', etc. 
- char secondCharStart = (char)startKeyBytes[1]; - char thirdCharStart = (char)startKeyBytes[2]; - EXIT: for (char c = (char)startKeyBytes[0]; c <= LAST_CHAR; c++) { + char secondCharStart = (char) startKeyBytes[1]; + char thirdCharStart = (char) startKeyBytes[2]; + EXIT: for (char c = (char) startKeyBytes[0]; c <= LAST_CHAR; c++) { for (char d = secondCharStart; d <= LAST_CHAR; d++) { for (char e = thirdCharStart; e <= LAST_CHAR; e++) { - byte [] t = new byte [] {(byte)c, (byte)d, (byte)e}; - if (endKey != null && endKey.length > 0 - && Bytes.compareTo(endKey, t) <= 0) { + byte[] t = new byte[] { (byte) c, (byte) d, (byte) e }; + if (endKey != null && endKey.length > 0 && Bytes.compareTo(endKey, t) <= 0) { break EXIT; } try { Put put; - if(ts != -1) { + if (ts != -1) { put = new Put(t, ts); } else { put = new Put(t); @@ -326,9 +288,8 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { } } } - byte[][] split = - CellUtil.parseColumn(Bytes.toBytes(sb.toString())); - if(split.length == 1) { + byte[][] split = CellUtil.parseColumn(Bytes.toBytes(sb.toString())); + if (split.length == 1) { byte[] qualifier = new byte[0]; put.addColumn(split[0], qualifier, t); } else { @@ -360,37 +321,33 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { return count; } - protected void assertResultEquals(final HRegion region, final byte [] row, - final byte [] family, final byte [] qualifier, final long timestamp, - final byte [] value) throws IOException { + protected void assertResultEquals(final HRegion region, final byte[] row, final byte[] family, + final byte[] qualifier, final long timestamp, final byte[] value) throws IOException { Get get = new Get(row); get.setTimestamp(timestamp); Result res = region.get(get); - NavigableMap>> map = - res.getMap(); - byte [] res_value = map.get(family).get(qualifier).get(timestamp); + NavigableMap>> map = res.getMap(); + byte[] res_value = map.get(family).get(qualifier).get(timestamp); if (value == null) { - assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) + - " at timestamp " + timestamp, null, res_value); + assertEquals( + Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + timestamp, + null, res_value); } else { if (res_value == null) { - fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) + - " at timestamp " + timestamp + "\" was expected to be \"" + - Bytes.toStringBinary(value) + " but was null"); + fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + timestamp + + "\" was expected to be \"" + Bytes.toStringBinary(value) + " but was null"); } if (res_value != null) { - assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) + - " at timestamp " + - timestamp, value, new String(res_value, StandardCharsets.UTF_8)); + assertEquals( + Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + timestamp, + value, new String(res_value, StandardCharsets.UTF_8)); } } } /** - * Common method to close down a MiniDFSCluster and the associated file system - * - * @param cluster + * Common method to close down a MiniDFSCluster and the associated file system n */ public static void shutdownDfs(MiniDFSCluster cluster) { if (cluster != null) { @@ -416,14 +373,13 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { } /** - * You must call {@link #closeRootAndMeta()} when done after calling this - * method. It does cleanup. 
- * @throws IOException + * You must call {@link #closeRootAndMeta()} when done after calling this method. It does cleanup. + * n */ protected void createMetaRegion() throws IOException { FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf); - meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir, - conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir, conf, + fsTableDescriptors.get(TableName.META_TABLE_NAME)); } protected void closeRootAndMeta() throws IOException { @@ -439,8 +395,8 @@ public abstract class HBaseTestCase extends junit.framework.TestCase { public static void assertEquals(byte[] expected, byte[] actual) { if (Bytes.compareTo(expected, actual) != 0) { - throw new junit.framework.AssertionFailedError("expected:<" + Bytes.toStringBinary(expected) + - "> but was:<" + Bytes.toStringBinary(actual) + ">"); + throw new junit.framework.AssertionFailedError("expected:<" + Bytes.toStringBinary(expected) + + "> but was:<" + Bytes.toStringBinary(actual) + ">"); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index c7c7d751586..e30a1cf89cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import edu.umd.cs.findbugs.annotations.Nullable; import java.io.File; import java.io.IOException; @@ -150,24 +151,23 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooKeeper.States; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * Facility for testing HBase. Replacement for - * old HBaseTestCase and HBaseClusterTestCase functionality. - * Create an instance and keep it around testing HBase. This class is - * meant to be your one-stop shop for anything you might need testing. Manages - * one cluster at a time only. Managed cluster can be an in-process - * {@link MiniHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}. - * Not all methods work with the real cluster. - * Depends on log4j being on classpath and - * hbase-site.xml for logging and test-run configuration. It does not set - * logging levels. - * In the configuration properties, default values for master-info-port and - * region-server-port are overridden such that a random port will be assigned (thus - * avoiding port contention if another local HBase instance is already running). - *

      To preserve test data directories, pass the system property "hbase.testing.preserve.testdir" + * Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase + * functionality. Create an instance and keep it around testing HBase. This class is meant to be + * your one-stop shop for anything you might need testing. Manages one cluster at a time only. + * Managed cluster can be an in-process {@link MiniHBaseCluster}, or a deployed cluster of type + * {@code DistributedHBaseCluster}. Not all methods work with the real cluster. Depends on log4j + * being on classpath and hbase-site.xml for logging and test-run configuration. It does not set + * logging levels. In the configuration properties, default values for master-info-port and + * region-server-port are overridden such that a random port will be assigned (thus avoiding port + * contention if another local HBase instance is already running). + *

      + * To preserve test data directories, pass the system property "hbase.testing.preserve.testdir" * setting it to true. */ @InterfaceAudience.Public @@ -186,12 +186,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server"; /** - * The default number of regions per regionserver when creating a pre-split - * table. + * The default number of regions per regionserver when creating a pre-split table. */ public static final int DEFAULT_REGIONS_PER_SERVER = 3; - public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table"; public static final boolean PRESPLIT_TEST_TABLE = true; @@ -205,8 +203,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { private String hadoopLogDir; - /** Directory on test filesystem where we put the data for this instance of - * HBaseTestingUtility*/ + /** + * Directory on test filesystem where we put the data for this instance of HBaseTestingUtility + */ private Path dataTestDirOnTestFS = null; private final AtomicReference connection = new AtomicReference<>(); @@ -219,7 +218,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** * Checks to see if a specific port is available. - * * @param port the port number to check for availability * @return true if the port is available, or false if not */ @@ -252,13 +250,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Create all combinations of Bloom filters and compression algorithms for - * testing. + * Create all combinations of Bloom filters and compression algorithms for testing. */ private static List bloomAndCompressionCombinations() { List configurations = new ArrayList<>(); - for (Compression.Algorithm comprAlgo : - HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) { + for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) { for (BloomType bloomType : BloomType.values()) { configurations.add(new Object[] { comprAlgo, bloomType }); } @@ -292,37 +288,37 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } public static final Collection BLOOM_AND_COMPRESSION_COMBINATIONS = - bloomAndCompressionCombinations(); - + bloomAndCompressionCombinations(); /** - *

      Create an HBaseTestingUtility using a default configuration. - * - *

      Initially, all tmp files are written to a local test data directory. - * Once {@link #startMiniDFSCluster} is called, either directly or via - * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead. - * - *

      Previously, there was a distinction between the type of utility returned by + *

      + * Create an HBaseTestingUtility using a default configuration. + *

      + * Initially, all tmp files are written to a local test data directory. Once + * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp + * data will be written to the DFS directory instead. + *

      + * Previously, there was a distinction between the type of utility returned by * {@link #createLocalHTU()} and this constructor; this is no longer the case. All - * HBaseTestingUtility objects will behave as local until a DFS cluster is started, - * at which point they will switch to using mini DFS for storage. + * HBaseTestingUtility objects will behave as local until a DFS cluster is started, at which point + * they will switch to using mini DFS for storage. */ public HBaseTestingUtility() { this(HBaseConfiguration.create()); } /** - *

      Create an HBaseTestingUtility using a given configuration. - * - *

      Initially, all tmp files are written to a local test data directory. - * Once {@link #startMiniDFSCluster} is called, either directly or via - * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead. - * - *

      Previously, there was a distinction between the type of utility returned by + *

      + * Create an HBaseTestingUtility using a given configuration. + *

      + * Initially, all tmp files are written to a local test data directory. Once + * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp + * data will be written to the DFS directory instead. + *

      + * Previously, there was a distinction between the type of utility returned by * {@link #createLocalHTU()} and this constructor; this is no longer the case. All - * HBaseTestingUtility objects will behave as local until a DFS cluster is started, - * at which point they will switch to using mini DFS for storage. - * + * HBaseTestingUtility objects will behave as local until a DFS cluster is started, at which point + * they will switch to using mini DFS for storage. * @param conf The configuration to use for further operations */ public HBaseTestingUtility(@Nullable Configuration conf) { @@ -341,19 +337,19 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { // Every cluster is a local cluster until we start DFS // Note that conf could be null, but this.conf will not be String dataTestDir = getDataTestDir().toString(); - this.conf.set("fs.defaultFS","file:///"); + this.conf.set("fs.defaultFS", "file:///"); this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir); LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir); - this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,false); + this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false); // If the value for random ports isn't set set it to true, thus making // tests opt-out for random port assignment this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, - this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true)); + this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true)); } /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #HBaseTestingUtility()} - * instead. + * instead. * @return a normal HBaseTestingUtility * @see #HBaseTestingUtility() * @see HBASE-19841 @@ -365,7 +361,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Use - * {@link #HBaseTestingUtility(Configuration)} instead. + * {@link #HBaseTestingUtility(Configuration)} instead. * @return a normal HBaseTestingUtility * @see #HBaseTestingUtility(Configuration) * @see HBASE-19841 @@ -379,7 +375,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { * Close both the region {@code r} and it's underlying WAL. For use in tests. */ public static void closeRegionAndWAL(final Region r) throws IOException { - closeRegionAndWAL((HRegion)r); + closeRegionAndWAL((HRegion) r); } /** @@ -393,13 +389,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Returns this classes's instance of {@link Configuration}. Be careful how - * you use the returned Configuration since {@link Connection} instances - * can be shared. The Map of Connections is keyed by the Configuration. If - * say, a Connection was being used against a cluster that had been shutdown, - * see {@link #shutdownMiniCluster()}, then the Connection will no longer - * be wholesome. Rather than use the return direct, its usually best to - * make a copy and use that. Do + * Returns this classes's instance of {@link Configuration}. Be careful how you use the returned + * Configuration since {@link Connection} instances can be shared. The Map of Connections is keyed + * by the Configuration. If say, a Connection was being used against a cluster that had been + * shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be wholesome. + * Rather than use the return direct, its usually best to make a copy and use that. 
Do * Configuration c = new Configuration(INSTANCE.getConfiguration()); * @return Instance of Configuration. */ @@ -413,19 +407,14 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. - * Give it a random name so can have many concurrent tests running if - * we need to. It needs to amend the {@link #TEST_DIRECTORY_KEY} - * System property, as it's what minidfscluster bases - * it data dir on. Moding a System property is not the way to do concurrent - * instances -- another instance could grab the temporary - * value unintentionally -- but not anything can do about it at moment; - * single instance only is how the minidfscluster works. - * - * We also create the underlying directory names for - * hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values - * in the conf, and as a system property for hadoop.tmp.dir (We do not create them!). - * + * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so can + * have many concurrent tests running if we need to. It needs to amend the + * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases it data dir on. + * Moding a System property is not the way to do concurrent instances -- another instance could + * grab the temporary value unintentionally -- but not anything can do about it at moment; single + * instance only is how the minidfscluster works. We also create the underlying directory names + * for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the + * conf, and as a system property for hadoop.tmp.dir (We do not create them!). * @return The calculated data test build directory, if newly-created. */ @Override @@ -435,40 +424,31 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { return null; } - createSubDirAndSystemProperty( - "hadoop.log.dir", - testPath, "hadoop-log-dir"); + createSubDirAndSystemProperty("hadoop.log.dir", testPath, "hadoop-log-dir"); // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but - // we want our own value to ensure uniqueness on the same machine - createSubDirAndSystemProperty( - "hadoop.tmp.dir", - testPath, "hadoop-tmp-dir"); + // we want our own value to ensure uniqueness on the same machine + createSubDirAndSystemProperty("hadoop.tmp.dir", testPath, "hadoop-tmp-dir"); // Read and modified in org.apache.hadoop.mapred.MiniMRCluster - createSubDir( - "mapreduce.cluster.local.dir", - testPath, "mapred-local-dir"); + createSubDir("mapreduce.cluster.local.dir", testPath, "mapred-local-dir"); return testPath; } - private void createSubDirAndSystemProperty( - String propertyName, Path parent, String subDirName){ + private void createSubDirAndSystemProperty(String propertyName, Path parent, String subDirName) { String sysValue = System.getProperty(propertyName); if (sysValue != null) { // There is already a value set. 
So we do nothing but hope - // that there will be no conflicts - LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+ - sysValue + " so I do NOT create it in " + parent); + // that there will be no conflicts + LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue + + " so I do NOT create it in " + parent); String confValue = conf.get(propertyName); - if (confValue != null && !confValue.endsWith(sysValue)){ - LOG.warn( - propertyName + " property value differs in configuration and system: "+ - "Configuration="+confValue+" while System="+sysValue+ - " Erasing configuration value by system value." - ); + if (confValue != null && !confValue.endsWith(sysValue)) { + LOG.warn(propertyName + " property value differs in configuration and system: " + + "Configuration=" + confValue + " while System=" + sysValue + + " Erasing configuration value by system value."); } conf.set(propertyName, sysValue); } else { @@ -479,8 +459,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * @return Where to write test data on the test filesystem; Returns working directory - * for the test filesystem by default + * @return Where to write test data on the test filesystem; Returns working directory for the test + * filesystem by default * @see #setupDataTestDirOnTestFS() * @see #getTestFileSystem() */ @@ -491,8 +471,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** * @return META table descriptor - * @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only. - * use {@link #getMetaTableDescriptorBuilder()} + * @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only. use + * {@link #getMetaTableDescriptorBuilder()} */ @Deprecated public HTableDescriptor getMetaTableDescriptor() { @@ -514,9 +494,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} - * to write temporary test data. Call this method after setting up the mini dfs cluster - * if the test relies on it. + * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write + * temporary test data. Call this method after setting up the mini dfs cluster if the test relies + * on it. * @return a unique path in the test filesystem */ public Path getDataTestDirOnTestFS() throws IOException { @@ -528,9 +508,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} - * to write temporary test data. Call this method after setting up the mini dfs cluster - * if the test relies on it. + * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write + * temporary test data. Call this method after setting up the mini dfs cluster if the test relies + * on it. * @return a unique path in the test filesystem * @param subdirName name of the subdir to create under the base test dir */ @@ -539,13 +519,12 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Sets up a path in test filesystem to be used by tests. - * Creates a new directory if not already setup. + * Sets up a path in test filesystem to be used by tests. Creates a new directory if not already + * setup. 
           */
          private void setupDataTestDirOnTestFS() throws IOException {
            if (dataTestDirOnTestFS != null) {
       -      LOG.warn("Data test on test fs dir already setup in "
       -          + dataTestDirOnTestFS.toString());
       +      LOG.warn("Data test on test fs dir already setup in " + dataTestDirOnTestFS.toString());
              return;
            }
            dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
       @@ -555,10 +534,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           * Sets up a new path in test filesystem to be used by tests.
           */
          private Path getNewDataTestDirOnTestFS() throws IOException {
       -    //The file system can be either local, mini dfs, or if the configuration
       -    //is supplied externally, it can be an external cluster FS. If it is a local
       -    //file system, the tests should use getBaseTestDir, otherwise, we can use
       -    //the working directory, and create a unique sub dir there
       +    // The file system can be either local, mini dfs, or if the configuration
       +    // is supplied externally, it can be an external cluster FS. If it is a local
       +    // file system, the tests should use getBaseTestDir, otherwise, we can use
       +    // the working directory, and create a unique sub dir there
            FileSystem fs = getTestFileSystem();
            Path newDataTestDir;
            String randomStr = getRandomUUID().toString();
       @@ -576,20 +555,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
        
          /**
           * Cleans the test data directory on the test filesystem.
       -   * @return True if we removed the test dirs
       -   * @throws IOException
       +   * @return True if we removed the test dirs
           */
          public boolean cleanupDataTestDirOnTestFS() throws IOException {
            boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
       -    if (ret)
       -      dataTestDirOnTestFS = null;
       +    if (ret) dataTestDirOnTestFS = null;
            return ret;
          }
        
          /**
           * Cleans a subdirectory under the test data directory on the test filesystem.
       -   * @return True if we removed child
       -   * @throws IOException
       +   * @return True if we removed child
           */
          public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
            Path cpath = getDataTestDirOnTestFS(subdirName);
       @@ -598,9 +574,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
        
          /**
           * Start a minidfscluster.
       -   * @param servers How many DNs to start.
       -   * @throws Exception
       -   * @see #shutdownMiniDFSCluster()
       +   * @param servers How many DNs to start.
       +   * @see #shutdownMiniDFSCluster()
           * @return The mini dfs cluster created.
           */
          public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
       @@ -608,19 +582,14 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          }
        
          /**
       -   * Start a minidfscluster.
       -   * This is useful if you want to run datanode on distinct hosts for things
       -   * like HDFS block location verification.
       -   * If you start MiniDFSCluster without host names, all instances of the
       -   * datanodes will have the same host name.
       -   * @param hosts hostnames DNs to run on.
       -   * @throws Exception
       -   * @see #shutdownMiniDFSCluster()
       +   * Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things
       +   * like HDFS block location verification. If you start MiniDFSCluster without host names, all
       +   * instances of the datanodes will have the same host name.
       +   * @param hosts hostnames DNs to run on.
       +   * @see #shutdownMiniDFSCluster()
           * @return The mini dfs cluster created.
           */
       -  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
       -      throws Exception {
       -    if ( hosts != null && hosts.length != 0) {
       +  public MiniDFSCluster startMiniDFSCluster(final String hosts[]) throws Exception {
       +    if (hosts != null && hosts.length != 0) {
              return startMiniDFSCluster(hosts.length, hosts);
            } else {
              return startMiniDFSCluster(1, null);
       @@ -628,21 +597,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          }
        
          /**
       -   * Start a minidfscluster.
       -   * Can only create one.
       +   * Start a minidfscluster. Can only create one.
           * @param servers How many DNs to start.
       -   * @param hosts hostnames DNs to run on.
       -   * @throws Exception
       -   * @see #shutdownMiniDFSCluster()
       +   * @param hosts   hostnames DNs to run on.
       +   * @see #shutdownMiniDFSCluster()
           * @return The mini dfs cluster created.
           */
       -  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
       -      throws Exception {
       +  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[]) throws Exception {
            return startMiniDFSCluster(servers, null, hosts);
          }
        
          private void setFs() throws IOException {
       -    if(this.dfsCluster == null){
       +    if (this.dfsCluster == null) {
              LOG.info("Skipping setting fs because dfsCluster is null");
              return;
            }
       @@ -653,8 +618,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
            conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
          }
        
       -  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
       -      throws Exception {
       +  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
       +    throws Exception {
            createDirsAndSetProperties();
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
        
       @@ -663,8 +628,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
            Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
              "ERROR");
        
       -    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
       -        true, null, racks, hosts, null);
       +    this.dfsCluster =
       +      new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
        
            // Set this just-started cluster as our filesystem.
            setFs();
       @@ -672,7 +637,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
            // Wait for the cluster to be totally up
            this.dfsCluster.waitClusterUp();
        
       -    //reset the test directory for test file system
       +    // reset the test directory for test file system
            dataTestDirOnTestFS = null;
            String dataTestDir = getDataTestDir().toString();
            conf.set(HConstants.HBASE_DIR, dataTestDir);
       @@ -687,8 +652,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
            Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
            Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
              "ERROR");
       -    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
       -        null, null, null);
       +    dfsCluster =
       +      new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
            return dfsCluster;
          }
        
       @@ -696,6 +661,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           * This is used before starting HDFS and map-reduce mini-clusters Run something like the below to
           * check for the likes of '/tmp' references -- i.e. references outside of the test data dir -- in
           * the conf.
       +   *
           * <pre>
          * Configuration conf = TEST_UTIL.getConfiguration();
          * for (Iterator<Map.Entry<String, String>> i = conf.iterator(); i.hasNext();) {
      @@ -744,35 +710,35 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   *  Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
      -   *  new column families. Default to false.
      +   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
       +   * Defaults to false.
          */
      -  public boolean isNewVersionBehaviorEnabled(){
      +  public boolean isNewVersionBehaviorEnabled() {
           final String propName = "hbase.tests.new.version.behavior";
           String v = System.getProperty(propName);
      -    if (v != null){
      +    if (v != null) {
             return Boolean.parseBoolean(v);
           }
           return false;
         }
       
         /**
      -   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
      -   *  This allows to specify this parameter on the command line.
      -   *   If not set, default is true.
      +   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property. This
       +   * allows specifying this parameter on the command line. If not set, default is true.
          */
      -  public boolean isReadShortCircuitOn(){
      +  public boolean isReadShortCircuitOn() {
           final String propName = "hbase.tests.use.shortcircuit.reads";
           String readOnProp = System.getProperty(propName);
      -    if (readOnProp != null){
      -      return  Boolean.parseBoolean(readOnProp);
      +    if (readOnProp != null) {
      +      return Boolean.parseBoolean(readOnProp);
           } else {
             return conf.getBoolean(propName, false);
           }
         }
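Both toggles above are read from plain system properties, so a test run can flip them before this utility consults them. A minimal JUnit 4 sketch, using the property names from the hunk above; the class and method names are hypothetical.

import static org.junit.Assert.assertTrue;

import org.junit.BeforeClass;
import org.junit.Test;

public class TestPropertyTogglesSketch {

  @BeforeClass
  public static void setToggles() {
    // Same keys the utility checks via System.getProperty(...) above.
    System.setProperty("hbase.tests.new.version.behavior", "true");
    System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
  }

  @Test
  public void togglesVisible() {
    assertTrue(Boolean.parseBoolean(System.getProperty("hbase.tests.new.version.behavior")));
  }
}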
       
      -  /** Enable the short circuit read, unless configured differently.
      -   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
      +  /**
      +   * Enable the short circuit read, unless configured differently. Set both HBase and HDFS settings,
      +   * including skipping the hdfs checksum checks.
          */
         private void enableShortCircuit() {
           if (isReadShortCircuitOn()) {
      @@ -803,9 +769,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
      -   * or does nothing.
      -   * @throws IOException
       +   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing.
          */
         public void shutdownMiniDFSCluster() throws IOException {
           if (this.dfsCluster != null) {
      @@ -824,277 +788,264 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .createWALDir(createWALDir).build();
      +    StartMiniClusterOption option =
      +      StartMiniClusterOption.builder().createWALDir(createWALDir).build();
           return startMiniCluster(option);
         }
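The deprecation notes above all point at the option-based entry point. A minimal sketch of that replacement, using only builder methods that appear elsewhere in this patch; the helper and its names are hypothetical.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class StartMiniClusterOptionSketch {

  // Builder-based startup instead of the deprecated numeric overloads.
  static MiniHBaseCluster start(HBaseTestingUtility util) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(1)
      .numRegionServers(3)
      .numDataNodes(3)
      .createWALDir(true)
      .build();
    return util.startMiniCluster(option);
  }
}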
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
          * @param createRootDir Whether to create a new root or data directory path.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
      -  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
      -  throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
      +  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir) throws Exception {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
      +      .numDataNodes(numSlaves).createRootDir(createRootDir).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
          * @param createRootDir Whether to create a new root or data directory path.
      -   * @param createWALDir Whether to create a new WAL directory.
      +   * @param createWALDir  Whether to create a new WAL directory.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
      -      boolean createWALDir) throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
      -        .createWALDir(createWALDir).build();
      +    boolean createWALDir) throws Exception {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
      +      .numDataNodes(numSlaves).createRootDir(createRootDir).createWALDir(createWALDir).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      -   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters    Master node number.
      +   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
          * @param createRootDir Whether to create a new root or data directory path.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *  {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
           throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
      -        .numDataNodes(numSlaves).build();
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .numRegionServers(numSlaves).createRootDir(createRootDir).numDataNodes(numSlaves).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
          * @param numMasters Master node number.
      -   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
      +   * @param numSlaves  Slave node number, for both HBase region server and HDFS data node.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      -   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters    Master node number.
      +   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
          * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          *                      HDFS data node number.
          * @param createRootDir Whether to create a new root or data directory path.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      -      boolean createRootDir) throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
      -        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
      +    boolean createRootDir) throws Exception {
      +    StartMiniClusterOption option =
      +      StartMiniClusterOption.builder().numMasters(numMasters).numRegionServers(numSlaves)
      +        .createRootDir(createRootDir).numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      -   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters    Master node number.
      +   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
          * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          *                      HDFS data node number.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
      -      throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).numRegionServers(numSlaves)
      -        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
      +    throws Exception {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .numRegionServers(numSlaves).numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters       Master node number.
          * @param numRegionServers Number of region servers.
      -   * @param numDataNodes Number of datanodes.
      +   * @param numDataNodes     Number of datanodes.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
      -      throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
      -        .build();
      +    throws Exception {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .numRegionServers(numRegionServers).numDataNodes(numDataNodes).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      -   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters    Master node number.
      +   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
          * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          *                      HDFS data node number.
      -   * @param masterClass The class to use as HMaster, or null for default.
      -   * @param rsClass The class to use as HRegionServer, or null for default.
      +   * @param masterClass   The class to use as HMaster, or null for default.
      +   * @param rsClass       The class to use as HRegionServer, or null for default.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
       -      Class<? extends HMaster> masterClass,
       -      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      -      throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).masterClass(masterClass)
      -        .numRegionServers(numSlaves).rsClass(rsClass)
      -        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
      -        .build();
       +    Class<? extends HMaster> masterClass,
       +    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .masterClass(masterClass).numRegionServers(numSlaves).rsClass(rsClass).numDataNodes(numSlaves)
      +      .dataNodeHosts(dataNodeHosts).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters       Master node number.
          * @param numRegionServers Number of region servers.
      -   * @param numDataNodes Number of datanodes.
      -   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
      -   *                      HDFS data node number.
      -   * @param masterClass The class to use as HMaster, or null for default.
      -   * @param rsClass The class to use as HRegionServer, or null for default.
      +   * @param numDataNodes     Number of datanodes.
      +   * @param dataNodeHosts    The hostnames of DataNodes to run on. If not null, its size will
      +   *                         overwrite HDFS data node number.
      +   * @param masterClass      The class to use as HMaster, or null for default.
      +   * @param rsClass          The class to use as HRegionServer, or null for default.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
       -      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
       -      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      -    throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).masterClass(masterClass)
      -        .numRegionServers(numRegionServers).rsClass(rsClass)
      -        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
      -        .build();
       +    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
       +    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
      +      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs, and zookeeper.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
      +   * defined in {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters       Master node number.
          * @param numRegionServers Number of region servers.
      -   * @param numDataNodes Number of datanodes.
      -   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
      -   *                      HDFS data node number.
      -   * @param masterClass The class to use as HMaster, or null for default.
      -   * @param rsClass The class to use as HRegionServer, or null for default.
      -   * @param createRootDir Whether to create a new root or data directory path.
      -   * @param createWALDir Whether to create a new WAL directory.
      +   * @param numDataNodes     Number of datanodes.
      +   * @param dataNodeHosts    The hostnames of DataNodes to run on. If not null, its size will
      +   *                         overwrite HDFS data node number.
      +   * @param masterClass      The class to use as HMaster, or null for default.
      +   * @param rsClass          The class to use as HRegionServer, or null for default.
      +   * @param createRootDir    Whether to create a new root or data directory path.
      +   * @param createWALDir     Whether to create a new WAL directory.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
          * @see #startMiniCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
       -      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
       -      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
      -      boolean createWALDir) throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).masterClass(masterClass)
      -        .numRegionServers(numRegionServers).rsClass(rsClass)
      -        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
      -        .createRootDir(createRootDir).createWALDir(createWALDir)
      -        .build();
       +    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
       +    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
      +    boolean createWALDir) throws Exception {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
      +      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).createRootDir(createRootDir)
      +      .createWALDir(createWALDir).build();
           return startMiniCluster(option);
         }
       
         /**
      -   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      +   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number. All
      +   * other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          * @param numSlaves slave node number, for both HBase region server and HDFS data node.
          * @see #startMiniCluster(StartMiniClusterOption option)
          * @see #shutdownMiniDFSCluster()
          */
         public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
      +    StartMiniClusterOption option =
      +      StartMiniClusterOption.builder().numRegionServers(numSlaves).numDataNodes(numSlaves).build();
           return startMiniCluster(option);
         }
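In practice the single-argument form above is usually wrapped in class-level setup and teardown. A minimal JUnit 4 sketch under that assumption; the class name is hypothetical.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // One region server and one data node; dfs and zookeeper are started for us.
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}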
       
         /**
      -   * Start up a minicluster of hbase, dfs and zookeeper all using default options.
      -   * Option default value can be found in {@link StartMiniClusterOption.Builder}.
      +   * Start up a minicluster of hbase, dfs and zookeeper all using default options. Option default
      +   * value can be found in {@link StartMiniClusterOption.Builder}.
          * @see #startMiniCluster(StartMiniClusterOption option)
          * @see #shutdownMiniDFSCluster()
          */
      @@ -1103,9 +1054,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
      -   * It modifies Configuration.  It homes the cluster data directory under a random
      -   * subdirectory in a directory under System property test.build.data, to be cleaned up on exit.
      +   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed. It modifies
      +   * Configuration. It homes the cluster data directory under a random subdirectory in a directory
      +   * under System property test.build.data, to be cleaned up on exit.
          * @see #shutdownMiniDFSCluster()
          */
         public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
      @@ -1184,7 +1135,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
             }
           }
       
      -
           getAdmin(); // create immediately the hbaseAdmin
           LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());
       
      @@ -1192,8 +1142,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Starts up mini hbase cluster using default options.
      -   * Default options can be found in {@link StartMiniClusterOption.Builder}.
      +   * Starts up mini hbase cluster using default options. Default options can be found in
      +   * {@link StartMiniClusterOption.Builder}.
          * @see #startMiniHBaseCluster(StartMiniClusterOption)
          * @see #shutdownMiniHBaseCluster()
          */
      @@ -1202,81 +1152,80 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Starts up mini hbase cluster.
      -   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      +   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
      +   * {@link #startMiniCluster()}. All other options will use default values, defined in
      +   * {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters       Master node number.
          * @param numRegionServers Number of region servers.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniHBaseCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
          * @see #startMiniHBaseCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
      -      throws IOException, InterruptedException {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).numRegionServers(numRegionServers).build();
      +    throws IOException, InterruptedException {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .numRegionServers(numRegionServers).build();
           return startMiniHBaseCluster(option);
         }
       
         /**
      -   * Starts up mini hbase cluster.
      -   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      +   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
      +   * {@link #startMiniCluster()}. All other options will use default values, defined in
      +   * {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters       Master node number.
          * @param numRegionServers Number of region servers.
      -   * @param rsPorts Ports that RegionServer should use.
      +   * @param rsPorts          Ports that RegionServer should use.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniHBaseCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
          * @see #startMiniHBaseCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
       -      List<Integer> rsPorts) throws IOException, InterruptedException {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
       +    List<Integer> rsPorts) throws IOException, InterruptedException {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .numRegionServers(numRegionServers).rsPorts(rsPorts).build();
           return startMiniHBaseCluster(option);
         }
       
         /**
      -   * Starts up mini hbase cluster.
      -   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
      -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
      -   * @param numMasters Master node number.
      +   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
      +   * {@link #startMiniCluster()}. All other options will use default values, defined in
      +   * {@link StartMiniClusterOption.Builder}.
      +   * @param numMasters       Master node number.
          * @param numRegionServers Number of region servers.
      -   * @param rsPorts Ports that RegionServer should use.
      -   * @param masterClass The class to use as HMaster, or null for default.
      -   * @param rsClass The class to use as HRegionServer, or null for default.
      -   * @param createRootDir Whether to create a new root or data directory path.
      -   * @param createWALDir Whether to create a new WAL directory.
      +   * @param rsPorts          Ports that RegionServer should use.
      +   * @param masterClass      The class to use as HMaster, or null for default.
      +   * @param rsClass          The class to use as HRegionServer, or null for default.
      +   * @param createRootDir    Whether to create a new root or data directory path.
      +   * @param createWALDir     Whether to create a new WAL directory.
          * @return The mini HBase cluster created.
          * @see #shutdownMiniHBaseCluster()
          * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
      -   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
      +   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
          * @see #startMiniHBaseCluster(StartMiniClusterOption)
          * @see HBASE-21071
          */
         @Deprecated
         public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
       -      List<Integer> rsPorts, Class<? extends HMaster> masterClass,
       -      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
      -      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .numMasters(numMasters).masterClass(masterClass)
      -        .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
      -        .createRootDir(createRootDir).createWALDir(createWALDir).build();
       +    List<Integer> rsPorts, Class<? extends HMaster> masterClass,
       +    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
      +    boolean createWALDir) throws IOException, InterruptedException {
      +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
      +      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
      +      .createRootDir(createRootDir).createWALDir(createWALDir).build();
           return startMiniHBaseCluster(option);
         }
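The masterClass/rsClass parameters above let a test substitute its own HMaster or region server class, and the builder exposes the same knobs. A sketch only, assuming the usual generic bounds on masterClass(...) and an HMaster(Configuration) constructor; the subclass here is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.master.HMaster;

public class CustomMasterClassSketch {

  // Hypothetical HMaster subclass a test might ask the mini cluster to instantiate.
  public static class TracingMaster extends HMaster {
    public TracingMaster(Configuration conf) throws Exception {
      super(conf); // assumes the single-Configuration constructor
    }
  }

  static void start(HBaseTestingUtility util) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(1)
      .masterClass(TracingMaster.class)
      .numRegionServers(1)
      .build();
    util.startMiniHBaseCluster(option);
  }
}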
       
         /**
      -   * Starts the hbase cluster up again after shutting it down previously in a
      -   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
      +   * Starts the hbase cluster up again after shutting it down previously in a test. Use this if you
      +   * want to keep dfs/zk up and just stop/start hbase.
          * @param servers number of region servers
          */
         public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
      @@ -1284,20 +1233,19 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
          public void restartHBaseCluster(int servers, List<Integer> ports)
      -      throws IOException, InterruptedException {
      +    throws IOException, InterruptedException {
           StartMiniClusterOption option =
      -        StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
      +      StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
           restartHBaseCluster(option);
           invalidateConnection();
         }
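As the javadoc above says, this path is for bouncing only hbase while dfs and zookeeper stay up; reusing the old region server ports keeps client caches valid. A minimal sketch; how the ports were recorded is left as an assumption.

import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class RestartHBaseOnlySketch {

  // Stop just the hbase processes and bring them back on the same ports,
  // leaving the mini dfs and zookeeper clusters untouched.
  static void bounceHBase(HBaseTestingUtility util, List<Integer> oldRsPorts) throws Exception {
    util.shutdownMiniHBaseCluster();
    util.restartHBaseCluster(oldRsPorts.size(), oldRsPorts);
  }
}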
       
         public void restartHBaseCluster(StartMiniClusterOption option)
      -      throws IOException, InterruptedException {
      +    throws IOException, InterruptedException {
           closeConnection();
      -    this.hbaseCluster =
      -        new MiniHBaseCluster(this.conf, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
      -            option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
      -            option.getRsClass());
      +    this.hbaseCluster = new MiniHBaseCluster(this.conf, option.getNumMasters(),
      +      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
      +      option.getMasterClass(), option.getRsClass());
           // Don't leave here till we've done a successful scan of the hbase:meta
           Connection conn = ConnectionFactory.createConnection(this.conf);
           Table t = conn.getTable(TableName.META_TABLE_NAME);
      @@ -1312,16 +1260,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * @return Current mini hbase cluster. Only has something in it after a call
      -   * to {@link #startMiniCluster()}.
      +   * @return Current mini hbase cluster. Only has something in it after a call to
      +   *         {@link #startMiniCluster()}.
          * @see #startMiniCluster()
          */
         public MiniHBaseCluster getMiniHBaseCluster() {
           if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      -      return (MiniHBaseCluster)this.hbaseCluster;
      +      return (MiniHBaseCluster) this.hbaseCluster;
           }
      -    throw new RuntimeException(hbaseCluster + " not an instance of " +
      -                               MiniHBaseCluster.class.getName());
      +    throw new RuntimeException(
      +      hbaseCluster + " not an instance of " + MiniHBaseCluster.class.getName());
         }
       
         /**
      @@ -1382,12 +1330,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Returns the path to the default root dir the minicluster uses. If create
      -   * is true, a new root directory path is fetched irrespective of whether it has been fetched
      -   * before or not. If false, previous path is used.
      -   * Note: this does not cause the root dir to be created.
      -   * @return Fully qualified path for the default hbase root dir
      -   * @throws IOException
      +   * Returns the path to the default root dir the minicluster uses. If create is true,
      +   * a new root directory path is fetched irrespective of whether it has been fetched before or not.
      +   * If false, previous path is used. Note: this does not cause the root dir to be created.
       +   * @return Fully qualified path for the default hbase root dir
          */
         public Path getDefaultRootDirPath(boolean create) throws IOException {
           if (!create) {
      @@ -1398,27 +1344,22 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
      -   * except that create flag is false.
      -   * Note: this does not cause the root dir to be created.
      -   * @return Fully qualified path for the default hbase root dir
      -   * @throws IOException
       +   * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)} except that
       +   * create flag is false. Note: this does not cause the root dir to be created.
       +   * @return Fully qualified path for the default hbase root dir
          */
         public Path getDefaultRootDirPath() throws IOException {
           return getDefaultRootDirPath(false);
         }
       
         /**
      -   * Creates an hbase rootdir in user home directory.  Also creates hbase
      -   * version file.  Normally you won't make use of this method.  Root hbasedir
      -   * is created for you as part of mini cluster startup.  You'd only use this
      -   * method if you were doing manual operation.
      -   * @param create This flag decides whether to get a new
      -   * root or data directory path or not, if it has been fetched already.
      -   * Note : Directory will be made irrespective of whether path has been fetched or not.
      -   * If directory already exists, it will be overwritten
      -   * @return Fully qualified path to hbase root dir
      -   * @throws IOException
      +   * Creates an hbase rootdir in user home directory. Also creates hbase version file. Normally you
      +   * won't make use of this method. Root hbasedir is created for you as part of mini cluster
      +   * startup. You'd only use this method if you were doing manual operation.
      +   * @param create This flag decides whether to get a new root or data directory path or not, if it
      +   *               has been fetched already. Note : Directory will be made irrespective of whether
      +   *               path has been fetched or not. If directory already exists, it will be overwritten
       +   * @return Fully qualified path to hbase root dir
          */
         public Path createRootDir(boolean create) throws IOException {
           FileSystem fs = FileSystem.get(this.conf);
      @@ -1430,24 +1371,20 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
      -   * except that create flag is false.
      -   * @return Fully qualified path to hbase root dir
      -   * @throws IOException
      +   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)} except that
      +   * create flag is false.
       +   * @return Fully qualified path to hbase root dir
          */
         public Path createRootDir() throws IOException {
           return createRootDir(false);
         }
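As stressed above, these helpers are only for manual setups outside the normal mini cluster startup. A minimal sketch of that manual path; the printout is illustrative only.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class ManualRootDirSketch {

  static void prepareDirs(HBaseTestingUtility util) throws Exception {
    // Normally startMiniCluster() does this for you; this is the manual-operation path only.
    Path rootDir = util.createRootDir();     // also writes the hbase version file
    Path walRootDir = util.createWALRootDir();
    System.out.println("root=" + rootDir + ", wal=" + walRootDir);
  }
}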
       
         /**
      -   * Creates a hbase walDir in the user's home directory.
      -   * Normally you won't make use of this method. Root hbaseWALDir
      -   * is created for you as part of mini cluster startup. You'd only use this
      -   * method if you were doing manual operation.
      -   *
      -   * @return Fully qualified path to hbase root dir
      -   * @throws IOException
      -  */
      +   * Creates a hbase walDir in the user's home directory. Normally you won't make use of this
      +   * method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use
      +   * this method if you were doing manual operation.
       +   * @return Fully qualified path to the hbase WAL root dir
      +   */
         public Path createWALRootDir() throws IOException {
           FileSystem fs = FileSystem.get(this.conf);
           Path walDir = getNewDataTestDirOnTestFS();
      @@ -1459,7 +1396,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         private void setHBaseFsTmpDir() throws IOException {
           String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
           if (hbaseFsTmpDirInString == null) {
      -      this.conf.set("hbase.fs.tmp.dir",  getDataTestDirOnTestFS("hbase-staging").toString());
      +      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
             LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
           } else {
             LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
      @@ -1495,26 +1432,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param family
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table.
       +   * @return A Table instance for the created table.
          */
      -  public Table createTable(TableName tableName, String family)
      -  throws IOException{
      -    return createTable(tableName, new String[]{family});
      +  public Table createTable(TableName tableName, String family) throws IOException {
      +    return createTable(tableName, new String[] { family });
         }
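A small usage sketch for the single-family form above, assuming a mini cluster is already running; the table, family and row names are hypothetical.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {

  static void roundTrip(HBaseTestingUtility util) throws Exception {
    Table table = util.createTable(TableName.valueOf("sketch_table"), "cf");
    byte[] cf = Bytes.toBytes("cf");
    // Write one cell and read it straight back.
    table.put(new Put(Bytes.toBytes("row1")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("v")));
    Result result = table.get(new Get(Bytes.toBytes("row1")));
    if (!Bytes.equals(result.getValue(cf, Bytes.toBytes("q")), Bytes.toBytes("v"))) {
      throw new AssertionError("round trip failed");
    }
    table.close();
  }
}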
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param families
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table.
       +   * @return A Table instance for the created table.
          */
      -  public Table createTable(TableName tableName, String[] families)
      -  throws IOException {
      +  public Table createTable(TableName tableName, String[] families) throws IOException {
            List<byte[]> fams = new ArrayList<>(families.length);
           for (String family : families) {
             fams.add(Bytes.toBytes(family));
      @@ -1523,27 +1450,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param family
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table.
       +   * @return A Table instance for the created table.
          */
      -  public Table createTable(TableName tableName, byte[] family)
      -  throws IOException{
      -    return createTable(tableName, new byte[][]{family});
      +  public Table createTable(TableName tableName, byte[] family) throws IOException {
      +    return createTable(tableName, new byte[][] { family });
         }
       
         /**
      -   * Create a table with multiple regions.
      -   * @param tableName
      -   * @param family
      -   * @param numRegions
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table with multiple regions.
       +   * @return A Table instance for the created table.
          */
         public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
      -      throws IOException {
      +    throws IOException {
           if (numRegions < 3) throw new IOException("Must create at least 3 regions");
           byte[] startKey = Bytes.toBytes("aaaaa");
           byte[] endKey = Bytes.toBytes("zzzzz");
      @@ -1553,35 +1470,22 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param families
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table.
       +   * @return A Table instance for the created table.
          */
      -  public Table createTable(TableName tableName, byte[][] families)
      -  throws IOException {
      +  public Table createTable(TableName tableName, byte[][] families) throws IOException {
           return createTable(tableName, families, (byte[][]) null);
         }
       
         /**
      -   * Create a table with multiple regions.
      -   * @param tableName
      -   * @param families
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table with multiple regions.
       +   * @return A Table instance for the created table.
          */
         public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
           return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
         }
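The multi-region variants pre-split the table; the numRegions form shown earlier refuses anything under three regions. A sketch that checks the split took effect, assuming Admin#getRegions(TableName) is available; names are hypothetical.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRegionTableSketch {

  static void createAndCheck(HBaseTestingUtility util) throws Exception {
    TableName name = TableName.valueOf("sketch_multi_region");
    Table table = util.createMultiRegionTable(name, Bytes.toBytes("cf"), 10);
    int regions = util.getAdmin().getRegions(name).size();
    // Ten regions were requested; fewer means the pre-split did not take.
    if (regions < 10) {
      throw new AssertionError("expected 10 regions, got " + regions);
    }
    table.close();
  }
}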
       
         /**
      -   * Create a table with multiple regions.
      -   * @param tableName
      -   * @param replicaCount replica count.
      -   * @param families
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table with multiple regions.
       +   * @param replicaCount replica count.
       +   * @return A Table instance for the created table.
          */
         public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
           throws IOException {
      @@ -1589,36 +1493,30 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param families
      -   * @param splitKeys
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table.
       +   * @return A Table instance for the created table.
          */
         public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
      -      throws IOException {
      +    throws IOException {
           return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
         }
       
         /**
          * Create a table.
      -   * @param tableName the table name
      -   * @param families the families
      -   * @param splitKeys the splitkeys
      +   * @param tableName    the table name
      +   * @param families     the families
      +   * @param splitKeys    the splitkeys
          * @param replicaCount the region replica count
          * @return A Table instance for the created table.
          * @throws IOException throws IOException
          */
         public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
      -      int replicaCount) throws IOException {
      +    int replicaCount) throws IOException {
           return createTable(tableName, families, splitKeys, replicaCount,
             new Configuration(getConfiguration()));
         }
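The replicaCount overloads above create the table with region replication already applied. A sketch combining explicit split keys with two replicas per region; the names and keys are hypothetical.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicaTableSketch {

  static Table create(HBaseTestingUtility util) throws Exception {
    byte[][] families = new byte[][] { Bytes.toBytes("cf") };
    // One split key gives two regions; each region gets a primary plus one read replica.
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("m") };
    return util.createTable(TableName.valueOf("sketch_replicated"), families, splitKeys, 2);
  }
}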
       
      -  public Table createTable(TableName tableName, byte[][] families,
      -      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      -  throws IOException{
      +  public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[] startKey,
      +    byte[] endKey, int numRegions) throws IOException {
           HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
       
           getAdmin().createTable(desc, startKey, endKey, numRegions);
      @@ -1629,29 +1527,25 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table.
      -   * @param htd
      -   * @param families
      -   * @param c Configuration to use
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table.
       +   * @param c Configuration to use
       +   * @return A Table instance for the created table.
          */
         public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
      -  throws IOException {
      +    throws IOException {
           return createTable(htd, families, null, c);
         }
       
         /**
          * Create a table.
      -   * @param htd table descriptor
      -   * @param families array of column families
      +   * @param htd       table descriptor
      +   * @param families  array of column families
          * @param splitKeys array of split keys
      -   * @param c Configuration to use
      +   * @param c         Configuration to use
          * @return A Table instance for the created table.
          * @throws IOException if getAdmin or createTable fails
          */
         public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
      -      Configuration c) throws IOException {
      +    Configuration c) throws IOException {
           // Disable blooms (they are on by default as of 0.95) but we disable them here because
           // tests have hard coded counts of what to expect in block cache, etc., and blooms being
           // on is interfering.
      @@ -1660,25 +1554,24 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Create a table.
      -   * @param htd table descriptor
      -   * @param families array of column families
      +   * @param htd       table descriptor
      +   * @param families  array of column families
          * @param splitKeys array of split keys
      -   * @param type Bloom type
      +   * @param type      Bloom type
          * @param blockSize block size
      -   * @param c Configuration to use
      +   * @param c         Configuration to use
          * @return A Table instance for the created table.
          * @throws IOException if getAdmin or createTable fails
          */
       
         public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
      -      BloomType type, int blockSize, Configuration c) throws IOException {
      +    BloomType type, int blockSize, Configuration c) throws IOException {
           TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
           for (byte[] family : families) {
             ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
      -        .setBloomFilterType(type)
      -        .setBlocksize(blockSize);
      +        .setBloomFilterType(type).setBlocksize(blockSize);
             if (isNewVersionBehaviorEnabled()) {
      -          cfdb.setNewVersionBehavior(true);
      +        cfdb.setNewVersionBehavior(true);
             }
             builder.setColumnFamily(cfdb.build());
           }
      @@ -1692,18 +1585,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Create a table.
      -   * @param htd table descriptor
      +   * @param htd       table descriptor
          * @param splitRows array of split keys
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * @return A Table instance for the created table.
          */
      -  public Table createTable(TableDescriptor htd, byte[][] splitRows)
      -      throws IOException {
      +  public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
           TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
           if (isNewVersionBehaviorEnabled()) {
             for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
      -         builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
      -           .setNewVersionBehavior(true).build());
      +        builder.setColumnFamily(
      +          ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
             }
           }
           getAdmin().createTable(builder.build(), splitRows);
      @@ -1715,58 +1606,40 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Create a table.
      -   * @param tableName the table name
      -   * @param families the families
      -   * @param splitKeys the split keys
      +   * @param tableName    the table name
      +   * @param families     the families
      +   * @param splitKeys    the split keys
          * @param replicaCount the replica count
      -   * @param c Configuration to use
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * @param c            Configuration to use
       +   * @return A Table instance for the created table.
          */
         public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
      -      int replicaCount, final Configuration c) throws IOException {
      +    int replicaCount, final Configuration c) throws IOException {
           HTableDescriptor htd = new HTableDescriptor(tableName);
           htd.setRegionReplication(replicaCount);
           return createTable(htd, families, splitKeys, c);
         }
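Several overloads above only tweak per-family settings; when a test needs finer control, the descriptor builders used elsewhere in this patch can do the same directly. A sketch under that assumption; the family name and sizes are hypothetical.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class TunedFamilySketch {

  static Table create(HBaseTestingUtility util) throws Exception {
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("sketch_tuned"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(3)                  // keep three versions per cell
        .setBloomFilterType(BloomType.ROW)  // row bloom filter
        .setBlocksize(8 * 1024)             // small blocks for cache-centric tests
        .build())
      .build();
    // createTable(TableDescriptor, byte[][]) pre-splits on the supplied row keys.
    return util.createTable(htd, new byte[][] { Bytes.toBytes("m") });
  }
}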
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param family
      -   * @param numVersions
      -   * @return A Table instance for the created table.
      -   * @throws IOException
       +   * Create a table.
       +   * @return A Table instance for the created table.
          */
      -  public Table createTable(TableName tableName, byte[] family, int numVersions)
      -  throws IOException {
      -    return createTable(tableName, new byte[][]{family}, numVersions);
      +  public Table createTable(TableName tableName, byte[] family, int numVersions) throws IOException {
      +    return createTable(tableName, new byte[][] { family }, numVersions);
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param families
      -   * @param numVersions
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * Create a table. Returns a Table instance for the created table.
          */
         public Table createTable(TableName tableName, byte[][] families, int numVersions)
      -      throws IOException {
      +    throws IOException {
           return createTable(tableName, families, numVersions, (byte[][]) null);
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param families
      -   * @param numVersions
      -   * @param splitKeys
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * Create a table. Returns a Table instance for the created table.
          */
         public Table createTable(TableName tableName, byte[][] families, int numVersions,
      -      byte[][] splitKeys) throws IOException {
      +    byte[][] splitKeys) throws IOException {
           HTableDescriptor desc = new HTableDescriptor(tableName);
           for (byte[] family : families) {
             HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      @@ -1783,34 +1656,22 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table with multiple regions.
      -   * @param tableName
      -   * @param families
      -   * @param numVersions
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * Create a table with multiple regions. Returns a Table instance for the created table.
          */
         public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
      -      throws IOException {
      +    throws IOException {
           return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param families
      -   * @param numVersions
      -   * @param blockSize
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * Create a table. Returns a Table instance for the created table.
          */
      -  public Table createTable(TableName tableName, byte[][] families,
      -    int numVersions, int blockSize) throws IOException {
      +  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize)
      +    throws IOException {
           HTableDescriptor desc = new HTableDescriptor(tableName);
           for (byte[] family : families) {
      -      HColumnDescriptor hcd = new HColumnDescriptor(family)
      -          .setMaxVersions(numVersions)
      -          .setBlocksize(blockSize);
      +      HColumnDescriptor hcd =
      +        new HColumnDescriptor(family).setMaxVersions(numVersions).setBlocksize(blockSize);
             if (isNewVersionBehaviorEnabled()) {
               hcd.setNewVersionBehavior(true);
             }
      @@ -1823,44 +1684,36 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           return getConnection().getTable(tableName);
         }
       
      -  public Table createTable(TableName tableName, byte[][] families,
      -      int numVersions, int blockSize, String cpName) throws IOException {
      -      HTableDescriptor desc = new HTableDescriptor(tableName);
      -      for (byte[] family : families) {
      -        HColumnDescriptor hcd = new HColumnDescriptor(family)
      -            .setMaxVersions(numVersions)
      -            .setBlocksize(blockSize);
      -        if (isNewVersionBehaviorEnabled()) {
      -          hcd.setNewVersionBehavior(true);
      -        }
      -        desc.addFamily(hcd);
      +  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize,
      +    String cpName) throws IOException {
      +    HTableDescriptor desc = new HTableDescriptor(tableName);
      +    for (byte[] family : families) {
      +      HColumnDescriptor hcd =
      +        new HColumnDescriptor(family).setMaxVersions(numVersions).setBlocksize(blockSize);
      +      if (isNewVersionBehaviorEnabled()) {
      +        hcd.setNewVersionBehavior(true);
             }
      -      if(cpName != null) {
      -        desc.addCoprocessor(cpName);
      -      }
      -      getAdmin().createTable(desc);
      -      // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
      -      // assigned
      -      waitUntilAllRegionsAssigned(tableName);
      -      return getConnection().getTable(tableName);
      +      desc.addFamily(hcd);
           }
      +    if (cpName != null) {
      +      desc.addCoprocessor(cpName);
      +    }
      +    getAdmin().createTable(desc);
      +    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
      +    // assigned
      +    waitUntilAllRegionsAssigned(tableName);
      +    return getConnection().getTable(tableName);
      +  }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param families
      -   * @param numVersions
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * Create a table. Returns a Table instance for the created table.
          */
      -  public Table createTable(TableName tableName, byte[][] families,
      -      int[] numVersions)
      -  throws IOException {
      +  public Table createTable(TableName tableName, byte[][] families, int[] numVersions)
      +    throws IOException {
           HTableDescriptor desc = new HTableDescriptor(tableName);
           int i = 0;
           for (byte[] family : families) {
      -      HColumnDescriptor hcd = new HColumnDescriptor(family)
      -          .setMaxVersions(numVersions[i]);
      +      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions[i]);
             if (isNewVersionBehaviorEnabled()) {
               hcd.setNewVersionBehavior(true);
             }
      @@ -1875,15 +1728,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table.
      -   * @param tableName
      -   * @param family
      -   * @param splitRows
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * Create a table. Returns a Table instance for the created table.
          */
         public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
      -      throws IOException {
      +    throws IOException {
           HTableDescriptor desc = new HTableDescriptor(tableName);
           HColumnDescriptor hcd = new HColumnDescriptor(family);
           if (isNewVersionBehaviorEnabled()) {
      @@ -1898,11 +1746,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a table with multiple regions.
      -   * @param tableName
      -   * @param family
      -   * @return A Table instance for the created table.
      -   * @throws IOException
      +   * Create a table with multiple regions. Returns a Table instance for the created table.
          */
         public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
           return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
      @@ -1913,18 +1757,20 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          */
         @SuppressWarnings("serial")
         public static void modifyTableSync(Admin admin, TableDescriptor desc)
      -      throws IOException, InterruptedException {
      +    throws IOException, InterruptedException {
           admin.modifyTable(desc);
      -    Pair status = new Pair() {{
      -      setFirst(0);
      -      setSecond(0);
      -    }};
      +    Pair status = new Pair() {
      +      {
      +        setFirst(0);
      +        setSecond(0);
      +      }
      +    };
           int i = 0;
           do {
             status = admin.getAlterStatus(desc.getTableName());
             if (status.getSecond() != 0) {
      -        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
      -          + " regions updated.");
      +        LOG.debug(
      +          status.getSecond() - status.getFirst() + "/" + status.getSecond() + " regions updated.");
               Thread.sleep(1 * 1000L);
             } else {
               LOG.debug("All regions updated.");
      @@ -1975,28 +1821,28 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         // Canned table and table descriptor creation
         // TODO replace HBaseTestCase
       
      -  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
      -  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
      -  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
      -  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
      +  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
      +  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
      +  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
      +  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
         private static final int MAXVERSIONS = 3;
       
         public static final char FIRST_CHAR = 'a';
         public static final char LAST_CHAR = 'z';
      -  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
      +  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
         public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
       
         /**
          * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
      -   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
      +   *             {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
          * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
          * @see HBASE-13893
          */
         @Deprecated
      -  public HTableDescriptor createTableDescriptor(final String name,
      -      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
      +  public HTableDescriptor createTableDescriptor(final String name, final int minVersions,
      +    final int versions, final int ttl, KeepDeletedCells keepDeleted) {
           return this.createTableDescriptor(TableName.valueOf(name), minVersions, versions, ttl,
      -        keepDeleted);
      +      keepDeleted);
         }
       
         /**
      @@ -2004,28 +1850,25 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * @param name Name to give table.
          * @return Column descriptor.
          * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
      -   *   {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
      +   *             {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
          * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
          * @see HBASE-13893
          */
         @Deprecated
         public HTableDescriptor createTableDescriptor(final String name) {
      -    return createTableDescriptor(TableName.valueOf(name),  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      -        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
      +    return createTableDescriptor(TableName.valueOf(name), HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      +      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
         }
       
      -  public HTableDescriptor createTableDescriptor(final TableName name,
      -      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
      +  public HTableDescriptor createTableDescriptor(final TableName name, final int minVersions,
      +    final int versions, final int ttl, KeepDeletedCells keepDeleted) {
           HTableDescriptor htd = new HTableDescriptor(name);
      -    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
      -      HColumnDescriptor hcd = new HColumnDescriptor(cfName)
      -          .setMinVersions(minVersions)
      -          .setMaxVersions(versions)
      -          .setKeepDeletedCells(keepDeleted)
      -          .setBlockCacheEnabled(false)
      -          .setTimeToLive(ttl);
      +    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      +      HColumnDescriptor hcd =
      +        new HColumnDescriptor(cfName).setMinVersions(minVersions).setMaxVersions(versions)
      +          .setKeepDeletedCells(keepDeleted).setBlockCacheEnabled(false).setTimeToLive(ttl);
             if (isNewVersionBehaviorEnabled()) {
      -          hcd.setNewVersionBehavior(true);
      +        hcd.setNewVersionBehavior(true);
             }
             htd.addFamily(hcd);
           }
      @@ -2038,23 +1881,21 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * @return Column descriptor.
          */
         public HTableDescriptor createTableDescriptor(final TableName name) {
      -    return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      -        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
      +    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS, MAXVERSIONS,
      +      HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
         }
       
      -  public HTableDescriptor createTableDescriptor(final TableName tableName,
      -      byte[] family) {
      -    return createTableDescriptor(tableName, new byte[][] {family}, 1);
      +  public HTableDescriptor createTableDescriptor(final TableName tableName, byte[] family) {
      +    return createTableDescriptor(tableName, new byte[][] { family }, 1);
         }
       
      -  public HTableDescriptor createTableDescriptor(final TableName tableName,
      -      byte[][] families, int maxVersions) {
      +  public HTableDescriptor createTableDescriptor(final TableName tableName, byte[][] families,
      +    int maxVersions) {
           HTableDescriptor desc = new HTableDescriptor(tableName);
           for (byte[] family : families) {
      -      HColumnDescriptor hcd = new HColumnDescriptor(family)
      -          .setMaxVersions(maxVersions);
      +      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(maxVersions);
             if (isNewVersionBehaviorEnabled()) {
      -          hcd.setNewVersionBehavior(true);
      +        hcd.setNewVersionBehavior(true);
             }
             desc.addFamily(hcd);
           }
      @@ -2063,15 +1904,13 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Create an HRegion that writes to the local tmp dirs
      -   * @param desc a table descriptor indicating which table the region belongs to
      +   * @param desc     a table descriptor indicating which table the region belongs to
          * @param startKey the start boundary of the region
      -   * @param endKey the end boundary of the region
      -   * @return a region that writes to local dir for testing
      -   * @throws IOException
      +   * @param endKey   the end boundary of the region
      +   * @return a region that writes to local dir for testing
          */
      -  public HRegion createLocalHRegion(TableDescriptor desc, byte [] startKey,
      -      byte [] endKey)
      -  throws IOException {
      +  public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey)
      +    throws IOException {
           HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
           return createLocalHRegion(hri, desc);
         }
      @@ -2089,12 +1928,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * @param info regioninfo
          * @param conf configuration
          * @param desc table descriptor
      -   * @param wal wal for this region.
      -   * @return created hregion
      -   * @throws IOException
      +   * @param wal  wal for this region.
      +   * @return created hregion
          */
         public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
      -      WAL wal) throws IOException {
      +    WAL wal) throws IOException {
           return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
         }
       
      @@ -2103,68 +1941,58 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * @param info regioninfo
          * @param conf configuration
          * @param desc table descriptor
      -   * @param wal wal for this region.
      -   * @return created hregion
      -   * @throws IOException
      +   * @param wal  wal for this region.
      +   * @return created hregion
          */
         public HRegion createLocalHRegion(HRegionInfo info, Configuration conf, HTableDescriptor desc,
      -      WAL wal) throws IOException {
      +    WAL wal) throws IOException {
           return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
         }
       
         /**
      -   * @param tableName the name of the table
      -   * @param startKey the start key of the region
      -   * @param stopKey the stop key of the region
      +   * @param tableName     the name of the table
      +   * @param startKey      the start key of the region
      +   * @param stopKey       the stop key of the region
          * @param callingMethod the name of the calling method probably a test method
      -   * @param conf the configuration to use
      -   * @param isReadOnly {@code true} if the table is read only, {@code false} otherwise
      -   * @param families the column families to use
      +   * @param conf          the configuration to use
      +   * @param isReadOnly    {@code true} if the table is read only, {@code false} otherwise
      +   * @param families      the column families to use
          * @throws IOException if an IO problem is encountered
          * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
          *         when done.
      -   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
      -   *   {@link #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)}
      -   *   instead.
      +   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #createLocalHRegion(TableName,
      +   *             byte[], byte[], boolean, Durability, WAL, byte[]...)} instead.
          * @see #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)
          * @see HBASE-13893
          */
         @Deprecated
         public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      -      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      -      WAL wal, byte[]... families) throws IOException {
      +    String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
      +    byte[]... families) throws IOException {
           return createLocalHRegion(TableName.valueOf(tableName), startKey, stopKey, conf, isReadOnly,
             durability, wal, families);
         }
       
         /**
      -   * @param tableName
      -   * @param startKey
      -   * @param stopKey
      -   * @param isReadOnly
      -   * @param families
      -   * @return A region on which you must call
      -   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
      -   * @throws IOException
      +   * @return A region on which you must call
      +   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
          */
         public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
      -      Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
      -      throws IOException {
      -    return createLocalHRegionWithInMemoryFlags(tableName,startKey, stopKey, conf, isReadOnly,
      -        durability, wal, null, families);
      +    Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
      +    throws IOException {
      +    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
      +      durability, wal, null, families);
         }
       
         public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
      -      byte[] stopKey, Configuration conf,
      -      boolean isReadOnly, Durability durability, WAL wal, boolean[] compactedMemStore,
      -      byte[]... families)
      -      throws IOException {
      +    byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
      +    boolean[] compactedMemStore, byte[]... families) throws IOException {
           HTableDescriptor htd = new HTableDescriptor(tableName);
           htd.setReadOnly(isReadOnly);
      -    int i=0;
      +    int i = 0;
           for (byte[] family : families) {
             HColumnDescriptor hcd = new HColumnDescriptor(family);
      -      if(compactedMemStore != null && i < compactedMemStore.length) {
      +      if (compactedMemStore != null && i < compactedMemStore.length) {
               hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
             } else {
               hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
      @@ -2184,17 +2012,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         // ==========================================================================
       
         /**
      -   * Provide an existing table name to truncate.
      -   * Scans the table and issues a delete for each row read.
      +   * Provide an existing table name to truncate. Scans the table and issues a delete for each row
      +   * read.
          * @param tableName existing table
      -   * @return HTable to that new table
      -   * @throws IOException
      +   * @return HTable to that new table
          */
         public Table deleteTableData(TableName tableName) throws IOException {
           Table table = getConnection().getTable(tableName);
           Scan scan = new Scan();
           ResultScanner resScan = table.getScanner(scan);
      -    for(Result res : resScan) {
      +    for (Result res : resScan) {
             Delete del = new Delete(res.getRow());
             table.delete(del);
           }
      @@ -2204,14 +2031,14 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Truncate a table using the admin command.
      -   * Effectively disables, deletes, and recreates the table.
      -   * @param tableName table which must exist.
      +   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
      +   * table.
      +   * @param tableName       table which must exist.
          * @param preserveRegions keep the existing split points
          * @return HTable for the new table
          */
      -  public Table truncateTable(final TableName tableName, final boolean preserveRegions) throws
      -      IOException {
      +  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
      +    throws IOException {
           Admin admin = getAdmin();
           if (!admin.isTableDisabled(tableName)) {
             admin.disableTable(tableName);
      @@ -2221,11 +2048,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Truncate a table using the admin command.
      -   * Effectively disables, deletes, and recreates the table.
      -   * For previous behavior of issuing row deletes, see
      -   * deleteTableData.
      -   * Expressly does not preserve regions of existing table.
      +   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
      +   * table. For previous behavior of issuing row deletes, see deleteTableData. Expressly does not
      +   * preserve regions of existing table.
          * @param tableName table which must exist.
          * @return HTable for the new table
          */
      @@ -2237,30 +2062,27 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * Load table with rows from 'aaa' to 'zzz'.
          * @param t Table
          * @param f Family
      -   * @return Count of rows loaded.
      -   * @throws IOException
      +   * @return Count of rows loaded.
          */
         public int loadTable(final Table t, final byte[] f) throws IOException {
      -    return loadTable(t, new byte[][] {f});
      +    return loadTable(t, new byte[][] { f });
         }
       
         /**
          * Load table with rows from 'aaa' to 'zzz'.
          * @param t Table
          * @param f Family
      -   * @return Count of rows loaded.
      -   * @throws IOException
      +   * @return Count of rows loaded.
          */
         public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
      -    return loadTable(t, new byte[][] {f}, null, writeToWAL);
      +    return loadTable(t, new byte[][] { f }, null, writeToWAL);
         }
       
         /**
          * Load table of multiple column families with rows from 'aaa' to 'zzz'.
          * @param t Table
          * @param f Array of Families to load
      -   * @return Count of rows loaded.
      -   * @throws IOException
      +   * @return Count of rows loaded.
          */
         public int loadTable(final Table t, final byte[][] f) throws IOException {
           return loadTable(t, f, null);
      @@ -2268,11 +2090,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Load table of multiple column families with rows from 'aaa' to 'zzz'.
      -   * @param t Table
      -   * @param f Array of Families to load
      +   * @param t     Table
      +   * @param f     Array of Families to load
          * @param value the values of the cells. If null is passed, the row key is used as value
      -   * @return Count of rows loaded.
      -   * @throws IOException
      +   * @return Count of rows loaded.
          */
         public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
           return loadTable(t, f, value, true);
      @@ -2280,14 +2101,13 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Load table of multiple column families with rows from 'aaa' to 'zzz'.
      -   * @param t Table
      -   * @param f Array of Families to load
      +   * @param t     Table
      +   * @param f     Array of Families to load
          * @param value the values of the cells. If null is passed, the row key is used as value
      -   * @return Count of rows loaded.
      -   * @throws IOException
      +   * @return Count of rows loaded.
          */
      -  public int loadTable(final Table t, final byte[][] f, byte[] value,
      -      boolean writeToWAL) throws IOException {
      +  public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
      +    throws IOException {
           List puts = new ArrayList<>();
           for (byte[] row : HBaseTestingUtility.ROWS) {
             Put put = new Put(row);
      @@ -2302,12 +2122,13 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           return puts.size();
         }
       
      -  /** A tracker for tracking and validating table rows
      -   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
      +  /**
      +   * A tracker for tracking and validating table rows generated with
      +   * {@link HBaseTestingUtility#loadTable(Table, byte[])}
          */
         public static class SeenRowTracker {
           int dim = 'z' - 'a' + 1;
      -    int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
      +    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
           byte[] startRow;
           byte[] stopRow;
       
      @@ -2330,8 +2151,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
             seenRows[i(row[0])][i(row[1])][i(row[2])]++;
           }
       
      -    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
      -     * all other rows none
      +    /**
      +     * Validate that all the rows between startRow and stopRow are seen exactly once, and all other
      +     * rows none
            */
           public void validate() {
             for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      @@ -2339,14 +2161,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
                 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
                   int count = seenRows[i(b1)][i(b2)][i(b3)];
                   int expectedCount = 0;
      -            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
      -                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
      +            if (
      +              Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
      +                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0
      +            ) {
                     expectedCount = 1;
                   }
                   if (count != expectedCount) {
      -              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
      -              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
      -                  "instead of " + expectedCount);
      +              String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
      +              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " "
      +                + "instead of " + expectedCount);
                   }
                 }
               }
      @@ -2359,19 +2183,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         public int loadRegion(final Region r, final byte[] f) throws IOException {
      -    return loadRegion((HRegion)r, f);
      +    return loadRegion((HRegion) r, f);
         }
       
         /**
          * Load region with rows from 'aaa' to 'zzz'.
      -   * @param r Region
      -   * @param f Family
      +   * @param r     Region
      +   * @param f     Family
          * @param flush flush the cache if true
      -   * @return Count of rows loaded.
      -   * @throws IOException
      +   * @return Count of rows loaded.
          */
      -  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      -  throws IOException {
      +  public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
           byte[] k = new byte[3];
           int rowCount = 0;
           for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      @@ -2408,7 +2230,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      -      throws IOException {
      +    throws IOException {
           for (int i = startRow; i < endRow; i++) {
             byte[] data = Bytes.toBytes(String.valueOf(i));
             Put put = new Put(data);
      @@ -2418,19 +2240,18 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
      -      throws IOException {
      +    throws IOException {
           byte[] row = new byte[rowSize];
           for (int i = 0; i < totalRows; i++) {
             Bytes.random(row);
             Put put = new Put(row);
      -      put.addColumn(f, new byte[]{0}, new byte[]{0});
      +      put.addColumn(f, new byte[] { 0 }, new byte[] { 0 });
             t.put(put);
           }
         }
       
         public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
      -      int replicaId)
      -      throws IOException {
      +    int replicaId) throws IOException {
           for (int i = startRow; i < endRow; i++) {
             String failMsg = "Failed verification of row :" + i;
             byte[] data = Bytes.toBytes(String.valueOf(i));
      @@ -2441,29 +2262,28 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
             assertTrue(failMsg, result.containsColumn(f, null));
             assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
             Cell cell = result.getColumnLatestCell(f, null);
      -      assertTrue(failMsg,
      -        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
      -          cell.getValueLength()));
      +      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
      +        cell.getValueOffset(), cell.getValueLength()));
           }
         }
       
         public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
      -      throws IOException {
      -    verifyNumericRows((HRegion)region, f, startRow, endRow);
      +    throws IOException {
      +    verifyNumericRows((HRegion) region, f, startRow, endRow);
         }
       
         public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
      -      throws IOException {
      +    throws IOException {
           verifyNumericRows(region, f, startRow, endRow, true);
         }
       
         public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
      -      final boolean present) throws IOException {
      -    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
      +    final boolean present) throws IOException {
      +    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
         }
       
         public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
      -      final boolean present) throws IOException {
      +    final boolean present) throws IOException {
           for (int i = startRow; i < endRow; i++) {
             String failMsg = "Failed verification of row :" + i;
             byte[] data = Bytes.toBytes(String.valueOf(i));
      @@ -2476,14 +2296,13 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
             assertTrue(failMsg, result.containsColumn(f, null));
             assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
             Cell cell = result.getColumnLatestCell(f, null);
      -      assertTrue(failMsg,
      -        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
      -          cell.getValueLength()));
      +      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
      +        cell.getValueOffset(), cell.getValueLength()));
           }
         }
       
         public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      -      throws IOException {
      +    throws IOException {
           for (int i = startRow; i < endRow; i++) {
             byte[] data = Bytes.toBytes(String.valueOf(i));
             Delete delete = new Delete(data);
      @@ -2513,7 +2332,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         public int countRows(final Table table, final byte[]... families) throws IOException {
           Scan scan = new Scan();
      -    for (byte[] family: families) {
      +    for (byte[] family : families) {
             scan.addFamily(family);
           }
           return countRows(table, scan);
      @@ -2587,73 +2406,53 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           }
         }
       
      -  public static final byte[][] KEYS = {
      -    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
      -    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
      -    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      -    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
      -    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      -    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      -    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      -    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
      -    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
      -  };
      +  public static final byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
      +    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
      +    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
      +    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      +    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
      +    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
      +    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") };
       
      -  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
      -      Bytes.toBytes("bbb"),
      -      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
      -      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      -      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
      -      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      -      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      -      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      -      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
      -      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
      -  };
      +  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = { Bytes.toBytes("bbb"),
      +    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
      +    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
      +    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      +    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
      +    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
      +    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") };
       
         /**
      -   * Create rows in hbase:meta for regions of the specified table with the specified
      -   * start keys.  The first startKey should be a 0 length byte array if you
      -   * want to form a proper range of regions.
      -   * @param conf
      -   * @param htd
      -   * @param startKeys
      -   * @return list of region info for regions added to meta
      -   * @throws IOException
      -   * @deprecated since 2.0 version and will be removed in 3.0 version.
      -   *             use {@link #createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][])}
      +   * Create rows in hbase:meta for regions of the specified table with the specified start keys. The
      +   * first startKey should be a 0 length byte array if you want to form a proper range of regions.
      +   * @return list of region info for regions added to meta
      +   * @deprecated since 2.0 version and will be removed in 3.0 version. use
      +   *             {@link #createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][])}
          */
         @Deprecated
         public List createMultiRegionsInMeta(final Configuration conf,
      -      final HTableDescriptor htd, byte [][] startKeys) throws IOException {
      -    return createMultiRegionsInMeta(conf, (TableDescriptor) htd, startKeys)
      -        .stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
      +    final HTableDescriptor htd, byte[][] startKeys) throws IOException {
      +    return createMultiRegionsInMeta(conf, (TableDescriptor) htd, startKeys).stream()
      +      .map(ImmutableHRegionInfo::new).collect(Collectors.toList());
         }
      +
         /**
      -   * Create rows in hbase:meta for regions of the specified table with the specified
      -   * start keys.  The first startKey should be a 0 length byte array if you
      -   * want to form a proper range of regions.
      -   * @param conf
      -   * @param htd
      -   * @param startKeys
      -   * @return list of region info for regions added to meta
      -   * @throws IOException
      +   * Create rows in hbase:meta for regions of the specified table with the specified start keys. The
      +   * first startKey should be a 0 length byte array if you want to form a proper range of regions.
      +   * @return list of region info for regions added to meta
          */
         public List createMultiRegionsInMeta(final Configuration conf,
      -      final TableDescriptor htd, byte [][] startKeys)
      -  throws IOException {
      +    final TableDescriptor htd, byte[][] startKeys) throws IOException {
           Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
           Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
           List newRegions = new ArrayList<>(startKeys.length);
      -    MetaTableAccessor
      -        .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
      +    MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
      +      TableState.State.ENABLED);
           // add custom ones
           for (int i = 0; i < startKeys.length; i++) {
             int j = (i + 1) % startKeys.length;
      -      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
      -          .setStartKey(startKeys[i])
      -          .setEndKey(startKeys[j])
      -          .build();
      +      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i])
      +        .setEndKey(startKeys[j]).build();
             MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
             newRegions.add(hri);
           }
      @@ -2666,7 +2465,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * Create an unmanaged WAL. Be sure to close it when you're through.
          */
         public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
      -      throws IOException {
      +    throws IOException {
           // The WAL subsystem will use the default rootDir rather than the passed in rootDir
           // unless I pass along via the conf.
           Configuration confForWAL = new Configuration(conf);
      @@ -2674,13 +2473,12 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
         }
       
      -
         /**
          * Create a region with it's own WAL. Be sure to call
          * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
          */
         public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      -      final Configuration conf, final TableDescriptor htd) throws IOException {
      +    final Configuration conf, final TableDescriptor htd) throws IOException {
           return createRegionAndWAL(info, rootDir, conf, htd, true);
         }
       
      @@ -2689,20 +2487,20 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
          */
         public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      -      final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
      -      throws IOException {
      +    final Configuration conf, final TableDescriptor htd, BlockCache blockCache) throws IOException {
           HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
           region.setBlockCache(blockCache);
           region.initialize();
           return region;
         }
      +
         /**
          * Create a region with it's own WAL. Be sure to call
          * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
          */
         public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      -      final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
      -      throws IOException {
      +    final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
      +    throws IOException {
           HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
           region.setMobFileCache(mobFileCache);
           region.initialize();
      @@ -2714,17 +2512,15 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
          */
         public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      -      final Configuration conf, final TableDescriptor htd, boolean initialize)
      -      throws IOException {
      -    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0,
      -      0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
      +    final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException {
      +    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      +      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
           WAL wal = createWal(conf, rootDir, info);
           return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
         }
       
         /**
          * Returns all rows from the hbase:meta table.
      -   *
          * @throws IOException When reading the rows fails.
          */
         public List getMetaTableRows() throws IOException {
      @@ -2733,8 +2529,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           List rows = new ArrayList<>();
           ResultScanner s = t.getScanner(new Scan());
           for (Result result : s) {
      -      LOG.info("getMetaTableRows: row -> " +
      -        Bytes.toStringBinary(result.getRow()));
      +      LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()));
             rows.add(result.getRow());
           }
           s.close();
      @@ -2744,7 +2539,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Returns all rows from the hbase:meta table for a given user table
      -   *
          * @throws IOException When reading the rows fails.
          */
         public List getMetaTableRows(TableName tableName) throws IOException {
      @@ -2761,8 +2555,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
             }
       
             if (info.getTable().equals(tableName)) {
      -        LOG.info("getMetaTableRows: row -> " +
      -            Bytes.toStringBinary(result.getRow()) + info);
      +        LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info);
               rows.add(result.getRow());
             }
           }
      @@ -2773,7 +2566,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Returns all regions of the specified table
      -   *
          * @param tableName the table name
          * @return all regions of the specified table
          * @throws IOException when getting the regions fails.
      @@ -2785,13 +2577,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /*
      -   * Find any other region server which is different from the one identified by parameter
      -   * @param rs
      -   * @return another region server
      +   * Find any other region server which is different from the one identified by the parameter.
      +   * @return another region server
          */
         public HRegionServer getOtherRegionServer(HRegionServer rs) {
      -    for (JVMClusterUtil.RegionServerThread rst :
      -      getMiniHBaseCluster().getRegionServerThreads()) {
      +    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
             if (!(rst.getRegionServer() == rs)) {
               return rst.getRegionServer();
             }
      @@ -2800,26 +2590,21 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Tool to get the reference to the region server object that holds the
      -   * region of the specified user table.
      +   * Tool to get the reference to the region server object that holds the region of the specified
      +   * user table.
          * @param tableName user table to lookup in hbase:meta
      -   * @return region server that holds it, null if the row doesn't exist
      -   * @throws IOException
      -   * @throws InterruptedException
      +   * @return region server that holds it, null if the row doesn't exist
          */
         public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      -      throws IOException, InterruptedException {
      +    throws IOException, InterruptedException {
           List regions = getRegions(tableName);
           if (regions == null || regions.isEmpty()) {
             return null;
           }
      -    LOG.debug("Found " + regions.size() + " regions for table " +
      -        tableName);
      +    LOG.debug("Found " + regions.size() + " regions for table " + tableName);
       
      -    byte[] firstRegionName = regions.stream()
      -        .filter(r -> !r.isOffline())
      -        .map(RegionInfo::getRegionName)
      -        .findFirst()
      +    byte[] firstRegionName =
      +      regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst()
               .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
       
           LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
      @@ -2827,36 +2612,33 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
             HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
           int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
             HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
      -    RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
      -    while(retrier.shouldRetry()) {
      +    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
      +    while (retrier.shouldRetry()) {
             int index = getMiniHBaseCluster().getServerWith(firstRegionName);
             if (index != -1) {
               return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
             }
      -      // Came back -1.  Region may not be online yet.  Sleep a while.
      +      // Came back -1. Region may not be online yet. Sleep a while.
             retrier.sleepUntilNextRetry();
           }
           return null;
         }
       
         /**
      -   * Starts a MiniMRCluster with a default number of
      -   * TaskTracker's.
      -   *
      +   * Starts a MiniMRCluster with a default number of TaskTracker's.
          * @throws IOException When starting the cluster fails.
          */
         public MiniMRCluster startMiniMapReduceCluster() throws IOException {
           // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
      -    conf.setIfUnset(
      -        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
      -        "99.0");
      +    conf.setIfUnset("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
      +      "99.0");
           startMiniMapReduceCluster(2);
           return mrCluster;
         }
       
         /**
      -   * Tasktracker has a bug where changing the hadoop.log.dir system property
      -   * will not change its internal static LOG_DIR variable.
      +   * Tasktracker has a bug where changing the hadoop.log.dir system property will not change its
      +   * internal static LOG_DIR variable.
          */
         private void forceChangeTaskLogDir() {
           Field logDirField;
      @@ -2884,7 +2666,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         /**
          * Starts a MiniMRCluster. Call {@link #setFileSystemURI(String)} to use a different
          * filesystem.
      -   * @param servers  The number of TaskTracker's to start.
      +   * @param servers The number of TaskTracker's to start.
          * @throws IOException When starting the cluster fails.
          */
         private void startMiniMapReduceCluster(final int servers) throws IOException {
      @@ -2917,9 +2699,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           }
       
           // Allow the user to override FS URI for this map-reduce cluster to use.
      -    mrCluster = new MiniMRCluster(servers,
      -      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      -      null, null, new JobConf(this.conf));
      +    mrCluster =
      +      new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(),
      +        1, null, null, new JobConf(this.conf));
           JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
           if (jobConf == null) {
             jobConf = mrCluster.createJobConf();
      @@ -2929,8 +2711,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           LOG.info("Mini mapreduce cluster started");
       
           // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
      -    // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
      -    // necessary config properties here.  YARN-129 required adding a few properties.
      +    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
      +    // necessary config properties here. YARN-129 required adding a few properties.
           conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
           // this for mrv2 support; mr1 ignores this
           conf.set("mapreduce.framework.name", "yarn");
      @@ -2943,18 +2725,15 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           if (historyAddress != null) {
             conf.set("mapreduce.jobhistory.address", historyAddress);
           }
      -    String schedulerAddress =
      -      jobConf.get("yarn.resourcemanager.scheduler.address");
      +    String schedulerAddress = jobConf.get("yarn.resourcemanager.scheduler.address");
           if (schedulerAddress != null) {
             conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
           }
      -    String mrJobHistoryWebappAddress =
      -      jobConf.get("mapreduce.jobhistory.webapp.address");
      +    String mrJobHistoryWebappAddress = jobConf.get("mapreduce.jobhistory.webapp.address");
           if (mrJobHistoryWebappAddress != null) {
             conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
           }
      -    String yarnRMWebappAddress =
      -      jobConf.get("yarn.resourcemanager.webapp.address");
      +    String yarnRMWebappAddress = jobConf.get("yarn.resourcemanager.webapp.address");
           if (yarnRMWebappAddress != null) {
             conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
           }
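
The block above copies the handful of YARN/MR2 addresses that HBase MR jobs need from the mini cluster's JobConf back into the shared configuration. The same idea, expressed as a small stand-alone helper for illustration (not part of the patch; the key list is just the one visible in this hunk):

    import org.apache.hadoop.conf.Configuration;

    // Illustrative helper only: copy the mini MR cluster's addresses into a target
    // Configuration, skipping keys the cluster did not set. Key names are the ones
    // that appear in the hunk above.
    public final class MiniMrConfCopy {

      private static final String[] KEYS_TO_COPY = {
        "mapreduce.jobtracker.address",
        "mapreduce.jobhistory.address",
        "yarn.resourcemanager.scheduler.address",
        "mapreduce.jobhistory.webapp.address",
        "yarn.resourcemanager.webapp.address"
      };

      private MiniMrConfCopy() {
      }

      public static void copyMiniMrSettings(Configuration from, Configuration to) {
        for (String key : KEYS_TO_COPY) {
          String value = from.get(key);
          if (value != null) {
            to.set(key, value);
          }
        }
        // MR1 ignores this, MRv2 requires it, as the comment in the hunk notes.
        to.set("mapreduce.framework.name", "yarn");
      }
    }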
      @@ -2978,15 +2757,15 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
          * Create a stubbed out RegionServerService, mainly for getting FS.
          */
         public RegionServerServices createMockRegionServerService() throws IOException {
      -    return createMockRegionServerService((ServerName)null);
      +    return createMockRegionServerService((ServerName) null);
         }
       
         /**
      -   * Create a stubbed out RegionServerService, mainly for getting FS.
      -   * This version is used by TestTokenAuthentication
      +   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
      +   * TestTokenAuthentication
          */
      -  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws
      -      IOException {
      +  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      +    throws IOException {
           final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
           rss.setFileSystem(getTestFileSystem());
           rss.setRpcServer(rpc);
      @@ -2994,8 +2773,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Create a stubbed out RegionServerService, mainly for getting FS.
      -   * This version is used by TestOpenRegionHandler
      +   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
      +   * TestOpenRegionHandler
          */
         public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
           final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
      @@ -3016,8 +2795,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         /**
      -   * Expire the Master's session
      -   * @throws Exception
+   * Expire the Master's session
          */
         public void expireMasterSession() throws Exception {
           HMaster master = getMiniHBaseCluster().getMaster();
      @@ -3046,31 +2824,25 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
       
         private void decrementMinRegionServerCount(Configuration conf) {
      -    int currentCount = conf.getInt(
      -        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
      +    int currentCount = conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
           if (currentCount != -1) {
      -      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
      -          Math.max(currentCount - 1, 1));
      +      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, Math.max(currentCount - 1, 1));
           }
         }
       
         public void expireSession(ZKWatcher nodeZK) throws Exception {
      -   expireSession(nodeZK, false);
      +    expireSession(nodeZK, false);
         }
       
         /**
          * Expire a ZooKeeper session as recommended in ZooKeeper documentation
      -   * http://hbase.apache.org/book.html#trouble.zookeeper
      -   * There are issues when doing this:
      -   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
      -   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
      -   *
      -   * @param nodeZK - the ZK watcher to expire
      -   * @param checkStatus - true to check if we can create a Table with the
      -   *                    current configuration.
      +   * http://hbase.apache.org/book.html#trouble.zookeeper There are issues when doing this: [1]
      +   * http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html [2]
      +   * https://issues.apache.org/jira/browse/ZOOKEEPER-1105
      +   * @param nodeZK      - the ZK watcher to expire
      +   * @param checkStatus - true to check if we can create a Table with the current configuration.
          */
      -  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
      -    throws Exception {
      +  public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception {
           Configuration c = new Configuration(this.conf);
           String quorumServers = ZKConfig.getZKQuorumServersString(c);
           ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
      @@ -3078,30 +2850,30 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           long sessionID = zk.getSessionId();
       
           // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
      -    //  so we create a first watcher to be sure that the
      -    //  event was sent. We expect that if our watcher receives the event
      -    //  other watchers on the same machine will get is as well.
      +    // so we create a first watcher to be sure that the
      +    // event was sent. We expect that if our watcher receives the event
      +    // other watchers on the same machine will get is as well.
           // When we ask to close the connection, ZK does not close it before
      -    //  we receive all the events, so don't have to capture the event, just
      -    //  closing the connection should be enough.
      -    ZooKeeper monitor = new ZooKeeper(quorumServers,
      -      1000, new org.apache.zookeeper.Watcher(){
      +    // we receive all the events, so don't have to capture the event, just
      +    // closing the connection should be enough.
      +    ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
             @Override
             public void process(WatchedEvent watchedEvent) {
      -        LOG.info("Monitor ZKW received event="+watchedEvent);
      +        LOG.info("Monitor ZKW received event=" + watchedEvent);
             }
      -    } , sessionID, password);
      +    }, sessionID, password);
       
           // Making it expire
      -    ZooKeeper newZK = new ZooKeeper(quorumServers,
      -        1000, EmptyWatcher.instance, sessionID, password);
      +    ZooKeeper newZK =
      +      new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password);
       
      -    //ensure that we have connection to the server before closing down, otherwise
      -    //the close session event will be eaten out before we start CONNECTING state
      +    // ensure that we have connection to the server before closing down, otherwise
      +    // the close session event will be eaten out before we start CONNECTING state
           long start = EnvironmentEdgeManager.currentTime();
      -    while (newZK.getState() != States.CONNECTED
      -         && EnvironmentEdgeManager.currentTime() - start < 1000) {
      -       Thread.sleep(1);
      +    while (
      +      newZK.getState() != States.CONNECTED && EnvironmentEdgeManager.currentTime() - start < 1000
      +    ) {
      +      Thread.sleep(1);
           }
           newZK.close();
           LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
      @@ -3116,7 +2888,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Get the Mini HBase cluster.
      -   *
          * @return hbase cluster
          * @see #getHBaseClusterInterface()
          */
      @@ -3126,26 +2897,26 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       
         /**
          * Returns the HBaseCluster instance.
-   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
-   * tests referring this should not assume that the cluster is a mini cluster or a
-   * distributed one. If the test only works on a mini cluster, then specific
-   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
-   * need to type-cast.
+   * <p>
+   * Returned object can be any of the subclasses of HBaseCluster, and the tests referring this
+   * should not assume that the cluster is a mini cluster or a distributed one. If the test only
+   * works on a mini cluster, then specific method {@link #getMiniHBaseCluster()} can be used
+   * instead w/o the need to type-cast.
    */
   public HBaseCluster getHBaseClusterInterface() {
-    //implementation note: we should rename this method as #getHBaseCluster(),
-    //but this would require refactoring 90+ calls.
+    // implementation note: we should rename this method as #getHBaseCluster(),
+    // but this would require refactoring 90+ calls.
     return hbaseCluster;
   }
 
   /**
    * Resets the connections so that the next time getConnection() is called, a new connection is
    * created. This is needed in cases where the entire cluster / all the masters are shutdown and
-   * the connection is not valid anymore.
-   * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
-   * written, not all start() stop() calls go through this class. Most tests directly operate on
-   * the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
-   * maintain the connection state automatically. Cleaning this is a much bigger refactor.
+   * the connection is not valid anymore. TODO: There should be a more coherent way of doing this.
+   * Unfortunately the way tests are written, not all start() stop() calls go through this class.
+   * Most tests directly operate on the underlying mini/local hbase cluster. That makes it difficult
+   * for this wrapper class to maintain the connection state automatically. Cleaning this is a much
+   * bigger refactor.
    */
   public void invalidateConnection() throws IOException {
     closeConnection();
@@ -3153,16 +2924,14 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
     final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
     LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
-        masterConfigBefore, masterConfAfter);
+      masterConfigBefore, masterConfAfter);
     conf.set(HConstants.MASTER_ADDRS_KEY,
-        getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
+      getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
   }
 
   /**
-   * Get a shared Connection to the cluster.
-   * this method is threadsafe.
-   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
-   * @throws IOException
+   * Get a shared Connection to the cluster. this method is threadsafe.
+   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster. n
    */
   public Connection getConnection() throws IOException {
     try {
@@ -3170,7 +2939,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         if (connection == null) {
           try {
             connection = ConnectionFactory.createConnection(this.conf);
-          } catch(IOException ioe) {
+          } catch (IOException ioe) {
             throw new UncheckedIOException("Failed to create connection", ioe);
           }
         }
@@ -3182,19 +2951,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   }
 
   /**
-   * Returns a Admin instance.
-   * This instance is shared between HBaseTestingUtility instance users. Closing it has no effect,
-   * it will be closed automatically when the cluster shutdowns
-   *
+   * Returns a Admin instance. This instance is shared between HBaseTestingUtility instance users.
+   * Closing it has no effect, it will be closed automatically when the cluster shutdowns
    * @return HBaseAdmin instance which is guaranteed to support only {@link Admin} interface.
-   *   Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
-   *   anytime.
+   *         Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
+   *         anytime.
    * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
    */
   @Deprecated
-  public synchronized HBaseAdmin getHBaseAdmin()
-  throws IOException {
-    if (hbaseAdmin == null){
+  public synchronized HBaseAdmin getHBaseAdmin() throws IOException {
+    if (hbaseAdmin == null) {
       this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
     }
     return hbaseAdmin;
@@ -3212,11 +2978,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   }
 
   /**
-   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
-   * Closing it has no effect, it will be closed automatically when the cluster shutdowns
+   * Returns an Admin instance which is shared between HBaseTestingUtility instance users. Closing
+   * it has no effect, it will be closed automatically when the cluster shutdowns
    */
   public synchronized Admin getAdmin() throws IOException {
-    if (hbaseAdmin == null){
+    if (hbaseAdmin == null) {
       this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
     }
     return hbaseAdmin;
@@ -3233,8 +2999,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   /**
    * Unassign the named region.
-   *
-   * @param regionName  The region to unassign.
+   * @param regionName The region to unassign.
    */
   public void unassignRegion(String regionName) throws IOException {
     unassignRegion(Bytes.toBytes(regionName));
@@ -3242,8 +3007,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   /**
    * Unassign the named region.
-   *
-   * @param regionName  The region to unassign.
+   * @param regionName The region to unassign.
   */
   public void unassignRegion(byte[] regionName) throws IOException {
     getAdmin().unassign(regionName, true);
@@ -3251,9 +3015,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   /**
    * Closes the region containing the given row.
-   *
-   * @param row  The row to find the containing region.
-   * @param table  The table to find the region.
+   * @param row   The row to find the containing region.
+   * @param table The table to find the region.
   */
   public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
     unassignRegionByRow(Bytes.toBytes(row), table);
@@ -3261,10 +3024,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   /**
    * Closes the region containing the given row.
-   *
-   * @param row  The row to find the containing region.
-   * @param table  The table to find the region.
-   * @throws IOException
+   * @param row   The row to find the containing region.
+   * @param table The table to find the region.
n */ public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException { HRegionLocation hrl = table.getRegionLocation(row); @@ -3273,7 +3034,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** * Retrieves a splittable region randomly from tableName - * @param tableName name of table + * @param tableName name of table * @param maxAttempts maximum number of attempts, unlimited for value of -1 * @return the HRegion chosen, null if none was found within limit of maxAttempts */ @@ -3291,7 +3052,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } regCount = regions.size(); // There are chances that before we get the region for the table from an RS the region may - // be going for CLOSE. This may be because online schema change is enabled + // be going for CLOSE. This may be because online schema change is enabled if (regCount > 0) { idx = ThreadLocalRandom.current().nextInt(regCount); // if we have just tried this region, there is no need to try again @@ -3319,14 +3080,14 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** * Set the MiniDFSCluster - * @param cluster cluster to use - * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before - * it is set. + * @param cluster cluster to use + * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before it + * is set. * @throws IllegalStateException if the passed cluster is up when it is required to be down - * @throws IOException if the FileSystem could not be set from the passed dfs cluster + * @throws IOException if the FileSystem could not be set from the passed dfs cluster */ public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown) - throws IllegalStateException, IOException { + throws IllegalStateException, IOException { if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) { throw new IllegalStateException("DFSCluster is already running! Shut it down first."); } @@ -3339,56 +3100,47 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Wait until all regions in a table have been assigned. Waits default timeout before giving up + * Wait until all regions in a table have been assigned. Waits default timeout before giving up * (30 seconds). - * @param table Table to wait on. - * @throws InterruptedException - * @throws IOException + * @param table Table to wait on. nn */ - public void waitTableAvailable(TableName table) - throws InterruptedException, IOException { + public void waitTableAvailable(TableName table) throws InterruptedException, IOException { waitTableAvailable(table.getName(), 30000); } public void waitTableAvailable(TableName table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitFor(timeoutMillis, predicateTableAvailable(table)); } /** * Wait until all regions in a table have been assigned - * @param table Table to wait on. - * @param timeoutMillis Timeout. - * @throws InterruptedException - * @throws IOException + * @param table Table to wait on. + * @param timeoutMillis Timeout. 
nn */ public void waitTableAvailable(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table))); } public String explainTableAvailability(TableName tableName) throws IOException { String msg = explainTableState(tableName, TableState.State.ENABLED) + ", "; if (getHBaseCluster().getMaster().isAlive()) { - Map assignments = - getHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getRegionAssignments(); + Map assignments = getHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionAssignments(); final List> metaLocations = - MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName); + MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName); for (Pair metaLocation : metaLocations) { RegionInfo hri = metaLocation.getFirst(); ServerName sn = metaLocation.getSecond(); if (!assignments.containsKey(hri)) { - msg += ", region " + hri - + " not assigned, but found in meta, it expected to be on " + sn; + msg += ", region " + hri + " not assigned, but found in meta, it expected to be on " + sn; } else if (sn == null) { - msg += ", region " + hri - + " assigned, but has no server in meta"; + msg += ", region " + hri + " assigned, but has no server in meta"; } else if (!sn.equals(assignments.get(hri))) { - msg += ", region " + hri - + " assigned, but has different servers in meta and AM ( " + - sn + " <> " + assignments.get(hri); + msg += ", region " + hri + " assigned, but has different servers in meta and AM ( " + sn + + " <> " + assignments.get(hri); } } } @@ -3396,11 +3148,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } public String explainTableState(final TableName table, TableState.State state) - throws IOException { + throws IOException { TableState tableState = MetaTableAccessor.getTableState(getConnection(), table); if (tableState == null) { return "TableState in META: No table state in META for table " + table - + " last state in meta (including deleted is " + findLastTableState(table) + ")"; + + " last state in meta (including deleted is " + findLastTableState(table) + ")"; } else if (!tableState.inStates(state)) { return "TableState in META: Not " + state + " state, but " + tableState; } else { @@ -3414,94 +3166,78 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { @Override public boolean visit(Result r) throws IOException { - if (!Arrays.equals(r.getRow(), table.getName())) - return false; + if (!Arrays.equals(r.getRow(), table.getName())) return false; TableState state = MetaTableAccessor.getTableState(r); - if (state != null) - lastTableState.set(state); + if (state != null) lastTableState.set(state); return true; } }; - MetaTableAccessor - .scanMeta(getConnection(), null, null, - MetaTableAccessor.QueryType.TABLE, - Integer.MAX_VALUE, visitor); + MetaTableAccessor.scanMeta(getConnection(), null, null, MetaTableAccessor.QueryType.TABLE, + Integer.MAX_VALUE, visitor); return lastTableState.get(); } /** - * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the - * regions have been all assigned. Will timeout after default period (30 seconds) - * Tolerates nonexistent table. + * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions + * have been all assigned. 
Will timeout after default period (30 seconds) Tolerates nonexistent + * table. * @param table the table to wait on. * @throws InterruptedException if interrupted while waiting - * @throws IOException if an IO problem is encountered + * @throws IOException if an IO problem is encountered */ - public void waitTableEnabled(TableName table) - throws InterruptedException, IOException { + public void waitTableEnabled(TableName table) throws InterruptedException, IOException { waitTableEnabled(table, 30000); } /** - * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the - * regions have been all assigned. + * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions + * have been all assigned. * @see #waitTableEnabled(TableName, long) - * @param table Table to wait on. - * @param timeoutMillis Time to wait on it being marked enabled. - * @throws InterruptedException - * @throws IOException + * @param table Table to wait on. + * @param timeoutMillis Time to wait on it being marked enabled. nn */ public void waitTableEnabled(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitTableEnabled(TableName.valueOf(table), timeoutMillis); } - public void waitTableEnabled(TableName table, long timeoutMillis) - throws IOException { + public void waitTableEnabled(TableName table, long timeoutMillis) throws IOException { waitFor(timeoutMillis, predicateTableEnabled(table)); } /** - * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' - * Will timeout after default period (30 seconds) - * @param table Table to wait on. - * @throws InterruptedException - * @throws IOException + * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' Will timeout + * after default period (30 seconds) + * @param table Table to wait on. nn */ - public void waitTableDisabled(byte[] table) - throws InterruptedException, IOException { + public void waitTableDisabled(byte[] table) throws InterruptedException, IOException { waitTableDisabled(table, 30000); } public void waitTableDisabled(TableName table, long millisTimeout) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitFor(millisTimeout, predicateTableDisabled(table)); } /** - * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' - * @param table Table to wait on. - * @param timeoutMillis Time to wait on it being marked disabled. - * @throws InterruptedException - * @throws IOException + * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' + * @param table Table to wait on. + * @param timeoutMillis Time to wait on it being marked disabled. 
nn */ public void waitTableDisabled(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitTableDisabled(TableName.valueOf(table), timeoutMillis); } /** - * Make sure that at least the specified number of region servers - * are running + * Make sure that at least the specified number of region servers are running * @param num minimum number of region servers that should be running - * @return true if we started some servers - * @throws IOException + * @return true if we started some servers n */ - public boolean ensureSomeRegionServersAvailable(final int num) - throws IOException { + public boolean ensureSomeRegionServersAvailable(final int num) throws IOException { boolean startedServer = false; MiniHBaseCluster hbaseCluster = getMiniHBaseCluster(); - for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i getAllOnlineRegions(MiniHBaseCluster cluster) - throws IOException { + throws IOException { NavigableSet online = new TreeSet<>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { try { - for (RegionInfo region : - ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) { + for (RegionInfo region : ProtobufUtil + .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) { online.add(region.getRegionNameAsString()); } } catch (RegionServerStoppedException e) { @@ -3581,8 +3306,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } for (MasterThread mt : cluster.getLiveMasterThreads()) { try { - for (RegionInfo region : - ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) { + for (RegionInfo region : ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) { online.add(region.getRegionNameAsString()); } } catch (RegionServerStoppedException e) { @@ -3595,26 +3319,22 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and - * makes tests linger. Here is the exception you'll see: + * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and makes tests + * linger. Here is the exception you'll see: + * *

          * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
          * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
          * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
          * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
          * 
      - * @param stream A DFSClient.DFSOutputStream. - * @param max - * @throws NoSuchFieldException - * @throws SecurityException - * @throws IllegalAccessException - * @throws IllegalArgumentException + * + * @param stream A DFSClient.DFSOutputStream. nnnnn */ - public static void setMaxRecoveryErrorCount(final OutputStream stream, - final int max) { + public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) { try { - Class [] clazzes = DFSClient.class.getDeclaredClasses(); - for (Class clazz: clazzes) { + Class[] clazzes = DFSClient.class.getDeclaredClasses(); + for (Class clazz : clazzes) { String className = clazz.getSimpleName(); if (className.equals("DFSOutputStream")) { if (clazz.isInstance(stream)) { @@ -3637,7 +3357,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { * @return true if the region is assigned false otherwise. */ public boolean assignRegion(final RegionInfo regionInfo) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager(); am.assign(regionInfo); return AssignmentTestingUtil.waitForAssignment(am, regionInfo); @@ -3645,20 +3365,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** * Move region to destination server and wait till region is completely moved and online - * * @param destRegion region to move - * @param destServer destination server of the region - * @throws InterruptedException - * @throws IOException + * @param destServer destination server of the region nn */ public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer) - throws InterruptedException, IOException { + throws InterruptedException, IOException { HMaster master = getMiniHBaseCluster().getMaster(); // TODO: Here we start the move. The move can take a while. getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer); while (true) { - ServerName serverName = master.getAssignmentManager().getRegionStates() - .getRegionServerOfRegion(destRegion); + ServerName serverName = + master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion); if (serverName != null && serverName.equals(destServer)) { assertRegionOnServer(destRegion, serverName, 2000); break; @@ -3668,13 +3385,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Wait until all regions for a table in hbase:meta have a non-empty - * info:server, up to a configuable timeout value (default is 60 seconds) - * This means all regions have been deployed, - * master has been informed and updated hbase:meta with the regions deployed - * server. - * @param tableName the table name - * @throws IOException + * Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a + * configuable timeout value (default is 60 seconds) This means all regions have been deployed, + * master has been informed and updated hbase:meta with the regions deployed server. 
+ * @param tableName the table name n */ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException { waitUntilAllRegionsAssigned(tableName, @@ -3682,8 +3396,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Waith until all system table's regions get assigned - * @throws IOException + * Waith until all system table's regions get assigned n */ public void waitUntilAllSystemRegionsAssigned() throws IOException { waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); @@ -3691,20 +3404,18 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Wait until all regions for a table in hbase:meta have a non-empty - * info:server, or until timeout. This means all regions have been deployed, - * master has been informed and updated hbase:meta with the regions deployed - * server. + * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until + * timeout. This means all regions have been deployed, master has been informed and updated + * hbase:meta with the regions deployed server. * @param tableName the table name - * @param timeout timeout, in milliseconds - * @throws IOException + * @param timeout timeout, in milliseconds n */ public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) - throws IOException { + throws IOException { if (!TableName.isMetaTableName(tableName)) { try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { - LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + - timeout + "ms"); + LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @Override public String explainFailure() throws IOException { @@ -3726,17 +3437,19 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { // (for fault tolerance testing). tableFound = true; byte[] server = - r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); + r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); if (server == null) { return false; } else { byte[] startCode = - r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); + r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); ServerName serverName = - ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + - Bytes.toLong(startCode)); - if (!getHBaseClusterInterface().isDistributedCluster() && - getHBaseCluster().isKilledRS(serverName)) { + ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + + Bytes.toLong(startCode)); + if ( + !getHBaseClusterInterface().isDistributedCluster() + && getHBaseCluster().isKilledRS(serverName) + ) { return false; } } @@ -3747,7 +3460,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } } if (!tableFound) { - LOG.warn("Didn't find the entries for table " + tableName + " in meta, already deleted?"); + LOG.warn( + "Didn't find the entries for table " + tableName + " in meta, already deleted?"); } return tableFound; } @@ -3778,17 +3492,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Do a small get/scan against one store. This is required because store - * has no actual methods of querying itself, and relies on StoreScanner. + * Do a small get/scan against one store. This is required because store has no actual methods of + * querying itself, and relies on StoreScanner. 
*/ - public static List getFromStoreFile(HStore store, - Get get) throws IOException { + public static List getFromStoreFile(HStore store, Get get) throws IOException { Scan scan = new Scan(get); InternalScanner scanner = (InternalScanner) store.getScanner(scan, - scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), - // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set - // readpoint 0. - 0); + scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), + // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set + // readpoint 0. + 0); List result = new ArrayList<>(); scanner.next(result); @@ -3804,48 +3517,42 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Create region split keys between startkey and endKey - * - * @param startKey - * @param endKey - * @param numRegions the number of regions to be created. it has to be greater than 3. + * Create region split keys between startkey and endKey nn * @param numRegions the number of + * regions to be created. it has to be greater than 3. * @return resulting split keys */ - public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){ - assertTrue(numRegions>3); - byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3); - byte [][] result = new byte[tmpSplitKeys.length+1][]; + public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) { + assertTrue(numRegions > 3); + byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3); + byte[][] result = new byte[tmpSplitKeys.length + 1][]; System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length); result[0] = HConstants.EMPTY_BYTE_ARRAY; return result; } /** - * Do a small get/scan against one store. This is required because store - * has no actual methods of querying itself, and relies on StoreScanner. + * Do a small get/scan against one store. This is required because store has no actual methods of + * querying itself, and relies on StoreScanner. */ - public static List getFromStoreFile(HStore store, - byte [] row, - NavigableSet columns - ) throws IOException { + public static List getFromStoreFile(HStore store, byte[] row, NavigableSet columns) + throws IOException { Get get = new Get(row); Map> s = get.getFamilyMap(); s.put(store.getColumnFamilyDescriptor().getName(), columns); - return getFromStoreFile(store,get); + return getFromStoreFile(store, get); } - public static void assertKVListsEqual(String additionalMsg, - final List expected, - final List actual) { + public static void assertKVListsEqual(String additionalMsg, final List expected, + final List actual) { final int eLen = expected.size(); final int aLen = actual.size(); final int minLen = Math.min(eLen, aLen); int i; for (i = 0; i < minLen - && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0; - ++i) {} + && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0; ++i) { + } if (additionalMsg == null) { additionalMsg = ""; @@ -3855,10 +3562,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } if (eLen != aLen || i != minLen) { - throw new AssertionError( - "Expected and actual KV arrays differ at position " + i + ": " + - safeGetAsStr(expected, i) + " (length " + eLen +") vs. 
" + - safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg); + throw new AssertionError("Expected and actual KV arrays differ at position " + i + ": " + + safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " + safeGetAsStr(actual, i) + + " (length " + aLen + ")" + additionalMsg); } } @@ -3871,26 +3577,19 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } public String getClusterKey() { - return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" - + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" - + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + + ":" + + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); } /** Creates a random table with the given parameters */ - public Table createRandomTable(TableName tableName, - final Collection families, - final int maxVersions, - final int numColsPerRow, - final int numFlushes, - final int numRegions, - final int numRowsPerFlush) - throws IOException, InterruptedException { + public Table createRandomTable(TableName tableName, final Collection families, + final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions, + final int numRowsPerFlush) throws IOException, InterruptedException { - LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + - " regions, " + numFlushes + " storefiles per region, " + - numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions + - "\n"); + LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + + numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" + + maxVersions + "\n"); final int numCF = families.size(); final byte[][] cfBytes = new byte[numCF][]; @@ -3907,11 +3606,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { final int splitStartKey = actualStartKey + keysPerRegion; final int splitEndKey = actualEndKey - keysPerRegion; final String keyFormat = "%08x"; - final Table table = createTable(tableName, cfBytes, - maxVersions, - Bytes.toBytes(String.format(keyFormat, splitStartKey)), - Bytes.toBytes(String.format(keyFormat, splitEndKey)), - numRegions); + final Table table = createTable(tableName, cfBytes, maxVersions, + Bytes.toBytes(String.format(keyFormat, splitStartKey)), + Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); @@ -3922,8 +3619,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { final Random rand = ThreadLocalRandom.current(); for (int iFlush = 0; iFlush < numFlushes; ++iFlush) { for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) { - final byte[] row = Bytes.toBytes(String.format(keyFormat, - actualStartKey + rand.nextInt(actualEndKey - actualStartKey))); + final byte[] row = Bytes.toBytes( + String.format(keyFormat, actualStartKey + rand.nextInt(actualEndKey - actualStartKey))); Put put = new Put(row); Delete del = new Delete(row); @@ -3932,9 +3629,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { final long ts = rand.nextInt(); final byte[] qual = Bytes.toBytes("col" + iCol); if (rand.nextBoolean()) { - final byte[] value = Bytes.toBytes("value_for_row_" + iRow + - "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" + - ts + "_random_" + rand.nextLong()); + final byte[] value = + 
Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + + iCol + "_ts_" + ts + "_random_" + rand.nextLong()); put.addColumn(cf, qual, ts, value); } else if (rand.nextDouble() < 0.8) { del.addColumn(cf, qual, ts); @@ -3970,8 +3667,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { return "226.1.1." + ThreadLocalRandom.current().nextInt(254); } - public static void waitForHostPort(String host, int port) - throws IOException { + public static void waitForHostPort(String host, int port) throws IOException { final int maxTimeMs = 10000; final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; IOException savedException = null; @@ -3997,27 +3693,25 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableName tableName, byte[] columnFamily, Algorithm compression, - DataBlockEncoding dataBlockEncoding) throws IOException { - return createPreSplitLoadTestTable(conf, tableName, - columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, - Durability.USE_DEFAULT); + public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, + byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding) + throws IOException { + return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression, + dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT); } + /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableName tableName, byte[] columnFamily, Algorithm compression, - DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication, - Durability durability) - throws IOException { + public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, + byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding, + int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); desc.setDurability(durability); desc.setRegionReplication(regionReplication); @@ -4028,15 +3722,13 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. 
* @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableName tableName, byte[][] columnFamilies, Algorithm compression, - DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication, - Durability durability) - throws IOException { + public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, + byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding, + int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); desc.setDurability(durability); desc.setRegionReplication(regionReplication); @@ -4051,46 +3743,45 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException { + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, + ColumnFamilyDescriptor hcd) throws IOException { return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER); } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException { - return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd}, - numRegionsPerServer); + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, + ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException { + return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] { hcd }, + numRegionsPerServer); } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor desc, ColumnFamilyDescriptor[] hcds, - int numRegionsPerServer) throws IOException { - return createPreSplitLoadTestTable(conf, desc, hcds, - new RegionSplitter.HexStringSplit(), numRegionsPerServer); + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, + ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException { + return createPreSplitLoadTestTable(conf, desc, hcds, new RegionSplitter.HexStringSplit(), + numRegionsPerServer); } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. 
* @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor td, ColumnFamilyDescriptor[] cds, - SplitAlgorithm splitter, int numRegionsPerServer) throws IOException { + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td, + ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer) + throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td); for (ColumnFamilyDescriptor cd : cds) { if (!td.hasColumnFamily(cd.getName())) { @@ -4105,27 +3796,25 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { try { // create a table a pre-splits regions. // The number of splits is set as: - // region servers * regions per region server). + // region servers * regions per region server). int numberOfServers = admin.getRegionServers().size(); if (numberOfServers == 0) { throw new IllegalStateException("No live regionservers"); } totalNumberOfRegions = numberOfServers * numRegionsPerServer; - LOG.info("Number of live regionservers: " + numberOfServers + ", " + - "pre-splitting table into " + totalNumberOfRegions + " regions " + - "(regions per server: " + numRegionsPerServer + ")"); + LOG.info("Number of live regionservers: " + numberOfServers + ", " + + "pre-splitting table into " + totalNumberOfRegions + " regions " + "(regions per server: " + + numRegionsPerServer + ")"); - byte[][] splits = splitter.split( - totalNumberOfRegions); + byte[][] splits = splitter.split(totalNumberOfRegions); admin.createTable(td, splits); } catch (MasterNotRunningException e) { LOG.error("Master not running", e); throw new IOException(e); } catch (TableExistsException e) { - LOG.warn("Table " + td.getTableName() + - " already exists, continuing"); + LOG.warn("Table " + td.getTableName() + " already exists, continuing"); } finally { admin.close(); unmanagedConnection.close(); @@ -4140,14 +3829,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Due to async racing issue, a region may not be in - * the online region list of a region server yet, after - * the assignment znode is deleted and the new assignment - * is recorded in master. + * Due to async racing issue, a region may not be in the online region list of a region server + * yet, after the assignment znode is deleted and the new assignment is recorded in master. */ - public void assertRegionOnServer( - final RegionInfo hri, final ServerName server, - final long timeout) throws IOException, InterruptedException { + public void assertRegionOnServer(final RegionInfo hri, final ServerName server, + final long timeout) throws IOException, InterruptedException { long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); @@ -4156,30 +3842,27 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { if (now > timeoutTime) break; Thread.sleep(10); } - fail("Could not find region " + hri.getRegionNameAsString() - + " on server " + server); + fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server); } /** - * Check to make sure the region is open on the specified - * region server, but not on any other one. + * Check to make sure the region is open on the specified region server, but not on any other one. 
*/ - public void assertRegionOnlyOnServer( - final RegionInfo hri, final ServerName server, - final long timeout) throws IOException, InterruptedException { + public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server, + final long timeout) throws IOException, InterruptedException { long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) { List rsThreads = getHBaseCluster().getLiveRegionServerThreads(); - for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) { + for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) { HRegionServer rs = rsThread.getRegionServer(); if (server.equals(rs.getServerName())) { continue; } Collection hrs = rs.getOnlineRegionsLocalContext(); - for (HRegion r: hrs) { + for (HRegion r : hrs) { assertTrue("Region should not be double assigned", r.getRegionInfo().getRegionId() != hri.getRegionId()); } @@ -4190,21 +3873,20 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { if (now > timeoutTime) break; Thread.sleep(10); } - fail("Could not find region " + hri.getRegionNameAsString() - + " on server " + server); + fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server); } public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException { TableDescriptor td = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td); } public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd, - BlockCache blockCache) throws IOException { + BlockCache blockCache) throws IOException { TableDescriptor td = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache); } @@ -4220,8 +3902,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { return new ExplainingPredicate() { @Override public String explainFailure() throws IOException { - final RegionStates regionStates = getMiniHBaseCluster().getMaster() - .getAssignmentManager().getRegionStates(); + final RegionStates regionStates = + getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); return "found in transition: " + regionStates.getRegionsInTransition().toString(); } @@ -4287,10 +3969,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { try (Table table = getConnection().getTable(tableName)) { TableDescriptor htd = table.getDescriptor(); for (HRegionLocation loc : getConnection().getRegionLocator(tableName) - .getAllRegionLocations()) { + .getAllRegionLocations()) { Scan scan = new Scan().withStartRow(loc.getRegionInfo().getStartKey()) - .withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit() - .setMaxResultsPerColumnFamily(1).setCacheBlocks(false); + .withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit() + .setMaxResultsPerColumnFamily(1).setCacheBlocks(false); for (byte[] family : 
htd.getColumnFamilyNames()) { scan.addFamily(family); } @@ -4307,25 +3989,21 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** * Wait until no regions in transition. - * @param timeout How long to wait. - * @throws IOException + * @param timeout How long to wait. n */ public void waitUntilNoRegionsInTransition(final long timeout) throws IOException { waitFor(timeout, predicateNoRegionsInTransition()); } /** - * Wait until no regions in transition. (time limit 15min) - * @throws IOException + * Wait until no regions in transition. (time limit 15min) n */ public void waitUntilNoRegionsInTransition() throws IOException { waitUntilNoRegionsInTransition(15 * 60000); } /** - * Wait until labels is ready in VisibilityLabelsCache. - * @param timeoutMillis - * @param labels + * Wait until labels is ready in VisibilityLabelsCache. nn */ public void waitLabelAvailable(long timeoutMillis, final String... labels) { final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get(); @@ -4354,8 +4032,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Create a set of column descriptors with the combination of compression, - * encoding, bloom codecs available. + * Create a set of column descriptors with the combination of compression, encoding, bloom codecs + * available. * @return the list of column descriptors */ public static List generateColumnDescriptors() { @@ -4363,17 +4041,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Create a set of column descriptors with the combination of compression, - * encoding, bloom codecs available. + * Create a set of column descriptors with the combination of compression, encoding, bloom codecs + * available. * @param prefix family names prefix * @return the list of column descriptors */ public static List generateColumnDescriptors(final String prefix) { List htds = new ArrayList<>(); long familyId = 0; - for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) { - for (DataBlockEncoding encodingType: DataBlockEncoding.values()) { - for (BloomType bloomType: BloomType.values()) { + for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) { + for (DataBlockEncoding encodingType : DataBlockEncoding.values()) { + for (BloomType bloomType : BloomType.values()) { String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId); HColumnDescriptor htd = new HColumnDescriptor(name); htd.setCompressionType(compressionType); @@ -4431,10 +4109,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } /** - * Sets up {@link MiniKdc} for testing security. - * Uses {@link HBaseKerberosUtils} to set the given keytab file as - * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. - * FYI, there is also the easier-to-use kerby KDC server and utility for using it, + * Sets up {@link MiniKdc} for testing security. Uses {@link HBaseKerberosUtils} to set the given + * keytab file as {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. FYI, there is also the easier-to-use + * kerby KDC server and utility for using it, * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred; * less baggage. It came in in HBASE-5291. 
*/ @@ -4454,7 +4131,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { kdc = new MiniKdc(conf, dir); kdc.start(); } catch (BindException e) { - FileUtils.deleteDirectory(dir); // clean directory + FileUtils.deleteDirectory(dir); // clean directory numTries++; if (numTries == 3) { LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times."); @@ -4471,14 +4148,13 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public int getNumHFiles(final TableName tableName, final byte[] family) { int numHFiles = 0; for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) { - numHFiles+= getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, - family); + numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family); } return numHFiles; } public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName, - final byte[] family) { + final byte[] family) { int numHFiles = 0; for (Region region : rs.getRegions(tableName)) { numHFiles += region.getStore(family).getStorefilesCount(); @@ -4491,10 +4167,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { Collection ltdFamilies = Arrays.asList(ltd.getColumnFamilies()); Collection rtdFamilies = Arrays.asList(rtd.getColumnFamilies()); assertEquals(ltdFamilies.size(), rtdFamilies.size()); - for (Iterator it = ltdFamilies.iterator(), it2 = - rtdFamilies.iterator(); it.hasNext();) { - assertEquals(0, - ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next())); + for (Iterator it = ltdFamilies.iterator(), + it2 = rtdFamilies.iterator(); it.hasNext();) { + assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index a12a83389de..09993645ac6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,29 +19,28 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; - import org.apache.commons.math3.random.RandomData; import org.apache.commons.math3.random.RandomDataImpl; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.crypto.CryptoCipherProvider; import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; -import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class runs performance benchmarks for {@link HFile}. @@ -53,19 +51,19 @@ public class HFilePerformanceEvaluation { private static final int ROW_COUNT = 1000000; private static final int RFILE_BLOCKSIZE = 8 * 1024; private static StringBuilder testSummary = new StringBuilder(); - + // Disable verbose INFO logging from org.apache.hadoop.io.compress.CodecPool static { - System.setProperty("org.apache.commons.logging.Log", + System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.SimpleLog"); - System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.hadoop.io.compress.CodecPool", - "WARN"); + System.setProperty( + "org.apache.commons.logging.simplelog.log.org.apache.hadoop.io.compress.CodecPool", "WARN"); } - + private static final Logger LOG = LoggerFactory.getLogger(HFilePerformanceEvaluation.class.getName()); - static byte [] format(final int i) { + static byte[] format(final int i) { String v = Integer.toString(i); return Bytes.toBytes("0000000000".substring(v.length()) + v); } @@ -80,38 +78,37 @@ public class HFilePerformanceEvaluation { } /** - * HFile is Cell-based. It used to be byte arrays. Doing this test, pass Cells. All Cells + * HFile is Cell-based. It used to be byte arrays. Doing this test, pass Cells. All Cells * intentionally have same coordinates in all fields but row. - * @param i Integer to format as a row Key. + * @param i Integer to format as a row Key. * @param value Value to use * @return Created Cell. 
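As an aside, the format() helper above produces fixed-width, zero-padded row keys, so byte-wise ordering matches integer ordering; a small sketch of the values it yields with the current ten-character padding:

    byte[] first = format(0);        // "0000000000"
    byte[] middle = format(42);      // "0000000042"
    byte[] last = format(999999);    // "0000999999", the final key when ROW_COUNT is 1000000
    // Because the width is fixed, Bytes.compareTo() orders these keys exactly like the
    // integers they encode, which the sequential write benchmark relies on when appending
    // cells in row order.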
*/ - static Cell createCell(final int i, final byte [] value) { + static Cell createCell(final int i, final byte[] value) { return createCell(format(i), value); } - static Cell createCell(final byte [] keyRow) { + static Cell createCell(final byte[] keyRow) { return CellUtil.createCell(keyRow); } - static Cell createCell(final byte [] keyRow, final byte [] value) { + static Cell createCell(final byte[] keyRow, final byte[] value) { return CellUtil.createCell(keyRow, value); } /** - * Add any supported codec or cipher to test the HFile read/write performance. - * Specify "none" to disable codec or cipher or both. - * @throws Exception + * Add any supported codec or cipher to test the HFile read/write performance. Specify "none" to + * disable codec or cipher or both. n */ private void runBenchmarks() throws Exception { final Configuration conf = new Configuration(); final FileSystem fs = FileSystem.get(conf); final Path mf = fs.makeQualified(new Path("performanceevaluation.mapfile")); - + // codec=none cipher=none runWriteBenchmark(conf, fs, mf, "none", "none"); runReadBenchmark(conf, fs, mf, "none", "none"); - + // codec=gz cipher=none runWriteBenchmark(conf, fs, mf, "gz", "none"); runReadBenchmark(conf, fs, mf, "gz", "none"); @@ -167,105 +164,95 @@ public class HFilePerformanceEvaluation { } /** - * Write a test HFile with the given codec & cipher - * @param conf - * @param fs - * @param mf - * @param codec "none", "lzo", "gz", "snappy" - * @param cipher "none", "aes" - * @throws Exception + * Write a test HFile with the given codec & cipher nnn * @param codec "none", "lzo", "gz", + * "snappy" + * @param cipher "none", "aes" n */ private void runWriteBenchmark(Configuration conf, FileSystem fs, Path mf, String codec, - String cipher) throws Exception { + String cipher) throws Exception { if (fs.exists(mf)) { fs.delete(mf, true); } - runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT, codec, cipher), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT, codec, cipher), ROW_COUNT, + codec, getCipherName(conf, cipher)); } /** - * Run all the read benchmarks for the test HFile - * @param conf - * @param fs - * @param mf - * @param codec "none", "lzo", "gz", "snappy" + * Run all the read benchmarks for the test HFile nnn * @param codec "none", "lzo", "gz", "snappy" * @param cipher "none", "aes" */ private void runReadBenchmark(final Configuration conf, final FileSystem fs, final Path mf, - final String codec, final String cipher) { + final String codec, final String cipher) { PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("UniformRandomSmallScan failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("UniformRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - + 
PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("GaussianRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("SequentialReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } - }); + }); } - - protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount, - String codec, String cipher) throws Exception { - LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows."); - + + protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount, String codec, + String cipher) throws Exception { + LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + codec + "] " + + "cipher[" + cipher + "] for " + rowCount + " rows."); + long elapsedTime = benchmark.run(); - - LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows took " + - elapsedTime + "ms."); - + + LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + codec + "] " + + "cipher[" + cipher + "] for " + rowCount + " rows took " + elapsedTime + "ms."); + // Store results to print summary at the end testSummary.append("Running ").append(benchmark.getClass().getSimpleName()) - .append(" with codec[").append(codec).append("] cipher[").append(cipher) - .append("] for ").append(rowCount).append(" rows took ").append(elapsedTime) - .append("ms.").append("\n"); + .append(" with codec[").append(codec).append("] cipher[").append(cipher).append("] for ") + .append(rowCount).append(" rows took ").append(elapsedTime).append("ms.").append("\n"); } static abstract class RowOrientedBenchmark { @@ -277,8 +264,8 @@ public class HFilePerformanceEvaluation { protected String codec = "none"; protected String cipher = "none"; - public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows, + String codec, String cipher) { this.conf = conf; this.fs = fs; this.mf = mf; @@ -287,8 +274,7 @@ public class HFilePerformanceEvaluation { this.cipher = cipher; } - public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows) { + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { this.conf = conf; this.fs = fs; this.mf = mf; @@ -311,8 +297,7 @@ public class HFilePerformanceEvaluation { /** * Run benchmark - * @return elapsed time. - * @throws Exception + * @return elapsed time. 
n */ long run() throws Exception { long elapsedTime; @@ -338,8 +323,8 @@ public class HFilePerformanceEvaluation { protected HFile.Writer writer; private byte[] bytes = new byte[ROW_LENGTH]; - public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { + public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows, + String codec, String cipher) { super(conf, fs, mf, totalRows, codec, cipher); } @@ -347,27 +332,23 @@ public class HFilePerformanceEvaluation { void setUp() throws Exception { HFileContextBuilder builder = new HFileContextBuilder() - .withCompression(HFileWriterImpl.compressionByName(codec)) - .withBlockSize(RFILE_BLOCKSIZE); - + .withCompression(HFileWriterImpl.compressionByName(codec)).withBlockSize(RFILE_BLOCKSIZE); + if (cipher == "aes") { byte[] cipherKey = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(cipherKey); builder.withEncryptionContext(Encryption.newContext(conf) - .setCipher(Encryption.getCipher(conf, cipher)) - .setKey(cipherKey)); + .setCipher(Encryption.getCipher(conf, cipher)).setKey(cipherKey)); } else if (!"none".equals(cipher)) { throw new IOException("Cipher " + cipher + " not supported."); } - + HFileContext hFileContext = builder.build(); - writer = HFile.getWriterFactoryNoCache(conf) - .withPath(fs, mf) - .withFileContext(hFileContext) - .create(); + writer = + HFile.getWriterFactoryNoCache(conf).withPath(fs, mf).withFileContext(hFileContext).create(); } - + @Override void doRow(int i) throws Exception { writer.append(createCell(i, generateValue())); @@ -394,8 +375,7 @@ public class HFilePerformanceEvaluation { protected HFile.Reader reader; - public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows) { + public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -414,8 +394,7 @@ public class HFilePerformanceEvaluation { static class SequentialReadBenchmark extends ReadBenchmark { private HFileScanner scanner; - public SequentialReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public SequentialReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -445,15 +424,14 @@ public class HFilePerformanceEvaluation { static class UniformRandomReadBenchmark extends ReadBenchmark { - public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @Override void doRow(int i) throws Exception { HFileScanner scanner = this.reader.getScanner(conf, false, true); - byte [] b = getRandomRow(); + byte[] b = getRandomRow(); if (scanner.seekTo(createCell(b)) < 0) { LOG.info("Not able to seekTo " + new String(b)); return; @@ -464,22 +442,21 @@ public class HFilePerformanceEvaluation { PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } - private byte [] getRandomRow() { + private byte[] getRandomRow() { return format(ThreadLocalRandom.current().nextInt(totalRows)); } } static class UniformRandomSmallScan extends ReadBenchmark { - public UniformRandomSmallScan(Configuration conf, FileSystem fs, - Path mf, int totalRows) { - super(conf, fs, mf, totalRows/10); + public UniformRandomSmallScan(Configuration conf, FileSystem fs, Path mf, int totalRows) { + super(conf, fs, mf, totalRows / 10); } @Override void doRow(int i) throws 
Exception { HFileScanner scanner = this.reader.getScanner(conf, false, false); - byte [] b = getRandomRow(); + byte[] b = getRandomRow(); // System.out.println("Random row: " + new String(b)); Cell c = createCell(b); if (scanner.seekTo(c) != 0) { @@ -489,7 +466,7 @@ public class HFilePerformanceEvaluation { // TODO: HFileScanner doesn't do Cells yet. Temporary fix. c = scanner.getCell(); // System.out.println("Found row: " + - // new String(c.getRowArray(), c.getRowOffset(), c.getRowLength())); + // new String(c.getRowArray(), c.getRowOffset(), c.getRowLength())); PerformanceEvaluationCommons.assertKey(b, c); for (int ii = 0; ii < 30; ii++) { if (!scanner.next()) { @@ -501,7 +478,7 @@ public class HFilePerformanceEvaluation { } } - private byte [] getRandomRow() { + private byte[] getRandomRow() { return format(ThreadLocalRandom.current().nextInt(totalRows)); } } @@ -510,8 +487,7 @@ public class HFilePerformanceEvaluation { private RandomData randomData = new RandomDataImpl(); - public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -530,18 +506,15 @@ public class HFilePerformanceEvaluation { } } - private byte [] getGaussianRandomRowBytes() { - int r = (int) randomData.nextGaussian((double)totalRows / 2.0, - (double)totalRows / 10.0); + private byte[] getGaussianRandomRowBytes() { + int r = (int) randomData.nextGaussian((double) totalRows / 2.0, (double) totalRows / 10.0); // make sure r falls into [0,totalRows) - return format(Math.min(totalRows, Math.max(r,0))); + return format(Math.min(totalRows, Math.max(r, 0))); } } /** - * @param args - * @throws Exception - * @throws IOException + * nnn */ public static void main(String[] args) throws Exception { new HFilePerformanceEvaluation().runBenchmarks(); @@ -550,8 +523,10 @@ public class HFilePerformanceEvaluation { private String getCipherName(Configuration conf, String cipherName) { if (cipherName.equals("aes")) { String provider = conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY); - if (provider == null || provider.equals("") - || provider.equals(DefaultCipherProvider.class.getName())) { + if ( + provider == null || provider.equals("") + || provider.equals(DefaultCipherProvider.class.getName()) + ) { return "aes-default"; } else if (provider.equals(CryptoCipherProvider.class.getName())) { return "aes-commons"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java index c490c836c63..823d890348f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java @@ -1,26 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.Set; -import java.util.Collections; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java index e5cbfaf1584..9b61855cb4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -32,74 +31,62 @@ import org.apache.hadoop.hbase.util.Bytes; public class MetaMockingUtil { /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the HRegionInfo object or null - * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. - * @throws IOException + * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n */ - public static Result getMetaTableRowResult(final HRegionInfo region) - throws IOException { + public static Result getMetaTableRowResult(final HRegionInfo region) throws IOException { return getMetaTableRowResult(region, null, null, null); } /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the HRegionInfo object or null - * @param sn to use making startcode and server hostname:port in meta or null - * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. - * @throws IOException + * @param sn to use making startcode and server hostname:port in meta or null + * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. 
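For illustration, a short sketch of how getMetaTableRowResult above is typically used to fake a catalog row inside a test method (which may throw IOException); the table name, host and startcode are placeholder values:

    HRegionInfo hri = new HRegionInfo(TableName.valueOf("testtable"));
    ServerName sn = ServerName.valueOf("host1.example.org", 16020, 1234567890L);
    Result metaRow = MetaMockingUtil.getMetaTableRowResult(hri, sn);
    // metaRow now carries sorted KeyValues in HConstants.CATALOG_FAMILY
    // (info:regioninfo, info:server, info:serverstartcode), i.e. what a Get against
    // the region's row in hbase:meta would return.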
n */ public static Result getMetaTableRowResult(final HRegionInfo region, final ServerName sn) - throws IOException { + throws IOException { return getMetaTableRowResult(region, sn, null, null); } /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the HRegionInfo object or null - * @param sn to use making startcode and server hostname:port in meta or null + * @param sn to use making startcode and server hostname:port in meta or null * @param splita daughter region or null - * @param splitb daughter region or null - * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. - * @throws IOException + * @param splitb daughter region or null + * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n */ public static Result getMetaTableRowResult(RegionInfo region, final ServerName sn, - RegionInfo splita, RegionInfo splitb) throws IOException { + RegionInfo splita, RegionInfo splitb) throws IOException { List kvs = new ArrayList<>(); if (region != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - RegionInfo.toByteArray(region))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER, RegionInfo.toByteArray(region))); } if (sn != null) { - kvs.add(new KeyValue(region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getAddress().toString()))); - kvs.add(new KeyValue(region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(sn.getStartcode()))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getAddress().toString()))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode()))); } if (splita != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, - RegionInfo.toByteArray(splita))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splita))); } if (splitb != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, - RegionInfo.toByteArray(splitb))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitb))); } - //important: sort the kvs so that binary search work + // important: sort the kvs so that binary search work Collections.sort(kvs, MetaCellComparator.META_COMPARATOR); return Result.create(kvs); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java index 8789a9d19a9..3cee10e27f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java @@ -36,16 +36,19 @@ import org.junit.rules.TestRule; *

      * Use in combination with {@link ConnectionRule}, for example: *
      - * <pre>{@code
      + * <pre>
      + * {
      + *   @code
        *   public class TestMyClass {
      - *     @ClassRule
      + *     @ClassRule
        *     public static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
        *
      - *     @Rule
      + *     @Rule
        *     public final ConnectionRule connectionRule =
        *       ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection);
        *   }
      - * }</pre>
      + * }
      + * </pre>
      */ public final class MiniClusterRule extends ExternalResource { @@ -78,11 +81,8 @@ public final class MiniClusterRule extends ExternalResource { } public MiniClusterRule build() { - return new MiniClusterRule( - conf, - miniClusterOption != null - ? miniClusterOption - : StartMiniClusterOption.builder().build()); + return new MiniClusterRule(conf, + miniClusterOption != null ? miniClusterOption : StartMiniClusterOption.builder().build()); } } @@ -109,8 +109,8 @@ public final class MiniClusterRule extends ExternalResource { } /** - * Create a {@link Connection} to the managed {@link MiniHBaseCluster}. It's up to the caller - * to {@link Connection#close() close()} the connection when finished. + * Create a {@link Connection} to the managed {@link MiniHBaseCluster}. It's up to the caller to + * {@link Connection#close() close()} the connection when finished. */ public Connection createConnection() { if (miniCluster == null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index b5339d72012..7c940551221 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,10 +48,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterServ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; /** - * This class creates a single process HBase cluster. - * each server. The master uses the 'default' FileSystem. The RegionServers, - * if we are running on DistributedFilesystem, create a FileSystem instance - * each and will close down their instance on the way out. + * This class creates a single process HBase cluster. each server. The master uses the 'default' + * FileSystem. The RegionServers, if we are running on DistributedFilesystem, create a FileSystem + * instance each and will close down their instance on the way out. */ @InterfaceAudience.Public public class MiniHBaseCluster extends HBaseCluster { @@ -62,59 +60,55 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Start a MiniHBaseCluster. - * @param conf Configuration to be used for cluster - * @param numRegionServers initial number of region servers to start. - * @throws IOException + * @param conf Configuration to be used for cluster + * @param numRegionServers initial number of region servers to start. n */ public MiniHBaseCluster(Configuration conf, int numRegionServers) - throws IOException, InterruptedException { + throws IOException, InterruptedException { this(conf, 1, numRegionServers); } /** * Start a MiniHBaseCluster. - * @param conf Configuration to be used for cluster - * @param numMasters initial number of masters to start. - * @param numRegionServers initial number of region servers to start. - * @throws IOException + * @param conf Configuration to be used for cluster + * @param numMasters initial number of masters to start. + * @param numRegionServers initial number of region servers to start. 
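A brief sketch of the synchronous createConnection() flavour above, complementing the async example in the class javadoc; the cluster lifecycle is owned by the rule, while the caller closes the Connection (JUnit 4 assumed):

    @ClassRule
    public static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();

    @Test
    public void testWithConnection() throws Exception {
      // The Connection is not managed by the rule, so close it when done.
      try (Connection conn = miniClusterRule.createConnection();
        Admin admin = conn.getAdmin()) {
        // ... create tables, assert on cluster state, etc. ...
      }
    }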
n */ public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers) - throws IOException, InterruptedException { + throws IOException, InterruptedException { this(conf, numMasters, numRegionServers, null, null); } /** * Start a MiniHBaseCluster. - * @param conf Configuration to be used for cluster - * @param numMasters initial number of masters to start. + * @param conf Configuration to be used for cluster + * @param numMasters initial number of masters to start. * @param numRegionServers initial number of region servers to start. */ public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers, - Class masterClass, - Class regionserverClass) - throws IOException, InterruptedException { + Class masterClass, + Class regionserverClass) + throws IOException, InterruptedException { this(conf, numMasters, 0, numRegionServers, null, masterClass, regionserverClass); } /** * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster - * restart where for sure the regionservers come up on same address+port (but - * just with different startcode); by default mini hbase clusters choose new - * arbitrary ports on each cluster start. - * @throws IOException - * @throws InterruptedException + * restart where for sure the regionservers come up on same address+port (but just + * with different startcode); by default mini hbase clusters choose new arbitrary + * ports on each cluster start. nn */ public MiniHBaseCluster(Configuration conf, int numMasters, int numAlwaysStandByMasters, - int numRegionServers, List rsPorts, Class masterClass, - Class regionserverClass) - throws IOException, InterruptedException { + int numRegionServers, List rsPorts, Class masterClass, + Class regionserverClass) + throws IOException, InterruptedException { super(conf); // Hadoop 2 CompatibilityFactory.getInstance(MetricsAssertHelper.class).init(); init(numMasters, numAlwaysStandByMasters, numRegionServers, rsPorts, masterClass, - regionserverClass); + regionserverClass); this.initialClusterStatus = getClusterMetrics(); } @@ -123,38 +117,35 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Subclass so can get at protected methods (none at moment). Also, creates - * a FileSystem instance per instantiation. Adds a shutdown own FileSystem - * on the way out. Shuts down own Filesystem only, not All filesystems as - * the FileSystem system exit hook does. + * Subclass so can get at protected methods (none at moment). Also, creates a FileSystem instance + * per instantiation. Adds a shutdown own FileSystem on the way out. Shuts down own Filesystem + * only, not All filesystems as the FileSystem system exit hook does. */ public static class MiniHBaseClusterRegionServer extends HRegionServer { private Thread shutdownThread = null; private User user = null; /** - * List of RegionServers killed so far. ServerName also comprises startCode of a server, - * so any restarted instances of the same server will have different ServerName and will not - * coincide with past dead ones. So there's no need to cleanup this list. + * List of RegionServers killed so far. ServerName also comprises startCode of a server, so any + * restarted instances of the same server will have different ServerName and will not coincide + * with past dead ones. So there's no need to cleanup this list. 
*/ static Set killedServers = new HashSet<>(); public MiniHBaseClusterRegionServer(Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { super(conf); this.user = User.getCurrent(); } /* - * @param c - * @param currentfs We return this if we did not make a new one. + * n * @param currentfs We return this if we did not make a new one. * @param uniqueName Same name used to help identify the created fs. - * @return A new fs instance if we are up on DistributeFileSystem. - * @throws IOException + * @return A new fs instance if we are up on DistributeFileSystem. n */ @Override - protected void handleReportForDutyResponse( - final RegionServerStartupResponse c) throws IOException { + protected void handleReportForDutyResponse(final RegionServerStartupResponse c) + throws IOException { super.handleReportForDutyResponse(c); // Run this thread to shutdown our filesystem on way out. this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem()); @@ -208,15 +199,17 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Alternate shutdown hook. - * Just shuts down the passed fs, not all as default filesystem hook does. + * Alternate shutdown hook. Just shuts down the passed fs, not all as default filesystem hook + * does. */ static class SingleFileSystemShutdownThread extends Thread { private final FileSystem fs; + SingleFileSystemShutdownThread(final FileSystem fs) { super("Shutdown of " + fs); this.fs = fs; } + @Override public void run() { try { @@ -231,20 +224,20 @@ public class MiniHBaseCluster extends HBaseCluster { } private void init(final int nMasterNodes, final int numAlwaysStandByMasters, - final int nRegionNodes, List rsPorts, Class masterClass, - Class regionserverClass) - throws IOException, InterruptedException { + final int nRegionNodes, List rsPorts, Class masterClass, + Class regionserverClass) + throws IOException, InterruptedException { try { - if (masterClass == null){ - masterClass = HMaster.class; + if (masterClass == null) { + masterClass = HMaster.class; } - if (regionserverClass == null){ + if (regionserverClass == null) { regionserverClass = MiniHBaseCluster.MiniHBaseClusterRegionServer.class; } // start up a LocalHBaseCluster hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, numAlwaysStandByMasters, 0, - masterClass, regionserverClass); + masterClass, regionserverClass); // manually add the regionservers as other users for (int i = 0; i < nRegionNodes; i++) { @@ -252,8 +245,7 @@ public class MiniHBaseCluster extends HBaseCluster { if (rsPorts != null) { rsConf.setInt(HConstants.REGIONSERVER_PORT, rsPorts.get(i)); } - User user = HBaseTestingUtility.getDifferentUser(rsConf, - ".hfs."+index++); + User user = HBaseTestingUtility.getDifferentUser(rsConf, ".hfs." 
+ index++); hbaseCluster.addRegionServer(rsConf, i, user); } @@ -308,7 +300,7 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public void waitForRegionServerToStop(ServerName serverName, long timeout) throws IOException { - //ignore timeout for now + // ignore timeout for now waitOnRegionServer(getRegionServerIndex(serverName)); } @@ -404,30 +396,25 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public void waitForMasterToStop(ServerName serverName, long timeout) throws IOException { - //ignore timeout for now + // ignore timeout for now waitOnMaster(getMasterIndex(serverName)); } /** - * Starts a region server thread running - * - * @throws IOException - * @return New RegionServerThread + * Starts a region server thread running n * @return New RegionServerThread */ - public JVMClusterUtil.RegionServerThread startRegionServer() - throws IOException { + public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException { final Configuration newConf = HBaseConfiguration.create(conf); return startRegionServer(newConf); } private JVMClusterUtil.RegionServerThread startRegionServer(Configuration configuration) - throws IOException { - User rsUser = - HBaseTestingUtility.getDifferentUser(configuration, ".hfs."+index++); - JVMClusterUtil.RegionServerThread t = null; + throws IOException { + User rsUser = HBaseTestingUtility.getDifferentUser(configuration, ".hfs." + index++); + JVMClusterUtil.RegionServerThread t = null; try { - t = hbaseCluster.addRegionServer( - configuration, hbaseCluster.getRegionServers().size(), rsUser); + t = + hbaseCluster.addRegionServer(configuration, hbaseCluster.getRegionServers().size(), rsUser); t.start(); t.waitForServerOnline(); } catch (InterruptedException ie) { @@ -437,16 +424,15 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Starts a region server thread and waits until its processed by master. Throws an exception - * when it can't start a region server or when the region server is not processed by master - * within the timeout. - * + * Starts a region server thread and waits until its processed by master. Throws an exception when + * it can't start a region server or when the region server is not processed by master within the + * timeout. * @return New RegionServerThread */ public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout) - throws IOException { + throws IOException { - JVMClusterUtil.RegionServerThread t = startRegionServer(); + JVMClusterUtil.RegionServerThread t = startRegionServer(); ServerName rsServerName = t.getRegionServer().getServerName(); long start = EnvironmentEdgeManager.currentTime(); @@ -466,7 +452,7 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Cause a region server to exit doing basic clean up only on its way out. - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. */ public String abortRegionServer(int serverNumber) { HRegionServer server = getRegionServer(serverNumber); @@ -477,8 +463,7 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Shut down the specified region server cleanly - * - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. 
* @return the region server that was stopped */ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber) { @@ -487,18 +472,16 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Shut down the specified region server cleanly - * - * @param serverNumber Used as index into a list. - * @param shutdownFS True is we are to shutdown the filesystem as part of this - * regionserver's shutdown. Usually we do but you do not want to do this if - * you are running multiple regionservers in a test and you shut down one - * before end of the test. + * @param serverNumber Used as index into a list. + * @param shutdownFS True is we are to shutdown the filesystem as part of this regionserver's + * shutdown. Usually we do but you do not want to do this if you are running + * multiple regionservers in a test and you shut down one before end of the + * test. * @return the region server that was stopped */ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber, - final boolean shutdownFS) { - JVMClusterUtil.RegionServerThread server = - hbaseCluster.getRegionServers().get(serverNumber); + final boolean shutdownFS) { + JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Stopping " + server.toString()); server.getRegionServer().stop("Stopping rs " + serverNumber); return server; @@ -506,12 +489,10 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Suspend the specified region server - * @param serverNumber Used as index into a list. - * @return + * @param serverNumber Used as index into a list. n */ public JVMClusterUtil.RegionServerThread suspendRegionServer(int serverNumber) { - JVMClusterUtil.RegionServerThread server = - hbaseCluster.getRegionServers().get(serverNumber); + JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Suspending {}", server.toString()); server.suspend(); return server; @@ -519,37 +500,30 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Resume the specified region server - * @param serverNumber Used as index into a list. - * @return + * @param serverNumber Used as index into a list. n */ public JVMClusterUtil.RegionServerThread resumeRegionServer(int serverNumber) { - JVMClusterUtil.RegionServerThread server = - hbaseCluster.getRegionServers().get(serverNumber); + JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Resuming {}", server.toString()); server.resume(); return server; } /** - * Wait for the specified region server to stop. Removes this thread from list - * of running threads. - * @param serverNumber - * @return Name of region server that just went down. + * Wait for the specified region server to stop. Removes this thread from list of running threads. + * n * @return Name of region server that just went down. */ public String waitOnRegionServer(final int serverNumber) { return this.hbaseCluster.waitOnRegionServer(serverNumber); } - /** * Starts a master thread running - * * @return New RegionServerThread */ public JVMClusterUtil.MasterThread startMaster() throws IOException { Configuration c = HBaseConfiguration.create(conf); - User user = - HBaseTestingUtility.getDifferentUser(c, ".hfs."+index++); + User user = HBaseTestingUtility.getDifferentUser(c, ".hfs." 
+ index++); JVMClusterUtil.MasterThread t = null; try { @@ -559,7 +533,7 @@ public class MiniHBaseCluster extends HBaseCluster { throw new IOException("Interrupted adding master to cluster", ie); } conf.set(HConstants.MASTER_ADDRS_KEY, - hbaseCluster.getConfiguration().get(HConstants.MASTER_ADDRS_KEY)); + hbaseCluster.getConfiguration().get(HConstants.MASTER_ADDRS_KEY)); return t; } @@ -585,7 +559,7 @@ public class MiniHBaseCluster extends HBaseCluster { * @return the active MasterThread, null if none is active. */ public MasterThread getMasterThread() { - for (MasterThread mt: hbaseCluster.getLiveMasters()) { + for (MasterThread mt : hbaseCluster.getLiveMasters()) { if (mt.getMaster().isActiveMaster()) { return mt; } @@ -603,7 +577,7 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Cause a master to exit without shutting down entire cluster. - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. */ public String abortMaster(int serverNumber) { HMaster server = getMaster(serverNumber); @@ -614,8 +588,7 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Shut down the specified master cleanly - * - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. * @return the region server that was stopped */ public JVMClusterUtil.MasterThread stopMaster(int serverNumber) { @@ -624,47 +597,39 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Shut down the specified master cleanly - * - * @param serverNumber Used as index into a list. - * @param shutdownFS True is we are to shutdown the filesystem as part of this - * master's shutdown. Usually we do but you do not want to do this if - * you are running multiple master in a test and you shut down one - * before end of the test. + * @param serverNumber Used as index into a list. + * @param shutdownFS True is we are to shutdown the filesystem as part of this master's + * shutdown. Usually we do but you do not want to do this if you are running + * multiple master in a test and you shut down one before end of the test. * @return the master that was stopped */ - public JVMClusterUtil.MasterThread stopMaster(int serverNumber, - final boolean shutdownFS) { - JVMClusterUtil.MasterThread server = - hbaseCluster.getMasters().get(serverNumber); + public JVMClusterUtil.MasterThread stopMaster(int serverNumber, final boolean shutdownFS) { + JVMClusterUtil.MasterThread server = hbaseCluster.getMasters().get(serverNumber); LOG.info("Stopping " + server.toString()); server.getMaster().stop("Stopping master " + serverNumber); return server; } /** - * Wait for the specified master to stop. Removes this thread from list - * of running threads. - * @param serverNumber - * @return Name of master that just went down. + * Wait for the specified master to stop. Removes this thread from list of running threads. n + * * @return Name of master that just went down. */ public String waitOnMaster(final int serverNumber) { return this.hbaseCluster.waitOnMaster(serverNumber); } /** - * Blocks until there is an active master and that master has completed - * initialization. - * - * @return true if an active master becomes available. false if there are no - * masters left. - * @throws InterruptedException + * Blocks until there is an active master and that master has completed initialization. + * @return true if an active master becomes available. false if there are no masters left. 
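Taken together, the start/stop/wait methods above let a test roll individual servers; a hedged sketch, where the indices and timeouts are arbitrary and TEST_UTIL is assumed to be an HBaseTestingUtility with a running mini cluster:

    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();

    // Cleanly stop region server 0 and wait for its thread to exit.
    cluster.stopRegionServer(0);
    cluster.waitOnRegionServer(0);

    // Start a replacement and fail if the master has not registered it in time.
    cluster.startRegionServerAndWait(60_000);

    // The same pattern applies to masters.
    cluster.stopMaster(0);
    cluster.waitOnMaster(0);
    cluster.startMaster();
    cluster.waitForActiveAndReadyMaster(60_000);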
n */ @Override public boolean waitForActiveAndReadyMaster(long timeout) throws IOException { List mts; long start = EnvironmentEdgeManager.currentTime(); - while (!(mts = getMasterThreads()).isEmpty() - && (EnvironmentEdgeManager.currentTime() - start) < timeout) { + while ( + !(mts = getMasterThreads()).isEmpty() + && (EnvironmentEdgeManager.currentTime() - start) < timeout + ) { for (JVMClusterUtil.MasterThread mt : mts) { if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) { return true; @@ -712,8 +677,8 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getClusterMetrics()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getClusterMetrics()} instead. */ @Deprecated public ClusterStatus getClusterStatus() throws IOException { @@ -766,8 +731,7 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Call flushCache on all regions on all participating regionservers. - * @throws IOException + * Call flushCache on all regions on all participating regionservers. n */ public void compact(boolean major) throws IOException { for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) { @@ -780,8 +744,7 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Call flushCache on all regions of the specified table. - * @throws IOException + * Call flushCache on all regions of the specified table. n */ public void compact(TableName tableName, boolean major) throws IOException { for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) { @@ -803,8 +766,8 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * @return List of region server threads. Does not return the master even though it is also - * a region server. + * @return List of region server threads. Does not return the master even though it is also a + * region server. */ public List getRegionServerThreads() { return this.hbaseCluster.getRegionServers(); @@ -818,19 +781,15 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Grab a numbered region server of your choice. - * @param serverNumber - * @return region server + * Grab a numbered region server of your choice. n * @return region server */ public HRegionServer getRegionServer(int serverNumber) { return hbaseCluster.getRegionServer(serverNumber); } public HRegionServer getRegionServer(ServerName serverName) { - return hbaseCluster.getRegionServers().stream() - .map(t -> t.getRegionServer()) - .filter(r -> r.getServerName().equals(serverName)) - .findFirst().orElse(null); + return hbaseCluster.getRegionServers().stream().map(t -> t.getRegionServer()) + .filter(r -> r.getServerName().equals(serverName)).findFirst().orElse(null); } public List getRegions(byte[] tableName) { @@ -843,7 +802,7 @@ public class MiniHBaseCluster extends HBaseCluster { HRegionServer hrs = rst.getRegionServer(); for (Region region : hrs.getOnlineRegionsLocalContext()) { if (region.getTableDescriptor().getTableName().equals(tableName)) { - ret.add((HRegion)region); + ret.add((HRegion) region); } } } @@ -851,8 +810,8 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} - * of HRS carrying regionName. Returns -1 if none found. + * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} of HRS carrying + * regionName. Returns -1 if none found. 
*/ public int getServerWithMeta() { return getServerWith(HRegionInfo.FIRST_META_REGIONINFO.getRegionName()); @@ -861,13 +820,13 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Get the location of the specified region * @param regionName Name of the region in bytes - * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} - * of HRS carrying hbase:meta. Returns -1 if none found. + * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} of HRS carrying + * hbase:meta. Returns -1 if none found. */ public int getServerWith(byte[] regionName) { int index = -1; int count = 0; - for (JVMClusterUtil.RegionServerThread rst: getRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); if (!hrs.isStopped()) { Region region = hrs.getOnlineRegion(regionName); @@ -883,7 +842,7 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) - throws IOException { + throws IOException { // Assume there is only one master thread which is the active master. // If there are multiple master threads, the backup master threads // should hold some regions. Please refer to #countServedRegions @@ -901,9 +860,9 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Counts the total numbers of regions being served by the currently online - * region servers by asking each how many regions they have. Does not look - * at hbase:meta at all. Count includes catalog tables. + * Counts the total numbers of regions being served by the currently online region servers by + * asking each how many regions they have. Does not look at hbase:meta at all. Count includes + * catalog tables. * @return number of regions being served by all region servers */ public long countServedRegions() { @@ -918,8 +877,8 @@ public class MiniHBaseCluster extends HBaseCluster { } /** - * Do a simulated kill all masters and regionservers. Useful when it is - * impossible to bring the mini-cluster back for clean shutdown. + * Do a simulated kill all masters and regionservers. Useful when it is impossible to bring the + * mini-cluster back for clean shutdown. */ public void killAll() { // Do backups first. @@ -951,18 +910,17 @@ public class MiniHBaseCluster extends HBaseCluster { HRegionServer hrs = rst.getRegionServer(); for (Region region : hrs.getRegions(tableName)) { if (region.getTableDescriptor().getTableName().equals(tableName)) { - ret.add((HRegion)region); + ret.add((HRegion) region); } } } return ret; } - protected int getRegionServerIndex(ServerName serverName) { - //we have a small number of region servers, this should be fine for now. + // we have a small number of region servers, this should be fine for now. 
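Similarly, a short sketch of the lookup helpers above, useful when a test needs to target the server carrying a particular region (hbase:meta here); LOG and TEST_UTIL are assumed to exist in the enclosing test class:

    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();

    // Index into getRegionServerThreads() of the server carrying hbase:meta, or -1 if none.
    int metaIndex = cluster.getServerWithMeta();
    if (metaIndex != -1) {
      HRegionServer metaServer = cluster.getRegionServer(metaIndex);
      LOG.info("hbase:meta is on {}", metaServer.getServerName());
    }

    // Total regions served by all live region servers, catalog tables included.
    long served = cluster.countServedRegions();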
List servers = getRegionServerThreads(); - for (int i=0; i < servers.size(); i++) { + for (int i = 0; i < servers.size(); i++) { if (servers.get(i).getRegionServer().getServerName().equals(serverName)) { return i; } @@ -987,7 +945,7 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException { + throws IOException { return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 82db21268dd..bc488c5947c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -66,7 +66,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Basic mock region server services. Should only be instantiated by HBaseTestingUtility.b + * Basic mock region server services. Should only be instantiated by HBaseTestingUtility.b */ public class MockRegionServerServices implements RegionServerServices { protected static final Logger LOG = LoggerFactory.getLogger(MockRegionServerServices.class); @@ -92,7 +92,7 @@ public class MockRegionServerServices implements RegionServerServices { this.conf = (zkw == null ? new Configuration() : zkw.getConfiguration()); } - MockRegionServerServices(){ + MockRegionServerServices() { this(null, null); } @@ -224,7 +224,7 @@ public class MockRegionServerServices implements RegionServerServices { } public void setFileSystem(FileSystem hfs) { - this.hfs = (HFileSystem)hfs; + this.hfs = (HFileSystem) hfs; } @Override @@ -254,7 +254,7 @@ public class MockRegionServerServices implements RegionServerServices { @Override public void updateRegionFavoredNodesMapping(String encodedRegionName, - List favoredNodes) { + List favoredNodes) { } @Override @@ -311,7 +311,7 @@ public class MockRegionServerServices implements RegionServerServices { @Override public EntityLock regionLock(List regionInfos, String description, Abortable abort) - throws IOException { + throws IOException { return null; } @@ -370,8 +370,8 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public boolean reportFileArchivalForQuotas( - TableName tableName, Collection> archivedFiles) { + public boolean reportFileArchivalForQuotas(TableName tableName, + Collection> archivedFiles) { return true; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java index 5268d3d7b38..98d910278b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; @@ -32,8 +30,7 @@ import org.slf4j.LoggerFactory; public abstract class MultithreadedTestUtil { - private static final Logger LOG = - LoggerFactory.getLogger(MultithreadedTestUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(MultithreadedTestUtil.class); public static class TestContext { private final Configuration conf; @@ -50,7 +47,7 @@ public abstract class MultithreadedTestUtil { return conf; } - public synchronized boolean shouldRun() { + public synchronized boolean shouldRun() { return !stopped && err == null; } @@ -75,6 +72,7 @@ public abstract class MultithreadedTestUtil { } } } + private synchronized void checkException() throws Exception { if (err != null) { throw new RuntimeException("Deferred", err); @@ -109,8 +107,7 @@ public abstract class MultithreadedTestUtil { } /** - * A thread that can be added to a test context, and properly - * passes exceptions through. + * A thread that can be added to a test context, and properly passes exceptions through. */ public static abstract class TestThread extends Thread { protected final TestContext ctx; @@ -157,13 +154,16 @@ public abstract class MultithreadedTestUtil { } public abstract void doAnAction() throws Exception; - public void workDone() throws IOException {} + + public void workDone() throws IOException { + } } /** - * Verify that no assertions have failed inside a future. - * Used for unit tests that spawn threads. E.g., + * Verify that no assertions have failed inside a future. Used for unit tests that spawn threads. + * E.g., *

      + * *

          *   List<Future<Void>> results = Lists.newArrayList();
          *   Future<Void> f = executor.submit(new Callable<Void> {
      @@ -174,14 +174,14 @@ public abstract class MultithreadedTestUtil {
          *   results.add(f);
          *   assertOnFutures(results);
          * 
+   *
    * @param threadResults A list of futures
-   * @throws InterruptedException If interrupted when waiting for a result
-   * from one of the futures
-   * @throws ExecutionException If an exception other than AssertionError
-   * occurs inside any of the futures
+   * @throws InterruptedException If interrupted when waiting for a result from one of the futures
+   * @throws ExecutionException If an exception other than AssertionError occurs inside any of the
+   * futures
    */
   public static void assertOnFutures(List<Future<?>> threadResults)
-      throws InterruptedException, ExecutionException {
+    throws InterruptedException, ExecutionException {
     for (Future<?> threadResult : threadResults) {
       try {
         threadResult.get();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
index 97d326aa632..8a6347ce605 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -21,12 +20,10 @@ package org.apache.hadoop.hbase;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
-
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
  * Code shared by PE tests.
  */
@@ -40,28 +37,29 @@ public class PerformanceEvaluationCommons {
     }
   }
 
-  public static void assertKey(final byte [] expected, final ByteBuffer got) {
-    byte [] b = new byte[got.limit()];
+  public static void assertKey(final byte[] expected, final ByteBuffer got) {
+    byte[] b = new byte[got.limit()];
     got.get(b, 0, got.limit());
     assertKey(expected, b);
   }
 
-  public static void assertKey(final byte [] expected, final Cell c) {
+  public static void assertKey(final byte[] expected, final Cell c) {
     assertKey(expected, c.getRowArray(), c.getRowOffset(), c.getRowLength());
   }
 
-  public static void assertKey(final byte [] expected, final byte [] got) {
+  public static void assertKey(final byte[] expected, final byte[] got) {
     assertKey(expected, got, 0, got.length);
   }
 
-  public static void assertKey(final byte [] expected, final byte [] gotArray,
-      final int gotArrayOffset, final int gotArrayLength) {
-    if (!org.apache.hadoop.hbase.util.Bytes.equals(expected, 0, expected.length,
-      gotArray, gotArrayOffset, gotArrayLength)) {
-      throw new AssertionError("Expected " +
-        org.apache.hadoop.hbase.util.Bytes.toString(expected) +
-        " but got " +
-        org.apache.hadoop.hbase.util.Bytes.toString(gotArray, gotArrayOffset, gotArrayLength));
+  public static void assertKey(final byte[] expected, final byte[] gotArray,
+    final int gotArrayOffset, final int gotArrayLength) {
+    if (
+      !org.apache.hadoop.hbase.util.Bytes.equals(expected, 0, expected.length, gotArray,
+        gotArrayOffset, gotArrayLength)
+    ) {
+      throw new AssertionError(
+        "Expected " + org.apache.hadoop.hbase.util.Bytes.toString(expected) + " but got "
+          + org.apache.hadoop.hbase.util.Bytes.toString(gotArray, gotArrayOffset, gotArrayLength));
     }
   }
 
@@ -72,10 +70,10 @@ public class PerformanceEvaluationCommons {
     for (int i = 0; i < count; i++) {
       threads.add(new Thread(r, "concurrentRead-" + i));
     }
-    for (Thread t: threads) {
+    for (Thread t : threads) {
       t.start();
     }
-    for (Thread t: threads) {
+    for (Thread t : threads) {
       try {
         t.join();
       } catch (InterruptedException e) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java
index ff23d85616b..beb1ad96fcf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -39,7 +39,7 @@ public final class ProcedureTestUtil {
   }
 
   private static Optional<JsonObject> getProcedure(HBaseTestingUtility util,
-      Class<? extends Procedure<?>> clazz, JsonParser parser) throws IOException {
+    Class<? extends Procedure<?>> clazz, JsonParser parser) throws IOException {
     JsonArray array = parser.parse(util.getAdmin().getProcedures()).getAsJsonArray();
     Iterator<JsonElement> iterator = array.iterator();
     while (iterator.hasNext()) {
@@ -54,7 +54,7 @@ public final class ProcedureTestUtil {
   }
 
   public static void waitUntilProcedureWaitingTimeout(HBaseTestingUtility util,
-      Class<? extends Procedure<?>> clazz, long timeout) throws IOException {
+    Class<? extends Procedure<?>> clazz, long timeout) throws IOException {
     JsonParser parser = new JsonParser();
     util.waitFor(timeout,
       () -> getProcedure(util, clazz, parser)
@@ -63,7 +63,7 @@ public final class ProcedureTestUtil {
   }
 
   public static void waitUntilProcedureTimeoutIncrease(HBaseTestingUtility util,
-      Class<? extends Procedure<?>> clazz, int times) throws IOException, InterruptedException {
+    Class<? extends Procedure<?>> clazz, int times) throws IOException, InterruptedException {
     JsonParser parser = new JsonParser();
     long oldTimeout = 0;
     int timeoutIncrements = 0;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java
index 23c2ad8bc50..62297b247d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,25 +17,27 @@
  */
 package org.apache.hadoop.hbase;
 
-import org.apache.hbase.thirdparty.com.google.protobuf.Message;
+import static org.junit.Assert.assertEquals;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
 import org.apache.hadoop.hbase.regionserver.AnnotationReadingPriorityFunction;
 import org.apache.hadoop.hbase.security.User;
-import static org.junit.Assert.assertEquals;
+import org.apache.hbase.thirdparty.com.google.protobuf.Message;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
 
 public class QosTestHelper {
   protected void checkMethod(Configuration conf, final String methodName, final int expected,
-      final AnnotationReadingPriorityFunction qosf) {
+    final AnnotationReadingPriorityFunction qosf) {
     checkMethod(conf, methodName, expected, qosf, null);
   }
 
   protected void checkMethod(Configuration conf, final String methodName, final int expected,
-      final AnnotationReadingPriorityFunction qosf, final Message param) {
+    final AnnotationReadingPriorityFunction qosf, final Message param) {
     RPCProtos.RequestHeader.Builder builder = RPCProtos.RequestHeader.newBuilder();
     builder.setMethodName(methodName);
     assertEquals(methodName, expected, qosf.getPriority(builder.build(), param,
-        User.createUserForTesting(conf, "someuser", new String[]{"somegroup"})));
+      User.createUserForTesting(conf, "someuser", new String[] { "somegroup" })));
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartMiniClusterOption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartMiniClusterOption.java
index 5ffd8f09121..e8ef17e8c7c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartMiniClusterOption.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartMiniClusterOption.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * The options include HDFS options to build mini dfs cluster, Zookeeper options to build mini zk
  * cluster, and mostly HBase options to build mini hbase cluster. To create an object, use a
  * {@link Builder}. Example usage:
- * 
+ *
  * <pre>
        *    StartMiniClusterOption option = StartMiniClusterOption.builder().
        *        .numMasters(3).rsClass(MyRegionServer.class).createWALDir(true).build();
      @@ -104,9 +104,9 @@ public final class StartMiniClusterOption {
          * Private constructor. Use {@link Builder#build()}.
          */
         private StartMiniClusterOption(int numMasters, int numAlwaysStandByMasters,
-      Class<? extends HMaster> masterClass, int numRegionServers, List<Integer> rsPorts,
-      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, int numDataNodes,
-      String[] dataNodeHosts, int numZkServers, boolean createRootDir, boolean createWALDir) {
+    Class<? extends HMaster> masterClass, int numRegionServers, List<Integer> rsPorts,
+    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, int numDataNodes,
+    String[] dataNodeHosts, int numZkServers, boolean createRootDir, boolean createWALDir) {
           this.numMasters = numMasters;
           this.numAlwaysStandByMasters = numAlwaysStandByMasters;
           this.masterClass = masterClass;
      @@ -167,10 +167,10 @@ public final class StartMiniClusterOption {
         @Override
         public String toString() {
           return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass
      -        + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts)
      -        + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts="
      -        + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir="
      -        + createRootDir + ", createWALDir=" + createWALDir + '}';
      +      + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts)
      +      + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts="
      +      + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir="
      +      + createRootDir + ", createWALDir=" + createWALDir + '}';
         }
       
         /**
      @@ -206,8 +206,8 @@ public final class StartMiniClusterOption {
               numDataNodes = dataNodeHosts.length;
             }
             return new StartMiniClusterOption(numMasters, numAlwaysStandByMasters, masterClass,
      -          numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers,
      -          createRootDir, createWALDir);
      +        numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers,
      +        createRootDir, createWALDir);
           }
       
           public Builder numMasters(int numMasters) {
      diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java
      index ff770815dd2..8e7f924ce10 100644
      --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java
      +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java
      @@ -1,4 +1,4 @@
      -/**
      +/*
        * Licensed to the Apache Software Foundation (ASF) under one
        * or more contributor license agreements.  See the NOTICE file
        * distributed with this work for additional information
      @@ -26,7 +26,7 @@ public class TestAcidGuaranteesWithAdaptivePolicy extends AcidGuaranteesTestBase
       
         @ClassRule
         public static final HBaseClassTestRule CLASS_RULE =
      -      HBaseClassTestRule.forClass(TestAcidGuaranteesWithAdaptivePolicy.class);
      +    HBaseClassTestRule.forClass(TestAcidGuaranteesWithAdaptivePolicy.class);
       
         @Override
         protected MemoryCompactionPolicy getMemoryCompactionPolicy() {
      diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java
      index 02c6a98a7a4..5f1b5e0e41f 100644
      --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java
      +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java
      @@ -1,4 +1,4 @@
      -/**
      +/*
        * Licensed to the Apache Software Foundation (ASF) under one
        * or more contributor license agreements.  See the NOTICE file
        * distributed with this work for additional information
      @@ -26,7 +26,7 @@ public class TestAcidGuaranteesWithBasicPolicy extends AcidGuaranteesTestBase {
       
         @ClassRule
         public static final HBaseClassTestRule CLASS_RULE =
      -      HBaseClassTestRule.forClass(TestAcidGuaranteesWithBasicPolicy.class);
      +    HBaseClassTestRule.forClass(TestAcidGuaranteesWithBasicPolicy.class);
       
         @Override
         protected MemoryCompactionPolicy getMemoryCompactionPolicy() {
      diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java
      index 5a9f2eebe8e..e1d5b55cf52 100644
      --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java
      +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java
      @@ -1,4 +1,4 @@
      -/**
      +/*
        * Licensed to the Apache Software Foundation (ASF) under one
        * or more contributor license agreements.  See the NOTICE file
        * distributed with this work for additional information
      diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java
      index 74374b0b018..5768016ba6a 100644
      --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java
      +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java
      @@ -1,4 +1,4 @@
      -/**
      +/*
        * Licensed to the Apache Software Foundation (ASF) under one
        * or more contributor license agreements.  See the NOTICE file
        * distributed with this work for additional information
      diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
      index c146685699e..09f3de9c0fc 100644
      --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
      +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
      @@ -18,6 +18,7 @@
       package org.apache.hadoop.hbase;
       
       import static org.junit.Assert.assertEquals;
      +
       import org.apache.hadoop.conf.Configuration;
       import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
       import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
      @@ -34,7 +35,7 @@ import org.junit.experimental.categories.Category;
       public class TestCachedClusterId {
         @ClassRule
         public static final HBaseClassTestRule CLASS_RULE =
      -      HBaseClassTestRule.forClass(TestCachedClusterId.class);
      +    HBaseClassTestRule.forClass(TestCachedClusterId.class);
       
         private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
       
      @@ -44,6 +45,7 @@ public class TestCachedClusterId {
       
         private static class GetClusterIdThread extends TestThread {
           CachedClusterId cachedClusterId;
      +
           public GetClusterIdThread(TestContext ctx, CachedClusterId clusterId) {
             super(ctx);
             cachedClusterId = clusterId;
      @@ -76,8 +78,8 @@ public class TestCachedClusterId {
         @Test
         public void testMultiThreadedGetClusterId() throws Exception {
           Configuration conf = TEST_UTIL.getConfiguration();
      -    CachedClusterId cachedClusterId = new CachedClusterId(TEST_UTIL.getHBaseCluster().getMaster(),
      -      conf);
      +    CachedClusterId cachedClusterId =
      +      new CachedClusterId(TEST_UTIL.getHBaseCluster().getMaster(), conf);
           TestContext context = new TestContext(conf);
           int numThreads = 16;
           for (int i = 0; i < numThreads; i++) {
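
The hunks above (HBaseClassTestRule initializers, the CachedClusterId constructor call, the regrouped imports in TestCachedClusterId and QosTestHelper) keep re-wrapping the same two things: continuation lines now indent exactly two spaces past the declaration, and static imports sit in their own block separated by a blank line. A minimal, JDK-only sketch of the resulting layout, using made-up names rather than HBase classes:

// Hypothetical class, not part of the patch; it only mirrors the layout the formatter produces.
import static java.util.Collections.unmodifiableList;

import java.util.Arrays;
import java.util.List;

public class ContinuationIndentExample {

  // A wrapped field initializer continues two spaces past the declaration's indent,
  // matching the HBaseClassTestRule and CachedClusterId re-wraps in the hunks above.
  private static final List<String> REGION_SERVER_NAMES =
    unmodifiableList(Arrays.asList("rs-1", "rs-2", "rs-3"));

  // A wrapped parameter list also continues with a two-space indent.
  public static String describe(final String clusterId, final int regionServers,
    final int dataNodes) {
    return "cluster=" + clusterId + ", regionServers=" + regionServers + ", dataNodes="
      + dataNodes;
  }

  public static void main(String[] args) {
    System.out.println(describe("test-cluster", REGION_SERVER_NAMES.size(), 3));
  }
}
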
      diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java
      index 7cf9b6c012e..3d3ca12bd82 100644
      --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java
      +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java
      @@ -1,4 +1,4 @@
      -/**
      +/*
        * Licensed to the Apache Software Foundation (ASF) under one
        * or more contributor license agreements.  See the NOTICE file
        * distributed with this work for additional information
      @@ -18,6 +18,7 @@
       package org.apache.hadoop.hbase;
       
       import static org.junit.Assert.assertTrue;
      +
       import java.util.List;
       import org.apache.hadoop.hbase.testclassification.MiscTests;
       import org.apache.hadoop.hbase.testclassification.SmallTests;
      @@ -28,12 +29,12 @@ import org.junit.experimental.categories.Category;
       /**
        * Checks tests are categorized.
        */
      -@Category({MiscTests.class, SmallTests.class})
      +@Category({ MiscTests.class, SmallTests.class })
       public class TestCheckTestClasses {
       
         @ClassRule
         public static final HBaseClassTestRule CLASS_RULE =
      -      HBaseClassTestRule.forClass(TestCheckTestClasses.class);
      +    HBaseClassTestRule.forClass(TestCheckTestClasses.class);
       
         /**
          * Throws an assertion if we find a test class without category (small/medium/large/integration).
      @@ -48,7 +49,7 @@ public class TestCheckTestClasses {
               badClasses.add(c);
             }
           }
      -    assertTrue("There are " + badClasses.size() + " test classes without category: "
      -      + badClasses, badClasses.isEmpty());
      +    assertTrue("There are " + badClasses.size() + " test classes without category: " + badClasses,
      +      badClasses.isEmpty());
         }
       }
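
Two other changes recur through this section: array initializers gain spaces inside the braces (the String[] { "somegroup" } argument in QosTestHelper, the @Category({ MiscTests.class, SmallTests.class }) annotation above), and long boolean conditions are wrapped so the condition sits on its own indented lines inside "if ( ... )" (the Bytes.equals check in PerformanceEvaluationCommons). A small, self-contained sketch with hypothetical names:

// Hypothetical class, not part of the patch; it only mirrors those two recurring changes.
public class WrappedConditionExample {

  public static boolean hasCategory(final String[] categories, final String small,
    final String medium) {
    // A long condition ends up on its own indented lines inside "if ( ... )".
    if (
      categories != null && categories.length > 0
        && (categories[0].equals(small) || categories[0].equals(medium))
    ) {
      return true;
    }
    return false;
  }

  public static void main(String[] args) {
    // Array initializers are written as "{ item, item }" with inner spaces.
    String[] categories = new String[] { "MiscTests", "SmallTests" };
    System.out.println(hasCategory(categories, "SmallTests", "MediumTests"));
  }
}
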
      diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
      index a8e52808278..4d327d2e44f 100644
      --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
      +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
      @@ -1,4 +1,4 @@
      -/**
      +/*
        * Licensed to the Apache Software Foundation (ASF) under one
        * or more contributor license agreements.  See the NOTICE file
        * distributed with this work for additional information
      @@ -25,7 +25,6 @@ import java.util.Map;
       import java.util.Optional;
       import java.util.concurrent.CompletableFuture;
       import java.util.concurrent.atomic.AtomicInteger;
      -
       import org.apache.hadoop.conf.Configuration;
       import org.apache.hadoop.hbase.ClusterMetrics.Option;
       import org.apache.hadoop.hbase.Waiter.Predicate;
      @@ -70,7 +69,7 @@ public class TestClientClusterMetrics {
       
         @ClassRule
         public static final HBaseClassTestRule CLASS_RULE =
      -      HBaseClassTestRule.forClass(TestClientClusterMetrics.class);
      +    HBaseClassTestRule.forClass(TestClientClusterMetrics.class);
       
         private static HBaseTestingUtility UTIL;
         private static Admin ADMIN;
      @@ -86,9 +85,9 @@ public class TestClientClusterMetrics {
           public MyRegionServer(Configuration conf) throws IOException, InterruptedException {
             super(conf);
           }
      +
           @Override
      -    public void tryRegionServerReport(long reportStartTime, long reportEndTime)
      -        throws IOException {
      +    public void tryRegionServerReport(long reportStartTime, long reportEndTime) throws IOException {
             super.tryRegionServerReport(reportStartTime, reportEndTime);
           }
         }
      @@ -98,8 +97,8 @@ public class TestClientClusterMetrics {
           Configuration conf = HBaseConfiguration.create();
           conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
           UTIL = new HBaseTestingUtility(conf);
      -    StartMiniClusterOption option = StartMiniClusterOption.builder()
      -        .rsClass(TestClientClusterMetrics.MyRegionServer.class)
      +    StartMiniClusterOption option =
      +      StartMiniClusterOption.builder().rsClass(TestClientClusterMetrics.MyRegionServer.class)
               .numMasters(MASTERS).numRegionServers(SLAVES).numDataNodes(SLAVES).build();
           UTIL.startMiniCluster(option);
           CLUSTER = UTIL.getHBaseCluster();
      @@ -123,11 +122,11 @@ public class TestClientClusterMetrics {
           Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
           Assert.assertEquals(origin.getAverageLoad(), defaults.getAverageLoad(), 0);
           Assert.assertEquals(origin.getBackupMasterNames().size(),
      -        defaults.getBackupMasterNames().size());
      +      defaults.getBackupMasterNames().size());
           Assert.assertEquals(origin.getDeadServerNames().size(), defaults.getDeadServerNames().size());
           Assert.assertEquals(origin.getRegionCount(), defaults.getRegionCount());
           Assert.assertEquals(origin.getLiveServerMetrics().size(),
      -        defaults.getLiveServerMetrics().size());
      +      defaults.getLiveServerMetrics().size());
           Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
           Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
           Assert.assertEquals(ADMIN.getRegionServers().size(), defaults.getServersName().size());
      @@ -135,11 +134,10 @@ public class TestClientClusterMetrics {
       
         @Test
         public void testAsyncClient() throws Exception {
      -    try (AsyncConnection asyncConnect = ConnectionFactory.createAsyncConnection(
      -      UTIL.getConfiguration()).get()) {
      +    try (AsyncConnection asyncConnect =
      +      ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
             AsyncAdmin asyncAdmin = asyncConnect.getAdmin();
-      CompletableFuture<ClusterMetrics> originFuture =
-        asyncAdmin.getClusterMetrics();
+      CompletableFuture<ClusterMetrics> originFuture = asyncAdmin.getClusterMetrics();
       CompletableFuture<ClusterMetrics> defaultsFuture =
               asyncAdmin.getClusterMetrics(EnumSet.allOf(Option.class));
             ClusterMetrics origin = originFuture.get();
      @@ -158,8 +156,8 @@ public class TestClientClusterMetrics {
             Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
             Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
             origin.getTableRegionStatesCount().forEach(((tableName, regionStatesCount) -> {
      -        RegionStatesCount defaultRegionStatesCount = defaults.getTableRegionStatesCount()
      -          .get(tableName);
      +        RegionStatesCount defaultRegionStatesCount =
      +          defaults.getTableRegionStatesCount().get(tableName);
               Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
             }));
           }
      @@ -188,11 +186,11 @@ public class TestClientClusterMetrics {
           });
           // Retrieve live servers and dead servers info.
           EnumSet